From 6f231dda68080759f1aed3769896e94c73099f0f Mon Sep 17 00:00:00 2001 From: Dan Williams <dan.j.williams@intel.com> Date: Sat, 2 Jul 2011 22:56:22 -0700 Subject: isci: Intel(R) C600 Series Chipset Storage Control Unit Driver Support for the up to 2x4-port 6Gb/s SAS controllers embedded in the chipset. This is a snapshot of the first publicly available version of the driver, commit 4c1db2d0 in the 'historical' branch. git://git.kernel.org/pub/scm/linux/kernel/git/djbw/isci.git historical Signed-off-by: Maciej Trela <maciej.trela@intel.com> Signed-off-by: Dave Jiang <dave.jiang@intel.com> Signed-off-by: Edmund Nadolski <edmund.nadolski@intel.com> Signed-off-by: Dan Williams <dan.j.williams@intel.com> --- drivers/scsi/isci/request.h | 429 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 429 insertions(+) create mode 100644 drivers/scsi/isci/request.h (limited to 'drivers/scsi/isci/request.h') diff --git a/drivers/scsi/isci/request.h b/drivers/scsi/isci/request.h new file mode 100644 index 00000000000..5079d4a7c41 --- /dev/null +++ b/drivers/scsi/isci/request.h @@ -0,0 +1,429 @@ +/* + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * BSD LICENSE + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#if !defined(_ISCI_REQUEST_H_) +#define _ISCI_REQUEST_H_ + +#include "isci.h" + +/** + * struct isci_request_status - This enum defines the possible states of an I/O + * request. + * + * + */ +enum isci_request_status { + unallocated = 0x00, + allocated = 0x01, + started = 0x02, + completed = 0x03, + aborting = 0x04, + aborted = 0x05, + terminating = 0x06 +}; + +enum task_type { + io_task = 0, + tmf_task = 1 +}; + +/** + * struct isci_request - This class represents the request object used to track + * IO, smp and TMF request internal. It wraps the SCIC request object. + * + * + */ +struct isci_request { + + struct scic_sds_request *sci_request_handle; + + enum isci_request_status status; + enum task_type ttype; + unsigned short io_tag; + bool complete_in_target; + + union ttype_ptr_union { + struct sas_task *io_task_ptr; /* When ttype==io_task */ + struct isci_tmf *tmf_task_ptr; /* When ttype==tmf_task */ + } ttype_ptr; + struct isci_host *isci_host; + struct isci_remote_device *isci_device; + /* For use in the requests_to_{complete|abort} lists: */ + struct list_head completed_node; + /* For use in the reqs_in_process list: */ + struct list_head dev_node; + void *sci_request_mem_ptr; + spinlock_t state_lock; + dma_addr_t request_daddr; + dma_addr_t zero_scatter_daddr; + + unsigned int num_sg_entries; /* returned by pci_alloc_sg */ + unsigned int request_alloc_size; /* size of block from dma_pool_alloc */ + + /** Note: "io_request_completion" is completed in two different ways + * depending on whether this is a TMF or regular request. + * - TMF requests are completed in the thread that started them; + * - regular requests are completed in the request completion callback + * function. + * This difference in operation allows the aborter of a TMF request + * to be sure that once the TMF request completes, the I/O that the + * TMF was aborting is guaranteed to have completed. + */ + struct completion *io_request_completion; +}; + +/** + * This function gets the status of the request object. + * @request: This parameter points to the isci_request object + * + * status of the object as a isci_request_status enum. + */ +static inline +enum isci_request_status isci_request_get_state( + struct isci_request *isci_request) +{ + BUG_ON(isci_request == NULL); + + /*probably a bad sign... */ + if (isci_request->status == unallocated) + dev_warn(&isci_request->isci_host->pdev->dev, + "%s: isci_request->status == unallocated\n", + __func__); + + return isci_request->status; +} + + +/** + * isci_request_change_state() - This function sets the status of the request + * object. 
+ * @request: This parameter points to the isci_request object + * @status: This Parameter is the new status of the object + * + */ +static inline enum isci_request_status isci_request_change_state( + struct isci_request *isci_request, + enum isci_request_status status) +{ + enum isci_request_status old_state; + unsigned long flags; + + dev_dbg(&isci_request->isci_host->pdev->dev, + "%s: isci_request = %p, state = 0x%x\n", + __func__, + isci_request, + status); + + BUG_ON(isci_request == NULL); + + spin_lock_irqsave(&isci_request->state_lock, flags); + old_state = isci_request->status; + isci_request->status = status; + spin_unlock_irqrestore(&isci_request->state_lock, flags); + + return old_state; +} + +/** + * isci_request_change_started_to_newstate() - This function sets the status of + * the request object. + * @request: This parameter points to the isci_request object + * @status: This Parameter is the new status of the object + * + * state previous to any change. + */ +static inline enum isci_request_status isci_request_change_started_to_newstate( + struct isci_request *isci_request, + struct completion *completion_ptr, + enum isci_request_status newstate) +{ + enum isci_request_status old_state; + unsigned long flags; + + BUG_ON(isci_request == NULL); + + spin_lock_irqsave(&isci_request->state_lock, flags); + + old_state = isci_request->status; + + if (old_state == started) { + BUG_ON(isci_request->io_request_completion != NULL); + + isci_request->io_request_completion = completion_ptr; + isci_request->status = newstate; + } + spin_unlock_irqrestore(&isci_request->state_lock, flags); + + dev_dbg(&isci_request->isci_host->pdev->dev, + "%s: isci_request = %p, old_state = 0x%x\n", + __func__, + isci_request, + old_state); + + return old_state; +} + +/** + * isci_request_change_started_to_aborted() - This function sets the status of + * the request object. + * @request: This parameter points to the isci_request object + * @completion_ptr: This parameter is saved as the kernel completion structure + * signalled when the old request completes. + * + * state previous to any change. + */ +static inline enum isci_request_status isci_request_change_started_to_aborted( + struct isci_request *isci_request, + struct completion *completion_ptr) +{ + return isci_request_change_started_to_newstate( + isci_request, completion_ptr, aborted + ); +} +/** + * isci_request_free() - This function frees the request object. + * @isci_host: This parameter specifies the ISCI host object + * @isci_request: This parameter points to the isci_request object + * + */ +static inline void isci_request_free( + struct isci_host *isci_host, + struct isci_request *isci_request) +{ + BUG_ON(isci_request == NULL); + + /* release the dma memory if we fail. 
*/ + dma_pool_free(isci_host->dma_pool, isci_request, + isci_request->request_daddr); +} + + +/* #define ISCI_REQUEST_VALIDATE_ACCESS + */ + +#ifdef ISCI_REQUEST_VALIDATE_ACCESS + +static inline +struct sas_task *isci_request_access_task(struct isci_request *isci_request) +{ + BUG_ON(isci_request->ttype != io_task); + return isci_request->ttype_ptr.io_task_ptr; +} + +static inline +struct isci_tmf *isci_request_access_tmf(struct isci_request *isci_request) +{ + BUG_ON(isci_request->ttype != tmf_task); + return isci_request->ttype_ptr.tmf_task_ptr; +} + +#else /* not ISCI_REQUEST_VALIDATE_ACCESS */ + +#define isci_request_access_task(RequestPtr) \ + ((RequestPtr)->ttype_ptr.io_task_ptr) + +#define isci_request_access_tmf(RequestPtr) \ + ((RequestPtr)->ttype_ptr.tmf_task_ptr) + +#endif /* not ISCI_REQUEST_VALIDATE_ACCESS */ + + +int isci_request_alloc_tmf( + struct isci_host *isci_host, + struct isci_tmf *isci_tmf, + struct isci_request **isci_request, + struct isci_remote_device *isci_device, + gfp_t gfp_flags); + + +int isci_request_execute( + struct isci_host *isci_host, + struct sas_task *task, + struct isci_request **request, + gfp_t gfp_flags); + +/** + * isci_request_unmap_sgl() - This function unmaps the DMA address of a given + * sgl + * @request: This parameter points to the isci_request object + * @*pdev: This Parameter is the pci_device struct for the controller + * + */ +static inline void isci_request_unmap_sgl( + struct isci_request *request, + struct pci_dev *pdev) +{ + struct sas_task *task = isci_request_access_task(request); + + dev_dbg(&request->isci_host->pdev->dev, + "%s: request = %p, task = %p,\n" + "task->data_dir = %d, is_sata = %d\n ", + __func__, + request, + task, + task->data_dir, + sas_protocol_ata(task->task_proto)); + + if ((task->data_dir != PCI_DMA_NONE) && + !sas_protocol_ata(task->task_proto)) { + if (task->num_scatter == 0) + /* 0 indicates a single dma address */ + dma_unmap_single( + &pdev->dev, + request->zero_scatter_daddr, + task->total_xfer_len, + task->data_dir + ); + + else /* unmap the sgl dma addresses */ + dma_unmap_sg( + &pdev->dev, + task->scatter, + request->num_sg_entries, + task->data_dir + ); + } +} + + +void isci_request_io_request_complete( + struct isci_host *isci_host, + struct isci_request *request, + enum sci_io_status completion_status); + +u32 isci_request_io_request_get_transfer_length( + struct isci_request *request); + +SCI_IO_REQUEST_DATA_DIRECTION isci_request_io_request_get_data_direction( + struct isci_request *request); + +/** + * isci_request_io_request_get_next_sge() - This function is called by the sci + * core to retrieve the next sge for a given request. + * @request: This parameter is the isci_request object. + * @current_sge_address: This parameter is the last sge retrieved by the sci + * core for this request. + * + * pointer to the next sge for specified request. + */ +static inline void *isci_request_io_request_get_next_sge( + struct isci_request *request, + void *current_sge_address) +{ + struct sas_task *task = isci_request_access_task(request); + void *ret = NULL; + + dev_dbg(&request->isci_host->pdev->dev, + "%s: request = %p, " + "current_sge_address = %p, " + "num_scatter = %d\n", + __func__, + request, + current_sge_address, + task->num_scatter); + + if (!current_sge_address) /* First time through.. */ + ret = task->scatter; /* always task->scatter */ + else if (task->num_scatter == 0) /* Next element, if num_scatter == 0 */ + ret = NULL; /* there is only one element. 
*/ + else + ret = sg_next(current_sge_address); /* sg_next returns NULL + * for the last element + */ + + dev_dbg(&request->isci_host->pdev->dev, + "%s: next sge address = %p\n", + __func__, + ret); + + return ret; +} + +dma_addr_t isci_request_sge_get_address_field( + struct isci_request *request, + void *sge_address); + +u32 isci_request_sge_get_length_field( + struct isci_request *request, + void *sge_address); + +void *isci_request_ssp_io_request_get_cdb_address( + struct isci_request *request); + +u32 isci_request_ssp_io_request_get_cdb_length( + struct isci_request *request); + +u32 isci_request_ssp_io_request_get_lun( + struct isci_request *request); + +u32 isci_request_ssp_io_request_get_task_attribute( + struct isci_request *request); + +u32 isci_request_ssp_io_request_get_command_priority( + struct isci_request *request); + + + + + +void isci_terminate_pending_requests( + struct isci_host *isci_host, + struct isci_remote_device *isci_device, + enum isci_request_status new_request_state); + + + + +#endif /* !defined(_ISCI_REQUEST_H_) */ -- cgit v1.2.3-70-g09d2 From 82d29928c1c1c6a6605895f8240a9943394244d7 Mon Sep 17 00:00:00 2001 From: Dan Williams <dan.j.williams@intel.com> Date: Tue, 8 Feb 2011 17:53:10 -0800 Subject: isci: kill SCI_IO_REQUEST_DATA_DIRECTION It's an unnecessary typedef that mirrors the kernel's enum dma_data_direction. Also cleanup some long variable names along the way. Signed-off-by: Dan Williams <dan.j.williams@intel.com> --- drivers/scsi/isci/core/sci_types.h | 7 --- drivers/scsi/isci/core/scic_sds_request.c | 88 ++++++++++++--------------- drivers/scsi/isci/core/scic_sds_stp_request.c | 62 ++++++++----------- drivers/scsi/isci/core/scic_sds_stp_request.h | 5 +- drivers/scsi/isci/core/scic_user_callback.h | 5 +- drivers/scsi/isci/deprecated.c | 10 +-- drivers/scsi/isci/request.c | 36 +---------- drivers/scsi/isci/request.h | 3 +- 8 files changed, 73 insertions(+), 143 deletions(-) (limited to 'drivers/scsi/isci/request.h') diff --git a/drivers/scsi/isci/core/sci_types.h b/drivers/scsi/isci/core/sci_types.h index e15dc0c89e5..fda3680546e 100644 --- a/drivers/scsi/isci/core/sci_types.h +++ b/drivers/scsi/isci/core/sci_types.h @@ -61,13 +61,6 @@ #define sci_cb_make_physical_address(physical_addr, addr_upper, addr_lower) \ ((physical_addr) = (addr_lower) | ((u64)addr_upper) << 32) -typedef enum { - SCI_IO_REQUEST_DATA_IN = 0, /* Read operation */ - SCI_IO_REQUEST_DATA_OUT, /* Write operation */ - SCI_IO_REQUEST_NO_DATA -} SCI_IO_REQUEST_DATA_DIRECTION; - - enum sci_controller_mode { SCI_MODE_SPEED, /* Optimized for performance */ SCI_MODE_SIZE /* Optimized for memory use */ diff --git a/drivers/scsi/isci/core/scic_sds_request.c b/drivers/scsi/isci/core/scic_sds_request.c index c696d246ea5..7c5b61bdee6 100644 --- a/drivers/scsi/isci/core/scic_sds_request.c +++ b/drivers/scsi/isci/core/scic_sds_request.c @@ -588,34 +588,34 @@ static void scu_ssp_reqeust_construct_task_context( * */ static void scu_ssp_io_request_construct_task_context( - struct scic_sds_request *this_request, - SCI_IO_REQUEST_DATA_DIRECTION data_direction, - u32 transfer_length_bytes) + struct scic_sds_request *sci_req, + enum dma_data_direction dir, + u32 len) { struct scu_task_context *task_context; - task_context = scic_sds_request_get_task_context(this_request); + task_context = scic_sds_request_get_task_context(sci_req); - scu_ssp_reqeust_construct_task_context(this_request, task_context); + scu_ssp_reqeust_construct_task_context(sci_req, task_context); task_context->ssp_command_iu_length = 
sizeof(struct sci_ssp_command_iu) / sizeof(u32); task_context->type.ssp.frame_type = SCI_SAS_COMMAND_FRAME; - switch (data_direction) { - case SCI_IO_REQUEST_DATA_IN: - case SCI_IO_REQUEST_NO_DATA: + switch (dir) { + case DMA_FROM_DEVICE: + case DMA_NONE: + default: task_context->task_type = SCU_TASK_TYPE_IOREAD; break; - case SCI_IO_REQUEST_DATA_OUT: + case DMA_TO_DEVICE: task_context->task_type = SCU_TASK_TYPE_IOWRITE; break; } - task_context->transfer_length_bytes = transfer_length_bytes; + task_context->transfer_length_bytes = len; - if (task_context->transfer_length_bytes > 0) { - scic_sds_request_build_sgl(this_request); - } + if (task_context->transfer_length_bytes > 0) + scic_sds_request_build_sgl(sci_req); } @@ -694,37 +694,35 @@ static void scu_ssp_task_request_construct_task_context( * * enum sci_status */ -static enum sci_status scic_io_request_construct_sata( - struct scic_sds_request *this_request, - u8 sat_protocol, - u32 transfer_length, - SCI_IO_REQUEST_DATA_DIRECTION data_direction, - bool copy_rx_frame) +static enum sci_status scic_io_request_construct_sata(struct scic_sds_request *sci_req, + u8 proto, u32 len, + enum dma_data_direction dir, + bool copy) { enum sci_status status = SCI_SUCCESS; - switch (sat_protocol) { + switch (proto) { case SAT_PROTOCOL_PIO_DATA_IN: case SAT_PROTOCOL_PIO_DATA_OUT: - status = scic_sds_stp_pio_request_construct(this_request, sat_protocol, copy_rx_frame); + status = scic_sds_stp_pio_request_construct(sci_req, proto, copy); break; case SAT_PROTOCOL_UDMA_DATA_IN: case SAT_PROTOCOL_UDMA_DATA_OUT: - status = scic_sds_stp_udma_request_construct(this_request, transfer_length, data_direction); + status = scic_sds_stp_udma_request_construct(sci_req, len, dir); break; case SAT_PROTOCOL_ATA_HARD_RESET: case SAT_PROTOCOL_SOFT_RESET: - status = scic_sds_stp_soft_reset_request_construct(this_request); + status = scic_sds_stp_soft_reset_request_construct(sci_req); break; case SAT_PROTOCOL_NON_DATA: - status = scic_sds_stp_non_data_request_construct(this_request); + status = scic_sds_stp_non_data_request_construct(sci_req); break; case SAT_PROTOCOL_FPDMA: - status = scic_sds_stp_ncq_request_construct(this_request, transfer_length, data_direction); + status = scic_sds_stp_ncq_request_construct(sci_req, len, dir); break; #if !defined(DISABLE_ATAPI) @@ -733,7 +731,7 @@ static enum sci_status scic_io_request_construct_sata( case SAT_PROTOCOL_PACKET_DMA_DATA_OUT: case SAT_PROTOCOL_PACKET_PIO_DATA_IN: case SAT_PROTOCOL_PACKET_PIO_DATA_OUT: - status = scic_sds_stp_packet_request_construct(this_request); + status = scic_sds_stp_packet_request_construct(sci_req); break; #endif @@ -743,10 +741,10 @@ static enum sci_status scic_io_request_construct_sata( case SAT_PROTOCOL_DEVICE_RESET: case SAT_PROTOCOL_RETURN_RESPONSE_INFO: default: - dev_err(scic_to_dev(this_request->owning_controller), + dev_err(scic_to_dev(sci_req->owning_controller), "%s: SCIC IO Request 0x%p received un-handled " "SAT Protocl %d.\n", - __func__, this_request, sat_protocol); + __func__, sci_req, proto); status = SCI_FAILURE; break; @@ -945,35 +943,25 @@ enum sci_status scic_task_request_construct_ssp( } -enum sci_status scic_io_request_construct_basic_sata( - struct scic_sds_request *sci_req) +enum sci_status scic_io_request_construct_basic_sata(struct scic_sds_request *sci_req) { enum sci_status status; - struct scic_sds_stp_request *this_stp_request; - u8 sat_protocol; - u32 transfer_length; - SCI_IO_REQUEST_DATA_DIRECTION data_direction; - bool copy_rx_frame = false; + struct 
scic_sds_stp_request *stp_req; + u8 proto; + u32 len; + enum dma_data_direction dir; + bool copy = false; - this_stp_request = (struct scic_sds_stp_request *)sci_req; + stp_req = container_of(sci_req, typeof(*stp_req), parent); sci_req->protocol = SCIC_STP_PROTOCOL; - transfer_length = - scic_cb_io_request_get_transfer_length(sci_req->user_request); - data_direction = - scic_cb_io_request_get_data_direction(sci_req->user_request); + len = scic_cb_io_request_get_transfer_length(sci_req->user_request); + dir = scic_cb_io_request_get_data_direction(sci_req->user_request); + proto = scic_cb_request_get_sat_protocol(sci_req->user_request); + copy = scic_cb_io_request_do_copy_rx_frames(stp_req->parent.user_request); - sat_protocol = scic_cb_request_get_sat_protocol(sci_req->user_request); - copy_rx_frame = scic_cb_io_request_do_copy_rx_frames(this_stp_request->parent.user_request); - - status = scic_io_request_construct_sata( - sci_req, - sat_protocol, - transfer_length, - data_direction, - copy_rx_frame - ); + status = scic_io_request_construct_sata(sci_req, proto, len, dir, copy); if (status == SCI_SUCCESS) sci_base_state_machine_change_state( diff --git a/drivers/scsi/isci/core/scic_sds_stp_request.c b/drivers/scsi/isci/core/scic_sds_stp_request.c index c14f6f10edb..49c494c097c 100644 --- a/drivers/scsi/isci/core/scic_sds_stp_request.c +++ b/drivers/scsi/isci/core/scic_sds_stp_request.c @@ -288,7 +288,7 @@ void scic_sds_stp_non_ncq_request_construct( /** * - * @this_request: This parameter specifies the request to be constructed as an + * @sci_req: This parameter specifies the request to be constructed as an * optimized request. * @optimized_task_type: This parameter specifies whether the request is to be * an UDMA request or a NCQ request. - A value of 0 indicates UDMA. - A @@ -298,24 +298,23 @@ void scic_sds_stp_non_ncq_request_construct( * requests that are optimized by the silicon (i.e. UDMA, NCQ). This method * returns an indication as to whether the construction was successful. */ -static void scic_sds_stp_optimized_request_construct( - struct scic_sds_request *this_request, - u8 optimized_task_type, - u32 transfer_length, - SCI_IO_REQUEST_DATA_DIRECTION data_direction) +static void scic_sds_stp_optimized_request_construct(struct scic_sds_request *sci_req, + u8 optimized_task_type, + u32 len, + enum dma_data_direction dir) { - struct scu_task_context *task_context = this_request->task_context_buffer; + struct scu_task_context *task_context = sci_req->task_context_buffer; /* Build the STP task context structure */ - scu_sata_reqeust_construct_task_context(this_request, task_context); + scu_sata_reqeust_construct_task_context(sci_req, task_context); /* Copy over the SGL elements */ - scic_sds_request_build_sgl(this_request); + scic_sds_request_build_sgl(sci_req); /* Copy over the number of bytes to be transfered */ - task_context->transfer_length_bytes = transfer_length; + task_context->transfer_length_bytes = len; - if (data_direction == SCI_IO_REQUEST_DATA_OUT) { + if (dir == DMA_TO_DEVICE) { /* * The difference between the DMA IN and DMA OUT request task type * values are consistent with the difference between FPDMA READ @@ -334,29 +333,24 @@ static void scic_sds_stp_optimized_request_construct( /** * - * @this_request: This parameter specifies the request to be constructed. + * @sci_req: This parameter specifies the request to be constructed. * * This method will construct the STP UDMA request and its associated TC data. 
* This method returns an indication as to whether the construction was * successful. SCI_SUCCESS Currently this method always returns this value. */ -enum sci_status scic_sds_stp_udma_request_construct( - struct scic_sds_request *this_request, - u32 transfer_length, - SCI_IO_REQUEST_DATA_DIRECTION data_direction) +enum sci_status scic_sds_stp_udma_request_construct(struct scic_sds_request *sci_req, + u32 len, + enum dma_data_direction dir) { - scic_sds_stp_non_ncq_request_construct(this_request); + scic_sds_stp_non_ncq_request_construct(sci_req); - scic_sds_stp_optimized_request_construct( - this_request, - SCU_TASK_TYPE_DMA_IN, - transfer_length, - data_direction - ); + scic_sds_stp_optimized_request_construct(sci_req, SCU_TASK_TYPE_DMA_IN, + len, dir); sci_base_state_machine_construct( - &this_request->started_substate_machine, - &this_request->parent.parent, + &sci_req->started_substate_machine, + &sci_req->parent.parent, scic_sds_stp_request_started_udma_substate_table, SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_TC_COMPLETION_SUBSTATE ); @@ -366,23 +360,19 @@ enum sci_status scic_sds_stp_udma_request_construct( /** * - * @this_request: This parameter specifies the request to be constructed. + * @sci_req: This parameter specifies the request to be constructed. * * This method will construct the STP UDMA request and its associated TC data. * This method returns an indication as to whether the construction was * successful. SCI_SUCCESS Currently this method always returns this value. */ -enum sci_status scic_sds_stp_ncq_request_construct( - struct scic_sds_request *this_request, - u32 transfer_length, - SCI_IO_REQUEST_DATA_DIRECTION data_direction) +enum sci_status scic_sds_stp_ncq_request_construct(struct scic_sds_request *sci_req, + u32 len, + enum dma_data_direction dir) { - scic_sds_stp_optimized_request_construct( - this_request, - SCU_TASK_TYPE_FPDMAQ_READ, - transfer_length, - data_direction - ); + scic_sds_stp_optimized_request_construct(sci_req, + SCU_TASK_TYPE_FPDMAQ_READ, + len, dir); return SCI_SUCCESS; } diff --git a/drivers/scsi/isci/core/scic_sds_stp_request.h b/drivers/scsi/isci/core/scic_sds_stp_request.h index 5578d2baf7c..0a12ff6417c 100644 --- a/drivers/scsi/isci/core/scic_sds_stp_request.h +++ b/drivers/scsi/isci/core/scic_sds_stp_request.h @@ -56,6 +56,7 @@ #ifndef _SCIC_SDS_STP_REQUEST_T_ #define _SCIC_SDS_STP_REQUEST_T_ +#include <linux/dma-mapping.h> #include "intel_sata.h" #include "sci_types.h" #include "scic_sds_request.h" @@ -201,7 +202,7 @@ enum sci_status scic_sds_stp_pio_request_construct_pass_through( enum sci_status scic_sds_stp_udma_request_construct( struct scic_sds_request *this_request, u32 transfer_length, - SCI_IO_REQUEST_DATA_DIRECTION data_direction); + enum dma_data_direction dir); enum sci_status scic_sds_stp_non_data_request_construct( struct scic_sds_request *this_request); @@ -212,7 +213,7 @@ enum sci_status scic_sds_stp_soft_reset_request_construct( enum sci_status scic_sds_stp_ncq_request_construct( struct scic_sds_request *this_request, u32 transfer_length, - SCI_IO_REQUEST_DATA_DIRECTION data_direction); + enum dma_data_direction dir); void scu_stp_raw_request_construct_task_context( struct scic_sds_stp_request *this_request, diff --git a/drivers/scsi/isci/core/scic_user_callback.h b/drivers/scsi/isci/core/scic_user_callback.h index 4aa020e2f07..4df7106f61a 100644 --- a/drivers/scsi/isci/core/scic_user_callback.h +++ b/drivers/scsi/isci/core/scic_user_callback.h @@ -246,11 +246,8 @@ u32 scic_cb_io_request_get_transfer_length( * object. 
It is a cookie that allows the user to provide the necessary * information for this callback. * - * This method returns the value of SCI_IO_REQUEST_DATA_OUT or - * SCI_IO_REQUEST_DATA_IN, or SCI_IO_REQUEST_NO_DATA. */ -SCI_IO_REQUEST_DATA_DIRECTION scic_cb_io_request_get_data_direction( - void *scic_user_io_request); +enum dma_data_direction scic_cb_io_request_get_data_direction(void *req); #ifndef SCI_SGL_OPTIMIZATION_ENABLED /** diff --git a/drivers/scsi/isci/deprecated.c b/drivers/scsi/isci/deprecated.c index 847e6874e1a..0ee6679b810 100644 --- a/drivers/scsi/isci/deprecated.c +++ b/drivers/scsi/isci/deprecated.c @@ -139,16 +139,10 @@ u32 scic_cb_io_request_get_transfer_length( * @scic_user_io_request: This parameter points to the user's IO request * object. It is a cookie that allows the user to provide the necessary * information for this callback. - * - * This method returns the value of SCI_IO_REQUEST_DATA_OUT or - * SCI_IO_REQUEST_DATA_IN, or SCI_IO_REQUEST_NO_DATA. */ -SCI_IO_REQUEST_DATA_DIRECTION scic_cb_io_request_get_data_direction( - void *scic_user_io_request) +enum dma_data_direction scic_cb_io_request_get_data_direction(void *req) { - return isci_request_io_request_get_data_direction( - scic_user_io_request - ); + return isci_request_io_request_get_data_direction(req); } diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c index e564121b672..f7ba047d64c 100644 --- a/drivers/scsi/isci/request.c +++ b/drivers/scsi/isci/request.c @@ -1237,44 +1237,12 @@ u32 isci_request_io_request_get_transfer_length(struct isci_request *request) * * data direction for specified request. */ -SCI_IO_REQUEST_DATA_DIRECTION isci_request_io_request_get_data_direction( +enum dma_data_direction isci_request_io_request_get_data_direction( struct isci_request *request) { struct sas_task *task = isci_request_access_task(request); - SCI_IO_REQUEST_DATA_DIRECTION ret; - switch (task->data_dir) { - - case DMA_FROM_DEVICE: - ret = SCI_IO_REQUEST_DATA_IN; - dev_dbg(&request->isci_host->pdev->dev, - "%s: request=%p, FROM_DEVICE\n", - __func__, - request); - break; - - case DMA_TO_DEVICE: - ret = SCI_IO_REQUEST_DATA_OUT; - dev_dbg(&request->isci_host->pdev->dev, - "%s: request=%p, TO_DEVICE\n", - __func__, - request); - break; - - case DMA_BIDIRECTIONAL: - case DMA_NONE: - default: - ret = SCI_IO_REQUEST_NO_DATA; - dev_dbg(&request->isci_host->pdev->dev, - "%s: request=%p, unhandled direction case, " - "data_dir=%d\n", - __func__, - request, - task->data_dir); - break; - - } - return ret; + return task->data_dir; } /** diff --git a/drivers/scsi/isci/request.h b/drivers/scsi/isci/request.h index 5079d4a7c41..166295ee8cf 100644 --- a/drivers/scsi/isci/request.h +++ b/drivers/scsi/isci/request.h @@ -346,8 +346,7 @@ void isci_request_io_request_complete( u32 isci_request_io_request_get_transfer_length( struct isci_request *request); -SCI_IO_REQUEST_DATA_DIRECTION isci_request_io_request_get_data_direction( - struct isci_request *request); +enum dma_data_direction isci_request_io_request_get_data_direction(struct isci_request *req); /** * isci_request_io_request_get_next_sge() - This function is called by the sci -- cgit v1.2.3-70-g09d2 From 4dc043c41037fc6e369270daaa626465a8766565 Mon Sep 17 00:00:00 2001 From: Jeff Skirvin <jeffrey.d.skirvin@intel.com> Date: Fri, 4 Mar 2011 14:06:52 -0800 Subject: isci: Termination handling cleanup, added termination timeouts. Added a request "dead" state for use when a termination wait times-out. 
isci_terminate_pending_requests now detaches the device's pending list and terminates each entry on the detached list. Signed-off-by: Jeff Skirvin <jeffrey.d.skirvin@intel.com> Signed-off-by: Dan Williams <dan.j.williams@intel.com> --- drivers/scsi/isci/request.h | 3 +- drivers/scsi/isci/task.c | 233 +++++++++++++++++++++++++++----------------- 2 files changed, 143 insertions(+), 93 deletions(-) (limited to 'drivers/scsi/isci/request.h') diff --git a/drivers/scsi/isci/request.h b/drivers/scsi/isci/request.h index 166295ee8cf..b45c0f1f057 100644 --- a/drivers/scsi/isci/request.h +++ b/drivers/scsi/isci/request.h @@ -71,7 +71,8 @@ enum isci_request_status { completed = 0x03, aborting = 0x04, aborted = 0x05, - terminating = 0x06 + terminating = 0x06, + dead = 0x07 }; enum task_type { diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c index 5e80e844922..c781a4ab4a5 100644 --- a/drivers/scsi/isci/task.c +++ b/drivers/scsi/isci/task.c @@ -663,6 +663,59 @@ static void isci_request_cleanup_completed_loiterer( } isci_request_free(isci_host, isci_request); } + +/** +* @isci_termination_timed_out(): this function will deal with a request for +* which the wait for termination has timed-out. +* +* @isci_host This SCU. +* @isci_request The I/O request being terminated. +*/ +static void +isci_termination_timed_out( + struct isci_host * host, + struct isci_request * request + ) +{ + unsigned long state_flags; + + dev_warn(&host->pdev->dev, + "%s: host = %p; request = %p\n", + __func__, host, request); + + /* At this point, the request to terminate + * has timed out. The best we can do is to + * have the request die a silent death + * if it ever completes. + */ + spin_lock_irqsave(&request->state_lock, state_flags); + + if (request->status == started) { + + /* Set the request state to "dead", + * and clear the task pointer so that an actual + * completion event callback doesn't do + * anything. + */ + request->status = dead; + + /* Clear the timeout completion event pointer.*/ + request->io_request_completion = NULL; + + if (request->ttype == io_task) { + + /* Break links with the sas_task. */ + if (request->ttype_ptr.io_task_ptr != NULL) { + + request->ttype_ptr.io_task_ptr->lldd_task = NULL; + request->ttype_ptr.io_task_ptr = NULL; + } + } + } + spin_unlock_irqrestore(&request->state_lock, state_flags); +} + + /** * isci_terminate_request_core() - This function will terminate the given * request, and wait for it to complete. This function must only be called @@ -684,35 +737,20 @@ static void isci_terminate_request_core( bool needs_cleanup_handling = false; enum isci_request_status request_status; unsigned long flags; + unsigned long timeout_remaining; + dev_dbg(&isci_host->pdev->dev, "%s: device = %p; request = %p\n", __func__, isci_device, isci_request); - /* Peek at the current status of the request. This will tell - * us if there was special handling on the request such that it - * needs to be detached and freed here. - */ - spin_lock_irqsave(&isci_request->state_lock, flags); - request_status = isci_request_get_state(isci_request); - - if ((isci_request->ttype == io_task) /* TMFs are in their own thread */ - && ((request_status == aborted) - || (request_status == aborting) - || (request_status == terminating) - || (request_status == completed) - ) - ) { + spin_lock_irqsave(&isci_host->scic_lock, flags); - /* The completion routine won't free a request in - * the aborted/aborting/terminating state, so we do - * it here. 
- */ - needs_cleanup_handling = true; - } - spin_unlock_irqrestore(&isci_request->state_lock, flags); + /* Note that we are not going to control + * the target to abort the request. + */ + isci_request->complete_in_target = true; - spin_lock_irqsave(&isci_host->scic_lock, flags); /* Make sure the request wasn't just sitting around signalling * device condition (if the request handle is NULL, then the * request completed but needed additional handling here). @@ -733,13 +771,16 @@ static void isci_terminate_request_core( * fail is when the io request is completed and * being aborted. */ - if (status != SCI_SUCCESS) + if (status != SCI_SUCCESS) { dev_err(&isci_host->pdev->dev, "%s: scic_controller_terminate_request" " returned = 0x%x\n", __func__, status); - else { + /* Clear the completion pointer from the request. */ + isci_request->io_request_completion = NULL; + + } else { if (was_terminated) { dev_dbg(&isci_host->pdev->dev, "%s: before completion wait (%p)\n", @@ -747,21 +788,62 @@ static void isci_terminate_request_core( isci_request->io_request_completion); /* Wait here for the request to complete. */ - wait_for_completion(isci_request->io_request_completion); + #define TERMINATION_TIMEOUT_MSEC 50 + timeout_remaining + = wait_for_completion_timeout( + isci_request->io_request_completion, + msecs_to_jiffies(TERMINATION_TIMEOUT_MSEC)); + + if (!timeout_remaining) { + + isci_termination_timed_out(isci_host, + isci_request); + + dev_err(&isci_host->pdev->dev, + "%s: *** Timeout waiting for " + "termination(%p/%p)\n", + __func__, + isci_request->io_request_completion, + isci_request); + + } else + dev_dbg(&isci_host->pdev->dev, + "%s: after completion wait (%p)\n", + __func__, + isci_request->io_request_completion); + } + /* Clear the completion pointer from the request. */ + isci_request->io_request_completion = NULL; - dev_dbg(&isci_host->pdev->dev, - "%s: after completion wait (%p)\n", - __func__, - isci_request->io_request_completion); + /* Peek at the status of the request. This will tell + * us if there was special handling on the request such that it + * needs to be detached and freed here. + */ + spin_lock_irqsave(&isci_request->state_lock, flags); + request_status = isci_request_get_state(isci_request); + + if ((isci_request->ttype == io_task) /* TMFs are in their own thread */ + && ((request_status == aborted) + || (request_status == aborting) + || (request_status == terminating) + || (request_status == completed) + || (request_status == dead) + ) + ) { + + /* The completion routine won't free a request in + * the aborted/aborting/etc. states, so we do + * it here. + */ + needs_cleanup_handling = true; } + spin_unlock_irqrestore(&isci_request->state_lock, flags); if (needs_cleanup_handling) isci_request_cleanup_completed_loiterer( isci_host, isci_device, isci_request ); } - /* Clear the completion pointer from the request. 
*/ - isci_request->io_request_completion = NULL; } static void isci_terminate_request( @@ -771,11 +853,7 @@ static void isci_terminate_request( enum isci_request_status new_request_state) { enum isci_request_status old_state; - DECLARE_COMPLETION_ONSTACK(request_completion); - unsigned long flags; - - spin_lock_irqsave(&isci_host->scic_lock, flags); /* Change state to "new_request_state" if it is currently "started" */ old_state = isci_request_change_started_to_newstate( @@ -823,73 +901,44 @@ void isci_terminate_pending_requests( struct isci_remote_device *isci_device, enum isci_request_status new_request_state) { - struct isci_request *isci_request; - struct sas_task *task; - bool done = false; - unsigned long flags; + struct isci_request *request; + struct isci_request *next_request; + unsigned long flags; + struct list_head aborted_request_list; + + INIT_LIST_HEAD(&aborted_request_list); dev_dbg(&isci_host->pdev->dev, "%s: isci_device = %p (new request state = %d)\n", __func__, isci_device, new_request_state); - #define ISCI_TERMINATE_SHOW_PENDING_REQUESTS - #ifdef ISCI_TERMINATE_SHOW_PENDING_REQUESTS - { - struct isci_request *request; - - /* Only abort the task if it's in the - * device's request_in_process list - */ - list_for_each_entry(request, - &isci_device->reqs_in_process, - dev_node) - dev_dbg(&isci_host->pdev->dev, - "%s: isci_device = %p; request is on " - "reqs_in_process list: %p\n", - __func__, isci_device, request); - } - #endif /* ISCI_TERMINATE_SHOW_PENDING_REQUESTS */ - - /* Clean up all pending requests. */ - do { - spin_lock_irqsave(&isci_host->scic_lock, flags); - - if (list_empty(&isci_device->reqs_in_process)) { - - done = true; - spin_unlock_irqrestore(&isci_host->scic_lock, flags); + spin_lock_irqsave(&isci_host->scic_lock, flags); - dev_dbg(&isci_host->pdev->dev, - "%s: isci_device = %p; done.\n", - __func__, isci_device); - } else { - /* The list was not empty - grab the first request. */ - isci_request = list_first_entry( - &isci_device->reqs_in_process, - struct isci_request, dev_node - ); - /* Note that we are not expecting to have to control - * the target to abort the request. - */ - isci_request->complete_in_target = true; + /* Move all of the pending requests off of the device list. */ + list_splice_init(&isci_device->reqs_in_process, + &aborted_request_list); - spin_unlock_irqrestore(&isci_host->scic_lock, flags); + spin_unlock_irqrestore(&isci_host->scic_lock, flags); - /* Get the libsas task reference. */ - task = isci_request_access_task(isci_request); + /* Iterate through the now-local list. */ + list_for_each_entry_safe(request, next_request, + &aborted_request_list, dev_node) { - dev_dbg(&isci_host->pdev->dev, - "%s: isci_device=%p request=%p; task=%p\n", - __func__, isci_device, isci_request, task); + dev_warn(&isci_host->pdev->dev, + "%s: isci_device=%p request=%p; task=%p\n", + __func__, + isci_device, request, + ((request->ttype == io_task) + ? isci_request_access_task(request) + : NULL)); - /* Mark all still pending I/O with the selected next - * state. - */ - isci_terminate_request(isci_host, isci_device, - isci_request, new_request_state - ); - } - } while (!done); + /* Mark all still pending I/O with the selected next + * state, terminate and free it. 
+ */ + isci_terminate_request(isci_host, isci_device, + request, new_request_state + ); + } } /** -- cgit v1.2.3-70-g09d2 From 35173d579a08c0d145b3020039d3ba33fbf2c184 Mon Sep 17 00:00:00 2001 From: Dan Williams <dan.j.williams@intel.com> Date: Sat, 26 Mar 2011 16:43:01 -0700 Subject: isci: namespacecheck cleanups * mark needlessly global routines static * delete unused functions * move kernel-doc blocks from header files to source * reorder some functions to delete declarations * more default handler cleanups phy Signed-off-by: Dan Williams <dan.j.williams@intel.com> --- drivers/scsi/isci/core/sci_base_port.h | 12 - drivers/scsi/isci/core/scic_config_parameters.h | 13 - drivers/scsi/isci/core/scic_controller.h | 348 ----------- drivers/scsi/isci/core/scic_io_request.h | 2 +- drivers/scsi/isci/core/scic_port.h | 98 --- drivers/scsi/isci/core/scic_sds_controller.c | 675 +++++++++++++-------- drivers/scsi/isci/core/scic_sds_controller.h | 60 -- drivers/scsi/isci/core/scic_sds_phy.c | 324 +++------- drivers/scsi/isci/core/scic_sds_phy.h | 35 -- drivers/scsi/isci/core/scic_sds_port.c | 444 +++++++------- drivers/scsi/isci/core/scic_sds_port.h | 53 -- drivers/scsi/isci/core/scic_sds_remote_device.c | 48 +- drivers/scsi/isci/core/scic_sds_remote_device.h | 5 - .../scsi/isci/core/scic_sds_remote_node_context.c | 46 +- .../scsi/isci/core/scic_sds_remote_node_context.h | 8 - drivers/scsi/isci/core/scic_sds_request.c | 309 ++++------ drivers/scsi/isci/core/scic_sds_request.h | 7 - .../scsi/isci/core/scic_sds_smp_remote_device.c | 2 +- drivers/scsi/isci/core/scic_sds_smp_request.c | 137 +++-- drivers/scsi/isci/core/scic_sds_ssp_request.c | 2 +- drivers/scsi/isci/core/scic_sds_stp_pio_request.h | 11 - .../scsi/isci/core/scic_sds_stp_remote_device.c | 2 +- drivers/scsi/isci/core/scic_sds_stp_request.c | 251 ++++---- drivers/scsi/isci/core/scic_sds_stp_request.h | 23 +- drivers/scsi/isci/firmware/README | 2 +- drivers/scsi/isci/port.c | 2 +- drivers/scsi/isci/request.c | 73 --- drivers/scsi/isci/request.h | 6 - drivers/scsi/isci/task.c | 11 +- drivers/scsi/isci/task.h | 10 - 30 files changed, 1056 insertions(+), 1963 deletions(-) (limited to 'drivers/scsi/isci/request.h') diff --git a/drivers/scsi/isci/core/sci_base_port.h b/drivers/scsi/isci/core/sci_base_port.h index 4e2031d195f..b931c3c2cdf 100644 --- a/drivers/scsi/isci/core/sci_base_port.h +++ b/drivers/scsi/isci/core/sci_base_port.h @@ -188,16 +188,4 @@ struct sci_base_port_state_handler { SCI_BASE_PORT_PHY_HANDLER_T remove_phy_handler; }; - -/** - * sci_base_port_construct() - Construct the base port object - * @this_port: This parameter specifies the base port to be constructed. - * @state_table: This parameter specifies the table of state definitions to be - * utilized for the domain state machine. - * - */ -void sci_base_port_construct( - struct sci_base_port *this_port, - const struct sci_base_state *state_table); - #endif /* _SCI_BASE_PORT_H_ */ diff --git a/drivers/scsi/isci/core/scic_config_parameters.h b/drivers/scsi/isci/core/scic_config_parameters.h index f64f24fad5a..cb6abcc7e7c 100644 --- a/drivers/scsi/isci/core/scic_config_parameters.h +++ b/drivers/scsi/isci/core/scic_config_parameters.h @@ -260,19 +260,6 @@ enum sci_status scic_user_parameters_set( struct scic_sds_controller *controller, union scic_user_parameters *user_parameters); -/** - * scic_user_parameters_get() - This method allows the user to retrieve the - * user parameters utilized by the controller. 
- * @controller: This parameter specifies the controller on which to set the - * user parameters. - * @user_parameters: This parameter specifies the USER_PARAMETERS object into - * which the framework shall save it's parameters. - * - */ -void scic_user_parameters_get( - struct scic_sds_controller *controller, - union scic_user_parameters *user_parameters); - /** * scic_oem_parameters_set() - This method allows the user to attempt to change * the OEM parameters utilized by the controller. diff --git a/drivers/scsi/isci/core/scic_controller.h b/drivers/scsi/isci/core/scic_controller.h index 481e0de5148..236c583162e 100644 --- a/drivers/scsi/isci/core/scic_controller.h +++ b/drivers/scsi/isci/core/scic_controller.h @@ -56,14 +56,6 @@ #ifndef _SCIC_CONTROLLER_H_ #define _SCIC_CONTROLLER_H_ -/** - * This file contains all of the interface methods that can be called by an - * SCIC user on a controller object. - * - * - */ - - #include "sci_status.h" #include "sci_controller.h" #include "scic_config_parameters.h" @@ -79,417 +71,77 @@ enum sci_controller_mode { SCI_MODE_SIZE /* Optimized for memory use */ }; - -/** - * scic_controller_construct() - This method will attempt to construct a - * controller object utilizing the supplied parameter information. - * @c: This parameter specifies the controller to be constructed. - * @scu_base: mapped base address of the scu registers - * @smu_base: mapped base address of the smu registers - * - * Indicate if the controller was successfully constructed or if it failed in - * some way. SCI_SUCCESS This value is returned if the controller was - * successfully constructed. SCI_WARNING_TIMER_CONFLICT This value is returned - * if the interrupt coalescence timer may cause SAS compliance issues for SMP - * Target mode response processing. SCI_FAILURE_UNSUPPORTED_CONTROLLER_TYPE - * This value is returned if the controller does not support the supplied type. - * SCI_FAILURE_UNSUPPORTED_INIT_DATA_VERSION This value is returned if the - * controller does not support the supplied initialization data version. - */ enum sci_status scic_controller_construct(struct scic_sds_controller *c, void __iomem *scu_base, void __iomem *smu_base); -/** - * scic_controller_enable_interrupts() - This method will enable all controller - * interrupts. - * @controller: This parameter specifies the controller for which to enable - * interrupts. - * - */ void scic_controller_enable_interrupts( struct scic_sds_controller *controller); -/** - * scic_controller_disable_interrupts() - This method will disable all - * controller interrupts. - * @controller: This parameter specifies the controller for which to disable - * interrupts. - * - */ void scic_controller_disable_interrupts( struct scic_sds_controller *controller); - -/** - * scic_controller_initialize() - This method will initialize the controller - * hardware managed by the supplied core controller object. This method - * will bring the physical controller hardware out of reset and enable the - * core to determine the capabilities of the hardware being managed. Thus, - * the core controller can determine it's exact physical (DMA capable) - * memory requirements. - * @controller: This parameter specifies the controller to be initialized. - * - * The SCI Core user must have called scic_controller_construct() on the - * supplied controller object previously. Indicate if the controller was - * successfully initialized or if it failed in some way. 
SCI_SUCCESS This value - * is returned if the controller hardware was successfully initialized. - */ enum sci_status scic_controller_initialize( struct scic_sds_controller *controller); -/** - * scic_controller_get_suggested_start_timeout() - This method returns the - * suggested scic_controller_start() timeout amount. The user is free to - * use any timeout value, but this method provides the suggested minimum - * start timeout value. The returned value is based upon empirical - * information determined as a result of interoperability testing. - * @controller: the handle to the controller object for which to return the - * suggested start timeout. - * - * This method returns the number of milliseconds for the suggested start - * operation timeout. - */ u32 scic_controller_get_suggested_start_timeout( struct scic_sds_controller *controller); -/** - * scic_controller_start() - This method will start the supplied core - * controller. This method will start the staggered spin up operation. The - * SCI User completion callback is called when the following conditions are - * met: -# the return status of this method is SCI_SUCCESS. -# after all of - * the phys have successfully started or been given the opportunity to start. - * @controller: the handle to the controller object to start. - * @timeout: This parameter specifies the number of milliseconds in which the - * start operation should complete. - * - * The SCI Core user must have filled in the physical memory descriptor - * structure via the sci_controller_get_memory_descriptor_list() method. The - * SCI Core user must have invoked the scic_controller_initialize() method - * prior to invoking this method. The controller must be in the INITIALIZED or - * STARTED state. Indicate if the controller start method succeeded or failed - * in some way. SCI_SUCCESS if the start operation succeeded. - * SCI_WARNING_ALREADY_IN_STATE if the controller is already in the STARTED - * state. SCI_FAILURE_INVALID_STATE if the controller is not either in the - * INITIALIZED or STARTED states. SCI_FAILURE_INVALID_MEMORY_DESCRIPTOR if - * there are inconsistent or invalid values in the supplied - * struct sci_physical_memory_descriptor array. - */ enum sci_status scic_controller_start( struct scic_sds_controller *controller, u32 timeout); -/** - * scic_controller_stop() - This method will stop an individual controller - * object.This method will invoke the associated user callback upon - * completion. The completion callback is called when the following - * conditions are met: -# the method return status is SCI_SUCCESS. -# the - * controller has been quiesced. This method will ensure that all IO - * requests are quiesced, phys are stopped, and all additional operation by - * the hardware is halted. - * @controller: the handle to the controller object to stop. - * @timeout: This parameter specifies the number of milliseconds in which the - * stop operation should complete. - * - * The controller must be in the STARTED or STOPPED state. Indicate if the - * controller stop method succeeded or failed in some way. SCI_SUCCESS if the - * stop operation successfully began. SCI_WARNING_ALREADY_IN_STATE if the - * controller is already in the STOPPED state. SCI_FAILURE_INVALID_STATE if the - * controller is not either in the STARTED or STOPPED states. 
- */ enum sci_status scic_controller_stop( struct scic_sds_controller *controller, u32 timeout); -/** - * scic_controller_reset() - This method will reset the supplied core - * controller regardless of the state of said controller. This operation is - * considered destructive. In other words, all current operations are wiped - * out. No IO completions for outstanding devices occur. Outstanding IO - * requests are not aborted or completed at the actual remote device. - * @controller: the handle to the controller object to reset. - * - * Indicate if the controller reset method succeeded or failed in some way. - * SCI_SUCCESS if the reset operation successfully started. SCI_FATAL_ERROR if - * the controller reset operation is unable to complete. - */ enum sci_status scic_controller_reset( struct scic_sds_controller *controller); -/** - * scic_controller_start_io() - This method is called by the SCI user to - * send/start an IO request. If the method invocation is successful, then - * the IO request has been queued to the hardware for processing. - * @controller: the handle to the controller object for which to start an IO - * request. - * @remote_device: the handle to the remote device object for which to start an - * IO request. - * @io_request: the handle to the io request object to start. - * @io_tag: This parameter specifies a previously allocated IO tag that the - * user desires to be utilized for this request. This parameter is optional. - * The user is allowed to supply SCI_CONTROLLER_INVALID_IO_TAG as the value - * for this parameter. - * - * - IO tags are a protected resource. It is incumbent upon the SCI Core user - * to ensure that each of the methods that may allocate or free available IO - * tags are handled in a mutually exclusive manner. This method is one of said - * methods requiring proper critical code section protection (e.g. semaphore, - * spin-lock, etc.). - For SATA, the user is required to manage NCQ tags. As a - * result, it is expected the user will have set the NCQ tag field in the host - * to device register FIS prior to calling this method. There is also a - * requirement for the user to call scic_stp_io_set_ncq_tag() prior to invoking - * the scic_controller_start_io() method. scic_controller_allocate_tag() for - * more information on allocating a tag. Indicate if the controller - * successfully started the IO request. SCI_IO_SUCCESS if the IO request was - * successfully started. Determine the failure situations and return values. - */ enum sci_io_status scic_controller_start_io( struct scic_sds_controller *controller, struct scic_sds_remote_device *remote_device, struct scic_sds_request *io_request, u16 io_tag); - -/** - * scic_controller_start_task() - This method is called by the SCIC user to - * send/start a framework task management request. - * @controller: the handle to the controller object for which to start the task - * management request. - * @remote_device: the handle to the remote device object for which to start - * the task management request. - * @task_request: the handle to the task request object to start. - * @io_tag: This parameter specifies a previously allocated IO tag that the - * user desires to be utilized for this request. Note this not the io_tag - * of the request being managed. It is to be utilized for the task request - * itself. This parameter is optional. The user is allowed to supply - * SCI_CONTROLLER_INVALID_IO_TAG as the value for this parameter. - * - * - IO tags are a protected resource. 
It is incumbent upon the SCI Core user - * to ensure that each of the methods that may allocate or free available IO - * tags are handled in a mutually exclusive manner. This method is one of said - * methods requiring proper critical code section protection (e.g. semaphore, - * spin-lock, etc.). - The user must synchronize this task with completion - * queue processing. If they are not synchronized then it is possible for the - * io requests that are being managed by the task request can complete before - * starting the task request. scic_controller_allocate_tag() for more - * information on allocating a tag. Indicate if the controller successfully - * started the IO request. SCI_TASK_SUCCESS if the task request was - * successfully started. SCI_TASK_FAILURE_REQUIRES_SCSI_ABORT This value is - * returned if there is/are task(s) outstanding that require termination or - * completion before this request can succeed. - */ enum sci_task_status scic_controller_start_task( struct scic_sds_controller *controller, struct scic_sds_remote_device *remote_device, struct scic_sds_request *task_request, u16 io_tag); -/** - * scic_controller_complete_task() - This method will perform core specific - * completion operations for task management request. After this method is - * invoked, the user should consider the task request as invalid until it is - * properly reused (i.e. re-constructed). - * @controller: The handle to the controller object for which to complete the - * task management request. - * @remote_device: The handle to the remote device object for which to complete - * the task management request. - * @task_request: the handle to the task management request object to complete. - * - * Indicate if the controller successfully completed the task management - * request. SCI_SUCCESS if the completion process was successful. - */ enum sci_status scic_controller_complete_task( struct scic_sds_controller *controller, struct scic_sds_remote_device *remote_device, struct scic_sds_request *task_request); - -/** - * scic_controller_terminate_request() - This method is called by the SCI Core - * user to terminate an ongoing (i.e. started) core IO request. This does - * not abort the IO request at the target, but rather removes the IO request - * from the host controller. - * @controller: the handle to the controller object for which to terminate a - * request. - * @remote_device: the handle to the remote device object for which to - * terminate a request. - * @request: the handle to the io or task management request object to - * terminate. - * - * Indicate if the controller successfully began the terminate process for the - * IO request. SCI_SUCCESS if the terminate process was successfully started - * for the request. Determine the failure situations and return values. - */ enum sci_status scic_controller_terminate_request( struct scic_sds_controller *controller, struct scic_sds_remote_device *remote_device, struct scic_sds_request *request); -/** - * scic_controller_complete_io() - This method will perform core specific - * completion operations for an IO request. After this method is invoked, - * the user should consider the IO request as invalid until it is properly - * reused (i.e. re-constructed). - * @controller: The handle to the controller object for which to complete the - * IO request. - * @remote_device: The handle to the remote device object for which to complete - * the IO request. - * @io_request: the handle to the io request object to complete. - * - * - IO tags are a protected resource. 
It is incumbent upon the SCI Core user - * to ensure that each of the methods that may allocate or free available IO - * tags are handled in a mutually exclusive manner. This method is one of said - * methods requiring proper critical code section protection (e.g. semaphore, - * spin-lock, etc.). - If the IO tag for a request was allocated, by the SCI - * Core user, using the scic_controller_allocate_io_tag() method, then it is - * the responsibility of the caller to invoke the scic_controller_free_io_tag() - * method to free the tag (i.e. this method will not free the IO tag). Indicate - * if the controller successfully completed the IO request. SCI_SUCCESS if the - * completion process was successful. - */ enum sci_status scic_controller_complete_io( struct scic_sds_controller *controller, struct scic_sds_remote_device *remote_device, struct scic_sds_request *io_request); - -/** - * scic_controller_get_port_handle() - This method simply provides the user - * with a unique handle for a given SAS/SATA core port index. - * @controller: This parameter represents the handle to the controller object - * from which to retrieve a port (SAS or SATA) handle. - * @port_index: This parameter specifies the port index in the controller for - * which to retrieve the port handle. 0 <= port_index < maximum number of - * phys. - * @port_handle: This parameter specifies the retrieved port handle to be - * provided to the caller. - * - * Indicate if the retrieval of the port handle was successful. SCI_SUCCESS - * This value is returned if the retrieval was successful. - * SCI_FAILURE_INVALID_PORT This value is returned if the supplied port id is - * not in the supported range. - */ enum sci_status scic_controller_get_port_handle( struct scic_sds_controller *controller, u8 port_index, struct scic_sds_port **port_handle); -/** - * scic_controller_get_phy_handle() - This method simply provides the user with - * a unique handle for a given SAS/SATA phy index/identifier. - * @controller: This parameter represents the handle to the controller object - * from which to retrieve a phy (SAS or SATA) handle. - * @phy_index: This parameter specifies the phy index in the controller for - * which to retrieve the phy handle. 0 <= phy_index < maximum number of phys. - * @phy_handle: This parameter specifies the retrieved phy handle to be - * provided to the caller. - * - * Indicate if the retrieval of the phy handle was successful. SCI_SUCCESS This - * value is returned if the retrieval was successful. SCI_FAILURE_INVALID_PHY - * This value is returned if the supplied phy id is not in the supported range. - */ enum sci_status scic_controller_get_phy_handle( struct scic_sds_controller *controller, u8 phy_index, struct scic_sds_phy **phy_handle); -/** - * scic_controller_allocate_io_tag() - This method will allocate a tag from the - * pool of free IO tags. Direct allocation of IO tags by the SCI Core user - * is optional. The scic_controller_start_io() method will allocate an IO - * tag if this method is not utilized and the tag is not supplied to the IO - * construct routine. Direct allocation of IO tags may provide additional - * performance improvements in environments capable of supporting this usage - * model. Additionally, direct allocation of IO tags also provides - * additional flexibility to the SCI Core user. Specifically, the user may - * retain IO tags across the lives of multiple IO requests. - * @controller: the handle to the controller object for which to allocate the - * tag. 
- * - * IO tags are a protected resource. It is incumbent upon the SCI Core user to - * ensure that each of the methods that may allocate or free available IO tags - * are handled in a mutually exclusive manner. This method is one of said - * methods requiring proper critical code section protection (e.g. semaphore, - * spin-lock, etc.). An unsigned integer representing an available IO tag. - * SCI_CONTROLLER_INVALID_IO_TAG This value is returned if there are no - * currently available tags to be allocated. All return other values indicate a - * legitimate tag. - */ u16 scic_controller_allocate_io_tag( struct scic_sds_controller *controller); -/** - * scic_controller_free_io_tag() - This method will free an IO tag to the pool - * of free IO tags. This method provides the SCI Core user more flexibility - * with regards to IO tags. The user may desire to keep an IO tag after an - * IO request has completed, because they plan on re-using the tag for a - * subsequent IO request. This method is only legal if the tag was - * allocated via scic_controller_allocate_io_tag(). - * @controller: This parameter specifies the handle to the controller object - * for which to free/return the tag. - * @io_tag: This parameter represents the tag to be freed to the pool of - * available tags. - * - * - IO tags are a protected resource. It is incumbent upon the SCI Core user - * to ensure that each of the methods that may allocate or free available IO - * tags are handled in a mutually exclusive manner. This method is one of said - * methods requiring proper critical code section protection (e.g. semaphore, - * spin-lock, etc.). - If the IO tag for a request was allocated, by the SCI - * Core user, using the scic_controller_allocate_io_tag() method, then it is - * the responsibility of the caller to invoke this method to free the tag. This - * method returns an indication of whether the tag was successfully put back - * (freed) to the pool of available tags. SCI_SUCCESS This return value - * indicates the tag was successfully placed into the pool of available IO - * tags. SCI_FAILURE_INVALID_IO_TAG This value is returned if the supplied tag - * is not a valid IO tag value. - */ enum sci_status scic_controller_free_io_tag( struct scic_sds_controller *controller, u16 io_tag); - - - -/** - * scic_controller_set_mode() - This method allows the user to configure the - * SCI core into either a performance mode or a memory savings mode. - * @controller: This parameter represents the handle to the controller object - * for which to update the operating mode. - * @mode: This parameter specifies the new mode for the controller. - * - * Indicate if the user successfully change the operating mode of the - * controller. SCI_SUCCESS The user successfully updated the mode. - */ -enum sci_status scic_controller_set_mode( - struct scic_sds_controller *controller, - enum sci_controller_mode mode); - - -/** - * scic_controller_set_interrupt_coalescence() - This method allows the user to - * configure the interrupt coalescence. - * @controller: This parameter represents the handle to the controller object - * for which its interrupt coalesce register is overridden. - * @coalesce_number: Used to control the number of entries in the Completion - * Queue before an interrupt is generated. If the number of entries exceed - * this number, an interrupt will be generated. The valid range of the input - * is [0, 256]. A setting of 0 results in coalescing being disabled. - * @coalesce_timeout: Timeout value in microseconds. 
The valid range of the - * input is [0, 2700000] . A setting of 0 is allowed and results in no - * interrupt coalescing timeout. - * - * Indicate if the user successfully set the interrupt coalesce parameters. - * SCI_SUCCESS The user successfully updated the interrutp coalescence. - * SCI_FAILURE_INVALID_PARAMETER_VALUE The user input value is out of range. - */ -enum sci_status scic_controller_set_interrupt_coalescence( - struct scic_sds_controller *controller, - u32 coalesce_number, - u32 coalesce_timeout); - struct device; struct scic_sds_controller *scic_controller_alloc(struct device *dev); - - #endif /* _SCIC_CONTROLLER_H_ */ - diff --git a/drivers/scsi/isci/core/scic_io_request.h b/drivers/scsi/isci/core/scic_io_request.h index a52f33d94ab..fea894a0d28 100644 --- a/drivers/scsi/isci/core/scic_io_request.h +++ b/drivers/scsi/isci/core/scic_io_request.h @@ -325,7 +325,7 @@ enum sci_status scic_io_request_construct( struct scic_sds_remote_device *scic_remote_device, u16 io_tag, void *user_io_request_object, - void *scic_io_request_memory, + struct scic_sds_request *scic_io_request_memory, struct scic_sds_request **new_scic_io_request_handle); /** diff --git a/drivers/scsi/isci/core/scic_port.h b/drivers/scsi/isci/core/scic_port.h index 8222443c9fd..58295334323 100644 --- a/drivers/scsi/isci/core/scic_port.h +++ b/drivers/scsi/isci/core/scic_port.h @@ -56,14 +56,6 @@ #ifndef _SCIC_PORT_H_ #define _SCIC_PORT_H_ -/** - * This file contains all of the interface methods that can be called by an SCI - * Core user on a SAS or SATA port. - * - * - */ - - #include "sci_status.h" #include "intel_sas.h" @@ -78,118 +70,28 @@ enum SCIC_PORT_NOT_READY_REASON_CODE { SCIC_PORT_NOT_READY_REASON_CODE_MAX }; -/** - * struct scic_port_end_point_properties - This structure defines the - * properties that can be retrieved for each end-point local or remote - * (attached) port in the controller. - * - * - */ struct scic_port_end_point_properties { - /** - * This field indicates the SAS address for the associated end - * point in the port. - */ struct sci_sas_address sas_address; - - /** - * This field indicates the protocols supported by the associated - * end-point in the port. - */ struct sci_sas_identify_address_frame_protocols protocols; }; -/** - * struct scic_port_properties - This structure defines the properties that can - * be retrieved for each port in the controller. - * - * - */ struct scic_port_properties { - /** - * This field specifies the logical index of the port (0 relative). - */ u32 index; - - /** - * This field indicates the local end-point properties for port. - */ struct scic_port_end_point_properties local; - - /** - * This field indicates the remote (attached) end-point properties - * for the port. - */ struct scic_port_end_point_properties remote; - - /** - * This field specifies the phys contained inside the port. - */ u32 phy_mask; - }; -/** - * scic_port_get_properties() - This method simply returns the properties - * regarding the port, such as: physical index, protocols, sas address, etc. - * @port: this parameter specifies the port for which to retrieve the physical - * index. - * @properties: This parameter specifies the properties structure into which to - * copy the requested information. - * - * Indicate if the user specified a valid port. SCI_SUCCESS This value is - * returned if the specified port was valid. SCI_FAILURE_INVALID_PORT This - * value is returned if the specified port is not valid. 
When this value is - * returned, no data is copied to the properties output parameter. - */ enum sci_status scic_port_get_properties( struct scic_sds_port *port, struct scic_port_properties *properties); -/** - * scic_port_stop() - This method will make the port no longer ready for - * operation. After invoking this method IO operation is not possible. - * @port: This parameter specifies the port to be stopped. - * - * Indicate if the port was successfully stopped. SCI_SUCCESS This value is - * returned if the port was successfully stopped. SCI_WARNING_ALREADY_IN_STATE - * This value is returned if the port is already stopped or in the process of - * stopping. SCI_FAILURE_INVALID_PORT This value is returned if the supplied - * port is not valid. SCI_FAILURE_INVALID_STATE This value is returned if a - * stop operation can't be completed due to the state of port. - */ -enum sci_status scic_port_stop( - struct scic_sds_port *port); - -/** - * scic_port_hard_reset() - This method will request the SCI implementation to - * perform a HARD RESET on the SAS Port. If/When the HARD RESET completes - * the SCI user will be notified via an SCI OS callback indicating a direct - * attached device was found. - * @port: a handle corresponding to the SAS port to be hard reset. - * @reset_timeout: This parameter specifies the number of milliseconds in which - * the port reset operation should complete. - * - * The SCI User callback in SCIC_USER_CALLBACKS_T will only be called once for - * each phy in the SAS Port at completion of the hard reset sequence. Return a - * status indicating whether the hard reset started successfully. SCI_SUCCESS - * This value is returned if the hard reset operation started successfully. - */ enum sci_status scic_port_hard_reset( struct scic_sds_port *port, u32 reset_timeout); -/** - * scic_port_enable_broadcast_change_notification() - This API method enables - * the broadcast change notification from underneath hardware. - * @port: The port upon which broadcast change notifications (BCN) are to be - * enabled. - * - */ void scic_port_enable_broadcast_change_notification( struct scic_sds_port *port); - #endif /* _SCIC_PORT_H_ */ - diff --git a/drivers/scsi/isci/core/scic_sds_controller.c b/drivers/scsi/isci/core/scic_sds_controller.c index 7d25a0aafb6..5cc6f2fa093 100644 --- a/drivers/scsi/isci/core/scic_sds_controller.c +++ b/drivers/scsi/isci/core/scic_sds_controller.c @@ -219,98 +219,33 @@ sci_controller_get_memory_descriptor_list_handle(struct scic_sds_controller *sci return &scic->parent.mdl; } -/* - * ****************************************************************************- - * * SCIC SDS Controller Initialization Methods - * ****************************************************************************- */ - -/** - * This timer is used to start another phy after we have given up on the - * previous phy to transition to the ready state. - * - * - */ -static void scic_sds_controller_phy_startup_timeout_handler( - void *controller) -{ - enum sci_status status; - struct scic_sds_controller *this_controller; - - this_controller = (struct scic_sds_controller *)controller; - - this_controller->phy_startup_timer_pending = false; - - status = SCI_FAILURE; - - while (status != SCI_SUCCESS) { - status = scic_sds_controller_start_next_phy(this_controller); - } -} - -/** - * - * - * This method initializes the phy startup operations for controller start. 
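
A minimal sketch of how an SCI Core user might consume the port interface kept above (scic_controller_get_port_handle() together with scic_port_get_properties()); this is illustrative only and not part of the patch. The scic handle, the pr_info() reporting, and any serialization against controller state changes are assumptions left to the caller.

/* Hypothetical helper, not part of the isci driver; assumes the declarations
 * above plus <linux/printk.h>. */
static void example_dump_port_properties(struct scic_sds_controller *scic)
{
        struct scic_port_properties props;
        struct scic_sds_port *port;
        u8 index;

        for (index = 0; index < SCI_MAX_PORTS; index++) {
                /* SCI_FAILURE_INVALID_PORT means the index is out of range */
                if (scic_controller_get_port_handle(scic, index, &port) != SCI_SUCCESS)
                        continue;

                /* on failure nothing is copied into props, so just skip it */
                if (scic_port_get_properties(port, &props) != SCI_SUCCESS)
                        continue;

                pr_info("port %u: phy_mask 0x%x\n", props.index, props.phy_mask);
        }
}
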
- */ -enum sci_status scic_sds_controller_initialize_phy_startup( - struct scic_sds_controller *scic) +static void scic_sds_controller_initialize_power_control(struct scic_sds_controller *scic) { struct isci_host *ihost = sci_object_get_association(scic); + scic->power_control.timer = isci_timer_create(ihost, + scic, + scic_sds_controller_power_control_timer_handler); - scic->phy_startup_timer = isci_timer_create(ihost, - scic, - scic_sds_controller_phy_startup_timeout_handler); - - if (scic->phy_startup_timer == NULL) - return SCI_FAILURE_INSUFFICIENT_RESOURCES; - else { - scic->next_phy_to_start = 0; - scic->phy_startup_timer_pending = false; - } - - return SCI_SUCCESS; -} - -/** - * - * - * This method initializes the power control operations for the controller - * object. - */ -void scic_sds_controller_initialize_power_control( - struct scic_sds_controller *scic) -{ - struct isci_host *ihost = sci_object_get_association(scic); - scic->power_control.timer = isci_timer_create( - ihost, - scic, - scic_sds_controller_power_control_timer_handler); - - memset(scic->power_control.requesters, - 0, + memset(scic->power_control.requesters, 0, sizeof(scic->power_control.requesters)); scic->power_control.phys_waiting = 0; scic->power_control.phys_granted_power = 0; } -/* --------------------------------------------------------------------------- */ - #define SCU_REMOTE_NODE_CONTEXT_ALIGNMENT (32) #define SCU_TASK_CONTEXT_ALIGNMENT (256) #define SCU_UNSOLICITED_FRAME_ADDRESS_ALIGNMENT (64) #define SCU_UNSOLICITED_FRAME_BUFFER_ALIGNMENT (1024) #define SCU_UNSOLICITED_FRAME_HEADER_ALIGNMENT (64) -/* --------------------------------------------------------------------------- */ - /** * This method builds the memory descriptor table for this controller. * @this_controller: This parameter specifies the controller object for which * to build the memory table. 
* */ -void scic_sds_controller_build_memory_descriptor_table( +static void scic_sds_controller_build_memory_descriptor_table( struct scic_sds_controller *this_controller) { sci_base_mde_construct( @@ -356,7 +291,7 @@ void scic_sds_controller_build_memory_descriptor_table( * * enum sci_status */ -enum sci_status scic_sds_controller_validate_memory_descriptor_table( +static enum sci_status scic_sds_controller_validate_memory_descriptor_table( struct scic_sds_controller *this_controller) { bool mde_list_valid; @@ -410,7 +345,7 @@ enum sci_status scic_sds_controller_validate_memory_descriptor_table( * @this_controller: * */ -void scic_sds_controller_ram_initialization( +static void scic_sds_controller_ram_initialization( struct scic_sds_controller *this_controller) { struct sci_physical_memory_descriptor *mde; @@ -457,7 +392,7 @@ void scic_sds_controller_ram_initialization( * @this_controller: * */ -void scic_sds_controller_assign_task_entries( +static void scic_sds_controller_assign_task_entries( struct scic_sds_controller *this_controller) { u32 task_assignment; @@ -483,7 +418,7 @@ void scic_sds_controller_assign_task_entries( * * */ -void scic_sds_controller_initialize_completion_queue( +static void scic_sds_controller_initialize_completion_queue( struct scic_sds_controller *this_controller) { u32 index; @@ -533,7 +468,7 @@ void scic_sds_controller_initialize_completion_queue( * * */ -void scic_sds_controller_initialize_unsolicited_frame_queue( +static void scic_sds_controller_initialize_unsolicited_frame_queue( struct scic_sds_controller *this_controller) { u32 frame_queue_control_value; @@ -565,7 +500,7 @@ void scic_sds_controller_initialize_unsolicited_frame_queue( * * */ -void scic_sds_controller_enable_port_task_scheduler( +static void scic_sds_controller_enable_port_task_scheduler( struct scic_sds_controller *this_controller) { u32 port_task_scheduler_value; @@ -578,8 +513,6 @@ void scic_sds_controller_enable_port_task_scheduler( SCU_PTSGCR_WRITE(this_controller, port_task_scheduler_value); } -/* --------------------------------------------------------------------------- */ - /** * * @@ -591,7 +524,7 @@ void scic_sds_controller_enable_port_task_scheduler( /* Initialize the AFE for this phy index. 
We need to read the AFE setup from * the OEM parameters none */ -void scic_sds_controller_afe_initialization(struct scic_sds_controller *scic) +static void scic_sds_controller_afe_initialization(struct scic_sds_controller *scic) { const struct scic_sds_oem_params *oem = &scic->oem_parameters.sds1; u32 afe_status; @@ -746,7 +679,7 @@ static void scic_sds_controller_transition_to_ready( } } -void scic_sds_controller_timeout_handler(void *_scic) +static void scic_sds_controller_timeout_handler(void *_scic) { struct scic_sds_controller *scic = _scic; struct isci_host *ihost = sci_object_get_association(scic); @@ -764,7 +697,7 @@ void scic_sds_controller_timeout_handler(void *_scic) __func__); } -enum sci_status scic_sds_controller_stop_ports(struct scic_sds_controller *scic) +static enum sci_status scic_sds_controller_stop_ports(struct scic_sds_controller *scic) { u32 index; enum sci_status port_status; @@ -802,8 +735,7 @@ static inline void scic_sds_controller_phy_timer_start( scic->phy_startup_timer_pending = true; } -inline void scic_sds_controller_phy_timer_stop( - struct scic_sds_controller *scic) +static void scic_sds_controller_phy_timer_stop(struct scic_sds_controller *scic) { isci_timer_stop(scic->phy_startup_timer); @@ -811,16 +743,14 @@ inline void scic_sds_controller_phy_timer_stop( } /** - * This method is called internally by the controller object to start the next - * phy on the controller. If all the phys have been started, then this - * method will attempt to transition the controller to the READY state and - * inform the user (scic_cb_controller_start_complete()). - * @this_controller: This parameter specifies the controller object for which - * to start the next phy. + * scic_sds_controller_start_next_phy - start phy + * @scic: controller * - * enum sci_status + * If all the phys have been started, then attempt to transition the + * controller to the READY state and inform the user + * (scic_cb_controller_start_complete()). 
*/ -enum sci_status scic_sds_controller_start_next_phy(struct scic_sds_controller *scic) +static enum sci_status scic_sds_controller_start_next_phy(struct scic_sds_controller *scic) { struct scic_sds_oem_params *oem = &scic->oem_parameters.sds1; struct scic_sds_phy *sci_phy; @@ -907,14 +837,36 @@ enum sci_status scic_sds_controller_start_next_phy(struct scic_sds_controller *s return status; } -/** - * - * @this_controller: - * - * enum sci_status - */ -enum sci_status scic_sds_controller_stop_phys( - struct scic_sds_controller *this_controller) +static void scic_sds_controller_phy_startup_timeout_handler(void *_scic) +{ + struct scic_sds_controller *scic = _scic; + enum sci_status status; + + scic->phy_startup_timer_pending = false; + status = SCI_FAILURE; + while (status != SCI_SUCCESS) + status = scic_sds_controller_start_next_phy(scic); +} + +static enum sci_status scic_sds_controller_initialize_phy_startup(struct scic_sds_controller *scic) +{ + struct isci_host *ihost = sci_object_get_association(scic); + + scic->phy_startup_timer = isci_timer_create(ihost, + scic, + scic_sds_controller_phy_startup_timeout_handler); + + if (scic->phy_startup_timer == NULL) + return SCI_FAILURE_INSUFFICIENT_RESOURCES; + else { + scic->next_phy_to_start = 0; + scic->phy_startup_timer_pending = false; + } + + return SCI_SUCCESS; +} + +static enum sci_status scic_sds_controller_stop_phys(struct scic_sds_controller *scic) { u32 index; enum sci_status status; @@ -923,7 +875,7 @@ enum sci_status scic_sds_controller_stop_phys( status = SCI_SUCCESS; for (index = 0; index < SCI_MAX_PHYS; index++) { - phy_status = scic_sds_phy_stop(&this_controller->phy_table[index]); + phy_status = scic_sds_phy_stop(&scic->phy_table[index]); if ( (phy_status != SCI_SUCCESS) @@ -931,25 +883,18 @@ enum sci_status scic_sds_controller_stop_phys( ) { status = SCI_FAILURE; - dev_warn(scic_to_dev(this_controller), + dev_warn(scic_to_dev(scic), "%s: Controller stop operation failed to stop " "phy %d because of status %d.\n", __func__, - this_controller->phy_table[index].phy_index, phy_status); + scic->phy_table[index].phy_index, phy_status); } } return status; } -/** - * - * @this_controller: - * - * enum sci_status - */ -enum sci_status scic_sds_controller_stop_devices( - struct scic_sds_controller *this_controller) +static enum sci_status scic_sds_controller_stop_devices(struct scic_sds_controller *scic) { u32 index; enum sci_status status; @@ -957,19 +902,19 @@ enum sci_status scic_sds_controller_stop_devices( status = SCI_SUCCESS; - for (index = 0; index < this_controller->remote_node_entries; index++) { - if (this_controller->device_table[index] != NULL) { + for (index = 0; index < scic->remote_node_entries; index++) { + if (scic->device_table[index] != NULL) { /* / @todo What timeout value do we want to provide to this request? 
*/ - device_status = scic_remote_device_stop(this_controller->device_table[index], 0); + device_status = scic_remote_device_stop(scic->device_table[index], 0); if ((device_status != SCI_SUCCESS) && (device_status != SCI_FAILURE_INVALID_STATE)) { - dev_warn(scic_to_dev(this_controller), + dev_warn(scic_to_dev(scic), "%s: Controller stop operation failed " "to stop device 0x%p because of " "status %d.\n", __func__, - this_controller->device_table[index], device_status); + scic->device_table[index], device_status); } } } @@ -977,18 +922,7 @@ enum sci_status scic_sds_controller_stop_devices( return status; } -/* - * ****************************************************************************- - * * SCIC SDS Controller Power Control (Staggered Spinup) - * ****************************************************************************- */ - -/** - * This function starts the power control timer for this controller object. - * - * @param scic - */ -static inline void scic_sds_controller_power_control_timer_start( - struct scic_sds_controller *scic) +static void scic_sds_controller_power_control_timer_start(struct scic_sds_controller *scic) { isci_timer_start(scic->power_control.timer, SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL); @@ -996,13 +930,7 @@ static inline void scic_sds_controller_power_control_timer_start( scic->power_control.timer_started = true; } -/** - * This method stops the power control timer for this controller object. - * - * @param scic - */ -static inline void scic_sds_controller_power_control_timer_stop( - struct scic_sds_controller *scic) +static void scic_sds_controller_power_control_timer_stop(struct scic_sds_controller *scic) { if (scic->power_control.timer_started) { isci_timer_stop(scic->power_control.timer); @@ -1010,24 +938,12 @@ static inline void scic_sds_controller_power_control_timer_stop( } } -/** - * This method stops and starts the power control timer for this controller - * object. - * - * @param scic - */ -static inline void scic_sds_controller_power_control_timer_restart( - struct scic_sds_controller *scic) +static void scic_sds_controller_power_control_timer_restart(struct scic_sds_controller *scic) { scic_sds_controller_power_control_timer_stop(scic); scic_sds_controller_power_control_timer_start(scic); } -/** - * - * - * - */ static void scic_sds_controller_power_control_timer_handler( void *controller) { @@ -1146,8 +1062,6 @@ static bool scic_sds_controller_completion_queue_has_entries( return false; } -/* --------------------------------------------------------------------------- */ - /** * This method processes a task completion notification. This is called from * within the controller completion handler. @@ -1625,10 +1539,6 @@ void scic_sds_controller_error_handler(struct scic_sds_controller *scic) } -u32 scic_sds_controller_get_object_size(void) -{ - return sizeof(struct scic_sds_controller); -} void scic_sds_controller_link_up( @@ -1703,7 +1613,7 @@ void scic_sds_controller_remote_device_started(struct scic_sds_controller *scic, * controller are still in the stopping state. 
* */ -bool scic_sds_controller_has_remote_devices_stopping( +static bool scic_sds_controller_has_remote_devices_stopping( struct scic_sds_controller *this_controller) { u32 index; @@ -2042,62 +1952,20 @@ static void scic_sds_controller_set_default_config_parameters(struct scic_sds_co scic->user_parameters.sds1.no_outbound_task_timeout = 20; } - -enum sci_status scic_controller_construct(struct scic_sds_controller *controller, - void __iomem *scu_base, - void __iomem *smu_base) -{ - u8 index; - - sci_base_controller_construct( - &controller->parent, - scic_sds_controller_state_table, - controller->memory_descriptors, - ARRAY_SIZE(controller->memory_descriptors), - NULL - ); - - controller->scu_registers = scu_base; - controller->smu_registers = smu_base; - - scic_sds_port_configuration_agent_construct(&controller->port_agent); - - /* Construct the ports for this controller */ - for (index = 0; index < SCI_MAX_PORTS; index++) - scic_sds_port_construct(&controller->port_table[index], - index, controller); - scic_sds_port_construct(&controller->port_table[index], - SCIC_SDS_DUMMY_PORT, controller); - - /* Construct the phys for this controller */ - for (index = 0; index < SCI_MAX_PHYS; index++) { - /* Add all the PHYs to the dummy port */ - scic_sds_phy_construct( - &controller->phy_table[index], - &controller->port_table[SCI_MAX_PORTS], - index - ); - } - - controller->invalid_phy_mask = 0; - - /* Set the default maximum values */ - controller->completion_event_entries = SCU_EVENT_COUNT; - controller->completion_queue_entries = SCU_COMPLETION_QUEUE_COUNT; - controller->remote_node_entries = SCI_MAX_REMOTE_DEVICES; - controller->logical_port_entries = SCI_MAX_PORTS; - controller->task_context_entries = SCU_IO_REQUEST_COUNT; - controller->uf_control.buffers.count = SCU_UNSOLICITED_FRAME_COUNT; - controller->uf_control.address_table.count = SCU_UNSOLICITED_FRAME_COUNT; - - /* Initialize the User and OEM parameters to default values. */ - scic_sds_controller_set_default_config_parameters(controller); - - return scic_controller_reset(controller); -} - -/* --------------------------------------------------------------------------- */ - +/** + * scic_controller_initialize() - This method will initialize the controller + * hardware managed by the supplied core controller object. This method + * will bring the physical controller hardware out of reset and enable the + * core to determine the capabilities of the hardware being managed. Thus, + * the core controller can determine it's exact physical (DMA capable) + * memory requirements. + * @controller: This parameter specifies the controller to be initialized. + * + * The SCI Core user must have called scic_controller_construct() on the + * supplied controller object previously. Indicate if the controller was + * successfully initialized or if it failed in some way. SCI_SUCCESS This value + * is returned if the controller hardware was successfully initialized. + */ enum sci_status scic_controller_initialize( struct scic_sds_controller *scic) { @@ -2118,8 +1986,18 @@ enum sci_status scic_controller_initialize( return status; } -/* --------------------------------------------------------------------------- */ - +/** + * scic_controller_get_suggested_start_timeout() - This method returns the + * suggested scic_controller_start() timeout amount. The user is free to + * use any timeout value, but this method provides the suggested minimum + * start timeout value. 
The returned value is based upon empirical + * information determined as a result of interoperability testing. + * @controller: the handle to the controller object for which to return the + * suggested start timeout. + * + * This method returns the number of milliseconds for the suggested start + * operation timeout. + */ u32 scic_controller_get_suggested_start_timeout( struct scic_sds_controller *sc) { @@ -2146,8 +2024,28 @@ u32 scic_controller_get_suggested_start_timeout( + ((SCI_MAX_PHYS - 1) * SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL); } -/* --------------------------------------------------------------------------- */ - +/** + * scic_controller_start() - This method will start the supplied core + * controller. This method will start the staggered spin up operation. The + * SCI User completion callback is called when the following conditions are + * met: -# the return status of this method is SCI_SUCCESS. -# after all of + * the phys have successfully started or been given the opportunity to start. + * @controller: the handle to the controller object to start. + * @timeout: This parameter specifies the number of milliseconds in which the + * start operation should complete. + * + * The SCI Core user must have filled in the physical memory descriptor + * structure via the sci_controller_get_memory_descriptor_list() method. The + * SCI Core user must have invoked the scic_controller_initialize() method + * prior to invoking this method. The controller must be in the INITIALIZED or + * STARTED state. Indicate if the controller start method succeeded or failed + * in some way. SCI_SUCCESS if the start operation succeeded. + * SCI_WARNING_ALREADY_IN_STATE if the controller is already in the STARTED + * state. SCI_FAILURE_INVALID_STATE if the controller is not either in the + * INITIALIZED or STARTED states. SCI_FAILURE_INVALID_MEMORY_DESCRIPTOR if + * there are inconsistent or invalid values in the supplied + * struct sci_physical_memory_descriptor array. + */ enum sci_status scic_controller_start( struct scic_sds_controller *scic, u32 timeout) @@ -2169,8 +2067,24 @@ enum sci_status scic_controller_start( return status; } -/* --------------------------------------------------------------------------- */ - +/** + * scic_controller_stop() - This method will stop an individual controller + * object.This method will invoke the associated user callback upon + * completion. The completion callback is called when the following + * conditions are met: -# the method return status is SCI_SUCCESS. -# the + * controller has been quiesced. This method will ensure that all IO + * requests are quiesced, phys are stopped, and all additional operation by + * the hardware is halted. + * @controller: the handle to the controller object to stop. + * @timeout: This parameter specifies the number of milliseconds in which the + * stop operation should complete. + * + * The controller must be in the STARTED or STOPPED state. Indicate if the + * controller stop method succeeded or failed in some way. SCI_SUCCESS if the + * stop operation successfully began. SCI_WARNING_ALREADY_IN_STATE if the + * controller is already in the STOPPED state. SCI_FAILURE_INVALID_STATE if the + * controller is not either in the STARTED or STOPPED states. 
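
A minimal sketch of the bring-up sequence implied by the kernel-doc added above for scic_controller_initialize(), scic_controller_get_suggested_start_timeout() and scic_controller_start(); illustrative only, not part of the patch. It assumes the controller has already been allocated and constructed (scic_controller_alloc()/scic_controller_construct()) and that the memory descriptor list has been satisfied by the caller; unmasking interrupts at this point is likewise an assumption about the surrounding driver.

/* Hypothetical bring-up helper; error handling is reduced to bailing out. */
static enum sci_status example_bring_up(struct scic_sds_controller *scic)
{
        u32 timeout = scic_controller_get_suggested_start_timeout(scic);
        enum sci_status status;

        status = scic_controller_initialize(scic);
        if (status != SCI_SUCCESS)
                return status;

        /* completion is reported later via the start-complete callback */
        status = scic_controller_start(scic, timeout);
        if (status != SCI_SUCCESS)
                return status;

        /* assumption: the caller wants interrupts unmasked once start is queued */
        scic_controller_enable_interrupts(scic);
        return SCI_SUCCESS;
}

Shutdown would mirror this, with scic_controller_disable_interrupts() followed by scic_controller_stop() using a caller-chosen timeout.
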
+ */ enum sci_status scic_controller_stop( struct scic_sds_controller *scic, u32 timeout) @@ -2192,8 +2106,18 @@ enum sci_status scic_controller_stop( return status; } -/* --------------------------------------------------------------------------- */ - +/** + * scic_controller_reset() - This method will reset the supplied core + * controller regardless of the state of said controller. This operation is + * considered destructive. In other words, all current operations are wiped + * out. No IO completions for outstanding devices occur. Outstanding IO + * requests are not aborted or completed at the actual remote device. + * @controller: the handle to the controller object to reset. + * + * Indicate if the controller reset method succeeded or failed in some way. + * SCI_SUCCESS if the reset operation successfully started. SCI_FATAL_ERROR if + * the controller reset operation is unable to complete. + */ enum sci_status scic_controller_reset( struct scic_sds_controller *scic) { @@ -2214,6 +2138,33 @@ enum sci_status scic_controller_reset( return status; } +/** + * scic_controller_start_io() - This method is called by the SCI user to + * send/start an IO request. If the method invocation is successful, then + * the IO request has been queued to the hardware for processing. + * @controller: the handle to the controller object for which to start an IO + * request. + * @remote_device: the handle to the remote device object for which to start an + * IO request. + * @io_request: the handle to the io request object to start. + * @io_tag: This parameter specifies a previously allocated IO tag that the + * user desires to be utilized for this request. This parameter is optional. + * The user is allowed to supply SCI_CONTROLLER_INVALID_IO_TAG as the value + * for this parameter. + * + * - IO tags are a protected resource. It is incumbent upon the SCI Core user + * to ensure that each of the methods that may allocate or free available IO + * tags are handled in a mutually exclusive manner. This method is one of said + * methods requiring proper critical code section protection (e.g. semaphore, + * spin-lock, etc.). - For SATA, the user is required to manage NCQ tags. As a + * result, it is expected the user will have set the NCQ tag field in the host + * to device register FIS prior to calling this method. There is also a + * requirement for the user to call scic_stp_io_set_ncq_tag() prior to invoking + * the scic_controller_start_io() method. scic_controller_allocate_tag() for + * more information on allocating a tag. Indicate if the controller + * successfully started the IO request. SCI_IO_SUCCESS if the IO request was + * successfully started. Determine the failure situations and return values. + */ enum sci_io_status scic_controller_start_io( struct scic_sds_controller *scic, struct scic_sds_remote_device *remote_device, @@ -2231,8 +2182,22 @@ enum sci_io_status scic_controller_start_io( (struct sci_base_request *)io_request, io_tag); } -/* --------------------------------------------------------------------------- */ - +/** + * scic_controller_terminate_request() - This method is called by the SCI Core + * user to terminate an ongoing (i.e. started) core IO request. This does + * not abort the IO request at the target, but rather removes the IO request + * from the host controller. + * @controller: the handle to the controller object for which to terminate a + * request. + * @remote_device: the handle to the remote device object for which to + * terminate a request. 
+ * @request: the handle to the io or task management request object to + * terminate. + * + * Indicate if the controller successfully began the terminate process for the + * IO request. SCI_SUCCESS if the terminate process was successfully started + * for the request. Determine the failure situations and return values. + */ enum sci_status scic_controller_terminate_request( struct scic_sds_controller *scic, struct scic_sds_remote_device *remote_device, @@ -2249,8 +2214,28 @@ enum sci_status scic_controller_terminate_request( (struct sci_base_request *)request); } -/* --------------------------------------------------------------------------- */ - +/** + * scic_controller_complete_io() - This method will perform core specific + * completion operations for an IO request. After this method is invoked, + * the user should consider the IO request as invalid until it is properly + * reused (i.e. re-constructed). + * @controller: The handle to the controller object for which to complete the + * IO request. + * @remote_device: The handle to the remote device object for which to complete + * the IO request. + * @io_request: the handle to the io request object to complete. + * + * - IO tags are a protected resource. It is incumbent upon the SCI Core user + * to ensure that each of the methods that may allocate or free available IO + * tags are handled in a mutually exclusive manner. This method is one of said + * methods requiring proper critical code section protection (e.g. semaphore, + * spin-lock, etc.). - If the IO tag for a request was allocated, by the SCI + * Core user, using the scic_controller_allocate_io_tag() method, then it is + * the responsibility of the caller to invoke the scic_controller_free_io_tag() + * method to free the tag (i.e. this method will not free the IO tag). Indicate + * if the controller successfully completed the IO request. SCI_SUCCESS if the + * completion process was successful. + */ enum sci_status scic_controller_complete_io( struct scic_sds_controller *scic, struct scic_sds_remote_device *remote_device, @@ -2267,9 +2252,34 @@ enum sci_status scic_controller_complete_io( (struct sci_base_request *)io_request); } -/* --------------------------------------------------------------------------- */ - - +/** + * scic_controller_start_task() - This method is called by the SCIC user to + * send/start a framework task management request. + * @controller: the handle to the controller object for which to start the task + * management request. + * @remote_device: the handle to the remote device object for which to start + * the task management request. + * @task_request: the handle to the task request object to start. + * @io_tag: This parameter specifies a previously allocated IO tag that the + * user desires to be utilized for this request. Note this not the io_tag + * of the request being managed. It is to be utilized for the task request + * itself. This parameter is optional. The user is allowed to supply + * SCI_CONTROLLER_INVALID_IO_TAG as the value for this parameter. + * + * - IO tags are a protected resource. It is incumbent upon the SCI Core user + * to ensure that each of the methods that may allocate or free available IO + * tags are handled in a mutually exclusive manner. This method is one of said + * methods requiring proper critical code section protection (e.g. semaphore, + * spin-lock, etc.). - The user must synchronize this task with completion + * queue processing. 
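
A minimal sketch of the tag-ownership rule described above for scic_controller_start_io() and scic_controller_complete_io(); illustrative only, not part of the patch. The request and device handles, the caller-held locking around tag usage, and the choice to let the core allocate the tag are assumptions about the surrounding code.

/* Hypothetical submit/retire pair; locking is the caller's responsibility. */
static enum sci_io_status example_submit_io(struct scic_sds_controller *scic,
                                            struct scic_sds_remote_device *sci_dev,
                                            struct scic_sds_request *sci_req)
{
        /* SCI_CONTROLLER_INVALID_IO_TAG asks the core to allocate the tag */
        return scic_controller_start_io(scic, sci_dev, sci_req,
                                        SCI_CONTROLLER_INVALID_IO_TAG);
}

static void example_retire_io(struct scic_sds_controller *scic,
                              struct scic_sds_remote_device *sci_dev,
                              struct scic_sds_request *sci_req, u16 io_tag)
{
        scic_controller_complete_io(scic, sci_dev, sci_req);

        /* only needed when io_tag came from scic_controller_allocate_io_tag() */
        if (io_tag != SCI_CONTROLLER_INVALID_IO_TAG)
                scic_controller_free_io_tag(scic, io_tag);
}
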
If they are not synchronized then it is possible for the + * io requests that are being managed by the task request can complete before + * starting the task request. scic_controller_allocate_tag() for more + * information on allocating a tag. Indicate if the controller successfully + * started the IO request. SCI_TASK_SUCCESS if the task request was + * successfully started. SCI_TASK_FAILURE_REQUIRES_SCSI_ABORT This value is + * returned if there is/are task(s) outstanding that require termination or + * completion before this request can succeed. + */ enum sci_task_status scic_controller_start_task( struct scic_sds_controller *scic, struct scic_sds_remote_device *remote_device, @@ -2297,8 +2307,20 @@ enum sci_task_status scic_controller_start_task( return status; } -/* --------------------------------------------------------------------------- */ - +/** + * scic_controller_complete_task() - This method will perform core specific + * completion operations for task management request. After this method is + * invoked, the user should consider the task request as invalid until it is + * properly reused (i.e. re-constructed). + * @controller: The handle to the controller object for which to complete the + * task management request. + * @remote_device: The handle to the remote device object for which to complete + * the task management request. + * @task_request: the handle to the task management request object to complete. + * + * Indicate if the controller successfully completed the task management + * request. SCI_SUCCESS if the completion process was successful. + */ enum sci_status scic_controller_complete_task( struct scic_sds_controller *scic, struct scic_sds_remote_device *remote_device, @@ -2325,8 +2347,22 @@ enum sci_status scic_controller_complete_task( } -/* --------------------------------------------------------------------------- */ - +/** + * scic_controller_get_port_handle() - This method simply provides the user + * with a unique handle for a given SAS/SATA core port index. + * @controller: This parameter represents the handle to the controller object + * from which to retrieve a port (SAS or SATA) handle. + * @port_index: This parameter specifies the port index in the controller for + * which to retrieve the port handle. 0 <= port_index < maximum number of + * phys. + * @port_handle: This parameter specifies the retrieved port handle to be + * provided to the caller. + * + * Indicate if the retrieval of the port handle was successful. SCI_SUCCESS + * This value is returned if the retrieval was successful. + * SCI_FAILURE_INVALID_PORT This value is returned if the supplied port id is + * not in the supported range. + */ enum sci_status scic_controller_get_port_handle( struct scic_sds_controller *scic, u8 port_index, @@ -2341,8 +2377,20 @@ enum sci_status scic_controller_get_port_handle( return SCI_FAILURE_INVALID_PORT; } -/* --------------------------------------------------------------------------- */ - +/** + * scic_controller_get_phy_handle() - This method simply provides the user with + * a unique handle for a given SAS/SATA phy index/identifier. + * @controller: This parameter represents the handle to the controller object + * from which to retrieve a phy (SAS or SATA) handle. + * @phy_index: This parameter specifies the phy index in the controller for + * which to retrieve the phy handle. 0 <= phy_index < maximum number of phys. + * @phy_handle: This parameter specifies the retrieved phy handle to be + * provided to the caller. 
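
A minimal sketch of the task-management flow covered by the scic_controller_start_task()/scic_controller_complete_task() kernel-doc above; illustrative only, not part of the patch. Synchronizing against completion-queue processing, as that documentation requires, and waiting for the task to actually finish are both left to the caller and are assumptions here.

/* Hypothetical TMF helper; completion waiting is elided. */
static enum sci_task_status example_send_tmf(struct scic_sds_controller *scic,
                                             struct scic_sds_remote_device *sci_dev,
                                             struct scic_sds_request *tmf_req)
{
        enum sci_task_status status;

        status = scic_controller_start_task(scic, sci_dev, tmf_req,
                                            SCI_CONTROLLER_INVALID_IO_TAG);
        if (status != SCI_TASK_SUCCESS)
                return status;

        /* ... wait for the TMF to complete, then release core resources ... */
        scic_controller_complete_task(scic, sci_dev, tmf_req);
        return status;
}
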
+ * + * Indicate if the retrieval of the phy handle was successful. SCI_SUCCESS This + * value is returned if the retrieval was successful. SCI_FAILURE_INVALID_PHY + * This value is returned if the supplied phy id is not in the supported range. + */ enum sci_status scic_controller_get_phy_handle( struct scic_sds_controller *scic, u8 phy_index, @@ -2361,8 +2409,28 @@ enum sci_status scic_controller_get_phy_handle( return SCI_FAILURE_INVALID_PHY; } -/* --------------------------------------------------------------------------- */ - +/** + * scic_controller_allocate_io_tag() - This method will allocate a tag from the + * pool of free IO tags. Direct allocation of IO tags by the SCI Core user + * is optional. The scic_controller_start_io() method will allocate an IO + * tag if this method is not utilized and the tag is not supplied to the IO + * construct routine. Direct allocation of IO tags may provide additional + * performance improvements in environments capable of supporting this usage + * model. Additionally, direct allocation of IO tags also provides + * additional flexibility to the SCI Core user. Specifically, the user may + * retain IO tags across the lives of multiple IO requests. + * @controller: the handle to the controller object for which to allocate the + * tag. + * + * IO tags are a protected resource. It is incumbent upon the SCI Core user to + * ensure that each of the methods that may allocate or free available IO tags + * are handled in a mutually exclusive manner. This method is one of said + * methods requiring proper critical code section protection (e.g. semaphore, + * spin-lock, etc.). An unsigned integer representing an available IO tag. + * SCI_CONTROLLER_INVALID_IO_TAG This value is returned if there are no + * currently available tags to be allocated. All return other values indicate a + * legitimate tag. + */ u16 scic_controller_allocate_io_tag( struct scic_sds_controller *scic) { @@ -2380,8 +2448,31 @@ u16 scic_controller_allocate_io_tag( return SCI_CONTROLLER_INVALID_IO_TAG; } -/* --------------------------------------------------------------------------- */ - +/** + * scic_controller_free_io_tag() - This method will free an IO tag to the pool + * of free IO tags. This method provides the SCI Core user more flexibility + * with regards to IO tags. The user may desire to keep an IO tag after an + * IO request has completed, because they plan on re-using the tag for a + * subsequent IO request. This method is only legal if the tag was + * allocated via scic_controller_allocate_io_tag(). + * @controller: This parameter specifies the handle to the controller object + * for which to free/return the tag. + * @io_tag: This parameter represents the tag to be freed to the pool of + * available tags. + * + * - IO tags are a protected resource. It is incumbent upon the SCI Core user + * to ensure that each of the methods that may allocate or free available IO + * tags are handled in a mutually exclusive manner. This method is one of said + * methods requiring proper critical code section protection (e.g. semaphore, + * spin-lock, etc.). - If the IO tag for a request was allocated, by the SCI + * Core user, using the scic_controller_allocate_io_tag() method, then it is + * the responsibility of the caller to invoke this method to free the tag. This + * method returns an indication of whether the tag was successfully put back + * (freed) to the pool of available tags. 
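
A minimal sketch of explicit tag management per the scic_controller_allocate_io_tag()/scic_controller_free_io_tag() kernel-doc above; illustrative only, not part of the patch. The spinlock providing the mutual exclusion the documentation demands is an assumption; the caller decides how allocate/free are serialized, and a tag reserved this way may be reused across several requests before being returned.

/* Hypothetical tag reservation helpers; the lock is caller-provided and
 * <linux/spinlock.h> is assumed. */
static u16 example_reserve_tag(struct scic_sds_controller *scic, spinlock_t *lock)
{
        unsigned long flags;
        u16 io_tag;

        spin_lock_irqsave(lock, flags);
        io_tag = scic_controller_allocate_io_tag(scic);
        spin_unlock_irqrestore(lock, flags);

        /* SCI_CONTROLLER_INVALID_IO_TAG when the pool is exhausted */
        return io_tag;
}

static void example_release_tag(struct scic_sds_controller *scic,
                                spinlock_t *lock, u16 io_tag)
{
        unsigned long flags;

        spin_lock_irqsave(lock, flags);
        scic_controller_free_io_tag(scic, io_tag);
        spin_unlock_irqrestore(lock, flags);
}
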
SCI_SUCCESS This return value + * indicates the tag was successfully placed into the pool of available IO + * tags. SCI_FAILURE_INVALID_IO_TAG This value is returned if the supplied tag + * is not a valid IO tag value. + */ enum sci_status scic_controller_free_io_tag( struct scic_sds_controller *scic, u16 io_tag) @@ -2408,8 +2499,6 @@ enum sci_status scic_controller_free_io_tag( return SCI_FAILURE_INVALID_IO_TAG; } -/* --------------------------------------------------------------------------- */ - void scic_controller_enable_interrupts( struct scic_sds_controller *scic) { @@ -2417,8 +2506,6 @@ void scic_controller_enable_interrupts( SMU_IMR_WRITE(scic, 0x00000000); } -/* --------------------------------------------------------------------------- */ - void scic_controller_disable_interrupts( struct scic_sds_controller *scic) { @@ -2426,9 +2513,7 @@ void scic_controller_disable_interrupts( SMU_IMR_WRITE(scic, 0xffffffff); } -/* --------------------------------------------------------------------------- */ - -enum sci_status scic_controller_set_mode( +static enum sci_status scic_controller_set_mode( struct scic_sds_controller *scic, enum sci_controller_mode operating_mode) { @@ -2476,7 +2561,7 @@ enum sci_status scic_controller_set_mode( * * This method will reset the controller hardware. */ -void scic_sds_controller_reset_hardware( +static void scic_sds_controller_reset_hardware( struct scic_sds_controller *scic) { /* Disable interrupts so we dont take any spurious interrupts */ @@ -2495,8 +2580,6 @@ void scic_sds_controller_reset_hardware( SCU_UFQGP_WRITE(scic, 0x00000000); } -/* --------------------------------------------------------------------------- */ - enum sci_status scic_user_parameters_set( struct scic_sds_controller *scic, union scic_user_parameters *scic_parms) @@ -2551,17 +2634,6 @@ enum sci_status scic_user_parameters_set( return SCI_FAILURE_INVALID_STATE; } -/* --------------------------------------------------------------------------- */ - -void scic_user_parameters_get( - struct scic_sds_controller *scic, - union scic_user_parameters *scic_parms) -{ - memcpy(scic_parms, (&scic->user_parameters), sizeof(*scic_parms)); -} - -/* --------------------------------------------------------------------------- */ - enum sci_status scic_oem_parameters_set( struct scic_sds_controller *scic, union scic_oem_parameters *scic_parms) @@ -2616,8 +2688,6 @@ enum sci_status scic_oem_parameters_set( return SCI_FAILURE_INVALID_STATE; } -/* --------------------------------------------------------------------------- */ - void scic_oem_parameters_get( struct scic_sds_controller *scic, union scic_oem_parameters *scic_parms) @@ -2625,9 +2695,6 @@ void scic_oem_parameters_get( memcpy(scic_parms, (&scic->oem_parameters), sizeof(*scic_parms)); } -/* --------------------------------------------------------------------------- */ - - #define INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_LOWER_BOUND_NS 853 #define INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_UPPER_BOUND_NS 1280 #define INTERRUPT_COALESCE_TIMEOUT_MAX_US 2700000 @@ -2635,7 +2702,24 @@ void scic_oem_parameters_get( #define INTERRUPT_COALESCE_TIMEOUT_ENCODE_MIN 7 #define INTERRUPT_COALESCE_TIMEOUT_ENCODE_MAX 28 -enum sci_status scic_controller_set_interrupt_coalescence( +/** + * scic_controller_set_interrupt_coalescence() - This method allows the user to + * configure the interrupt coalescence. + * @controller: This parameter represents the handle to the controller object + * for which its interrupt coalesce register is overridden. 
+ * @coalesce_number: Used to control the number of entries in the Completion + * Queue before an interrupt is generated. If the number of entries exceed + * this number, an interrupt will be generated. The valid range of the input + * is [0, 256]. A setting of 0 results in coalescing being disabled. + * @coalesce_timeout: Timeout value in microseconds. The valid range of the + * input is [0, 2700000] . A setting of 0 is allowed and results in no + * interrupt coalescing timeout. + * + * Indicate if the user successfully set the interrupt coalesce parameters. + * SCI_SUCCESS The user successfully updated the interrutp coalescence. + * SCI_FAILURE_INVALID_PARAMETER_VALUE The user input value is out of range. + */ +static enum sci_status scic_controller_set_interrupt_coalescence( struct scic_sds_controller *scic_controller, u32 coalesce_number, u32 coalesce_timeout) @@ -3419,7 +3503,7 @@ static enum sci_status scic_sds_controller_stopping_state_complete_io_handler( * This method is called when the struct scic_sds_controller is in a stopping state * and the remote device has stopped. **/ -void scic_sds_controller_stopping_state_device_stopped_handler( +static void scic_sds_controller_stopping_state_device_stopped_handler( struct scic_sds_controller *controller, struct scic_sds_remote_device *remote_device ) @@ -3638,7 +3722,7 @@ static void scic_sds_controller_resetting_state_enter(struct sci_base_object *ob SCI_BASE_CONTROLLER_STATE_RESET); } -const struct sci_base_state scic_sds_controller_state_table[] = { +static const struct sci_base_state scic_sds_controller_state_table[] = { [SCI_BASE_CONTROLLER_STATE_INITIAL] = { .enter_state = scic_sds_controller_initial_state_enter, }, @@ -3662,3 +3746,64 @@ const struct sci_base_state scic_sds_controller_state_table[] = { [SCI_BASE_CONTROLLER_STATE_STOPPED] = {}, [SCI_BASE_CONTROLLER_STATE_FAILED] = {} }; + +/** + * scic_controller_construct() - This method will attempt to construct a + * controller object utilizing the supplied parameter information. + * @c: This parameter specifies the controller to be constructed. + * @scu_base: mapped base address of the scu registers + * @smu_base: mapped base address of the smu registers + * + * Indicate if the controller was successfully constructed or if it failed in + * some way. SCI_SUCCESS This value is returned if the controller was + * successfully constructed. SCI_WARNING_TIMER_CONFLICT This value is returned + * if the interrupt coalescence timer may cause SAS compliance issues for SMP + * Target mode response processing. SCI_FAILURE_UNSUPPORTED_CONTROLLER_TYPE + * This value is returned if the controller does not support the supplied type. + * SCI_FAILURE_UNSUPPORTED_INIT_DATA_VERSION This value is returned if the + * controller does not support the supplied initialization data version. 
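
A minimal sketch of how a probe path might discriminate the scic_controller_construct() return codes enumerated above; illustrative only, not part of the patch. The mapped scu/smu register pointers and the decision to treat SCI_WARNING_TIMER_CONFLICT as usable are assumptions about the caller; the controller object itself is assumed to come from scic_controller_alloc().

/* Hypothetical construct wrapper; not part of the isci driver. */
static enum sci_status example_construct(struct scic_sds_controller *scic,
                                         void __iomem *scu_base,
                                         void __iomem *smu_base)
{
        enum sci_status status = scic_controller_construct(scic, scu_base, smu_base);

        switch (status) {
        case SCI_SUCCESS:
                return SCI_SUCCESS;
        case SCI_WARNING_TIMER_CONFLICT:
                /* usable, but SMP target response timing may not be compliant */
                return SCI_SUCCESS;
        default:
                /* e.g. SCI_FAILURE_UNSUPPORTED_CONTROLLER_TYPE */
                return status;
        }
}
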
+ */ +enum sci_status scic_controller_construct(struct scic_sds_controller *scic, + void __iomem *scu_base, + void __iomem *smu_base) +{ + u8 i; + + sci_base_controller_construct(&scic->parent, + scic_sds_controller_state_table, + scic->memory_descriptors, + ARRAY_SIZE(scic->memory_descriptors), NULL); + + scic->scu_registers = scu_base; + scic->smu_registers = smu_base; + + scic_sds_port_configuration_agent_construct(&scic->port_agent); + + /* Construct the ports for this controller */ + for (i = 0; i < SCI_MAX_PORTS; i++) + scic_sds_port_construct(&scic->port_table[i], i, scic); + scic_sds_port_construct(&scic->port_table[i], SCIC_SDS_DUMMY_PORT, scic); + + /* Construct the phys for this controller */ + for (i = 0; i < SCI_MAX_PHYS; i++) { + /* Add all the PHYs to the dummy port */ + scic_sds_phy_construct(&scic->phy_table[i], + &scic->port_table[SCI_MAX_PORTS], i); + } + + scic->invalid_phy_mask = 0; + + /* Set the default maximum values */ + scic->completion_event_entries = SCU_EVENT_COUNT; + scic->completion_queue_entries = SCU_COMPLETION_QUEUE_COUNT; + scic->remote_node_entries = SCI_MAX_REMOTE_DEVICES; + scic->logical_port_entries = SCI_MAX_PORTS; + scic->task_context_entries = SCU_IO_REQUEST_COUNT; + scic->uf_control.buffers.count = SCU_UNSOLICITED_FRAME_COUNT; + scic->uf_control.address_table.count = SCU_UNSOLICITED_FRAME_COUNT; + + /* Initialize the User and OEM parameters to default values. */ + scic_sds_controller_set_default_config_parameters(scic); + + return scic_controller_reset(scic); +} diff --git a/drivers/scsi/isci/core/scic_sds_controller.h b/drivers/scsi/isci/core/scic_sds_controller.h index aa2698bb7ca..fedf5032d89 100644 --- a/drivers/scsi/isci/core/scic_sds_controller.h +++ b/drivers/scsi/isci/core/scic_sds_controller.h @@ -402,7 +402,6 @@ struct scic_sds_controller_state_handler { extern const struct scic_sds_controller_state_handler scic_sds_controller_state_handler_table[]; -extern const struct sci_base_state scic_sds_controller_state_table[]; /** * INCREMENT_QUEUE_GET() - @@ -535,7 +534,6 @@ extern const struct sci_base_state scic_sds_controller_state_table[]; /* --------------------------------------------------------------------------- */ -u32 scic_sds_controller_get_object_size(void); /* --------------------------------------------------------------------------- */ @@ -615,8 +613,6 @@ void scic_sds_controller_link_down( * * CORE CONTROLLER REMOTE DEVICE MESSAGE PROCESSING * ***************************************************************************** */ -bool scic_sds_controller_has_remote_devices_stopping( - struct scic_sds_controller *this_controller); void scic_sds_controller_remote_device_started( struct scic_sds_controller *this_controller, @@ -626,67 +622,11 @@ void scic_sds_controller_remote_device_stopped( struct scic_sds_controller *this_controller, struct scic_sds_remote_device *the_device); - -/* - * ***************************************************************************** - * * CORE CONTROLLER PRIVATE METHODS - * ***************************************************************************** */ - -enum sci_status scic_sds_controller_validate_memory_descriptor_table( - struct scic_sds_controller *this_controller); - -void scic_sds_controller_ram_initialization( - struct scic_sds_controller *this_controller); - -void scic_sds_controller_assign_task_entries( - struct scic_sds_controller *this_controller); - -void scic_sds_controller_afe_initialization( - struct scic_sds_controller *this_controller); - -void 
scic_sds_controller_enable_port_task_scheduler( - struct scic_sds_controller *this_controller); - -void scic_sds_controller_initialize_completion_queue( - struct scic_sds_controller *this_controller); - -void scic_sds_controller_initialize_unsolicited_frame_queue( - struct scic_sds_controller *this_controller); - -void scic_sds_controller_phy_timer_stop( - struct scic_sds_controller *this_controller); - -enum sci_status scic_sds_controller_start_next_phy( - struct scic_sds_controller *this_controller); - -enum sci_status scic_sds_controller_stop_phys( - struct scic_sds_controller *this_controller); - -enum sci_status scic_sds_controller_stop_ports( - struct scic_sds_controller *this_controller); - -enum sci_status scic_sds_controller_stop_devices( - struct scic_sds_controller *this_controller); - void scic_sds_controller_copy_task_context( struct scic_sds_controller *this_controller, struct scic_sds_request *this_request); -void scic_sds_controller_timeout_handler(void *controller); - -void scic_sds_controller_initialize_power_control( - struct scic_sds_controller *this_controller); - void scic_sds_controller_register_setup( struct scic_sds_controller *this_controller); -void scic_sds_controller_reset_hardware( - struct scic_sds_controller *this_controller); - -enum sci_status scic_sds_controller_initialize_phy_startup( - struct scic_sds_controller *this_controller); - -void scic_sds_controller_build_memory_descriptor_table( - struct scic_sds_controller *this_controller); - #endif /* _SCIC_SDS_CONTROLLER_H_ */ diff --git a/drivers/scsi/isci/core/scic_sds_phy.c b/drivers/scsi/isci/core/scic_sds_phy.c index e546e20f1ff..532338eb302 100644 --- a/drivers/scsi/isci/core/scic_sds_phy.c +++ b/drivers/scsi/isci/core/scic_sds_phy.c @@ -257,7 +257,7 @@ scic_sds_phy_link_layer_initialization(struct scic_sds_phy *sci_phy, * restart the starting substate machine since we dont know what has actually * happening. */ -void scic_sds_phy_sata_timeout(void *phy) +static void scic_sds_phy_sata_timeout(void *phy) { struct scic_sds_phy *sci_phy = phy; @@ -273,47 +273,6 @@ void scic_sds_phy_sata_timeout(void *phy) SCI_BASE_PHY_STATE_STARTING); } -/** - * This method will construct the struct scic_sds_phy object - * @this_phy: - * @owning_port: - * @phy_index: - * - */ -void scic_sds_phy_construct( - struct scic_sds_phy *this_phy, - struct scic_sds_port *owning_port, - u8 phy_index) -{ - /* - * Call the base constructor first - */ - sci_base_phy_construct( - &this_phy->parent, - scic_sds_phy_state_table - ); - - /* Copy the rest of the input data to our locals */ - this_phy->owning_port = owning_port; - this_phy->phy_index = phy_index; - this_phy->bcn_received_while_port_unassigned = false; - this_phy->protocol = SCIC_SDS_PHY_PROTOCOL_UNKNOWN; - this_phy->link_layer_registers = NULL; - this_phy->max_negotiated_speed = SCI_SAS_NO_LINK_RATE; - this_phy->sata_timeout_timer = NULL; - - /* Clear out the identification buffer data */ - memset(&this_phy->phy_type, 0, sizeof(this_phy->phy_type)); - - /* Initialize the the substate machines */ - sci_base_state_machine_construct( - &this_phy->starting_substate_machine, - &this_phy->parent.parent, - scic_sds_phy_starting_substates, - SCIC_SDS_PHY_STARTING_SUBSTATE_INITIAL - ); -} - /** * This method returns the port currently containing this phy. 
If the phy is * currently contained by the dummy port, then the phy is considered to not @@ -426,7 +385,7 @@ void scic_sds_phy_setup_transport( * This function will perform the register reads/writes to suspend the SCU * hardware protocol engine. none */ -void scic_sds_phy_suspend( +static void scic_sds_phy_suspend( struct scic_sds_phy *this_phy) { u32 scu_sas_pcfg_value; @@ -1416,7 +1375,62 @@ static enum sci_status scic_sds_phy_starting_substate_await_sata_power_consume_p return SCI_SUCCESS; } -const struct scic_sds_phy_state_handler scic_sds_phy_starting_substate_handler_table[] = { +static enum sci_status default_phy_handler(struct sci_base_phy *base_phy, const char *func) +{ + struct scic_sds_phy *sci_phy; + + sci_phy = container_of(base_phy, typeof(*sci_phy), parent); + dev_dbg(sciphy_to_dev(sci_phy), + "%s: in wrong state: %d\n", func, + sci_base_state_machine_get_state(&base_phy->state_machine)); + return SCI_FAILURE_INVALID_STATE; +} + +static enum sci_status scic_sds_phy_default_start_handler(struct sci_base_phy *base_phy) +{ + return default_phy_handler(base_phy, __func__); +} + +static enum sci_status scic_sds_phy_default_stop_handler(struct sci_base_phy *base_phy) +{ + return default_phy_handler(base_phy, __func__); +} + +static enum sci_status scic_sds_phy_default_reset_handler(struct sci_base_phy *base_phy) +{ + return default_phy_handler(base_phy, __func__); +} + +static enum sci_status scic_sds_phy_default_destroy_handler(struct sci_base_phy *base_phy) +{ + return default_phy_handler(base_phy, __func__); +} + +static enum sci_status scic_sds_phy_default_frame_handler(struct scic_sds_phy *sci_phy, + u32 frame_index) +{ + struct scic_sds_controller *scic = scic_sds_phy_get_controller(sci_phy); + + default_phy_handler(&sci_phy->parent, __func__); + scic_sds_controller_release_frame(scic, frame_index); + + return SCI_FAILURE_INVALID_STATE; +} + +static enum sci_status scic_sds_phy_default_event_handler(struct scic_sds_phy *sci_phy, + u32 event_code) +{ + return default_phy_handler(&sci_phy->parent, __func__); +} + +static enum sci_status scic_sds_phy_default_consume_power_handler(struct scic_sds_phy *sci_phy) +{ + return default_phy_handler(&sci_phy->parent, __func__); +} + + + +static const struct scic_sds_phy_state_handler scic_sds_phy_starting_substate_handler_table[] = { [SCIC_SDS_PHY_STARTING_SUBSTATE_INITIAL] = { .parent.start_handler = scic_sds_phy_default_start_handler, .parent.stop_handler = scic_sds_phy_starting_substate_general_stop_handler, @@ -1867,7 +1881,7 @@ static void scic_sds_phy_starting_final_substate_enter(struct sci_base_object *o /* --------------------------------------------------------------------------- */ -const struct sci_base_state scic_sds_phy_starting_substates[] = { +static const struct sci_base_state scic_sds_phy_starting_substates[] = { [SCIC_SDS_PHY_STARTING_SUBSTATE_INITIAL] = { .enter_state = scic_sds_phy_starting_initial_substate_enter, }, @@ -1905,196 +1919,6 @@ const struct sci_base_state scic_sds_phy_starting_substates[] = { } }; -/* - * *************************************************************************** - * * DEFAULT HANDLERS - * *************************************************************************** */ - -/** - * - * @phy: This is the struct sci_base_phy object which is cast into a struct scic_sds_phy - * object. - * - * This is the default method for phy a start request. It will report a - * warning and exit. 
enum sci_status SCI_FAILURE_INVALID_STATE - */ -enum sci_status scic_sds_phy_default_start_handler( - struct sci_base_phy *phy) -{ - struct scic_sds_phy *this_phy; - - this_phy = (struct scic_sds_phy *)phy; - - dev_warn(sciphy_to_dev(this_phy), - "%s: SCIC Phy 0x%p requested to start from invalid " - "state %d\n", - __func__, - this_phy, - sci_base_state_machine_get_state( - &this_phy->parent.state_machine)); - - return SCI_FAILURE_INVALID_STATE; - -} - -/** - * - * @phy: This is the struct sci_base_phy object which is cast into a - * struct scic_sds_phy object. - * - * This is the default method for phy a stop request. It will report a warning - * and exit. enum sci_status SCI_FAILURE_INVALID_STATE - */ -enum sci_status scic_sds_phy_default_stop_handler(struct sci_base_phy *base_phy) -{ - struct scic_sds_phy *sci_phy; - - sci_phy = (struct scic_sds_phy *)base_phy; - - dev_dbg(sciphy_to_dev(sci_phy), - "%s: SCIC Phy 0x%p requested to stop from invalid state %d\n", - __func__, - sci_phy, - sci_base_state_machine_get_state( - &sci_phy->parent.state_machine)); - - return SCI_FAILURE_INVALID_STATE; -} - -/** - * - * @phy: This is the struct sci_base_phy object which is cast into a struct scic_sds_phy - * object. - * - * This is the default method for phy a reset request. It will report a - * warning and exit. enum sci_status SCI_FAILURE_INVALID_STATE - */ -enum sci_status scic_sds_phy_default_reset_handler( - struct sci_base_phy *phy) -{ - struct scic_sds_phy *this_phy; - - this_phy = (struct scic_sds_phy *)phy; - - dev_warn(sciphy_to_dev(this_phy), - "%s: SCIC Phy 0x%p requested to reset from invalid state " - "%d\n", - __func__, - this_phy, - sci_base_state_machine_get_state( - &this_phy->parent.state_machine)); - - return SCI_FAILURE_INVALID_STATE; -} - -/** - * - * @phy: This is the struct sci_base_phy object which is cast into a struct scic_sds_phy - * object. - * - * This is the default method for phy a destruct request. It will report a - * warning and exit. enum sci_status SCI_FAILURE_INVALID_STATE - */ -enum sci_status scic_sds_phy_default_destroy_handler( - struct sci_base_phy *phy) -{ - struct scic_sds_phy *this_phy; - - this_phy = (struct scic_sds_phy *)phy; - - /* / @todo Implement something for the default */ - dev_warn(sciphy_to_dev(this_phy), - "%s: SCIC Phy 0x%p requested to destroy from invalid " - "state %d\n", - __func__, - this_phy, - sci_base_state_machine_get_state( - &this_phy->parent.state_machine)); - - return SCI_FAILURE_INVALID_STATE; -} - -/** - * - * @phy: This is the struct sci_base_phy object which is cast into a struct scic_sds_phy - * object. - * @frame_index: This is the frame index that was received from the SCU - * hardware. - * - * This is the default method for a phy frame handling request. It will report - * a warning, release the frame and exit. enum sci_status SCI_FAILURE_INVALID_STATE - */ -enum sci_status scic_sds_phy_default_frame_handler( - struct scic_sds_phy *this_phy, - u32 frame_index) -{ - dev_warn(sciphy_to_dev(this_phy), - "%s: SCIC Phy 0x%p received unexpected frame data %d " - "while in state %d\n", - __func__, - this_phy, - frame_index, - sci_base_state_machine_get_state( - &this_phy->parent.state_machine)); - - scic_sds_controller_release_frame( - scic_sds_phy_get_controller(this_phy), frame_index); - - return SCI_FAILURE_INVALID_STATE; -} - -/** - * - * @phy: This is the struct sci_base_phy object which is cast into a struct scic_sds_phy - * object. - * @event_code: This is the event code that was received from the SCU hardware. 
- * - * This is the default method for a phy event handler. It will report a - * warning and exit. enum sci_status SCI_FAILURE_INVALID_STATE - */ -enum sci_status scic_sds_phy_default_event_handler( - struct scic_sds_phy *this_phy, - u32 event_code) -{ - dev_dbg(sciphy_to_dev(this_phy), - "%s: SCIC Phy 0x%p received unexpected event status %x " - "while in state %d\n", - __func__, - this_phy, - event_code, - sci_base_state_machine_get_state( - &this_phy->parent.state_machine)); - - return SCI_FAILURE_INVALID_STATE; -} - -/** - * - * @phy: This is the struct sci_base_phy object which is cast into a struct scic_sds_phy - * object. - * - * This is the default method for a phy consume power handler. It will report - * a warning and exit. enum sci_status SCI_FAILURE_INVALID_STATE - */ -enum sci_status scic_sds_phy_default_consume_power_handler( - struct scic_sds_phy *this_phy) -{ - dev_warn(sciphy_to_dev(this_phy), - "%s: SCIC Phy 0x%p given unexpected permission to consume " - "power while in state %d\n", - __func__, - this_phy, - sci_base_state_machine_get_state( - &this_phy->parent.state_machine)); - - return SCI_FAILURE_INVALID_STATE; -} - -/* - * ****************************************************************************** - * * PHY STOPPED STATE HANDLERS - * ****************************************************************************** */ - /** * * @phy: This is the struct sci_base_phy object which is cast into a @@ -2219,7 +2043,7 @@ static enum sci_status scic_sds_phy_resetting_state_event_handler(struct scic_sd /* --------------------------------------------------------------------------- */ -const struct scic_sds_phy_state_handler scic_sds_phy_state_handler_table[] = { +static const struct scic_sds_phy_state_handler scic_sds_phy_state_handler_table[] = { [SCI_BASE_PHY_STATE_INITIAL] = { .parent.start_handler = scic_sds_phy_default_start_handler, .parent.stop_handler = scic_sds_phy_default_stop_handler, @@ -2563,7 +2387,7 @@ static void scic_sds_phy_final_state_enter( /* --------------------------------------------------------------------------- */ -const struct sci_base_state scic_sds_phy_state_table[] = { +static const struct sci_base_state scic_sds_phy_state_table[] = { [SCI_BASE_PHY_STATE_INITIAL] = { .enter_state = scic_sds_phy_initial_state_enter, }, @@ -2585,3 +2409,29 @@ const struct sci_base_state scic_sds_phy_state_table[] = { }, }; +void scic_sds_phy_construct(struct scic_sds_phy *sci_phy, + struct scic_sds_port *owning_port, u8 phy_index) +{ + /* + * Call the base constructor first + */ + sci_base_phy_construct(&sci_phy->parent, scic_sds_phy_state_table); + + /* Copy the rest of the input data to our locals */ + sci_phy->owning_port = owning_port; + sci_phy->phy_index = phy_index; + sci_phy->bcn_received_while_port_unassigned = false; + sci_phy->protocol = SCIC_SDS_PHY_PROTOCOL_UNKNOWN; + sci_phy->link_layer_registers = NULL; + sci_phy->max_negotiated_speed = SCI_SAS_NO_LINK_RATE; + sci_phy->sata_timeout_timer = NULL; + + /* Clear out the identification buffer data */ + memset(&sci_phy->phy_type, 0, sizeof(sci_phy->phy_type)); + + /* Initialize the the substate machines */ + sci_base_state_machine_construct(&sci_phy->starting_substate_machine, + &sci_phy->parent.parent, + scic_sds_phy_starting_substates, + SCIC_SDS_PHY_STARTING_SUBSTATE_INITIAL); +} diff --git a/drivers/scsi/isci/core/scic_sds_phy.h b/drivers/scsi/isci/core/scic_sds_phy.h index af9e24c3094..4745a791f15 100644 --- a/drivers/scsi/isci/core/scic_sds_phy.h +++ b/drivers/scsi/isci/core/scic_sds_phy.h @@ -293,12 
+293,6 @@ struct scic_sds_phy_state_handler { }; -extern const struct scic_sds_phy_state_handler scic_sds_phy_state_handler_table[]; -extern const struct sci_base_state scic_sds_phy_state_table[]; -extern const struct sci_base_state scic_sds_phy_starting_substates[]; -extern const struct scic_sds_phy_state_handler scic_sds_phy_starting_substate_handler_table[]; - - /** * scic_sds_phy_get_index() - * @@ -362,12 +356,6 @@ enum sci_status scic_sds_phy_stop( enum sci_status scic_sds_phy_reset( struct scic_sds_phy *this_phy); -void scic_sds_phy_sata_timeout( - void *cookie); - -void scic_sds_phy_suspend( - struct scic_sds_phy *this_phy); - void scic_sds_phy_resume( struct scic_sds_phy *this_phy); @@ -402,27 +390,4 @@ void scic_sds_phy_get_attached_phy_protocols( struct scic_sds_phy *this_phy, struct sci_sas_identify_address_frame_protocols *protocols); -enum sci_status scic_sds_phy_default_start_handler( - struct sci_base_phy *phy); - -enum sci_status scic_sds_phy_default_stop_handler( - struct sci_base_phy *phy); - -enum sci_status scic_sds_phy_default_reset_handler( - struct sci_base_phy *phy); - -enum sci_status scic_sds_phy_default_destroy_handler( - struct sci_base_phy *phy); - -enum sci_status scic_sds_phy_default_frame_handler( - struct scic_sds_phy *phy, - u32 frame_index); - -enum sci_status scic_sds_phy_default_event_handler( - struct scic_sds_phy *phy, - u32 evnet_code); - -enum sci_status scic_sds_phy_default_consume_power_handler( - struct scic_sds_phy *phy); - #endif /* _SCIC_SDS_PHY_H_ */ diff --git a/drivers/scsi/isci/core/scic_sds_port.c b/drivers/scsi/isci/core/scic_sds_port.c index 72b815ef98c..0a95f649c04 100644 --- a/drivers/scsi/isci/core/scic_sds_port.c +++ b/drivers/scsi/isci/core/scic_sds_port.c @@ -69,19 +69,13 @@ #include "sci_environment.h" #include "scic_sds_controller_registers.h" - -static void scic_sds_port_invalid_link_up( - struct scic_sds_port *this_port, - struct scic_sds_phy *phy); -static void scic_sds_port_timeout_handler( - void *port); #define SCIC_SDS_PORT_MIN_TIMER_COUNT (SCI_MAX_PORTS) #define SCIC_SDS_PORT_MAX_TIMER_COUNT (SCI_MAX_PORTS) #define SCIC_SDS_PORT_HARD_RESET_TIMEOUT (1000) #define SCU_DUMMY_INDEX (0xFFFF) -void sci_base_port_construct( +static void sci_base_port_construct( struct sci_base_port *base_port, const struct sci_base_state *state_table) { @@ -168,7 +162,7 @@ bool scic_sds_port_is_valid_phy_assignment( * Return a bit mask indicating which phys are a part of this port. Each bit * corresponds to a phy identifier (e.g. bit 0 = phy id 0). */ -u32 scic_sds_port_get_phys(struct scic_sds_port *this_port) +static u32 scic_sds_port_get_phys(struct scic_sds_port *this_port) { u32 index; u32 mask; @@ -196,7 +190,7 @@ u32 scic_sds_port_get_phys(struct scic_sds_port *this_port) * phy mask can be supported. true if this is a valid phy assignment for the * port false if this is not a valid phy assignment for the port */ -bool scic_sds_port_is_phy_mask_valid( +static bool scic_sds_port_is_phy_mask_valid( struct scic_sds_port *this_port, u32 phy_mask) { @@ -269,7 +263,7 @@ static struct scic_sds_phy *scic_sds_port_get_a_connected_phy( * is a functional test that only fails if the phy is currently assigned to a * different port. */ -enum sci_status scic_sds_port_set_phy( +static enum sci_status scic_sds_port_set_phy( struct scic_sds_port *port, struct scic_sds_phy *phy) { @@ -304,7 +298,7 @@ enum sci_status scic_sds_port_set_phy( * this phy is not currently assinged to this port. bool true if the phy is * removed from the port. 
false if this phy is not assined to this port. */ -enum sci_status scic_sds_port_clear_phy( +static enum sci_status scic_sds_port_clear_phy( struct scic_sds_port *port, struct scic_sds_phy *phy) { @@ -485,7 +479,7 @@ void scic_sds_port_get_attached_protocols( * This structure will be posted to the hardware to work around a scheduler * error in the hardware. */ -void scic_sds_port_construct_dummy_rnc(struct scic_sds_port *sci_port, u16 rni) +static void scic_sds_port_construct_dummy_rnc(struct scic_sds_port *sci_port, u16 rni) { union scu_remote_node_context *rnc; @@ -520,7 +514,7 @@ void scic_sds_port_construct_dummy_rnc(struct scic_sds_port *sci_port, u16 rni) * in the hardware. * */ -void scic_sds_port_construct_dummy_task(struct scic_sds_port *sci_port, u16 tci) +static void scic_sds_port_construct_dummy_task(struct scic_sds_port *sci_port, u16 tci) { struct scu_task_context *task_context; @@ -554,7 +548,7 @@ void scic_sds_port_construct_dummy_task(struct scic_sds_port *sci_port, u16 tci) task_context->task_phase = 0x01; } -void scic_sds_port_destroy_dummy_resources(struct scic_sds_port *sci_port) +static void scic_sds_port_destroy_dummy_resources(struct scic_sds_port *sci_port) { struct scic_sds_controller *scic = sci_port->owning_controller; @@ -569,38 +563,6 @@ void scic_sds_port_destroy_dummy_resources(struct scic_sds_port *sci_port) sci_port->reserved_tci = SCU_DUMMY_INDEX; } -void scic_sds_port_construct(struct scic_sds_port *sci_port, u8 port_index, - struct scic_sds_controller *scic) -{ - u32 index; - - sci_base_port_construct(&sci_port->parent, scic_sds_port_state_table); - - sci_base_state_machine_construct(&sci_port->ready_substate_machine, - &sci_port->parent.parent, - scic_sds_port_ready_substate_table, - SCIC_SDS_PORT_READY_SUBSTATE_WAITING); - - sci_port->logical_port_index = SCIC_SDS_DUMMY_PORT; - sci_port->physical_port_index = port_index; - sci_port->active_phy_mask = 0; - - sci_port->owning_controller = scic; - - sci_port->started_request_count = 0; - sci_port->assigned_device_count = 0; - - sci_port->reserved_rni = SCU_DUMMY_INDEX; - sci_port->reserved_tci = SCU_DUMMY_INDEX; - - sci_port->timer_handle = NULL; - - sci_port->port_task_scheduler_registers = NULL; - - for (index = 0; index < SCI_MAX_PHYS; index++) - sci_port->phy_table[index] = NULL; -} - /** * This method performs initialization of the supplied port. Initialization * includes: - state machine initialization - member variable initialization @@ -627,61 +589,18 @@ enum sci_status scic_sds_port_initialize( } /** + * scic_port_get_properties() - This method simply returns the properties + * regarding the port, such as: physical index, protocols, sas address, etc. + * @port: this parameter specifies the port for which to retrieve the physical + * index. + * @properties: This parameter specifies the properties structure into which to + * copy the requested information. * - * @this_port: This is the struct scic_sds_port object for which has a phy that has - * gone link up. - * @the_phy: This is the struct scic_sds_phy object that has gone link up. - * @do_notify_user: This parameter specifies whether to inform the user (via - * scic_cb_port_link_up()) as to the fact that a new phy as become ready. - * - * This method is the a general link up handler for the struct scic_sds_port object. - * This function will determine if this struct scic_sds_phy can be assigned to this - * struct scic_sds_port object. 
If the struct scic_sds_phy object can is not a valid PHY for - * this port then the function will notify the SCIC_USER. A PHY can only be - * part of a port if it's attached SAS ADDRESS is the same as all other PHYs in - * the same port. none + * Indicate if the user specified a valid port. SCI_SUCCESS This value is + * returned if the specified port was valid. SCI_FAILURE_INVALID_PORT This + * value is returned if the specified port is not valid. When this value is + * returned, no data is copied to the properties output parameter. */ -void scic_sds_port_general_link_up_handler( - struct scic_sds_port *this_port, - struct scic_sds_phy *the_phy, - bool do_notify_user) -{ - struct sci_sas_address port_sas_address; - struct sci_sas_address phy_sas_address; - - scic_sds_port_get_attached_sas_address(this_port, &port_sas_address); - scic_sds_phy_get_attached_sas_address(the_phy, &phy_sas_address); - - /* - * If the SAS address of the new phy matches the SAS address of - * other phys in the port OR this is the first phy in the port, - * then activate the phy and allow it to be used for operations - * in this port. */ - if ( - ( - (phy_sas_address.high == port_sas_address.high) - && (phy_sas_address.low == port_sas_address.low) - ) - || (this_port->active_phy_mask == 0) - ) { - scic_sds_port_activate_phy(this_port, the_phy, do_notify_user); - - if (this_port->parent.state_machine.current_state_id - == SCI_BASE_PORT_STATE_RESETTING) { - sci_base_state_machine_change_state( - &this_port->parent.state_machine, SCI_BASE_PORT_STATE_READY - ); - } - } else { - scic_sds_port_invalid_link_up(this_port, the_phy); - } -} - -enum sci_status scic_port_stop(struct scic_sds_port *port) -{ - return port->state_handlers->parent.stop_handler(&port->parent); -} - enum sci_status scic_port_get_properties( struct scic_sds_port *port, struct scic_port_properties *prop) @@ -700,7 +619,20 @@ enum sci_status scic_port_get_properties( return SCI_SUCCESS; } - +/** + * scic_port_hard_reset() - This method will request the SCI implementation to + * perform a HARD RESET on the SAS Port. If/When the HARD RESET completes + * the SCI user will be notified via an SCI OS callback indicating a direct + * attached device was found. + * @port: a handle corresponding to the SAS port to be hard reset. + * @reset_timeout: This parameter specifies the number of milliseconds in which + * the port reset operation should complete. + * + * The SCI User callback in SCIC_USER_CALLBACKS_T will only be called once for + * each phy in the SAS Port at completion of the hard reset sequence. Return a + * status indicating whether the hard reset started successfully. SCI_SUCCESS + * This value is returned if the hard reset operation started successfully. + */ enum sci_status scic_port_hard_reset( struct scic_sds_port *port, u32 reset_timeout) @@ -741,12 +673,11 @@ void scic_sds_port_setup_transports( * the phy to the port - enabling the Protocol Engine in the silicon. - * notifying the user that the link is up. 
none */ -void scic_sds_port_activate_phy(struct scic_sds_port *sci_port, - struct scic_sds_phy *sci_phy, - bool do_notify_user) +static void scic_sds_port_activate_phy(struct scic_sds_port *sci_port, + struct scic_sds_phy *sci_phy, + bool do_notify_user) { - struct scic_sds_controller *scic = - scic_sds_port_get_controller(sci_port); + struct scic_sds_controller *scic = scic_sds_port_get_controller(sci_port); struct sci_sas_identify_address_frame_protocols protocols; struct isci_host *ihost = sci_object_get_association(scic); @@ -764,22 +695,11 @@ void scic_sds_port_activate_phy(struct scic_sds_port *sci_port, isci_port_link_up(ihost, sci_port, sci_phy); } -/** - * - * @this_port: This is the port on which the phy should be deactivated. - * @the_phy: This is the specific phy that is no longer active in the port. - * @do_notify_user: This parameter specifies whether to inform the user (via - * isci_port_link_down()) as to the fact that a new phy as become - * ready. - * - * This function will deactivate the supplied phy in the port. none - */ void scic_sds_port_deactivate_phy(struct scic_sds_port *sci_port, struct scic_sds_phy *sci_phy, bool do_notify_user) { - struct scic_sds_controller *scic = - scic_sds_port_get_controller(sci_port); + struct scic_sds_controller *scic = scic_sds_port_get_controller(sci_port); struct isci_port *iport = sci_object_get_association(sci_port); struct isci_host *ihost = sci_object_get_association(scic); struct isci_phy *iphy = sci_object_get_association(sci_phy); @@ -821,6 +741,48 @@ static void scic_sds_port_invalid_link_up( } } +/** + * scic_sds_port_general_link_up_handler - phy can be assigned to port? + * @sci_port: scic_sds_port object for which has a phy that has gone link up. + * @sci_phy: This is the struct scic_sds_phy object that has gone link up. + * @do_notify_user: This parameter specifies whether to inform the user (via + * scic_cb_port_link_up()) as to the fact that a new phy as become ready. + * + * Determine if this phy can be assigned to this + * port . If the phy is not a valid PHY for + * this port then the function will notify the user. A PHY can only be + * part of a port if it's attached SAS ADDRESS is the same as all other PHYs in + * the same port. none + */ +static void scic_sds_port_general_link_up_handler(struct scic_sds_port *sci_port, + struct scic_sds_phy *sci_phy, + bool do_notify_user) +{ + struct sci_sas_address port_sas_address; + struct sci_sas_address phy_sas_address; + + scic_sds_port_get_attached_sas_address(sci_port, &port_sas_address); + scic_sds_phy_get_attached_sas_address(sci_phy, &phy_sas_address); + + /* If the SAS address of the new phy matches the SAS address of + * other phys in the port OR this is the first phy in the port, + * then activate the phy and allow it to be used for operations + * in this port. + */ + if ((phy_sas_address.high == port_sas_address.high && + phy_sas_address.low == port_sas_address.low) || + sci_port->active_phy_mask == 0) { + struct sci_base_state_machine *sm = &sci_port->parent.state_machine; + + scic_sds_port_activate_phy(sci_port, sci_phy, do_notify_user); + if (sm->current_state_id == SCI_BASE_PORT_STATE_RESETTING) + sci_base_state_machine_change_state(sm, SCI_BASE_PORT_STATE_READY); + } else + scic_sds_port_invalid_link_up(sci_port, sci_phy); +} + + + /** * This method returns false if the port only has a single phy object assigned. 
* If there are no phys or more than one phy then the method will return @@ -1004,7 +966,7 @@ static void scic_sds_port_timeout_handler(void *port) * * */ -void scic_sds_port_update_viit_entry(struct scic_sds_port *this_port) +static void scic_sds_port_update_viit_entry(struct scic_sds_port *this_port) { struct sci_sas_address sas_address; @@ -1483,9 +1445,106 @@ static enum sci_status scic_sds_port_ready_configuring_substate_complete_io_hand return SCI_SUCCESS; } -/* --------------------------------------------------------------------------- */ +static enum sci_status default_port_handler(struct sci_base_port *base_port, const char *func) +{ + struct scic_sds_port *sci_port; + + sci_port = container_of(base_port, typeof(*sci_port), parent); + dev_warn(sciport_to_dev(sci_port), + "%s: in wrong state: %d\n", func, + sci_base_state_machine_get_state(&base_port->state_machine)); + return SCI_FAILURE_INVALID_STATE; +} + +static enum sci_status scic_sds_port_default_start_handler(struct sci_base_port *base_port) +{ + return default_port_handler(base_port, __func__); +} + +static enum sci_status scic_sds_port_default_stop_handler(struct sci_base_port *base_port) +{ + return default_port_handler(base_port, __func__); +} + +static enum sci_status scic_sds_port_default_destruct_handler(struct sci_base_port *base_port) +{ + return default_port_handler(base_port, __func__); +} + +static enum sci_status scic_sds_port_default_reset_handler(struct sci_base_port *base_port, + u32 timeout) +{ + return default_port_handler(base_port, __func__); +} + +static enum sci_status scic_sds_port_default_add_phy_handler(struct sci_base_port *base_port, + struct sci_base_phy *base_phy) +{ + return default_port_handler(base_port, __func__); +} + +static enum sci_status scic_sds_port_default_remove_phy_handler(struct sci_base_port *base_port, + struct sci_base_phy *base_phy) +{ + return default_port_handler(base_port, __func__); +} + +/** + * scic_sds_port_default_frame_handler + * @port: This is the struct sci_base_port object which is cast into a struct scic_sds_port + * object. + * + * This is the default method for a port unsolicited frame request. It will + * report a warning and exit. enum sci_status SCI_FAILURE_INVALID_STATE Is it even + * possible to receive an unsolicited frame directed to a port object? It + * seems possible if we implementing virtual functions but until then? 
+ */ +static enum sci_status scic_sds_port_default_frame_handler(struct scic_sds_port *sci_port, + u32 frame_index) +{ + struct scic_sds_controller *scic = scic_sds_port_get_controller(sci_port); + + default_port_handler(&sci_port->parent, __func__); + scic_sds_controller_release_frame(scic, frame_index); + + return SCI_FAILURE_INVALID_STATE; +} + +static enum sci_status scic_sds_port_default_event_handler(struct scic_sds_port *sci_port, + u32 event_code) +{ + return default_port_handler(&sci_port->parent, __func__); +} + +static void scic_sds_port_default_link_up_handler(struct scic_sds_port *sci_port, + struct scic_sds_phy *sci_phy) +{ + default_port_handler(&sci_port->parent, __func__); +} + +static void scic_sds_port_default_link_down_handler(struct scic_sds_port *sci_port, + struct scic_sds_phy *sci_phy) +{ + default_port_handler(&sci_port->parent, __func__); +} -struct scic_sds_port_state_handler +static enum sci_status scic_sds_port_default_start_io_handler(struct scic_sds_port *sci_port, + struct scic_sds_remote_device *sci_dev, + struct scic_sds_request *sci_req) +{ + return default_port_handler(&sci_port->parent, __func__); +} + +static enum sci_status scic_sds_port_default_complete_io_handler(struct scic_sds_port *sci_port, + struct scic_sds_remote_device *sci_dev, + struct scic_sds_request *sci_req) +{ + return default_port_handler(&sci_port->parent, __func__); +} + + + +static struct scic_sds_port_state_handler scic_sds_port_ready_substate_handler_table[SCIC_SDS_PORT_READY_MAX_SUBSTATES] = { /* SCIC_SDS_PORT_READY_SUBSTATE_WAITING */ @@ -1541,7 +1600,6 @@ scic_sds_port_ready_substate_handler_table[SCIC_SDS_PORT_READY_MAX_SUBSTATES] = } }; - /** * scic_sds_port_set_ready_state_handlers() - * @@ -1584,7 +1642,7 @@ static void scic_sds_port_suspend_port_task_scheduler( * ongoing requests. * */ -void scic_sds_port_post_dummy_request(struct scic_sds_port *sci_port) +static void scic_sds_port_post_dummy_request(struct scic_sds_port *sci_port) { u32 command; struct scu_task_context *task_context; @@ -1609,7 +1667,7 @@ void scic_sds_port_post_dummy_request(struct scic_sds_port *sci_port) * @sci_port: The port on which the task must be aborted. 
* */ -void scic_sds_port_abort_dummy_request(struct scic_sds_port *sci_port) +static void scic_sds_port_abort_dummy_request(struct scic_sds_port *sci_port) { struct scic_sds_controller *scic = sci_port->owning_controller; u16 tci = sci_port->reserved_tci; @@ -1801,7 +1859,7 @@ static void scic_sds_port_ready_substate_configuring_exit( /* --------------------------------------------------------------------------- */ -const struct sci_base_state scic_sds_port_ready_substate_table[] = { +static const struct sci_base_state scic_sds_port_ready_substate_table[] = { [SCIC_SDS_PORT_READY_SUBSTATE_WAITING] = { .enter_state = scic_sds_port_ready_substate_waiting_enter, }, @@ -1815,103 +1873,6 @@ const struct sci_base_state scic_sds_port_ready_substate_table[] = { }, }; -static enum sci_status default_port_handler(struct sci_base_port *base_port, const char *func) -{ - struct scic_sds_port *sci_port; - - sci_port = container_of(base_port, typeof(*sci_port), parent); - dev_warn(sciport_to_dev(sci_port), - "%s: in wrong state: %d\n", func, - sci_base_state_machine_get_state(&base_port->state_machine)); - return SCI_FAILURE_INVALID_STATE; -} - -enum sci_status scic_sds_port_default_start_handler(struct sci_base_port *base_port) -{ - return default_port_handler(base_port, __func__); -} - -static enum sci_status scic_sds_port_default_stop_handler(struct sci_base_port *base_port) -{ - return default_port_handler(base_port, __func__); -} - -enum sci_status scic_sds_port_default_destruct_handler(struct sci_base_port *base_port) -{ - return default_port_handler(base_port, __func__); -} - -enum sci_status scic_sds_port_default_reset_handler(struct sci_base_port *base_port, - u32 timeout) -{ - return default_port_handler(base_port, __func__); -} - -static enum sci_status scic_sds_port_default_add_phy_handler(struct sci_base_port *base_port, - struct sci_base_phy *base_phy) -{ - return default_port_handler(base_port, __func__); -} - -enum sci_status scic_sds_port_default_remove_phy_handler(struct sci_base_port *base_port, - struct sci_base_phy *base_phy) -{ - return default_port_handler(base_port, __func__); -} - -/** - * scic_sds_port_default_frame_handler - * @port: This is the struct sci_base_port object which is cast into a struct scic_sds_port - * object. - * - * This is the default method for a port unsolicited frame request. It will - * report a warning and exit. enum sci_status SCI_FAILURE_INVALID_STATE Is it even - * possible to receive an unsolicited frame directed to a port object? It - * seems possible if we implementing virtual functions but until then? 
- */ -enum sci_status scic_sds_port_default_frame_handler(struct scic_sds_port *sci_port, - u32 frame_index) -{ - struct scic_sds_controller *scic = scic_sds_port_get_controller(sci_port); - - default_port_handler(&sci_port->parent, __func__); - scic_sds_controller_release_frame(scic, frame_index); - - return SCI_FAILURE_INVALID_STATE; -} - -enum sci_status scic_sds_port_default_event_handler(struct scic_sds_port *sci_port, - u32 event_code) -{ - return default_port_handler(&sci_port->parent, __func__); -} - -void scic_sds_port_default_link_up_handler(struct scic_sds_port *sci_port, - struct scic_sds_phy *sci_phy) -{ - default_port_handler(&sci_port->parent, __func__); -} - -void scic_sds_port_default_link_down_handler(struct scic_sds_port *sci_port, - struct scic_sds_phy *sci_phy) -{ - default_port_handler(&sci_port->parent, __func__); -} - -enum sci_status scic_sds_port_default_start_io_handler(struct scic_sds_port *sci_port, - struct scic_sds_remote_device *sci_dev, - struct scic_sds_request *sci_req) -{ - return default_port_handler(&sci_port->parent, __func__); -} - -static enum sci_status scic_sds_port_default_complete_io_handler(struct scic_sds_port *sci_port, - struct scic_sds_remote_device *sci_dev, - struct scic_sds_request *sci_req) -{ - return default_port_handler(&sci_port->parent, __func__); -} - /** * * @port: This is the struct scic_sds_port object on which the io request count will @@ -2248,9 +2209,7 @@ static void scic_sds_port_reset_state_link_down_handler( scic_sds_port_deactivate_phy(this_port, phy, false); } -/* --------------------------------------------------------------------------- */ - -struct scic_sds_port_state_handler +static struct scic_sds_port_state_handler scic_sds_port_state_handler_table[SCI_BASE_PORT_MAX_STATES] = { /* SCI_BASE_PORT_STATE_STOPPED */ @@ -2384,7 +2343,7 @@ static void scic_sds_port_disable_port_task_scheduler( scu_port_task_scheduler_write(this_port, control, pts_control_value); } -void scic_sds_port_post_dummy_remote_node(struct scic_sds_port *sci_port) +static void scic_sds_port_post_dummy_remote_node(struct scic_sds_port *sci_port) { struct scic_sds_controller *scic = sci_port->owning_controller; u8 phys_index = sci_port->physical_port_index; @@ -2412,7 +2371,7 @@ void scic_sds_port_post_dummy_remote_node(struct scic_sds_port *sci_port) scic_sds_controller_post_request(scic, command); } -void scic_sds_port_invalidate_dummy_remote_node(struct scic_sds_port *sci_port) +static void scic_sds_port_invalidate_dummy_remote_node(struct scic_sds_port *sci_port) { struct scic_sds_controller *scic = sci_port->owning_controller; u8 phys_index = sci_port->physical_port_index; @@ -2639,7 +2598,7 @@ static void scic_sds_port_failed_state_enter(struct sci_base_object *object) /* --------------------------------------------------------------------------- */ -const struct sci_base_state scic_sds_port_state_table[] = { +static const struct sci_base_state scic_sds_port_state_table[] = { [SCI_BASE_PORT_STATE_STOPPED] = { .enter_state = scic_sds_port_stopped_state_enter, .exit_state = scic_sds_port_stopped_state_exit @@ -2661,3 +2620,34 @@ const struct sci_base_state scic_sds_port_state_table[] = { } }; +void scic_sds_port_construct(struct scic_sds_port *sci_port, u8 port_index, + struct scic_sds_controller *scic) +{ + u32 index; + + sci_base_port_construct(&sci_port->parent, scic_sds_port_state_table); + + sci_base_state_machine_construct(&sci_port->ready_substate_machine, + &sci_port->parent.parent, + scic_sds_port_ready_substate_table, + 
SCIC_SDS_PORT_READY_SUBSTATE_WAITING); + + sci_port->logical_port_index = SCIC_SDS_DUMMY_PORT; + sci_port->physical_port_index = port_index; + sci_port->active_phy_mask = 0; + + sci_port->owning_controller = scic; + + sci_port->started_request_count = 0; + sci_port->assigned_device_count = 0; + + sci_port->reserved_rni = SCU_DUMMY_INDEX; + sci_port->reserved_tci = SCU_DUMMY_INDEX; + + sci_port->timer_handle = NULL; + + sci_port->port_task_scheduler_registers = NULL; + + for (index = 0; index < SCI_MAX_PHYS; index++) + sci_port->phy_table[index] = NULL; +} diff --git a/drivers/scsi/isci/core/scic_sds_port.h b/drivers/scsi/isci/core/scic_sds_port.h index ac81a92c348..8167f5e3be3 100644 --- a/drivers/scsi/isci/core/scic_sds_port.h +++ b/drivers/scsi/isci/core/scic_sds_port.h @@ -236,12 +236,6 @@ struct scic_sds_port_state_handler { }; -extern const struct sci_base_state scic_sds_port_state_table[]; -extern const struct sci_base_state scic_sds_port_ready_substate_table[]; - -extern struct scic_sds_port_state_handler scic_sds_port_state_handler_table[]; -extern struct scic_sds_port_state_handler scic_sds_port_ready_substate_handler_table[]; - /** * scic_sds_port_get_controller() - * @@ -351,10 +345,6 @@ void scic_sds_port_setup_transports( struct scic_sds_port *this_port, u32 device_id); -void scic_sds_port_activate_phy( - struct scic_sds_port *this_port, - struct scic_sds_phy *phy, - bool do_notify_user); void scic_sds_port_deactivate_phy( struct scic_sds_port *this_port, @@ -363,10 +353,6 @@ void scic_sds_port_deactivate_phy( -void scic_sds_port_general_link_up_handler( - struct scic_sds_port *this_port, - struct scic_sds_phy *the_phy, - bool do_notify_user); bool scic_sds_port_link_detected( struct scic_sds_port *this_port, @@ -397,47 +383,19 @@ enum sci_status scic_sds_port_complete_io( /* --------------------------------------------------------------------------- */ -void scic_sds_port_update_viit_entry( - struct scic_sds_port *this_port); /* --------------------------------------------------------------------------- */ -enum sci_status scic_sds_port_default_start_handler( - struct sci_base_port *port); -enum sci_status scic_sds_port_default_destruct_handler( - struct sci_base_port *port); -enum sci_status scic_sds_port_default_reset_handler( - struct sci_base_port *port, - u32 timeout); -enum sci_status scic_sds_port_default_remove_phy_handler( - struct sci_base_port *port, - struct sci_base_phy *phy); -enum sci_status scic_sds_port_default_frame_handler( - struct scic_sds_port *port, - u32 frame_index); -enum sci_status scic_sds_port_default_event_handler( - struct scic_sds_port *port, - u32 event_code); -void scic_sds_port_default_link_up_handler( - struct scic_sds_port *this_port, - struct scic_sds_phy *phy); -void scic_sds_port_default_link_down_handler( - struct scic_sds_port *this_port, - struct scic_sds_phy *phy); -enum sci_status scic_sds_port_default_start_io_handler( - struct scic_sds_port *port, - struct scic_sds_remote_device *device, - struct scic_sds_request *io_request); enum sci_sas_link_rate scic_sds_port_get_max_allowed_speed( @@ -451,12 +409,7 @@ bool scic_sds_port_is_valid_phy_assignment( struct scic_sds_port *this_port, u32 phy_index); -bool scic_sds_port_is_phy_mask_valid( - struct scic_sds_port *this_port, - u32 phy_mask); -u32 scic_sds_port_get_phys( - struct scic_sds_port *this_port); void scic_sds_port_get_sas_address( struct scic_sds_port *this_port, @@ -470,13 +423,7 @@ void scic_sds_port_get_attached_protocols( struct scic_sds_port *this_port, struct 
sci_sas_identify_address_frame_protocols *protocols); -enum sci_status scic_sds_port_set_phy( - struct scic_sds_port *port, - struct scic_sds_phy *phy); -enum sci_status scic_sds_port_clear_phy( - struct scic_sds_port *port, - struct scic_sds_phy *phy); diff --git a/drivers/scsi/isci/core/scic_sds_remote_device.c b/drivers/scsi/isci/core/scic_sds_remote_device.c index 05599d178ca..5ab8b0321de 100644 --- a/drivers/scsi/isci/core/scic_sds_remote_device.c +++ b/drivers/scsi/isci/core/scic_sds_remote_device.c @@ -87,30 +87,6 @@ u32 scic_remote_device_get_object_size(void) + sizeof(struct scic_sds_remote_node_context); } -/* --------------------------------------------------------------------------- */ - -void scic_remote_device_construct(struct scic_sds_port *sci_port, - struct scic_sds_remote_device *sci_dev) -{ - sci_dev->owning_port = sci_port; - sci_dev->started_request_count = 0; - sci_dev->rnc = (struct scic_sds_remote_node_context *) &sci_dev[1]; - - sci_base_remote_device_construct( - &sci_dev->parent, - scic_sds_remote_device_state_table - ); - - scic_sds_remote_node_context_construct( - sci_dev, - sci_dev->rnc, - SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX - ); - - sci_object_set_association(sci_dev->rnc, sci_dev); -} - - enum sci_status scic_remote_device_da_construct( struct scic_sds_remote_device *sci_dev) { @@ -1330,7 +1306,7 @@ static enum sci_status scic_sds_remote_device_resetting_state_complete_request_h /* --------------------------------------------------------------------------- */ -const struct scic_sds_remote_device_state_handler scic_sds_remote_device_state_handler_table[] = { +static const struct scic_sds_remote_device_state_handler scic_sds_remote_device_state_handler_table[] = { [SCI_BASE_REMOTE_DEVICE_STATE_INITIAL] = { .parent.start_handler = scic_sds_remote_device_default_start_handler, .parent.stop_handler = scic_sds_remote_device_default_stop_handler, @@ -1741,7 +1717,7 @@ static void scic_sds_remote_device_final_state_enter( /* --------------------------------------------------------------------------- */ -const struct sci_base_state scic_sds_remote_device_state_table[] = { +static const struct sci_base_state scic_sds_remote_device_state_table[] = { [SCI_BASE_REMOTE_DEVICE_STATE_INITIAL] = { .enter_state = scic_sds_remote_device_initial_state_enter, }, @@ -1771,3 +1747,23 @@ const struct sci_base_state scic_sds_remote_device_state_table[] = { }, }; +void scic_remote_device_construct(struct scic_sds_port *sci_port, + struct scic_sds_remote_device *sci_dev) +{ + sci_dev->owning_port = sci_port; + sci_dev->started_request_count = 0; + sci_dev->rnc = (struct scic_sds_remote_node_context *) &sci_dev[1]; + + sci_base_remote_device_construct( + &sci_dev->parent, + scic_sds_remote_device_state_table + ); + + scic_sds_remote_node_context_construct( + sci_dev, + sci_dev->rnc, + SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX + ); + + sci_object_set_association(sci_dev->rnc, sci_dev); +} diff --git a/drivers/scsi/isci/core/scic_sds_remote_device.h b/drivers/scsi/isci/core/scic_sds_remote_device.h index aa466249f22..725c0588f2a 100644 --- a/drivers/scsi/isci/core/scic_sds_remote_device.h +++ b/drivers/scsi/isci/core/scic_sds_remote_device.h @@ -316,15 +316,10 @@ struct scic_sds_remote_device_state_handler { scic_sds_remote_device_frame_handler_t frame_handler; }; -extern const struct sci_base_state scic_sds_remote_device_state_table[]; extern const struct sci_base_state scic_sds_ssp_remote_device_ready_substate_table[]; extern const struct sci_base_state 
scic_sds_stp_remote_device_ready_substate_table[]; extern const struct sci_base_state scic_sds_smp_remote_device_ready_substate_table[]; -extern const struct scic_sds_remote_device_state_handler scic_sds_remote_device_state_handler_table[]; -extern const struct scic_sds_remote_device_state_handler scic_sds_stp_remote_device_ready_substate_handler_table[]; -extern const struct scic_sds_remote_device_state_handler scic_sds_smp_remote_device_ready_substate_handler_table[]; - /** * scic_sds_remote_device_increment_request_count() - * diff --git a/drivers/scsi/isci/core/scic_sds_remote_node_context.c b/drivers/scsi/isci/core/scic_sds_remote_node_context.c index 253b2d8aa23..81e4ab34dd3 100644 --- a/drivers/scsi/isci/core/scic_sds_remote_node_context.c +++ b/drivers/scsi/isci/core/scic_sds_remote_node_context.c @@ -64,26 +64,6 @@ #include "scu_event_codes.h" #include "scu_task_context.h" -void scic_sds_remote_node_context_construct( - struct scic_sds_remote_device *device, - struct scic_sds_remote_node_context *rnc, - u16 remote_node_index) -{ - memset(rnc, 0, sizeof(struct scic_sds_remote_node_context)); - - rnc->remote_node_index = remote_node_index; - rnc->device = device; - rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_UNSPECIFIED; - - sci_base_state_machine_construct( - &rnc->state_machine, - &rnc->parent, - scic_sds_remote_node_context_state_table, - SCIC_SDS_REMOTE_NODE_CONTEXT_INITIAL_STATE - ); - - sci_base_state_machine_start(&rnc->state_machine); -} /** * @@ -124,7 +104,7 @@ bool scic_sds_remote_node_context_is_ready( * * This method will construct the RNC buffer for this remote device object. none */ -void scic_sds_remote_node_context_construct_buffer( +static void scic_sds_remote_node_context_construct_buffer( struct scic_sds_remote_node_context *this_rnc) { union scu_remote_node_context *rnc; @@ -830,7 +810,7 @@ static enum sci_status scic_sds_remote_node_context_await_suspension_state_event /* --------------------------------------------------------------------------- */ -struct scic_sds_remote_node_context_handlers +static struct scic_sds_remote_node_context_handlers scic_sds_remote_node_context_state_handler_table[ SCIC_SDS_REMOTE_NODE_CONTEXT_MAX_STATES] = { @@ -1218,7 +1198,7 @@ static void scic_sds_remote_node_context_await_suspension_state_enter( /* --------------------------------------------------------------------------- */ -const struct sci_base_state scic_sds_remote_node_context_state_table[] = { +static const struct sci_base_state scic_sds_remote_node_context_state_table[] = { [SCIC_SDS_REMOTE_NODE_CONTEXT_INITIAL_STATE] = { .enter_state = scic_sds_remote_node_context_initial_state_enter, }, @@ -1245,3 +1225,23 @@ const struct sci_base_state scic_sds_remote_node_context_state_table[] = { }, }; +void scic_sds_remote_node_context_construct( + struct scic_sds_remote_device *device, + struct scic_sds_remote_node_context *rnc, + u16 remote_node_index) +{ + memset(rnc, 0, sizeof(struct scic_sds_remote_node_context)); + + rnc->remote_node_index = remote_node_index; + rnc->device = device; + rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_UNSPECIFIED; + + sci_base_state_machine_construct( + &rnc->state_machine, + &rnc->parent, + scic_sds_remote_node_context_state_table, + SCIC_SDS_REMOTE_NODE_CONTEXT_INITIAL_STATE + ); + + sci_base_state_machine_start(&rnc->state_machine); +} diff --git a/drivers/scsi/isci/core/scic_sds_remote_node_context.h b/drivers/scsi/isci/core/scic_sds_remote_node_context.h index c7c75ae6f09..eccad55ea57 100644 --- 
a/drivers/scsi/isci/core/scic_sds_remote_node_context.h +++ b/drivers/scsi/isci/core/scic_sds_remote_node_context.h @@ -279,19 +279,11 @@ struct scic_sds_remote_node_context { struct scic_sds_remote_node_context_handlers *state_handlers; }; -extern const struct sci_base_state scic_sds_remote_node_context_state_table[]; - -extern struct scic_sds_remote_node_context_handlers - scic_sds_remote_node_context_state_handler_table[ - SCIC_SDS_REMOTE_NODE_CONTEXT_MAX_STATES]; - void scic_sds_remote_node_context_construct( struct scic_sds_remote_device *device, struct scic_sds_remote_node_context *rnc, u16 remote_node_index); -void scic_sds_remote_node_context_construct_buffer( - struct scic_sds_remote_node_context *rnc); bool scic_sds_remote_node_context_is_ready( struct scic_sds_remote_node_context *this_rnc); diff --git a/drivers/scsi/isci/core/scic_sds_request.c b/drivers/scsi/isci/core/scic_sds_request.c index 4542f4e75f7..45b8571726f 100644 --- a/drivers/scsi/isci/core/scic_sds_request.c +++ b/drivers/scsi/isci/core/scic_sds_request.c @@ -325,59 +325,6 @@ void scic_sds_request_build_sgl(struct scic_sds_request *sds_request) } } -/** - * This method initializes common portions of the io request object. This - * includes construction of the struct sci_base_request parent. - * @the_controller: This parameter specifies the controller for which the - * request is being constructed. - * @the_target: This parameter specifies the remote device for which the - * request is being constructed. - * @io_tag: This parameter specifies the IO tag to be utilized for this - * request. This parameter can be set to SCI_CONTROLLER_INVALID_IO_TAG. - * @user_io_request_object: This parameter specifies the user request object - * for which the request is being constructed. - * @this_request: This parameter specifies the request being constructed. - * - */ -static void scic_sds_general_request_construct( - struct scic_sds_controller *the_controller, - struct scic_sds_remote_device *the_target, - u16 io_tag, - void *user_io_request_object, - struct scic_sds_request *this_request) -{ - sci_base_request_construct( - &this_request->parent, - scic_sds_request_state_table - ); - - this_request->io_tag = io_tag; - this_request->user_request = user_io_request_object; - this_request->owning_controller = the_controller; - this_request->target_device = the_target; - this_request->has_started_substate_machine = false; - this_request->protocol = SCIC_NO_PROTOCOL; - this_request->saved_rx_frame_index = SCU_INVALID_FRAME_INDEX; - this_request->device_sequence = scic_sds_remote_device_get_sequence(the_target); - - this_request->sci_status = SCI_SUCCESS; - this_request->scu_status = 0; - this_request->post_context = 0xFFFFFFFF; - - this_request->is_task_management_request = false; - - if (io_tag == SCI_CONTROLLER_INVALID_IO_TAG) { - this_request->was_tag_assigned_by_user = false; - this_request->task_context_buffer = NULL; - } else { - this_request->was_tag_assigned_by_user = true; - - this_request->task_context_buffer = - scic_sds_controller_get_task_context_buffer( - this_request->owning_controller, io_tag); - } -} - /** * This method build the remainder of the IO request object. * @this_request: This parameter specifies the request object being constructed. 
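
The request.c hunks above and below follow the same pattern as the phy, port, remote-device, and remote-node-context changes earlier in this diff: the state table and handler table become static const, the constructor that references them is moved below the table definitions, and the corresponding extern declarations are deleted from the header. The following is an illustrative sketch of that ordering only, not part of the patch; the foo_* names are hypothetical stand-ins for the scic_sds_* symbols touched here.

/* Sketch: file-local state table defined before the constructor that uses it. */
struct foo_state {
	void (*enter_state)(void *object);
};

struct foo {
	const struct foo_state *state_table;
	int state;
};

static void foo_initial_state_enter(void *object)
{
	/* state-entry work would go here */
}

/* The table is static, so no extern declaration is needed in the header. */
static const struct foo_state foo_state_table[] = {
	[0] = { .enter_state = foo_initial_state_enter },
};

/* The constructor stays externally visible, but because it now sits after
 * the table definition it needs no forward declaration either.
 */
void foo_construct(struct foo *f)
{
	f->state_table = foo_state_table;
	f->state = 0;
}

Keeping the table definition above its only in-file users lets the compiler enforce that nothing outside the translation unit depends on it, which is what allows the header hunks in this commit to drop the extern lines.
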
@@ -754,16 +701,6 @@ static enum sci_status scic_io_request_construct_sata(struct scic_sds_request *s return status; } -/* - * **************************************************************************** - * * SCIC Interface Implementation - * **************************************************************************** */ - - - - -/* --------------------------------------------------------------------------- */ - u32 scic_io_request_get_object_size(void) { u32 ssp_request_size; @@ -777,128 +714,6 @@ u32 scic_io_request_get_object_size(void) return max(ssp_request_size, max(stp_request_size, smp_request_size)); } -/* --------------------------------------------------------------------------- */ - - -/* --------------------------------------------------------------------------- */ - - -/* --------------------------------------------------------------------------- */ - - -/* --------------------------------------------------------------------------- */ - -enum sci_status scic_io_request_construct( - struct scic_sds_controller *scic_controller, - struct scic_sds_remote_device *scic_remote_device, - u16 io_tag, - void *user_io_request_object, - void *scic_io_request_memory, - struct scic_sds_request **new_scic_io_request_handle) -{ - enum sci_status status = SCI_SUCCESS; - struct scic_sds_request *this_request; - struct smp_discover_response_protocols device_protocol; - - this_request = (struct scic_sds_request *)scic_io_request_memory; - - /* Build the common part of the request */ - scic_sds_general_request_construct( - (struct scic_sds_controller *)scic_controller, - (struct scic_sds_remote_device *)scic_remote_device, - io_tag, - user_io_request_object, - this_request - ); - - if ( - scic_sds_remote_device_get_index((struct scic_sds_remote_device *)scic_remote_device) - == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX - ) { - return SCI_FAILURE_INVALID_REMOTE_DEVICE; - } - - scic_remote_device_get_protocols(scic_remote_device, &device_protocol); - - if (device_protocol.u.bits.attached_ssp_target) { - scic_sds_ssp_io_request_assign_buffers(this_request); - } else if (device_protocol.u.bits.attached_stp_target) { - scic_sds_stp_request_assign_buffers(this_request); - memset(this_request->command_buffer, 0, sizeof(struct sata_fis_reg_h2d)); - } else if (device_protocol.u.bits.attached_smp_target) { - scic_sds_smp_request_assign_buffers(this_request); - memset(this_request->command_buffer, 0, sizeof(struct smp_request)); - } else { - status = SCI_FAILURE_UNSUPPORTED_PROTOCOL; - } - - if (status == SCI_SUCCESS) { - memset( - this_request->task_context_buffer, - 0, - SCI_FIELD_OFFSET(struct scu_task_context, sgl_pair_ab) - ); - *new_scic_io_request_handle = scic_io_request_memory; - } - - return status; -} - -/* --------------------------------------------------------------------------- */ - - -enum sci_status scic_task_request_construct( - struct scic_sds_controller *controller, - struct scic_sds_remote_device *remote_device, - u16 io_tag, - void *user_io_request_object, - void *scic_task_request_memory, - struct scic_sds_request **new_scic_task_request_handle) -{ - enum sci_status status = SCI_SUCCESS; - struct scic_sds_request *this_request = (struct scic_sds_request *) - scic_task_request_memory; - struct smp_discover_response_protocols device_protocol; - - /* Build the common part of the request */ - scic_sds_general_request_construct( - (struct scic_sds_controller *)controller, - (struct scic_sds_remote_device *)remote_device, - io_tag, - user_io_request_object, - this_request - ); - - 
scic_remote_device_get_protocols(remote_device, &device_protocol); - - if (device_protocol.u.bits.attached_ssp_target) { - scic_sds_ssp_task_request_assign_buffers(this_request); - - this_request->has_started_substate_machine = true; - - /* Construct the started sub-state machine. */ - sci_base_state_machine_construct( - &this_request->started_substate_machine, - &this_request->parent.parent, - scic_sds_io_request_started_task_mgmt_substate_table, - SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_COMPLETION - ); - } else if (device_protocol.u.bits.attached_stp_target) { - scic_sds_stp_request_assign_buffers(this_request); - } else { - status = SCI_FAILURE_UNSUPPORTED_PROTOCOL; - } - - if (status == SCI_SUCCESS) { - this_request->is_task_management_request = true; - memset(this_request->task_context_buffer, 0x00, sizeof(struct scu_task_context)); - *new_scic_task_request_handle = scic_task_request_memory; - } - - return status; -} - - enum sci_status scic_io_request_construct_basic_ssp( struct scic_sds_request *sci_req) { @@ -1915,9 +1730,7 @@ static enum sci_status scic_sds_request_aborting_state_frame_handler( return SCI_SUCCESS; } -/* --------------------------------------------------------------------------- */ - -const struct scic_sds_io_request_state_handler scic_sds_request_state_handler_table[] = { +static const struct scic_sds_io_request_state_handler scic_sds_request_state_handler_table[] = { [SCI_BASE_REQUEST_STATE_INITIAL] = { .parent.start_handler = scic_sds_request_default_start_handler, .parent.abort_handler = scic_sds_request_default_abort_handler, @@ -2142,9 +1955,7 @@ static void scic_sds_request_final_state_enter( ); } -/* --------------------------------------------------------------------------- */ - -const struct sci_base_state scic_sds_request_state_table[] = { +static const struct sci_base_state scic_sds_request_state_table[] = { [SCI_BASE_REQUEST_STATE_INITIAL] = { .enter_state = scic_sds_request_initial_state_enter, }, @@ -2166,3 +1977,119 @@ const struct sci_base_state scic_sds_request_state_table[] = { }, }; +static void scic_sds_general_request_construct(struct scic_sds_controller *scic, + struct scic_sds_remote_device *sci_dev, + u16 io_tag, + void *user_io_request_object, + struct scic_sds_request *sci_req) +{ + sci_base_request_construct(&sci_req->parent, scic_sds_request_state_table); + sci_req->io_tag = io_tag; + sci_req->user_request = user_io_request_object; + sci_req->owning_controller = scic; + sci_req->target_device = sci_dev; + sci_req->has_started_substate_machine = false; + sci_req->protocol = SCIC_NO_PROTOCOL; + sci_req->saved_rx_frame_index = SCU_INVALID_FRAME_INDEX; + sci_req->device_sequence = scic_sds_remote_device_get_sequence(sci_dev); + + sci_req->sci_status = SCI_SUCCESS; + sci_req->scu_status = 0; + sci_req->post_context = 0xFFFFFFFF; + + sci_req->is_task_management_request = false; + + if (io_tag == SCI_CONTROLLER_INVALID_IO_TAG) { + sci_req->was_tag_assigned_by_user = false; + sci_req->task_context_buffer = NULL; + } else { + sci_req->was_tag_assigned_by_user = true; + + sci_req->task_context_buffer = + scic_sds_controller_get_task_context_buffer(scic, io_tag); + } +} + +enum sci_status scic_io_request_construct(struct scic_sds_controller *scic, + struct scic_sds_remote_device *sci_dev, + u16 io_tag, + void *user_io_request_object, + struct scic_sds_request *sci_req, + struct scic_sds_request **new_scic_io_request_handle) +{ + enum sci_status status = SCI_SUCCESS; + struct smp_discover_response_protocols device_protocol; + + /* 
Build the common part of the request */ + scic_sds_general_request_construct(scic, sci_dev, io_tag, + user_io_request_object, sci_req); + + if (sci_dev->rnc->remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) + return SCI_FAILURE_INVALID_REMOTE_DEVICE; + + scic_remote_device_get_protocols(sci_dev, &device_protocol); + + if (device_protocol.u.bits.attached_ssp_target) { + scic_sds_ssp_io_request_assign_buffers(sci_req); + } else if (device_protocol.u.bits.attached_stp_target) { + scic_sds_stp_request_assign_buffers(sci_req); + memset(sci_req->command_buffer, 0, sizeof(struct sata_fis_reg_h2d)); + } else if (device_protocol.u.bits.attached_smp_target) { + scic_sds_smp_request_assign_buffers(sci_req); + memset(sci_req->command_buffer, 0, sizeof(struct smp_request)); + } else { + status = SCI_FAILURE_UNSUPPORTED_PROTOCOL; + } + + if (status == SCI_SUCCESS) { + memset(sci_req->task_context_buffer, 0, + SCI_FIELD_OFFSET(struct scu_task_context, sgl_pair_ab)); + *new_scic_io_request_handle = sci_req; + } + + return status; +} + +enum sci_status scic_task_request_construct(struct scic_sds_controller *scic, + struct scic_sds_remote_device *sci_dev, + u16 io_tag, + void *user_io_request_object, + struct scic_sds_request *sci_req, + struct scic_sds_request **new_sci_req) +{ + enum sci_status status = SCI_SUCCESS; + struct smp_discover_response_protocols device_protocol; + + /* Build the common part of the request */ + scic_sds_general_request_construct(scic, sci_dev, io_tag, + user_io_request_object, + sci_req); + + scic_remote_device_get_protocols(sci_dev, &device_protocol); + + if (device_protocol.u.bits.attached_ssp_target) { + scic_sds_ssp_task_request_assign_buffers(sci_req); + + sci_req->has_started_substate_machine = true; + + /* Construct the started sub-state machine. 
*/ + sci_base_state_machine_construct( + &sci_req->started_substate_machine, + &sci_req->parent.parent, + scic_sds_io_request_started_task_mgmt_substate_table, + SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_COMPLETION + ); + } else if (device_protocol.u.bits.attached_stp_target) { + scic_sds_stp_request_assign_buffers(sci_req); + } else { + status = SCI_FAILURE_UNSUPPORTED_PROTOCOL; + } + + if (status == SCI_SUCCESS) { + sci_req->is_task_management_request = true; + memset(sci_req->task_context_buffer, 0, sizeof(struct scu_task_context)); + *new_sci_req = sci_req; + } + + return status; +} diff --git a/drivers/scsi/isci/core/scic_sds_request.h b/drivers/scsi/isci/core/scic_sds_request.h index 06b53c3b0aa..c54d8ef79ed 100644 --- a/drivers/scsi/isci/core/scic_sds_request.h +++ b/drivers/scsi/isci/core/scic_sds_request.h @@ -256,14 +256,7 @@ struct scic_sds_io_request_state_handler { }; -extern const struct sci_base_state scic_sds_request_state_table[]; -extern const struct scic_sds_io_request_state_handler scic_sds_request_state_handler_table[]; - extern const struct sci_base_state scic_sds_io_request_started_task_mgmt_substate_table[]; -extern const struct scic_sds_io_request_state_handler scic_sds_ssp_task_request_started_substate_handler_table[]; - -extern const struct sci_base_state scic_sds_smp_request_started_substate_table[]; -extern const struct scic_sds_io_request_state_handler scic_sds_smp_request_started_substate_handler_table[]; /** * diff --git a/drivers/scsi/isci/core/scic_sds_smp_remote_device.c b/drivers/scsi/isci/core/scic_sds_smp_remote_device.c index fb832ef544e..55202481e09 100644 --- a/drivers/scsi/isci/core/scic_sds_smp_remote_device.c +++ b/drivers/scsi/isci/core/scic_sds_smp_remote_device.c @@ -207,7 +207,7 @@ static enum sci_status scic_sds_smp_remote_device_ready_cmd_substate_frame_handl /* --------------------------------------------------------------------------- */ -const struct scic_sds_remote_device_state_handler scic_sds_smp_remote_device_ready_substate_handler_table[] = { +static const struct scic_sds_remote_device_state_handler scic_sds_smp_remote_device_ready_substate_handler_table[] = { [SCIC_SDS_SMP_REMOTE_DEVICE_READY_SUBSTATE_IDLE] = { .parent.start_handler = scic_sds_remote_device_default_start_handler, .parent.stop_handler = scic_sds_remote_device_ready_state_stop_handler, diff --git a/drivers/scsi/isci/core/scic_sds_smp_request.c b/drivers/scsi/isci/core/scic_sds_smp_request.c index 962bd3994f1..84b0fdd7ed9 100644 --- a/drivers/scsi/isci/core/scic_sds_smp_request.c +++ b/drivers/scsi/isci/core/scic_sds_smp_request.c @@ -142,73 +142,6 @@ void scic_sds_smp_request_assign_buffers( } } -/** - * This method is called by the SCI user to build an SMP IO request. - * - * - The user must have previously called scic_io_request_construct() on the - * supplied IO request. Indicate if the controller successfully built the IO - * request. SCI_SUCCESS This value is returned if the IO request was - * successfully built. SCI_FAILURE_UNSUPPORTED_PROTOCOL This value is returned - * if the remote_device does not support the SMP protocol. - * SCI_FAILURE_INVALID_ASSOCIATION This value is returned if the user did not - * properly set the association between the SCIC IO request and the user's IO - * request. Please refer to the sci_object_set_association() routine for more - * information. 
- */ -enum sci_status scic_io_request_construct_smp( - struct scic_sds_request *sci_req) -{ - struct smp_request *smp_req = kmalloc(sizeof(*smp_req), GFP_KERNEL); - - if (!smp_req) - return SCI_FAILURE_INSUFFICIENT_RESOURCES; - - sci_req->protocol = SCIC_SMP_PROTOCOL; - sci_req->has_started_substate_machine = true; - - /* Construct the started sub-state machine. */ - sci_base_state_machine_construct( - &sci_req->started_substate_machine, - &sci_req->parent.parent, - scic_sds_smp_request_started_substate_table, - SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_RESPONSE - ); - - /* Construct the SMP SCU Task Context */ - memcpy(smp_req, sci_req->command_buffer, sizeof(*smp_req)); - - /* - * Look at the SMP requests' header fields; for certain SAS 1.x SMP - * functions under SAS 2.0, a zero request length really indicates - * a non-zero default length. */ - if (smp_req->header.request_length == 0) { - switch (smp_req->header.function) { - case SMP_FUNCTION_DISCOVER: - case SMP_FUNCTION_REPORT_PHY_ERROR_LOG: - case SMP_FUNCTION_REPORT_PHY_SATA: - case SMP_FUNCTION_REPORT_ROUTE_INFORMATION: - smp_req->header.request_length = 2; - break; - case SMP_FUNCTION_CONFIGURE_ROUTE_INFORMATION: - case SMP_FUNCTION_PHY_CONTROL: - case SMP_FUNCTION_PHY_TEST: - smp_req->header.request_length = 9; - break; - /* Default - zero is a valid default for 2.0. */ - } - } - - scu_smp_request_construct_task_context(sci_req, smp_req); - - sci_base_state_machine_change_state( - &sci_req->parent.state_machine, - SCI_BASE_REQUEST_STATE_CONSTRUCTED - ); - - kfree(smp_req); - - return SCI_SUCCESS; -} /** * This method is called by the SCI user to build an SMP pass-through IO @@ -595,7 +528,7 @@ static enum sci_status scic_sds_smp_request_await_tc_completion_tc_completion_ha } -const struct scic_sds_io_request_state_handler scic_sds_smp_request_started_substate_handler_table[] = { +static const struct scic_sds_io_request_state_handler scic_sds_smp_request_started_substate_handler_table[] = { [SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_RESPONSE] = { .parent.start_handler = scic_sds_request_default_start_handler, .parent.abort_handler = scic_sds_request_started_state_abort_handler, @@ -658,7 +591,7 @@ static void scic_sds_smp_request_started_await_tc_completion_substate_enter( ); } -const struct sci_base_state scic_sds_smp_request_started_substate_table[] = { +static const struct sci_base_state scic_sds_smp_request_started_substate_table[] = { [SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_RESPONSE] = { .enter_state = scic_sds_smp_request_started_await_response_substate_enter, }, @@ -667,3 +600,69 @@ const struct sci_base_state scic_sds_smp_request_started_substate_table[] = { }, }; +/** + * This method is called by the SCI user to build an SMP IO request. + * + * - The user must have previously called scic_io_request_construct() on the + * supplied IO request. Indicate if the controller successfully built the IO + * request. SCI_SUCCESS This value is returned if the IO request was + * successfully built. SCI_FAILURE_UNSUPPORTED_PROTOCOL This value is returned + * if the remote_device does not support the SMP protocol. + * SCI_FAILURE_INVALID_ASSOCIATION This value is returned if the user did not + * properly set the association between the SCIC IO request and the user's IO + * request. Please refer to the sci_object_set_association() routine for more + * information. 
+ */ +enum sci_status scic_io_request_construct_smp(struct scic_sds_request *sci_req) +{ + struct smp_request *smp_req = kmalloc(sizeof(*smp_req), GFP_KERNEL); + + if (!smp_req) + return SCI_FAILURE_INSUFFICIENT_RESOURCES; + + sci_req->protocol = SCIC_SMP_PROTOCOL; + sci_req->has_started_substate_machine = true; + + /* Construct the started sub-state machine. */ + sci_base_state_machine_construct( + &sci_req->started_substate_machine, + &sci_req->parent.parent, + scic_sds_smp_request_started_substate_table, + SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_RESPONSE + ); + + /* Construct the SMP SCU Task Context */ + memcpy(smp_req, sci_req->command_buffer, sizeof(*smp_req)); + + /* + * Look at the SMP requests' header fields; for certain SAS 1.x SMP + * functions under SAS 2.0, a zero request length really indicates + * a non-zero default length. */ + if (smp_req->header.request_length == 0) { + switch (smp_req->header.function) { + case SMP_FUNCTION_DISCOVER: + case SMP_FUNCTION_REPORT_PHY_ERROR_LOG: + case SMP_FUNCTION_REPORT_PHY_SATA: + case SMP_FUNCTION_REPORT_ROUTE_INFORMATION: + smp_req->header.request_length = 2; + break; + case SMP_FUNCTION_CONFIGURE_ROUTE_INFORMATION: + case SMP_FUNCTION_PHY_CONTROL: + case SMP_FUNCTION_PHY_TEST: + smp_req->header.request_length = 9; + break; + /* Default - zero is a valid default for 2.0. */ + } + } + + scu_smp_request_construct_task_context(sci_req, smp_req); + + sci_base_state_machine_change_state( + &sci_req->parent.state_machine, + SCI_BASE_REQUEST_STATE_CONSTRUCTED + ); + + kfree(smp_req); + + return SCI_SUCCESS; +} diff --git a/drivers/scsi/isci/core/scic_sds_ssp_request.c b/drivers/scsi/isci/core/scic_sds_ssp_request.c index a826e4bf292..c9aa35f5cd1 100644 --- a/drivers/scsi/isci/core/scic_sds_ssp_request.c +++ b/drivers/scsi/isci/core/scic_sds_ssp_request.c @@ -193,7 +193,7 @@ static enum sci_status scic_sds_ssp_task_request_await_tc_response_frame_handler return SCI_SUCCESS; } -const struct scic_sds_io_request_state_handler scic_sds_ssp_task_request_started_substate_handler_table[] = { +static const struct scic_sds_io_request_state_handler scic_sds_ssp_task_request_started_substate_handler_table[] = { [SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_COMPLETION] = { .parent.start_handler = scic_sds_request_default_start_handler, .parent.abort_handler = scic_sds_request_started_state_abort_handler, diff --git a/drivers/scsi/isci/core/scic_sds_stp_pio_request.h b/drivers/scsi/isci/core/scic_sds_stp_pio_request.h index 64bf40a6e1d..d4dc118f2de 100644 --- a/drivers/scsi/isci/core/scic_sds_stp_pio_request.h +++ b/drivers/scsi/isci/core/scic_sds_stp_pio_request.h @@ -99,18 +99,7 @@ enum _SCIC_SDS_STP_REQUEST_STARTED_PIO_SUBSTATES { SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_OUT_TRANSMIT_DATA_SUBSTATE, }; - -/* --------------------------------------------------------------------------- */ - -extern const struct scic_sds_io_request_state_handler scic_sds_stp_request_started_pio_substate_handler_table[]; - -extern const struct sci_base_state scic_sds_stp_request_started_pio_substate_table[]; - -/* --------------------------------------------------------------------------- */ - struct scic_sds_stp_request; -struct scu_sgl_element *scic_sds_stp_request_pio_get_next_sgl( - struct scic_sds_stp_request *this_request); #endif /* _SCIC_SDS_SATA_PIO_REQUEST_H_ */ diff --git a/drivers/scsi/isci/core/scic_sds_stp_remote_device.c b/drivers/scsi/isci/core/scic_sds_stp_remote_device.c index cb396d12777..193a95fa90e 100644 --- 
a/drivers/scsi/isci/core/scic_sds_stp_remote_device.c +++ b/drivers/scsi/isci/core/scic_sds_stp_remote_device.c @@ -565,7 +565,7 @@ enum sci_status scic_sds_stp_remote_device_ready_atapi_error_substate_event_hand /* --------------------------------------------------------------------------- */ -const struct scic_sds_remote_device_state_handler scic_sds_stp_remote_device_ready_substate_handler_table[] = { +static const struct scic_sds_remote_device_state_handler scic_sds_stp_remote_device_ready_substate_handler_table[] = { [SCIC_SDS_STP_REMOTE_DEVICE_READY_SUBSTATE_IDLE] = { .parent.start_handler = scic_sds_remote_device_default_start_handler, .parent.stop_handler = scic_sds_remote_device_ready_state_stop_handler, diff --git a/drivers/scsi/isci/core/scic_sds_stp_request.c b/drivers/scsi/isci/core/scic_sds_stp_request.c index 8da309f81ac..0e961e9cd6e 100644 --- a/drivers/scsi/isci/core/scic_sds_stp_request.c +++ b/drivers/scsi/isci/core/scic_sds_stp_request.c @@ -267,7 +267,7 @@ static void scu_sata_reqeust_construct_task_context( * This method will perform any general sata request construction. What part of * SATA IO request construction is general? none */ -void scic_sds_stp_non_ncq_request_construct( +static void scic_sds_stp_non_ncq_request_construct( struct scic_sds_request *this_request) { this_request->has_started_substate_machine = true; @@ -318,33 +318,6 @@ static void scic_sds_stp_optimized_request_construct(struct scic_sds_request *sc } } -/** - * - * @sci_req: This parameter specifies the request to be constructed. - * - * This method will construct the STP UDMA request and its associated TC data. - * This method returns an indication as to whether the construction was - * successful. SCI_SUCCESS Currently this method always returns this value. - */ -enum sci_status scic_sds_stp_udma_request_construct(struct scic_sds_request *sci_req, - u32 len, - enum dma_data_direction dir) -{ - scic_sds_stp_non_ncq_request_construct(sci_req); - - scic_sds_stp_optimized_request_construct(sci_req, SCU_TASK_TYPE_DMA_IN, - len, dir); - - sci_base_state_machine_construct( - &sci_req->started_substate_machine, - &sci_req->parent.parent, - scic_sds_stp_request_started_udma_substate_table, - SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_TC_COMPLETION_SUBSTATE - ); - - return SCI_SUCCESS; -} - /** * * @sci_req: This parameter specifies the request to be constructed. @@ -364,7 +337,7 @@ enum sci_status scic_sds_stp_ncq_request_construct(struct scic_sds_request *sci_ } /** - * + * scu_stp_raw_request_construct_task_context - * @this_request: This parameter specifies the STP request object for which to * construct a RAW command frame task context. * @task_context: This parameter specifies the SCU specific task context buffer @@ -373,7 +346,7 @@ enum sci_status scic_sds_stp_ncq_request_construct(struct scic_sds_request *sci_ * This method performs the operations common to all SATA/STP requests * utilizing the raw frame method. none */ -void scu_stp_raw_request_construct_task_context( +static void scu_stp_raw_request_construct_task_context( struct scic_sds_stp_request *this_request, struct scu_task_context *task_context) { @@ -386,59 +359,6 @@ void scu_stp_raw_request_construct_task_context( task_context->transfer_length_bytes = sizeof(struct sata_fis_reg_h2d) - sizeof(u32); } -/** - * - * @this_request: This parameter specifies the core request object to - * construction into an STP/SATA non-data request. - * - * This method will construct the STP Non-data request and its associated TC - * data. 
A non-data request essentially behaves like a 0 length read request - * in the SCU. This method currently always returns SCI_SUCCESS - */ -enum sci_status scic_sds_stp_non_data_request_construct( - struct scic_sds_request *this_request) -{ - scic_sds_stp_non_ncq_request_construct(this_request); - - /* Build the STP task context structure */ - scu_stp_raw_request_construct_task_context( - (struct scic_sds_stp_request *)this_request, - this_request->task_context_buffer - ); - - sci_base_state_machine_construct( - &this_request->started_substate_machine, - &this_request->parent.parent, - scic_sds_stp_request_started_non_data_substate_table, - SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_H2D_COMPLETION_SUBSTATE - ); - - return SCI_SUCCESS; -} - - -enum sci_status scic_sds_stp_soft_reset_request_construct( - struct scic_sds_request *this_request) -{ - scic_sds_stp_non_ncq_request_construct(this_request); - - /* Build the STP task context structure */ - scu_stp_raw_request_construct_task_context( - (struct scic_sds_stp_request *)this_request, - this_request->task_context_buffer - ); - - sci_base_state_machine_construct( - &this_request->started_substate_machine, - &this_request->parent.parent, - scic_sds_stp_request_started_soft_reset_substate_table, - SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_ASSERTED_COMPLETION_SUBSTATE - ); - - return SCI_SUCCESS; -} - - void scic_stp_io_request_set_ncq_tag( struct scic_sds_request *req, u16 ncq_tag) @@ -474,7 +394,7 @@ void *scic_stp_io_request_get_d2h_reg_address( * - if there are more SGL element pairs - advance to the next pair and return * element A struct scu_sgl_element* */ -struct scu_sgl_element *scic_sds_stp_request_pio_get_next_sgl(struct scic_sds_stp_request *stp_req) +static struct scu_sgl_element *scic_sds_stp_request_pio_get_next_sgl(struct scic_sds_stp_request *stp_req) { struct scu_sgl_element *current_sgl; struct scic_sds_request *sci_req = &stp_req->parent; @@ -508,60 +428,6 @@ struct scu_sgl_element *scic_sds_stp_request_pio_get_next_sgl(struct scic_sds_st return current_sgl; } -/** - * - * @scic_io_request: The core request object which is cast to a SATA PIO - * request object. - * - * This method will construct the SATA PIO request. This method returns an - * indication as to whether the construction was successful. SCI_SUCCESS - * Currently this method always returns this value. - */ -enum sci_status scic_sds_stp_pio_request_construct( - struct scic_sds_request *scic_io_request, - u8 sat_protocol, - bool copy_rx_frame) -{ - struct scic_sds_stp_request *this_request; - - this_request = (struct scic_sds_stp_request *)scic_io_request; - - scic_sds_stp_non_ncq_request_construct(&this_request->parent); - - scu_stp_raw_request_construct_task_context( - this_request, this_request->parent.task_context_buffer - ); - - this_request->type.pio.current_transfer_bytes = 0; - this_request->type.pio.ending_error = 0; - this_request->type.pio.ending_status = 0; - - this_request->type.pio.request_current.sgl_offset = 0; - this_request->type.pio.request_current.sgl_set = SCU_SGL_ELEMENT_PAIR_A; - this_request->type.pio.sat_protocol = sat_protocol; - - if (copy_rx_frame) { - scic_sds_request_build_sgl(&this_request->parent); - /* - * Since the IO request copy of the TC contains the same data as - * the actual TC this pointer is vaild for either. 
*/ - this_request->type.pio.request_current.sgl_pair = - &this_request->parent.task_context_buffer->sgl_pair_ab; - } else { - /* The user does not want the data copied to the SGL buffer location */ - this_request->type.pio.request_current.sgl_pair = NULL; - } - - sci_base_state_machine_construct( - &this_request->parent.started_substate_machine, - &this_request->parent.parent.parent, - scic_sds_stp_request_started_pio_substate_table, - SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_H2D_COMPLETION_SUBSTATE - ); - - return SCI_SUCCESS; -} - /** * * @this_request: @@ -689,7 +555,7 @@ static enum sci_status scic_sds_stp_request_non_data_await_d2h_frame_handler( /* --------------------------------------------------------------------------- */ -const struct scic_sds_io_request_state_handler scic_sds_stp_request_started_non_data_substate_handler_table[] = { +static const struct scic_sds_io_request_state_handler scic_sds_stp_request_started_non_data_substate_handler_table[] = { [SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_H2D_COMPLETION_SUBSTATE] = { .parent.start_handler = scic_sds_request_default_start_handler, .parent.abort_handler = scic_sds_request_started_state_abort_handler, @@ -740,7 +606,7 @@ static void scic_sds_stp_request_started_non_data_await_d2h_enter( /* --------------------------------------------------------------------------- */ -const struct sci_base_state scic_sds_stp_request_started_non_data_substate_table[] = { +static const struct sci_base_state scic_sds_stp_request_started_non_data_substate_table[] = { [SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_H2D_COMPLETION_SUBSTATE] = { .enter_state = scic_sds_stp_request_started_non_data_await_h2d_completion_enter, }, @@ -749,6 +615,23 @@ const struct sci_base_state scic_sds_stp_request_started_non_data_substate_table }, }; +enum sci_status scic_sds_stp_non_data_request_construct(struct scic_sds_request *sci_req) +{ + struct scic_sds_stp_request *stp_req = container_of(sci_req, typeof(*stp_req), parent); + + scic_sds_stp_non_ncq_request_construct(sci_req); + + /* Build the STP task context structure */ + scu_stp_raw_request_construct_task_context(stp_req, sci_req->task_context_buffer); + + sci_base_state_machine_construct(&sci_req->started_substate_machine, + &sci_req->parent.parent, + scic_sds_stp_request_started_non_data_substate_table, + SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_H2D_COMPLETION_SUBSTATE); + + return SCI_SUCCESS; +} + #define SCU_MAX_FRAME_BUFFER_SIZE 0x400 /* 1K is the maximum SCU frame data payload */ /** @@ -1330,7 +1213,7 @@ static enum sci_status scic_sds_stp_request_pio_data_in_await_data_event_handler /* --------------------------------------------------------------------------- */ -const struct scic_sds_io_request_state_handler scic_sds_stp_request_started_pio_substate_handler_table[] = { +static const struct scic_sds_io_request_state_handler scic_sds_stp_request_started_pio_substate_handler_table[] = { [SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_H2D_COMPLETION_SUBSTATE] = { .parent.start_handler = scic_sds_request_default_start_handler, .parent.abort_handler = scic_sds_request_started_state_abort_handler, @@ -1422,7 +1305,7 @@ static void scic_sds_stp_request_started_pio_data_out_transmit_data_enter( /* --------------------------------------------------------------------------- */ -const struct sci_base_state scic_sds_stp_request_started_pio_substate_table[] = { +static const struct sci_base_state scic_sds_stp_request_started_pio_substate_table[] = { [SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_H2D_COMPLETION_SUBSTATE] = 
{ .enter_state = scic_sds_stp_request_started_pio_await_h2d_completion_enter, }, @@ -1437,6 +1320,45 @@ const struct sci_base_state scic_sds_stp_request_started_pio_substate_table[] = } }; +enum sci_status scic_sds_stp_pio_request_construct(struct scic_sds_request *sci_req, + u8 sat_protocol, + bool copy_rx_frame) +{ + struct scic_sds_stp_request *stp_req = container_of(sci_req, typeof(*stp_req), parent); + struct scic_sds_stp_pio_request *pio = &stp_req->type.pio; + + scic_sds_stp_non_ncq_request_construct(sci_req); + + scu_stp_raw_request_construct_task_context(stp_req, + sci_req->task_context_buffer); + + pio->current_transfer_bytes = 0; + pio->ending_error = 0; + pio->ending_status = 0; + + pio->request_current.sgl_offset = 0; + pio->request_current.sgl_set = SCU_SGL_ELEMENT_PAIR_A; + pio->sat_protocol = sat_protocol; + + if (copy_rx_frame) { + scic_sds_request_build_sgl(sci_req); + /* Since the IO request copy of the TC contains the same data as + * the actual TC this pointer is vaild for either. + */ + pio->request_current.sgl_pair = &sci_req->task_context_buffer->sgl_pair_ab; + } else { + /* The user does not want the data copied to the SGL buffer location */ + pio->request_current.sgl_pair = NULL; + } + + sci_base_state_machine_construct(&sci_req->started_substate_machine, + &sci_req->parent.parent, + scic_sds_stp_request_started_pio_substate_table, + SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_H2D_COMPLETION_SUBSTATE); + + return SCI_SUCCESS; +} + static void scic_sds_stp_request_udma_complete_request( struct scic_sds_request *this_request, u32 scu_status, @@ -1594,7 +1516,7 @@ static enum sci_status scic_sds_stp_request_udma_await_d2h_reg_fis_frame_handler /* --------------------------------------------------------------------------- */ -const struct scic_sds_io_request_state_handler scic_sds_stp_request_started_udma_substate_handler_table[] = { +static const struct scic_sds_io_request_state_handler scic_sds_stp_request_started_udma_substate_handler_table[] = { [SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_TC_COMPLETION_SUBSTATE] = { .parent.start_handler = scic_sds_request_default_start_handler, .parent.abort_handler = scic_sds_request_started_state_abort_handler, @@ -1648,7 +1570,7 @@ static void scic_sds_stp_request_started_udma_await_d2h_reg_fis_enter( /* --------------------------------------------------------------------------- */ -const struct sci_base_state scic_sds_stp_request_started_udma_substate_table[] = { +static const struct sci_base_state scic_sds_stp_request_started_udma_substate_table[] = { [SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_TC_COMPLETION_SUBSTATE] = { .enter_state = scic_sds_stp_request_started_udma_await_tc_completion_enter, }, @@ -1657,6 +1579,25 @@ const struct sci_base_state scic_sds_stp_request_started_udma_substate_table[] = }, }; +enum sci_status scic_sds_stp_udma_request_construct(struct scic_sds_request *sci_req, + u32 len, + enum dma_data_direction dir) +{ + scic_sds_stp_non_ncq_request_construct(sci_req); + + scic_sds_stp_optimized_request_construct(sci_req, SCU_TASK_TYPE_DMA_IN, + len, dir); + + sci_base_state_machine_construct( + &sci_req->started_substate_machine, + &sci_req->parent.parent, + scic_sds_stp_request_started_udma_substate_table, + SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_TC_COMPLETION_SUBSTATE + ); + + return SCI_SUCCESS; +} + /** * * @this_request: @@ -1831,7 +1772,7 @@ static enum sci_status scic_sds_stp_request_soft_reset_await_d2h_frame_handler( /* --------------------------------------------------------------------------- */ -const 
struct scic_sds_io_request_state_handler scic_sds_stp_request_started_soft_reset_substate_handler_table[] = { +static const struct scic_sds_io_request_state_handler scic_sds_stp_request_started_soft_reset_substate_handler_table[] = { [SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_ASSERTED_COMPLETION_SUBSTATE] = { .parent.start_handler = scic_sds_request_default_start_handler, .parent.abort_handler = scic_sds_request_started_state_abort_handler, @@ -1925,9 +1866,7 @@ static void scic_sds_stp_request_started_soft_reset_await_d2h_response_enter( ); } -/* --------------------------------------------------------------------------- */ - -const struct sci_base_state scic_sds_stp_request_started_soft_reset_substate_table[] = { +static const struct sci_base_state scic_sds_stp_request_started_soft_reset_substate_table[] = { [SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_ASSERTED_COMPLETION_SUBSTATE] = { .enter_state = scic_sds_stp_request_started_soft_reset_await_h2d_asserted_completion_enter, }, @@ -1939,3 +1878,19 @@ const struct sci_base_state scic_sds_stp_request_started_soft_reset_substate_tab }, }; +enum sci_status scic_sds_stp_soft_reset_request_construct(struct scic_sds_request *sci_req) +{ + struct scic_sds_stp_request *stp_req = container_of(sci_req, typeof(*stp_req), parent); + + scic_sds_stp_non_ncq_request_construct(sci_req); + + /* Build the STP task context structure */ + scu_stp_raw_request_construct_task_context(stp_req, sci_req->task_context_buffer); + + sci_base_state_machine_construct(&sci_req->started_substate_machine, + &sci_req->parent.parent, + scic_sds_stp_request_started_soft_reset_substate_table, + SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_ASSERTED_COMPLETION_SUBSTATE); + + return SCI_SUCCESS; +} diff --git a/drivers/scsi/isci/core/scic_sds_stp_request.h b/drivers/scsi/isci/core/scic_sds_stp_request.h index c950bb33ee0..107487978f0 100644 --- a/drivers/scsi/isci/core/scic_sds_stp_request.h +++ b/drivers/scsi/isci/core/scic_sds_stp_request.h @@ -76,7 +76,7 @@ struct scic_sds_stp_request { u32 udma; - struct { + struct scic_sds_stp_pio_request { /** * Total transfer for the entire PIO request recorded at request constuction * time. 
@@ -169,26 +169,8 @@ enum SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_SUBSTATES { SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_D2H_RESPONSE_FRAME_SUBSTATE, }; -extern const struct scic_sds_io_request_state_handler scic_sds_stp_request_started_udma_substate_handler_table[]; - -extern const struct sci_base_state scic_sds_stp_request_started_udma_substate_table[]; - -extern const struct scic_sds_io_request_state_handler scic_sds_stp_request_started_non_data_substate_handler_table[]; - -extern const struct sci_base_state scic_sds_stp_request_started_non_data_substate_table[]; - -extern const struct scic_sds_io_request_state_handler scic_sds_stp_request_started_soft_reset_substate_handler_table[]; - -extern const struct sci_base_state scic_sds_stp_request_started_soft_reset_substate_table[]; - -/* --------------------------------------------------------------------------- */ - u32 scic_sds_stp_request_get_object_size(void); - -void scic_sds_stp_non_ncq_request_construct( - struct scic_sds_request *this_request); - enum sci_status scic_sds_stp_pio_request_construct( struct scic_sds_request *scic_io_request, u8 sat_protocol, @@ -214,8 +196,5 @@ enum sci_status scic_sds_stp_ncq_request_construct( u32 transfer_length, enum dma_data_direction dir); -void scu_stp_raw_request_construct_task_context( - struct scic_sds_stp_request *this_request, - struct scu_task_context *task_context); #endif /* _SCIC_SDS_STP_REQUEST_T_ */ diff --git a/drivers/scsi/isci/firmware/README b/drivers/scsi/isci/firmware/README index cf7e4286e89..8056d2bd233 100644 --- a/drivers/scsi/isci/firmware/README +++ b/drivers/scsi/isci/firmware/README @@ -32,5 +32,5 @@ Header Type - u8: 0xf ============================================================================== Place isci_firmware.bin in /lib/firmware -Be sure to recreate the initramfs image to include the firmware. +Be sure to recreate the initramfs image to include the firmware. diff --git a/drivers/scsi/isci/port.c b/drivers/scsi/isci/port.c index 666076a2834..74dc96dc6b1 100644 --- a/drivers/scsi/isci/port.c +++ b/drivers/scsi/isci/port.c @@ -201,7 +201,7 @@ void isci_port_link_up( call_status = scic_sata_phy_get_properties(phy, &sata_phy_properties); - /* + /* * XXX I am concerned about this "assert". shouldn't we * handle the return appropriately? */ diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c index eba8e0b3c87..c6ce9d0c50c 100644 --- a/drivers/scsi/isci/request.c +++ b/drivers/scsi/isci/request.c @@ -1263,47 +1263,6 @@ enum dma_data_direction isci_request_io_request_get_data_direction( * * physical address in the specified sge. */ -dma_addr_t isci_request_sge_get_address_field( - struct isci_request *request, - void *sge_address) -{ - struct sas_task *task = isci_request_access_task(request); - dma_addr_t ret; - struct isci_host *isci_host = isci_host_from_sas_ha( - task->dev->port->ha); - - dev_dbg(&isci_host->pdev->dev, - "%s: request = %p, sge_address = %p\n", - __func__, - request, - sge_address); - - if (task->data_dir == PCI_DMA_NONE) - return 0; - - /* the case where num_scatter == 0 is special, in that - * task->scatter is the actual buffer address, not an sgl. - * so a map single is required here. 
- */ - if ((task->num_scatter == 0) && - !sas_protocol_ata(task->task_proto)) { - ret = dma_map_single( - &isci_host->pdev->dev, - task->scatter, - task->total_xfer_len, - task->data_dir - ); - request->zero_scatter_daddr = ret; - } else - ret = sg_dma_address(((struct scatterlist *)sge_address)); - - dev_dbg(&isci_host->pdev->dev, - "%s: bus address = %lx\n", - __func__, - (unsigned long)ret); - - return ret; -} /** @@ -1314,38 +1273,6 @@ dma_addr_t isci_request_sge_get_address_field( * * length field value in the specified sge. */ -u32 isci_request_sge_get_length_field( - struct isci_request *request, - void *sge_address) -{ - struct sas_task *task = isci_request_access_task(request); - int ret; - - dev_dbg(&request->isci_host->pdev->dev, - "%s: request = %p, sge_address = %p\n", - __func__, - request, - sge_address); - - if (task->data_dir == PCI_DMA_NONE) - return 0; - - /* the case where num_scatter == 0 is special, in that - * task->scatter is the actual buffer address, not an sgl. - * so we return total_xfer_len here. - */ - if (task->num_scatter == 0) - ret = task->total_xfer_len; - else - ret = sg_dma_len((struct scatterlist *)sge_address); - - dev_dbg(&request->isci_host->pdev->dev, - "%s: len = %d\n", - __func__, - ret); - - return ret; -} /** diff --git a/drivers/scsi/isci/request.h b/drivers/scsi/isci/request.h index b45c0f1f057..4a63bb6de44 100644 --- a/drivers/scsi/isci/request.h +++ b/drivers/scsi/isci/request.h @@ -391,13 +391,7 @@ static inline void *isci_request_io_request_get_next_sge( return ret; } -dma_addr_t isci_request_sge_get_address_field( - struct isci_request *request, - void *sge_address); -u32 isci_request_sge_get_length_field( - struct isci_request *request, - void *sge_address); void *isci_request_ssp_io_request_get_cdb_address( struct isci_request *request); diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c index d4836800250..7e966840255 100644 --- a/drivers/scsi/isci/task.c +++ b/drivers/scsi/isci/task.c @@ -588,7 +588,7 @@ void isci_task_build_tmf( tmf->cb_data = cb_data; } -void isci_task_build_abort_task_tmf( +static void isci_task_build_abort_task_tmf( struct isci_tmf *tmf, struct isci_remote_device *isci_device, enum isci_tmf_function_codes code, @@ -1528,15 +1528,6 @@ void isci_task_request_complete( * * lun for specified task request. 
*/ -u32 isci_task_ssp_request_get_lun(struct isci_request *request) -{ - struct isci_tmf *isci_tmf = isci_request_access_tmf(request); - - dev_dbg(&request->isci_host->pdev->dev, - "%s: lun = %d\n", __func__, isci_tmf->lun[0]); -/* @todo: build lun from array of bytes to 32 bit */ - return isci_tmf->lun[0]; -} /** * isci_task_ssp_request_get_function() - This function is called by the sci diff --git a/drivers/scsi/isci/task.h b/drivers/scsi/isci/task.h index 9754b43f5b0..d7cb6fe268c 100644 --- a/drivers/scsi/isci/task.h +++ b/drivers/scsi/isci/task.h @@ -202,8 +202,6 @@ u16 isci_task_ssp_request_get_io_tag_to_manage( u8 isci_task_ssp_request_get_function( struct isci_request *request); -u32 isci_task_ssp_request_get_lun( - struct isci_request *request); void *isci_task_ssp_request_get_response_data_address( struct isci_request *request); @@ -226,14 +224,6 @@ void isci_task_build_tmf( void *), void *cb_data); -void isci_task_build_abort_task_tmf( - struct isci_tmf *tmf, - struct isci_remote_device *isci_device, - enum isci_tmf_function_codes code, - void (*tmf_sent_cb)( - enum isci_tmf_cb_state, - struct isci_tmf *, void *), - struct isci_request *old_request); int isci_task_execute_tmf( struct isci_host *isci_host, -- cgit v1.2.3-70-g09d2 From f219f010a355487638bf2fff4724a420e7158fd2 Mon Sep 17 00:00:00 2001 From: Jeff Skirvin <jeffrey.d.skirvin@intel.com> Date: Thu, 31 Mar 2011 13:10:34 -0700 Subject: isci: Properly handle requests in the "aborting" state. When a TMF times-out, the request is set back to "aborting". Requests in the "aborting" state must be terminated when LUN and device resets occur. Signed-off-by: Jeff Skirvin <jeffrey.d.skirvin@intel.com> Signed-off-by: Dan Williams <dan.j.williams@intel.com> --- drivers/scsi/isci/request.h | 2 +- drivers/scsi/isci/task.c | 14 +++++++++++--- 2 files changed, 12 insertions(+), 4 deletions(-) (limited to 'drivers/scsi/isci/request.h') diff --git a/drivers/scsi/isci/request.h b/drivers/scsi/isci/request.h index 4a63bb6de44..0c08da6bcd8 100644 --- a/drivers/scsi/isci/request.h +++ b/drivers/scsi/isci/request.h @@ -199,7 +199,7 @@ static inline enum isci_request_status isci_request_change_started_to_newstate( old_state = isci_request->status; - if (old_state == started) { + if (old_state == started || old_state == aborting) { BUG_ON(isci_request->io_request_completion != NULL); isci_request->io_request_completion = completion_ptr; diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c index 7e966840255..338f08ec4d8 100644 --- a/drivers/scsi/isci/task.c +++ b/drivers/scsi/isci/task.c @@ -903,7 +903,9 @@ static void isci_terminate_request( new_request_state ); - if ((old_state == started) || (old_state == completed)) { + if ((old_state == started) || + (old_state == completed) || + (old_state == aborting)) { /* If the old_state is started: * This request was not already being aborted. If it had been, @@ -920,6 +922,10 @@ static void isci_terminate_request( * This request completed from the SCU hardware perspective * and now just needs cleaning up in terms of freeing the * request and potentially calling up to libsas. + * + * If old_state == aborting: + * This request has already gone through a TMF timeout, but may + * not have been terminated; needs cleaning up at least. 
*/ isci_terminate_request_core(isci_host, isci_device, isci_request); @@ -1297,14 +1303,16 @@ int isci_task_abort_task(struct sas_task *task) spin_lock_irqsave(&isci_host->scic_lock, flags); - /* Check the request status and change to "aborting" if currently + /* Check the request status and change to "aborted" if currently * "starting"; if true then set the I/O kernel completion * struct that will be triggered when the request completes. */ old_state = isci_task_validate_request_to_abort( old_request, isci_host, isci_device, &aborted_io_completion); - if ((old_state != started) && (old_state != completed)) { + if ((old_state != started) && + (old_state != completed) && + (old_state != aborting)) { spin_unlock_irqrestore(&isci_host->scic_lock, flags); -- cgit v1.2.3-70-g09d2 From 6cb4d6b382be6345c2d0c4b1b90dfdf9af32da7e Mon Sep 17 00:00:00 2001 From: Bartosz Barcinski <Bartosz.Barcinski@intel.com> Date: Tue, 12 Apr 2011 17:28:43 -0700 Subject: isci: audit usage of BUG_ON macro in isci driver Removes unnecessary usage of BUG_ON macro, excluding core directory. In some cases macro is unnecesary, check is done in caller function. In other cases macro is replaced by if construction with appropriate warning. Signed-off-by: Maciej Patelczyk <maciej.patelczyk@intel.com> [changed some survivable bug conditions to WARN_ONCE] Signed-off-by: Dan Williams <dan.j.williams@intel.com> --- drivers/scsi/isci/core/scic_sds_request.c | 1 - drivers/scsi/isci/init.c | 4 ---- drivers/scsi/isci/port.c | 1 + drivers/scsi/isci/remote_device.c | 10 ++++++---- drivers/scsi/isci/request.c | 1 - drivers/scsi/isci/request.h | 5 ++--- drivers/scsi/isci/task.c | 11 ++++++----- 7 files changed, 15 insertions(+), 18 deletions(-) (limited to 'drivers/scsi/isci/request.h') diff --git a/drivers/scsi/isci/core/scic_sds_request.c b/drivers/scsi/isci/core/scic_sds_request.c index 64aa9c60836..a6ee155c682 100644 --- a/drivers/scsi/isci/core/scic_sds_request.c +++ b/drivers/scsi/isci/core/scic_sds_request.c @@ -1635,7 +1635,6 @@ static void scic_sds_request_completed_state_enter( struct isci_host *ihost = sci_object_get_association(scic); struct isci_request *ireq = sci_object_get_association(sci_req); - SET_STATE_HANDLER(sci_req, scic_sds_request_state_handler_table, SCI_BASE_REQUEST_STATE_COMPLETED); diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c index 5e63ae6a75d..5da9a6925cd 100644 --- a/drivers/scsi/isci/init.c +++ b/drivers/scsi/isci/init.c @@ -362,8 +362,6 @@ static int isci_setup_interrupts(struct pci_dev *pdev) else isr = isci_msix_isr; - BUG_ON(!isci_host); - err = devm_request_irq(&pdev->dev, msix->vector, isr, 0, DRV_NAME"-msix", isci_host); if (!err) @@ -379,13 +377,11 @@ static int isci_setup_interrupts(struct pci_dev *pdev) pci_disable_msix(pdev); goto intx; } - return 0; intx: err = devm_request_irq(&pdev->dev, pdev->irq, isci_intx_isr, IRQF_SHARED, DRV_NAME"-intx", pdev); - return err; } diff --git a/drivers/scsi/isci/port.c b/drivers/scsi/isci/port.c index b675a94f4a7..cf78cf0a674 100644 --- a/drivers/scsi/isci/port.c +++ b/drivers/scsi/isci/port.c @@ -178,6 +178,7 @@ void isci_port_link_up( unsigned long success = true; BUG_ON(isci_phy->isci_port != NULL); + isci_phy->isci_port = isci_port; dev_dbg(&isci_host->pdev->dev, diff --git a/drivers/scsi/isci/remote_device.c b/drivers/scsi/isci/remote_device.c index 320850cf46f..9301e25dff3 100644 --- a/drivers/scsi/isci/remote_device.c +++ b/drivers/scsi/isci/remote_device.c @@ -202,8 +202,6 @@ static enum sci_status isci_remote_device_construct( 
sci_object_set_association(to_sci_dev(isci_device), isci_device); - BUG_ON(port->isci_host == NULL); - /* start the device. */ status = scic_remote_device_start(to_sci_dev(isci_device), ISCI_REMOTE_DEVICE_START_TIMEOUT); @@ -257,8 +255,12 @@ isci_remote_device_alloc(struct isci_host *ihost, struct isci_port *iport) return NULL; } - BUG_ON(!list_empty(&idev->reqs_in_process)); - BUG_ON(!list_empty(&idev->node)); + if (WARN_ONCE(!list_empty(&idev->reqs_in_process), "found requests in process\n")) + return NULL; + + if (WARN_ONCE(!list_empty(&idev->node), "found non-idle remote device\n")) + return NULL; + isci_remote_device_change_state(idev, isci_freed); return idev; diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c index 37ffedc94ac..a90c299b723 100644 --- a/drivers/scsi/isci/request.c +++ b/drivers/scsi/isci/request.c @@ -472,7 +472,6 @@ int isci_request_execute( out: if (status != SCI_SUCCESS) { - /* release dma memory on failure. */ isci_request_free(isci_host, request); request = NULL; diff --git a/drivers/scsi/isci/request.h b/drivers/scsi/isci/request.h index 0c08da6bcd8..642b21166fc 100644 --- a/drivers/scsi/isci/request.h +++ b/drivers/scsi/isci/request.h @@ -193,8 +193,6 @@ static inline enum isci_request_status isci_request_change_started_to_newstate( enum isci_request_status old_state; unsigned long flags; - BUG_ON(isci_request == NULL); - spin_lock_irqsave(&isci_request->state_lock, flags); old_state = isci_request->status; @@ -243,7 +241,8 @@ static inline void isci_request_free( struct isci_host *isci_host, struct isci_request *isci_request) { - BUG_ON(isci_request == NULL); + if (!isci_request) + return; /* release the dma memory if we fail. */ dma_pool_free(isci_host->dma_pool, isci_request, diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c index b88101e195d..c79968db871 100644 --- a/drivers/scsi/isci/task.c +++ b/drivers/scsi/isci/task.c @@ -272,7 +272,7 @@ static enum sci_status isci_task_request_build( { struct scic_sds_remote_device *sci_device; enum sci_status status = SCI_FAILURE; - struct isci_request *request; + struct isci_request *request = NULL; struct isci_remote_device *isci_device; /* struct sci_sas_identify_address_frame_protocols dev_protocols; */ struct smp_discover_response_protocols dev_protocols; @@ -372,8 +372,6 @@ static void isci_tmf_timeout_cb(void *tmf_request_arg) struct isci_tmf *tmf = isci_request_access_tmf(request); enum sci_status status; - BUG_ON(request->ttype != tmf_task); - /* This task management request has timed-out. Terminate the request * so that the request eventually completes to the requestor in the * request completion callback path. @@ -1121,8 +1119,11 @@ static void isci_abort_task_process_cb( * request state was already set to "aborted" by the abort * task function. */ - BUG_ON((old_request->status != aborted) - && (old_request->status != completed)); + if ((old_request->status != aborted) + && (old_request->status != completed)) + dev_err(&old_request->isci_host->pdev->dev, + "%s: Bad request status (%d): tmf=%p, old_request=%p\n", + __func__, old_request->status, tmf, old_request); break; case isci_tmf_timed_out: -- cgit v1.2.3-70-g09d2 From 9286a1959ce7f3df3c1a8e33eb9b210078318dc8 Mon Sep 17 00:00:00 2001 From: Dave Jiang <dave.jiang@intel.com> Date: Fri, 6 May 2011 02:17:37 +0000 Subject: isci: Removing unnecessary functions in request.c No need for wrappers, just access sas_task directly. 
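The wrapper-removal pattern is small enough to sketch. The snippet below is illustrative only and not part of the patch: the example_* function names are invented, while isci_request_access_task(), sci_req->ireq, data_dir and total_xfer_len are taken from the diff that follows.

	/* before: a one-line wrapper per sas_task field */
	static u32 example_get_transfer_length(struct isci_request *ireq)
	{
		struct sas_task *task = isci_request_access_task(ireq);

		return task->total_xfer_len;
	}

	/* after: callers fetch the sas_task once and read its fields directly */
	static void example_caller(struct scic_sds_request *sci_req)
	{
		struct isci_request *ireq = sci_req->ireq;
		struct sas_task *task = isci_request_access_task(ireq);

		scu_ssp_io_request_construct_task_context(sci_req,
							   task->data_dir,
							   task->total_xfer_len);
	}
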
Signed-off-by: Dave Jiang <dave.jiang@intel.com> Signed-off-by: Dan Williams <dan.j.williams@intel.com> --- drivers/scsi/isci/core/scic_sds_request.c | 24 +++++++++++----------- drivers/scsi/isci/request.c | 34 ------------------------------- drivers/scsi/isci/request.h | 25 ----------------------- 3 files changed, 12 insertions(+), 71 deletions(-) (limited to 'drivers/scsi/isci/request.h') diff --git a/drivers/scsi/isci/core/scic_sds_request.c b/drivers/scsi/isci/core/scic_sds_request.c index 6286decd62e..52692a16449 100644 --- a/drivers/scsi/isci/core/scic_sds_request.c +++ b/drivers/scsi/isci/core/scic_sds_request.c @@ -666,19 +666,20 @@ u32 scic_io_request_get_object_size(void) enum sci_status scic_io_request_construct_basic_ssp( struct scic_sds_request *sci_req) { - struct isci_request *isci_request = sci_req->ireq; + struct isci_request *ireq = sci_req->ireq; + struct sas_task *task = isci_request_access_task(ireq); sci_req->protocol = SCIC_SSP_PROTOCOL; - scu_ssp_io_request_construct_task_context( - sci_req, - isci_request_io_request_get_data_direction(isci_request), - isci_request_io_request_get_transfer_length(isci_request)); + scu_ssp_io_request_construct_task_context(sci_req, + task->data_dir, + task->total_xfer_len); scic_sds_io_request_build_ssp_command_iu(sci_req); - sci_base_state_machine_change_state(&sci_req->state_machine, - SCI_BASE_REQUEST_STATE_CONSTRUCTED); + sci_base_state_machine_change_state( + &sci_req->state_machine, + SCI_BASE_REQUEST_STATE_CONSTRUCTED); return SCI_SUCCESS; } @@ -705,8 +706,6 @@ enum sci_status scic_io_request_construct_basic_sata( { enum sci_status status; struct scic_sds_stp_request *stp_req; - u32 len; - enum dma_data_direction dir; bool copy = false; struct isci_request *isci_request = sci_req->ireq; struct sas_task *task = isci_request_access_task(isci_request); @@ -715,11 +714,12 @@ enum sci_status scic_io_request_construct_basic_sata( sci_req->protocol = SCIC_STP_PROTOCOL; - len = isci_request_io_request_get_transfer_length(isci_request); - dir = isci_request_io_request_get_data_direction(isci_request); copy = (task->data_dir == DMA_NONE) ? false : true; - status = scic_io_request_construct_sata(sci_req, len, dir, copy); + status = scic_io_request_construct_sata(sci_req, + task->total_xfer_len, + task->data_dir, + copy); if (status == SCI_SUCCESS) sci_base_state_machine_change_state(&sci_req->state_machine, diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c index 0521c045d43..e01c2c98f4e 100644 --- a/drivers/scsi/isci/request.c +++ b/drivers/scsi/isci/request.c @@ -1197,37 +1197,3 @@ void isci_request_io_request_complete( isci_host_can_dequeue(isci_host, 1); } - -/** - * isci_request_io_request_get_transfer_length() - This function is called by - * the sci core to retrieve the transfer length for a given request. - * @request: This parameter is the isci_request object. - * - * length of transfer for specified request. - */ -u32 isci_request_io_request_get_transfer_length(struct isci_request *request) -{ - struct sas_task *task = isci_request_access_task(request); - - dev_dbg(&request->isci_host->pdev->dev, - "%s: total_xfer_len: %d\n", - __func__, - task->total_xfer_len); - return task->total_xfer_len; -} - - -/** - * isci_request_io_request_get_data_direction() - This function is called by - * the sci core to retrieve the data direction for a given request. - * @request: This parameter is the isci_request object. - * - * data direction for specified request. 
- */ -enum dma_data_direction isci_request_io_request_get_data_direction( - struct isci_request *request) -{ - struct sas_task *task = isci_request_access_task(request); - - return task->data_dir; -} diff --git a/drivers/scsi/isci/request.h b/drivers/scsi/isci/request.h index 642b21166fc..9c97715e54d 100644 --- a/drivers/scsi/isci/request.h +++ b/drivers/scsi/isci/request.h @@ -343,11 +343,6 @@ void isci_request_io_request_complete( struct isci_request *request, enum sci_io_status completion_status); -u32 isci_request_io_request_get_transfer_length( - struct isci_request *request); - -enum dma_data_direction isci_request_io_request_get_data_direction(struct isci_request *req); - /** * isci_request_io_request_get_next_sge() - This function is called by the sci * core to retrieve the next sge for a given request. @@ -391,26 +386,6 @@ static inline void *isci_request_io_request_get_next_sge( } - -void *isci_request_ssp_io_request_get_cdb_address( - struct isci_request *request); - -u32 isci_request_ssp_io_request_get_cdb_length( - struct isci_request *request); - -u32 isci_request_ssp_io_request_get_lun( - struct isci_request *request); - -u32 isci_request_ssp_io_request_get_task_attribute( - struct isci_request *request); - -u32 isci_request_ssp_io_request_get_command_priority( - struct isci_request *request); - - - - - void isci_terminate_pending_requests( struct isci_host *isci_host, struct isci_remote_device *isci_device, -- cgit v1.2.3-70-g09d2 From 0d84366fbef557f92ef82ac9a224c57ffb3318bc Mon Sep 17 00:00:00 2001 From: Dan Williams <dan.j.williams@intel.com> Date: Sun, 8 May 2011 01:56:57 -0700 Subject: isci: make sgl explicit/aligned request object member Towards unifying request objects we need all members to be defined in the object and not carved out of anonymous buffer space. 
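Reduced to the two struct layouts, the change looks roughly like this. This is a simplified sketch, not the literal driver declarations: the example_* struct names are invented, while sgl_element_pair_buffer, SCU_SGL_SIZE, sg_table and the 32-byte alignment mirror the diff that follows.

	/* before: SGL pairs carved out of anonymous trailing memory, sized by
	 * a get_object_size() helper and re-aligned at runtime with
	 * PTR_ALIGN() in an assign_buffers() step
	 */
	struct example_request_old {
		/* ... */
		struct scu_sgl_element_pair *sgl_element_pair_buffer;
	};

	/* after: an explicit, statically aligned member; no runtime fixup */
	#define SCU_SGL_SIZE ((SCU_IO_REQUEST_SGE_COUNT + 1) / 2)
	struct example_request_new {
		/* ... */
		struct scu_sgl_element_pair sg_table[SCU_SGL_SIZE]
			__attribute__ ((aligned(32)));
	};
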
Signed-off-by: Dan Williams <dan.j.williams@intel.com> --- drivers/scsi/isci/core/scic_sds_request.c | 11 ++--------- drivers/scsi/isci/core/scic_sds_request.h | 14 +++++--------- drivers/scsi/isci/core/scic_sds_smp_request.c | 1 - drivers/scsi/isci/core/scic_sds_stp_request.c | 18 +----------------- drivers/scsi/isci/request.c | 14 ++++---------- drivers/scsi/isci/request.h | 13 +++---------- drivers/scsi/isci/task.c | 12 ++++-------- 7 files changed, 19 insertions(+), 64 deletions(-) (limited to 'drivers/scsi/isci/request.h') diff --git a/drivers/scsi/isci/core/scic_sds_request.c b/drivers/scsi/isci/core/scic_sds_request.c index fd7bd334fa4..189a7999726 100644 --- a/drivers/scsi/isci/core/scic_sds_request.c +++ b/drivers/scsi/isci/core/scic_sds_request.c @@ -212,8 +212,7 @@ static u32 scic_sds_ssp_request_get_object_size(void) return sizeof(struct scic_sds_request) + scic_ssp_io_request_get_object_size() + sizeof(struct scu_task_context) - + SMP_CACHE_BYTES - + sizeof(struct scu_sgl_element_pair) * SCU_MAX_SGL_ELEMENT_PAIRS; + + SMP_CACHE_BYTES; } /** @@ -239,7 +238,7 @@ static struct scu_sgl_element_pair *scic_sds_request_get_sgl_element_pair( return &task_context->sgl_pair_cd; } - return &sci_req->sgl_element_pair_buffer[sgl_pair_index - 2]; + return &sci_req->sg_table[sgl_pair_index - 2]; } /** @@ -328,11 +327,6 @@ static void scic_sds_ssp_io_request_assign_buffers( scic_sds_ssp_request_get_command_buffer(sci_req); sci_req->response_buffer = scic_sds_ssp_request_get_response_buffer(sci_req); - sci_req->sgl_element_pair_buffer = - scic_sds_ssp_request_get_sgl_element_buffer(sci_req); - sci_req->sgl_element_pair_buffer = - PTR_ALIGN(sci_req->sgl_element_pair_buffer, - sizeof(struct scu_sgl_element_pair)); if (sci_req->was_tag_assigned_by_user == false) { sci_req->task_context_buffer = @@ -535,7 +529,6 @@ static void scic_sds_ssp_task_request_assign_buffers( scic_sds_ssp_task_request_get_command_buffer(sci_req); sci_req->response_buffer = scic_sds_ssp_task_request_get_response_buffer(sci_req); - sci_req->sgl_element_pair_buffer = NULL; if (sci_req->was_tag_assigned_by_user == false) { sci_req->task_context_buffer = diff --git a/drivers/scsi/isci/core/scic_sds_request.h b/drivers/scsi/isci/core/scic_sds_request.h index c93f3ed8946..83d737adbc4 100644 --- a/drivers/scsi/isci/core/scic_sds_request.h +++ b/drivers/scsi/isci/core/scic_sds_request.h @@ -60,6 +60,7 @@ #include "sci_base_state_machine.h" #include "scu_task_context.h" #include "scic_sds_stp_request.h" +#include "scu_constants.h" struct scic_sds_controller; struct scic_sds_remote_device; @@ -183,7 +184,10 @@ struct scic_sds_request { void *command_buffer; void *response_buffer; struct scu_task_context *task_context_buffer; - struct scu_sgl_element_pair *sgl_element_pair_buffer; + + /* could be larger with sg chaining */ + #define SCU_SGL_SIZE ((SCU_IO_REQUEST_SGE_COUNT + 1) / 2) + struct scu_sgl_element_pair sg_table[SCU_SGL_SIZE] __attribute__ ((aligned(32))); /** * This field indicates if this request is a task management request or @@ -327,14 +331,6 @@ struct scic_sds_io_request_state_handler { extern const struct sci_base_state scic_sds_io_request_started_task_mgmt_substate_table[]; -/** - * - * - * This macro returns the maximum number of SGL element paris that we will - * support in a single IO request. 
- */ -#define SCU_MAX_SGL_ELEMENT_PAIRS ((SCU_IO_REQUEST_SGE_COUNT + 1) / 2) - /** * scic_sds_request_get_controller() - * diff --git a/drivers/scsi/isci/core/scic_sds_smp_request.c b/drivers/scsi/isci/core/scic_sds_smp_request.c index 1f8773d3b79..cb1adef1d55 100644 --- a/drivers/scsi/isci/core/scic_sds_smp_request.c +++ b/drivers/scsi/isci/core/scic_sds_smp_request.c @@ -131,7 +131,6 @@ void scic_sds_smp_request_assign_buffers( scic_sds_smp_request_get_command_buffer(sci_req); sci_req->response_buffer = scic_sds_smp_request_get_response_buffer(sci_req); - sci_req->sgl_element_pair_buffer = NULL; if (sci_req->was_tag_assigned_by_user == false) { sci_req->task_context_buffer = diff --git a/drivers/scsi/isci/core/scic_sds_stp_request.c b/drivers/scsi/isci/core/scic_sds_stp_request.c index 7dba40fc258..013af11df62 100644 --- a/drivers/scsi/isci/core/scic_sds_stp_request.c +++ b/drivers/scsi/isci/core/scic_sds_stp_request.c @@ -105,18 +105,6 @@ + SSP_RESP_IU_MAX_SIZE \ )) -/** - * scic_sds_stp_request_get_sgl_element_buffer() - - * - * This macro returns the address of the sgl elment pairs in the io request - * memory buffer - */ -#define scic_sds_stp_request_get_sgl_element_buffer(memory) \ - ((struct scu_sgl_element_pair *)(\ - ((char *)(scic_sds_stp_request_get_task_context_buffer(memory))) \ - + sizeof(struct scu_task_context) \ - )) - /** * * @@ -128,8 +116,7 @@ u32 scic_sds_stp_request_get_object_size(void) + sizeof(struct host_to_dev_fis) + sizeof(struct dev_to_host_fis) + sizeof(struct scu_task_context) - + SMP_CACHE_BYTES - + sizeof(struct scu_sgl_element_pair) * SCU_MAX_SGL_ELEMENT_PAIRS; + + SMP_CACHE_BYTES; } void scic_sds_stp_request_assign_buffers(struct scic_sds_request *sci_req) @@ -138,9 +125,6 @@ void scic_sds_stp_request_assign_buffers(struct scic_sds_request *sci_req) sci_req->command_buffer = scic_sds_stp_request_get_h2d_reg_buffer(stp_req); sci_req->response_buffer = scic_sds_stp_request_get_response_buffer(stp_req); - sci_req->sgl_element_pair_buffer = scic_sds_stp_request_get_sgl_element_buffer(stp_req); - sci_req->sgl_element_pair_buffer = PTR_ALIGN(sci_req->sgl_element_pair_buffer, - sizeof(struct scu_sgl_element_pair)); if (sci_req->was_tag_assigned_by_user == false) { sci_req->task_context_buffer = diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c index e01c2c98f4e..9dd971a3fbb 100644 --- a/drivers/scsi/isci/request.c +++ b/drivers/scsi/isci/request.c @@ -200,14 +200,10 @@ static enum sci_status isci_io_request_build( /* build the common request object. For now, * we will let the core allocate the IO tag. */ - status = scic_io_request_construct( - &isci_host->sci, - sci_device, - SCI_CONTROLLER_INVALID_IO_TAG, - request, - request->sci_request_mem_ptr, - (struct scic_sds_request **)&request->sci_request_handle - ); + status = scic_io_request_construct(&isci_host->sci, sci_device, + SCI_CONTROLLER_INVALID_IO_TAG, + request, request->sci_req, + &request->sci_request_handle); if (status != SCI_SUCCESS) { dev_warn(&isci_host->pdev->dev, @@ -277,8 +273,6 @@ static int isci_request_alloc_core( /* initialize the request object. 
*/ spin_lock_init(&request->state_lock); - request->sci_request_mem_ptr = ((u8 *)request) + - sizeof(struct isci_request); request->request_daddr = handle; request->isci_host = isci_host; request->isci_device = isci_device; diff --git a/drivers/scsi/isci/request.h b/drivers/scsi/isci/request.h index 9c97715e54d..ddfbf71c97e 100644 --- a/drivers/scsi/isci/request.h +++ b/drivers/scsi/isci/request.h @@ -53,10 +53,11 @@ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ -#if !defined(_ISCI_REQUEST_H_) +#ifndef _ISCI_REQUEST_H_ #define _ISCI_REQUEST_H_ #include "isci.h" +#include "scic_sds_request.h" /** * struct isci_request_status - This enum defines the possible states of an I/O @@ -80,16 +81,8 @@ enum task_type { tmf_task = 1 }; -/** - * struct isci_request - This class represents the request object used to track - * IO, smp and TMF request internal. It wraps the SCIC request object. - * - * - */ struct isci_request { - struct scic_sds_request *sci_request_handle; - enum isci_request_status status; enum task_type ttype; unsigned short io_tag; @@ -105,7 +98,6 @@ struct isci_request { struct list_head completed_node; /* For use in the reqs_in_process list: */ struct list_head dev_node; - void *sci_request_mem_ptr; spinlock_t state_lock; dma_addr_t request_daddr; dma_addr_t zero_scatter_daddr; @@ -123,6 +115,7 @@ struct isci_request { * TMF was aborting is guaranteed to have completed. */ struct completion *io_request_completion; + struct scic_sds_request sci_req[0] ____cacheline_aligned; }; /** diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c index 12f2df94736..7d5f7937845 100644 --- a/drivers/scsi/isci/task.c +++ b/drivers/scsi/isci/task.c @@ -298,14 +298,10 @@ static enum sci_status isci_task_request_build( goto out; /* let the core do it's construct. */ - status = scic_task_request_construct( - &isci_host->sci, - sci_device, - SCI_CONTROLLER_INVALID_IO_TAG, - request, - request->sci_request_mem_ptr, - &request->sci_request_handle - ); + status = scic_task_request_construct(&isci_host->sci, sci_device, + SCI_CONTROLLER_INVALID_IO_TAG, + request, &request->sci_req, + &request->sci_request_handle); if (status != SCI_SUCCESS) { dev_warn(&isci_host->pdev->dev, -- cgit v1.2.3-70-g09d2 From 67ea838d17acdad3331aeae848683c768df96aaa Mon Sep 17 00:00:00 2001 From: Dan Williams <dan.j.williams@intel.com> Date: Sun, 8 May 2011 11:47:15 -0700 Subject: isci: unify request data structures Make scic_sds_request a proper member of isci_request. Also let's us get rid of the dma pool object size tracking since we now know that all requests are sizeof(isci_request). While cleaning up the construct routine incidentally replaced SCI_FIELD_OFFSET with offsetof. 
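With the core request embedded, converting a scic_sds_request pointer back to its isci_request no longer needs a stored back pointer; the sci_req_to_ireq() used in the diff that follows is presumably a container_of() conversion. A hedged sketch, assuming a plain embedded member named sci_req (the real helper may differ in detail):

	static inline struct isci_request *example_sci_req_to_ireq(
		struct scic_sds_request *sci_req)
	{
		return container_of(sci_req, struct isci_request, sci_req);
	}

	/* likewise, the standard offsetof() replaces the hand-rolled
	 * SCI_FIELD_OFFSET(type, field) macro, e.g.:
	 *   offsetof(struct scu_task_context, sgl_snapshot_ac)
	 */
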
Reported-by: Christoph Hellwig <hch@lst.de> Signed-off-by: Dan Williams <dan.j.williams@intel.com> --- drivers/scsi/isci/core/sci_util.c | 9 +- drivers/scsi/isci/core/sci_util.h | 3 - drivers/scsi/isci/core/scic_io_request.h | 33 +---- drivers/scsi/isci/core/scic_sds_controller.c | 2 +- drivers/scsi/isci/core/scic_sds_request.c | 53 +++----- drivers/scsi/isci/core/scic_sds_request.h | 14 --- drivers/scsi/isci/core/scic_sds_stp_request.c | 4 +- drivers/scsi/isci/core/scic_task_request.h | 36 +----- drivers/scsi/isci/host.c | 8 +- drivers/scsi/isci/host.h | 1 - drivers/scsi/isci/remote_device.c | 2 +- drivers/scsi/isci/request.c | 168 ++++++++++++-------------- drivers/scsi/isci/request.h | 12 +- drivers/scsi/isci/sata.c | 10 +- drivers/scsi/isci/task.c | 30 ++--- 15 files changed, 128 insertions(+), 257 deletions(-) (limited to 'drivers/scsi/isci/request.h') diff --git a/drivers/scsi/isci/core/sci_util.c b/drivers/scsi/isci/core/sci_util.c index 0c75d14240a..4e60d55836e 100644 --- a/drivers/scsi/isci/core/sci_util.c +++ b/drivers/scsi/isci/core/sci_util.c @@ -59,14 +59,14 @@ void *scic_request_get_virt_addr(struct scic_sds_request *sci_req, dma_addr_t phys_addr) { - struct isci_request *ireq = sci_req->ireq; + struct isci_request *ireq = sci_req_to_ireq(sci_req); dma_addr_t offset; BUG_ON(phys_addr < ireq->request_daddr); offset = phys_addr - ireq->request_daddr; - BUG_ON(offset >= ireq->request_alloc_size); + BUG_ON(offset >= sizeof(*ireq)); return (char *)ireq + offset; } @@ -74,14 +74,13 @@ void *scic_request_get_virt_addr(struct scic_sds_request *sci_req, dma_addr_t ph dma_addr_t scic_io_request_get_dma_addr(struct scic_sds_request *sds_request, void *virt_addr) { - struct isci_request *isci_request = sds_request->ireq; + struct isci_request *isci_request = sci_req_to_ireq(sds_request); char *requested_addr = (char *)virt_addr; char *base_addr = (char *)isci_request; BUG_ON(requested_addr < base_addr); - BUG_ON((requested_addr - base_addr) >= - isci_request->request_alloc_size); + BUG_ON((requested_addr - base_addr) >= sizeof(*isci_request)); return isci_request->request_daddr + (requested_addr - base_addr); } diff --git a/drivers/scsi/isci/core/sci_util.h b/drivers/scsi/isci/core/sci_util.h index 4e9c3189cf9..0f9dd0fe126 100644 --- a/drivers/scsi/isci/core/sci_util.h +++ b/drivers/scsi/isci/core/sci_util.h @@ -66,9 +66,6 @@ | ((char_buffer)[3]) \ ) -#define SCI_FIELD_OFFSET(type, field) ((unsigned long)&(((type *)0)->field)) - - #define sci_cb_make_physical_address(physical_addr, addr_upper, addr_lower) \ ((physical_addr) = (addr_lower) | ((u64)addr_upper) << 32) diff --git a/drivers/scsi/isci/core/scic_io_request.h b/drivers/scsi/isci/core/scic_io_request.h index cb3667decb3..f7c6d426f8c 100644 --- a/drivers/scsi/isci/core/scic_io_request.h +++ b/drivers/scsi/isci/core/scic_io_request.h @@ -102,41 +102,10 @@ typedef enum { } SCIC_TRANSPORT_PROTOCOL; - -/** - * scic_io_request_construct() - This method is called by the SCI user to - * construct all SCI Core IO requests. Memory initialization and - * functionality common to all IO request types is performed in this method. - * @scic_controller: the handle to the core controller object for which to - * build an IO request. - * @scic_remote_device: the handle to the core remote device object for which - * to build an IO request. - * @io_tag: This parameter specifies the IO tag to be associated with this - * request. If SCI_CONTROLLER_INVALID_IO_TAG is passed, then a copy of the - * request is built internally. 
The request will be copied into the actual - * controller request memory when the IO tag is allocated internally during - * the scic_controller_start_io() method. - * @user_io_request_object: This parameter specifies the user IO request to be - * utilized during IO construction. This IO pointer will become the - * associated object for the core IO request object. - * @scic_io_request_memory: This parameter specifies the memory location to be - * utilized when building the core request. - * @new_scic_io_request_handle: This parameter specifies a pointer to the - * handle the core will expect in further interactions with the core IO - * request object. - * - * The SCI core implementation will create an association between the user IO - * request object and the core IO request object. Indicate if the controller - * successfully built the IO request. SCI_SUCCESS This value is returned if the - * IO request was successfully built. - */ enum sci_status scic_io_request_construct( struct scic_sds_controller *scic_controller, struct scic_sds_remote_device *scic_remote_device, - u16 io_tag, - void *user_io_request_object, - struct scic_sds_request *scic_io_request_memory, - struct scic_sds_request **new_scic_io_request_handle); + u16 io_tag, struct scic_sds_request *sci_req); /** * scic_io_request_construct_basic_ssp() - This method is called by the SCI diff --git a/drivers/scsi/isci/core/scic_sds_controller.c b/drivers/scsi/isci/core/scic_sds_controller.c index 4179bdf3eda..852b7d52f84 100644 --- a/drivers/scsi/isci/core/scic_sds_controller.c +++ b/drivers/scsi/isci/core/scic_sds_controller.c @@ -1572,7 +1572,7 @@ void scic_sds_controller_copy_task_context( memcpy(task_context_buffer, sci_req->task_context_buffer, - SCI_FIELD_OFFSET(struct scu_task_context, sgl_snapshot_ac)); + offsetof(struct scu_task_context, sgl_snapshot_ac)); /* * Now that the soft copy of the TC has been copied into the TC diff --git a/drivers/scsi/isci/core/scic_sds_request.c b/drivers/scsi/isci/core/scic_sds_request.c index 36c2b310c17..50dd19bba3d 100644 --- a/drivers/scsi/isci/core/scic_sds_request.c +++ b/drivers/scsi/isci/core/scic_sds_request.c @@ -117,7 +117,7 @@ static struct scu_sgl_element_pair *scic_sds_request_get_sgl_element_pair( */ void scic_sds_request_build_sgl(struct scic_sds_request *sds_request) { - struct isci_request *isci_request = sds_request->ireq; + struct isci_request *isci_request = sci_req_to_ireq(sds_request); struct isci_host *isci_host = isci_request->isci_host; struct sas_task *task = isci_request_access_task(isci_request); struct scatterlist *sg = NULL; @@ -190,7 +190,7 @@ static void scic_sds_ssp_io_request_assign_buffers(struct scic_sds_request *sci_ static void scic_sds_io_request_build_ssp_command_iu(struct scic_sds_request *sci_req) { struct ssp_cmd_iu *cmd_iu; - struct isci_request *ireq = sci_req->ireq; + struct isci_request *ireq = sci_req_to_ireq(sci_req); struct sas_task *task = isci_request_access_task(ireq); cmd_iu = &sci_req->ssp.cmd; @@ -211,7 +211,7 @@ static void scic_sds_io_request_build_ssp_command_iu(struct scic_sds_request *sc static void scic_sds_task_request_build_ssp_task_iu(struct scic_sds_request *sci_req) { struct ssp_task_iu *task_iu; - struct isci_request *ireq = sci_req->ireq; + struct isci_request *ireq = sci_req_to_ireq(sci_req); struct sas_task *task = isci_request_access_task(ireq); struct isci_tmf *isci_tmf = isci_request_access_tmf(ireq); @@ -429,7 +429,7 @@ scic_io_request_construct_sata(struct scic_sds_request *sci_req, bool copy) { enum sci_status status 
= SCI_SUCCESS; - struct isci_request *ireq = sci_req->ireq; + struct isci_request *ireq = sci_req_to_ireq(sci_req); struct sas_task *task = isci_request_access_task(ireq); /* check for management protocols */ @@ -478,7 +478,7 @@ scic_io_request_construct_sata(struct scic_sds_request *sci_req, enum sci_status scic_io_request_construct_basic_ssp( struct scic_sds_request *sci_req) { - struct isci_request *ireq = sci_req->ireq; + struct isci_request *ireq = sci_req_to_ireq(sci_req); struct sas_task *task = isci_request_access_task(ireq); sci_req->protocol = SCIC_SSP_PROTOCOL; @@ -519,7 +519,7 @@ enum sci_status scic_io_request_construct_basic_sata( enum sci_status status; struct scic_sds_stp_request *stp_req; bool copy = false; - struct isci_request *isci_request = sci_req->ireq; + struct isci_request *isci_request = sci_req_to_ireq(sci_req); struct sas_task *task = isci_request_access_task(isci_request); stp_req = &sci_req->stp.req; @@ -540,11 +540,10 @@ enum sci_status scic_io_request_construct_basic_sata( } -enum sci_status scic_task_request_construct_sata( - struct scic_sds_request *sci_req) +enum sci_status scic_task_request_construct_sata(struct scic_sds_request *sci_req) { enum sci_status status = SCI_SUCCESS; - struct isci_request *ireq = sci_req->ireq; + struct isci_request *ireq = sci_req_to_ireq(sci_req); /* check for management protocols */ if (ireq->ttype == tmf_task) { @@ -740,7 +739,7 @@ void scic_sds_io_request_copy_response(struct scic_sds_request *sci_req) void *resp_buf; u32 len; struct ssp_response_iu *ssp_response; - struct isci_request *ireq = sci_req->ireq; + struct isci_request *ireq = sci_req_to_ireq(sci_req); struct isci_tmf *isci_tmf = isci_request_access_tmf(ireq); ssp_response = &sci_req->ssp.rsp; @@ -1342,7 +1341,7 @@ static void scic_sds_request_completed_state_enter(void *object) struct scic_sds_controller *scic = scic_sds_request_get_controller(sci_req); struct isci_host *ihost = scic_to_ihost(scic); - struct isci_request *ireq = sci_req->ireq; + struct isci_request *ireq = sci_req_to_ireq(sci_req); SET_STATE_HANDLER(sci_req, scic_sds_request_state_handler_table, @@ -1424,16 +1423,13 @@ static const struct sci_base_state scic_sds_request_state_table[] = { static void scic_sds_general_request_construct(struct scic_sds_controller *scic, struct scic_sds_remote_device *sci_dev, - u16 io_tag, - void *user_io_request_object, - struct scic_sds_request *sci_req) + u16 io_tag, struct scic_sds_request *sci_req) { sci_base_state_machine_construct(&sci_req->state_machine, sci_req, scic_sds_request_state_table, SCI_BASE_REQUEST_STATE_INITIAL); sci_base_state_machine_start(&sci_req->state_machine); sci_req->io_tag = io_tag; - sci_req->user_request = user_io_request_object; sci_req->owning_controller = scic; sci_req->target_device = sci_dev; sci_req->has_started_substate_machine = false; @@ -1461,20 +1457,13 @@ static void scic_sds_general_request_construct(struct scic_sds_controller *scic, enum sci_status scic_io_request_construct(struct scic_sds_controller *scic, struct scic_sds_remote_device *sci_dev, - u16 io_tag, - void *user_req, - struct scic_sds_request *sci_req, - struct scic_sds_request **new_sci_req) + u16 io_tag, struct scic_sds_request *sci_req) { struct domain_device *dev = sci_dev_to_domain(sci_dev); enum sci_status status = SCI_SUCCESS; /* Build the common part of the request */ - scic_sds_general_request_construct(scic, - sci_dev, - io_tag, - user_req, - sci_req); + scic_sds_general_request_construct(scic, sci_dev, io_tag, sci_req); if 
(sci_dev->rnc.remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) @@ -1493,10 +1482,8 @@ scic_io_request_construct(struct scic_sds_controller *scic, status = SCI_FAILURE_UNSUPPORTED_PROTOCOL; if (status == SCI_SUCCESS) { - memset(sci_req->task_context_buffer, - 0, - SCI_FIELD_OFFSET(struct scu_task_context, sgl_pair_ab)); - *new_sci_req = sci_req; + memset(sci_req->task_context_buffer, 0, + offsetof(struct scu_task_context, sgl_pair_ab)); } return status; @@ -1504,18 +1491,13 @@ scic_io_request_construct(struct scic_sds_controller *scic, enum sci_status scic_task_request_construct(struct scic_sds_controller *scic, struct scic_sds_remote_device *sci_dev, - u16 io_tag, - void *user_io_request_object, - struct scic_sds_request *sci_req, - struct scic_sds_request **new_sci_req) + u16 io_tag, struct scic_sds_request *sci_req) { struct domain_device *dev = sci_dev_to_domain(sci_dev); enum sci_status status = SCI_SUCCESS; /* Build the common part of the request */ - scic_sds_general_request_construct(scic, sci_dev, io_tag, - user_io_request_object, - sci_req); + scic_sds_general_request_construct(scic, sci_dev, io_tag, sci_req); if (dev->dev_type == SAS_END_DEV) { scic_sds_ssp_task_request_assign_buffers(sci_req); @@ -1537,7 +1519,6 @@ enum sci_status scic_task_request_construct(struct scic_sds_controller *scic, if (status == SCI_SUCCESS) { sci_req->is_task_management_request = true; memset(sci_req->task_context_buffer, 0, sizeof(struct scu_task_context)); - *new_sci_req = sci_req; } return status; diff --git a/drivers/scsi/isci/core/scic_sds_request.h b/drivers/scsi/isci/core/scic_sds_request.h index 3f551eaf3df..1dd98aabe32 100644 --- a/drivers/scsi/isci/core/scic_sds_request.h +++ b/drivers/scsi/isci/core/scic_sds_request.h @@ -113,26 +113,12 @@ enum scic_sds_smp_request_started_substates { SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_TC_COMPLETION, }; -struct isci_request; -/** - * struct scic_sds_request - This structure contains or references all of - * the data necessary to process a task management or normal IO request. - * - * - */ struct scic_sds_request { - /** - * The field specifies that the peer object for the request object. - */ - struct isci_request *ireq; - /** * This field contains the information for the base request state machine. */ struct sci_base_state_machine state_machine; - void *user_request; - /** * This field simply points to the controller to which this IO request * is associated. 
diff --git a/drivers/scsi/isci/core/scic_sds_stp_request.c b/drivers/scsi/isci/core/scic_sds_stp_request.c index c7a8931a3bc..2677393db6f 100644 --- a/drivers/scsi/isci/core/scic_sds_stp_request.c +++ b/drivers/scsi/isci/core/scic_sds_stp_request.c @@ -627,7 +627,7 @@ scic_sds_stp_request_pio_data_in_copy_data_buffer(struct scic_sds_stp_request *s int total_len = len; sci_req = to_sci_req(stp_req); - ireq = scic_sds_request_get_user_request(sci_req); + ireq = sci_req_to_ireq(sci_req); task = isci_request_access_task(ireq); src_addr = data_buf; @@ -737,7 +737,7 @@ static enum sci_status scic_sds_stp_request_pio_await_frame_frame_handler(struct { struct scic_sds_controller *scic = sci_req->owning_controller; struct scic_sds_stp_request *stp_req = &sci_req->stp.req; - struct isci_request *ireq = sci_req->ireq; + struct isci_request *ireq = sci_req_to_ireq(sci_req); struct sas_task *task = isci_request_access_task(ireq); struct dev_to_host_fis *frame_header; enum sci_status status; diff --git a/drivers/scsi/isci/core/scic_task_request.h b/drivers/scsi/isci/core/scic_task_request.h index 7e6d20aa0f0..98cfaa9e6d3 100644 --- a/drivers/scsi/isci/core/scic_task_request.h +++ b/drivers/scsi/isci/core/scic_task_request.h @@ -72,44 +72,10 @@ struct scic_sds_remote_device; struct scic_sds_controller; -/** - * scic_task_request_construct() - This method is called by the SCI user to - * construct all SCI Core task management requests, regardless of protocol. - * Memory initialization and functionality common to all task request types - * is performed in this method. - * @scic_controller: the handle to the core controller object for which to - * build the task managmement request. - * @scic_remote_device: the handle to the core remote device object for which - * to build the task management request. passed, then a copy of the request - * is built internally. The request will be copied into the actual - * controller request memory when the task is allocated internally during - * the scic_controller_start_task() method. - * @io_tag: This parameter specifies the IO tag to be associated with this - * request. If SCI_CONTROLLER_INVALID_IO_TAG is passed, then a copy of the - * request is built internally. The request will be copied into the actual - * controller request memory when the IO tag is allocated internally during - * the scic_controller_start_io() method. - * @user_task_request_object: This parameter specifies the user task request to - * be utilized during construction. This task pointer will become the - * associated object for the core task request object. - * @scic_task_request_memory: This parameter specifies the memory location to - * be utilized when building the core request. - * @new_scic_task_request_handle: This parameter specifies a pointer to the - * handle the core will expect in further interactions with the core task - * request object. - * - * The SCI core implementation will create an association between the user task - * request object and the core task request object. Indicate if the controller - * successfully built the task request. SCI_SUCCESS This value is returned if - * the task request was successfully built. 
- */ enum sci_status scic_task_request_construct( struct scic_sds_controller *scic_controller, struct scic_sds_remote_device *scic_remote_device, - u16 io_tag, - void *user_task_request_object, - void *scic_task_request_memory, - struct scic_sds_request **new_scic_task_request_handle); + u16 io_tag, struct scic_sds_request *sci_req); /** * scic_task_request_construct_ssp() - This method is called by the SCI user to diff --git a/drivers/scsi/isci/host.c b/drivers/scsi/isci/host.c index 4d0ee7bf847..271a7e171e7 100644 --- a/drivers/scsi/isci/host.c +++ b/drivers/scsi/isci/host.c @@ -428,14 +428,8 @@ int isci_host_init(struct isci_host *isci_host) if (err) return err; - /* - * keep the pool alloc size around, will use it for a bounds checking - * when trying to convert virtual addresses to physical addresses - */ - isci_host->dma_pool_alloc_size = sizeof(struct isci_request) + - sizeof(struct scic_sds_request); isci_host->dma_pool = dmam_pool_create(DRV_NAME, &isci_host->pdev->dev, - isci_host->dma_pool_alloc_size, + sizeof(struct isci_request), SLAB_HWCACHE_ALIGN, 0); if (!isci_host->dma_pool) diff --git a/drivers/scsi/isci/host.h b/drivers/scsi/isci/host.h index 5a414c31a87..afa41e83eaa 100644 --- a/drivers/scsi/isci/host.h +++ b/drivers/scsi/isci/host.h @@ -82,7 +82,6 @@ struct isci_host { struct list_head timers; void *core_ctrl_memory; struct dma_pool *dma_pool; - unsigned int dma_pool_alloc_size; struct isci_phy phys[SCI_MAX_PHYS]; struct isci_port ports[SCI_MAX_PORTS + 1]; /* includes dummy port */ struct sas_ha_struct sas_ha; diff --git a/drivers/scsi/isci/remote_device.c b/drivers/scsi/isci/remote_device.c index a441c23b685..8b1ef19a673 100644 --- a/drivers/scsi/isci/remote_device.c +++ b/drivers/scsi/isci/remote_device.c @@ -473,7 +473,7 @@ enum sci_status scic_sds_remote_device_start_io(struct scic_sds_controller *scic struct sci_base_state_machine *sm = &sci_dev->state_machine; enum scic_sds_remote_device_states state = sm->current_state_id; struct scic_sds_port *sci_port = sci_dev->owning_port; - struct isci_request *ireq = sci_req->ireq; + struct isci_request *ireq = sci_req_to_ireq(sci_req); enum sci_status status; switch (state) { diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c index a5b9b22d3b3..4961ee34709 100644 --- a/drivers/scsi/isci/request.c +++ b/drivers/scsi/isci/request.c @@ -73,9 +73,7 @@ static enum sci_status isci_request_ssp_request_construct( "%s: request = %p\n", __func__, request); - status = scic_io_request_construct_basic_ssp( - request->sci_request_handle - ); + status = scic_io_request_construct_basic_ssp(&request->sci); return status; } @@ -96,9 +94,7 @@ static enum sci_status isci_request_stp_request_construct( */ register_fis = isci_sata_task_to_fis_copy(task); - status = scic_io_request_construct_basic_sata( - request->sci_request_handle - ); + status = scic_io_request_construct_basic_sata(&request->sci); /* Set the ncq tag in the fis, from the queue * command in the task. 
@@ -125,7 +121,7 @@ static enum sci_status isci_smp_request_build(struct isci_request *ireq) { enum sci_status status = SCI_FAILURE; struct sas_task *task = isci_request_access_task(ireq); - struct scic_sds_request *sci_req = ireq->sci_request_handle; + struct scic_sds_request *sci_req = &ireq->sci; dev_dbg(&ireq->isci_host->pdev->dev, "%s: request = %p\n", __func__, ireq); @@ -201,8 +197,7 @@ static enum sci_status isci_io_request_build( */ status = scic_io_request_construct(&isci_host->sci, sci_device, SCI_CONTROLLER_INVALID_IO_TAG, - request, request->sci_req, - &request->sci_request_handle); + &request->sci); if (status != SCI_SUCCESS) { dev_warn(&isci_host->pdev->dev, @@ -211,8 +206,6 @@ static enum sci_status isci_io_request_build( return SCI_FAILURE; } - request->sci_request_handle->ireq = request; - switch (task->task_proto) { case SAS_PROTOCOL_SMP: status = isci_smp_request_build(request); @@ -276,8 +269,8 @@ static int isci_request_alloc_core( request->isci_host = isci_host; request->isci_device = isci_device; request->io_request_completion = NULL; + request->terminated = false; - request->request_alloc_size = isci_host->dma_pool_alloc_size; request->num_sg_entries = 0; request->complete_in_target = false; @@ -381,80 +374,74 @@ int isci_request_execute( goto out; status = isci_io_request_build(isci_host, request, isci_device); - if (status == SCI_SUCCESS) { - - spin_lock_irqsave(&isci_host->scic_lock, flags); - - /* send the request, let the core assign the IO TAG. */ - status = scic_controller_start_io( - &isci_host->sci, - sci_device, - request->sci_request_handle, - SCI_CONTROLLER_INVALID_IO_TAG - ); - - if (status == SCI_SUCCESS || - status == SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) { - - /* Either I/O started OK, or the core has signaled that - * the device needs a target reset. - * - * In either case, hold onto the I/O for later. - * - * Update it's status and add it to the list in the - * remote device object. - */ - isci_request_change_state(request, started); - list_add(&request->dev_node, - &isci_device->reqs_in_process); - - if (status == SCI_SUCCESS) { - /* Save the tag for possible task mgmt later. */ - request->io_tag = scic_io_request_get_io_tag( - request->sci_request_handle); - } else { - /* The request did not really start in the - * hardware, so clear the request handle - * here so no terminations will be done. - */ - request->sci_request_handle = NULL; - } + if (status != SCI_SUCCESS) { + dev_warn(&isci_host->pdev->dev, + "%s: request_construct failed - status = 0x%x\n", + __func__, + status); + goto out; + } - } else - dev_warn(&isci_host->pdev->dev, - "%s: failed request start (0x%x)\n", - __func__, status); + spin_lock_irqsave(&isci_host->scic_lock, flags); + /* send the request, let the core assign the IO TAG. */ + status = scic_controller_start_io(&isci_host->sci, sci_device, + &request->sci, + SCI_CONTROLLER_INVALID_IO_TAG); + if (status != SCI_SUCCESS && + status != SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) { + dev_warn(&isci_host->pdev->dev, + "%s: failed request start (0x%x)\n", + __func__, status); spin_unlock_irqrestore(&isci_host->scic_lock, flags); + goto out; + } - if (status == - SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) { - /* Signal libsas that we need the SCSI error - * handler thread to work on this I/O and that - * we want a device reset. 
- */ - spin_lock_irqsave(&task->task_state_lock, flags); - task->task_state_flags |= SAS_TASK_NEED_DEV_RESET; - spin_unlock_irqrestore(&task->task_state_lock, flags); - - /* Cause this task to be scheduled in the SCSI error - * handler thread. - */ - isci_execpath_callback(isci_host, task, - sas_task_abort); - - /* Change the status, since we are holding - * the I/O until it is managed by the SCSI - * error handler. - */ - status = SCI_SUCCESS; - } + /* Either I/O started OK, or the core has signaled that + * the device needs a target reset. + * + * In either case, hold onto the I/O for later. + * + * Update it's status and add it to the list in the + * remote device object. + */ + isci_request_change_state(request, started); + list_add(&request->dev_node, &isci_device->reqs_in_process); - } else - dev_warn(&isci_host->pdev->dev, - "%s: request_construct failed - status = 0x%x\n", - __func__, - status); + if (status == SCI_SUCCESS) { + /* Save the tag for possible task mgmt later. */ + request->io_tag = scic_io_request_get_io_tag(&request->sci); + } else { + /* The request did not really start in the + * hardware, so clear the request handle + * here so no terminations will be done. + */ + request->terminated = true; + } + spin_unlock_irqrestore(&isci_host->scic_lock, flags); + + if (status == + SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) { + /* Signal libsas that we need the SCSI error + * handler thread to work on this I/O and that + * we want a device reset. + */ + spin_lock_irqsave(&task->task_state_lock, flags); + task->task_state_flags |= SAS_TASK_NEED_DEV_RESET; + spin_unlock_irqrestore(&task->task_state_lock, flags); + + /* Cause this task to be scheduled in the SCSI error + * handler thread. + */ + isci_execpath_callback(isci_host, task, + sas_task_abort); + + /* Change the status, since we are holding + * the I/O until it is managed by the SCSI + * error handler. + */ + status = SCI_SUCCESS; + } out: if (status != SCI_SUCCESS) { @@ -554,9 +541,7 @@ static void isci_request_handle_controller_specific_errors( { unsigned int cstatus; - cstatus = scic_request_get_controller_status( - request->sci_request_handle - ); + cstatus = scic_request_get_controller_status(&request->sci); dev_dbg(&request->isci_host->pdev->dev, "%s: %p SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR " @@ -997,13 +982,13 @@ void isci_request_io_request_complete( task); if (sas_protocol_ata(task->task_proto)) { - resp_buf = &request->sci_request_handle->stp.rsp; + resp_buf = &request->sci.stp.rsp; isci_request_process_stp_response(task, resp_buf); } else if (SAS_PROTOCOL_SSP == task->task_proto) { /* crack the iu response buffer. */ - resp_iu = &request->sci_request_handle->ssp.rsp; + resp_iu = &request->sci.ssp.rsp; isci_request_process_response_iu(task, resp_iu, &isci_host->pdev->dev); @@ -1034,7 +1019,7 @@ void isci_request_io_request_complete( request->complete_in_target = true; if (task->task_proto == SAS_PROTOCOL_SMP) { - void *rsp = &request->sci_request_handle->smp.rsp; + void *rsp = &request->sci.smp.rsp; dev_dbg(&isci_host->pdev->dev, "%s: SMP protocol completion\n", @@ -1051,8 +1036,7 @@ void isci_request_io_request_complete( * the maximum was transferred. */ u32 transferred_length - = scic_io_request_get_number_of_bytes_transferred( - request->sci_request_handle); + = scic_io_request_get_number_of_bytes_transferred(&request->sci); task->task_status.residual = task->total_xfer_len - transferred_length; @@ -1165,12 +1149,12 @@ void isci_request_io_request_complete( /* complete the io request to the core. 
*/ scic_controller_complete_io(&isci_host->sci, &isci_device->sci, - request->sci_request_handle); - /* NULL the request handle so it cannot be completed or + &request->sci); + /* set terminated handle so it cannot be completed or * terminated again, and to cause any calls into abort * task to recognize the already completed case. */ - request->sci_request_handle = NULL; + request->terminated = true; isci_host_can_dequeue(isci_host, 1); } diff --git a/drivers/scsi/isci/request.h b/drivers/scsi/isci/request.h index ddfbf71c97e..89d8b0a27df 100644 --- a/drivers/scsi/isci/request.h +++ b/drivers/scsi/isci/request.h @@ -82,11 +82,11 @@ enum task_type { }; struct isci_request { - struct scic_sds_request *sci_request_handle; enum isci_request_status status; enum task_type ttype; unsigned short io_tag; bool complete_in_target; + bool terminated; union ttype_ptr_union { struct sas_task *io_task_ptr; /* When ttype==io_task */ @@ -103,7 +103,6 @@ struct isci_request { dma_addr_t zero_scatter_daddr; unsigned int num_sg_entries; /* returned by pci_alloc_sg */ - unsigned int request_alloc_size; /* size of block from dma_pool_alloc */ /** Note: "io_request_completion" is completed in two different ways * depending on whether this is a TMF or regular request. @@ -115,9 +114,16 @@ struct isci_request { * TMF was aborting is guaranteed to have completed. */ struct completion *io_request_completion; - struct scic_sds_request sci_req[0] ____cacheline_aligned; + struct scic_sds_request sci; }; +static inline struct isci_request *sci_req_to_ireq(struct scic_sds_request *sci_req) +{ + struct isci_request *ireq = container_of(sci_req, typeof(*ireq), sci); + + return ireq; +} + /** * This function gets the status of the request object. * @request: This parameter points to the isci_request object diff --git a/drivers/scsi/isci/sata.c b/drivers/scsi/isci/sata.c index 578b1c5d40a..08dabf08f7d 100644 --- a/drivers/scsi/isci/sata.c +++ b/drivers/scsi/isci/sata.c @@ -72,7 +72,7 @@ struct host_to_dev_fis *isci_sata_task_to_fis_copy(struct sas_task *task) { struct isci_request *ireq = task->lldd_task; - struct host_to_dev_fis *fis = &ireq->sci_request_handle->stp.cmd; + struct host_to_dev_fis *fis = &ireq->sci.stp.cmd; memcpy(fis, &task->ata_task.fis, sizeof(struct host_to_dev_fis)); @@ -118,7 +118,7 @@ void isci_sata_set_ncq_tag( struct isci_request *request = task->lldd_task; register_fis->sector_count = qc->tag << 3; - scic_stp_io_request_set_ncq_tag(request->sci_request_handle, qc->tag); + scic_stp_io_request_set_ncq_tag(&request->sci, qc->tag); } /** @@ -156,7 +156,7 @@ void isci_request_process_stp_response(struct sas_task *task, enum sci_status isci_sata_management_task_request_build(struct isci_request *ireq) { - struct scic_sds_request *sci_req = ireq->sci_request_handle; + struct scic_sds_request *sci_req = &ireq->sci; struct isci_tmf *isci_tmf; enum sci_status status; @@ -190,9 +190,7 @@ enum sci_status isci_sata_management_task_request_build(struct isci_request *ire /* core builds the protocol specific request * based on the h2d fis. */ - status = scic_task_request_construct_sata( - ireq->sci_request_handle - ); + status = scic_task_request_construct_sata(&ireq->sci); return status; } diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c index 492faeea8b3..7adaf71c19d 100644 --- a/drivers/scsi/isci/task.c +++ b/drivers/scsi/isci/task.c @@ -300,8 +300,7 @@ static enum sci_status isci_task_request_build( /* let the core do it's construct. 
*/ status = scic_task_request_construct(&isci_host->sci, sci_device, SCI_CONTROLLER_INVALID_IO_TAG, - request, &request->sci_req, - &request->sci_request_handle); + &request->sci); if (status != SCI_SUCCESS) { dev_warn(&isci_host->pdev->dev, @@ -312,14 +311,10 @@ static enum sci_status isci_task_request_build( goto errout; } - request->sci_request_handle->ireq = request; - /* XXX convert to get this from task->tproto like other drivers */ if (dev->dev_type == SAS_END_DEV) { isci_tmf->proto = SAS_PROTOCOL_SSP; - status = scic_task_request_construct_ssp( - request->sci_request_handle - ); + status = scic_task_request_construct_ssp(&request->sci); if (status != SCI_SUCCESS) goto errout; } @@ -376,8 +371,7 @@ static void isci_tmf_timeout_cb(void *tmf_request_arg) status = scic_controller_terminate_request( &request->isci_host->sci, &request->isci_device->sci, - request->sci_request_handle - ); + &request->sci); dev_dbg(&request->isci_host->pdev->dev, "%s: tmf_request = %p; tmf = %p; status = %d\n", @@ -467,9 +461,8 @@ int isci_task_execute_tmf( status = scic_controller_start_task( &isci_host->sci, sci_device, - request->sci_request_handle, - SCI_CONTROLLER_INVALID_IO_TAG - ); + &request->sci, + SCI_CONTROLLER_INVALID_IO_TAG); if (status != SCI_TASK_SUCCESS) { dev_warn(&isci_host->pdev->dev, @@ -764,13 +757,13 @@ static void isci_terminate_request_core( * device condition (if the request handle is NULL, then the * request completed but needed additional handling here). */ - if (isci_request->sci_request_handle != NULL) { + if (!isci_request->terminated) { was_terminated = true; needs_cleanup_handling = true; status = scic_controller_terminate_request( &isci_host->sci, &isci_device->sci, - isci_request->sci_request_handle); + &isci_request->sci); } spin_unlock_irqrestore(&isci_host->scic_lock, flags); @@ -1430,7 +1423,7 @@ isci_task_request_complete(struct isci_host *ihost, enum isci_request_status old_state; struct isci_tmf *tmf = isci_request_access_tmf(ireq); struct completion *tmf_complete; - struct scic_sds_request *sci_req = ireq->sci_request_handle; + struct scic_sds_request *sci_req = &ireq->sci; dev_dbg(&ihost->pdev->dev, "%s: request = %p, status=%d\n", @@ -1460,12 +1453,11 @@ isci_task_request_complete(struct isci_host *ihost, /* PRINT_TMF( ((struct isci_tmf *)request->task)); */ tmf_complete = tmf->complete; - scic_controller_complete_io(&ihost->sci, &idev->sci, - ireq->sci_request_handle); - /* NULL the request handle to make sure it cannot be terminated + scic_controller_complete_io(&ihost->sci, &idev->sci, &ireq->sci); + /* set the 'terminated' flag handle to make sure it cannot be terminated * or completed again. */ - ireq->sci_request_handle = NULL; + ireq->terminated = true;; isci_request_change_state(ireq, unallocated); list_del_init(&ireq->dev_node); -- cgit v1.2.3-70-g09d2 From ce2b3261b6765c3b80fda95426c73e8d3bb1b035 Mon Sep 17 00:00:00 2001 From: Dan Williams <dan.j.williams@intel.com> Date: Sun, 8 May 2011 15:49:15 -0700 Subject: isci: unify constants cross driver constants are spread out over multiple header files, consolidate them into isci.h, and push some includes out to the source files that need them. TODO: remove SCI_MODE_SIZE infrastructure. 
TODO: task.h is full of inlines that are too large Signed-off-by: Dan Williams <dan.j.williams@intel.com> --- drivers/scsi/isci/core/sci_controller.h | 69 --- drivers/scsi/isci/core/sci_controller_constants.h | 206 --------- drivers/scsi/isci/core/sci_status.h | 409 ------------------ drivers/scsi/isci/core/sci_util.c | 1 + drivers/scsi/isci/core/scic_config_parameters.h | 11 - drivers/scsi/isci/core/scic_controller.h | 8 - drivers/scsi/isci/core/scic_io_request.h | 8 - drivers/scsi/isci/core/scic_phy.h | 1 - drivers/scsi/isci/core/scic_port.h | 2 +- drivers/scsi/isci/core/scic_sds_controller.c | 2 +- drivers/scsi/isci/core/scic_sds_controller.h | 2 - drivers/scsi/isci/core/scic_sds_phy.c | 1 + drivers/scsi/isci/core/scic_sds_port.c | 1 + drivers/scsi/isci/core/scic_sds_port.h | 10 +- .../isci/core/scic_sds_port_configuration_agent.c | 8 +- drivers/scsi/isci/core/scic_sds_request.c | 3 +- drivers/scsi/isci/core/scic_sds_request.h | 2 +- drivers/scsi/isci/core/scic_sds_stp_request.c | 1 + .../isci/core/scic_sds_unsolicited_frame_control.h | 19 +- drivers/scsi/isci/core/scic_task_request.h | 11 +- drivers/scsi/isci/core/scu_constants.h | 151 ------- drivers/scsi/isci/host.c | 2 + drivers/scsi/isci/host.h | 18 +- drivers/scsi/isci/init.c | 2 +- drivers/scsi/isci/isci.h | 480 ++++++++++++++++++++- drivers/scsi/isci/phy.c | 2 +- drivers/scsi/isci/phy.h | 5 +- drivers/scsi/isci/port.h | 3 + drivers/scsi/isci/probe_roms.c | 1 - drivers/scsi/isci/probe_roms.h | 1 + drivers/scsi/isci/remote_device.h | 1 - drivers/scsi/isci/remote_node_table.h | 9 +- drivers/scsi/isci/request.c | 1 + drivers/scsi/isci/request.h | 1 + drivers/scsi/isci/sci_environment.h | 3 +- drivers/scsi/isci/task.c | 2 + drivers/scsi/isci/task.h | 1 + drivers/scsi/isci/timers.c | 1 + 38 files changed, 497 insertions(+), 962 deletions(-) delete mode 100644 drivers/scsi/isci/core/sci_controller.h delete mode 100644 drivers/scsi/isci/core/sci_controller_constants.h delete mode 100644 drivers/scsi/isci/core/sci_status.h delete mode 100644 drivers/scsi/isci/core/scu_constants.h (limited to 'drivers/scsi/isci/request.h') diff --git a/drivers/scsi/isci/core/sci_controller.h b/drivers/scsi/isci/core/sci_controller.h deleted file mode 100644 index 01316b1a8f2..00000000000 --- a/drivers/scsi/isci/core/sci_controller.h +++ /dev/null @@ -1,69 +0,0 @@ -/* - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - * The full GNU General Public License is included in this distribution - * in the file called LICENSE.GPL. - * - * BSD LICENSE - * - * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. - * All rights reserved. 
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#ifndef _SCI_CONTROLLER_H_ -#define _SCI_CONTROLLER_H_ - -/** - * This file contains all of the interface methods that can be called by an SCI - * user on all SCI controller objects. - * - * - */ - -#define SCI_CONTROLLER_INVALID_IO_TAG 0xFFFF - -#endif /* _SCI_CONTROLLER_H_ */ - diff --git a/drivers/scsi/isci/core/sci_controller_constants.h b/drivers/scsi/isci/core/sci_controller_constants.h deleted file mode 100644 index 2525c26d7c0..00000000000 --- a/drivers/scsi/isci/core/sci_controller_constants.h +++ /dev/null @@ -1,206 +0,0 @@ -/* - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - * The full GNU General Public License is included in this distribution - * in the file called LICENSE.GPL. - * - * BSD LICENSE - * - * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. 
- * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#ifndef _SCI_CONTROLLER_CONSTANTS_H_ -#define _SCI_CONTROLLER_CONSTANTS_H_ - -/** - * This file contains constant values that change based on the type of core or - * framework being managed. These constants are exported in order to - * provide the user with information as to the bounds (i.e. how many) of - * specific objects. - * - * - */ - -#ifndef SCI_MAX_PHYS -/** - * - * - * This constant defines the maximum number of phy objects that can be - * supported for the SCU Driver Standard (SDS) library. This is tied directly - * to silicon capabilities. - */ -#define SCI_MAX_PHYS (4) -#endif - -#ifndef SCI_MAX_PORTS -/** - * - * - * This constant defines the maximum number of port objects that can be - * supported for the SCU Driver Standard (SDS) library. This is tied directly - * to silicon capabilities. - */ -#define SCI_MAX_PORTS SCI_MAX_PHYS -#endif - -#ifndef SCI_MIN_SMP_PHYS -/** - * - * - * This constant defines the minimum number of SMP phy objects that can be - * supported for a single expander level. This was determined by using 36 - * physical phys and room for 2 virtual phys. - */ -#define SCI_MIN_SMP_PHYS (38) -#endif - -#ifndef SCI_MAX_SMP_PHYS -/** - * - * - * This constant defines the maximum number of SMP phy objects that can be - * supported for the SCU Driver Standard (SDS) library. This number can be - * increased if required. - */ -#define SCI_MAX_SMP_PHYS (384) -#endif - -#ifndef SCI_MAX_REMOTE_DEVICES -/** - * - * - * This constant defines the maximum number of remote device objects that can - * be supported for the SCU Driver Standard (SDS) library. This is tied - * directly to silicon capabilities. - */ -#define SCI_MAX_REMOTE_DEVICES (256) -#endif - -#ifndef SCI_MIN_REMOTE_DEVICES -/** - * - * - * This constant defines the minimum number of remote device objects that can - * be supported for the SCU Driver Standard (SDS) library. This # can be - * configured for minimum memory environments to any value less than - * SCI_MAX_REMOTE_DEVICES - */ -#define SCI_MIN_REMOTE_DEVICES (16) -#endif - -#ifndef SCI_MAX_IO_REQUESTS -/** - * - * - * This constant defines the maximum number of IO request objects that can be - * supported for the SCU Driver Standard (SDS) library. This is tied directly - * to silicon capabilities. 
- */ -#define SCI_MAX_IO_REQUESTS (256) -#endif - -#ifndef SCI_MIN_IO_REQUESTS -/** - * - * - * This constant defines the minimum number of IO request objects that can be - * supported for the SCU Driver Standard (SDS) library. This # can be - * configured for minimum memory environments to any value less than - * SCI_MAX_IO_REQUESTS. - */ -#define SCI_MIN_IO_REQUESTS (1) -#endif - -#ifndef SCI_MAX_MSIX_MESSAGES -/** - * - * - * This constant defines the maximum number of MSI-X interrupt vectors/messages - * supported for an SCU hardware controller instance. - */ -#define SCI_MAX_MSIX_MESSAGES (2) -#endif - -#ifndef SCI_MAX_SCATTER_GATHER_ELEMENTS -/** - * - * - * This constant defines the maximum number of Scatter-Gather Elements to be - * used by any SCI component. - */ -#define SCI_MAX_SCATTER_GATHER_ELEMENTS 130 -#endif - -#ifndef SCI_MIN_SCATTER_GATHER_ELEMENTS -/** - * - * - * This constant defines the minimum number of Scatter-Gather Elements to be - * used by any SCI component. - */ -#define SCI_MIN_SCATTER_GATHER_ELEMENTS 1 -#endif - -/** - * - * - * This constant defines the maximum number of controllers that can occur in a - * single silicon package. - */ -#define SCI_MAX_CONTROLLERS 2 - -/** - * - * - * The maximum number of supported domain objects is currently tied to the - * maximum number of support port objects. - */ -#define SCI_MAX_DOMAINS SCI_MAX_PORTS - - -#endif /* _SCI_CONTROLLER_CONSTANTS_H_ */ - diff --git a/drivers/scsi/isci/core/sci_status.h b/drivers/scsi/isci/core/sci_status.h deleted file mode 100644 index 8b66619022c..00000000000 --- a/drivers/scsi/isci/core/sci_status.h +++ /dev/null @@ -1,409 +0,0 @@ -/* - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - * The full GNU General Public License is included in this distribution - * in the file called LICENSE.GPL. - * - * BSD LICENSE - * - * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. 
- * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#ifndef _SCI_STATUS_H_ -#define _SCI_STATUS_H_ - -/** - * This file contains all of the return status codes utilized across the - * various sub-components in SCI. - * - * - */ - - -/** - * enum sci_status - This is the general return status enumeration for non-IO, - * non-task management related SCI interface methods. - * - * - */ -enum sci_status { - /** - * This member indicates successful completion. - */ - SCI_SUCCESS = 0, - - /** - * This value indicates that the calling method completed successfully, - * but that the IO may have completed before having it's start method - * invoked. This occurs during SAT translation for requests that do - * not require an IO to the target or for any other requests that may - * be completed without having to submit IO. - */ - SCI_SUCCESS_IO_COMPLETE_BEFORE_START, - - /** - * This Value indicates that the SCU hardware returned an early response - * because the io request specified more data than is returned by the - * target device (mode pages, inquiry data, etc.). The completion routine - * will handle this case to get the actual number of bytes transferred. - */ - SCI_SUCCESS_IO_DONE_EARLY, - - /** - * This member indicates that the object for which a state change is - * being requested is already in said state. - */ - SCI_WARNING_ALREADY_IN_STATE, - - /** - * This member indicates interrupt coalescence timer may cause SAS - * specification compliance issues (i.e. SMP target mode response - * frames must be returned within 1.9 milliseconds). - */ - SCI_WARNING_TIMER_CONFLICT, - - /** - * This field indicates a sequence of action is not completed yet. Mostly, - * this status is used when multiple ATA commands are needed in a SATI translation. - */ - SCI_WARNING_SEQUENCE_INCOMPLETE, - - /** - * This member indicates that there was a general failure. - */ - SCI_FAILURE, - - /** - * This member indicates that the SCI implementation is unable to complete - * an operation due to a critical flaw the prevents any further operation - * (i.e. an invalid pointer). - */ - SCI_FATAL_ERROR, - - /** - * This member indicates the calling function failed, because the state - * of the controller is in a state that prevents successful completion. - */ - SCI_FAILURE_INVALID_STATE, - - /** - * This member indicates the calling function failed, because there is - * insufficient resources/memory to complete the request. - */ - SCI_FAILURE_INSUFFICIENT_RESOURCES, - - /** - * This member indicates the calling function failed, because the - * controller object required for the operation can't be located. 
- */ - SCI_FAILURE_CONTROLLER_NOT_FOUND, - - /** - * This member indicates the calling function failed, because the - * discovered controller type is not supported by the library. - */ - SCI_FAILURE_UNSUPPORTED_CONTROLLER_TYPE, - - /** - * This member indicates the calling function failed, because the - * requested initialization data version isn't supported. - */ - SCI_FAILURE_UNSUPPORTED_INIT_DATA_VERSION, - - /** - * This member indicates the calling function failed, because the - * requested configuration of SAS Phys into SAS Ports is not supported. - */ - SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION, - - /** - * This member indicates the calling function failed, because the - * requested protocol is not supported by the remote device, port, - * or controller. - */ - SCI_FAILURE_UNSUPPORTED_PROTOCOL, - - /** - * This member indicates the calling function failed, because the - * requested information type is not supported by the SCI implementation. - */ - SCI_FAILURE_UNSUPPORTED_INFORMATION_TYPE, - - /** - * This member indicates the calling function failed, because the - * device already exists. - */ - SCI_FAILURE_DEVICE_EXISTS, - - /** - * This member indicates the calling function failed, because adding - * a phy to the object is not possible. - */ - SCI_FAILURE_ADDING_PHY_UNSUPPORTED, - - /** - * This member indicates the calling function failed, because the - * requested information type is not supported by the SCI implementation. - */ - SCI_FAILURE_UNSUPPORTED_INFORMATION_FIELD, - - /** - * This member indicates the calling function failed, because the SCI - * implementation does not support the supplied time limit. - */ - SCI_FAILURE_UNSUPPORTED_TIME_LIMIT, - - /** - * This member indicates the calling method failed, because the SCI - * implementation does not contain the specified Phy. - */ - SCI_FAILURE_INVALID_PHY, - - /** - * This member indicates the calling method failed, because the SCI - * implementation does not contain the specified Port. - */ - SCI_FAILURE_INVALID_PORT, - - /** - * This member indicates the calling method was partly successful - * The port was reset but not all phys in port are operational - */ - SCI_FAILURE_RESET_PORT_PARTIAL_SUCCESS, - - /** - * This member indicates that calling method failed - * The port reset did not complete because none of the phys are operational - */ - SCI_FAILURE_RESET_PORT_FAILURE, - - /** - * This member indicates the calling method failed, because the SCI - * implementation does not contain the specified remote device. - */ - SCI_FAILURE_INVALID_REMOTE_DEVICE, - - /** - * This member indicates the calling method failed, because the remote - * device is in a bad state and requires a reset. - */ - SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED, - - /** - * This member indicates the calling method failed, because the SCI - * implementation does not contain or support the specified IO tag. - */ - SCI_FAILURE_INVALID_IO_TAG, - - /** - * This member indicates that the operation failed and the user should - * check the response data associated with the IO. - */ - SCI_FAILURE_IO_RESPONSE_VALID, - - /** - * This member indicates that the operation failed, the failure is - * controller implementation specific, and the response data associated - * with the request is not valid. You can query for the controller - * specific error information via scic_controller_get_request_status() - */ - SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR, - - /** - * This member indicated that the operation failed because the - * user requested this IO to be terminated. 
- */ - SCI_FAILURE_IO_TERMINATED, - - /** - * This member indicates that the operation failed and the associated - * request requires a SCSI abort task to be sent to the target. - */ - SCI_FAILURE_IO_REQUIRES_SCSI_ABORT, - - /** - * This member indicates that the operation failed because the supplied - * device could not be located. - */ - SCI_FAILURE_DEVICE_NOT_FOUND, - - /** - * This member indicates that the operation failed because the - * objects association is required and is not correctly set. - */ - SCI_FAILURE_INVALID_ASSOCIATION, - - /** - * This member indicates that the operation failed, because a timeout - * occurred. - */ - SCI_FAILURE_TIMEOUT, - - /** - * This member indicates that the operation failed, because the user - * specified a value that is either invalid or not supported. - */ - SCI_FAILURE_INVALID_PARAMETER_VALUE, - - /** - * This value indicates that the operation failed, because the number - * of messages (MSI-X) is not supported. - */ - SCI_FAILURE_UNSUPPORTED_MESSAGE_COUNT, - - /** - * This value indicates that the method failed due to a lack of - * available NCQ tags. - */ - SCI_FAILURE_NO_NCQ_TAG_AVAILABLE, - - /** - * This value indicates that a protocol violation has occurred on the - * link. - */ - SCI_FAILURE_PROTOCOL_VIOLATION, - - /** - * This value indicates a failure condition that retry may help to clear. - */ - SCI_FAILURE_RETRY_REQUIRED, - - /** - * This field indicates the retry limit was reached when a retry is attempted - */ - SCI_FAILURE_RETRY_LIMIT_REACHED, - - /** - * This member indicates the calling method was partly successful. - * Mostly, this status is used when a LUN_RESET issued to an expander attached - * STP device in READY NCQ substate needs to have RNC suspended/resumed - * before posting TC. - */ - SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS, - - /** - * This field indicates an illegal phy connection based on the routing attribute - * of both expander phy attached to each other. - */ - SCI_FAILURE_ILLEGAL_ROUTING_ATTRIBUTE_CONFIGURATION, - - /** - * This field indicates a CONFIG ROUTE INFO command has a response with function result - * INDEX DOES NOT EXIST, usually means exceeding max route index. - */ - SCI_FAILURE_EXCEED_MAX_ROUTE_INDEX, - - /** - * This value indicates that an unsupported PCI device ID has been - * specified. This indicates that attempts to invoke - * scic_library_allocate_controller() will fail. - */ - SCI_FAILURE_UNSUPPORTED_PCI_DEVICE_ID - -}; - -/** - * enum sci_io_status - This enumeration depicts all of the possible IO - * completion status values. Each value in this enumeration maps directly - * to a value in the enum sci_status enumeration. Please refer to that - * enumeration for detailed comments concerning what the status represents. - * - * Add the API to retrieve the SCU status from the core. 
Check to see that the - * following status are properly handled: - SCI_IO_FAILURE_UNSUPPORTED_PROTOCOL - * - SCI_IO_FAILURE_INVALID_IO_TAG - */ -enum sci_io_status { - SCI_IO_SUCCESS = SCI_SUCCESS, - SCI_IO_FAILURE = SCI_FAILURE, - SCI_IO_SUCCESS_COMPLETE_BEFORE_START = SCI_SUCCESS_IO_COMPLETE_BEFORE_START, - SCI_IO_SUCCESS_IO_DONE_EARLY = SCI_SUCCESS_IO_DONE_EARLY, - SCI_IO_FAILURE_INVALID_STATE = SCI_FAILURE_INVALID_STATE, - SCI_IO_FAILURE_INSUFFICIENT_RESOURCES = SCI_FAILURE_INSUFFICIENT_RESOURCES, - SCI_IO_FAILURE_UNSUPPORTED_PROTOCOL = SCI_FAILURE_UNSUPPORTED_PROTOCOL, - SCI_IO_FAILURE_RESPONSE_VALID = SCI_FAILURE_IO_RESPONSE_VALID, - SCI_IO_FAILURE_CONTROLLER_SPECIFIC_ERR = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR, - SCI_IO_FAILURE_TERMINATED = SCI_FAILURE_IO_TERMINATED, - SCI_IO_FAILURE_REQUIRES_SCSI_ABORT = SCI_FAILURE_IO_REQUIRES_SCSI_ABORT, - SCI_IO_FAILURE_INVALID_PARAMETER_VALUE = SCI_FAILURE_INVALID_PARAMETER_VALUE, - SCI_IO_FAILURE_NO_NCQ_TAG_AVAILABLE = SCI_FAILURE_NO_NCQ_TAG_AVAILABLE, - SCI_IO_FAILURE_PROTOCOL_VIOLATION = SCI_FAILURE_PROTOCOL_VIOLATION, - - SCI_IO_FAILURE_REMOTE_DEVICE_RESET_REQUIRED = SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED, - - SCI_IO_FAILURE_RETRY_REQUIRED = SCI_FAILURE_RETRY_REQUIRED, - SCI_IO_FAILURE_RETRY_LIMIT_REACHED = SCI_FAILURE_RETRY_LIMIT_REACHED, - SCI_IO_FAILURE_INVALID_REMOTE_DEVICE = SCI_FAILURE_INVALID_REMOTE_DEVICE -}; - -/** - * enum sci_task_status - This enumeration depicts all of the possible task - * completion status values. Each value in this enumeration maps directly - * to a value in the enum sci_status enumeration. Please refer to that - * enumeration for detailed comments concerning what the status represents. - * - * Check to see that the following status are properly handled: - */ -enum sci_task_status { - SCI_TASK_SUCCESS = SCI_SUCCESS, - SCI_TASK_FAILURE = SCI_FAILURE, - SCI_TASK_FAILURE_INVALID_STATE = SCI_FAILURE_INVALID_STATE, - SCI_TASK_FAILURE_INSUFFICIENT_RESOURCES = SCI_FAILURE_INSUFFICIENT_RESOURCES, - SCI_TASK_FAILURE_UNSUPPORTED_PROTOCOL = SCI_FAILURE_UNSUPPORTED_PROTOCOL, - SCI_TASK_FAILURE_INVALID_TAG = SCI_FAILURE_INVALID_IO_TAG, - SCI_TASK_FAILURE_RESPONSE_VALID = SCI_FAILURE_IO_RESPONSE_VALID, - SCI_TASK_FAILURE_CONTROLLER_SPECIFIC_ERR = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR, - SCI_TASK_FAILURE_TERMINATED = SCI_FAILURE_IO_TERMINATED, - SCI_TASK_FAILURE_INVALID_PARAMETER_VALUE = SCI_FAILURE_INVALID_PARAMETER_VALUE, - - SCI_TASK_FAILURE_REMOTE_DEVICE_RESET_REQUIRED = SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED, - SCI_TASK_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS = SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS - -}; - - -#endif /* _SCI_STATUS_H_ */ - diff --git a/drivers/scsi/isci/core/sci_util.c b/drivers/scsi/isci/core/sci_util.c index 4e60d55836e..0101fec2386 100644 --- a/drivers/scsi/isci/core/sci_util.c +++ b/drivers/scsi/isci/core/sci_util.c @@ -56,6 +56,7 @@ #include <linux/kernel.h> #include "sci_util.h" #include "sci_environment.h" +#include "request.h" void *scic_request_get_virt_addr(struct scic_sds_request *sci_req, dma_addr_t phys_addr) { diff --git a/drivers/scsi/isci/core/scic_config_parameters.h b/drivers/scsi/isci/core/scic_config_parameters.h index ea09d0bd2d1..8b8c9259f52 100644 --- a/drivers/scsi/isci/core/scic_config_parameters.h +++ b/drivers/scsi/isci/core/scic_config_parameters.h @@ -56,17 +56,6 @@ #ifndef _SCIC_SDS_USER_PARAMETERS_H_ #define _SCIC_SDS_USER_PARAMETERS_H_ -/** - * This file contains all of the structure definitions and interface methods - * that can be called by a SCIC user on the SCU 
Driver Standard - * (struct scic_sds_user_parameters) user parameter block. - * - * - */ - - -#include "sci_status.h" -#include "sci_controller_constants.h" #include "probe_roms.h" struct scic_sds_controller; diff --git a/drivers/scsi/isci/core/scic_controller.h b/drivers/scsi/isci/core/scic_controller.h index 50ba155a6c7..bd08f306ed6 100644 --- a/drivers/scsi/isci/core/scic_controller.h +++ b/drivers/scsi/isci/core/scic_controller.h @@ -56,8 +56,6 @@ #ifndef _SCIC_CONTROLLER_H_ #define _SCIC_CONTROLLER_H_ -#include "sci_status.h" -#include "sci_controller.h" #include "scic_config_parameters.h" struct scic_sds_request; @@ -65,12 +63,6 @@ struct scic_sds_phy; struct scic_sds_port; struct scic_sds_remote_device; - -enum sci_controller_mode { - SCI_MODE_SPEED, /* Optimized for performance */ - SCI_MODE_SIZE /* Optimized for memory use */ -}; - enum sci_status scic_controller_construct(struct scic_sds_controller *c, void __iomem *scu_base, void __iomem *smu_base); diff --git a/drivers/scsi/isci/core/scic_io_request.h b/drivers/scsi/isci/core/scic_io_request.h index f7c6d426f8c..a4664cc3c57 100644 --- a/drivers/scsi/isci/core/scic_io_request.h +++ b/drivers/scsi/isci/core/scic_io_request.h @@ -56,15 +56,7 @@ #ifndef _SCIC_IO_REQUEST_H_ #define _SCIC_IO_REQUEST_H_ -/** - * This file contains the structures and interface methods that can be - * referenced and used by the SCI user for the SCI IO request object. - * - * Determine the failure situations and return values. - */ - #include <linux/kernel.h> -#include "sci_status.h" struct scic_sds_request; struct scic_sds_remote_device; diff --git a/drivers/scsi/isci/core/scic_phy.h b/drivers/scsi/isci/core/scic_phy.h index 1fb49f02162..f046b4af4b8 100644 --- a/drivers/scsi/isci/core/scic_phy.h +++ b/drivers/scsi/isci/core/scic_phy.h @@ -66,7 +66,6 @@ #include <scsi/sas.h> #include <scsi/libsas.h> -#include "sci_status.h" struct scic_sds_phy; struct scic_sds_port; diff --git a/drivers/scsi/isci/core/scic_port.h b/drivers/scsi/isci/core/scic_port.h index 44a8ea88715..51e7eede5c8 100644 --- a/drivers/scsi/isci/core/scic_port.h +++ b/drivers/scsi/isci/core/scic_port.h @@ -56,8 +56,8 @@ #ifndef _SCIC_PORT_H_ #define _SCIC_PORT_H_ +#include "isci.h" #include "sas.h" -#include "sci_status.h" #include "scic_phy.h" struct scic_sds_port; diff --git a/drivers/scsi/isci/core/scic_sds_controller.c b/drivers/scsi/isci/core/scic_sds_controller.c index 852b7d52f84..e77265b9b9e 100644 --- a/drivers/scsi/isci/core/scic_sds_controller.c +++ b/drivers/scsi/isci/core/scic_sds_controller.c @@ -68,11 +68,11 @@ #include "sci_environment.h" #include "sci_util.h" #include "scu_completion_codes.h" -#include "scu_constants.h" #include "scu_event_codes.h" #include "scu_remote_node_context.h" #include "scu_task_context.h" #include "scu_unsolicited_frame.h" +#include "timers.h" #define SCU_CONTEXT_RAM_INIT_STALL_TIME 200 diff --git a/drivers/scsi/isci/core/scic_sds_controller.h b/drivers/scsi/isci/core/scic_sds_controller.h index 0d5047319cb..5c00f9688c1 100644 --- a/drivers/scsi/isci/core/scic_sds_controller.h +++ b/drivers/scsi/isci/core/scic_sds_controller.h @@ -67,7 +67,6 @@ */ #include "sci_pool.h" -#include "sci_controller_constants.h" #include "sci_base_state.h" #include "sci_base_state_machine.h" #include "scic_config_parameters.h" @@ -76,7 +75,6 @@ #include "remote_node_table.h" #include "remote_device.h" #include "scu_registers.h" -#include "scu_constants.h" #include "scu_task_context.h" #include "scu_unsolicited_frame.h" #include "scic_sds_unsolicited_frame_control.h" 
diff --git a/drivers/scsi/isci/core/scic_sds_phy.c b/drivers/scsi/isci/core/scic_sds_phy.c index f0f4c74e461..c6df0e2c842 100644 --- a/drivers/scsi/isci/core/scic_sds_phy.c +++ b/drivers/scsi/isci/core/scic_sds_phy.c @@ -65,6 +65,7 @@ #include "sci_environment.h" #include "sci_util.h" #include "scu_event_codes.h" +#include "timers.h" #define SCIC_SDS_PHY_MIN_TIMER_COUNT (SCI_MAX_PHYS) #define SCIC_SDS_PHY_MAX_TIMER_COUNT (SCI_MAX_PHYS) diff --git a/drivers/scsi/isci/core/scic_sds_port.c b/drivers/scsi/isci/core/scic_sds_port.c index 01288dd44fb..9302e397e4c 100644 --- a/drivers/scsi/isci/core/scic_sds_port.c +++ b/drivers/scsi/isci/core/scic_sds_port.c @@ -64,6 +64,7 @@ #include "scic_sds_request.h" #include "sci_environment.h" #include "scu_registers.h" +#include "timers.h" #define SCIC_SDS_PORT_MIN_TIMER_COUNT (SCI_MAX_PORTS) #define SCIC_SDS_PORT_MAX_TIMER_COUNT (SCI_MAX_PORTS) diff --git a/drivers/scsi/isci/core/scic_sds_port.h b/drivers/scsi/isci/core/scic_sds_port.h index 3633561b686..3696debcce1 100644 --- a/drivers/scsi/isci/core/scic_sds_port.h +++ b/drivers/scsi/isci/core/scic_sds_port.h @@ -58,21 +58,15 @@ #include <linux/kernel.h> #include "sas.h" -#include "sci_controller_constants.h" #include "scu_registers.h" - -#define SCIC_SDS_DUMMY_PORT 0xFF +#include "sci_base_state_machine.h" struct scic_sds_controller; struct scic_sds_phy; struct scic_sds_remote_device; struct scic_sds_request; -/** - * This constant defines the value utilized by SCI Components to indicate - * an invalid handle. - */ -#define SCI_INVALID_HANDLE 0x0 +#define SCIC_SDS_DUMMY_PORT 0xFF /** * enum SCIC_SDS_PORT_READY_SUBSTATES - diff --git a/drivers/scsi/isci/core/scic_sds_port_configuration_agent.c b/drivers/scsi/isci/core/scic_sds_port_configuration_agent.c index 2d3d067396d..3fad8c1db6c 100644 --- a/drivers/scsi/isci/core/scic_sds_port_configuration_agent.c +++ b/drivers/scsi/isci/core/scic_sds_port_configuration_agent.c @@ -53,17 +53,11 @@ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ -/** - * This file contains the implementation for the public and protected methods - * for the port configuration agent. 
- * - * - */ - #include "sci_environment.h" #include "scic_controller.h" #include "scic_sds_controller.h" #include "scic_sds_port_configuration_agent.h" +#include "timers.h" #define SCIC_SDS_MPC_RECONFIGURATION_TIMEOUT (10) #define SCIC_SDS_APC_RECONFIGURATION_TIMEOUT (10) diff --git a/drivers/scsi/isci/core/scic_sds_request.c b/drivers/scsi/isci/core/scic_sds_request.c index 50dd19bba3d..de35885eb1f 100644 --- a/drivers/scsi/isci/core/scic_sds_request.c +++ b/drivers/scsi/isci/core/scic_sds_request.c @@ -67,8 +67,9 @@ #include "sci_environment.h" #include "sci_util.h" #include "scu_completion_codes.h" -#include "scu_constants.h" #include "scu_task_context.h" +#include "request.h" +#include "task.h" /* * **************************************************************************** diff --git a/drivers/scsi/isci/core/scic_sds_request.h b/drivers/scsi/isci/core/scic_sds_request.h index 1dd98aabe32..5ce7ff2aed9 100644 --- a/drivers/scsi/isci/core/scic_sds_request.h +++ b/drivers/scsi/isci/core/scic_sds_request.h @@ -56,11 +56,11 @@ #ifndef _SCIC_SDS_IO_REQUEST_H_ #define _SCIC_SDS_IO_REQUEST_H_ +#include "isci.h" #include "scic_io_request.h" #include "sci_base_state_machine.h" #include "scu_task_context.h" #include "scic_sds_stp_request.h" -#include "scu_constants.h" #include "sas.h" struct scic_sds_controller; diff --git a/drivers/scsi/isci/core/scic_sds_stp_request.c b/drivers/scsi/isci/core/scic_sds_stp_request.c index 2677393db6f..c1c316cad52 100644 --- a/drivers/scsi/isci/core/scic_sds_stp_request.c +++ b/drivers/scsi/isci/core/scic_sds_stp_request.c @@ -69,6 +69,7 @@ #include "scu_completion_codes.h" #include "scu_event_codes.h" #include "scu_task_context.h" +#include "request.h" void scic_sds_stp_request_assign_buffers(struct scic_sds_request *sci_req) { diff --git a/drivers/scsi/isci/core/scic_sds_unsolicited_frame_control.h b/drivers/scsi/isci/core/scic_sds_unsolicited_frame_control.h index 4eb244c06cf..0d8ca8c4770 100644 --- a/drivers/scsi/isci/core/scic_sds_unsolicited_frame_control.h +++ b/drivers/scsi/isci/core/scic_sds_unsolicited_frame_control.h @@ -53,19 +53,11 @@ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ -/** - * This file contains all of the unsolicited frame related management for the - * address table, the headers, and actual payload buffers. - * - * - */ - #ifndef _SCIC_SDS_UNSOLICITED_FRAME_CONTROL_H_ #define _SCIC_SDS_UNSOLICITED_FRAME_CONTROL_H_ +#include "isci.h" #include "scu_unsolicited_frame.h" -#include "scu_constants.h" -#include "sci_status.h" /** * enum unsolicited_frame_state - @@ -144,15 +136,6 @@ struct scic_sds_uf_header_array { }; -/* - * Determine the size of the unsolicited frame array including - * unused buffers. */ -#if SCU_UNSOLICITED_FRAME_COUNT <= SCU_MIN_UF_TABLE_ENTRIES -#define SCU_UNSOLICITED_FRAME_CONTROL_ARRAY_SIZE SCU_MIN_UF_TABLE_ENTRIES -#else -#define SCU_UNSOLICITED_FRAME_CONTROL_ARRAY_SIZE SCU_MAX_UNSOLICITED_FRAMES -#endif /* SCU_UNSOLICITED_FRAME_COUNT <= SCU_MIN_UF_TABLE_ENTRIES */ - /** * struct scic_sds_uf_buffer_array - * diff --git a/drivers/scsi/isci/core/scic_task_request.h b/drivers/scsi/isci/core/scic_task_request.h index 98cfaa9e6d3..790cee9b4af 100644 --- a/drivers/scsi/isci/core/scic_task_request.h +++ b/drivers/scsi/isci/core/scic_task_request.h @@ -56,16 +56,7 @@ #ifndef _SCIC_TASK_REQUEST_H_ #define _SCIC_TASK_REQUEST_H_ -/** - * This file contains the structures and interface methods that can be - * referenced and used by the SCI user for to utilize task management - * requests. 
- * - * - */ - - -#include "sci_status.h" +#include "isci.h" struct scic_sds_request; struct scic_sds_remote_device; diff --git a/drivers/scsi/isci/core/scu_constants.h b/drivers/scsi/isci/core/scu_constants.h deleted file mode 100644 index a99d1103ad3..00000000000 --- a/drivers/scsi/isci/core/scu_constants.h +++ /dev/null @@ -1,151 +0,0 @@ -/* - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - * The full GNU General Public License is included in this distribution - * in the file called LICENSE.GPL. - * - * BSD LICENSE - * - * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#ifndef _SCU_CONSTANTS_H_ -#define _SCU_CONSTANTS_H_ - -/** - * This file contains the SCU hardware constants. - * - * - */ - -#include "sci_controller_constants.h" - -/** - * - * - * 2 indicates the maximum number of UFs that can occur for a given IO request. - * The hardware handles reception of additional unsolicited frames while all - * UFs are in use, by holding off the transmitting device. This number could - * be theoretically reduced to 1, but 2 provides for more reliable operation. 
- * During SATA PIO operation, it is possible under some conditions for there to - * be 3 separate FISes received, back to back to back (PIO Setup, Data, D2H - * Register). It is unlikely to have all 3 pending all at once without some of - * them already being processed. - */ -#define SCU_MIN_UNSOLICITED_FRAMES (1) -#define SCU_MIN_CRITICAL_NOTIFICATIONS (24) -#define SCU_MIN_EVENTS (4) -#define SCU_MIN_COMPLETION_QUEUE_SCRATCH (2) -#define SCU_MIN_COMPLETION_QUEUE_ENTRIES (SCU_MIN_CRITICAL_NOTIFICATIONS \ - + SCU_MIN_EVENTS \ - + SCU_MIN_UNSOLICITED_FRAMES \ - + SCI_MIN_IO_REQUESTS \ - + SCU_MIN_COMPLETION_QUEUE_SCRATCH) - -#define SCU_MAX_CRITICAL_NOTIFICATIONS (384) -#define SCU_MAX_EVENTS (128) -#define SCU_MAX_UNSOLICITED_FRAMES (128) -#define SCU_MAX_COMPLETION_QUEUE_SCRATCH (128) -#define SCU_MAX_COMPLETION_QUEUE_ENTRIES (SCU_MAX_CRITICAL_NOTIFICATIONS \ - + SCU_MAX_EVENTS \ - + SCU_MAX_UNSOLICITED_FRAMES \ - + SCI_MAX_IO_REQUESTS \ - + SCU_MAX_COMPLETION_QUEUE_SCRATCH) - -#if !defined(ENABLE_MINIMUM_MEMORY_MODE) -#define SCU_UNSOLICITED_FRAME_COUNT SCU_MAX_UNSOLICITED_FRAMES -#define SCU_CRITICAL_NOTIFICATION_COUNT SCU_MAX_CRITICAL_NOTIFICATIONS -#define SCU_EVENT_COUNT SCU_MAX_EVENTS -#define SCU_COMPLETION_QUEUE_SCRATCH SCU_MAX_COMPLETION_QUEUE_SCRATCH -#define SCU_IO_REQUEST_COUNT SCI_MAX_IO_REQUESTS -#define SCU_IO_REQUEST_SGE_COUNT SCI_MAX_SCATTER_GATHER_ELEMENTS -#define SCU_COMPLETION_QUEUE_COUNT SCU_MAX_COMPLETION_QUEUE_ENTRIES -#else -#define SCU_UNSOLICITED_FRAME_COUNT SCU_MIN_UNSOLICITED_FRAMES -#define SCU_CRITICAL_NOTIFICATION_COUNT SCU_MIN_CRITICAL_NOTIFICATIONS -#define SCU_EVENT_COUNT SCU_MIN_EVENTS -#define SCU_COMPLETION_QUEUE_SCRATCH SCU_MIN_COMPLETION_QUEUE_SCRATCH -#define SCU_IO_REQUEST_COUNT SCI_MIN_IO_REQUESTS -#define SCU_IO_REQUEST_SGE_COUNT SCI_MIN_SCATTER_GATHER_ELEMENTS -#define SCU_COMPLETION_QUEUE_COUNT SCU_MIN_COMPLETION_QUEUE_ENTRIES -#endif /* !defined(ENABLE_MINIMUM_MEMORY_OPERATION) */ - -/** - * - * - * The SCU_COMPLETION_QUEUE_COUNT constant indicates the size of the completion - * queue into which the hardware DMAs 32-bit quantas (completion entries). - */ - -/** - * - * - * This queue must be programmed to a power of 2 size (e.g. 32, 64, 1024, etc.). - */ -#if (SCU_COMPLETION_QUEUE_COUNT != 16) && \ - (SCU_COMPLETION_QUEUE_COUNT != 32) && \ - (SCU_COMPLETION_QUEUE_COUNT != 64) && \ - (SCU_COMPLETION_QUEUE_COUNT != 128) && \ - (SCU_COMPLETION_QUEUE_COUNT != 256) && \ - (SCU_COMPLETION_QUEUE_COUNT != 512) && \ - (SCU_COMPLETION_QUEUE_COUNT != 1024) -#error "SCU_COMPLETION_QUEUE_COUNT must be set to a power of 2." 
-#endif - -#if SCU_MIN_UNSOLICITED_FRAMES > SCU_MAX_UNSOLICITED_FRAMES -#error "Invalid configuration of unsolicited frame constants" -#endif /* SCU_MIN_UNSOLICITED_FRAMES > SCU_MAX_UNSOLICITED_FRAMES */ - -#define SCU_MIN_UF_TABLE_ENTRIES (8) -#define SCU_ABSOLUTE_MAX_UNSOLICITED_FRAMES (4096) -#define SCU_UNSOLICITED_FRAME_BUFFER_SIZE (1024) -#define SCU_INVALID_FRAME_INDEX (0xFFFF) - -#define SCU_IO_REQUEST_MAX_SGE_SIZE (0x00FFFFFF) -#define SCU_IO_REQUEST_MAX_TRANSFER_LENGTH (0x00FFFFFF) - -#endif /* _SCU_CONSTANTS_H_ */ diff --git a/drivers/scsi/isci/host.c b/drivers/scsi/isci/host.c index 271a7e171e7..5847149857a 100644 --- a/drivers/scsi/isci/host.c +++ b/drivers/scsi/isci/host.c @@ -60,7 +60,9 @@ #include "request.h" #include "host.h" #include "probe_roms.h" +#include "scic_controller.h" #include "scic_sds_controller.h" +#include "timers.h" irqreturn_t isci_msix_isr(int vec, void *data) { diff --git a/drivers/scsi/isci/host.h b/drivers/scsi/isci/host.h index afa41e83eaa..13c1c99ef29 100644 --- a/drivers/scsi/isci/host.h +++ b/drivers/scsi/isci/host.h @@ -53,26 +53,12 @@ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ - -#if !defined(_SCI_HOST_H_) +#ifndef _SCI_HOST_H_ #define _SCI_HOST_H_ -#include "phy.h" #include "scic_sds_controller.h" -#include "timers.h" #include "remote_device.h" - -#define DRV_NAME "isci" -#define SCI_PCI_BAR_COUNT 2 -#define SCI_NUM_MSI_X_INT 2 -#define SCI_SMU_BAR 0 -#define SCI_SMU_BAR_SIZE (16*1024) -#define SCI_SCU_BAR 1 -#define SCI_SCU_BAR_SIZE (4*1024*1024) -#define SCI_IO_SPACE_BAR0 2 -#define SCI_IO_SPACE_BAR1 3 -#define ISCI_CAN_QUEUE_VAL 250 /* < SCI_MAX_IO_REQUESTS ? */ -#define SCIC_CONTROLLER_STOP_TIMEOUT 5000 +#include "phy.h" struct isci_host { struct scic_sds_controller sci; diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c index 522d39f9e13..df132c07bad 100644 --- a/drivers/scsi/isci/init.c +++ b/drivers/scsi/isci/init.c @@ -61,9 +61,9 @@ #include <asm/string.h> #include "isci.h" #include "task.h" -#include "sci_controller_constants.h" #include "sci_environment.h" #include "probe_roms.h" +#include "scic_controller.h" static struct scsi_transport_template *isci_transport_template; diff --git a/drivers/scsi/isci/isci.h b/drivers/scsi/isci/isci.h index 60c84627c13..800f2332ecd 100644 --- a/drivers/scsi/isci/isci.h +++ b/drivers/scsi/isci/isci.h @@ -56,22 +56,470 @@ #ifndef __ISCI_H__ #define __ISCI_H__ -#include <linux/kernel.h> -#include <linux/list.h> -#include <linux/types.h> -#include <linux/spinlock.h> #include <linux/interrupt.h> -#include <linux/bug.h> -#include <scsi/libsas.h> -#include <scsi/scsi.h> - -#include "scic_controller.h" -#include "host.h" -#include "timers.h" -#include "sci_status.h" -#include "request.h" -#include "task.h" -#include "sata.h" + +#define DRV_NAME "isci" +#define SCI_PCI_BAR_COUNT 2 +#define SCI_NUM_MSI_X_INT 2 +#define SCI_SMU_BAR 0 +#define SCI_SMU_BAR_SIZE (16*1024) +#define SCI_SCU_BAR 1 +#define SCI_SCU_BAR_SIZE (4*1024*1024) +#define SCI_IO_SPACE_BAR0 2 +#define SCI_IO_SPACE_BAR1 3 +#define ISCI_CAN_QUEUE_VAL 250 /* < SCI_MAX_IO_REQUESTS ? 
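+ *  (it is: SCI_MAX_IO_REQUESTS, defined a few lines below, is 256)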
*/ +#define SCIC_CONTROLLER_STOP_TIMEOUT 5000 + +#define SCI_CONTROLLER_INVALID_IO_TAG 0xFFFF + +enum sci_controller_mode { + SCI_MODE_SPEED, + SCI_MODE_SIZE /* deprecated */ +}; + +#define SCI_MAX_PHYS (4) +#define SCI_MAX_PORTS SCI_MAX_PHYS +#define SCI_MIN_SMP_PHYS (38) +#define SCI_MAX_SMP_PHYS (384) /* not silicon constrained */ +#define SCI_MAX_REMOTE_DEVICES (256) +#define SCI_MIN_REMOTE_DEVICES (16) +#define SCI_MAX_IO_REQUESTS (256) +#define SCI_MIN_IO_REQUESTS (1) +#define SCI_MAX_MSIX_MESSAGES (2) +#define SCI_MAX_SCATTER_GATHER_ELEMENTS 130 /* not silicon constrained */ +#define SCI_MIN_SCATTER_GATHER_ELEMENTS 1 +#define SCI_MAX_CONTROLLERS 2 +#define SCI_MAX_DOMAINS SCI_MAX_PORTS + +/* 2 indicates the maximum number of UFs that can occur for a given IO request. + * The hardware handles reception of additional unsolicited frames while all + * UFs are in use, by holding off the transmitting device. This number could + * be theoretically reduced to 1, but 2 provides for more reliable operation. + * During SATA PIO operation, it is possible under some conditions for there to + * be 3 separate FISes received, back to back to back (PIO Setup, Data, D2H + * Register). It is unlikely to have all 3 pending all at once without some of + * them already being processed. + */ +#define SCU_MIN_UNSOLICITED_FRAMES (1) +#define SCU_MIN_CRITICAL_NOTIFICATIONS (24) +#define SCU_MIN_EVENTS (4) +#define SCU_MIN_COMPLETION_QUEUE_SCRATCH (2) +#define SCU_MIN_COMPLETION_QUEUE_ENTRIES (SCU_MIN_CRITICAL_NOTIFICATIONS \ + + SCU_MIN_EVENTS \ + + SCU_MIN_UNSOLICITED_FRAMES \ + + SCI_MIN_IO_REQUESTS \ + + SCU_MIN_COMPLETION_QUEUE_SCRATCH) + +#define SCU_MAX_CRITICAL_NOTIFICATIONS (384) +#define SCU_MAX_EVENTS (128) +#define SCU_MAX_UNSOLICITED_FRAMES (128) +#define SCU_MAX_COMPLETION_QUEUE_SCRATCH (128) +#define SCU_MAX_COMPLETION_QUEUE_ENTRIES (SCU_MAX_CRITICAL_NOTIFICATIONS \ + + SCU_MAX_EVENTS \ + + SCU_MAX_UNSOLICITED_FRAMES \ + + SCI_MAX_IO_REQUESTS \ + + SCU_MAX_COMPLETION_QUEUE_SCRATCH) + +#if !defined(ENABLE_MINIMUM_MEMORY_MODE) +#define SCU_UNSOLICITED_FRAME_COUNT SCU_MAX_UNSOLICITED_FRAMES +#define SCU_CRITICAL_NOTIFICATION_COUNT SCU_MAX_CRITICAL_NOTIFICATIONS +#define SCU_EVENT_COUNT SCU_MAX_EVENTS +#define SCU_COMPLETION_QUEUE_SCRATCH SCU_MAX_COMPLETION_QUEUE_SCRATCH +#define SCU_IO_REQUEST_COUNT SCI_MAX_IO_REQUESTS +#define SCU_IO_REQUEST_SGE_COUNT SCI_MAX_SCATTER_GATHER_ELEMENTS +#define SCU_COMPLETION_QUEUE_COUNT SCU_MAX_COMPLETION_QUEUE_ENTRIES +#else +#define SCU_UNSOLICITED_FRAME_COUNT SCU_MIN_UNSOLICITED_FRAMES +#define SCU_CRITICAL_NOTIFICATION_COUNT SCU_MIN_CRITICAL_NOTIFICATIONS +#define SCU_EVENT_COUNT SCU_MIN_EVENTS +#define SCU_COMPLETION_QUEUE_SCRATCH SCU_MIN_COMPLETION_QUEUE_SCRATCH +#define SCU_IO_REQUEST_COUNT SCI_MIN_IO_REQUESTS +#define SCU_IO_REQUEST_SGE_COUNT SCI_MIN_SCATTER_GATHER_ELEMENTS +#define SCU_COMPLETION_QUEUE_COUNT SCU_MIN_COMPLETION_QUEUE_ENTRIES +#endif /* !defined(ENABLE_MINIMUM_MEMORY_OPERATION) */ + +/** + * + * + * The SCU_COMPLETION_QUEUE_COUNT constant indicates the size of the completion + * queue into which the hardware DMAs 32-bit quantas (completion entries). + */ + +/** + * + * + * This queue must be programmed to a power of 2 size (e.g. 32, 64, 1024, etc.). 
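+ * With the maximum defaults above the count works out to
+ * 384 + 128 + 128 + 256 + 128 = 1024 entries; with
+ * ENABLE_MINIMUM_MEMORY_MODE it is 24 + 4 + 1 + 1 + 2 = 32 entries.
+ * Both values satisfy the power-of-2 check that follows.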
+ */ +#if (SCU_COMPLETION_QUEUE_COUNT != 16) && \ + (SCU_COMPLETION_QUEUE_COUNT != 32) && \ + (SCU_COMPLETION_QUEUE_COUNT != 64) && \ + (SCU_COMPLETION_QUEUE_COUNT != 128) && \ + (SCU_COMPLETION_QUEUE_COUNT != 256) && \ + (SCU_COMPLETION_QUEUE_COUNT != 512) && \ + (SCU_COMPLETION_QUEUE_COUNT != 1024) +#error "SCU_COMPLETION_QUEUE_COUNT must be set to a power of 2." +#endif + +#if SCU_MIN_UNSOLICITED_FRAMES > SCU_MAX_UNSOLICITED_FRAMES +#error "Invalid configuration of unsolicited frame constants" +#endif /* SCU_MIN_UNSOLICITED_FRAMES > SCU_MAX_UNSOLICITED_FRAMES */ + +#define SCU_MIN_UF_TABLE_ENTRIES (8) +#define SCU_ABSOLUTE_MAX_UNSOLICITED_FRAMES (4096) +#define SCU_UNSOLICITED_FRAME_BUFFER_SIZE (1024) +#define SCU_INVALID_FRAME_INDEX (0xFFFF) + +#define SCU_IO_REQUEST_MAX_SGE_SIZE (0x00FFFFFF) +#define SCU_IO_REQUEST_MAX_TRANSFER_LENGTH (0x00FFFFFF) + +/* + * Determine the size of the unsolicited frame array including + * unused buffers. */ +#if SCU_UNSOLICITED_FRAME_COUNT <= SCU_MIN_UF_TABLE_ENTRIES +#define SCU_UNSOLICITED_FRAME_CONTROL_ARRAY_SIZE SCU_MIN_UF_TABLE_ENTRIES +#else +#define SCU_UNSOLICITED_FRAME_CONTROL_ARRAY_SIZE SCU_MAX_UNSOLICITED_FRAMES +#endif /* SCU_UNSOLICITED_FRAME_COUNT <= SCU_MIN_UF_TABLE_ENTRIES */ + +/** + * enum sci_status - This is the general return status enumeration for non-IO, + * non-task management related SCI interface methods. + * + * + */ +enum sci_status { + /** + * This member indicates successful completion. + */ + SCI_SUCCESS = 0, + + /** + * This value indicates that the calling method completed successfully, + * but that the IO may have completed before having it's start method + * invoked. This occurs during SAT translation for requests that do + * not require an IO to the target or for any other requests that may + * be completed without having to submit IO. + */ + SCI_SUCCESS_IO_COMPLETE_BEFORE_START, + + /** + * This Value indicates that the SCU hardware returned an early response + * because the io request specified more data than is returned by the + * target device (mode pages, inquiry data, etc.). The completion routine + * will handle this case to get the actual number of bytes transferred. + */ + SCI_SUCCESS_IO_DONE_EARLY, + + /** + * This member indicates that the object for which a state change is + * being requested is already in said state. + */ + SCI_WARNING_ALREADY_IN_STATE, + + /** + * This member indicates interrupt coalescence timer may cause SAS + * specification compliance issues (i.e. SMP target mode response + * frames must be returned within 1.9 milliseconds). + */ + SCI_WARNING_TIMER_CONFLICT, + + /** + * This field indicates a sequence of action is not completed yet. Mostly, + * this status is used when multiple ATA commands are needed in a SATI translation. + */ + SCI_WARNING_SEQUENCE_INCOMPLETE, + + /** + * This member indicates that there was a general failure. + */ + SCI_FAILURE, + + /** + * This member indicates that the SCI implementation is unable to complete + * an operation due to a critical flaw the prevents any further operation + * (i.e. an invalid pointer). + */ + SCI_FATAL_ERROR, + + /** + * This member indicates the calling function failed, because the state + * of the controller is in a state that prevents successful completion. + */ + SCI_FAILURE_INVALID_STATE, + + /** + * This member indicates the calling function failed, because there is + * insufficient resources/memory to complete the request. 
+ */ + SCI_FAILURE_INSUFFICIENT_RESOURCES, + + /** + * This member indicates the calling function failed, because the + * controller object required for the operation can't be located. + */ + SCI_FAILURE_CONTROLLER_NOT_FOUND, + + /** + * This member indicates the calling function failed, because the + * discovered controller type is not supported by the library. + */ + SCI_FAILURE_UNSUPPORTED_CONTROLLER_TYPE, + + /** + * This member indicates the calling function failed, because the + * requested initialization data version isn't supported. + */ + SCI_FAILURE_UNSUPPORTED_INIT_DATA_VERSION, + + /** + * This member indicates the calling function failed, because the + * requested configuration of SAS Phys into SAS Ports is not supported. + */ + SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION, + + /** + * This member indicates the calling function failed, because the + * requested protocol is not supported by the remote device, port, + * or controller. + */ + SCI_FAILURE_UNSUPPORTED_PROTOCOL, + + /** + * This member indicates the calling function failed, because the + * requested information type is not supported by the SCI implementation. + */ + SCI_FAILURE_UNSUPPORTED_INFORMATION_TYPE, + + /** + * This member indicates the calling function failed, because the + * device already exists. + */ + SCI_FAILURE_DEVICE_EXISTS, + + /** + * This member indicates the calling function failed, because adding + * a phy to the object is not possible. + */ + SCI_FAILURE_ADDING_PHY_UNSUPPORTED, + + /** + * This member indicates the calling function failed, because the + * requested information type is not supported by the SCI implementation. + */ + SCI_FAILURE_UNSUPPORTED_INFORMATION_FIELD, + + /** + * This member indicates the calling function failed, because the SCI + * implementation does not support the supplied time limit. + */ + SCI_FAILURE_UNSUPPORTED_TIME_LIMIT, + + /** + * This member indicates the calling method failed, because the SCI + * implementation does not contain the specified Phy. + */ + SCI_FAILURE_INVALID_PHY, + + /** + * This member indicates the calling method failed, because the SCI + * implementation does not contain the specified Port. + */ + SCI_FAILURE_INVALID_PORT, + + /** + * This member indicates the calling method was partly successful + * The port was reset but not all phys in port are operational + */ + SCI_FAILURE_RESET_PORT_PARTIAL_SUCCESS, + + /** + * This member indicates that calling method failed + * The port reset did not complete because none of the phys are operational + */ + SCI_FAILURE_RESET_PORT_FAILURE, + + /** + * This member indicates the calling method failed, because the SCI + * implementation does not contain the specified remote device. + */ + SCI_FAILURE_INVALID_REMOTE_DEVICE, + + /** + * This member indicates the calling method failed, because the remote + * device is in a bad state and requires a reset. + */ + SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED, + + /** + * This member indicates the calling method failed, because the SCI + * implementation does not contain or support the specified IO tag. + */ + SCI_FAILURE_INVALID_IO_TAG, + + /** + * This member indicates that the operation failed and the user should + * check the response data associated with the IO. + */ + SCI_FAILURE_IO_RESPONSE_VALID, + + /** + * This member indicates that the operation failed, the failure is + * controller implementation specific, and the response data associated + * with the request is not valid. 
You can query for the controller + * specific error information via scic_controller_get_request_status() + */ + SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR, + + /** + * This member indicated that the operation failed because the + * user requested this IO to be terminated. + */ + SCI_FAILURE_IO_TERMINATED, + + /** + * This member indicates that the operation failed and the associated + * request requires a SCSI abort task to be sent to the target. + */ + SCI_FAILURE_IO_REQUIRES_SCSI_ABORT, + + /** + * This member indicates that the operation failed because the supplied + * device could not be located. + */ + SCI_FAILURE_DEVICE_NOT_FOUND, + + /** + * This member indicates that the operation failed because the + * objects association is required and is not correctly set. + */ + SCI_FAILURE_INVALID_ASSOCIATION, + + /** + * This member indicates that the operation failed, because a timeout + * occurred. + */ + SCI_FAILURE_TIMEOUT, + + /** + * This member indicates that the operation failed, because the user + * specified a value that is either invalid or not supported. + */ + SCI_FAILURE_INVALID_PARAMETER_VALUE, + + /** + * This value indicates that the operation failed, because the number + * of messages (MSI-X) is not supported. + */ + SCI_FAILURE_UNSUPPORTED_MESSAGE_COUNT, + + /** + * This value indicates that the method failed due to a lack of + * available NCQ tags. + */ + SCI_FAILURE_NO_NCQ_TAG_AVAILABLE, + + /** + * This value indicates that a protocol violation has occurred on the + * link. + */ + SCI_FAILURE_PROTOCOL_VIOLATION, + + /** + * This value indicates a failure condition that retry may help to clear. + */ + SCI_FAILURE_RETRY_REQUIRED, + + /** + * This field indicates the retry limit was reached when a retry is attempted + */ + SCI_FAILURE_RETRY_LIMIT_REACHED, + + /** + * This member indicates the calling method was partly successful. + * Mostly, this status is used when a LUN_RESET issued to an expander attached + * STP device in READY NCQ substate needs to have RNC suspended/resumed + * before posting TC. + */ + SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS, + + /** + * This field indicates an illegal phy connection based on the routing attribute + * of both expander phy attached to each other. + */ + SCI_FAILURE_ILLEGAL_ROUTING_ATTRIBUTE_CONFIGURATION, + + /** + * This field indicates a CONFIG ROUTE INFO command has a response with function result + * INDEX DOES NOT EXIST, usually means exceeding max route index. + */ + SCI_FAILURE_EXCEED_MAX_ROUTE_INDEX, + + /** + * This value indicates that an unsupported PCI device ID has been + * specified. This indicates that attempts to invoke + * scic_library_allocate_controller() will fail. + */ + SCI_FAILURE_UNSUPPORTED_PCI_DEVICE_ID + +}; + +/** + * enum sci_io_status - This enumeration depicts all of the possible IO + * completion status values. Each value in this enumeration maps directly + * to a value in the enum sci_status enumeration. Please refer to that + * enumeration for detailed comments concerning what the status represents. + * + * Add the API to retrieve the SCU status from the core. 
Check to see that the + * following status are properly handled: - SCI_IO_FAILURE_UNSUPPORTED_PROTOCOL + * - SCI_IO_FAILURE_INVALID_IO_TAG + */ +enum sci_io_status { + SCI_IO_SUCCESS = SCI_SUCCESS, + SCI_IO_FAILURE = SCI_FAILURE, + SCI_IO_SUCCESS_COMPLETE_BEFORE_START = SCI_SUCCESS_IO_COMPLETE_BEFORE_START, + SCI_IO_SUCCESS_IO_DONE_EARLY = SCI_SUCCESS_IO_DONE_EARLY, + SCI_IO_FAILURE_INVALID_STATE = SCI_FAILURE_INVALID_STATE, + SCI_IO_FAILURE_INSUFFICIENT_RESOURCES = SCI_FAILURE_INSUFFICIENT_RESOURCES, + SCI_IO_FAILURE_UNSUPPORTED_PROTOCOL = SCI_FAILURE_UNSUPPORTED_PROTOCOL, + SCI_IO_FAILURE_RESPONSE_VALID = SCI_FAILURE_IO_RESPONSE_VALID, + SCI_IO_FAILURE_CONTROLLER_SPECIFIC_ERR = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR, + SCI_IO_FAILURE_TERMINATED = SCI_FAILURE_IO_TERMINATED, + SCI_IO_FAILURE_REQUIRES_SCSI_ABORT = SCI_FAILURE_IO_REQUIRES_SCSI_ABORT, + SCI_IO_FAILURE_INVALID_PARAMETER_VALUE = SCI_FAILURE_INVALID_PARAMETER_VALUE, + SCI_IO_FAILURE_NO_NCQ_TAG_AVAILABLE = SCI_FAILURE_NO_NCQ_TAG_AVAILABLE, + SCI_IO_FAILURE_PROTOCOL_VIOLATION = SCI_FAILURE_PROTOCOL_VIOLATION, + + SCI_IO_FAILURE_REMOTE_DEVICE_RESET_REQUIRED = SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED, + + SCI_IO_FAILURE_RETRY_REQUIRED = SCI_FAILURE_RETRY_REQUIRED, + SCI_IO_FAILURE_RETRY_LIMIT_REACHED = SCI_FAILURE_RETRY_LIMIT_REACHED, + SCI_IO_FAILURE_INVALID_REMOTE_DEVICE = SCI_FAILURE_INVALID_REMOTE_DEVICE +}; + +/** + * enum sci_task_status - This enumeration depicts all of the possible task + * completion status values. Each value in this enumeration maps directly + * to a value in the enum sci_status enumeration. Please refer to that + * enumeration for detailed comments concerning what the status represents. + * + * Check to see that the following status are properly handled: + */ +enum sci_task_status { + SCI_TASK_SUCCESS = SCI_SUCCESS, + SCI_TASK_FAILURE = SCI_FAILURE, + SCI_TASK_FAILURE_INVALID_STATE = SCI_FAILURE_INVALID_STATE, + SCI_TASK_FAILURE_INSUFFICIENT_RESOURCES = SCI_FAILURE_INSUFFICIENT_RESOURCES, + SCI_TASK_FAILURE_UNSUPPORTED_PROTOCOL = SCI_FAILURE_UNSUPPORTED_PROTOCOL, + SCI_TASK_FAILURE_INVALID_TAG = SCI_FAILURE_INVALID_IO_TAG, + SCI_TASK_FAILURE_RESPONSE_VALID = SCI_FAILURE_IO_RESPONSE_VALID, + SCI_TASK_FAILURE_CONTROLLER_SPECIFIC_ERR = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR, + SCI_TASK_FAILURE_TERMINATED = SCI_FAILURE_IO_TERMINATED, + SCI_TASK_FAILURE_INVALID_PARAMETER_VALUE = SCI_FAILURE_INVALID_PARAMETER_VALUE, + + SCI_TASK_FAILURE_REMOTE_DEVICE_RESET_REQUIRED = SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED, + SCI_TASK_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS = SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS + +}; extern unsigned char no_outbound_task_to; extern u16 ssp_max_occ_to; @@ -85,9 +533,9 @@ irqreturn_t isci_msix_isr(int vec, void *data); irqreturn_t isci_intx_isr(int vec, void *data); irqreturn_t isci_error_isr(int vec, void *data); +struct scic_sds_controller; bool scic_sds_controller_isr(struct scic_sds_controller *scic); void scic_sds_controller_completion_handler(struct scic_sds_controller *scic); bool scic_sds_controller_error_isr(struct scic_sds_controller *scic); void scic_sds_controller_error_handler(struct scic_sds_controller *scic); - #endif /* __ISCI_H__ */ diff --git a/drivers/scsi/isci/phy.c b/drivers/scsi/isci/phy.c index 32800491832..1134395c970 100644 --- a/drivers/scsi/isci/phy.c +++ b/drivers/scsi/isci/phy.c @@ -54,10 +54,10 @@ */ #include "isci.h" +#include "host.h" #include "phy.h" #include "scic_port.h" #include "scic_config_parameters.h" -#include "core/scic_sds_phy.h" struct scic_sds_phy; 
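Because every enumerator in sci_io_status and sci_task_status above is declared as an alias of an enum sci_status value, the three enumerations share one value space and callers can move between them without a translation table. A minimal sketch, with an illustrative helper name:

	static inline enum sci_status io_status_to_sci_status(enum sci_io_status status)
	{
		/* The enumerators are defined to identical values, so the cast is lossless. */
		return (enum sci_status)status;
	}

This is the design intent called out in the kerneldoc above: the I/O- and task-specific status sets are views onto the general sci_status codes, not independent error spaces.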
extern enum sci_status scic_sds_phy_start(struct scic_sds_phy *sci_phy); diff --git a/drivers/scsi/isci/phy.h b/drivers/scsi/isci/phy.h index 93ec2d4a9c3..3a95adbb508 100644 --- a/drivers/scsi/isci/phy.h +++ b/drivers/scsi/isci/phy.h @@ -52,16 +52,12 @@ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ - - #ifndef _ISCI_PHY_H_ #define _ISCI_PHY_H_ #include <scsi/sas.h> #include <scsi/libsas.h> #include "scic_sds_phy.h" -#include "port.h" -#include "host.h" struct isci_phy { struct scic_sds_phy sci; @@ -89,6 +85,7 @@ static inline struct isci_phy *sci_phy_to_iphy(struct scic_sds_phy *sci_phy) return iphy; } +struct isci_host; void isci_phy_init(struct isci_phy *iphy, struct isci_host *ihost, int index); int isci_phy_control(struct asd_sas_phy *phy, enum phy_func func, void *buf); diff --git a/drivers/scsi/isci/port.h b/drivers/scsi/isci/port.h index 355034542e4..59505cbf2bb 100644 --- a/drivers/scsi/isci/port.h +++ b/drivers/scsi/isci/port.h @@ -55,6 +55,9 @@ #ifndef _ISCI_PORT_H_ #define _ISCI_PORT_H_ + +#include <scsi/libsas.h> +#include "isci.h" #include "scic_sds_port.h" struct isci_phy; diff --git a/drivers/scsi/isci/probe_roms.c b/drivers/scsi/isci/probe_roms.c index 933d81109ea..70551744044 100644 --- a/drivers/scsi/isci/probe_roms.c +++ b/drivers/scsi/isci/probe_roms.c @@ -32,7 +32,6 @@ #include "isci.h" #include "task.h" -#include "sci_controller_constants.h" #include "sci_environment.h" #include "probe_roms.h" diff --git a/drivers/scsi/isci/probe_roms.h b/drivers/scsi/isci/probe_roms.h index 55983363764..f4ef19ac281 100644 --- a/drivers/scsi/isci/probe_roms.h +++ b/drivers/scsi/isci/probe_roms.h @@ -58,6 +58,7 @@ #ifdef __KERNEL__ #include <linux/firmware.h> #include <linux/pci.h> +#include "isci.h" struct isci_orom *isci_request_oprom(struct pci_dev *pdev); diff --git a/drivers/scsi/isci/remote_device.h b/drivers/scsi/isci/remote_device.h index 2e433b7f16b..a118f5873f6 100644 --- a/drivers/scsi/isci/remote_device.h +++ b/drivers/scsi/isci/remote_device.h @@ -56,7 +56,6 @@ #ifndef _ISCI_REMOTE_DEVICE_H_ #define _ISCI_REMOTE_DEVICE_H_ #include <scsi/libsas.h> -#include "sci_status.h" #include "scu_remote_node_context.h" #include "remote_node_context.h" #include "port.h" diff --git a/drivers/scsi/isci/remote_node_table.h b/drivers/scsi/isci/remote_node_table.h index 9c02a6ccb2f..5737d9a30cc 100644 --- a/drivers/scsi/isci/remote_node_table.h +++ b/drivers/scsi/isci/remote_node_table.h @@ -56,14 +56,7 @@ #ifndef _SCIC_SDS_REMOTE_NODE_TABLE_H_ #define _SCIC_SDS_REMOTE_NODE_TABLE_H_ -/** - * This file contains the structures, constants and prototypes used for the - * remote node table. 
- * - * - */ - -#include "sci_controller_constants.h" +#include "isci.h" /** * diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c index 4961ee34709..a58072807a3 100644 --- a/drivers/scsi/isci/request.c +++ b/drivers/scsi/isci/request.c @@ -62,6 +62,7 @@ #include "sata.h" #include "scu_completion_codes.h" #include "scic_sds_request.h" +#include "scic_controller.h" #include "sas.h" static enum sci_status isci_request_ssp_request_construct( diff --git a/drivers/scsi/isci/request.h b/drivers/scsi/isci/request.h index 89d8b0a27df..06786ece9bb 100644 --- a/drivers/scsi/isci/request.h +++ b/drivers/scsi/isci/request.h @@ -57,6 +57,7 @@ #define _ISCI_REQUEST_H_ #include "isci.h" +#include "host.h" #include "scic_sds_request.h" /** diff --git a/drivers/scsi/isci/sci_environment.h b/drivers/scsi/isci/sci_environment.h index 1806969fc43..30addba4d43 100644 --- a/drivers/scsi/isci/sci_environment.h +++ b/drivers/scsi/isci/sci_environment.h @@ -56,8 +56,7 @@ #ifndef _SCI_ENVIRONMENT_H_ #define _SCI_ENVIRONMENT_H_ -#include "isci.h" -#include "core/scic_sds_controller.h" +#include "host.h" static inline struct device *scic_to_dev(struct scic_sds_controller *scic) diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c index 7adaf71c19d..597c49020ac 100644 --- a/drivers/scsi/isci/task.c +++ b/drivers/scsi/isci/task.c @@ -65,6 +65,8 @@ #include "sata.h" #include "task.h" #include "scic_sds_request.h" +#include "scic_controller.h" +#include "timers.h" /** * isci_task_refuse() - complete the request to the upper layer driver in diff --git a/drivers/scsi/isci/task.h b/drivers/scsi/isci/task.h index ecc5f139506..c59dc9620cf 100644 --- a/drivers/scsi/isci/task.h +++ b/drivers/scsi/isci/task.h @@ -56,6 +56,7 @@ #define _ISCI_TASK_H_ #include <scsi/sas_ata.h> +#include "host.h" struct isci_request; diff --git a/drivers/scsi/isci/timers.c b/drivers/scsi/isci/timers.c index f33eff00dc0..007700e3eb6 100644 --- a/drivers/scsi/isci/timers.c +++ b/drivers/scsi/isci/timers.c @@ -55,6 +55,7 @@ #include "isci.h" #include "timers.h" +#include "host.h" /** * isci_timer_list_construct() - This method contrucst the SCI Timer List -- cgit v1.2.3-70-g09d2 From f1f52e75939b56c40b3d153ae99faf2720250242 Mon Sep 17 00:00:00 2001 From: Dan Williams <dan.j.williams@intel.com> Date: Tue, 10 May 2011 02:28:45 -0700 Subject: isci: uplevel request infrastructure * Consolidate tiny header files * Move files out of core/ (drop core/scic_sds_ prefix) * Merge core/scic_sds_request.[ch] into request.[ch] * Cleanup request.c namespace (clean forward declarations and global namespace pollution) Reported-by: Christoph Hellwig <hch@lst.de> Signed-off-by: Dan Williams <dan.j.williams@intel.com> --- drivers/scsi/isci/Makefile | 8 +- drivers/scsi/isci/core/sci_util.c | 86 - drivers/scsi/isci/core/sci_util.h | 97 - drivers/scsi/isci/core/scic_io_request.h | 226 -- drivers/scsi/isci/core/scic_sds_phy.c | 1 - drivers/scsi/isci/core/scic_sds_port.c | 2 +- drivers/scsi/isci/core/scic_sds_request.c | 1523 ---------- drivers/scsi/isci/core/scic_sds_request.h | 491 --- drivers/scsi/isci/core/scic_sds_smp_request.c | 520 ---- drivers/scsi/isci/core/scic_sds_smp_request.h | 67 - drivers/scsi/isci/core/scic_sds_ssp_request.c | 240 -- .../scsi/isci/core/scic_sds_stp_packet_request.h | 113 - drivers/scsi/isci/core/scic_sds_stp_pio_request.h | 104 - drivers/scsi/isci/core/scic_sds_stp_request.c | 1594 ---------- drivers/scsi/isci/core/scic_sds_stp_request.h | 178 -- drivers/scsi/isci/core/scic_task_request.h | 104 - 
drivers/scsi/isci/host.c | 2 - drivers/scsi/isci/isci.h | 19 + drivers/scsi/isci/port.c | 1 - drivers/scsi/isci/remote_device.c | 3 - drivers/scsi/isci/remote_node_context.c | 1 - drivers/scsi/isci/remote_node_table.c | 1 - drivers/scsi/isci/request.c | 3124 ++++++++++++++------ drivers/scsi/isci/request.h | 442 ++- drivers/scsi/isci/sata.c | 2 - drivers/scsi/isci/smp_request.c | 518 ++++ drivers/scsi/isci/ssp_request.c | 240 ++ drivers/scsi/isci/stp_request.c | 1590 ++++++++++ drivers/scsi/isci/stp_request.h | 195 ++ drivers/scsi/isci/task.c | 3 - drivers/scsi/isci/unsolicited_frame_control.c | 5 +- 31 files changed, 5248 insertions(+), 6252 deletions(-) delete mode 100644 drivers/scsi/isci/core/sci_util.c delete mode 100644 drivers/scsi/isci/core/sci_util.h delete mode 100644 drivers/scsi/isci/core/scic_io_request.h delete mode 100644 drivers/scsi/isci/core/scic_sds_request.c delete mode 100644 drivers/scsi/isci/core/scic_sds_request.h delete mode 100644 drivers/scsi/isci/core/scic_sds_smp_request.c delete mode 100644 drivers/scsi/isci/core/scic_sds_smp_request.h delete mode 100644 drivers/scsi/isci/core/scic_sds_ssp_request.c delete mode 100644 drivers/scsi/isci/core/scic_sds_stp_packet_request.h delete mode 100644 drivers/scsi/isci/core/scic_sds_stp_pio_request.h delete mode 100644 drivers/scsi/isci/core/scic_sds_stp_request.c delete mode 100644 drivers/scsi/isci/core/scic_sds_stp_request.h delete mode 100644 drivers/scsi/isci/core/scic_task_request.h create mode 100644 drivers/scsi/isci/smp_request.c create mode 100644 drivers/scsi/isci/ssp_request.c create mode 100644 drivers/scsi/isci/stp_request.c create mode 100644 drivers/scsi/isci/stp_request.h (limited to 'drivers/scsi/isci/request.h') diff --git a/drivers/scsi/isci/Makefile b/drivers/scsi/isci/Makefile index cfc640f9dc3..a7d1eb33eca 100644 --- a/drivers/scsi/isci/Makefile +++ b/drivers/scsi/isci/Makefile @@ -7,11 +7,9 @@ isci-objs := init.o phy.o request.o sata.o \ remote_node_context.o \ remote_node_table.o \ unsolicited_frame_control.o \ - core/scic_sds_request.o \ - core/scic_sds_stp_request.o \ + stp_request.o \ + ssp_request.o \ + smp_request.o \ core/scic_sds_port.o \ core/scic_sds_port_configuration_agent.o \ core/scic_sds_phy.o \ - core/scic_sds_ssp_request.o \ - core/scic_sds_smp_request.o \ - core/sci_util.o diff --git a/drivers/scsi/isci/core/sci_util.c b/drivers/scsi/isci/core/sci_util.c deleted file mode 100644 index 595d8da1abb..00000000000 --- a/drivers/scsi/isci/core/sci_util.c +++ /dev/null @@ -1,86 +0,0 @@ -/* - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - * The full GNU General Public License is included in this distribution - * in the file called LICENSE.GPL. 
- * - * BSD LICENSE - * - * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#include <linux/kernel.h> -#include "sci_util.h" -#include "request.h" - -void *scic_request_get_virt_addr(struct scic_sds_request *sci_req, dma_addr_t phys_addr) -{ - struct isci_request *ireq = sci_req_to_ireq(sci_req); - dma_addr_t offset; - - BUG_ON(phys_addr < ireq->request_daddr); - - offset = phys_addr - ireq->request_daddr; - - BUG_ON(offset >= sizeof(*ireq)); - - return (char *)ireq + offset; -} - -dma_addr_t scic_io_request_get_dma_addr(struct scic_sds_request *sds_request, - void *virt_addr) -{ - struct isci_request *isci_request = sci_req_to_ireq(sds_request); - - char *requested_addr = (char *)virt_addr; - char *base_addr = (char *)isci_request; - - BUG_ON(requested_addr < base_addr); - BUG_ON((requested_addr - base_addr) >= sizeof(*isci_request)); - - return isci_request->request_daddr + (requested_addr - base_addr); -} diff --git a/drivers/scsi/isci/core/sci_util.h b/drivers/scsi/isci/core/sci_util.h deleted file mode 100644 index 0f9dd0fe126..00000000000 --- a/drivers/scsi/isci/core/sci_util.h +++ /dev/null @@ -1,97 +0,0 @@ -/* - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - * The full GNU General Public License is included in this distribution - * in the file called LICENSE.GPL. - * - * BSD LICENSE - * - * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#ifndef _SCI_UTIL_H_ -#define _SCI_UTIL_H_ - -#include "scic_sds_request.h" - -#define SCIC_BUILD_DWORD(char_buffer) \ - (\ - ((char_buffer)[0] << 24) \ - | ((char_buffer)[1] << 16) \ - | ((char_buffer)[2] << 8) \ - | ((char_buffer)[3]) \ - ) - -#define sci_cb_make_physical_address(physical_addr, addr_upper, addr_lower) \ - ((physical_addr) = (addr_lower) | ((u64)addr_upper) << 32) - -/** - * sci_swab32_cpy - convert between scsi and scu-hardware byte format - * @dest: receive the 4-byte endian swapped version of src - * @src: word aligned source buffer - * - * scu hardware handles SSP/SMP control, response, and unidentified - * frames in "big endian dword" order. Regardless of host endian this - * is always a swab32()-per-dword conversion of the standard definition, - * i.e. single byte fields swapped and multi-byte fields in little- - * endian - */ -static inline void sci_swab32_cpy(void *_dest, void *_src, ssize_t word_cnt) -{ - u32 *dest = _dest, *src = _src; - - while (--word_cnt >= 0) - dest[word_cnt] = swab32(src[word_cnt]); -} - -void *scic_request_get_virt_addr(struct scic_sds_request *sds_request, - dma_addr_t phys_addr); - -dma_addr_t scic_io_request_get_dma_addr(struct scic_sds_request *sds_request, - void *virt_addr); - -#endif /* _SCI_UTIL_H_ */ diff --git a/drivers/scsi/isci/core/scic_io_request.h b/drivers/scsi/isci/core/scic_io_request.h deleted file mode 100644 index a4664cc3c57..00000000000 --- a/drivers/scsi/isci/core/scic_io_request.h +++ /dev/null @@ -1,226 +0,0 @@ -/* - * This file is provided under a dual BSD/GPLv2 license. 
When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - * The full GNU General Public License is included in this distribution - * in the file called LICENSE.GPL. - * - * BSD LICENSE - * - * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#ifndef _SCIC_IO_REQUEST_H_ -#define _SCIC_IO_REQUEST_H_ - -#include <linux/kernel.h> - -struct scic_sds_request; -struct scic_sds_remote_device; -struct scic_sds_controller; - -/** - * This enumeration specifies the transport protocol utilized for the request. - * - * - */ -typedef enum { - /** - * This enumeration constant indicates that no protocol has yet been - * set. - */ - SCIC_NO_PROTOCOL, - - /** - * This enumeration constant indicates that the protocol utilized - * is the Serial Management Protocol. - */ - SCIC_SMP_PROTOCOL, - - /** - * This enumeration constant indicates that the protocol utilized - * is the Serial SCSI Protocol. - */ - SCIC_SSP_PROTOCOL, - - /** - * This enumeration constant indicates that the protocol utilized - * is the Serial-ATA Tunneling Protocol. 
- */ - SCIC_STP_PROTOCOL - -} SCIC_TRANSPORT_PROTOCOL; - -enum sci_status scic_io_request_construct( - struct scic_sds_controller *scic_controller, - struct scic_sds_remote_device *scic_remote_device, - u16 io_tag, struct scic_sds_request *sci_req); - -/** - * scic_io_request_construct_basic_ssp() - This method is called by the SCI - * user to build an SSP IO request. - * @scic_io_request: This parameter specifies the handle to the io request - * object to be built. - * - * - The user must have previously called scic_io_request_construct() on the - * supplied IO request. Indicate if the controller successfully built the IO - * request. SCI_SUCCESS This value is returned if the IO request was - * successfully built. SCI_FAILURE_UNSUPPORTED_PROTOCOL This value is returned - * if the remote_device does not support the SSP protocol. - * SCI_FAILURE_INVALID_ASSOCIATION This value is returned if the user did not - * properly set the association between the SCIC IO request and the user's IO - * request. - */ -enum sci_status scic_io_request_construct_basic_ssp( - struct scic_sds_request *scic_io_request); - - - - - -/** - * scic_io_request_construct_basic_sata() - This method is called by the SCI - * Core user to build an STP IO request. - * @scic_io_request: This parameter specifies the handle to the io request - * object to be built. - * - * - The user must have previously called scic_io_request_construct() on the - * supplied IO request. Indicate if the controller successfully built the IO - * request. SCI_SUCCESS This value is returned if the IO request was - * successfully built. SCI_FAILURE_UNSUPPORTED_PROTOCOL This value is returned - * if the remote_device does not support the STP protocol. - * SCI_FAILURE_INVALID_ASSOCIATION This value is returned if the user did not - * properly set the association between the SCIC IO request and the user's IO - * request. - */ -enum sci_status scic_io_request_construct_basic_sata( - struct scic_sds_request *scic_io_request); - - - - -/** - * scic_io_request_construct_smp() - This method is called by the SCI user to - * build an SMP IO request. - * @scic_io_request: This parameter specifies the handle to the io request - * object to be built. - * - * - The user must have previously called scic_io_request_construct() on the - * supplied IO request. Indicate if the controller successfully built the IO - * request. SCI_SUCCESS This value is returned if the IO request was - * successfully built. SCI_FAILURE_UNSUPPORTED_PROTOCOL This value is returned - * if the remote_device does not support the SMP protocol. - * SCI_FAILURE_INVALID_ASSOCIATION This value is returned if the user did not - * properly set the association between the SCIC IO request and the user's IO - * request. - */ -enum sci_status scic_io_request_construct_smp( - struct scic_sds_request *scic_io_request); - - - -/** - * scic_request_get_controller_status() - This method returns the controller - * specific IO/Task request status. These status values are unique to the - * specific controller being managed by the SCIC. - * @io_request: the handle to the IO or task management request object for - * which to retrieve the status. - * - * This method returns a value indicating the controller specific request - * status. - */ -u32 scic_request_get_controller_status( - struct scic_sds_request *io_request); - -/** - * scic_io_request_get_io_tag() - This method will return the IO tag utilized - * by the IO request. 
- * @scic_io_request: This parameter specifies the handle to the io request - * object for which to return the IO tag. - * - * An unsigned integer representing the IO tag being utilized. - * SCI_CONTROLLER_INVALID_IO_TAG This value is returned if the IO does not - * currently have an IO tag allocated to it. All return other values indicate a - * legitimate tag. - */ -u16 scic_io_request_get_io_tag( - struct scic_sds_request *scic_io_request); - - -/** - * scic_stp_io_request_set_ncq_tag() - This method will assign an NCQ tag to - * the io request object. The caller of this function must make sure that - * only valid NCQ tags are assigned to the io request object. - * @scic_io_request: This parameter specifies the handle to the io request - * object to which to assign the ncq tag. - * @ncq_tag: This parameter specifies the NCQ tag to be utilized for the - * supplied core IO request. It is up to the user to make sure that this is - * a valid NCQ tag. - * - * none This function is only valid for SATA NCQ requests. - */ -void scic_stp_io_request_set_ncq_tag( - struct scic_sds_request *scic_io_request, - u16 ncq_tag); - -/** - * scic_io_request_get_number_of_bytes_transferred() - This method will return - * the number of bytes transferred from the SCU - * @scic_io_request: This parameter specifies the handle to the io request - * whose data length was not eqaul to the data length specified in the - * request. When the driver gets an early io completion status from the - * hardware, this routine should be called to get the actual number of bytes - * transferred - * - * The return is the number of bytes transferred when the data legth is not - * equal to the specified length in the io request - */ -u32 scic_io_request_get_number_of_bytes_transferred( - struct scic_sds_request *scic_io_request); - - -#endif /* _SCIC_IO_REQUEST_H_ */ - diff --git a/drivers/scsi/isci/core/scic_sds_phy.c b/drivers/scsi/isci/core/scic_sds_phy.c index 6b49d94bc9c..150509b0c69 100644 --- a/drivers/scsi/isci/core/scic_sds_phy.c +++ b/drivers/scsi/isci/core/scic_sds_phy.c @@ -61,7 +61,6 @@ #include "scic_sds_phy.h" #include "scic_sds_port.h" #include "remote_node_context.h" -#include "sci_util.h" #include "scu_event_codes.h" #include "timers.h" diff --git a/drivers/scsi/isci/core/scic_sds_port.c b/drivers/scsi/isci/core/scic_sds_port.c index 652917eeead..a9f3ce111b8 100644 --- a/drivers/scsi/isci/core/scic_sds_port.c +++ b/drivers/scsi/isci/core/scic_sds_port.c @@ -60,9 +60,9 @@ #include "scic_sds_port.h" #include "remote_device.h" #include "remote_node_context.h" -#include "scic_sds_request.h" #include "registers.h" #include "timers.h" +#include "scu_task_context.h" #define SCIC_SDS_PORT_MIN_TIMER_COUNT (SCI_MAX_PORTS) #define SCIC_SDS_PORT_MAX_TIMER_COUNT (SCI_MAX_PORTS) diff --git a/drivers/scsi/isci/core/scic_sds_request.c b/drivers/scsi/isci/core/scic_sds_request.c deleted file mode 100644 index cd279601de0..00000000000 --- a/drivers/scsi/isci/core/scic_sds_request.c +++ /dev/null @@ -1,1523 +0,0 @@ -/* - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. 
- * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - * The full GNU General Public License is included in this distribution - * in the file called LICENSE.GPL. - * - * BSD LICENSE - * - * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#include <scsi/sas.h> -#include "scic_io_request.h" -#include "registers.h" -#include "scic_sds_port.h" -#include "remote_device.h" -#include "scic_sds_request.h" -#include "scic_sds_smp_request.h" -#include "scic_sds_stp_request.h" -#include "unsolicited_frame_control.h" -#include "sci_util.h" -#include "scu_completion_codes.h" -#include "scu_task_context.h" -#include "request.h" -#include "task.h" - -/* - * **************************************************************************** - * * SCIC SDS IO REQUEST CONSTANTS - * **************************************************************************** */ - -/** - * - * - * We have no timer requirements for IO requests right now - */ -#define SCIC_SDS_IO_REQUEST_MINIMUM_TIMER_COUNT (0) -#define SCIC_SDS_IO_REQUEST_MAXIMUM_TIMER_COUNT (0) - -/** - * This method returns the sgl element pair for the specificed sgl_pair index. - * @sci_req: This parameter specifies the IO request for which to retrieve - * the Scatter-Gather List element pair. - * @sgl_pair_index: This parameter specifies the index into the SGL element - * pair to be retrieved. - * - * This method returns a pointer to an struct scu_sgl_element_pair. 
- */ -static struct scu_sgl_element_pair *scic_sds_request_get_sgl_element_pair( - struct scic_sds_request *sci_req, - u32 sgl_pair_index - ) { - struct scu_task_context *task_context; - - task_context = (struct scu_task_context *)sci_req->task_context_buffer; - - if (sgl_pair_index == 0) { - return &task_context->sgl_pair_ab; - } else if (sgl_pair_index == 1) { - return &task_context->sgl_pair_cd; - } - - return &sci_req->sg_table[sgl_pair_index - 2]; -} - -/** - * This function will build the SGL list for an IO request. - * @sci_req: This parameter specifies the IO request for which to build - * the Scatter-Gather List. - * - */ -void scic_sds_request_build_sgl(struct scic_sds_request *sds_request) -{ - struct isci_request *isci_request = sci_req_to_ireq(sds_request); - struct isci_host *isci_host = isci_request->isci_host; - struct sas_task *task = isci_request_access_task(isci_request); - struct scatterlist *sg = NULL; - dma_addr_t dma_addr; - u32 sg_idx = 0; - struct scu_sgl_element_pair *scu_sg = NULL; - struct scu_sgl_element_pair *prev_sg = NULL; - - if (task->num_scatter > 0) { - sg = task->scatter; - - while (sg) { - scu_sg = scic_sds_request_get_sgl_element_pair( - sds_request, - sg_idx); - - SCU_SGL_COPY(scu_sg->A, sg); - - sg = sg_next(sg); - - if (sg) { - SCU_SGL_COPY(scu_sg->B, sg); - sg = sg_next(sg); - } else - SCU_SGL_ZERO(scu_sg->B); - - if (prev_sg) { - dma_addr = - scic_io_request_get_dma_addr( - sds_request, - scu_sg); - - prev_sg->next_pair_upper = - upper_32_bits(dma_addr); - prev_sg->next_pair_lower = - lower_32_bits(dma_addr); - } - - prev_sg = scu_sg; - sg_idx++; - } - } else { /* handle when no sg */ - scu_sg = scic_sds_request_get_sgl_element_pair(sds_request, - sg_idx); - - dma_addr = dma_map_single(&isci_host->pdev->dev, - task->scatter, - task->total_xfer_len, - task->data_dir); - - isci_request->zero_scatter_daddr = dma_addr; - - scu_sg->A.length = task->total_xfer_len; - scu_sg->A.address_upper = upper_32_bits(dma_addr); - scu_sg->A.address_lower = lower_32_bits(dma_addr); - } - - if (scu_sg) { - scu_sg->next_pair_upper = 0; - scu_sg->next_pair_lower = 0; - } -} - -static void scic_sds_ssp_io_request_assign_buffers(struct scic_sds_request *sci_req) -{ - if (sci_req->was_tag_assigned_by_user == false) - sci_req->task_context_buffer = &sci_req->tc; -} - -static void scic_sds_io_request_build_ssp_command_iu(struct scic_sds_request *sci_req) -{ - struct ssp_cmd_iu *cmd_iu; - struct isci_request *ireq = sci_req_to_ireq(sci_req); - struct sas_task *task = isci_request_access_task(ireq); - - cmd_iu = &sci_req->ssp.cmd; - - memcpy(cmd_iu->LUN, task->ssp_task.LUN, 8); - cmd_iu->add_cdb_len = 0; - cmd_iu->_r_a = 0; - cmd_iu->_r_b = 0; - cmd_iu->en_fburst = 0; /* unsupported */ - cmd_iu->task_prio = task->ssp_task.task_prio; - cmd_iu->task_attr = task->ssp_task.task_attr; - cmd_iu->_r_c = 0; - - sci_swab32_cpy(&cmd_iu->cdb, task->ssp_task.cdb, - sizeof(task->ssp_task.cdb) / sizeof(u32)); -} - -static void scic_sds_task_request_build_ssp_task_iu(struct scic_sds_request *sci_req) -{ - struct ssp_task_iu *task_iu; - struct isci_request *ireq = sci_req_to_ireq(sci_req); - struct sas_task *task = isci_request_access_task(ireq); - struct isci_tmf *isci_tmf = isci_request_access_tmf(ireq); - - task_iu = &sci_req->ssp.tmf; - - memset(task_iu, 0, sizeof(struct ssp_task_iu)); - - memcpy(task_iu->LUN, task->ssp_task.LUN, 8); - - task_iu->task_func = isci_tmf->tmf_code; - task_iu->task_tag = - (ireq->ttype == tmf_task) ? 
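The SGL lookup and copy loop above pack two scatterlist entries into each SCU
element pair: pair 0 is task_context->sgl_pair_ab, pair 1 is sgl_pair_cd, and
pair N (N >= 2) is sg_table[N - 2], with each pair's next_pair_{upper,lower}
holding the DMA address of the pair that follows and the final pair's link
zeroed. A small hypothetical helper (not part of the driver) expressing the
index arithmetic:

/*
 * Hypothetical helper: which SCU SGL pair and element (0 = A, 1 = B) a
 * given scatterlist entry index lands in, per the lookup above.
 */
static inline void scu_sgl_slot(u32 sg_entry_idx, u32 *pair_idx, u32 *element)
{
	*pair_idx = sg_entry_idx / 2;	/* 0 -> sgl_pair_ab, 1 -> sgl_pair_cd,
					 * 2+ -> sg_table[pair_idx - 2] */
	*element = sg_entry_idx & 1;	/* even entries fill A, odd fill B */
}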
- isci_tmf->io_tag : - SCI_CONTROLLER_INVALID_IO_TAG; -} - -/** - * This method is will fill in the SCU Task Context for any type of SSP request. - * @sci_req: - * @task_context: - * - */ -static void scu_ssp_reqeust_construct_task_context( - struct scic_sds_request *sds_request, - struct scu_task_context *task_context) -{ - dma_addr_t dma_addr; - struct scic_sds_controller *controller; - struct scic_sds_remote_device *target_device; - struct scic_sds_port *target_port; - - controller = scic_sds_request_get_controller(sds_request); - target_device = scic_sds_request_get_device(sds_request); - target_port = scic_sds_request_get_port(sds_request); - - /* Fill in the TC with the its required data */ - task_context->abort = 0; - task_context->priority = 0; - task_context->initiator_request = 1; - task_context->connection_rate = target_device->connection_rate; - task_context->protocol_engine_index = - scic_sds_controller_get_protocol_engine_group(controller); - task_context->logical_port_index = - scic_sds_port_get_index(target_port); - task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SSP; - task_context->valid = SCU_TASK_CONTEXT_VALID; - task_context->context_type = SCU_TASK_CONTEXT_TYPE; - - task_context->remote_node_index = - scic_sds_remote_device_get_index(sds_request->target_device); - task_context->command_code = 0; - - task_context->link_layer_control = 0; - task_context->do_not_dma_ssp_good_response = 1; - task_context->strict_ordering = 0; - task_context->control_frame = 0; - task_context->timeout_enable = 0; - task_context->block_guard_enable = 0; - - task_context->address_modifier = 0; - - /* task_context->type.ssp.tag = sci_req->io_tag; */ - task_context->task_phase = 0x01; - - if (sds_request->was_tag_assigned_by_user) { - /* - * Build the task context now since we have already read - * the data - */ - sds_request->post_context = - (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC | - (scic_sds_controller_get_protocol_engine_group( - controller) << - SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) | - (scic_sds_port_get_index(target_port) << - SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) | - scic_sds_io_tag_get_index(sds_request->io_tag)); - } else { - /* - * Build the task context now since we have already read - * the data - * - * I/O tag index is not assigned because we have to wait - * until we get a TCi - */ - sds_request->post_context = - (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC | - (scic_sds_controller_get_protocol_engine_group( - owning_controller) << - SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) | - (scic_sds_port_get_index(target_port) << - SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT)); - } - - /* - * Copy the physical address for the command buffer to the - * SCU Task Context - */ - dma_addr = scic_io_request_get_dma_addr(sds_request, - &sds_request->ssp.cmd); - - task_context->command_iu_upper = upper_32_bits(dma_addr); - task_context->command_iu_lower = lower_32_bits(dma_addr); - - /* - * Copy the physical address for the response buffer to the - * SCU Task Context - */ - dma_addr = scic_io_request_get_dma_addr(sds_request, - &sds_request->ssp.rsp); - - task_context->response_iu_upper = upper_32_bits(dma_addr); - task_context->response_iu_lower = lower_32_bits(dma_addr); -} - -/** - * This method is will fill in the SCU Task Context for a SSP IO request. 
- * @sci_req: - * - */ -static void scu_ssp_io_request_construct_task_context( - struct scic_sds_request *sci_req, - enum dma_data_direction dir, - u32 len) -{ - struct scu_task_context *task_context; - - task_context = scic_sds_request_get_task_context(sci_req); - - scu_ssp_reqeust_construct_task_context(sci_req, task_context); - - task_context->ssp_command_iu_length = - sizeof(struct ssp_cmd_iu) / sizeof(u32); - task_context->type.ssp.frame_type = SSP_COMMAND; - - switch (dir) { - case DMA_FROM_DEVICE: - case DMA_NONE: - default: - task_context->task_type = SCU_TASK_TYPE_IOREAD; - break; - case DMA_TO_DEVICE: - task_context->task_type = SCU_TASK_TYPE_IOWRITE; - break; - } - - task_context->transfer_length_bytes = len; - - if (task_context->transfer_length_bytes > 0) - scic_sds_request_build_sgl(sci_req); -} - -static void scic_sds_ssp_task_request_assign_buffers(struct scic_sds_request *sci_req) -{ - if (sci_req->was_tag_assigned_by_user == false) - sci_req->task_context_buffer = &sci_req->tc; -} - -/** - * This method will fill in the SCU Task Context for a SSP Task request. The - * following important settings are utilized: -# priority == - * SCU_TASK_PRIORITY_HIGH. This ensures that the task request is issued - * ahead of other task destined for the same Remote Node. -# task_type == - * SCU_TASK_TYPE_IOREAD. This simply indicates that a normal request type - * (i.e. non-raw frame) is being utilized to perform task management. -# - * control_frame == 1. This ensures that the proper endianess is set so - * that the bytes are transmitted in the right order for a task frame. - * @sci_req: This parameter specifies the task request object being - * constructed. - * - */ -static void scu_ssp_task_request_construct_task_context( - struct scic_sds_request *sci_req) -{ - struct scu_task_context *task_context; - - task_context = scic_sds_request_get_task_context(sci_req); - - scu_ssp_reqeust_construct_task_context(sci_req, task_context); - - task_context->control_frame = 1; - task_context->priority = SCU_TASK_PRIORITY_HIGH; - task_context->task_type = SCU_TASK_TYPE_RAW_FRAME; - task_context->transfer_length_bytes = 0; - task_context->type.ssp.frame_type = SSP_TASK; - task_context->ssp_command_iu_length = - sizeof(struct ssp_task_iu) / sizeof(u32); -} - - -/** - * This method constructs the SSP Command IU data for this ssp passthrough - * comand request object. - * @sci_req: This parameter specifies the request object for which the SSP - * command information unit is being built. - * - * enum sci_status, returns invalid parameter is cdb > 16 - */ - - -/** - * This method constructs the SATA request object. 
- * @sci_req: - * @sat_protocol: - * @transfer_length: - * @data_direction: - * @copy_rx_frame: - * - * enum sci_status - */ -static enum sci_status -scic_io_request_construct_sata(struct scic_sds_request *sci_req, - u32 len, - enum dma_data_direction dir, - bool copy) -{ - enum sci_status status = SCI_SUCCESS; - struct isci_request *ireq = sci_req_to_ireq(sci_req); - struct sas_task *task = isci_request_access_task(ireq); - - /* check for management protocols */ - if (ireq->ttype == tmf_task) { - struct isci_tmf *tmf = isci_request_access_tmf(ireq); - - if (tmf->tmf_code == isci_tmf_sata_srst_high || - tmf->tmf_code == isci_tmf_sata_srst_low) - return scic_sds_stp_soft_reset_request_construct(sci_req); - else { - dev_err(scic_to_dev(sci_req->owning_controller), - "%s: Request 0x%p received un-handled SAT " - "management protocol 0x%x.\n", - __func__, sci_req, tmf->tmf_code); - - return SCI_FAILURE; - } - } - - if (!sas_protocol_ata(task->task_proto)) { - dev_err(scic_to_dev(sci_req->owning_controller), - "%s: Non-ATA protocol in SATA path: 0x%x\n", - __func__, - task->task_proto); - return SCI_FAILURE; - - } - - /* non data */ - if (task->data_dir == DMA_NONE) - return scic_sds_stp_non_data_request_construct(sci_req); - - /* NCQ */ - if (task->ata_task.use_ncq) - return scic_sds_stp_ncq_request_construct(sci_req, len, dir); - - /* DMA */ - if (task->ata_task.dma_xfer) - return scic_sds_stp_udma_request_construct(sci_req, len, dir); - else /* PIO */ - return scic_sds_stp_pio_request_construct(sci_req, copy); - - return status; -} - -enum sci_status scic_io_request_construct_basic_ssp( - struct scic_sds_request *sci_req) -{ - struct isci_request *ireq = sci_req_to_ireq(sci_req); - struct sas_task *task = isci_request_access_task(ireq); - - sci_req->protocol = SCIC_SSP_PROTOCOL; - - scu_ssp_io_request_construct_task_context(sci_req, - task->data_dir, - task->total_xfer_len); - - scic_sds_io_request_build_ssp_command_iu(sci_req); - - sci_base_state_machine_change_state( - &sci_req->state_machine, - SCI_BASE_REQUEST_STATE_CONSTRUCTED); - - return SCI_SUCCESS; -} - - -enum sci_status scic_task_request_construct_ssp( - struct scic_sds_request *sci_req) -{ - /* Construct the SSP Task SCU Task Context */ - scu_ssp_task_request_construct_task_context(sci_req); - - /* Fill in the SSP Task IU */ - scic_sds_task_request_build_ssp_task_iu(sci_req); - - sci_base_state_machine_change_state(&sci_req->state_machine, - SCI_BASE_REQUEST_STATE_CONSTRUCTED); - - return SCI_SUCCESS; -} - - -enum sci_status scic_io_request_construct_basic_sata( - struct scic_sds_request *sci_req) -{ - enum sci_status status; - struct scic_sds_stp_request *stp_req; - bool copy = false; - struct isci_request *isci_request = sci_req_to_ireq(sci_req); - struct sas_task *task = isci_request_access_task(isci_request); - - stp_req = &sci_req->stp.req; - sci_req->protocol = SCIC_STP_PROTOCOL; - - copy = (task->data_dir == DMA_NONE) ? 
false : true; - - status = scic_io_request_construct_sata(sci_req, - task->total_xfer_len, - task->data_dir, - copy); - - if (status == SCI_SUCCESS) - sci_base_state_machine_change_state(&sci_req->state_machine, - SCI_BASE_REQUEST_STATE_CONSTRUCTED); - - return status; -} - - -enum sci_status scic_task_request_construct_sata(struct scic_sds_request *sci_req) -{ - enum sci_status status = SCI_SUCCESS; - struct isci_request *ireq = sci_req_to_ireq(sci_req); - - /* check for management protocols */ - if (ireq->ttype == tmf_task) { - struct isci_tmf *tmf = isci_request_access_tmf(ireq); - - if (tmf->tmf_code == isci_tmf_sata_srst_high || - tmf->tmf_code == isci_tmf_sata_srst_low) { - status = scic_sds_stp_soft_reset_request_construct(sci_req); - } else { - dev_err(scic_to_dev(sci_req->owning_controller), - "%s: Request 0x%p received un-handled SAT " - "Protocol 0x%x.\n", - __func__, sci_req, tmf->tmf_code); - - return SCI_FAILURE; - } - } - - if (status == SCI_SUCCESS) - sci_base_state_machine_change_state( - &sci_req->state_machine, - SCI_BASE_REQUEST_STATE_CONSTRUCTED); - - return status; -} - - -u16 scic_io_request_get_io_tag( - struct scic_sds_request *sci_req) -{ - return sci_req->io_tag; -} - - -u32 scic_request_get_controller_status( - struct scic_sds_request *sci_req) -{ - return sci_req->scu_status; -} - -#define SCU_TASK_CONTEXT_SRAM 0x200000 -u32 scic_io_request_get_number_of_bytes_transferred( - struct scic_sds_request *scic_sds_request) -{ - struct scic_sds_controller *scic = scic_sds_request->owning_controller; - u32 ret_val = 0; - - if (readl(&scic->smu_registers->address_modifier) == 0) { - void __iomem *scu_reg_base = scic->scu_registers; - /* - * get the bytes of data from the Address == BAR1 + 20002Ch + (256*TCi) where - * BAR1 is the scu_registers - * 0x20002C = 0x200000 + 0x2c - * = start of task context SRAM + offset of (type.ssp.data_offset) - * TCi is the io_tag of struct scic_sds_request */ - ret_val = readl(scu_reg_base + - (SCU_TASK_CONTEXT_SRAM + offsetof(struct scu_task_context, type.ssp.data_offset)) + - ((sizeof(struct scu_task_context)) * scic_sds_io_tag_get_index(scic_sds_request->io_tag))); - } - - return ret_val; -} - - -/* - * **************************************************************************** - * * SCIC SDS Interface Implementation - * **************************************************************************** */ - -enum sci_status -scic_sds_request_start(struct scic_sds_request *request) -{ - if (request->device_sequence != - scic_sds_remote_device_get_sequence(request->target_device)) - return SCI_FAILURE; - - if (request->state_handlers->start_handler) - return request->state_handlers->start_handler(request); - - dev_warn(scic_to_dev(request->owning_controller), - "%s: SCIC IO Request requested to start while in wrong " - "state %d\n", - __func__, - sci_base_state_machine_get_state(&request->state_machine)); - - return SCI_FAILURE_INVALID_STATE; -} - -enum sci_status -scic_sds_io_request_terminate(struct scic_sds_request *request) -{ - if (request->state_handlers->abort_handler) - return request->state_handlers->abort_handler(request); - - dev_warn(scic_to_dev(request->owning_controller), - "%s: SCIC IO Request requested to abort while in wrong " - "state %d\n", - __func__, - sci_base_state_machine_get_state(&request->state_machine)); - - return SCI_FAILURE_INVALID_STATE; -} - -enum sci_status -scic_sds_io_request_complete(struct scic_sds_request *request) -{ - if (request->state_handlers->complete_handler) - return 
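As a worked instance of the address arithmetic in the comment above (taking at
face value the 0x2C data_offset and 256-byte task context size the comment
gives), an I/O whose tag decodes to TCi = 3 has its transferred-byte count read
from BAR1 at:

/*
 * 0x200000 + 0x2C + 3 * 0x100 == 0x20032C
 * (start of TC SRAM + data_offset field + TCi * sizeof(struct scu_task_context)),
 * i.e. readl(scu_reg_base + 0x20032C).
 */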
request->state_handlers->complete_handler(request); - - dev_warn(scic_to_dev(request->owning_controller), - "%s: SCIC IO Request requested to complete while in wrong " - "state %d\n", - __func__, - sci_base_state_machine_get_state(&request->state_machine)); - - return SCI_FAILURE_INVALID_STATE; -} - -enum sci_status scic_sds_io_request_event_handler( - struct scic_sds_request *request, - u32 event_code) -{ - if (request->state_handlers->event_handler) - return request->state_handlers->event_handler(request, event_code); - - dev_warn(scic_to_dev(request->owning_controller), - "%s: SCIC IO Request given event code notification %x while " - "in wrong state %d\n", - __func__, - event_code, - sci_base_state_machine_get_state(&request->state_machine)); - - return SCI_FAILURE_INVALID_STATE; -} - -enum sci_status -scic_sds_io_request_tc_completion(struct scic_sds_request *request, u32 completion_code) -{ - if (request->state_machine.current_state_id == SCI_BASE_REQUEST_STATE_STARTED && - request->has_started_substate_machine == false) - return scic_sds_request_started_state_tc_completion_handler(request, completion_code); - else if (request->state_handlers->tc_completion_handler) - return request->state_handlers->tc_completion_handler(request, completion_code); - - dev_warn(scic_to_dev(request->owning_controller), - "%s: SCIC IO Request given task completion notification %x " - "while in wrong state %d\n", - __func__, - completion_code, - sci_base_state_machine_get_state(&request->state_machine)); - - return SCI_FAILURE_INVALID_STATE; - -} - - -/** - * - * @sci_req: The SCIC_SDS_IO_REQUEST_T object for which the start - * operation is to be executed. - * @frame_index: The frame index returned by the hardware for the reqeust - * object. - * - * This method invokes the core state frame handler for the - * SCIC_SDS_IO_REQUEST_T object. enum sci_status - */ -enum sci_status scic_sds_io_request_frame_handler( - struct scic_sds_request *request, - u32 frame_index) -{ - if (request->state_handlers->frame_handler) - return request->state_handlers->frame_handler(request, frame_index); - - dev_warn(scic_to_dev(request->owning_controller), - "%s: SCIC IO Request given unexpected frame %x while in " - "state %d\n", - __func__, - frame_index, - sci_base_state_machine_get_state(&request->state_machine)); - - scic_sds_controller_release_frame(request->owning_controller, frame_index); - return SCI_FAILURE_INVALID_STATE; -} - -/* - * This function copies response data for requests returning response data - * instead of sense data. - * @sci_req: This parameter specifies the request object for which to copy - * the response data. - */ -void scic_sds_io_request_copy_response(struct scic_sds_request *sci_req) -{ - void *resp_buf; - u32 len; - struct ssp_response_iu *ssp_response; - struct isci_request *ireq = sci_req_to_ireq(sci_req); - struct isci_tmf *isci_tmf = isci_request_access_tmf(ireq); - - ssp_response = &sci_req->ssp.rsp; - - resp_buf = &isci_tmf->resp.resp_iu; - - len = min_t(u32, - SSP_RESP_IU_MAX_SIZE, - be32_to_cpu(ssp_response->response_data_len)); - - memcpy(resp_buf, ssp_response->resp_data, len); -} - -/* - * ***************************************************************************** - * * CONSTRUCTED STATE HANDLERS - * ***************************************************************************** */ - -/* - * This method implements the action taken when a constructed - * SCIC_SDS_IO_REQUEST_T object receives a scic_sds_request_start() request. 
- * This method will, if necessary, allocate a TCi for the io request object and - * then will, if necessary, copy the constructed TC data into the actual TC - * buffer. If everything is successful the post context field is updated with - * the TCi so the controller can post the request to the hardware. enum sci_status - * SCI_SUCCESS SCI_FAILURE_INSUFFICIENT_RESOURCES - */ -static enum sci_status scic_sds_request_constructed_state_start_handler( - struct scic_sds_request *request) -{ - struct scu_task_context *task_context; - - if (request->io_tag == SCI_CONTROLLER_INVALID_IO_TAG) { - request->io_tag = - scic_controller_allocate_io_tag(request->owning_controller); - } - - /* Record the IO Tag in the request */ - if (request->io_tag != SCI_CONTROLLER_INVALID_IO_TAG) { - task_context = request->task_context_buffer; - - task_context->task_index = scic_sds_io_tag_get_index(request->io_tag); - - switch (task_context->protocol_type) { - case SCU_TASK_CONTEXT_PROTOCOL_SMP: - case SCU_TASK_CONTEXT_PROTOCOL_SSP: - /* SSP/SMP Frame */ - task_context->type.ssp.tag = request->io_tag; - task_context->type.ssp.target_port_transfer_tag = 0xFFFF; - break; - - case SCU_TASK_CONTEXT_PROTOCOL_STP: - /* - * STP/SATA Frame - * task_context->type.stp.ncq_tag = request->ncq_tag; */ - break; - - case SCU_TASK_CONTEXT_PROTOCOL_NONE: - /* / @todo When do we set no protocol type? */ - break; - - default: - /* This should never happen since we build the IO requests */ - break; - } - - /* - * Check to see if we need to copy the task context buffer - * or have been building into the task context buffer */ - if (request->was_tag_assigned_by_user == false) { - scic_sds_controller_copy_task_context( - request->owning_controller, request); - } - - /* Add to the post_context the io tag value */ - request->post_context |= scic_sds_io_tag_get_index(request->io_tag); - - /* Everything is good go ahead and change state */ - sci_base_state_machine_change_state(&request->state_machine, - SCI_BASE_REQUEST_STATE_STARTED); - - return SCI_SUCCESS; - } - - return SCI_FAILURE_INSUFFICIENT_RESOURCES; -} - -/* - * This method implements the action to be taken when an SCIC_SDS_IO_REQUEST_T - * object receives a scic_sds_request_terminate() request. Since the request - * has not yet been posted to the hardware the request transitions to the - * completed state. enum sci_status SCI_SUCCESS - */ -static enum sci_status scic_sds_request_constructed_state_abort_handler( - struct scic_sds_request *request) -{ - /* - * This request has been terminated by the user make sure that the correct - * status code is returned */ - scic_sds_request_set_status(request, - SCU_TASK_DONE_TASK_ABORT, - SCI_FAILURE_IO_TERMINATED); - - sci_base_state_machine_change_state(&request->state_machine, - SCI_BASE_REQUEST_STATE_COMPLETED); - return SCI_SUCCESS; -} - -/* - * ***************************************************************************** - * * STARTED STATE HANDLERS - * ***************************************************************************** */ - -/* - * This method implements the action to be taken when an SCIC_SDS_IO_REQUEST_T - * object receives a scic_sds_request_terminate() request. Since the request - * has been posted to the hardware the io request state is changed to the - * aborting state. 
enum sci_status SCI_SUCCESS - */ -enum sci_status scic_sds_request_started_state_abort_handler( - struct scic_sds_request *request) -{ - if (request->has_started_substate_machine) - sci_base_state_machine_stop(&request->started_substate_machine); - - sci_base_state_machine_change_state(&request->state_machine, - SCI_BASE_REQUEST_STATE_ABORTING); - return SCI_SUCCESS; -} - -/* - * scic_sds_request_started_state_tc_completion_handler() - This method process - * TC (task context) completions for normal IO request (i.e. Task/Abort - * Completions of type 0). This method will update the - * SCIC_SDS_IO_REQUEST_T::status field. - * @sci_req: This parameter specifies the request for which a completion - * occurred. - * @completion_code: This parameter specifies the completion code received from - * the SCU. - * - */ -enum sci_status -scic_sds_request_started_state_tc_completion_handler( - struct scic_sds_request *sci_req, - u32 completion_code) -{ - u8 datapres; - struct ssp_response_iu *resp_iu; - - /* - * TODO: Any SDMA return code of other than 0 is bad - * decode 0x003C0000 to determine SDMA status - */ - switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { - case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): - scic_sds_request_set_status(sci_req, - SCU_TASK_DONE_GOOD, - SCI_SUCCESS); - break; - - case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_EARLY_RESP): - { - /* - * There are times when the SCU hardware will return an early - * response because the io request specified more data than is - * returned by the target device (mode pages, inquiry data, - * etc.). We must check the response stats to see if this is - * truly a failed request or a good request that just got - * completed early. - */ - struct ssp_response_iu *resp = &sci_req->ssp.rsp; - ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32); - - sci_swab32_cpy(&sci_req->ssp.rsp, - &sci_req->ssp.rsp, - word_cnt); - - if (resp->status == 0) { - scic_sds_request_set_status( - sci_req, - SCU_TASK_DONE_GOOD, - SCI_SUCCESS_IO_DONE_EARLY); - } else { - scic_sds_request_set_status( - sci_req, - SCU_TASK_DONE_CHECK_RESPONSE, - SCI_FAILURE_IO_RESPONSE_VALID); - } - } - break; - - case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CHECK_RESPONSE): - { - ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32); - - sci_swab32_cpy(&sci_req->ssp.rsp, - &sci_req->ssp.rsp, - word_cnt); - - scic_sds_request_set_status(sci_req, - SCU_TASK_DONE_CHECK_RESPONSE, - SCI_FAILURE_IO_RESPONSE_VALID); - break; - } - - case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RESP_LEN_ERR): - /* - * / @todo With TASK_DONE_RESP_LEN_ERR is the response frame - * guaranteed to be received before this completion status is - * posted? - */ - resp_iu = &sci_req->ssp.rsp; - datapres = resp_iu->datapres; - - if ((datapres == 0x01) || (datapres == 0x02)) { - scic_sds_request_set_status( - sci_req, - SCU_TASK_DONE_CHECK_RESPONSE, - SCI_FAILURE_IO_RESPONSE_VALID); - } else - scic_sds_request_set_status( - sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS); - break; - - /* only stp device gets suspended. 
*/ - case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO): - case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_PERR): - case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_NAK_ERR): - case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_DATA_LEN_ERR): - case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_ABORT_ERR): - case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_XR_WD_LEN): - case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_MAX_PLD_ERR): - case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_RESP): - case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_SDBFIS): - case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR): - case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDB_ERR): - if (sci_req->protocol == SCIC_STP_PROTOCOL) { - scic_sds_request_set_status( - sci_req, - SCU_GET_COMPLETION_TL_STATUS(completion_code) >> - SCU_COMPLETION_TL_STATUS_SHIFT, - SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED); - } else { - scic_sds_request_set_status( - sci_req, - SCU_GET_COMPLETION_TL_STATUS(completion_code) >> - SCU_COMPLETION_TL_STATUS_SHIFT, - SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); - } - break; - - /* both stp/ssp device gets suspended */ - case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LF_ERR): - case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_WRONG_DESTINATION): - case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1): - case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2): - case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3): - case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_BAD_DESTINATION): - case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_ZONE_VIOLATION): - case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY): - case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED): - case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED): - scic_sds_request_set_status( - sci_req, - SCU_GET_COMPLETION_TL_STATUS(completion_code) >> - SCU_COMPLETION_TL_STATUS_SHIFT, - SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED); - break; - - /* neither ssp nor stp gets suspended. */ - case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_NAK_CMD_ERR): - case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_XR): - case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_XR_IU_LEN_ERR): - case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDMA_ERR): - case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_OFFSET_ERR): - case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_EXCESS_DATA): - case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_RESP_TO_ERR): - case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_UFI_ERR): - case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_FRM_TYPE_ERR): - case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_LL_RX_ERR): - case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_DATA): - case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_OPEN_FAIL): - case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_VIIT_ENTRY_NV): - case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_IIT_ENTRY_NV): - case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RNCNV_OUTBOUND): - default: - scic_sds_request_set_status( - sci_req, - SCU_GET_COMPLETION_TL_STATUS(completion_code) >> - SCU_COMPLETION_TL_STATUS_SHIFT, - SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); - break; - } - - /* - * TODO: This is probably wrong for ACK/NAK timeout conditions - */ - - /* In all cases we will treat this as the completion of the IO req. 
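The SCU_TASK_DONE_EARLY_RESP case above is the situation the
scic_io_request_get_number_of_bytes_transferred() comment in scic_io_request.h
describes: the target returned less data than the request asked for. A hedged
sketch of how a completion path might combine the two (field and function names
as declared earlier in this patch; the surrounding reporting logic is an
assumption, not the driver's actual completion path):

/* Hedged sketch, not the driver's completion code. */
static u32 bytes_actually_moved(struct scic_sds_request *sci_req, u32 requested)
{
	if (sci_req->sci_status == SCI_SUCCESS_IO_DONE_EARLY)
		return scic_io_request_get_number_of_bytes_transferred(sci_req);
	return requested;
}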
*/ - sci_base_state_machine_change_state( - &sci_req->state_machine, - SCI_BASE_REQUEST_STATE_COMPLETED); - return SCI_SUCCESS; -} - -/* - * This method implements the action to be taken when an SCIC_SDS_IO_REQUEST_T - * object receives a scic_sds_request_frame_handler() request. This method - * first determines the frame type received. If this is a response frame then - * the response data is copied to the io request response buffer for processing - * at completion time. If the frame type is not a response buffer an error is - * logged. enum sci_status SCI_SUCCESS SCI_FAILURE_INVALID_PARAMETER_VALUE - */ -static enum sci_status -scic_sds_request_started_state_frame_handler(struct scic_sds_request *sci_req, - u32 frame_index) -{ - enum sci_status status; - u32 *frame_header; - struct ssp_frame_hdr ssp_hdr; - ssize_t word_cnt; - - status = scic_sds_unsolicited_frame_control_get_header( - &(scic_sds_request_get_controller(sci_req)->uf_control), - frame_index, - (void **)&frame_header); - - word_cnt = sizeof(struct ssp_frame_hdr) / sizeof(u32); - sci_swab32_cpy(&ssp_hdr, frame_header, word_cnt); - - if (ssp_hdr.frame_type == SSP_RESPONSE) { - struct ssp_response_iu *resp_iu; - ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32); - - status = scic_sds_unsolicited_frame_control_get_buffer( - &(scic_sds_request_get_controller(sci_req)->uf_control), - frame_index, - (void **)&resp_iu); - - sci_swab32_cpy(&sci_req->ssp.rsp, - resp_iu, word_cnt); - - resp_iu = &sci_req->ssp.rsp; - - if ((resp_iu->datapres == 0x01) || - (resp_iu->datapres == 0x02)) { - scic_sds_request_set_status( - sci_req, - SCU_TASK_DONE_CHECK_RESPONSE, - SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); - } else - scic_sds_request_set_status( - sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS); - } else { - /* This was not a response frame why did it get forwarded? */ - dev_err(scic_to_dev(sci_req->owning_controller), - "%s: SCIC IO Request 0x%p received unexpected " - "frame %d type 0x%02x\n", - __func__, - sci_req, - frame_index, - ssp_hdr.frame_type); - } - - /* - * In any case we are done with this frame buffer return it to the - * controller - */ - scic_sds_controller_release_frame( - sci_req->owning_controller, frame_index); - - return SCI_SUCCESS; -} - -/* - * ***************************************************************************** - * * COMPLETED STATE HANDLERS - * ***************************************************************************** */ - - -/* - * This method implements the action to be taken when an SCIC_SDS_IO_REQUEST_T - * object receives a scic_sds_request_complete() request. This method frees up - * any io request resources that have been allocated and transitions the - * request to its final state. Consider stopping the state machine instead of - * transitioning to the final state? 
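sci_swab32_cpy() is used above (and in the TC completion handler) to pull the
big-endian SSP frame contents into host order one 32-bit word at a time,
including in place on sci_req->ssp.rsp. Its definition lives elsewhere in the
driver; a sketch of the semantics the call sites rely on (an assumption drawn
from the (dest, src, word_cnt) usage, not the actual implementation):

static void swab32_cpy_sketch(void *dest, const void *src, ssize_t word_cnt)
{
	u32 *d = dest;
	const u32 *s = src;
	ssize_t i;

	/* Copy word_cnt 32-bit words, byte-swapping each one. */
	for (i = 0; i < word_cnt; i++)
		d[i] = swab32(s[i]);
}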
enum sci_status SCI_SUCCESS - */ -static enum sci_status scic_sds_request_completed_state_complete_handler( - struct scic_sds_request *request) -{ - if (request->was_tag_assigned_by_user != true) { - scic_controller_free_io_tag( - request->owning_controller, request->io_tag); - } - - if (request->saved_rx_frame_index != SCU_INVALID_FRAME_INDEX) { - scic_sds_controller_release_frame( - request->owning_controller, request->saved_rx_frame_index); - } - - sci_base_state_machine_change_state(&request->state_machine, - SCI_BASE_REQUEST_STATE_FINAL); - return SCI_SUCCESS; -} - -/* - * ***************************************************************************** - * * ABORTING STATE HANDLERS - * ***************************************************************************** */ - -/* - * This method implements the action to be taken when an SCIC_SDS_IO_REQUEST_T - * object receives a scic_sds_request_terminate() request. This method is the - * io request aborting state abort handlers. On receipt of a multiple - * terminate requests the io request will transition to the completed state. - * This should not happen in normal operation. enum sci_status SCI_SUCCESS - */ -static enum sci_status scic_sds_request_aborting_state_abort_handler( - struct scic_sds_request *request) -{ - sci_base_state_machine_change_state(&request->state_machine, - SCI_BASE_REQUEST_STATE_COMPLETED); - return SCI_SUCCESS; -} - -/* - * This method implements the action to be taken when an SCIC_SDS_IO_REQUEST_T - * object receives a scic_sds_request_task_completion() request. This method - * decodes the completion type waiting for the abort task complete - * notification. When the abort task complete is received the io request - * transitions to the completed state. enum sci_status SCI_SUCCESS - */ -static enum sci_status scic_sds_request_aborting_state_tc_completion_handler( - struct scic_sds_request *sci_req, - u32 completion_code) -{ - switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { - case (SCU_TASK_DONE_GOOD << SCU_COMPLETION_TL_STATUS_SHIFT): - case (SCU_TASK_DONE_TASK_ABORT << SCU_COMPLETION_TL_STATUS_SHIFT): - scic_sds_request_set_status( - sci_req, SCU_TASK_DONE_TASK_ABORT, SCI_FAILURE_IO_TERMINATED - ); - - sci_base_state_machine_change_state(&sci_req->state_machine, - SCI_BASE_REQUEST_STATE_COMPLETED); - break; - - default: - /* - * Unless we get some strange error wait for the task abort to complete - * TODO: Should there be a state change for this completion? */ - break; - } - - return SCI_SUCCESS; -} - -/* - * This method implements the action to be taken when an SCIC_SDS_IO_REQUEST_T - * object receives a scic_sds_request_frame_handler() request. This method - * discards the unsolicited frame since we are waiting for the abort task - * completion. enum sci_status SCI_SUCCESS - */ -static enum sci_status scic_sds_request_aborting_state_frame_handler( - struct scic_sds_request *sci_req, - u32 frame_index) -{ - /* TODO: Is it even possible to get an unsolicited frame in the aborting state? 
*/ - - scic_sds_controller_release_frame( - sci_req->owning_controller, frame_index); - - return SCI_SUCCESS; -} - -static const struct scic_sds_io_request_state_handler scic_sds_request_state_handler_table[] = { - [SCI_BASE_REQUEST_STATE_INITIAL] = { - }, - [SCI_BASE_REQUEST_STATE_CONSTRUCTED] = { - .start_handler = scic_sds_request_constructed_state_start_handler, - .abort_handler = scic_sds_request_constructed_state_abort_handler, - }, - [SCI_BASE_REQUEST_STATE_STARTED] = { - .abort_handler = scic_sds_request_started_state_abort_handler, - .tc_completion_handler = scic_sds_request_started_state_tc_completion_handler, - .frame_handler = scic_sds_request_started_state_frame_handler, - }, - [SCI_BASE_REQUEST_STATE_COMPLETED] = { - .complete_handler = scic_sds_request_completed_state_complete_handler, - }, - [SCI_BASE_REQUEST_STATE_ABORTING] = { - .abort_handler = scic_sds_request_aborting_state_abort_handler, - .tc_completion_handler = scic_sds_request_aborting_state_tc_completion_handler, - .frame_handler = scic_sds_request_aborting_state_frame_handler, - }, - [SCI_BASE_REQUEST_STATE_FINAL] = { - }, -}; - -/** - * scic_sds_request_initial_state_enter() - - * @object: This parameter specifies the base object for which the state - * transition is occurring. - * - * This method implements the actions taken when entering the - * SCI_BASE_REQUEST_STATE_INITIAL state. This state is entered when the initial - * base request is constructed. Entry into the initial state sets all handlers - * for the io request object to their default handlers. none - */ -static void scic_sds_request_initial_state_enter(void *object) -{ - struct scic_sds_request *sci_req = object; - - SET_STATE_HANDLER( - sci_req, - scic_sds_request_state_handler_table, - SCI_BASE_REQUEST_STATE_INITIAL - ); -} - -/** - * scic_sds_request_constructed_state_enter() - - * @object: The io request object that is to enter the constructed state. - * - * This method implements the actions taken when entering the - * SCI_BASE_REQUEST_STATE_CONSTRUCTED state. The method sets the state handlers - * for the the constructed state. none - */ -static void scic_sds_request_constructed_state_enter(void *object) -{ - struct scic_sds_request *sci_req = object; - - SET_STATE_HANDLER( - sci_req, - scic_sds_request_state_handler_table, - SCI_BASE_REQUEST_STATE_CONSTRUCTED - ); -} - -/** - * scic_sds_request_started_state_enter() - - * @object: This parameter specifies the base object for which the state - * transition is occurring. This is cast into a SCIC_SDS_IO_REQUEST object. - * - * This method implements the actions taken when entering the - * SCI_BASE_REQUEST_STATE_STARTED state. If the io request object type is a - * SCSI Task request we must enter the started substate machine. none - */ -static void scic_sds_request_started_state_enter(void *object) -{ - struct scic_sds_request *sci_req = object; - - SET_STATE_HANDLER( - sci_req, - scic_sds_request_state_handler_table, - SCI_BASE_REQUEST_STATE_STARTED - ); - - /* - * Most of the request state machines have a started substate machine so - * start its execution on the entry to the started state. */ - if (sci_req->has_started_substate_machine == true) - sci_base_state_machine_start(&sci_req->started_substate_machine); -} - -/** - * scic_sds_request_started_state_exit() - - * @object: This parameter specifies the base object for which the state - * transition is occurring. This object is cast into a SCIC_SDS_IO_REQUEST - * object. 
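SET_STATE_HANDLER() itself is not part of this hunk; given that the table above
is indexed by enum sci_base_request_states and that struct scic_sds_request
keeps a const state_handlers pointer (declared in scic_sds_request.h, removed
later in this patch), it presumably reduces to selecting the row for the new
state, roughly:

/* Assumed effect of SET_STATE_HANDLER(sci_req, table, state); the real
 * macro is defined elsewhere in the driver. */
sci_req->state_handlers =
	&scic_sds_request_state_handler_table[SCI_BASE_REQUEST_STATE_INITIAL];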
- * - * This method implements the actions taken when exiting the - * SCI_BASE_REQUEST_STATE_STARTED state. For task requests the action will be - * to stop the started substate machine. none - */ -static void scic_sds_request_started_state_exit(void *object) -{ - struct scic_sds_request *sci_req = object; - - if (sci_req->has_started_substate_machine == true) - sci_base_state_machine_stop(&sci_req->started_substate_machine); -} - -/** - * scic_sds_request_completed_state_enter() - - * @object: This parameter specifies the base object for which the state - * transition is occurring. This object is cast into a SCIC_SDS_IO_REQUEST - * object. - * - * This method implements the actions taken when entering the - * SCI_BASE_REQUEST_STATE_COMPLETED state. This state is entered when the - * SCIC_SDS_IO_REQUEST has completed. The method will decode the request - * completion status and convert it to an enum sci_status to return in the - * completion callback function. none - */ -static void scic_sds_request_completed_state_enter(void *object) -{ - struct scic_sds_request *sci_req = object; - struct scic_sds_controller *scic = - scic_sds_request_get_controller(sci_req); - struct isci_host *ihost = scic_to_ihost(scic); - struct isci_request *ireq = sci_req_to_ireq(sci_req); - - SET_STATE_HANDLER(sci_req, - scic_sds_request_state_handler_table, - SCI_BASE_REQUEST_STATE_COMPLETED); - - /* Tell the SCI_USER that the IO request is complete */ - if (sci_req->is_task_management_request == false) - isci_request_io_request_complete(ihost, - ireq, - sci_req->sci_status); - else - isci_task_request_complete(ihost, ireq, sci_req->sci_status); -} - -/** - * scic_sds_request_aborting_state_enter() - - * @object: This parameter specifies the base object for which the state - * transition is occurring. This object is cast into a SCIC_SDS_IO_REQUEST - * object. - * - * This method implements the actions taken when entering the - * SCI_BASE_REQUEST_STATE_ABORTING state. none - */ -static void scic_sds_request_aborting_state_enter(void *object) -{ - struct scic_sds_request *sci_req = object; - - /* Setting the abort bit in the Task Context is required by the silicon. */ - sci_req->task_context_buffer->abort = 1; - - SET_STATE_HANDLER( - sci_req, - scic_sds_request_state_handler_table, - SCI_BASE_REQUEST_STATE_ABORTING - ); -} - -/** - * scic_sds_request_final_state_enter() - - * @object: This parameter specifies the base object for which the state - * transition is occurring. This is cast into a SCIC_SDS_IO_REQUEST object. - * - * This method implements the actions taken when entering the - * SCI_BASE_REQUEST_STATE_FINAL state. The only action required is to put the - * state handlers in place. 
none - */ -static void scic_sds_request_final_state_enter(void *object) -{ - struct scic_sds_request *sci_req = object; - - SET_STATE_HANDLER( - sci_req, - scic_sds_request_state_handler_table, - SCI_BASE_REQUEST_STATE_FINAL - ); -} - -static const struct sci_base_state scic_sds_request_state_table[] = { - [SCI_BASE_REQUEST_STATE_INITIAL] = { - .enter_state = scic_sds_request_initial_state_enter, - }, - [SCI_BASE_REQUEST_STATE_CONSTRUCTED] = { - .enter_state = scic_sds_request_constructed_state_enter, - }, - [SCI_BASE_REQUEST_STATE_STARTED] = { - .enter_state = scic_sds_request_started_state_enter, - .exit_state = scic_sds_request_started_state_exit - }, - [SCI_BASE_REQUEST_STATE_COMPLETED] = { - .enter_state = scic_sds_request_completed_state_enter, - }, - [SCI_BASE_REQUEST_STATE_ABORTING] = { - .enter_state = scic_sds_request_aborting_state_enter, - }, - [SCI_BASE_REQUEST_STATE_FINAL] = { - .enter_state = scic_sds_request_final_state_enter, - }, -}; - -static void scic_sds_general_request_construct(struct scic_sds_controller *scic, - struct scic_sds_remote_device *sci_dev, - u16 io_tag, struct scic_sds_request *sci_req) -{ - sci_base_state_machine_construct(&sci_req->state_machine, sci_req, - scic_sds_request_state_table, SCI_BASE_REQUEST_STATE_INITIAL); - sci_base_state_machine_start(&sci_req->state_machine); - - sci_req->io_tag = io_tag; - sci_req->owning_controller = scic; - sci_req->target_device = sci_dev; - sci_req->has_started_substate_machine = false; - sci_req->protocol = SCIC_NO_PROTOCOL; - sci_req->saved_rx_frame_index = SCU_INVALID_FRAME_INDEX; - sci_req->device_sequence = scic_sds_remote_device_get_sequence(sci_dev); - - sci_req->sci_status = SCI_SUCCESS; - sci_req->scu_status = 0; - sci_req->post_context = 0xFFFFFFFF; - - sci_req->is_task_management_request = false; - - if (io_tag == SCI_CONTROLLER_INVALID_IO_TAG) { - sci_req->was_tag_assigned_by_user = false; - sci_req->task_context_buffer = NULL; - } else { - sci_req->was_tag_assigned_by_user = true; - - sci_req->task_context_buffer = - scic_sds_controller_get_task_context_buffer(scic, io_tag); - } -} - -enum sci_status -scic_io_request_construct(struct scic_sds_controller *scic, - struct scic_sds_remote_device *sci_dev, - u16 io_tag, struct scic_sds_request *sci_req) -{ - struct domain_device *dev = sci_dev_to_domain(sci_dev); - enum sci_status status = SCI_SUCCESS; - - /* Build the common part of the request */ - scic_sds_general_request_construct(scic, sci_dev, io_tag, sci_req); - - if (sci_dev->rnc.remote_node_index == - SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) - return SCI_FAILURE_INVALID_REMOTE_DEVICE; - - if (dev->dev_type == SAS_END_DEV) - scic_sds_ssp_io_request_assign_buffers(sci_req); - else if ((dev->dev_type == SATA_DEV) || - (dev->tproto & SAS_PROTOCOL_STP)) { - scic_sds_stp_request_assign_buffers(sci_req); - memset(&sci_req->stp.cmd, 0, sizeof(sci_req->stp.cmd)); - } else if (dev_is_expander(dev)) { - scic_sds_smp_request_assign_buffers(sci_req); - memset(&sci_req->smp.cmd, 0, sizeof(sci_req->smp.cmd)); - } else - status = SCI_FAILURE_UNSUPPORTED_PROTOCOL; - - if (status == SCI_SUCCESS) { - memset(sci_req->task_context_buffer, 0, - offsetof(struct scu_task_context, sgl_pair_ab)); - } - - return status; -} - -enum sci_status scic_task_request_construct(struct scic_sds_controller *scic, - struct scic_sds_remote_device *sci_dev, - u16 io_tag, struct scic_sds_request *sci_req) -{ - struct domain_device *dev = sci_dev_to_domain(sci_dev); - enum sci_status status = SCI_SUCCESS; - - /* Build the common part of 
the request */ - scic_sds_general_request_construct(scic, sci_dev, io_tag, sci_req); - - if (dev->dev_type == SAS_END_DEV) { - scic_sds_ssp_task_request_assign_buffers(sci_req); - - sci_req->has_started_substate_machine = true; - - /* Construct the started sub-state machine. */ - sci_base_state_machine_construct( - &sci_req->started_substate_machine, - sci_req, - scic_sds_io_request_started_task_mgmt_substate_table, - SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_COMPLETION - ); - } else if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) - scic_sds_stp_request_assign_buffers(sci_req); - else - status = SCI_FAILURE_UNSUPPORTED_PROTOCOL; - - if (status == SCI_SUCCESS) { - sci_req->is_task_management_request = true; - memset(sci_req->task_context_buffer, 0, sizeof(struct scu_task_context)); - } - - return status; -} diff --git a/drivers/scsi/isci/core/scic_sds_request.h b/drivers/scsi/isci/core/scic_sds_request.h deleted file mode 100644 index a8d74a15a86..00000000000 --- a/drivers/scsi/isci/core/scic_sds_request.h +++ /dev/null @@ -1,491 +0,0 @@ -/* - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - * The full GNU General Public License is included in this distribution - * in the file called LICENSE.GPL. - * - * BSD LICENSE - * - * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
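The task-management path mirrors the two-phase construction used for normal
I/O: scic_task_request_construct() above builds the common part and, for SSP
end devices, arms the started sub-state machine, after which
scic_task_request_construct_ssp() (earlier in this hunk) fills in the SSP Task
IU. A sketch of a caller chaining the two (allocation of sci_req and of the
associated isci_tmf are assumed):

static enum sci_status build_ssp_tmf(struct scic_sds_controller *scic,
				     struct scic_sds_remote_device *sci_dev,
				     u16 io_tag,
				     struct scic_sds_request *sci_req)
{
	enum sci_status status;

	status = scic_task_request_construct(scic, sci_dev, io_tag, sci_req);
	if (status != SCI_SUCCESS)
		return status;

	/* Builds the SCU task context and SSP Task IU for the TMF. */
	return scic_task_request_construct_ssp(sci_req);
}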
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#ifndef _SCIC_SDS_IO_REQUEST_H_ -#define _SCIC_SDS_IO_REQUEST_H_ - -#include "isci.h" -#include "scic_io_request.h" -#include "state_machine.h" -#include "scu_task_context.h" -#include "scic_sds_stp_request.h" -#include "sas.h" - -struct scic_sds_controller; -struct scic_sds_remote_device; -struct scic_sds_io_request_state_handler; - -/** - * enum _scic_sds_io_request_started_task_mgmt_substates - This enumeration - * depicts all of the substates for a task management request to be - * performed in the STARTED super-state. - * - * - */ -enum scic_sds_raw_request_started_task_mgmt_substates { - /** - * The AWAIT_TC_COMPLETION sub-state indicates that the started raw - * task management request is waiting for the transmission of the - * initial frame (i.e. command, task, etc.). - */ - SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_COMPLETION, - - /** - * This sub-state indicates that the started task management request - * is waiting for the reception of an unsolicited frame - * (i.e. response IU). - */ - SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_RESPONSE, -}; - - -/** - * enum _scic_sds_smp_request_started_substates - This enumeration depicts all - * of the substates for a SMP request to be performed in the STARTED - * super-state. - * - * - */ -enum scic_sds_smp_request_started_substates { - /** - * This sub-state indicates that the started task management request - * is waiting for the reception of an unsolicited frame - * (i.e. response IU). - */ - SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_RESPONSE, - - /** - * The AWAIT_TC_COMPLETION sub-state indicates that the started SMP request is - * waiting for the transmission of the initial frame (i.e. command, task, etc.). - */ - SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_TC_COMPLETION, -}; - -struct scic_sds_request { - /** - * This field contains the information for the base request state machine. - */ - struct sci_base_state_machine state_machine; - - /** - * This field simply points to the controller to which this IO request - * is associated. - */ - struct scic_sds_controller *owning_controller; - - /** - * This field simply points to the remote device to which this IO request - * is associated. - */ - struct scic_sds_remote_device *target_device; - - /** - * This field is utilized to determine if the SCI user is managing - * the IO tag for this request or if the core is managing it. - */ - bool was_tag_assigned_by_user; - - /** - * This field indicates the IO tag for this request. The IO tag is - * comprised of the task_index and a sequence count. The sequence count - * is utilized to help identify tasks from one life to another. - */ - u16 io_tag; - - /** - * This field specifies the protocol being utilized for this - * IO request. - */ - SCIC_TRANSPORT_PROTOCOL protocol; - - /** - * This field indicates the completion status taken from the SCUs - * completion code. It indicates the completion result for the SCU hardware. 
- */ - u32 scu_status; - - /** - * This field indicates the completion status returned to the SCI user. It - * indicates the users view of the io request completion. - */ - u32 sci_status; - - /** - * This field contains the value to be utilized when posting (e.g. Post_TC, - * Post_TC_Abort) this request to the silicon. - */ - u32 post_context; - - struct scu_task_context *task_context_buffer; - struct scu_task_context tc ____cacheline_aligned; - - /* could be larger with sg chaining */ - #define SCU_SGL_SIZE ((SCU_IO_REQUEST_SGE_COUNT + 1) / 2) - struct scu_sgl_element_pair sg_table[SCU_SGL_SIZE] __attribute__ ((aligned(32))); - - /** - * This field indicates if this request is a task management request or - * normal IO request. - */ - bool is_task_management_request; - - /** - * This field indicates that this request contains an initialized started - * substate machine. - */ - bool has_started_substate_machine; - - /** - * This field is a pointer to the stored rx frame data. It is used in STP - * internal requests and SMP response frames. If this field is non-NULL the - * saved frame must be released on IO request completion. - * - * @todo In the future do we want to keep a list of RX frame buffers? - */ - u32 saved_rx_frame_index; - - /** - * This field specifies the data necessary to manage the sub-state - * machine executed while in the SCI_BASE_REQUEST_STATE_STARTED state. - */ - struct sci_base_state_machine started_substate_machine; - - /** - * This field specifies the current state handlers in place for this - * IO Request object. This field is updated each time the request - * changes state. - */ - const struct scic_sds_io_request_state_handler *state_handlers; - - /** - * This field in the recorded device sequence for the io request. This is - * recorded during the build operation and is compared in the start - * operation. If the sequence is different then there was a change of - * devices from the build to start operations. - */ - u8 device_sequence; - - union { - struct { - union { - struct ssp_cmd_iu cmd; - struct ssp_task_iu tmf; - }; - union { - struct ssp_response_iu rsp; - u8 rsp_buf[SSP_RESP_IU_MAX_SIZE]; - }; - } ssp; - - struct { - struct smp_req cmd; - struct smp_resp rsp; - } smp; - - struct { - struct scic_sds_stp_request req; - struct host_to_dev_fis cmd; - struct dev_to_host_fis rsp; - } stp; - }; - -}; - -static inline struct scic_sds_request *to_sci_req(struct scic_sds_stp_request *stp_req) -{ - struct scic_sds_request *sci_req; - - sci_req = container_of(stp_req, typeof(*sci_req), stp.req); - return sci_req; -} - -/** - * enum sci_base_request_states - This enumeration depicts all the states for - * the common request state machine. - * - * - */ -enum sci_base_request_states { - /** - * Simply the initial state for the base request state machine. - */ - SCI_BASE_REQUEST_STATE_INITIAL, - - /** - * This state indicates that the request has been constructed. This state - * is entered from the INITIAL state. - */ - SCI_BASE_REQUEST_STATE_CONSTRUCTED, - - /** - * This state indicates that the request has been started. This state is - * entered from the CONSTRUCTED state. - */ - SCI_BASE_REQUEST_STATE_STARTED, - - /** - * This state indicates that the request has completed. - * This state is entered from the STARTED state. This state is entered from - * the ABORTING state. - */ - SCI_BASE_REQUEST_STATE_COMPLETED, - - /** - * This state indicates that the request is in the process of being - * terminated/aborted. - * This state is entered from the CONSTRUCTED state. 
- * This state is entered from the STARTED state. - */ - SCI_BASE_REQUEST_STATE_ABORTING, - - /** - * Simply the final state for the base request state machine. - */ - SCI_BASE_REQUEST_STATE_FINAL, -}; - -typedef enum sci_status (*scic_sds_io_request_handler_t) - (struct scic_sds_request *request); -typedef enum sci_status (*scic_sds_io_request_frame_handler_t) - (struct scic_sds_request *req, u32 frame); -typedef enum sci_status (*scic_sds_io_request_event_handler_t) - (struct scic_sds_request *req, u32 event); -typedef enum sci_status (*scic_sds_io_request_task_completion_handler_t) - (struct scic_sds_request *req, u32 completion_code); - -/** - * struct scic_sds_io_request_state_handler - This is the SDS core definition - * of the state handlers. - * - * - */ -struct scic_sds_io_request_state_handler { - /** - * The start_handler specifies the method invoked when a user attempts to - * start a request. - */ - scic_sds_io_request_handler_t start_handler; - - /** - * The abort_handler specifies the method invoked when a user attempts to - * abort a request. - */ - scic_sds_io_request_handler_t abort_handler; - - /** - * The complete_handler specifies the method invoked when a user attempts to - * complete a request. - */ - scic_sds_io_request_handler_t complete_handler; - - scic_sds_io_request_task_completion_handler_t tc_completion_handler; - scic_sds_io_request_event_handler_t event_handler; - scic_sds_io_request_frame_handler_t frame_handler; - -}; - -extern const struct sci_base_state scic_sds_io_request_started_task_mgmt_substate_table[]; - -/** - * scic_sds_request_get_controller() - - * - * This macro will return the controller for this io request object - */ -#define scic_sds_request_get_controller(sci_req) \ - ((sci_req)->owning_controller) - -/** - * scic_sds_request_get_device() - - * - * This macro will return the device for this io request object - */ -#define scic_sds_request_get_device(sci_req) \ - ((sci_req)->target_device) - -/** - * scic_sds_request_get_port() - - * - * This macro will return the port for this io request object - */ -#define scic_sds_request_get_port(sci_req) \ - scic_sds_remote_device_get_port(scic_sds_request_get_device(sci_req)) - -/** - * scic_sds_request_get_post_context() - - * - * This macro returns the constructed post context result for the io request. - */ -#define scic_sds_request_get_post_context(sci_req) \ - ((sci_req)->post_context) - -/** - * scic_sds_request_get_task_context() - - * - * This is a helper macro to return the os handle for this request object. - */ -#define scic_sds_request_get_task_context(request) \ - ((request)->task_context_buffer) - -/** - * scic_sds_request_set_status() - - * - * This macro will set the scu hardware status and sci request completion - * status for an io request. 
- */ -#define scic_sds_request_set_status(request, scu_status_code, sci_status_code) \ - { \ - (request)->scu_status = (scu_status_code); \ - (request)->sci_status = (sci_status_code); \ - } - -#define scic_sds_request_complete(a_request) \ - ((a_request)->state_handlers->complete_handler(a_request)) - - -extern enum sci_status -scic_sds_io_request_tc_completion(struct scic_sds_request *request, u32 completion_code); - -/** - * SCU_SGL_ZERO() - - * - * This macro zeros the hardware SGL element data - */ -#define SCU_SGL_ZERO(scu_sge) \ - { \ - (scu_sge).length = 0; \ - (scu_sge).address_lower = 0; \ - (scu_sge).address_upper = 0; \ - (scu_sge).address_modifier = 0; \ - } - -/** - * SCU_SGL_COPY() - - * - * This macro copys the SGL Element data from the host os to the hardware SGL - * elment data - */ -#define SCU_SGL_COPY(scu_sge, os_sge) \ - { \ - (scu_sge).length = sg_dma_len(sg); \ - (scu_sge).address_upper = \ - upper_32_bits(sg_dma_address(sg)); \ - (scu_sge).address_lower = \ - lower_32_bits(sg_dma_address(sg)); \ - (scu_sge).address_modifier = 0; \ - } - -/** - * scic_sds_request_get_user_request() - - * - * This is a helper macro to return the os handle for this request object. - */ -#define scic_sds_request_get_user_request(request) \ - ((request)->user_request) - -/* - * ***************************************************************************** - * * CORE REQUEST PROTOTYPES - * ***************************************************************************** */ - -void scic_sds_request_build_sgl( - struct scic_sds_request *sci_req); - - - -void scic_sds_stp_request_assign_buffers( - struct scic_sds_request *sci_req); - -void scic_sds_smp_request_assign_buffers( - struct scic_sds_request *sci_req); - -/* --------------------------------------------------------------------------- */ - -enum sci_status scic_sds_request_start( - struct scic_sds_request *sci_req); - -enum sci_status scic_sds_io_request_terminate( - struct scic_sds_request *sci_req); - -enum sci_status scic_sds_io_request_complete( - struct scic_sds_request *sci_req); - -void scic_sds_io_request_copy_response( - struct scic_sds_request *sci_req); - -enum sci_status scic_sds_io_request_event_handler( - struct scic_sds_request *sci_req, - u32 event_code); - -enum sci_status scic_sds_io_request_frame_handler( - struct scic_sds_request *sci_req, - u32 frame_index); - - -enum sci_status scic_sds_task_request_terminate( - struct scic_sds_request *sci_req); - -/* - * ***************************************************************************** - * * STARTED STATE HANDLERS - * ***************************************************************************** */ - -enum sci_status scic_sds_request_started_state_abort_handler( - struct scic_sds_request *sci_req); - -enum sci_status scic_sds_request_started_state_tc_completion_handler( - struct scic_sds_request *sci_req, - u32 completion_code); - -#endif /* _SCIC_SDS_IO_REQUEST_H_ */ diff --git a/drivers/scsi/isci/core/scic_sds_smp_request.c b/drivers/scsi/isci/core/scic_sds_smp_request.c deleted file mode 100644 index 6a2b65b9cf0..00000000000 --- a/drivers/scsi/isci/core/scic_sds_smp_request.c +++ /dev/null @@ -1,520 +0,0 @@ -/* - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. 
- * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - * The full GNU General Public License is included in this distribution - * in the file called LICENSE.GPL. - * - * BSD LICENSE - * - * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#include <scsi/sas.h> -#include "state_machine.h" -#include "remote_device.h" -#include "scic_sds_request.h" -#include "scic_sds_smp_request.h" -#include "sci_util.h" -#include "scu_completion_codes.h" -#include "scu_task_context.h" -#include "host.h" - -static void scu_smp_request_construct_task_context( - struct scic_sds_request *sci_req, - struct smp_req *smp_req); - -void scic_sds_smp_request_assign_buffers(struct scic_sds_request *sci_req) -{ - if (sci_req->was_tag_assigned_by_user == false) - sci_req->task_context_buffer = &sci_req->tc; -} - -/* - * This function will fill in the SCU Task Context for a SMP request. The - * following important settings are utilized: -# task_type == - * SCU_TASK_TYPE_SMP. This simply indicates that a normal request type - * (i.e. non-raw frame) is being utilized to perform task management. -# - * control_frame == 1. This ensures that the proper endianess is set so - * that the bytes are transmitted in the right order for a smp request frame. - * @sci_req: This parameter specifies the smp request object being - * constructed. 
- *
- */
-static void
-scu_smp_request_construct_task_context(struct scic_sds_request *sci_req,
-				       struct smp_req *smp_req)
-{
-	dma_addr_t dma_addr;
-	struct scic_sds_controller *scic;
-	struct scic_sds_remote_device *sci_dev;
-	struct scic_sds_port *sci_port;
-	struct scu_task_context *task_context;
-	ssize_t word_cnt = sizeof(struct smp_req) / sizeof(u32);
-
-	/* byte swap the smp request. */
-	sci_swab32_cpy(&sci_req->smp.cmd, smp_req,
-		       word_cnt);
-
-	task_context = scic_sds_request_get_task_context(sci_req);
-
-	scic = scic_sds_request_get_controller(sci_req);
-	sci_dev = scic_sds_request_get_device(sci_req);
-	sci_port = scic_sds_request_get_port(sci_req);
-
-	/*
-	 * Fill in the TC with its required data
-	 * 00h
-	 */
-	task_context->priority = 0;
-	task_context->initiator_request = 1;
-	task_context->connection_rate = sci_dev->connection_rate;
-	task_context->protocol_engine_index =
-		scic_sds_controller_get_protocol_engine_group(scic);
-	task_context->logical_port_index = scic_sds_port_get_index(sci_port);
-	task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SMP;
-	task_context->abort = 0;
-	task_context->valid = SCU_TASK_CONTEXT_VALID;
-	task_context->context_type = SCU_TASK_CONTEXT_TYPE;
-
-	/* 04h */
-	task_context->remote_node_index = sci_dev->rnc.remote_node_index;
-	task_context->command_code = 0;
-	task_context->task_type = SCU_TASK_TYPE_SMP_REQUEST;
-
-	/* 08h */
-	task_context->link_layer_control = 0;
-	task_context->do_not_dma_ssp_good_response = 1;
-	task_context->strict_ordering = 0;
-	task_context->control_frame = 1;
-	task_context->timeout_enable = 0;
-	task_context->block_guard_enable = 0;
-
-	/* 0ch */
-	task_context->address_modifier = 0;
-
-	/* 10h */
-	task_context->ssp_command_iu_length = smp_req->req_len;
-
-	/* 14h */
-	task_context->transfer_length_bytes = 0;
-
-	/*
-	 * 18h ~ 30h, protocol specific
-	 * since the command IU has already been built by the framework at this
-	 * point, we just copy the first DWord from the command IU to this location. */
-	memcpy(&task_context->type.smp, &sci_req->smp.cmd, sizeof(u32));
-
-	/*
-	 * 40h
-	 * "For SMP you could program it to zero. We would prefer that way
-	 * so that done code will be consistent." - Venki
-	 */
-	task_context->task_phase = 0;
-
-	if (sci_req->was_tag_assigned_by_user) {
-		/*
-		 * Build the task context now since we have already read
-		 * the data
-		 */
-		sci_req->post_context =
-			(SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
-			 (scic_sds_controller_get_protocol_engine_group(scic) <<
-			  SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
-			 (scic_sds_port_get_index(sci_port) <<
-			  SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
-			 scic_sds_io_tag_get_index(sci_req->io_tag));
-	} else {
-		/*
-		 * Build the task context now since we have already read
-		 * the data.
-		 * I/O tag index is not assigned because we have to wait
-		 * until we get a TCi.
-		 */
-		sci_req->post_context =
-			(SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
-			 (scic_sds_controller_get_protocol_engine_group(scic) <<
-			  SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
-			 (scic_sds_port_get_index(sci_port) <<
-			  SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT));
-	}
-
-	/*
-	 * Copy the physical address of the command buffer to the SCU Task
-	 * Context.  The command buffer should not contain the command header.
- */ - dma_addr = scic_io_request_get_dma_addr(sci_req, - ((char *) &sci_req->smp.cmd) + - sizeof(u32)); - - task_context->command_iu_upper = upper_32_bits(dma_addr); - task_context->command_iu_lower = lower_32_bits(dma_addr); - - /* SMP response comes as UF, so no need to set response IU address. */ - task_context->response_iu_upper = 0; - task_context->response_iu_lower = 0; -} - -/* - * This function processes an unsolicited frame while the SMP request is waiting - * for a response frame. It will copy the response data, release the - * unsolicited frame, and transition the request to the - * SCI_BASE_REQUEST_STATE_COMPLETED state. - * @sci_req: This parameter specifies the request for which the - * unsolicited frame was received. - * @frame_index: This parameter indicates the unsolicited frame index that - * should contain the response. - * - * This function returns an indication of whether the response frame was handled - * successfully or not. SCI_SUCCESS Currently this value is always returned and - * indicates successful processing of the TC response. - */ -static enum sci_status -scic_sds_smp_request_await_response_frame_handler(struct scic_sds_request *sci_req, - u32 frame_index) -{ - enum sci_status status; - void *frame_header; - struct smp_resp *rsp_hdr = &sci_req->smp.rsp; - ssize_t word_cnt = SMP_RESP_HDR_SZ / sizeof(u32); - - status = scic_sds_unsolicited_frame_control_get_header( - &(scic_sds_request_get_controller(sci_req)->uf_control), - frame_index, - &frame_header); - - /* byte swap the header. */ - sci_swab32_cpy(rsp_hdr, frame_header, word_cnt); - - if (rsp_hdr->frame_type == SMP_RESPONSE) { - void *smp_resp; - - status = scic_sds_unsolicited_frame_control_get_buffer( - &(scic_sds_request_get_controller(sci_req)->uf_control), - frame_index, - &smp_resp); - - word_cnt = (sizeof(struct smp_req) - SMP_RESP_HDR_SZ) / - sizeof(u32); - - sci_swab32_cpy(((u8 *) rsp_hdr) + SMP_RESP_HDR_SZ, - smp_resp, word_cnt); - - scic_sds_request_set_status( - sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS); - - sci_base_state_machine_change_state( - &sci_req->started_substate_machine, - SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_TC_COMPLETION); - } else { - /* This was not a response frame why did it get forwarded? */ - dev_err(scic_to_dev(sci_req->owning_controller), - "%s: SCIC SMP Request 0x%p received unexpected frame " - "%d type 0x%02x\n", - __func__, - sci_req, - frame_index, - rsp_hdr->frame_type); - - scic_sds_request_set_status( - sci_req, - SCU_TASK_DONE_SMP_FRM_TYPE_ERR, - SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); - - sci_base_state_machine_change_state( - &sci_req->state_machine, - SCI_BASE_REQUEST_STATE_COMPLETED); - } - - scic_sds_controller_release_frame(sci_req->owning_controller, - frame_index); - - return SCI_SUCCESS; -} - - -/** - * This method processes an abnormal TC completion while the SMP request is - * waiting for a response frame. It decides what happened to the IO based - * on TC completion status. - * @sci_req: This parameter specifies the request for which the TC - * completion was received. - * @completion_code: This parameter indicates the completion status information - * for the TC. - * - * Indicate if the tc completion handler was successful. SCI_SUCCESS currently - * this method always returns success. 
- */
-static enum sci_status scic_sds_smp_request_await_response_tc_completion_handler(
-	struct scic_sds_request *sci_req,
-	u32 completion_code)
-{
-	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
-	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
-		/*
-		 * In the AWAIT RESPONSE state, any TC completion is unexpected,
-		 * but if the TC has a success status, we complete the IO anyway. */
-		scic_sds_request_set_status(
-			sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS
-			);
-
-		sci_base_state_machine_change_state(
-			&sci_req->state_machine,
-			SCI_BASE_REQUEST_STATE_COMPLETED);
-		break;
-
-	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_RESP_TO_ERR):
-	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_UFI_ERR):
-	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_FRM_TYPE_ERR):
-	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_LL_RX_ERR):
-		/*
-		 * These statuses have been seen with a specific LSI expander, which
-		 * sometimes is not able to send an SMP response within 2 ms.  This
-		 * causes our hardware to break the connection and set the TC completion
-		 * with one of these SMP_XXX_XX_ERR statuses.  For this type of error,
-		 * we ask the scic user to retry the request. */
-		scic_sds_request_set_status(
-			sci_req, SCU_TASK_DONE_SMP_RESP_TO_ERR, SCI_FAILURE_RETRY_REQUIRED
-			);
-
-		sci_base_state_machine_change_state(
-			&sci_req->state_machine,
-			SCI_BASE_REQUEST_STATE_COMPLETED);
-		break;
-
-	default:
-		/*
-		 * All other completion statuses cause the IO to be complete.  If a NAK
-		 * was received, then it is up to the user to retry the request. */
-		scic_sds_request_set_status(
-			sci_req,
-			SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
-			SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR
-			);
-
-		sci_base_state_machine_change_state(
-			&sci_req->state_machine,
-			SCI_BASE_REQUEST_STATE_COMPLETED);
-		break;
-	}
-
-	return SCI_SUCCESS;
-}
-
-
-/**
- * This method processes the completion's transport layer (TL) status to
- *    determine whether the SMP request was sent successfully. If the SMP
- *    request was sent successfully, then the state for the SMP request
- *    transitions to waiting for a response frame.
- * @sci_req: This parameter specifies the request for which the TC
- *    completion was received.
- * @completion_code: This parameter indicates the completion status information
- *    for the TC.
- *
- * Indicates whether the TC completion handler was successful. Currently this
- * method always returns SCI_SUCCESS.
- */
-static enum sci_status scic_sds_smp_request_await_tc_completion_tc_completion_handler(
-	struct scic_sds_request *sci_req,
-	u32 completion_code)
-{
-	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
-	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
-		scic_sds_request_set_status(
-			sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS
-			);
-
-		sci_base_state_machine_change_state(
-			&sci_req->state_machine,
-			SCI_BASE_REQUEST_STATE_COMPLETED);
-		break;
-
-	default:
-		/*
-		 * All other completion statuses cause the IO to be complete.  If a NAK
-		 * was received, then it is up to the user to retry the request.
*/ - scic_sds_request_set_status( - sci_req, - SCU_NORMALIZE_COMPLETION_STATUS(completion_code), - SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR - ); - - sci_base_state_machine_change_state( - &sci_req->state_machine, - SCI_BASE_REQUEST_STATE_COMPLETED); - break; - } - - return SCI_SUCCESS; -} - - -static const struct scic_sds_io_request_state_handler scic_sds_smp_request_started_substate_handler_table[] = { - [SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_RESPONSE] = { - .abort_handler = scic_sds_request_started_state_abort_handler, - .tc_completion_handler = scic_sds_smp_request_await_response_tc_completion_handler, - .frame_handler = scic_sds_smp_request_await_response_frame_handler, - }, - [SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_TC_COMPLETION] = { - .abort_handler = scic_sds_request_started_state_abort_handler, - .tc_completion_handler = scic_sds_smp_request_await_tc_completion_tc_completion_handler, - } -}; - -/** - * This method performs the actions required when entering the - * SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_TC_RESPONSE sub-state. This - * includes setting the IO request state handlers for this sub-state. - * @object: This parameter specifies the request object for which the sub-state - * change is occurring. - * - * none. - */ -static void scic_sds_smp_request_started_await_response_substate_enter( - void *object) -{ - struct scic_sds_request *sci_req = object; - - SET_STATE_HANDLER( - sci_req, - scic_sds_smp_request_started_substate_handler_table, - SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_RESPONSE - ); -} - -/** - * This method performs the actions required when entering the - * SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_TC_COMPLETION sub-state. - * This includes setting the SMP request state handlers for this sub-state. - * @object: This parameter specifies the request object for which the sub-state - * change is occurring. - * - * none. - */ -static void scic_sds_smp_request_started_await_tc_completion_substate_enter( - void *object) -{ - struct scic_sds_request *sci_req = object; - - SET_STATE_HANDLER( - sci_req, - scic_sds_smp_request_started_substate_handler_table, - SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_TC_COMPLETION - ); -} - -static const struct sci_base_state scic_sds_smp_request_started_substate_table[] = { - [SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_RESPONSE] = { - .enter_state = scic_sds_smp_request_started_await_response_substate_enter, - }, - [SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_TC_COMPLETION] = { - .enter_state = scic_sds_smp_request_started_await_tc_completion_substate_enter, - }, -}; - -/** - * This method is called by the SCI user to build an SMP IO request. - * - * - The user must have previously called scic_io_request_construct() on the - * supplied IO request. Indicate if the controller successfully built the IO - * request. SCI_SUCCESS This value is returned if the IO request was - * successfully built. SCI_FAILURE_UNSUPPORTED_PROTOCOL This value is returned - * if the remote_device does not support the SMP protocol. - * SCI_FAILURE_INVALID_ASSOCIATION This value is returned if the user did not - * properly set the association between the SCIC IO request and the user's IO - * request. 
- */ -enum sci_status scic_io_request_construct_smp(struct scic_sds_request *sci_req) -{ - struct smp_req *smp_req = kmalloc(sizeof(*smp_req), GFP_KERNEL); - - if (!smp_req) - return SCI_FAILURE_INSUFFICIENT_RESOURCES; - - sci_req->protocol = SCIC_SMP_PROTOCOL; - sci_req->has_started_substate_machine = true; - - /* Construct the started sub-state machine. */ - sci_base_state_machine_construct( - &sci_req->started_substate_machine, - sci_req, - scic_sds_smp_request_started_substate_table, - SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_RESPONSE - ); - - /* Construct the SMP SCU Task Context */ - memcpy(smp_req, &sci_req->smp.cmd, sizeof(*smp_req)); - - /* - * Look at the SMP requests' header fields; for certain SAS 1.x SMP - * functions under SAS 2.0, a zero request length really indicates - * a non-zero default length. */ - if (smp_req->req_len == 0) { - switch (smp_req->func) { - case SMP_DISCOVER: - case SMP_REPORT_PHY_ERR_LOG: - case SMP_REPORT_PHY_SATA: - case SMP_REPORT_ROUTE_INFO: - smp_req->req_len = 2; - break; - case SMP_CONF_ROUTE_INFO: - case SMP_PHY_CONTROL: - case SMP_PHY_TEST_FUNCTION: - smp_req->req_len = 9; - break; - /* Default - zero is a valid default for 2.0. */ - } - } - - scu_smp_request_construct_task_context(sci_req, smp_req); - - sci_base_state_machine_change_state(&sci_req->state_machine, - SCI_BASE_REQUEST_STATE_CONSTRUCTED); - - kfree(smp_req); - - return SCI_SUCCESS; -} diff --git a/drivers/scsi/isci/core/scic_sds_smp_request.h b/drivers/scsi/isci/core/scic_sds_smp_request.h deleted file mode 100644 index f432b7a182c..00000000000 --- a/drivers/scsi/isci/core/scic_sds_smp_request.h +++ /dev/null @@ -1,67 +0,0 @@ -/* - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - * The full GNU General Public License is included in this distribution - * in the file called LICENSE.GPL. - * - * BSD LICENSE - * - * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. 
- * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ -#ifndef _SCIC_SDS_SMP_REQUEST_T_ -#define _SCIC_SDS_SMP_REQUEST_T_ - -#include "scic_sds_request.h" - - -u32 scic_sds_smp_request_get_object_size(void); - - -void scic_sds_smp_request_copy_response(struct scic_sds_request *sci_req); - -#endif /* _SCIC_SDS_SMP_REQUEST_T_ */ - diff --git a/drivers/scsi/isci/core/scic_sds_ssp_request.c b/drivers/scsi/isci/core/scic_sds_ssp_request.c deleted file mode 100644 index 3fdf68be7a4..00000000000 --- a/drivers/scsi/isci/core/scic_sds_ssp_request.c +++ /dev/null @@ -1,240 +0,0 @@ -/* - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - * The full GNU General Public License is included in this distribution - * in the file called LICENSE.GPL. - * - * BSD LICENSE - * - * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#include "host.h" -#include "state_machine.h" -#include "scic_sds_request.h" -#include "scu_completion_codes.h" -#include "scu_task_context.h" - -/** - * This method processes the completions transport layer (TL) status to - * determine if the RAW task management frame was sent successfully. If the - * raw frame was sent successfully, then the state for the task request - * transitions to waiting for a response frame. - * @sci_req: This parameter specifies the request for which the TC - * completion was received. - * @completion_code: This parameter indicates the completion status information - * for the TC. - * - * Indicate if the tc completion handler was successful. SCI_SUCCESS currently - * this method always returns success. - */ -static enum sci_status scic_sds_ssp_task_request_await_tc_completion_tc_completion_handler( - struct scic_sds_request *sci_req, - u32 completion_code) -{ - switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { - case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): - scic_sds_request_set_status( - sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS - ); - - sci_base_state_machine_change_state( - &sci_req->started_substate_machine, - SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_RESPONSE - ); - break; - - case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO): - /* - * Currently, the decision is to simply allow the task request to - * timeout if the task IU wasn't received successfully. - * There is a potential for receiving multiple task responses if we - * decide to send the task IU again. */ - dev_warn(scic_to_dev(sci_req->owning_controller), - "%s: TaskRequest:0x%p CompletionCode:%x - " - "ACK/NAK timeout\n", - __func__, - sci_req, - completion_code); - - sci_base_state_machine_change_state( - &sci_req->started_substate_machine, - SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_RESPONSE - ); - break; - - default: - /* - * All other completion status cause the IO to be complete. If a NAK - * was received, then it is up to the user to retry the request. */ - scic_sds_request_set_status( - sci_req, - SCU_NORMALIZE_COMPLETION_STATUS(completion_code), - SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR - ); - - sci_base_state_machine_change_state(&sci_req->state_machine, - SCI_BASE_REQUEST_STATE_COMPLETED); - break; - } - - return SCI_SUCCESS; -} - -/** - * This method is responsible for processing a terminate/abort request for this - * TC while the request is waiting for the task management response - * unsolicited frame. - * @sci_req: This parameter specifies the request for which the - * termination was requested. - * - * This method returns an indication as to whether the abort request was - * successfully handled. need to update to ensure the received UF doesn't cause - * damage to subsequent requests (i.e. put the extended tag in a holding - * pattern for this particular device). 
- */ -static enum sci_status scic_sds_ssp_task_request_await_tc_response_abort_handler( - struct scic_sds_request *request) -{ - sci_base_state_machine_change_state(&request->state_machine, - SCI_BASE_REQUEST_STATE_ABORTING); - sci_base_state_machine_change_state(&request->state_machine, - SCI_BASE_REQUEST_STATE_COMPLETED); - return SCI_SUCCESS; -} - -/** - * This method processes an unsolicited frame while the task mgmt request is - * waiting for a response frame. It will copy the response data, release - * the unsolicited frame, and transition the request to the - * SCI_BASE_REQUEST_STATE_COMPLETED state. - * @sci_req: This parameter specifies the request for which the - * unsolicited frame was received. - * @frame_index: This parameter indicates the unsolicited frame index that - * should contain the response. - * - * This method returns an indication of whether the TC response frame was - * handled successfully or not. SCI_SUCCESS Currently this value is always - * returned and indicates successful processing of the TC response. Should - * probably update to check frame type and make sure it is a response frame. - */ -static enum sci_status scic_sds_ssp_task_request_await_tc_response_frame_handler( - struct scic_sds_request *request, - u32 frame_index) -{ - scic_sds_io_request_copy_response(request); - - sci_base_state_machine_change_state(&request->state_machine, - SCI_BASE_REQUEST_STATE_COMPLETED); - scic_sds_controller_release_frame(request->owning_controller, - frame_index); - return SCI_SUCCESS; -} - -static const struct scic_sds_io_request_state_handler scic_sds_ssp_task_request_started_substate_handler_table[] = { - [SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_COMPLETION] = { - .abort_handler = scic_sds_request_started_state_abort_handler, - .tc_completion_handler = scic_sds_ssp_task_request_await_tc_completion_tc_completion_handler, - }, - [SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_RESPONSE] = { - .abort_handler = scic_sds_ssp_task_request_await_tc_response_abort_handler, - .frame_handler = scic_sds_ssp_task_request_await_tc_response_frame_handler, - } -}; - -/** - * This method performs the actions required when entering the - * SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_COMPLETION - * sub-state. This includes setting the IO request state handlers for this - * sub-state. - * @object: This parameter specifies the request object for which the sub-state - * change is occurring. - * - * none. - */ -static void scic_sds_io_request_started_task_mgmt_await_tc_completion_substate_enter( - void *object) -{ - struct scic_sds_request *sci_req = object; - - SET_STATE_HANDLER( - sci_req, - scic_sds_ssp_task_request_started_substate_handler_table, - SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_COMPLETION - ); -} - -/** - * This method performs the actions required when entering the - * SCIC_SDS_IO_REQUEST_STARTED_SUBSTATE_AWAIT_TC_RESPONSE sub-state. This - * includes setting the IO request state handlers for this sub-state. - * @object: This parameter specifies the request object for which the sub-state - * change is occurring. - * - * none. 
- */ -static void scic_sds_io_request_started_task_mgmt_await_task_response_substate_enter( - void *object) -{ - struct scic_sds_request *sci_req = object; - - SET_STATE_HANDLER( - sci_req, - scic_sds_ssp_task_request_started_substate_handler_table, - SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_RESPONSE - ); -} - -const struct sci_base_state scic_sds_io_request_started_task_mgmt_substate_table[] = { - [SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_COMPLETION] = { - .enter_state = scic_sds_io_request_started_task_mgmt_await_tc_completion_substate_enter, - }, - [SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_RESPONSE] = { - .enter_state = scic_sds_io_request_started_task_mgmt_await_task_response_substate_enter, - }, -}; - diff --git a/drivers/scsi/isci/core/scic_sds_stp_packet_request.h b/drivers/scsi/isci/core/scic_sds_stp_packet_request.h deleted file mode 100644 index e94d689e510..00000000000 --- a/drivers/scsi/isci/core/scic_sds_stp_packet_request.h +++ /dev/null @@ -1,113 +0,0 @@ -/* - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - * The full GNU General Public License is included in this distribution - * in the file called LICENSE.GPL. - * - * BSD LICENSE - * - * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-#ifndef _SCIC_SDS_STP_PACKET_REQUEST_H_
-#define _SCIC_SDS_STP_PACKET_REQUEST_H_
-
-#include "scic_sds_stp_request.h"
-
-/**
- * This file contains the structures and constants for PACKET protocol requests.
- *
- *
- */
-
-
-/**
- *
- *
- * This is the enumeration of the started substates for the SATA PACKET
- * protocol request.
- */
-enum _scic_sds_stp_packet_request_started_substates {
-	/**
-	 * While in this state the IO request object is waiting for the TC completion
-	 * notification for the H2D Register FIS
-	 */
-	SCIC_SDS_STP_PACKET_REQUEST_STARTED_PACKET_PHASE_AWAIT_TC_COMPLETION_SUBSTATE,
-
-	/**
-	 * While in this state the IO request object is waiting for a PIO Setup FIS.
-	 */
-	SCIC_SDS_STP_PACKET_REQUEST_STARTED_PACKET_PHASE_AWAIT_PIO_SETUP_SUBSTATE,
-
-	/**
-	 * While in this state the IO request object is waiting for the TC completion
-	 * of the Packet DMA DATA FIS or Raw Frame.
-	 */
-	SCIC_SDS_STP_PACKET_REQUEST_STARTED_COMMAND_PHASE_AWAIT_TC_COMPLETION_SUBSTATE,
-
-	/**
-	 * Non-data IO requests transition to this state after receiving the TC
-	 * completion. While in this state the IO request object is waiting for the
-	 * D2H status frame to arrive as an unsolicited frame (UF).
-	 */
-	SCIC_SDS_STP_PACKET_REQUEST_STARTED_COMMAND_PHASE_AWAIT_D2H_FIS_SUBSTATE,
-
-	/**
-	 * The IO transitions to this state if the previous TC completion status was
-	 * not success and the ATAPI device is suspended because the target device
-	 * failed the IO.  While in this state the IO request object waits for the
-	 * device to come out of the suspension state and then completes the IO.
-	 */
-	SCIC_SDS_STP_PACKET_REQUEST_STARTED_COMPLETION_DELAY_SUBSTATE,
-};
-
-#define scic_sds_stp_packet_request_construct(request) SCI_FAILURE
-#define scu_stp_packet_request_command_phase_construct_task_context(request, tc)
-#define scu_stp_packet_request_command_phase_reconstruct_raw_frame_task_context(request, tc)
-#define scic_sds_stp_packet_request_process_status_fis(request, fis) SCI_FAILURE
-#define scic_sds_stp_packet_internal_request_sense_build_sgl(request)
-
-#endif /* _SCIC_SDS_STP_PACKET_REQUEST_H_ */
-
diff --git a/drivers/scsi/isci/core/scic_sds_stp_pio_request.h b/drivers/scsi/isci/core/scic_sds_stp_pio_request.h
deleted file mode 100644
index e015a111476..00000000000
--- a/drivers/scsi/isci/core/scic_sds_stp_pio_request.h
+++ /dev/null
@@ -1,104 +0,0 @@
-/*
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - * The full GNU General Public License is included in this distribution - * in the file called LICENSE.GPL. - * - * BSD LICENSE - * - * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#ifndef _SCIC_SDS_SATA_PIO_REQUEST_H_ -#define _SCIC_SDS_SATA_PIO_REQUEST_H_ - -#include "scic_sds_request.h" -#include "scu_task_context.h" - -/** - * This file contains the structures and constants for SATA PIO requests. - * - * - */ - - -/** - * - * - * This is the enumeration of the SATA PIO DATA IN started substate machine. - */ -enum _scic_sds_stp_request_started_pio_substates { - /** - * While in this state the IO request object is waiting for the TC completion - * notification for the H2D Register FIS - */ - SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_H2D_COMPLETION_SUBSTATE, - - /** - * While in this state the IO request object is waiting for either a PIO Setup - * FIS or a D2H register FIS. The type of frame received is based on the - * result of the prior frame and line conditions. - */ - SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE, - - /** - * While in this state the IO request object is waiting for a DATA frame from - * the device. - */ - SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_DATA_SUBSTATE, - - /** - * While in this state the IO request object is waiting to transmit the next data - * frame to the device. 
- */ - SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_OUT_TRANSMIT_DATA_SUBSTATE, -}; - -struct scic_sds_stp_request; - - -#endif /* _SCIC_SDS_SATA_PIO_REQUEST_H_ */ diff --git a/drivers/scsi/isci/core/scic_sds_stp_request.c b/drivers/scsi/isci/core/scic_sds_stp_request.c deleted file mode 100644 index 308f486514e..00000000000 --- a/drivers/scsi/isci/core/scic_sds_stp_request.c +++ /dev/null @@ -1,1594 +0,0 @@ -/* - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - * The full GNU General Public License is included in this distribution - * in the file called LICENSE.GPL. - * - * BSD LICENSE - * - * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - -#include <scsi/sas.h> -#include "sas.h" -#include "state_machine.h" -#include "scic_io_request.h" -#include "remote_device.h" -#include "scic_sds_request.h" -#include "scic_sds_stp_pio_request.h" -#include "scic_sds_stp_request.h" -#include "unsolicited_frame_control.h" -#include "sci_util.h" -#include "scu_completion_codes.h" -#include "scu_event_codes.h" -#include "scu_task_context.h" -#include "request.h" - -void scic_sds_stp_request_assign_buffers(struct scic_sds_request *sci_req) -{ - if (sci_req->was_tag_assigned_by_user == false) - sci_req->task_context_buffer = &sci_req->tc; -} - -/** - * This method is will fill in the SCU Task Context for any type of SATA - * request. This is called from the various SATA constructors. - * @sci_req: The general IO request object which is to be used in - * constructing the SCU task context. - * @task_context: The buffer pointer for the SCU task context which is being - * constructed. - * - * The general io request construction is complete. The buffer assignment for - * the command buffer is complete. none Revisit task context construction to - * determine what is common for SSP/SMP/STP task context structures. - */ -static void scu_sata_reqeust_construct_task_context( - struct scic_sds_request *sci_req, - struct scu_task_context *task_context) -{ - dma_addr_t dma_addr; - struct scic_sds_controller *controller; - struct scic_sds_remote_device *target_device; - struct scic_sds_port *target_port; - - controller = scic_sds_request_get_controller(sci_req); - target_device = scic_sds_request_get_device(sci_req); - target_port = scic_sds_request_get_port(sci_req); - - /* Fill in the TC with the its required data */ - task_context->abort = 0; - task_context->priority = SCU_TASK_PRIORITY_NORMAL; - task_context->initiator_request = 1; - task_context->connection_rate = target_device->connection_rate; - task_context->protocol_engine_index = - scic_sds_controller_get_protocol_engine_group(controller); - task_context->logical_port_index = - scic_sds_port_get_index(target_port); - task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_STP; - task_context->valid = SCU_TASK_CONTEXT_VALID; - task_context->context_type = SCU_TASK_CONTEXT_TYPE; - - task_context->remote_node_index = - scic_sds_remote_device_get_index(sci_req->target_device); - task_context->command_code = 0; - - task_context->link_layer_control = 0; - task_context->do_not_dma_ssp_good_response = 1; - task_context->strict_ordering = 0; - task_context->control_frame = 0; - task_context->timeout_enable = 0; - task_context->block_guard_enable = 0; - - task_context->address_modifier = 0; - task_context->task_phase = 0x01; - - task_context->ssp_command_iu_length = - (sizeof(struct host_to_dev_fis) - sizeof(u32)) / sizeof(u32); - - /* Set the first word of the H2D REG FIS */ - task_context->type.words[0] = *(u32 *)&sci_req->stp.cmd; - - if (sci_req->was_tag_assigned_by_user) { - /* - * Build the task context now since we have already read - * the data - */ - sci_req->post_context = - (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC | - (scic_sds_controller_get_protocol_engine_group( - controller) << - SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) | - (scic_sds_port_get_index(target_port) << - SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) | - scic_sds_io_tag_get_index(sci_req->io_tag)); - } else { - /* - * Build the task context now since we have already read - * the data. - * I/O tag index is not assigned because we have to wait - * until we get a TCi. 
- */
-		sci_req->post_context =
-			(SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
-			 (scic_sds_controller_get_protocol_engine_group(
-							controller) <<
-			  SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
-			 (scic_sds_port_get_index(target_port) <<
-			  SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT));
-	}
-
-	/*
-	 * Copy the physical address for the command buffer to the SCU Task
-	 * Context. We must offset the command buffer by 4 bytes because the
-	 * first 4 bytes are transferred in the body of the TC.
-	 */
-	dma_addr = scic_io_request_get_dma_addr(sci_req,
-						((char *) &sci_req->stp.cmd) +
-						sizeof(u32));
-
-	task_context->command_iu_upper = upper_32_bits(dma_addr);
-	task_context->command_iu_lower = lower_32_bits(dma_addr);
-
-	/* SATA Requests do not have a response buffer */
-	task_context->response_iu_upper = 0;
-	task_context->response_iu_lower = 0;
-}
-
-/**
- *
- * @sci_req:
- *
- * This method will perform any request construction that is general (i.e.
- * common) to all SATA requests.
- */
-static void scic_sds_stp_non_ncq_request_construct(
-	struct scic_sds_request *sci_req)
-{
-	sci_req->has_started_substate_machine = true;
-}
-
-/**
- *
- * @sci_req: This parameter specifies the request to be constructed as an
- *    optimized request.
- * @optimized_task_type: This parameter specifies whether the request is to be
- *    a UDMA request or an NCQ request. - A value of 0 indicates UDMA. - A
- *    value of 1 indicates NCQ.
- *
- * This method will perform request construction common to all types of STP
- * requests that are optimized by the silicon (i.e. UDMA, NCQ). This method
- * returns an indication as to whether the construction was successful.
- */
-static void scic_sds_stp_optimized_request_construct(struct scic_sds_request *sci_req,
-						     u8 optimized_task_type,
-						     u32 len,
-						     enum dma_data_direction dir)
-{
-	struct scu_task_context *task_context = sci_req->task_context_buffer;
-
-	/* Build the STP task context structure */
-	scu_sata_reqeust_construct_task_context(sci_req, task_context);
-
-	/* Copy over the SGL elements */
-	scic_sds_request_build_sgl(sci_req);
-
-	/* Copy over the number of bytes to be transferred */
-	task_context->transfer_length_bytes = len;
-
-	if (dir == DMA_TO_DEVICE) {
-		/*
-		 * The difference between the DMA IN and DMA OUT request task type
-		 * values is consistent with the difference between the FPDMA READ
-		 * and FPDMA WRITE values. Add the supplied task type parameter
-		 * to this difference to set the task type properly for this
-		 * DATA OUT (WRITE) case. */
-		task_context->task_type = optimized_task_type + (SCU_TASK_TYPE_DMA_OUT
-								 - SCU_TASK_TYPE_DMA_IN);
-	} else {
-		/*
-		 * For the DATA IN (READ) case, simply save the supplied
-		 * optimized task type. */
-		task_context->task_type = optimized_task_type;
-	}
-}
-
-/**
- *
- * @sci_req: This parameter specifies the request to be constructed.
- *
- * This method will construct the STP NCQ (FPDMA queued) request and its
- * associated TC data. This method returns an indication as to whether the
- * construction was successful; currently it always returns SCI_SUCCESS.
- */
-enum sci_status scic_sds_stp_ncq_request_construct(struct scic_sds_request *sci_req,
-						   u32 len,
-						   enum dma_data_direction dir)
-{
-	scic_sds_stp_optimized_request_construct(sci_req,
-						 SCU_TASK_TYPE_FPDMAQ_READ,
-						 len, dir);
-	return SCI_SUCCESS;
-}
-
-/**
- * scu_stp_raw_request_construct_task_context -
- * @sci_req: This parameter specifies the STP request object for which to
- *    construct a RAW command frame task context.
- * @task_context: This parameter specifies the SCU specific task context buffer - * to construct. - * - * This method performs the operations common to all SATA/STP requests - * utilizing the raw frame method. none - */ -static void scu_stp_raw_request_construct_task_context( - struct scic_sds_stp_request *stp_req, - struct scu_task_context *task_context) -{ - struct scic_sds_request *sci_req = to_sci_req(stp_req); - - scu_sata_reqeust_construct_task_context(sci_req, task_context); - - task_context->control_frame = 0; - task_context->priority = SCU_TASK_PRIORITY_NORMAL; - task_context->task_type = SCU_TASK_TYPE_SATA_RAW_FRAME; - task_context->type.stp.fis_type = FIS_REGH2D; - task_context->transfer_length_bytes = sizeof(struct host_to_dev_fis) - sizeof(u32); -} - -void scic_stp_io_request_set_ncq_tag( - struct scic_sds_request *req, - u16 ncq_tag) -{ - /** - * @note This could be made to return an error to the user if the user - * attempts to set the NCQ tag in the wrong state. - */ - req->task_context_buffer->type.stp.ncq_tag = ncq_tag; -} - -/** - * - * @sci_req: - * - * Get the next SGL element from the request. - Check on which SGL element pair - * we are working - if working on SLG pair element A - advance to element B - - * else - check to see if there are more SGL element pairs for this IO request - * - if there are more SGL element pairs - advance to the next pair and return - * element A struct scu_sgl_element* - */ -static struct scu_sgl_element *scic_sds_stp_request_pio_get_next_sgl(struct scic_sds_stp_request *stp_req) -{ - struct scu_sgl_element *current_sgl; - struct scic_sds_request *sci_req = to_sci_req(stp_req); - struct scic_sds_request_pio_sgl *pio_sgl = &stp_req->type.pio.request_current; - - if (pio_sgl->sgl_set == SCU_SGL_ELEMENT_PAIR_A) { - if (pio_sgl->sgl_pair->B.address_lower == 0 && - pio_sgl->sgl_pair->B.address_upper == 0) { - current_sgl = NULL; - } else { - pio_sgl->sgl_set = SCU_SGL_ELEMENT_PAIR_B; - current_sgl = &pio_sgl->sgl_pair->B; - } - } else { - if (pio_sgl->sgl_pair->next_pair_lower == 0 && - pio_sgl->sgl_pair->next_pair_upper == 0) { - current_sgl = NULL; - } else { - u64 phys_addr; - - phys_addr = pio_sgl->sgl_pair->next_pair_upper; - phys_addr <<= 32; - phys_addr |= pio_sgl->sgl_pair->next_pair_lower; - - pio_sgl->sgl_pair = scic_request_get_virt_addr(sci_req, phys_addr); - pio_sgl->sgl_set = SCU_SGL_ELEMENT_PAIR_A; - current_sgl = &pio_sgl->sgl_pair->A; - } - } - - return current_sgl; -} - -/** - * - * @sci_req: - * @completion_code: - * - * This method processes a TC completion. The expected TC completion is for - * the transmission of the H2D register FIS containing the SATA/STP non-data - * request. This method always successfully processes the TC completion. - * SCI_SUCCESS This value is always returned. - */ -static enum sci_status scic_sds_stp_request_non_data_await_h2d_tc_completion_handler( - struct scic_sds_request *sci_req, - u32 completion_code) -{ - switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { - case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): - scic_sds_request_set_status( - sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS - ); - - sci_base_state_machine_change_state( - &sci_req->started_substate_machine, - SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_D2H_SUBSTATE - ); - break; - - default: - /* - * All other completion status cause the IO to be complete. If a NAK - * was received, then it is up to the user to retry the request. 
*/ - scic_sds_request_set_status( - sci_req, - SCU_NORMALIZE_COMPLETION_STATUS(completion_code), - SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR - ); - - sci_base_state_machine_change_state( - &sci_req->state_machine, SCI_BASE_REQUEST_STATE_COMPLETED); - break; - } - - return SCI_SUCCESS; -} - -/** - * - * @request: This parameter specifies the request for which a frame has been - * received. - * @frame_index: This parameter specifies the index of the frame that has been - * received. - * - * This method processes frames received from the target while waiting for a - * device to host register FIS. If a non-register FIS is received during this - * time, it is treated as a protocol violation from an IO perspective. Indicate - * if the received frame was processed successfully. - */ -static enum sci_status scic_sds_stp_request_non_data_await_d2h_frame_handler( - struct scic_sds_request *sci_req, - u32 frame_index) -{ - enum sci_status status; - struct dev_to_host_fis *frame_header; - u32 *frame_buffer; - struct scic_sds_stp_request *stp_req = &sci_req->stp.req; - struct scic_sds_controller *scic = sci_req->owning_controller; - - status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control, - frame_index, - (void **)&frame_header); - - if (status != SCI_SUCCESS) { - dev_err(scic_to_dev(sci_req->owning_controller), - "%s: SCIC IO Request 0x%p could not get frame header " - "for frame index %d, status %x\n", - __func__, stp_req, frame_index, status); - - return status; - } - - switch (frame_header->fis_type) { - case FIS_REGD2H: - scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control, - frame_index, - (void **)&frame_buffer); - - scic_sds_controller_copy_sata_response(&sci_req->stp.rsp, - frame_header, - frame_buffer); - - /* The command has completed with error */ - scic_sds_request_set_status(sci_req, SCU_TASK_DONE_CHECK_RESPONSE, - SCI_FAILURE_IO_RESPONSE_VALID); - break; - - default: - dev_warn(scic_to_dev(scic), - "%s: IO Request:0x%p Frame Id:%d protocol " - "violation occurred\n", __func__, stp_req, - frame_index); - - scic_sds_request_set_status(sci_req, SCU_TASK_DONE_UNEXP_FIS, - SCI_FAILURE_PROTOCOL_VIOLATION); - break; - } - - sci_base_state_machine_change_state(&sci_req->state_machine, - SCI_BASE_REQUEST_STATE_COMPLETED); - - /* Frame has been decoded return it to the controller */ - scic_sds_controller_release_frame(scic, frame_index); - - return status; -} - -/* --------------------------------------------------------------------------- */ - -static const struct scic_sds_io_request_state_handler scic_sds_stp_request_started_non_data_substate_handler_table[] = { - [SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_H2D_COMPLETION_SUBSTATE] = { - .abort_handler = scic_sds_request_started_state_abort_handler, - .tc_completion_handler = scic_sds_stp_request_non_data_await_h2d_tc_completion_handler, - }, - [SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_D2H_SUBSTATE] = { - .abort_handler = scic_sds_request_started_state_abort_handler, - .frame_handler = scic_sds_stp_request_non_data_await_d2h_frame_handler, - } -}; - -static void scic_sds_stp_request_started_non_data_await_h2d_completion_enter( - void *object) -{ - struct scic_sds_request *sci_req = object; - - SET_STATE_HANDLER( - sci_req, - scic_sds_stp_request_started_non_data_substate_handler_table, - SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_H2D_COMPLETION_SUBSTATE - ); - - scic_sds_remote_device_set_working_request( - sci_req->target_device, sci_req - ); -} - -static void 
scic_sds_stp_request_started_non_data_await_d2h_enter(void *object)
-{
-	struct scic_sds_request *sci_req = object;
-
-	SET_STATE_HANDLER(
-		sci_req,
-		scic_sds_stp_request_started_non_data_substate_handler_table,
-		SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_D2H_SUBSTATE
-		);
-}
-
-/* --------------------------------------------------------------------------- */
-
-static const struct sci_base_state scic_sds_stp_request_started_non_data_substate_table[] = {
-	[SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_H2D_COMPLETION_SUBSTATE] = {
-		.enter_state = scic_sds_stp_request_started_non_data_await_h2d_completion_enter,
-	},
-	[SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_D2H_SUBSTATE] = {
-		.enter_state = scic_sds_stp_request_started_non_data_await_d2h_enter,
-	},
-};
-
-enum sci_status scic_sds_stp_non_data_request_construct(struct scic_sds_request *sci_req)
-{
-	struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
-
-	scic_sds_stp_non_ncq_request_construct(sci_req);
-
-	/* Build the STP task context structure */
-	scu_stp_raw_request_construct_task_context(stp_req, sci_req->task_context_buffer);
-
-	sci_base_state_machine_construct(&sci_req->started_substate_machine,
-					 sci_req,
-					 scic_sds_stp_request_started_non_data_substate_table,
-					 SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_H2D_COMPLETION_SUBSTATE);
-
-	return SCI_SUCCESS;
-}
-
-#define SCU_MAX_FRAME_BUFFER_SIZE	0x400 /* 1K is the maximum SCU frame data payload */
-
-/* transmit DATA_FIS from (current sgl + offset) for input
- * parameter length. The current sgl and offset are already stored in the IO request.
- */
-static enum sci_status scic_sds_stp_request_pio_data_out_trasmit_data_frame(
-	struct scic_sds_request *sci_req,
-	u32 length)
-{
-	struct scic_sds_controller *scic = sci_req->owning_controller;
-	struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
-	struct scu_task_context *task_context;
-	struct scu_sgl_element *current_sgl;
-
-	/* Recycle the TC and reconstruct it for sending out a DATA FIS containing
-	 * the data from current_sgl+offset for the input length.
-	 */
-	task_context = scic_sds_controller_get_task_context_buffer(scic,
-								    sci_req->io_tag);
-
-	if (stp_req->type.pio.request_current.sgl_set == SCU_SGL_ELEMENT_PAIR_A)
-		current_sgl = &stp_req->type.pio.request_current.sgl_pair->A;
-	else
-		current_sgl = &stp_req->type.pio.request_current.sgl_pair->B;
-
-	/* update the TC */
-	task_context->command_iu_upper = current_sgl->address_upper;
-	task_context->command_iu_lower = current_sgl->address_lower;
-	task_context->transfer_length_bytes = length;
-	task_context->type.stp.fis_type = FIS_DATA;
-
-	/* send the new TC out.
*/ - return scic_controller_continue_io(sci_req); -} - -static enum sci_status scic_sds_stp_request_pio_data_out_transmit_data(struct scic_sds_request *sci_req) -{ - - struct scu_sgl_element *current_sgl; - u32 sgl_offset; - u32 remaining_bytes_in_current_sgl = 0; - enum sci_status status = SCI_SUCCESS; - struct scic_sds_stp_request *stp_req = &sci_req->stp.req; - - sgl_offset = stp_req->type.pio.request_current.sgl_offset; - - if (stp_req->type.pio.request_current.sgl_set == SCU_SGL_ELEMENT_PAIR_A) { - current_sgl = &(stp_req->type.pio.request_current.sgl_pair->A); - remaining_bytes_in_current_sgl = stp_req->type.pio.request_current.sgl_pair->A.length - sgl_offset; - } else { - current_sgl = &(stp_req->type.pio.request_current.sgl_pair->B); - remaining_bytes_in_current_sgl = stp_req->type.pio.request_current.sgl_pair->B.length - sgl_offset; - } - - - if (stp_req->type.pio.pio_transfer_bytes > 0) { - if (stp_req->type.pio.pio_transfer_bytes >= remaining_bytes_in_current_sgl) { - /* recycle the TC and send the H2D Data FIS from (current sgl + sgl_offset) and length = remaining_bytes_in_current_sgl */ - status = scic_sds_stp_request_pio_data_out_trasmit_data_frame(sci_req, remaining_bytes_in_current_sgl); - if (status == SCI_SUCCESS) { - stp_req->type.pio.pio_transfer_bytes -= remaining_bytes_in_current_sgl; - - /* update the current sgl, sgl_offset and save for future */ - current_sgl = scic_sds_stp_request_pio_get_next_sgl(stp_req); - sgl_offset = 0; - } - } else if (stp_req->type.pio.pio_transfer_bytes < remaining_bytes_in_current_sgl) { - /* recycle the TC and send the H2D Data FIS from (current sgl + sgl_offset) and length = type.pio.pio_transfer_bytes */ - scic_sds_stp_request_pio_data_out_trasmit_data_frame(sci_req, stp_req->type.pio.pio_transfer_bytes); - - if (status == SCI_SUCCESS) { - /* Sgl offset will be adjusted and saved for future */ - sgl_offset += stp_req->type.pio.pio_transfer_bytes; - current_sgl->address_lower += stp_req->type.pio.pio_transfer_bytes; - stp_req->type.pio.pio_transfer_bytes = 0; - } - } - } - - if (status == SCI_SUCCESS) { - stp_req->type.pio.request_current.sgl_offset = sgl_offset; - } - - return status; -} - -/** - * - * @stp_request: The request that is used for the SGL processing. - * @data_buffer: The buffer of data to be copied. - * @length: The length of the data transfer. - * - * Copy the data from the buffer for the length specified to the IO reqeust SGL - * specified data region. enum sci_status - */ -static enum sci_status -scic_sds_stp_request_pio_data_in_copy_data_buffer(struct scic_sds_stp_request *stp_req, - u8 *data_buf, u32 len) -{ - struct scic_sds_request *sci_req; - struct isci_request *ireq; - u8 *src_addr; - int copy_len; - struct sas_task *task; - struct scatterlist *sg; - void *kaddr; - int total_len = len; - - sci_req = to_sci_req(stp_req); - ireq = sci_req_to_ireq(sci_req); - task = isci_request_access_task(ireq); - src_addr = data_buf; - - if (task->num_scatter > 0) { - sg = task->scatter; - - while (total_len > 0) { - struct page *page = sg_page(sg); - - copy_len = min_t(int, total_len, sg_dma_len(sg)); - kaddr = kmap_atomic(page, KM_IRQ0); - memcpy(kaddr + sg->offset, src_addr, copy_len); - kunmap_atomic(kaddr, KM_IRQ0); - total_len -= copy_len; - src_addr += copy_len; - sg = sg_next(sg); - } - } else { - BUG_ON(task->total_xfer_len < total_len); - memcpy(task->scatter, src_addr, total_len); - } - - return SCI_SUCCESS; -} - -/** - * - * @sci_req: The PIO DATA IN request that is to receive the data. 
- * @data_buffer: The buffer to copy from. - * - * Copy the data buffer to the io request data region. enum sci_status - */ -static enum sci_status scic_sds_stp_request_pio_data_in_copy_data( - struct scic_sds_stp_request *sci_req, - u8 *data_buffer) -{ - enum sci_status status; - - /* - * If there is less than 1K remaining in the transfer request - * copy just the data for the transfer */ - if (sci_req->type.pio.pio_transfer_bytes < SCU_MAX_FRAME_BUFFER_SIZE) { - status = scic_sds_stp_request_pio_data_in_copy_data_buffer( - sci_req, data_buffer, sci_req->type.pio.pio_transfer_bytes); - - if (status == SCI_SUCCESS) - sci_req->type.pio.pio_transfer_bytes = 0; - } else { - /* We are transfering the whole frame so copy */ - status = scic_sds_stp_request_pio_data_in_copy_data_buffer( - sci_req, data_buffer, SCU_MAX_FRAME_BUFFER_SIZE); - - if (status == SCI_SUCCESS) - sci_req->type.pio.pio_transfer_bytes -= SCU_MAX_FRAME_BUFFER_SIZE; - } - - return status; -} - -/** - * - * @sci_req: - * @completion_code: - * - * enum sci_status - */ -static enum sci_status scic_sds_stp_request_pio_await_h2d_completion_tc_completion_handler( - struct scic_sds_request *sci_req, - u32 completion_code) -{ - enum sci_status status = SCI_SUCCESS; - - switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { - case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): - scic_sds_request_set_status( - sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS - ); - - sci_base_state_machine_change_state( - &sci_req->started_substate_machine, - SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE - ); - break; - - default: - /* - * All other completion status cause the IO to be complete. If a NAK - * was received, then it is up to the user to retry the request. */ - scic_sds_request_set_status( - sci_req, - SCU_NORMALIZE_COMPLETION_STATUS(completion_code), - SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR - ); - - sci_base_state_machine_change_state( - &sci_req->state_machine, - SCI_BASE_REQUEST_STATE_COMPLETED - ); - break; - } - - return status; -} - -static enum sci_status scic_sds_stp_request_pio_await_frame_frame_handler(struct scic_sds_request *sci_req, - u32 frame_index) -{ - struct scic_sds_controller *scic = sci_req->owning_controller; - struct scic_sds_stp_request *stp_req = &sci_req->stp.req; - struct isci_request *ireq = sci_req_to_ireq(sci_req); - struct sas_task *task = isci_request_access_task(ireq); - struct dev_to_host_fis *frame_header; - enum sci_status status; - u32 *frame_buffer; - - status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control, - frame_index, - (void **)&frame_header); - - if (status != SCI_SUCCESS) { - dev_err(scic_to_dev(scic), - "%s: SCIC IO Request 0x%p could not get frame header " - "for frame index %d, status %x\n", - __func__, stp_req, frame_index, status); - return status; - } - - switch (frame_header->fis_type) { - case FIS_PIO_SETUP: - /* Get from the frame buffer the PIO Setup Data */ - scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control, - frame_index, - (void **)&frame_buffer); - - /* Get the data from the PIO Setup The SCU Hardware returns - * first word in the frame_header and the rest of the data is in - * the frame buffer so we need to back up one dword - */ - - /* transfer_count: first 16bits in the 4th dword */ - stp_req->type.pio.pio_transfer_bytes = frame_buffer[3] & 0xffff; - - /* ending_status: 4th byte in the 3rd dword */ - stp_req->type.pio.ending_status = (frame_buffer[2] >> 24) & 0xff; - - scic_sds_controller_copy_sata_response(&sci_req->stp.rsp, - frame_header, - 
frame_buffer); - - sci_req->stp.rsp.status = stp_req->type.pio.ending_status; - - /* The next state is dependent on whether the - * request was PIO Data-in or Data out - */ - if (task->data_dir == DMA_FROM_DEVICE) { - sci_base_state_machine_change_state(&sci_req->started_substate_machine, - SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_DATA_SUBSTATE); - } else if (task->data_dir == DMA_TO_DEVICE) { - /* Transmit data */ - status = scic_sds_stp_request_pio_data_out_transmit_data(sci_req); - if (status != SCI_SUCCESS) - break; - sci_base_state_machine_change_state(&sci_req->started_substate_machine, - SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_OUT_TRANSMIT_DATA_SUBSTATE); - } - break; - case FIS_SETDEVBITS: - sci_base_state_machine_change_state(&sci_req->started_substate_machine, - SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE); - break; - case FIS_REGD2H: - if (frame_header->status & ATA_BUSY) { - /* Now why is the drive sending a D2H Register FIS when - * it is still busy? Do nothing since we are still in - * the right state. - */ - dev_dbg(scic_to_dev(scic), - "%s: SCIC PIO Request 0x%p received " - "D2H Register FIS with BSY status " - "0x%x\n", __func__, stp_req, - frame_header->status); - break; - } - - scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control, - frame_index, - (void **)&frame_buffer); - - scic_sds_controller_copy_sata_response(&sci_req->stp.req, - frame_header, - frame_buffer); - - scic_sds_request_set_status(sci_req, - SCU_TASK_DONE_CHECK_RESPONSE, - SCI_FAILURE_IO_RESPONSE_VALID); - - sci_base_state_machine_change_state(&sci_req->state_machine, - SCI_BASE_REQUEST_STATE_COMPLETED); - break; - default: - /* FIXME: what do we do here? */ - break; - } - - /* Frame is decoded return it to the controller */ - scic_sds_controller_release_frame(scic, frame_index); - - return status; -} - -static enum sci_status scic_sds_stp_request_pio_data_in_await_data_frame_handler(struct scic_sds_request *sci_req, - u32 frame_index) -{ - enum sci_status status; - struct dev_to_host_fis *frame_header; - struct sata_fis_data *frame_buffer; - struct scic_sds_stp_request *stp_req = &sci_req->stp.req; - struct scic_sds_controller *scic = sci_req->owning_controller; - - status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control, - frame_index, - (void **)&frame_header); - - if (status != SCI_SUCCESS) { - dev_err(scic_to_dev(scic), - "%s: SCIC IO Request 0x%p could not get frame header " - "for frame index %d, status %x\n", - __func__, stp_req, frame_index, status); - return status; - } - - if (frame_header->fis_type == FIS_DATA) { - if (stp_req->type.pio.request_current.sgl_pair == NULL) { - sci_req->saved_rx_frame_index = frame_index; - stp_req->type.pio.pio_transfer_bytes = 0; - } else { - scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control, - frame_index, - (void **)&frame_buffer); - - status = scic_sds_stp_request_pio_data_in_copy_data(stp_req, - (u8 *)frame_buffer); - - /* Frame is decoded return it to the controller */ - scic_sds_controller_release_frame(scic, frame_index); - } - - /* Check for the end of the transfer, are there more - * bytes remaining for this data transfer - */ - if (status != SCI_SUCCESS || - stp_req->type.pio.pio_transfer_bytes != 0) - return status; - - if ((stp_req->type.pio.ending_status & ATA_BUSY) == 0) { - scic_sds_request_set_status(sci_req, - SCU_TASK_DONE_CHECK_RESPONSE, - SCI_FAILURE_IO_RESPONSE_VALID); - - sci_base_state_machine_change_state(&sci_req->state_machine, - SCI_BASE_REQUEST_STATE_COMPLETED); - } else { - 
sci_base_state_machine_change_state(&sci_req->started_substate_machine, - SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE); - } - } else { - dev_err(scic_to_dev(scic), - "%s: SCIC PIO Request 0x%p received frame %d " - "with fis type 0x%02x when expecting a data " - "fis.\n", __func__, stp_req, frame_index, - frame_header->fis_type); - - scic_sds_request_set_status(sci_req, - SCU_TASK_DONE_GOOD, - SCI_FAILURE_IO_REQUIRES_SCSI_ABORT); - - sci_base_state_machine_change_state(&sci_req->state_machine, - SCI_BASE_REQUEST_STATE_COMPLETED); - - /* Frame is decoded return it to the controller */ - scic_sds_controller_release_frame(scic, frame_index); - } - - return status; -} - - -/** - * - * @sci_req: - * @completion_code: - * - * enum sci_status - */ -static enum sci_status scic_sds_stp_request_pio_data_out_await_data_transmit_completion_tc_completion_handler( - - struct scic_sds_request *sci_req, - u32 completion_code) -{ - enum sci_status status = SCI_SUCCESS; - bool all_frames_transferred = false; - struct scic_sds_stp_request *stp_req = &sci_req->stp.req; - - switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { - case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): - /* Transmit data */ - if (stp_req->type.pio.pio_transfer_bytes != 0) { - status = scic_sds_stp_request_pio_data_out_transmit_data(sci_req); - if (status == SCI_SUCCESS) { - if (stp_req->type.pio.pio_transfer_bytes == 0) - all_frames_transferred = true; - } - } else if (stp_req->type.pio.pio_transfer_bytes == 0) { - /* - * this will happen if the all data is written at the - * first time after the pio setup fis is received - */ - all_frames_transferred = true; - } - - /* all data transferred. */ - if (all_frames_transferred) { - /* - * Change the state to SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_FRAME_SUBSTATE - * and wait for PIO_SETUP fis / or D2H REg fis. */ - sci_base_state_machine_change_state( - &sci_req->started_substate_machine, - SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE - ); - } - break; - - default: - /* - * All other completion status cause the IO to be complete. If a NAK - * was received, then it is up to the user to retry the request. */ - scic_sds_request_set_status( - sci_req, - SCU_NORMALIZE_COMPLETION_STATUS(completion_code), - SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR - ); - - sci_base_state_machine_change_state( - &sci_req->state_machine, - SCI_BASE_REQUEST_STATE_COMPLETED - ); - break; - } - - return status; -} - -/** - * - * @request: This is the request which is receiving the event. - * @event_code: This is the event code that the request on which the request is - * expected to take action. - * - * This method will handle any link layer events while waiting for the data - * frame. enum sci_status SCI_SUCCESS SCI_FAILURE - */ -static enum sci_status scic_sds_stp_request_pio_data_in_await_data_event_handler( - struct scic_sds_request *request, - u32 event_code) -{ - enum sci_status status; - - switch (scu_get_event_specifier(event_code)) { - case SCU_TASK_DONE_CRC_ERR << SCU_EVENT_SPECIFIC_CODE_SHIFT: - /* - * We are waiting for data and the SCU has R_ERR the data frame. 
- * Go back to waiting for the D2H Register FIS */ - sci_base_state_machine_change_state( - &request->started_substate_machine, - SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE - ); - - status = SCI_SUCCESS; - break; - - default: - dev_err(scic_to_dev(request->owning_controller), - "%s: SCIC PIO Request 0x%p received unexpected " - "event 0x%08x\n", - __func__, request, event_code); - - /* / @todo Should we fail the PIO request when we get an unexpected event? */ - status = SCI_FAILURE; - break; - } - - return status; -} - -/* --------------------------------------------------------------------------- */ - -static const struct scic_sds_io_request_state_handler scic_sds_stp_request_started_pio_substate_handler_table[] = { - [SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_H2D_COMPLETION_SUBSTATE] = { - .abort_handler = scic_sds_request_started_state_abort_handler, - .tc_completion_handler = scic_sds_stp_request_pio_await_h2d_completion_tc_completion_handler, - }, - [SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE] = { - .abort_handler = scic_sds_request_started_state_abort_handler, - .frame_handler = scic_sds_stp_request_pio_await_frame_frame_handler - }, - [SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_DATA_SUBSTATE] = { - .abort_handler = scic_sds_request_started_state_abort_handler, - .event_handler = scic_sds_stp_request_pio_data_in_await_data_event_handler, - .frame_handler = scic_sds_stp_request_pio_data_in_await_data_frame_handler - }, - [SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_OUT_TRANSMIT_DATA_SUBSTATE] = { - .abort_handler = scic_sds_request_started_state_abort_handler, - .tc_completion_handler = scic_sds_stp_request_pio_data_out_await_data_transmit_completion_tc_completion_handler, - } -}; - -static void scic_sds_stp_request_started_pio_await_h2d_completion_enter( - void *object) -{ - struct scic_sds_request *sci_req = object; - - SET_STATE_HANDLER( - sci_req, - scic_sds_stp_request_started_pio_substate_handler_table, - SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_H2D_COMPLETION_SUBSTATE - ); - - scic_sds_remote_device_set_working_request( - sci_req->target_device, sci_req); -} - -static void scic_sds_stp_request_started_pio_await_frame_enter(void *object) -{ - struct scic_sds_request *sci_req = object; - - SET_STATE_HANDLER( - sci_req, - scic_sds_stp_request_started_pio_substate_handler_table, - SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE - ); -} - -static void scic_sds_stp_request_started_pio_data_in_await_data_enter( - void *object) -{ - struct scic_sds_request *sci_req = object; - - SET_STATE_HANDLER( - sci_req, - scic_sds_stp_request_started_pio_substate_handler_table, - SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_DATA_SUBSTATE - ); -} - -static void scic_sds_stp_request_started_pio_data_out_transmit_data_enter( - void *object) -{ - struct scic_sds_request *sci_req = object; - - SET_STATE_HANDLER( - sci_req, - scic_sds_stp_request_started_pio_substate_handler_table, - SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_OUT_TRANSMIT_DATA_SUBSTATE - ); -} - -/* --------------------------------------------------------------------------- */ - -static const struct sci_base_state scic_sds_stp_request_started_pio_substate_table[] = { - [SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_H2D_COMPLETION_SUBSTATE] = { - .enter_state = scic_sds_stp_request_started_pio_await_h2d_completion_enter, - }, - [SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE] = { - .enter_state = scic_sds_stp_request_started_pio_await_frame_enter, - }, - [SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_DATA_SUBSTATE] = { 
- .enter_state = scic_sds_stp_request_started_pio_data_in_await_data_enter, - }, - [SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_OUT_TRANSMIT_DATA_SUBSTATE] = { - .enter_state = scic_sds_stp_request_started_pio_data_out_transmit_data_enter, - } -}; - -enum sci_status -scic_sds_stp_pio_request_construct(struct scic_sds_request *sci_req, - bool copy_rx_frame) -{ - struct scic_sds_stp_request *stp_req = &sci_req->stp.req; - struct scic_sds_stp_pio_request *pio = &stp_req->type.pio; - - scic_sds_stp_non_ncq_request_construct(sci_req); - - scu_stp_raw_request_construct_task_context(stp_req, - sci_req->task_context_buffer); - - pio->current_transfer_bytes = 0; - pio->ending_error = 0; - pio->ending_status = 0; - - pio->request_current.sgl_offset = 0; - pio->request_current.sgl_set = SCU_SGL_ELEMENT_PAIR_A; - - if (copy_rx_frame) { - scic_sds_request_build_sgl(sci_req); - /* Since the IO request copy of the TC contains the same data as - * the actual TC this pointer is vaild for either. - */ - pio->request_current.sgl_pair = &sci_req->task_context_buffer->sgl_pair_ab; - } else { - /* The user does not want the data copied to the SGL buffer location */ - pio->request_current.sgl_pair = NULL; - } - - sci_base_state_machine_construct(&sci_req->started_substate_machine, - sci_req, - scic_sds_stp_request_started_pio_substate_table, - SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_H2D_COMPLETION_SUBSTATE); - - return SCI_SUCCESS; -} - -static void scic_sds_stp_request_udma_complete_request( - struct scic_sds_request *request, - u32 scu_status, - enum sci_status sci_status) -{ - scic_sds_request_set_status(request, scu_status, sci_status); - sci_base_state_machine_change_state(&request->state_machine, - SCI_BASE_REQUEST_STATE_COMPLETED); -} - -static enum sci_status scic_sds_stp_request_udma_general_frame_handler(struct scic_sds_request *sci_req, - u32 frame_index) -{ - struct scic_sds_controller *scic = sci_req->owning_controller; - struct dev_to_host_fis *frame_header; - enum sci_status status; - u32 *frame_buffer; - - status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control, - frame_index, - (void **)&frame_header); - - if ((status == SCI_SUCCESS) && - (frame_header->fis_type == FIS_REGD2H)) { - scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control, - frame_index, - (void **)&frame_buffer); - - scic_sds_controller_copy_sata_response(&sci_req->stp.rsp, - frame_header, - frame_buffer); - } - - scic_sds_controller_release_frame(scic, frame_index); - - return status; -} - -static enum sci_status scic_sds_stp_request_udma_await_tc_completion_tc_completion_handler( - struct scic_sds_request *sci_req, - u32 completion_code) -{ - enum sci_status status = SCI_SUCCESS; - - switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { - case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): - scic_sds_stp_request_udma_complete_request(sci_req, - SCU_TASK_DONE_GOOD, - SCI_SUCCESS); - break; - case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_FIS): - case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR): - /* - * We must check ther response buffer to see if the D2H Register FIS was - * received before we got the TC completion. 
*/ - if (sci_req->stp.rsp.fis_type == FIS_REGD2H) { - scic_sds_remote_device_suspend(sci_req->target_device, - SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code))); - - scic_sds_stp_request_udma_complete_request(sci_req, - SCU_TASK_DONE_CHECK_RESPONSE, - SCI_FAILURE_IO_RESPONSE_VALID); - } else { - /* - * If we have an error completion status for the TC then we can expect a - * D2H register FIS from the device so we must change state to wait for it */ - sci_base_state_machine_change_state(&sci_req->started_substate_machine, - SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_D2H_REG_FIS_SUBSTATE); - } - break; - - /* - * / @todo Check to see if any of these completion status need to wait for - * / the device to host register fis. */ - /* / @todo We can retry the command for SCU_TASK_DONE_CMD_LL_R_ERR - this comes only for B0 */ - case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_INV_FIS_LEN): - case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_MAX_PLD_ERR): - case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_R_ERR): - case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CMD_LL_R_ERR): - case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CRC_ERR): - scic_sds_remote_device_suspend(sci_req->target_device, - SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code))); - /* Fall through to the default case */ - default: - /* All other completion status cause the IO to be complete. */ - scic_sds_stp_request_udma_complete_request(sci_req, - SCU_NORMALIZE_COMPLETION_STATUS(completion_code), - SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); - break; - } - - return status; -} - -static enum sci_status scic_sds_stp_request_udma_await_d2h_reg_fis_frame_handler( - struct scic_sds_request *sci_req, - u32 frame_index) -{ - enum sci_status status; - - /* Use the general frame handler to copy the resposne data */ - status = scic_sds_stp_request_udma_general_frame_handler(sci_req, frame_index); - - if (status != SCI_SUCCESS) - return status; - - scic_sds_stp_request_udma_complete_request(sci_req, - SCU_TASK_DONE_CHECK_RESPONSE, - SCI_FAILURE_IO_RESPONSE_VALID); - - return status; -} - -/* --------------------------------------------------------------------------- */ - -static const struct scic_sds_io_request_state_handler scic_sds_stp_request_started_udma_substate_handler_table[] = { - [SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_TC_COMPLETION_SUBSTATE] = { - .abort_handler = scic_sds_request_started_state_abort_handler, - .tc_completion_handler = scic_sds_stp_request_udma_await_tc_completion_tc_completion_handler, - .frame_handler = scic_sds_stp_request_udma_general_frame_handler, - }, - [SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_D2H_REG_FIS_SUBSTATE] = { - .abort_handler = scic_sds_request_started_state_abort_handler, - .frame_handler = scic_sds_stp_request_udma_await_d2h_reg_fis_frame_handler, - }, -}; - -static void scic_sds_stp_request_started_udma_await_tc_completion_enter( - void *object) -{ - struct scic_sds_request *sci_req = object; - - SET_STATE_HANDLER( - sci_req, - scic_sds_stp_request_started_udma_substate_handler_table, - SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_TC_COMPLETION_SUBSTATE - ); -} - -/** - * - * - * This state is entered when there is an TC completion failure. The hardware - * received an unexpected condition while processing the IO request and now - * will UF the D2H register FIS to complete the IO. 
- */ -static void scic_sds_stp_request_started_udma_await_d2h_reg_fis_enter( - void *object) -{ - struct scic_sds_request *sci_req = object; - - SET_STATE_HANDLER( - sci_req, - scic_sds_stp_request_started_udma_substate_handler_table, - SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_D2H_REG_FIS_SUBSTATE - ); -} - -/* --------------------------------------------------------------------------- */ - -static const struct sci_base_state scic_sds_stp_request_started_udma_substate_table[] = { - [SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_TC_COMPLETION_SUBSTATE] = { - .enter_state = scic_sds_stp_request_started_udma_await_tc_completion_enter, - }, - [SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_D2H_REG_FIS_SUBSTATE] = { - .enter_state = scic_sds_stp_request_started_udma_await_d2h_reg_fis_enter, - }, -}; - -enum sci_status scic_sds_stp_udma_request_construct(struct scic_sds_request *sci_req, - u32 len, - enum dma_data_direction dir) -{ - scic_sds_stp_non_ncq_request_construct(sci_req); - - scic_sds_stp_optimized_request_construct(sci_req, SCU_TASK_TYPE_DMA_IN, - len, dir); - - sci_base_state_machine_construct( - &sci_req->started_substate_machine, - sci_req, - scic_sds_stp_request_started_udma_substate_table, - SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_TC_COMPLETION_SUBSTATE - ); - - return SCI_SUCCESS; -} - -/** - * - * @sci_req: - * @completion_code: - * - * This method processes a TC completion. The expected TC completion is for - * the transmission of the H2D register FIS containing the SATA/STP non-data - * request. This method always successfully processes the TC completion. - * SCI_SUCCESS This value is always returned. - */ -static enum sci_status scic_sds_stp_request_soft_reset_await_h2d_asserted_tc_completion_handler( - struct scic_sds_request *sci_req, - u32 completion_code) -{ - switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { - case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): - scic_sds_request_set_status( - sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS - ); - - sci_base_state_machine_change_state( - &sci_req->started_substate_machine, - SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_DIAGNOSTIC_COMPLETION_SUBSTATE - ); - break; - - default: - /* - * All other completion status cause the IO to be complete. If a NAK - * was received, then it is up to the user to retry the request. */ - scic_sds_request_set_status( - sci_req, - SCU_NORMALIZE_COMPLETION_STATUS(completion_code), - SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR - ); - - sci_base_state_machine_change_state( - &sci_req->state_machine, SCI_BASE_REQUEST_STATE_COMPLETED); - break; - } - - return SCI_SUCCESS; -} - -/** - * - * @sci_req: - * @completion_code: - * - * This method processes a TC completion. The expected TC completion is for - * the transmission of the H2D register FIS containing the SATA/STP non-data - * request. This method always successfully processes the TC completion. - * SCI_SUCCESS This value is always returned. - */ -static enum sci_status scic_sds_stp_request_soft_reset_await_h2d_diagnostic_tc_completion_handler( - struct scic_sds_request *sci_req, - u32 completion_code) -{ - switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { - case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): - scic_sds_request_set_status( - sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS - ); - - sci_base_state_machine_change_state( - &sci_req->started_substate_machine, - SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_D2H_RESPONSE_FRAME_SUBSTATE - ); - break; - - default: - /* - * All other completion status cause the IO to be complete. 
If a NAK - * was received, then it is up to the user to retry the request. */ - scic_sds_request_set_status( - sci_req, - SCU_NORMALIZE_COMPLETION_STATUS(completion_code), - SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR - ); - - sci_base_state_machine_change_state(&sci_req->state_machine, - SCI_BASE_REQUEST_STATE_COMPLETED); - break; - } - - return SCI_SUCCESS; -} - -/** - * - * @request: This parameter specifies the request for which a frame has been - * received. - * @frame_index: This parameter specifies the index of the frame that has been - * received. - * - * This method processes frames received from the target while waiting for a - * device to host register FIS. If a non-register FIS is received during this - * time, it is treated as a protocol violation from an IO perspective. Indicate - * if the received frame was processed successfully. - */ -static enum sci_status scic_sds_stp_request_soft_reset_await_d2h_frame_handler( - struct scic_sds_request *sci_req, - u32 frame_index) -{ - enum sci_status status; - struct dev_to_host_fis *frame_header; - u32 *frame_buffer; - struct scic_sds_stp_request *stp_req = &sci_req->stp.req; - struct scic_sds_controller *scic = sci_req->owning_controller; - - status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control, - frame_index, - (void **)&frame_header); - if (status != SCI_SUCCESS) { - dev_err(scic_to_dev(scic), - "%s: SCIC IO Request 0x%p could not get frame header " - "for frame index %d, status %x\n", - __func__, stp_req, frame_index, status); - return status; - } - - switch (frame_header->fis_type) { - case FIS_REGD2H: - scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control, - frame_index, - (void **)&frame_buffer); - - scic_sds_controller_copy_sata_response(&sci_req->stp.rsp, - frame_header, - frame_buffer); - - /* The command has completed with error */ - scic_sds_request_set_status(sci_req, - SCU_TASK_DONE_CHECK_RESPONSE, - SCI_FAILURE_IO_RESPONSE_VALID); - break; - - default: - dev_warn(scic_to_dev(scic), - "%s: IO Request:0x%p Frame Id:%d protocol " - "violation occurred\n", __func__, stp_req, - frame_index); - - scic_sds_request_set_status(sci_req, SCU_TASK_DONE_UNEXP_FIS, - SCI_FAILURE_PROTOCOL_VIOLATION); - break; - } - - sci_base_state_machine_change_state(&sci_req->state_machine, - SCI_BASE_REQUEST_STATE_COMPLETED); - - /* Frame has been decoded return it to the controller */ - scic_sds_controller_release_frame(scic, frame_index); - - return status; -} - -/* --------------------------------------------------------------------------- */ - -static const struct scic_sds_io_request_state_handler scic_sds_stp_request_started_soft_reset_substate_handler_table[] = { - [SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_ASSERTED_COMPLETION_SUBSTATE] = { - .abort_handler = scic_sds_request_started_state_abort_handler, - .tc_completion_handler = scic_sds_stp_request_soft_reset_await_h2d_asserted_tc_completion_handler, - }, - [SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_DIAGNOSTIC_COMPLETION_SUBSTATE] = { - .abort_handler = scic_sds_request_started_state_abort_handler, - .tc_completion_handler = scic_sds_stp_request_soft_reset_await_h2d_diagnostic_tc_completion_handler, - }, - [SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_D2H_RESPONSE_FRAME_SUBSTATE] = { - .abort_handler = scic_sds_request_started_state_abort_handler, - .frame_handler = scic_sds_stp_request_soft_reset_await_d2h_frame_handler, - }, -}; - -static void scic_sds_stp_request_started_soft_reset_await_h2d_asserted_completion_enter( - void *object) -{ - 
struct scic_sds_request *sci_req = object; - - SET_STATE_HANDLER( - sci_req, - scic_sds_stp_request_started_soft_reset_substate_handler_table, - SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_ASSERTED_COMPLETION_SUBSTATE - ); - - scic_sds_remote_device_set_working_request( - sci_req->target_device, sci_req - ); -} - -static void scic_sds_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter( - void *object) -{ - struct scic_sds_request *sci_req = object; - struct scu_task_context *task_context; - struct host_to_dev_fis *h2d_fis; - enum sci_status status; - - /* Clear the SRST bit */ - h2d_fis = &sci_req->stp.cmd; - h2d_fis->control = 0; - - /* Clear the TC control bit */ - task_context = scic_sds_controller_get_task_context_buffer( - sci_req->owning_controller, sci_req->io_tag); - task_context->control_frame = 0; - - status = scic_controller_continue_io(sci_req); - if (status == SCI_SUCCESS) { - SET_STATE_HANDLER( - sci_req, - scic_sds_stp_request_started_soft_reset_substate_handler_table, - SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_DIAGNOSTIC_COMPLETION_SUBSTATE - ); - } -} - -static void scic_sds_stp_request_started_soft_reset_await_d2h_response_enter( - void *object) -{ - struct scic_sds_request *sci_req = object; - - SET_STATE_HANDLER( - sci_req, - scic_sds_stp_request_started_soft_reset_substate_handler_table, - SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_D2H_RESPONSE_FRAME_SUBSTATE - ); -} - -static const struct sci_base_state scic_sds_stp_request_started_soft_reset_substate_table[] = { - [SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_ASSERTED_COMPLETION_SUBSTATE] = { - .enter_state = scic_sds_stp_request_started_soft_reset_await_h2d_asserted_completion_enter, - }, - [SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_DIAGNOSTIC_COMPLETION_SUBSTATE] = { - .enter_state = scic_sds_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter, - }, - [SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_D2H_RESPONSE_FRAME_SUBSTATE] = { - .enter_state = scic_sds_stp_request_started_soft_reset_await_d2h_response_enter, - }, -}; - -enum sci_status scic_sds_stp_soft_reset_request_construct(struct scic_sds_request *sci_req) -{ - struct scic_sds_stp_request *stp_req = &sci_req->stp.req; - - scic_sds_stp_non_ncq_request_construct(sci_req); - - /* Build the STP task context structure */ - scu_stp_raw_request_construct_task_context(stp_req, sci_req->task_context_buffer); - - sci_base_state_machine_construct(&sci_req->started_substate_machine, - sci_req, - scic_sds_stp_request_started_soft_reset_substate_table, - SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_ASSERTED_COMPLETION_SUBSTATE); - - return SCI_SUCCESS; -} diff --git a/drivers/scsi/isci/core/scic_sds_stp_request.h b/drivers/scsi/isci/core/scic_sds_stp_request.h deleted file mode 100644 index f5434f12ad0..00000000000 --- a/drivers/scsi/isci/core/scic_sds_stp_request.h +++ /dev/null @@ -1,178 +0,0 @@ -/* - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. 
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * BSD LICENSE
- *
- * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- *   * Redistributions of source code must retain the above copyright
- *     notice, this list of conditions and the following disclaimer.
- *   * Redistributions in binary form must reproduce the above copyright
- *     notice, this list of conditions and the following disclaimer in
- *     the documentation and/or other materials provided with the
- *     distribution.
- *   * Neither the name of Intel Corporation nor the names of its
- *     contributors may be used to endorse or promote products derived
- *     from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _SCIC_SDS_STP_REQUEST_T_
-#define _SCIC_SDS_STP_REQUEST_T_
-
-#include <linux/dma-mapping.h>
-#include <scsi/sas.h>
-
-struct scic_sds_stp_request {
-	union {
-		u32 ncq;
-
-		u32 udma;
-
-		struct scic_sds_stp_pio_request {
-			/**
-			 * Total transfer for the entire PIO request recorded at request construction
-			 * time.
-			 *
-			 * @todo Should we just decrement this value for each byte of data transmitted
-			 * or received to eliminate the current_transfer_bytes field?
-			 */
-			u32 total_transfer_bytes;
-
-			/**
-			 * Total number of bytes received/transmitted in data frames since the start
-			 * of the IO request. At the end of the IO request this should equal the
-			 * total_transfer_bytes.
-			 */
-			u32 current_transfer_bytes;
-
-			/**
-			 * The number of bytes requested in the PIO setup.
-			 */
-			u32 pio_transfer_bytes;
-
-			/**
-			 * PIO Setup ending status value to tell us if we need to wait for another FIS
-			 * or if the transfer is complete. On the receipt of a D2H FIS this will be
-			 * the status field of that FIS.
-			 */
-			u8 ending_status;
-
-			/**
-			 * On receipt of a D2H FIS this will be the ending error field if the
-			 * ending_status has the SATA_STATUS_ERR bit set.
- */ - u8 ending_error; - - struct scic_sds_request_pio_sgl { - struct scu_sgl_element_pair *sgl_pair; - u8 sgl_set; - u32 sgl_offset; - } request_current; - } pio; - - struct { - /** - * The number of bytes requested in the PIO setup before CDB data frame. - */ - u32 device_preferred_cdb_length; - } packet; - } type; -}; - -/** - * enum scic_sds_stp_request_started_udma_substates - This enumeration depicts - * the various sub-states associated with a SATA/STP UDMA protocol operation. - * - * - */ -enum scic_sds_stp_request_started_udma_substates { - SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_TC_COMPLETION_SUBSTATE, - SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_D2H_REG_FIS_SUBSTATE, -}; - -/** - * enum scic_sds_stp_request_started_non_data_substates - This enumeration - * depicts the various sub-states associated with a SATA/STP non-data - * protocol operation. - * - * - */ -enum scic_sds_stp_request_started_non_data_substates { - SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_H2D_COMPLETION_SUBSTATE, - SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_D2H_SUBSTATE, -}; - -/** - * enum scic_sds_stp_request_started_soft_reset_substates - THis enumeration - * depicts the various sub-states associated with a SATA/STP soft reset - * operation. - * - * - */ -enum scic_sds_stp_request_started_soft_reset_substates { - SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_ASSERTED_COMPLETION_SUBSTATE, - SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_DIAGNOSTIC_COMPLETION_SUBSTATE, - SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_D2H_RESPONSE_FRAME_SUBSTATE, -}; - -u32 scic_sds_stp_request_get_object_size(void); - -enum sci_status scic_sds_stp_pio_request_construct( - struct scic_sds_request *scic_io_request, - bool copy_rx_frame); - -enum sci_status scic_sds_stp_udma_request_construct( - struct scic_sds_request *sci_req, - u32 transfer_length, - enum dma_data_direction dir); - -enum sci_status scic_sds_stp_non_data_request_construct( - struct scic_sds_request *sci_req); - -enum sci_status scic_sds_stp_soft_reset_request_construct( - struct scic_sds_request *sci_req); - -enum sci_status scic_sds_stp_ncq_request_construct( - struct scic_sds_request *sci_req, - u32 transfer_length, - enum dma_data_direction dir); - - -#endif /* _SCIC_SDS_STP_REQUEST_T_ */ diff --git a/drivers/scsi/isci/core/scic_task_request.h b/drivers/scsi/isci/core/scic_task_request.h deleted file mode 100644 index 790cee9b4af..00000000000 --- a/drivers/scsi/isci/core/scic_task_request.h +++ /dev/null @@ -1,104 +0,0 @@ -/* - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - * The full GNU General Public License is included in this distribution - * in the file called LICENSE.GPL. 
- * - * BSD LICENSE - * - * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#ifndef _SCIC_TASK_REQUEST_H_ -#define _SCIC_TASK_REQUEST_H_ - -#include "isci.h" - -struct scic_sds_request; -struct scic_sds_remote_device; -struct scic_sds_controller; - - -enum sci_status scic_task_request_construct( - struct scic_sds_controller *scic_controller, - struct scic_sds_remote_device *scic_remote_device, - u16 io_tag, struct scic_sds_request *sci_req); - -/** - * scic_task_request_construct_ssp() - This method is called by the SCI user to - * construct all SCI Core SSP task management requests. Memory - * initialization and functionality common to all task request types is - * performed in this method. - * @scic_task_request: This parameter specifies the handle to the core task - * request object for which to construct a SATA specific task management - * request. - * - * Indicate if the controller successfully built the task request. SCI_SUCCESS - * This value is returned if the task request was successfully built. - */ -enum sci_status scic_task_request_construct_ssp( - struct scic_sds_request *scic_task_request); - -/** - * scic_task_request_construct_sata() - This method is called by the SCI user - * to construct all SCI Core SATA task management requests. Memory - * initialization and functionality common to all task request types is - * performed in this method. - * @scic_task_request_handle: This parameter specifies the handle to the core - * task request object for which to construct a SATA specific task - * management request. - * - * Indicate if the controller successfully built the task request. SCI_SUCCESS - * This value is returned if the task request was successfully built. 
- */ -enum sci_status scic_task_request_construct_sata( - struct scic_sds_request *scic_task_request_handle); - - - -#endif /* _SCIC_TASK_REQUEST_H_ */ - diff --git a/drivers/scsi/isci/host.c b/drivers/scsi/isci/host.c index 71a0466d1c8..e1930da08d4 100644 --- a/drivers/scsi/isci/host.c +++ b/drivers/scsi/isci/host.c @@ -61,9 +61,7 @@ #include "probe_roms.h" #include "remote_device.h" #include "request.h" -#include "scic_io_request.h" #include "scic_sds_port_configuration_agent.h" -#include "sci_util.h" #include "scu_completion_codes.h" #include "scu_event_codes.h" #include "registers.h" diff --git a/drivers/scsi/isci/isci.h b/drivers/scsi/isci/isci.h index d288897b85f..69826eac97b 100644 --- a/drivers/scsi/isci/isci.h +++ b/drivers/scsi/isci/isci.h @@ -521,6 +521,25 @@ enum sci_task_status { }; +/** + * sci_swab32_cpy - convert between scsi and scu-hardware byte format + * @dest: receive the 4-byte endian swapped version of src + * @src: word aligned source buffer + * + * scu hardware handles SSP/SMP control, response, and unidentified + * frames in "big endian dword" order. Regardless of host endian this + * is always a swab32()-per-dword conversion of the standard definition, + * i.e. single byte fields swapped and multi-byte fields in little- + * endian + */ +static inline void sci_swab32_cpy(void *_dest, void *_src, ssize_t word_cnt) +{ + u32 *dest = _dest, *src = _src; + + while (--word_cnt >= 0) + dest[word_cnt] = swab32(src[word_cnt]); +} + extern unsigned char no_outbound_task_to; extern u16 ssp_max_occ_to; extern u16 stp_max_occ_to; diff --git a/drivers/scsi/isci/port.c b/drivers/scsi/isci/port.c index f44fa20cad9..d4bf6d2d8c9 100644 --- a/drivers/scsi/isci/port.c +++ b/drivers/scsi/isci/port.c @@ -55,7 +55,6 @@ #include <linux/workqueue.h> #include "isci.h" -#include "scic_io_request.h" #include "scic_phy.h" #include "scic_sds_phy.h" #include "scic_port.h" diff --git a/drivers/scsi/isci/remote_device.c b/drivers/scsi/isci/remote_device.c index 00334b9ccd8..23390102514 100644 --- a/drivers/scsi/isci/remote_device.c +++ b/drivers/scsi/isci/remote_device.c @@ -57,14 +57,11 @@ #include "port.h" #include "remote_device.h" #include "request.h" -#include "scic_io_request.h" #include "scic_phy.h" #include "scic_port.h" #include "scic_sds_phy.h" #include "scic_sds_port.h" #include "remote_node_context.h" -#include "scic_sds_request.h" -#include "sci_util.h" #include "scu_event_codes.h" #include "task.h" diff --git a/drivers/scsi/isci/remote_node_context.c b/drivers/scsi/isci/remote_node_context.c index 5891f3d5017..35231e7407b 100644 --- a/drivers/scsi/isci/remote_node_context.c +++ b/drivers/scsi/isci/remote_node_context.c @@ -58,7 +58,6 @@ #include "scic_sds_port.h" #include "remote_device.h" #include "remote_node_context.h" -#include "sci_util.h" #include "scu_event_codes.h" #include "scu_task_context.h" diff --git a/drivers/scsi/isci/remote_node_table.c b/drivers/scsi/isci/remote_node_table.c index 80f44c25f45..6b9465a5a21 100644 --- a/drivers/scsi/isci/remote_node_table.c +++ b/drivers/scsi/isci/remote_node_table.c @@ -59,7 +59,6 @@ * * */ -#include "sci_util.h" #include "remote_node_table.h" #include "remote_node_context.h" diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c index 4961ee34709..857ad067f11 100644 --- a/drivers/scsi/isci/request.c +++ b/drivers/scsi/isci/request.c @@ -54,898 +54,1286 @@ */ #include "isci.h" -#include "scic_io_request.h" -#include "scic_task_request.h" #include "scic_port.h" #include "task.h" #include "request.h" #include "sata.h" 
#include "scu_completion_codes.h" -#include "scic_sds_request.h" #include "sas.h" -static enum sci_status isci_request_ssp_request_construct( - struct isci_request *request) -{ - enum sci_status status; +/** + * This method returns the sgl element pair for the specificed sgl_pair index. + * @sci_req: This parameter specifies the IO request for which to retrieve + * the Scatter-Gather List element pair. + * @sgl_pair_index: This parameter specifies the index into the SGL element + * pair to be retrieved. + * + * This method returns a pointer to an struct scu_sgl_element_pair. + */ +static struct scu_sgl_element_pair *scic_sds_request_get_sgl_element_pair( + struct scic_sds_request *sci_req, + u32 sgl_pair_index + ) { + struct scu_task_context *task_context; + + task_context = (struct scu_task_context *)sci_req->task_context_buffer; + + if (sgl_pair_index == 0) { + return &task_context->sgl_pair_ab; + } else if (sgl_pair_index == 1) { + return &task_context->sgl_pair_cd; + } - dev_dbg(&request->isci_host->pdev->dev, - "%s: request = %p\n", - __func__, - request); - status = scic_io_request_construct_basic_ssp(&request->sci); - return status; + return &sci_req->sg_table[sgl_pair_index - 2]; } -static enum sci_status isci_request_stp_request_construct( - struct isci_request *request) +/** + * This function will build the SGL list for an IO request. + * @sci_req: This parameter specifies the IO request for which to build + * the Scatter-Gather List. + * + */ +void scic_sds_request_build_sgl(struct scic_sds_request *sds_request) { - struct sas_task *task = isci_request_access_task(request); - enum sci_status status; - struct host_to_dev_fis *register_fis; - - dev_dbg(&request->isci_host->pdev->dev, - "%s: request = %p\n", - __func__, - request); + struct isci_request *isci_request = sci_req_to_ireq(sds_request); + struct isci_host *isci_host = isci_request->isci_host; + struct sas_task *task = isci_request_access_task(isci_request); + struct scatterlist *sg = NULL; + dma_addr_t dma_addr; + u32 sg_idx = 0; + struct scu_sgl_element_pair *scu_sg = NULL; + struct scu_sgl_element_pair *prev_sg = NULL; + + if (task->num_scatter > 0) { + sg = task->scatter; + + while (sg) { + scu_sg = scic_sds_request_get_sgl_element_pair( + sds_request, + sg_idx); + + SCU_SGL_COPY(scu_sg->A, sg); + + sg = sg_next(sg); + + if (sg) { + SCU_SGL_COPY(scu_sg->B, sg); + sg = sg_next(sg); + } else + SCU_SGL_ZERO(scu_sg->B); + + if (prev_sg) { + dma_addr = + scic_io_request_get_dma_addr( + sds_request, + scu_sg); + + prev_sg->next_pair_upper = + upper_32_bits(dma_addr); + prev_sg->next_pair_lower = + lower_32_bits(dma_addr); + } + + prev_sg = scu_sg; + sg_idx++; + } + } else { /* handle when no sg */ + scu_sg = scic_sds_request_get_sgl_element_pair(sds_request, + sg_idx); - /* Get the host_to_dev_fis from the core and copy - * the fis from the task into it. - */ - register_fis = isci_sata_task_to_fis_copy(task); + dma_addr = dma_map_single(&isci_host->pdev->dev, + task->scatter, + task->total_xfer_len, + task->data_dir); - status = scic_io_request_construct_basic_sata(&request->sci); + isci_request->zero_scatter_daddr = dma_addr; - /* Set the ncq tag in the fis, from the queue - * command in the task. 
- */ - if (isci_sata_is_task_ncq(task)) { + scu_sg->A.length = task->total_xfer_len; + scu_sg->A.address_upper = upper_32_bits(dma_addr); + scu_sg->A.address_lower = lower_32_bits(dma_addr); + } - isci_sata_set_ncq_tag( - register_fis, - task - ); + if (scu_sg) { + scu_sg->next_pair_upper = 0; + scu_sg->next_pair_lower = 0; } +} - return status; +static void scic_sds_ssp_io_request_assign_buffers(struct scic_sds_request *sci_req) +{ + if (sci_req->was_tag_assigned_by_user == false) + sci_req->task_context_buffer = &sci_req->tc; } -/* - * isci_smp_request_build() - This function builds the smp request. - * @ireq: This parameter points to the isci_request allocated in the - * request construct function. - * - * SCI_SUCCESS on successfull completion, or specific failure code. - */ -static enum sci_status isci_smp_request_build(struct isci_request *ireq) +static void scic_sds_io_request_build_ssp_command_iu(struct scic_sds_request *sci_req) { - enum sci_status status = SCI_FAILURE; + struct ssp_cmd_iu *cmd_iu; + struct isci_request *ireq = sci_req_to_ireq(sci_req); struct sas_task *task = isci_request_access_task(ireq); - struct scic_sds_request *sci_req = &ireq->sci; - dev_dbg(&ireq->isci_host->pdev->dev, - "%s: request = %p\n", __func__, ireq); + cmd_iu = &sci_req->ssp.cmd; - dev_dbg(&ireq->isci_host->pdev->dev, - "%s: smp_req len = %d\n", - __func__, - task->smp_task.smp_req.length); + memcpy(cmd_iu->LUN, task->ssp_task.LUN, 8); + cmd_iu->add_cdb_len = 0; + cmd_iu->_r_a = 0; + cmd_iu->_r_b = 0; + cmd_iu->en_fburst = 0; /* unsupported */ + cmd_iu->task_prio = task->ssp_task.task_prio; + cmd_iu->task_attr = task->ssp_task.task_attr; + cmd_iu->_r_c = 0; - /* copy the smp_command to the address; */ - sg_copy_to_buffer(&task->smp_task.smp_req, 1, - &sci_req->smp.cmd, - sizeof(struct smp_req)); + sci_swab32_cpy(&cmd_iu->cdb, task->ssp_task.cdb, + sizeof(task->ssp_task.cdb) / sizeof(u32)); +} - status = scic_io_request_construct_smp(sci_req); - if (status != SCI_SUCCESS) - dev_warn(&ireq->isci_host->pdev->dev, - "%s: failed with status = %d\n", - __func__, - status); +static void scic_sds_task_request_build_ssp_task_iu(struct scic_sds_request *sci_req) +{ + struct ssp_task_iu *task_iu; + struct isci_request *ireq = sci_req_to_ireq(sci_req); + struct sas_task *task = isci_request_access_task(ireq); + struct isci_tmf *isci_tmf = isci_request_access_tmf(ireq); - return status; + task_iu = &sci_req->ssp.tmf; + + memset(task_iu, 0, sizeof(struct ssp_task_iu)); + + memcpy(task_iu->LUN, task->ssp_task.LUN, 8); + + task_iu->task_func = isci_tmf->tmf_code; + task_iu->task_tag = + (ireq->ttype == tmf_task) ? + isci_tmf->io_tag : + SCI_CONTROLLER_INVALID_IO_TAG; } /** - * isci_io_request_build() - This function builds the io request object. - * @isci_host: This parameter specifies the ISCI host object - * @request: This parameter points to the isci_request object allocated in the - * request construct function. - * @sci_device: This parameter is the handle for the sci core's remote device - * object that is the destination for this request. + * This method is will fill in the SCU Task Context for any type of SSP request. + * @sci_req: + * @task_context: * - * SCI_SUCCESS on successfull completion, or specific failure code. 
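/*
 * A stand-alone sketch of the SGL layout that scic_sds_request_build_sgl()
 * above produces: scatter entries are packed two per hardware pair (A and
 * B) and each pair carries a 64-bit link to the next one, with the final
 * pair's link zeroed.  The types and the link-address stand-in below are
 * simplified for illustration and are not the driver's structures.
 */
#include <stddef.h>
#include <stdint.h>

struct sgl_element { uint32_t addr_upper, addr_lower, length; };

struct sgl_pair {
        struct sgl_element a, b;
        uint32_t next_pair_upper, next_pair_lower;
};

static void build_pairs(struct sgl_pair *pairs, const uint64_t *addr,
                        const uint32_t *len, size_t nseg)
{
        for (size_t i = 0; i < nseg; i += 2) {
                struct sgl_pair *p = &pairs[i / 2];

                p->a.addr_upper = addr[i] >> 32;
                p->a.addr_lower = (uint32_t)addr[i];
                p->a.length = len[i];

                if (i + 1 < nseg) {
                        p->b.addr_upper = addr[i + 1] >> 32;
                        p->b.addr_lower = (uint32_t)addr[i + 1];
                        p->b.length = len[i + 1];
                } else {
                        p->b = (struct sgl_element){ 0, 0, 0 };
                }

                if (i >= 2) {
                        /* Link the previous pair to this one; a real driver
                         * would use the pair's DMA address here. */
                        uint64_t link = (uint64_t)(uintptr_t)p;

                        pairs[i / 2 - 1].next_pair_upper = link >> 32;
                        pairs[i / 2 - 1].next_pair_lower = (uint32_t)link;
                }
        }

        if (nseg) {     /* terminate the chain */
                pairs[(nseg - 1) / 2].next_pair_upper = 0;
                pairs[(nseg - 1) / 2].next_pair_lower = 0;
        }
}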
*/ -static enum sci_status isci_io_request_build( - struct isci_host *isci_host, - struct isci_request *request, - struct isci_remote_device *isci_device) +static void scu_ssp_reqeust_construct_task_context( + struct scic_sds_request *sds_request, + struct scu_task_context *task_context) { - enum sci_status status = SCI_SUCCESS; - struct sas_task *task = isci_request_access_task(request); - struct scic_sds_remote_device *sci_device = &isci_device->sci; + dma_addr_t dma_addr; + struct scic_sds_controller *controller; + struct scic_sds_remote_device *target_device; + struct scic_sds_port *target_port; + + controller = scic_sds_request_get_controller(sds_request); + target_device = scic_sds_request_get_device(sds_request); + target_port = scic_sds_request_get_port(sds_request); + + /* Fill in the TC with the its required data */ + task_context->abort = 0; + task_context->priority = 0; + task_context->initiator_request = 1; + task_context->connection_rate = target_device->connection_rate; + task_context->protocol_engine_index = + scic_sds_controller_get_protocol_engine_group(controller); + task_context->logical_port_index = + scic_sds_port_get_index(target_port); + task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SSP; + task_context->valid = SCU_TASK_CONTEXT_VALID; + task_context->context_type = SCU_TASK_CONTEXT_TYPE; + + task_context->remote_node_index = + scic_sds_remote_device_get_index(sds_request->target_device); + task_context->command_code = 0; + + task_context->link_layer_control = 0; + task_context->do_not_dma_ssp_good_response = 1; + task_context->strict_ordering = 0; + task_context->control_frame = 0; + task_context->timeout_enable = 0; + task_context->block_guard_enable = 0; + + task_context->address_modifier = 0; + + /* task_context->type.ssp.tag = sci_req->io_tag; */ + task_context->task_phase = 0x01; + + if (sds_request->was_tag_assigned_by_user) { + /* + * Build the task context now since we have already read + * the data + */ + sds_request->post_context = + (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC | + (scic_sds_controller_get_protocol_engine_group( + controller) << + SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) | + (scic_sds_port_get_index(target_port) << + SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) | + scic_sds_io_tag_get_index(sds_request->io_tag)); + } else { + /* + * Build the task context now since we have already read + * the data + * + * I/O tag index is not assigned because we have to wait + * until we get a TCi + */ + sds_request->post_context = + (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC | + (scic_sds_controller_get_protocol_engine_group( + owning_controller) << + SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) | + (scic_sds_port_get_index(target_port) << + SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT)); + } - dev_dbg(&isci_host->pdev->dev, - "%s: isci_device = 0x%p; request = %p, " - "num_scatter = %d\n", - __func__, - isci_device, - request, - task->num_scatter); + /* + * Copy the physical address for the command buffer to the + * SCU Task Context + */ + dma_addr = scic_io_request_get_dma_addr(sds_request, + &sds_request->ssp.cmd); - /* map the sgl addresses, if present. - * libata does the mapping for sata devices - * before we get the request. 
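/*
 * Sketch of how the post_context word above is assembled: the post-TC
 * request type, protocol-engine group and logical-port index are OR'd
 * together, and the TCi lands in the low bits (immediately when the tag
 * was caller-supplied, or later in the start handler otherwise).  The
 * shift and type values below are placeholders for illustration only,
 * not the SCU's actual register layout.
 */
#include <stdint.h>

#define POST_TC_REQUEST_TYPE    (0u << 28)      /* placeholder */
#define PE_GROUP_SHIFT          14              /* placeholder */
#define LOGICAL_PORT_SHIFT      12              /* placeholder */

static uint32_t build_post_context(uint32_t pe_group, uint32_t port_index)
{
        return POST_TC_REQUEST_TYPE |
               (pe_group << PE_GROUP_SHIFT) |
               (port_index << LOGICAL_PORT_SHIFT);
}

static uint32_t post_context_with_tci(uint32_t post_context, uint32_t tci)
{
        return post_context | tci;      /* TCi occupies the low bits */
}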
+ task_context->command_iu_upper = upper_32_bits(dma_addr); + task_context->command_iu_lower = lower_32_bits(dma_addr); + + /* + * Copy the physical address for the response buffer to the + * SCU Task Context */ - if (task->num_scatter && - !sas_protocol_ata(task->task_proto) && - !(SAS_PROTOCOL_SMP & task->task_proto)) { + dma_addr = scic_io_request_get_dma_addr(sds_request, + &sds_request->ssp.rsp); - request->num_sg_entries = dma_map_sg( - &isci_host->pdev->dev, - task->scatter, - task->num_scatter, - task->data_dir - ); + task_context->response_iu_upper = upper_32_bits(dma_addr); + task_context->response_iu_lower = lower_32_bits(dma_addr); +} - if (request->num_sg_entries == 0) - return SCI_FAILURE_INSUFFICIENT_RESOURCES; - } +/** + * This method is will fill in the SCU Task Context for a SSP IO request. + * @sci_req: + * + */ +static void scu_ssp_io_request_construct_task_context( + struct scic_sds_request *sci_req, + enum dma_data_direction dir, + u32 len) +{ + struct scu_task_context *task_context; - /* build the common request object. For now, - * we will let the core allocate the IO tag. - */ - status = scic_io_request_construct(&isci_host->sci, sci_device, - SCI_CONTROLLER_INVALID_IO_TAG, - &request->sci); + task_context = scic_sds_request_get_task_context(sci_req); - if (status != SCI_SUCCESS) { - dev_warn(&isci_host->pdev->dev, - "%s: failed request construct\n", - __func__); - return SCI_FAILURE; - } + scu_ssp_reqeust_construct_task_context(sci_req, task_context); - switch (task->task_proto) { - case SAS_PROTOCOL_SMP: - status = isci_smp_request_build(request); - break; - case SAS_PROTOCOL_SSP: - status = isci_request_ssp_request_construct(request); + task_context->ssp_command_iu_length = + sizeof(struct ssp_cmd_iu) / sizeof(u32); + task_context->type.ssp.frame_type = SSP_COMMAND; + + switch (dir) { + case DMA_FROM_DEVICE: + case DMA_NONE: + default: + task_context->task_type = SCU_TASK_TYPE_IOREAD; break; - case SAS_PROTOCOL_SATA: - case SAS_PROTOCOL_STP: - case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: - status = isci_request_stp_request_construct(request); + case DMA_TO_DEVICE: + task_context->task_type = SCU_TASK_TYPE_IOWRITE; break; - default: - dev_warn(&isci_host->pdev->dev, - "%s: unknown protocol\n", __func__); - return SCI_FAILURE; } - return SCI_SUCCESS; + task_context->transfer_length_bytes = len; + + if (task_context->transfer_length_bytes > 0) + scic_sds_request_build_sgl(sci_req); } +static void scic_sds_ssp_task_request_assign_buffers(struct scic_sds_request *sci_req) +{ + if (sci_req->was_tag_assigned_by_user == false) + sci_req->task_context_buffer = &sci_req->tc; +} /** - * isci_request_alloc_core() - This function gets the request object from the - * isci_host dma cache. - * @isci_host: This parameter specifies the ISCI host object - * @isci_request: This parameter will contain the pointer to the new - * isci_request object. - * @isci_device: This parameter is the pointer to the isci remote device object - * that is the destination for this request. - * @gfp_flags: This parameter specifies the os allocation flags. + * This method will fill in the SCU Task Context for a SSP Task request. The + * following important settings are utilized: -# priority == + * SCU_TASK_PRIORITY_HIGH. This ensures that the task request is issued + * ahead of other task destined for the same Remote Node. -# task_type == + * SCU_TASK_TYPE_IOREAD. This simply indicates that a normal request type + * (i.e. non-raw frame) is being utilized to perform task management. 
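/*
 * Minimal restatement of the direction-to-task-type mapping in
 * scu_ssp_io_request_construct_task_context() above; the enums are local
 * stand-ins for dma_data_direction and the SCU task-type codes.
 */
enum xfer_dir { XFER_TO_DEVICE, XFER_FROM_DEVICE, XFER_NONE };
enum scu_task { TASK_IOREAD, TASK_IOWRITE };

static enum scu_task ssp_task_type(enum xfer_dir dir)
{
        /* Only host-to-device transfers post an IOWRITE; DMA_NONE still
         * uses IOREAD with a zero transfer length, so no SGL is built. */
        return (dir == XFER_TO_DEVICE) ? TASK_IOWRITE : TASK_IOREAD;
}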
-# + * control_frame == 1. This ensures that the proper endianess is set so + * that the bytes are transmitted in the right order for a task frame. + * @sci_req: This parameter specifies the task request object being + * constructed. * - * SCI_SUCCESS on successfull completion, or specific failure code. */ -static int isci_request_alloc_core( - struct isci_host *isci_host, - struct isci_request **isci_request, - struct isci_remote_device *isci_device, - gfp_t gfp_flags) +static void scu_ssp_task_request_construct_task_context( + struct scic_sds_request *sci_req) { - int ret = 0; - dma_addr_t handle; - struct isci_request *request; - - - /* get pointer to dma memory. This actually points - * to both the isci_remote_device object and the - * sci object. The isci object is at the beginning - * of the memory allocated here. - */ - request = dma_pool_alloc(isci_host->dma_pool, gfp_flags, &handle); - if (!request) { - dev_warn(&isci_host->pdev->dev, - "%s: dma_pool_alloc returned NULL\n", __func__); - return -ENOMEM; - } - - /* initialize the request object. */ - spin_lock_init(&request->state_lock); - request->request_daddr = handle; - request->isci_host = isci_host; - request->isci_device = isci_device; - request->io_request_completion = NULL; - request->terminated = false; - - request->num_sg_entries = 0; - - request->complete_in_target = false; + struct scu_task_context *task_context; - INIT_LIST_HEAD(&request->completed_node); - INIT_LIST_HEAD(&request->dev_node); + task_context = scic_sds_request_get_task_context(sci_req); - *isci_request = request; - isci_request_change_state(request, allocated); + scu_ssp_reqeust_construct_task_context(sci_req, task_context); - return ret; + task_context->control_frame = 1; + task_context->priority = SCU_TASK_PRIORITY_HIGH; + task_context->task_type = SCU_TASK_TYPE_RAW_FRAME; + task_context->transfer_length_bytes = 0; + task_context->type.ssp.frame_type = SSP_TASK; + task_context->ssp_command_iu_length = + sizeof(struct ssp_task_iu) / sizeof(u32); } -static int isci_request_alloc_io( - struct isci_host *isci_host, - struct sas_task *task, - struct isci_request **isci_request, - struct isci_remote_device *isci_device, - gfp_t gfp_flags) -{ - int retval = isci_request_alloc_core(isci_host, isci_request, - isci_device, gfp_flags); - if (!retval) { - (*isci_request)->ttype_ptr.io_task_ptr = task; - (*isci_request)->ttype = io_task; +/** + * This method constructs the SSP Command IU data for this ssp passthrough + * comand request object. + * @sci_req: This parameter specifies the request object for which the SSP + * command information unit is being built. + * + * enum sci_status, returns invalid parameter is cdb > 16 + */ - task->lldd_task = *isci_request; - } - return retval; -} /** - * isci_request_alloc_tmf() - This function gets the request object from the - * isci_host dma cache and initializes the relevant fields as a sas_task. - * @isci_host: This parameter specifies the ISCI host object - * @sas_task: This parameter is the task struct from the upper layer driver. - * @isci_request: This parameter will contain the pointer to the new - * isci_request object. - * @isci_device: This parameter is the pointer to the isci remote device object - * that is the destination for this request. - * @gfp_flags: This parameter specifies the os allocation flags. + * This method constructs the SATA request object. 
+ * @sci_req: + * @sat_protocol: + * @transfer_length: + * @data_direction: + * @copy_rx_frame: * - * SCI_SUCCESS on successfull completion, or specific failure code. + * enum sci_status */ -int isci_request_alloc_tmf( - struct isci_host *isci_host, - struct isci_tmf *isci_tmf, - struct isci_request **isci_request, - struct isci_remote_device *isci_device, - gfp_t gfp_flags) +static enum sci_status +scic_io_request_construct_sata(struct scic_sds_request *sci_req, + u32 len, + enum dma_data_direction dir, + bool copy) { - int retval = isci_request_alloc_core(isci_host, isci_request, - isci_device, gfp_flags); + enum sci_status status = SCI_SUCCESS; + struct isci_request *ireq = sci_req_to_ireq(sci_req); + struct sas_task *task = isci_request_access_task(ireq); - if (!retval) { + /* check for management protocols */ + if (ireq->ttype == tmf_task) { + struct isci_tmf *tmf = isci_request_access_tmf(ireq); - (*isci_request)->ttype_ptr.tmf_task_ptr = isci_tmf; - (*isci_request)->ttype = tmf_task; + if (tmf->tmf_code == isci_tmf_sata_srst_high || + tmf->tmf_code == isci_tmf_sata_srst_low) + return scic_sds_stp_soft_reset_request_construct(sci_req); + else { + dev_err(scic_to_dev(sci_req->owning_controller), + "%s: Request 0x%p received un-handled SAT " + "management protocol 0x%x.\n", + __func__, sci_req, tmf->tmf_code); + + return SCI_FAILURE; + } } - return retval; -} -/** - * isci_request_execute() - This function allocates the isci_request object, - * all fills in some common fields. - * @isci_host: This parameter specifies the ISCI host object - * @sas_task: This parameter is the task struct from the upper layer driver. - * @isci_request: This parameter will contain the pointer to the new - * isci_request object. - * @gfp_flags: This parameter specifies the os allocation flags. - * - * SCI_SUCCESS on successfull completion, or specific failure code. - */ -int isci_request_execute( - struct isci_host *isci_host, - struct sas_task *task, - struct isci_request **isci_request, - gfp_t gfp_flags) + if (!sas_protocol_ata(task->task_proto)) { + dev_err(scic_to_dev(sci_req->owning_controller), + "%s: Non-ATA protocol in SATA path: 0x%x\n", + __func__, + task->task_proto); + return SCI_FAILURE; + + } + + /* non data */ + if (task->data_dir == DMA_NONE) + return scic_sds_stp_non_data_request_construct(sci_req); + + /* NCQ */ + if (task->ata_task.use_ncq) + return scic_sds_stp_ncq_request_construct(sci_req, len, dir); + + /* DMA */ + if (task->ata_task.dma_xfer) + return scic_sds_stp_udma_request_construct(sci_req, len, dir); + else /* PIO */ + return scic_sds_stp_pio_request_construct(sci_req, copy); + + return status; +} + +static enum sci_status scic_io_request_construct_basic_ssp(struct scic_sds_request *sci_req) { - int ret = 0; - struct scic_sds_remote_device *sci_device; - enum sci_status status = SCI_FAILURE_UNSUPPORTED_PROTOCOL; - struct isci_remote_device *isci_device; - struct isci_request *request; - unsigned long flags; + struct isci_request *ireq = sci_req_to_ireq(sci_req); + struct sas_task *task = isci_request_access_task(ireq); - isci_device = task->dev->lldd_dev; - sci_device = &isci_device->sci; + sci_req->protocol = SCIC_SSP_PROTOCOL; - /* do common allocation and init of request object. 
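/*
 * The construction choice made by scic_io_request_construct_sata() above,
 * reduced to a pure classifier: soft-reset TMFs, non-data, NCQ, UDMA and
 * PIO requests each get their own constructor.  The enum names here are
 * illustrative stand-ins.
 */
#include <stdbool.h>

enum stp_variant { STP_SOFT_RESET, STP_NON_DATA, STP_NCQ, STP_UDMA, STP_PIO };

static enum stp_variant classify_stp(bool is_srst_tmf, bool data_none,
                                     bool use_ncq, bool dma_xfer)
{
        if (is_srst_tmf)
                return STP_SOFT_RESET;  /* SATA soft-reset task management */
        if (data_none)
                return STP_NON_DATA;    /* no payload */
        if (use_ncq)
                return STP_NCQ;         /* queued command */
        return dma_xfer ? STP_UDMA : STP_PIO;
}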
*/ - ret = isci_request_alloc_io( - isci_host, - task, - &request, - isci_device, - gfp_flags - ); + scu_ssp_io_request_construct_task_context(sci_req, + task->data_dir, + task->total_xfer_len); - if (ret) - goto out; + scic_sds_io_request_build_ssp_command_iu(sci_req); - status = isci_io_request_build(isci_host, request, isci_device); - if (status != SCI_SUCCESS) { - dev_warn(&isci_host->pdev->dev, - "%s: request_construct failed - status = 0x%x\n", - __func__, - status); - goto out; - } + sci_base_state_machine_change_state( + &sci_req->state_machine, + SCI_BASE_REQUEST_STATE_CONSTRUCTED); - spin_lock_irqsave(&isci_host->scic_lock, flags); + return SCI_SUCCESS; +} - /* send the request, let the core assign the IO TAG. */ - status = scic_controller_start_io(&isci_host->sci, sci_device, - &request->sci, - SCI_CONTROLLER_INVALID_IO_TAG); - if (status != SCI_SUCCESS && - status != SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) { - dev_warn(&isci_host->pdev->dev, - "%s: failed request start (0x%x)\n", - __func__, status); - spin_unlock_irqrestore(&isci_host->scic_lock, flags); - goto out; - } +enum sci_status scic_task_request_construct_ssp( + struct scic_sds_request *sci_req) +{ + /* Construct the SSP Task SCU Task Context */ + scu_ssp_task_request_construct_task_context(sci_req); - /* Either I/O started OK, or the core has signaled that - * the device needs a target reset. - * - * In either case, hold onto the I/O for later. - * - * Update it's status and add it to the list in the - * remote device object. - */ - isci_request_change_state(request, started); - list_add(&request->dev_node, &isci_device->reqs_in_process); + /* Fill in the SSP Task IU */ + scic_sds_task_request_build_ssp_task_iu(sci_req); - if (status == SCI_SUCCESS) { - /* Save the tag for possible task mgmt later. */ - request->io_tag = scic_io_request_get_io_tag(&request->sci); - } else { - /* The request did not really start in the - * hardware, so clear the request handle - * here so no terminations will be done. - */ - request->terminated = true; - } - spin_unlock_irqrestore(&isci_host->scic_lock, flags); + sci_base_state_machine_change_state(&sci_req->state_machine, + SCI_BASE_REQUEST_STATE_CONSTRUCTED); - if (status == - SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) { - /* Signal libsas that we need the SCSI error - * handler thread to work on this I/O and that - * we want a device reset. - */ - spin_lock_irqsave(&task->task_state_lock, flags); - task->task_state_flags |= SAS_TASK_NEED_DEV_RESET; - spin_unlock_irqrestore(&task->task_state_lock, flags); + return SCI_SUCCESS; +} - /* Cause this task to be scheduled in the SCSI error - * handler thread. - */ - isci_execpath_callback(isci_host, task, - sas_task_abort); - /* Change the status, since we are holding - * the I/O until it is managed by the SCSI - * error handler. - */ - status = SCI_SUCCESS; - } +static enum sci_status scic_io_request_construct_basic_sata(struct scic_sds_request *sci_req) +{ + enum sci_status status; + struct scic_sds_stp_request *stp_req; + bool copy = false; + struct isci_request *isci_request = sci_req_to_ireq(sci_req); + struct sas_task *task = isci_request_access_task(isci_request); - out: - if (status != SCI_SUCCESS) { - /* release dma memory on failure. */ - isci_request_free(isci_host, request); - request = NULL; - ret = SCI_FAILURE; - } + stp_req = &sci_req->stp.req; + sci_req->protocol = SCIC_STP_PROTOCOL; - *isci_request = request; - return ret; + copy = (task->data_dir == DMA_NONE) ? 
false : true; + + status = scic_io_request_construct_sata(sci_req, + task->total_xfer_len, + task->data_dir, + copy); + + if (status == SCI_SUCCESS) + sci_base_state_machine_change_state(&sci_req->state_machine, + SCI_BASE_REQUEST_STATE_CONSTRUCTED); + + return status; } +enum sci_status scic_task_request_construct_sata(struct scic_sds_request *sci_req) +{ + enum sci_status status = SCI_SUCCESS; + struct isci_request *ireq = sci_req_to_ireq(sci_req); + + /* check for management protocols */ + if (ireq->ttype == tmf_task) { + struct isci_tmf *tmf = isci_request_access_tmf(ireq); + + if (tmf->tmf_code == isci_tmf_sata_srst_high || + tmf->tmf_code == isci_tmf_sata_srst_low) { + status = scic_sds_stp_soft_reset_request_construct(sci_req); + } else { + dev_err(scic_to_dev(sci_req->owning_controller), + "%s: Request 0x%p received un-handled SAT " + "Protocol 0x%x.\n", + __func__, sci_req, tmf->tmf_code); + + return SCI_FAILURE; + } + } + + if (status == SCI_SUCCESS) + sci_base_state_machine_change_state( + &sci_req->state_machine, + SCI_BASE_REQUEST_STATE_CONSTRUCTED); + + return status; +} + /** - * isci_request_process_response_iu() - This function sets the status and - * response iu, in the task struct, from the request object for the upper - * layer driver. - * @sas_task: This parameter is the task struct from the upper layer driver. - * @resp_iu: This parameter points to the response iu of the completed request. - * @dev: This parameter specifies the linux device struct. - * - * none. + * sci_req_tx_bytes - bytes transferred when reply underruns request + * @sci_req: request that was terminated early */ -static void isci_request_process_response_iu( - struct sas_task *task, - struct ssp_response_iu *resp_iu, - struct device *dev) +#define SCU_TASK_CONTEXT_SRAM 0x200000 +static u32 sci_req_tx_bytes(struct scic_sds_request *sci_req) { - dev_dbg(dev, - "%s: resp_iu = %p " - "resp_iu->status = 0x%x,\nresp_iu->datapres = %d " - "resp_iu->response_data_len = %x, " - "resp_iu->sense_data_len = %x\nrepsonse data: ", + struct scic_sds_controller *scic = sci_req->owning_controller; + u32 ret_val = 0; + + if (readl(&scic->smu_registers->address_modifier) == 0) { + void __iomem *scu_reg_base = scic->scu_registers; + + /* get the bytes of data from the Address == BAR1 + 20002Ch + (256*TCi) where + * BAR1 is the scu_registers + * 0x20002C = 0x200000 + 0x2c + * = start of task context SRAM + offset of (type.ssp.data_offset) + * TCi is the io_tag of struct scic_sds_request + */ + ret_val = readl(scu_reg_base + + (SCU_TASK_CONTEXT_SRAM + offsetof(struct scu_task_context, type.ssp.data_offset)) + + ((sizeof(struct scu_task_context)) * scic_sds_io_tag_get_index(sci_req->io_tag))); + } + + return ret_val; +} + +enum sci_status +scic_sds_request_start(struct scic_sds_request *request) +{ + if (request->device_sequence != + scic_sds_remote_device_get_sequence(request->target_device)) + return SCI_FAILURE; + + if (request->state_handlers->start_handler) + return request->state_handlers->start_handler(request); + + dev_warn(scic_to_dev(request->owning_controller), + "%s: SCIC IO Request requested to start while in wrong " + "state %d\n", + __func__, + sci_base_state_machine_get_state(&request->state_machine)); + + return SCI_FAILURE_INVALID_STATE; +} + +enum sci_status +scic_sds_io_request_terminate(struct scic_sds_request *request) +{ + if (request->state_handlers->abort_handler) + return request->state_handlers->abort_handler(request); + + dev_warn(scic_to_dev(request->owning_controller), + "%s: SCIC IO Request 
requested to abort while in wrong " + "state %d\n", __func__, - resp_iu, - resp_iu->status, - resp_iu->datapres, - resp_iu->response_data_len, - resp_iu->sense_data_len); + sci_base_state_machine_get_state(&request->state_machine)); - task->task_status.stat = resp_iu->status; + return SCI_FAILURE_INVALID_STATE; +} - /* libsas updates the task status fields based on the response iu. */ - sas_ssp_task_response(dev, task, resp_iu); +enum sci_status scic_sds_io_request_event_handler( + struct scic_sds_request *request, + u32 event_code) +{ + if (request->state_handlers->event_handler) + return request->state_handlers->event_handler(request, event_code); + + dev_warn(scic_to_dev(request->owning_controller), + "%s: SCIC IO Request given event code notification %x while " + "in wrong state %d\n", + __func__, + event_code, + sci_base_state_machine_get_state(&request->state_machine)); + + return SCI_FAILURE_INVALID_STATE; } /** - * isci_request_set_open_reject_status() - This function prepares the I/O - * completion for OPEN_REJECT conditions. - * @request: This parameter is the completed isci_request object. - * @response_ptr: This parameter specifies the service response for the I/O. - * @status_ptr: This parameter specifies the exec status for the I/O. - * @complete_to_host_ptr: This parameter specifies the action to be taken by - * the LLDD with respect to completing this request or forcing an abort - * condition on the I/O. - * @open_rej_reason: This parameter specifies the encoded reason for the - * abandon-class reject. * - * none. + * @sci_req: The SCIC_SDS_IO_REQUEST_T object for which the start + * operation is to be executed. + * @frame_index: The frame index returned by the hardware for the reqeust + * object. + * + * This method invokes the core state frame handler for the + * SCIC_SDS_IO_REQUEST_T object. enum sci_status */ -static void isci_request_set_open_reject_status( - struct isci_request *request, - struct sas_task *task, - enum service_response *response_ptr, - enum exec_status *status_ptr, - enum isci_completion_selection *complete_to_host_ptr, - enum sas_open_rej_reason open_rej_reason) +enum sci_status scic_sds_io_request_frame_handler( + struct scic_sds_request *request, + u32 frame_index) { - /* Task in the target is done. */ - request->complete_in_target = true; - *response_ptr = SAS_TASK_UNDELIVERED; - *status_ptr = SAS_OPEN_REJECT; - *complete_to_host_ptr = isci_perform_normal_io_completion; - task->task_status.open_rej_reason = open_rej_reason; + if (request->state_handlers->frame_handler) + return request->state_handlers->frame_handler(request, frame_index); + + dev_warn(scic_to_dev(request->owning_controller), + "%s: SCIC IO Request given unexpected frame %x while in " + "state %d\n", + __func__, + frame_index, + sci_base_state_machine_get_state(&request->state_machine)); + + scic_sds_controller_release_frame(request->owning_controller, frame_index); + return SCI_FAILURE_INVALID_STATE; } -/** - * isci_request_handle_controller_specific_errors() - This function decodes - * controller-specific I/O completion error conditions. - * @request: This parameter is the completed isci_request object. - * @response_ptr: This parameter specifies the service response for the I/O. - * @status_ptr: This parameter specifies the exec status for the I/O. - * @complete_to_host_ptr: This parameter specifies the action to be taken by - * the LLDD with respect to completing this request or forcing an abort - * condition on the I/O. - * - * none. 
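/*
 * Worked example of the MMIO offset that sci_req_tx_bytes() above reads:
 * the task-context SRAM base, plus the offset of the data_offset field,
 * plus one task-context stride per TCi.  The 0x2c field offset and the
 * 256-byte stride are taken from the code comment and are used here
 * purely for illustration.
 */
#include <stdint.h>
#include <stdio.h>

#define TASK_CONTEXT_SRAM       0x200000u
#define DATA_OFFSET_FIELD       0x2cu   /* offsetof(type.ssp.data_offset) */
#define TASK_CONTEXT_STRIDE     256u    /* sizeof(struct scu_task_context) */

static uint32_t tx_bytes_reg_offset(uint32_t tci)
{
        return TASK_CONTEXT_SRAM + DATA_OFFSET_FIELD + TASK_CONTEXT_STRIDE * tci;
}

int main(void)
{
        /* TCi 0 reads at BAR1 + 0x20002c, TCi 1 at BAR1 + 0x20012c, ... */
        printf("0x%x 0x%x\n", tx_bytes_reg_offset(0), tx_bytes_reg_offset(1));
        return 0;
}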
+/* + * This function copies response data for requests returning response data + * instead of sense data. + * @sci_req: This parameter specifies the request object for which to copy + * the response data. */ -static void isci_request_handle_controller_specific_errors( - struct isci_remote_device *isci_device, - struct isci_request *request, - struct sas_task *task, - enum service_response *response_ptr, - enum exec_status *status_ptr, - enum isci_completion_selection *complete_to_host_ptr) +void scic_sds_io_request_copy_response(struct scic_sds_request *sci_req) { - unsigned int cstatus; + void *resp_buf; + u32 len; + struct ssp_response_iu *ssp_response; + struct isci_request *ireq = sci_req_to_ireq(sci_req); + struct isci_tmf *isci_tmf = isci_request_access_tmf(ireq); - cstatus = scic_request_get_controller_status(&request->sci); + ssp_response = &sci_req->ssp.rsp; - dev_dbg(&request->isci_host->pdev->dev, - "%s: %p SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR " - "- controller status = 0x%x\n", - __func__, request, cstatus); + resp_buf = &isci_tmf->resp.resp_iu; - /* Decode the controller-specific errors; most - * important is to recognize those conditions in which - * the target may still have a task outstanding that - * must be aborted. - * - * Note that there are SCU completion codes being - * named in the decode below for which SCIC has already - * done work to handle them in a way other than as - * a controller-specific completion code; these are left - * in the decode below for completeness sake. - */ - switch (cstatus) { - case SCU_TASK_DONE_DMASETUP_DIRERR: - /* Also SCU_TASK_DONE_SMP_FRM_TYPE_ERR: */ - case SCU_TASK_DONE_XFERCNT_ERR: - /* Also SCU_TASK_DONE_SMP_UFI_ERR: */ - if (task->task_proto == SAS_PROTOCOL_SMP) { - /* SCU_TASK_DONE_SMP_UFI_ERR == Task Done. */ - *response_ptr = SAS_TASK_COMPLETE; + len = min_t(u32, + SSP_RESP_IU_MAX_SIZE, + be32_to_cpu(ssp_response->response_data_len)); - /* See if the device has been/is being stopped. Note - * that we ignore the quiesce state, since we are - * concerned about the actual device state. - */ - if ((isci_device->status == isci_stopping) || - (isci_device->status == isci_stopped)) - *status_ptr = SAS_DEVICE_UNKNOWN; - else - *status_ptr = SAS_ABORTED_TASK; + memcpy(resp_buf, ssp_response->resp_data, len); +} - request->complete_in_target = true; +/* + * This method implements the action taken when a constructed + * SCIC_SDS_IO_REQUEST_T object receives a scic_sds_request_start() request. + * This method will, if necessary, allocate a TCi for the io request object and + * then will, if necessary, copy the constructed TC data into the actual TC + * buffer. If everything is successful the post context field is updated with + * the TCi so the controller can post the request to the hardware. enum sci_status + * SCI_SUCCESS SCI_FAILURE_INSUFFICIENT_RESOURCES + */ +static enum sci_status scic_sds_request_constructed_state_start_handler( + struct scic_sds_request *request) +{ + struct scu_task_context *task_context; - *complete_to_host_ptr = - isci_perform_normal_io_completion; - } else { - /* Task in the target is not done. 
*/ - *response_ptr = SAS_TASK_UNDELIVERED; + if (request->io_tag == SCI_CONTROLLER_INVALID_IO_TAG) { + request->io_tag = + scic_controller_allocate_io_tag(request->owning_controller); + } - if ((isci_device->status == isci_stopping) || - (isci_device->status == isci_stopped)) - *status_ptr = SAS_DEVICE_UNKNOWN; - else - *status_ptr = SAM_STAT_TASK_ABORTED; + /* Record the IO Tag in the request */ + if (request->io_tag != SCI_CONTROLLER_INVALID_IO_TAG) { + task_context = request->task_context_buffer; - request->complete_in_target = false; + task_context->task_index = scic_sds_io_tag_get_index(request->io_tag); - *complete_to_host_ptr = - isci_perform_error_io_completion; - } + switch (task_context->protocol_type) { + case SCU_TASK_CONTEXT_PROTOCOL_SMP: + case SCU_TASK_CONTEXT_PROTOCOL_SSP: + /* SSP/SMP Frame */ + task_context->type.ssp.tag = request->io_tag; + task_context->type.ssp.target_port_transfer_tag = 0xFFFF; + break; - break; + case SCU_TASK_CONTEXT_PROTOCOL_STP: + /* + * STP/SATA Frame + * task_context->type.stp.ncq_tag = request->ncq_tag; */ + break; - case SCU_TASK_DONE_CRC_ERR: - case SCU_TASK_DONE_NAK_CMD_ERR: - case SCU_TASK_DONE_EXCESS_DATA: - case SCU_TASK_DONE_UNEXP_FIS: - /* Also SCU_TASK_DONE_UNEXP_RESP: */ - case SCU_TASK_DONE_VIIT_ENTRY_NV: /* TODO - conditions? */ - case SCU_TASK_DONE_IIT_ENTRY_NV: /* TODO - conditions? */ - case SCU_TASK_DONE_RNCNV_OUTBOUND: /* TODO - conditions? */ - /* These are conditions in which the target - * has completed the task, so that no cleanup - * is necessary. - */ - *response_ptr = SAS_TASK_COMPLETE; + case SCU_TASK_CONTEXT_PROTOCOL_NONE: + /* / @todo When do we set no protocol type? */ + break; - /* See if the device has been/is being stopped. Note - * that we ignore the quiesce state, since we are - * concerned about the actual device state. - */ - if ((isci_device->status == isci_stopping) || - (isci_device->status == isci_stopped)) - *status_ptr = SAS_DEVICE_UNKNOWN; - else - *status_ptr = SAS_ABORTED_TASK; + default: + /* This should never happen since we build the IO requests */ + break; + } - request->complete_in_target = true; + /* + * Check to see if we need to copy the task context buffer + * or have been building into the task context buffer */ + if (request->was_tag_assigned_by_user == false) { + scic_sds_controller_copy_task_context( + request->owning_controller, request); + } - *complete_to_host_ptr = isci_perform_normal_io_completion; - break; + /* Add to the post_context the io tag value */ + request->post_context |= scic_sds_io_tag_get_index(request->io_tag); + /* Everything is good go ahead and change state */ + sci_base_state_machine_change_state(&request->state_machine, + SCI_BASE_REQUEST_STATE_STARTED); - /* Note that the only open reject completion codes seen here will be - * abandon-class codes; all others are automatically retried in the SCU. - */ - case SCU_TASK_OPEN_REJECT_WRONG_DESTINATION: + return SCI_SUCCESS; + } - isci_request_set_open_reject_status( - request, task, response_ptr, status_ptr, - complete_to_host_ptr, SAS_OREJ_WRONG_DEST); - break; + return SCI_FAILURE_INSUFFICIENT_RESOURCES; +} - case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION: +/* + * This method implements the action to be taken when an SCIC_SDS_IO_REQUEST_T + * object receives a scic_sds_request_terminate() request. Since the request + * has not yet been posted to the hardware the request transitions to the + * completed state. 
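/*
 * Sketch of the io_tag split consumed by scic_sds_io_tag_get_index() in
 * the start handler above.  The 4-bit-sequence / 12-bit-index layout shown
 * here is an assumption for illustration; only the idea that the TCi is
 * recovered by masking the tag is taken from the code.
 */
#include <stdint.h>

#define TAG_SEQ_SHIFT   12              /* assumed */
#define TAG_INDEX_MASK  0x0fffu         /* assumed */

static inline uint16_t tag_build(uint8_t seq, uint16_t tci)
{
        return (uint16_t)((seq << TAG_SEQ_SHIFT) | (tci & TAG_INDEX_MASK));
}

static inline uint16_t tag_index(uint16_t tag)
{
        return tag & TAG_INDEX_MASK;    /* the TCi written into the TC and post_context */
}

static inline uint8_t tag_seq(uint16_t tag)
{
        return (uint8_t)(tag >> TAG_SEQ_SHIFT); /* guards against reuse of a stale tag */
}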
enum sci_status SCI_SUCCESS + */ +static enum sci_status scic_sds_request_constructed_state_abort_handler( + struct scic_sds_request *request) +{ + /* + * This request has been terminated by the user make sure that the correct + * status code is returned */ + scic_sds_request_set_status(request, + SCU_TASK_DONE_TASK_ABORT, + SCI_FAILURE_IO_TERMINATED); + + sci_base_state_machine_change_state(&request->state_machine, + SCI_BASE_REQUEST_STATE_COMPLETED); + return SCI_SUCCESS; +} - /* Note - the return of AB0 will change when - * libsas implements detection of zone violations. - */ - isci_request_set_open_reject_status( - request, task, response_ptr, status_ptr, - complete_to_host_ptr, SAS_OREJ_RESV_AB0); - break; +/* + * ***************************************************************************** + * * STARTED STATE HANDLERS + * ***************************************************************************** */ - case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1: +/* + * This method implements the action to be taken when an SCIC_SDS_IO_REQUEST_T + * object receives a scic_sds_request_terminate() request. Since the request + * has been posted to the hardware the io request state is changed to the + * aborting state. enum sci_status SCI_SUCCESS + */ +enum sci_status scic_sds_request_started_state_abort_handler( + struct scic_sds_request *request) +{ + if (request->has_started_substate_machine) + sci_base_state_machine_stop(&request->started_substate_machine); - isci_request_set_open_reject_status( - request, task, response_ptr, status_ptr, - complete_to_host_ptr, SAS_OREJ_RESV_AB1); - break; + sci_base_state_machine_change_state(&request->state_machine, + SCI_BASE_REQUEST_STATE_ABORTING); + return SCI_SUCCESS; +} - case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2: +/* + * scic_sds_request_started_state_tc_completion_handler() - This method process + * TC (task context) completions for normal IO request (i.e. Task/Abort + * Completions of type 0). This method will update the + * SCIC_SDS_IO_REQUEST_T::status field. + * @sci_req: This parameter specifies the request for which a completion + * occurred. + * @completion_code: This parameter specifies the completion code received from + * the SCU. + * + */ +static enum sci_status +scic_sds_request_started_state_tc_completion_handler(struct scic_sds_request *sci_req, + u32 completion_code) +{ + u8 datapres; + struct ssp_response_iu *resp_iu; - isci_request_set_open_reject_status( - request, task, response_ptr, status_ptr, - complete_to_host_ptr, SAS_OREJ_RESV_AB2); + /* + * TODO: Any SDMA return code of other than 0 is bad + * decode 0x003C0000 to determine SDMA status + */ + switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): + scic_sds_request_set_status(sci_req, + SCU_TASK_DONE_GOOD, + SCI_SUCCESS); break; - case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3: + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_EARLY_RESP): + { + /* + * There are times when the SCU hardware will return an early + * response because the io request specified more data than is + * returned by the target device (mode pages, inquiry data, + * etc.). We must check the response stats to see if this is + * truly a failed request or a good request that just got + * completed early. 
+ */ + struct ssp_response_iu *resp = &sci_req->ssp.rsp; + ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32); + + sci_swab32_cpy(&sci_req->ssp.rsp, + &sci_req->ssp.rsp, + word_cnt); + + if (resp->status == 0) { + scic_sds_request_set_status( + sci_req, + SCU_TASK_DONE_GOOD, + SCI_SUCCESS_IO_DONE_EARLY); + } else { + scic_sds_request_set_status( + sci_req, + SCU_TASK_DONE_CHECK_RESPONSE, + SCI_FAILURE_IO_RESPONSE_VALID); + } + } + break; - isci_request_set_open_reject_status( - request, task, response_ptr, status_ptr, - complete_to_host_ptr, SAS_OREJ_RESV_AB3); - break; + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CHECK_RESPONSE): + { + ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32); - case SCU_TASK_OPEN_REJECT_BAD_DESTINATION: + sci_swab32_cpy(&sci_req->ssp.rsp, + &sci_req->ssp.rsp, + word_cnt); - isci_request_set_open_reject_status( - request, task, response_ptr, status_ptr, - complete_to_host_ptr, SAS_OREJ_BAD_DEST); + scic_sds_request_set_status(sci_req, + SCU_TASK_DONE_CHECK_RESPONSE, + SCI_FAILURE_IO_RESPONSE_VALID); break; + } - case SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY: - - isci_request_set_open_reject_status( - request, task, response_ptr, status_ptr, - complete_to_host_ptr, SAS_OREJ_STP_NORES); + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RESP_LEN_ERR): + /* + * / @todo With TASK_DONE_RESP_LEN_ERR is the response frame + * guaranteed to be received before this completion status is + * posted? + */ + resp_iu = &sci_req->ssp.rsp; + datapres = resp_iu->datapres; + + if ((datapres == 0x01) || (datapres == 0x02)) { + scic_sds_request_set_status( + sci_req, + SCU_TASK_DONE_CHECK_RESPONSE, + SCI_FAILURE_IO_RESPONSE_VALID); + } else + scic_sds_request_set_status( + sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS); break; - case SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED: - - isci_request_set_open_reject_status( - request, task, response_ptr, status_ptr, - complete_to_host_ptr, SAS_OREJ_EPROTO); + /* only stp device gets suspended. 
*/ + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO): + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_PERR): + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_NAK_ERR): + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_DATA_LEN_ERR): + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_ABORT_ERR): + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_XR_WD_LEN): + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_MAX_PLD_ERR): + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_RESP): + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_SDBFIS): + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR): + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDB_ERR): + if (sci_req->protocol == SCIC_STP_PROTOCOL) { + scic_sds_request_set_status( + sci_req, + SCU_GET_COMPLETION_TL_STATUS(completion_code) >> + SCU_COMPLETION_TL_STATUS_SHIFT, + SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED); + } else { + scic_sds_request_set_status( + sci_req, + SCU_GET_COMPLETION_TL_STATUS(completion_code) >> + SCU_COMPLETION_TL_STATUS_SHIFT, + SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); + } break; - case SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED: - - isci_request_set_open_reject_status( - request, task, response_ptr, status_ptr, - complete_to_host_ptr, SAS_OREJ_CONN_RATE); + /* both stp/ssp device gets suspended */ + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LF_ERR): + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_WRONG_DESTINATION): + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1): + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2): + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3): + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_BAD_DESTINATION): + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_ZONE_VIOLATION): + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY): + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED): + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED): + scic_sds_request_set_status( + sci_req, + SCU_GET_COMPLETION_TL_STATUS(completion_code) >> + SCU_COMPLETION_TL_STATUS_SHIFT, + SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED); break; - case SCU_TASK_DONE_LL_R_ERR: - /* Also SCU_TASK_DONE_ACK_NAK_TO: */ - case SCU_TASK_DONE_LL_PERR: - case SCU_TASK_DONE_LL_SY_TERM: - /* Also SCU_TASK_DONE_NAK_ERR:*/ - case SCU_TASK_DONE_LL_LF_TERM: - /* Also SCU_TASK_DONE_DATA_LEN_ERR: */ - case SCU_TASK_DONE_LL_ABORT_ERR: - case SCU_TASK_DONE_SEQ_INV_TYPE: - /* Also SCU_TASK_DONE_UNEXP_XR: */ - case SCU_TASK_DONE_XR_IU_LEN_ERR: - case SCU_TASK_DONE_INV_FIS_LEN: - /* Also SCU_TASK_DONE_XR_WD_LEN: */ - case SCU_TASK_DONE_SDMA_ERR: - case SCU_TASK_DONE_OFFSET_ERR: - case SCU_TASK_DONE_MAX_PLD_ERR: - case SCU_TASK_DONE_LF_ERR: - case SCU_TASK_DONE_SMP_RESP_TO_ERR: /* Escalate to dev reset? */ - case SCU_TASK_DONE_SMP_LL_RX_ERR: - case SCU_TASK_DONE_UNEXP_DATA: - case SCU_TASK_DONE_UNEXP_SDBFIS: - case SCU_TASK_DONE_REG_ERR: - case SCU_TASK_DONE_SDB_ERR: - case SCU_TASK_DONE_TASK_ABORT: + /* neither ssp nor stp gets suspended. 
*/ + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_NAK_CMD_ERR): + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_XR): + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_XR_IU_LEN_ERR): + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDMA_ERR): + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_OFFSET_ERR): + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_EXCESS_DATA): + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_RESP_TO_ERR): + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_UFI_ERR): + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_FRM_TYPE_ERR): + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_LL_RX_ERR): + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_DATA): + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_OPEN_FAIL): + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_VIIT_ENTRY_NV): + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_IIT_ENTRY_NV): + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RNCNV_OUTBOUND): default: - /* Task in the target is not done. */ - *response_ptr = SAS_TASK_UNDELIVERED; - *status_ptr = SAM_STAT_TASK_ABORTED; - request->complete_in_target = false; - - *complete_to_host_ptr = isci_perform_error_io_completion; + scic_sds_request_set_status( + sci_req, + SCU_GET_COMPLETION_TL_STATUS(completion_code) >> + SCU_COMPLETION_TL_STATUS_SHIFT, + SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); break; } + + /* + * TODO: This is probably wrong for ACK/NAK timeout conditions + */ + + /* In all cases we will treat this as the completion of the IO req. */ + sci_base_state_machine_change_state( + &sci_req->state_machine, + SCI_BASE_REQUEST_STATE_COMPLETED); + return SCI_SUCCESS; } -/** - * isci_task_save_for_upper_layer_completion() - This function saves the - * request for later completion to the upper layer driver. - * @host: This parameter is a pointer to the host on which the the request - * should be queued (either as an error or success). - * @request: This parameter is the completed request. - * @response: This parameter is the response code for the completed task. - * @status: This parameter is the status code for the completed task. - * - * none. - */ -static void isci_task_save_for_upper_layer_completion( - struct isci_host *host, - struct isci_request *request, - enum service_response response, - enum exec_status status, - enum isci_completion_selection task_notification_selection) +enum sci_status +scic_sds_io_request_tc_completion(struct scic_sds_request *request, u32 completion_code) { - struct sas_task *task = isci_request_access_task(request); - - task_notification_selection - = isci_task_set_completion_status(task, response, status, - task_notification_selection); + if (request->state_machine.current_state_id == SCI_BASE_REQUEST_STATE_STARTED && + request->has_started_substate_machine == false) + return scic_sds_request_started_state_tc_completion_handler(request, completion_code); + else if (request->state_handlers->tc_completion_handler) + return request->state_handlers->tc_completion_handler(request, completion_code); + + dev_warn(scic_to_dev(request->owning_controller), + "%s: SCIC IO Request given task completion notification %x " + "while in wrong state %d\n", + __func__, + completion_code, + sci_base_state_machine_get_state(&request->state_machine)); - /* Tasks aborted specifically by a call to the lldd_abort_task - * function should not be completed to the host in the regular path. 
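/*
 * How the TL-status field of a completion dword is handled by the switch
 * above: SCU_GET_COMPLETION_TL_STATUS() masks the field in place,
 * SCU_MAKE_COMPLETION_STATUS() builds a comparable value from a raw code,
 * and the extra right shift recovers the raw code passed to
 * scic_sds_request_set_status().  The shift and mask values below are
 * assumptions used only to make the sketch compile.
 */
#include <stdint.h>

#define TL_STATUS_SHIFT 16              /* assumed field position */
#define TL_STATUS_MASK  0x0fff0000u     /* assumed field width */

#define GET_TL_STATUS(x)        ((x) & TL_STATUS_MASK)
#define MAKE_TL_STATUS(code)    ((uint32_t)(code) << TL_STATUS_SHIFT)

static uint32_t raw_tl_status(uint32_t completion_code)
{
        return GET_TL_STATUS(completion_code) >> TL_STATUS_SHIFT;
}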
- */ - switch (task_notification_selection) { + return SCI_FAILURE_INVALID_STATE; - case isci_perform_normal_io_completion: +} - /* Normal notification (task_done) */ - dev_dbg(&host->pdev->dev, - "%s: Normal - task = %p, response=%d (%d), status=%d (%d)\n", +/* + * This method implements the action to be taken when an SCIC_SDS_IO_REQUEST_T + * object receives a scic_sds_request_frame_handler() request. This method + * first determines the frame type received. If this is a response frame then + * the response data is copied to the io request response buffer for processing + * at completion time. If the frame type is not a response buffer an error is + * logged. enum sci_status SCI_SUCCESS SCI_FAILURE_INVALID_PARAMETER_VALUE + */ +static enum sci_status +scic_sds_request_started_state_frame_handler(struct scic_sds_request *sci_req, + u32 frame_index) +{ + enum sci_status status; + u32 *frame_header; + struct ssp_frame_hdr ssp_hdr; + ssize_t word_cnt; + + status = scic_sds_unsolicited_frame_control_get_header( + &(scic_sds_request_get_controller(sci_req)->uf_control), + frame_index, + (void **)&frame_header); + + word_cnt = sizeof(struct ssp_frame_hdr) / sizeof(u32); + sci_swab32_cpy(&ssp_hdr, frame_header, word_cnt); + + if (ssp_hdr.frame_type == SSP_RESPONSE) { + struct ssp_response_iu *resp_iu; + ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32); + + status = scic_sds_unsolicited_frame_control_get_buffer( + &(scic_sds_request_get_controller(sci_req)->uf_control), + frame_index, + (void **)&resp_iu); + + sci_swab32_cpy(&sci_req->ssp.rsp, + resp_iu, word_cnt); + + resp_iu = &sci_req->ssp.rsp; + + if ((resp_iu->datapres == 0x01) || + (resp_iu->datapres == 0x02)) { + scic_sds_request_set_status( + sci_req, + SCU_TASK_DONE_CHECK_RESPONSE, + SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); + } else + scic_sds_request_set_status( + sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS); + } else { + /* This was not a response frame why did it get forwarded? */ + dev_err(scic_to_dev(sci_req->owning_controller), + "%s: SCIC IO Request 0x%p received unexpected " + "frame %d type 0x%02x\n", __func__, - task, - task->task_status.resp, response, - task->task_status.stat, status); - /* Add to the completed list. */ - list_add(&request->completed_node, - &host->requests_to_complete); + sci_req, + frame_index, + ssp_hdr.frame_type); + } - /* Take the request off the device's pending request list. */ - list_del_init(&request->dev_node); - break; + /* + * In any case we are done with this frame buffer return it to the + * controller + */ + scic_sds_controller_release_frame( + sci_req->owning_controller, frame_index); - case isci_perform_aborted_io_completion: - /* No notification to libsas because this request is - * already in the abort path. - */ - dev_warn(&host->pdev->dev, - "%s: Aborted - task = %p, response=%d (%d), status=%d (%d)\n", - __func__, - task, - task->task_status.resp, response, - task->task_status.stat, status); + return SCI_SUCCESS; +} - /* Wake up whatever process was waiting for this - * request to complete. - */ - WARN_ON(request->io_request_completion == NULL); +/* + * ***************************************************************************** + * * COMPLETED STATE HANDLERS + * ***************************************************************************** */ - if (request->io_request_completion != NULL) { - /* Signal whoever is waiting that this - * request is complete. 
- */ - complete(request->io_request_completion); - } - break; +/* + * This method implements the action to be taken when an SCIC_SDS_IO_REQUEST_T + * object receives a scic_sds_request_complete() request. This method frees up + * any io request resources that have been allocated and transitions the + * request to its final state. Consider stopping the state machine instead of + * transitioning to the final state? enum sci_status SCI_SUCCESS + */ +static enum sci_status scic_sds_request_completed_state_complete_handler( + struct scic_sds_request *request) +{ + if (request->was_tag_assigned_by_user != true) { + scic_controller_free_io_tag( + request->owning_controller, request->io_tag); + } - case isci_perform_error_io_completion: - /* Use sas_task_abort */ - dev_warn(&host->pdev->dev, - "%s: Error - task = %p, response=%d (%d), status=%d (%d)\n", - __func__, - task, - task->task_status.resp, response, - task->task_status.stat, status); - /* Add to the aborted list. */ - list_add(&request->completed_node, - &host->requests_to_errorback); - break; + if (request->saved_rx_frame_index != SCU_INVALID_FRAME_INDEX) { + scic_sds_controller_release_frame( + request->owning_controller, request->saved_rx_frame_index); + } - default: - dev_warn(&host->pdev->dev, - "%s: Unknown - task = %p, response=%d (%d), status=%d (%d)\n", - __func__, - task, - task->task_status.resp, response, - task->task_status.stat, status); + sci_base_state_machine_change_state(&request->state_machine, + SCI_BASE_REQUEST_STATE_FINAL); + return SCI_SUCCESS; +} - /* Add to the error to libsas list. */ - list_add(&request->completed_node, - &host->requests_to_errorback); +/* + * ***************************************************************************** + * * ABORTING STATE HANDLERS + * ***************************************************************************** */ + +/* + * This method implements the action to be taken when an SCIC_SDS_IO_REQUEST_T + * object receives a scic_sds_request_terminate() request. This method is the + * io request aborting state abort handlers. On receipt of a multiple + * terminate requests the io request will transition to the completed state. + * This should not happen in normal operation. enum sci_status SCI_SUCCESS + */ +static enum sci_status scic_sds_request_aborting_state_abort_handler( + struct scic_sds_request *request) +{ + sci_base_state_machine_change_state(&request->state_machine, + SCI_BASE_REQUEST_STATE_COMPLETED); + return SCI_SUCCESS; +} + +/* + * This method implements the action to be taken when an SCIC_SDS_IO_REQUEST_T + * object receives a scic_sds_request_task_completion() request. This method + * decodes the completion type waiting for the abort task complete + * notification. When the abort task complete is received the io request + * transitions to the completed state. 
enum sci_status SCI_SUCCESS + */ +static enum sci_status scic_sds_request_aborting_state_tc_completion_handler( + struct scic_sds_request *sci_req, + u32 completion_code) +{ + switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { + case (SCU_TASK_DONE_GOOD << SCU_COMPLETION_TL_STATUS_SHIFT): + case (SCU_TASK_DONE_TASK_ABORT << SCU_COMPLETION_TL_STATUS_SHIFT): + scic_sds_request_set_status( + sci_req, SCU_TASK_DONE_TASK_ABORT, SCI_FAILURE_IO_TERMINATED + ); + + sci_base_state_machine_change_state(&sci_req->state_machine, + SCI_BASE_REQUEST_STATE_COMPLETED); + break; + + default: + /* + * Unless we get some strange error wait for the task abort to complete + * TODO: Should there be a state change for this completion? */ break; } + + return SCI_SUCCESS; +} + +/* + * This method implements the action to be taken when an SCIC_SDS_IO_REQUEST_T + * object receives a scic_sds_request_frame_handler() request. This method + * discards the unsolicited frame since we are waiting for the abort task + * completion. enum sci_status SCI_SUCCESS + */ +static enum sci_status scic_sds_request_aborting_state_frame_handler( + struct scic_sds_request *sci_req, + u32 frame_index) +{ + /* TODO: Is it even possible to get an unsolicited frame in the aborting state? */ + + scic_sds_controller_release_frame( + sci_req->owning_controller, frame_index); + + return SCI_SUCCESS; } +static const struct scic_sds_io_request_state_handler scic_sds_request_state_handler_table[] = { + [SCI_BASE_REQUEST_STATE_INITIAL] = { + }, + [SCI_BASE_REQUEST_STATE_CONSTRUCTED] = { + .start_handler = scic_sds_request_constructed_state_start_handler, + .abort_handler = scic_sds_request_constructed_state_abort_handler, + }, + [SCI_BASE_REQUEST_STATE_STARTED] = { + .abort_handler = scic_sds_request_started_state_abort_handler, + .tc_completion_handler = scic_sds_request_started_state_tc_completion_handler, + .frame_handler = scic_sds_request_started_state_frame_handler, + }, + [SCI_BASE_REQUEST_STATE_COMPLETED] = { + .complete_handler = scic_sds_request_completed_state_complete_handler, + }, + [SCI_BASE_REQUEST_STATE_ABORTING] = { + .abort_handler = scic_sds_request_aborting_state_abort_handler, + .tc_completion_handler = scic_sds_request_aborting_state_tc_completion_handler, + .frame_handler = scic_sds_request_aborting_state_frame_handler, + }, + [SCI_BASE_REQUEST_STATE_FINAL] = { + }, +}; + + /** - * isci_request_io_request_complete() - This function is called by the sci core - * when an io request completes. - * @isci_host: This parameter specifies the ISCI host object - * @request: This parameter is the completed isci_request object. - * @completion_status: This parameter specifies the completion status from the - * sci core. + * isci_request_process_response_iu() - This function sets the status and + * response iu, in the task struct, from the request object for the upper + * layer driver. + * @sas_task: This parameter is the task struct from the upper layer driver. + * @resp_iu: This parameter points to the response iu of the completed request. + * @dev: This parameter specifies the linux device struct. * * none. 
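/*
 * Stand-alone sketch of the per-state handler-table dispatch set up by
 * scic_sds_request_state_handler_table[] above: the table is indexed by
 * the request's current state, and a missing entry falls through to an
 * "invalid state" warning, mirroring the dev_warn() paths in the
 * scic_sds_request_start()/terminate()/frame handlers.  All names below
 * are simplified stand-ins.
 */
#include <stdio.h>

enum req_state { STATE_CONSTRUCTED, STATE_STARTED, STATE_COMPLETED,
                 STATE_ABORTING, STATE_FINAL, STATE_COUNT };
enum status { STATUS_SUCCESS, STATUS_INVALID_STATE };

struct request;                 /* opaque in this sketch */

struct state_handlers {
        enum status (*start)(struct request *);
        enum status (*abort)(struct request *);
};

static enum status constructed_start(struct request *r) { (void)r; return STATUS_SUCCESS; }
static enum status constructed_abort(struct request *r) { (void)r; return STATUS_SUCCESS; }
static enum status started_abort(struct request *r)     { (void)r; return STATUS_SUCCESS; }

static const struct state_handlers handlers[STATE_COUNT] = {
        [STATE_CONSTRUCTED] = { .start = constructed_start, .abort = constructed_abort },
        [STATE_STARTED]     = { .abort = started_abort },
        /* remaining states intentionally leave their handlers NULL */
};

static enum status request_start(struct request *req, enum req_state state)
{
        if (handlers[state].start)
                return handlers[state].start(req);

        fprintf(stderr, "start requested while in wrong state %d\n", state);
        return STATUS_INVALID_STATE;
}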
*/ -void isci_request_io_request_complete( - struct isci_host *isci_host, - struct isci_request *request, - enum sci_io_status completion_status) +static void isci_request_process_response_iu( + struct sas_task *task, + struct ssp_response_iu *resp_iu, + struct device *dev) { - struct sas_task *task = isci_request_access_task(request); - struct ssp_response_iu *resp_iu; - void *resp_buf; - unsigned long task_flags; - struct isci_remote_device *isci_device = request->isci_device; - enum service_response response = SAS_TASK_UNDELIVERED; - enum exec_status status = SAS_ABORTED_TASK; - enum isci_request_status request_status; - enum isci_completion_selection complete_to_host - = isci_perform_normal_io_completion; - - dev_dbg(&isci_host->pdev->dev, - "%s: request = %p, task = %p,\n" - "task->data_dir = %d completion_status = 0x%x\n", + dev_dbg(dev, + "%s: resp_iu = %p " + "resp_iu->status = 0x%x,\nresp_iu->datapres = %d " + "resp_iu->response_data_len = %x, " + "resp_iu->sense_data_len = %x\nrepsonse data: ", __func__, - request, - task, - task->data_dir, - completion_status); + resp_iu, + resp_iu->status, + resp_iu->datapres, + resp_iu->response_data_len, + resp_iu->sense_data_len); - spin_lock(&request->state_lock); - request_status = isci_request_get_state(request); + task->task_status.stat = resp_iu->status; - /* Decode the request status. Note that if the request has been - * aborted by a task management function, we don't care - * what the status is. - */ - switch (request_status) { + /* libsas updates the task status fields based on the response iu. */ + sas_ssp_task_response(dev, task, resp_iu); +} - case aborted: - /* "aborted" indicates that the request was aborted by a task - * management function, since once a task management request is - * perfomed by the device, the request only completes because - * of the subsequent driver terminate. - * - * Aborted also means an external thread is explicitly managing - * this request, so that we do not complete it up the stack. - * - * The target is still there (since the TMF was successful). - */ - request->complete_in_target = true; - response = SAS_TASK_COMPLETE; +/** + * isci_request_set_open_reject_status() - This function prepares the I/O + * completion for OPEN_REJECT conditions. + * @request: This parameter is the completed isci_request object. + * @response_ptr: This parameter specifies the service response for the I/O. + * @status_ptr: This parameter specifies the exec status for the I/O. + * @complete_to_host_ptr: This parameter specifies the action to be taken by + * the LLDD with respect to completing this request or forcing an abort + * condition on the I/O. + * @open_rej_reason: This parameter specifies the encoded reason for the + * abandon-class reject. + * + * none. + */ +static void isci_request_set_open_reject_status( + struct isci_request *request, + struct sas_task *task, + enum service_response *response_ptr, + enum exec_status *status_ptr, + enum isci_completion_selection *complete_to_host_ptr, + enum sas_open_rej_reason open_rej_reason) +{ + /* Task in the target is done. */ + request->complete_in_target = true; + *response_ptr = SAS_TASK_UNDELIVERED; + *status_ptr = SAS_OPEN_REJECT; + *complete_to_host_ptr = isci_perform_normal_io_completion; + task->task_status.open_rej_reason = open_rej_reason; +} - /* See if the device has been/is being stopped. Note - * that we ignore the quiesce state, since we are - * concerned about the actual device state. 
- */ - if ((isci_device->status == isci_stopping) - || (isci_device->status == isci_stopped) - ) - status = SAS_DEVICE_UNKNOWN; - else - status = SAS_ABORTED_TASK; +/** + * isci_request_handle_controller_specific_errors() - This function decodes + * controller-specific I/O completion error conditions. + * @request: This parameter is the completed isci_request object. + * @response_ptr: This parameter specifies the service response for the I/O. + * @status_ptr: This parameter specifies the exec status for the I/O. + * @complete_to_host_ptr: This parameter specifies the action to be taken by + * the LLDD with respect to completing this request or forcing an abort + * condition on the I/O. + * + * none. + */ +static void isci_request_handle_controller_specific_errors( + struct isci_remote_device *isci_device, + struct isci_request *request, + struct sas_task *task, + enum service_response *response_ptr, + enum exec_status *status_ptr, + enum isci_completion_selection *complete_to_host_ptr) +{ + unsigned int cstatus; - complete_to_host = isci_perform_aborted_io_completion; - /* This was an aborted request. */ + cstatus = request->sci.scu_status; - spin_unlock(&request->state_lock); - break; + dev_dbg(&request->isci_host->pdev->dev, + "%s: %p SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR " + "- controller status = 0x%x\n", + __func__, request, cstatus); - case aborting: - /* aborting means that the task management function tried and - * failed to abort the request. We need to note the request - * as SAS_TASK_UNDELIVERED, so that the scsi mid layer marks the - * target as down. - * - * Aborting also means an external thread is explicitly managing - * this request, so that we do not complete it up the stack. - */ - request->complete_in_target = true; - response = SAS_TASK_UNDELIVERED; + /* Decode the controller-specific errors; most + * important is to recognize those conditions in which + * the target may still have a task outstanding that + * must be aborted. + * + * Note that there are SCU completion codes being + * named in the decode below for which SCIC has already + * done work to handle them in a way other than as + * a controller-specific completion code; these are left + * in the decode below for completeness sake. + */ + switch (cstatus) { + case SCU_TASK_DONE_DMASETUP_DIRERR: + /* Also SCU_TASK_DONE_SMP_FRM_TYPE_ERR: */ + case SCU_TASK_DONE_XFERCNT_ERR: + /* Also SCU_TASK_DONE_SMP_UFI_ERR: */ + if (task->task_proto == SAS_PROTOCOL_SMP) { + /* SCU_TASK_DONE_SMP_UFI_ERR == Task Done. */ + *response_ptr = SAS_TASK_COMPLETE; - if ((isci_device->status == isci_stopping) || - (isci_device->status == isci_stopped)) - /* The device has been /is being stopped. Note that - * we ignore the quiesce state, since we are + /* See if the device has been/is being stopped. Note + * that we ignore the quiesce state, since we are * concerned about the actual device state. */ - status = SAS_DEVICE_UNKNOWN; - else - status = SAS_PHY_DOWN; + if ((isci_device->status == isci_stopping) || + (isci_device->status == isci_stopped)) + *status_ptr = SAS_DEVICE_UNKNOWN; + else + *status_ptr = SAS_ABORTED_TASK; - complete_to_host = isci_perform_aborted_io_completion; + request->complete_in_target = true; - /* This was an aborted request. */ + *complete_to_host_ptr = + isci_perform_normal_io_completion; + } else { + /* Task in the target is not done. 
*/ + *response_ptr = SAS_TASK_UNDELIVERED; - spin_unlock(&request->state_lock); - break; + if ((isci_device->status == isci_stopping) || + (isci_device->status == isci_stopped)) + *status_ptr = SAS_DEVICE_UNKNOWN; + else + *status_ptr = SAM_STAT_TASK_ABORTED; - case terminating: + request->complete_in_target = false; - /* This was an terminated request. This happens when - * the I/O is being terminated because of an action on - * the device (reset, tear down, etc.), and the I/O needs - * to be completed up the stack. + *complete_to_host_ptr = + isci_perform_error_io_completion; + } + + break; + + case SCU_TASK_DONE_CRC_ERR: + case SCU_TASK_DONE_NAK_CMD_ERR: + case SCU_TASK_DONE_EXCESS_DATA: + case SCU_TASK_DONE_UNEXP_FIS: + /* Also SCU_TASK_DONE_UNEXP_RESP: */ + case SCU_TASK_DONE_VIIT_ENTRY_NV: /* TODO - conditions? */ + case SCU_TASK_DONE_IIT_ENTRY_NV: /* TODO - conditions? */ + case SCU_TASK_DONE_RNCNV_OUTBOUND: /* TODO - conditions? */ + /* These are conditions in which the target + * has completed the task, so that no cleanup + * is necessary. */ - request->complete_in_target = true; - response = SAS_TASK_UNDELIVERED; + *response_ptr = SAS_TASK_COMPLETE; /* See if the device has been/is being stopped. Note * that we ignore the quiesce state, since we are @@ -953,208 +1341,1206 @@ void isci_request_io_request_complete( */ if ((isci_device->status == isci_stopping) || (isci_device->status == isci_stopped)) - status = SAS_DEVICE_UNKNOWN; + *status_ptr = SAS_DEVICE_UNKNOWN; else - status = SAS_ABORTED_TASK; - - complete_to_host = isci_perform_aborted_io_completion; + *status_ptr = SAS_ABORTED_TASK; - /* This was a terminated request. */ + request->complete_in_target = true; - spin_unlock(&request->state_lock); + *complete_to_host_ptr = isci_perform_normal_io_completion; break; - default: - /* The request is done from an SCU HW perspective. */ - request->status = completed; + /* Note that the only open reject completion codes seen here will be + * abandon-class codes; all others are automatically retried in the SCU. + */ + case SCU_TASK_OPEN_REJECT_WRONG_DESTINATION: - spin_unlock(&request->state_lock); + isci_request_set_open_reject_status( + request, task, response_ptr, status_ptr, + complete_to_host_ptr, SAS_OREJ_WRONG_DEST); + break; - /* This is an active request being completed from the core. */ - switch (completion_status) { + case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION: - case SCI_IO_FAILURE_RESPONSE_VALID: - dev_dbg(&isci_host->pdev->dev, - "%s: SCI_IO_FAILURE_RESPONSE_VALID (%p/%p)\n", - __func__, - request, - task); + /* Note - the return of AB0 will change when + * libsas implements detection of zone violations. + */ + isci_request_set_open_reject_status( + request, task, response_ptr, status_ptr, + complete_to_host_ptr, SAS_OREJ_RESV_AB0); + break; - if (sas_protocol_ata(task->task_proto)) { - resp_buf = &request->sci.stp.rsp; - isci_request_process_stp_response(task, - resp_buf); - } else if (SAS_PROTOCOL_SSP == task->task_proto) { + case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1: - /* crack the iu response buffer. 
*/ - resp_iu = &request->sci.ssp.rsp; - isci_request_process_response_iu(task, resp_iu, - &isci_host->pdev->dev); + isci_request_set_open_reject_status( + request, task, response_ptr, status_ptr, + complete_to_host_ptr, SAS_OREJ_RESV_AB1); + break; - } else if (SAS_PROTOCOL_SMP == task->task_proto) { + case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2: - dev_err(&isci_host->pdev->dev, - "%s: SCI_IO_FAILURE_RESPONSE_VALID: " - "SAS_PROTOCOL_SMP protocol\n", - __func__); + isci_request_set_open_reject_status( + request, task, response_ptr, status_ptr, + complete_to_host_ptr, SAS_OREJ_RESV_AB2); + break; - } else - dev_err(&isci_host->pdev->dev, - "%s: unknown protocol\n", __func__); + case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3: - /* use the task status set in the task struct by the - * isci_request_process_response_iu call. - */ - request->complete_in_target = true; - response = task->task_status.resp; - status = task->task_status.stat; - break; + isci_request_set_open_reject_status( + request, task, response_ptr, status_ptr, + complete_to_host_ptr, SAS_OREJ_RESV_AB3); + break; - case SCI_IO_SUCCESS: - case SCI_IO_SUCCESS_IO_DONE_EARLY: + case SCU_TASK_OPEN_REJECT_BAD_DESTINATION: - response = SAS_TASK_COMPLETE; - status = SAM_STAT_GOOD; - request->complete_in_target = true; + isci_request_set_open_reject_status( + request, task, response_ptr, status_ptr, + complete_to_host_ptr, SAS_OREJ_BAD_DEST); + break; - if (task->task_proto == SAS_PROTOCOL_SMP) { - void *rsp = &request->sci.smp.rsp; + case SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY: - dev_dbg(&isci_host->pdev->dev, - "%s: SMP protocol completion\n", - __func__); + isci_request_set_open_reject_status( + request, task, response_ptr, status_ptr, + complete_to_host_ptr, SAS_OREJ_STP_NORES); + break; - sg_copy_from_buffer( - &task->smp_task.smp_resp, 1, - rsp, sizeof(struct smp_resp)); - } else if (completion_status - == SCI_IO_SUCCESS_IO_DONE_EARLY) { + case SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED: - /* This was an SSP / STP / SATA transfer. - * There is a possibility that less data than - * the maximum was transferred. - */ - u32 transferred_length - = scic_io_request_get_number_of_bytes_transferred(&request->sci); + isci_request_set_open_reject_status( + request, task, response_ptr, status_ptr, + complete_to_host_ptr, SAS_OREJ_EPROTO); + break; - task->task_status.residual - = task->total_xfer_len - transferred_length; + case SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED: - /* If there were residual bytes, call this an - * underrun. - */ - if (task->task_status.residual != 0) - status = SAS_DATA_UNDERRUN; + isci_request_set_open_reject_status( + request, task, response_ptr, status_ptr, + complete_to_host_ptr, SAS_OREJ_CONN_RATE); + break; - dev_dbg(&isci_host->pdev->dev, - "%s: SCI_IO_SUCCESS_IO_DONE_EARLY %d\n", - __func__, - status); + case SCU_TASK_DONE_LL_R_ERR: + /* Also SCU_TASK_DONE_ACK_NAK_TO: */ + case SCU_TASK_DONE_LL_PERR: + case SCU_TASK_DONE_LL_SY_TERM: + /* Also SCU_TASK_DONE_NAK_ERR:*/ + case SCU_TASK_DONE_LL_LF_TERM: + /* Also SCU_TASK_DONE_DATA_LEN_ERR: */ + case SCU_TASK_DONE_LL_ABORT_ERR: + case SCU_TASK_DONE_SEQ_INV_TYPE: + /* Also SCU_TASK_DONE_UNEXP_XR: */ + case SCU_TASK_DONE_XR_IU_LEN_ERR: + case SCU_TASK_DONE_INV_FIS_LEN: + /* Also SCU_TASK_DONE_XR_WD_LEN: */ + case SCU_TASK_DONE_SDMA_ERR: + case SCU_TASK_DONE_OFFSET_ERR: + case SCU_TASK_DONE_MAX_PLD_ERR: + case SCU_TASK_DONE_LF_ERR: + case SCU_TASK_DONE_SMP_RESP_TO_ERR: /* Escalate to dev reset? 
*/ + case SCU_TASK_DONE_SMP_LL_RX_ERR: + case SCU_TASK_DONE_UNEXP_DATA: + case SCU_TASK_DONE_UNEXP_SDBFIS: + case SCU_TASK_DONE_REG_ERR: + case SCU_TASK_DONE_SDB_ERR: + case SCU_TASK_DONE_TASK_ABORT: + default: + /* Task in the target is not done. */ + *response_ptr = SAS_TASK_UNDELIVERED; + *status_ptr = SAM_STAT_TASK_ABORTED; + request->complete_in_target = false; - } else - dev_dbg(&isci_host->pdev->dev, - "%s: SCI_IO_SUCCESS\n", - __func__); + *complete_to_host_ptr = isci_perform_error_io_completion; + break; + } +} - break; +/** + * isci_task_save_for_upper_layer_completion() - This function saves the + * request for later completion to the upper layer driver. + * @host: This parameter is a pointer to the host on which the the request + * should be queued (either as an error or success). + * @request: This parameter is the completed request. + * @response: This parameter is the response code for the completed task. + * @status: This parameter is the status code for the completed task. + * + * none. + */ +static void isci_task_save_for_upper_layer_completion( + struct isci_host *host, + struct isci_request *request, + enum service_response response, + enum exec_status status, + enum isci_completion_selection task_notification_selection) +{ + struct sas_task *task = isci_request_access_task(request); - case SCI_IO_FAILURE_TERMINATED: - dev_dbg(&isci_host->pdev->dev, - "%s: SCI_IO_FAILURE_TERMINATED (%p/%p)\n", - __func__, - request, - task); + task_notification_selection + = isci_task_set_completion_status(task, response, status, + task_notification_selection); - /* The request was terminated explicitly. No handling - * is needed in the SCSI error handler path. - */ - request->complete_in_target = true; - response = SAS_TASK_UNDELIVERED; + /* Tasks aborted specifically by a call to the lldd_abort_task + * function should not be completed to the host in the regular path. + */ + switch (task_notification_selection) { - /* See if the device has been/is being stopped. Note - * that we ignore the quiesce state, since we are - * concerned about the actual device state. - */ - if ((isci_device->status == isci_stopping) || - (isci_device->status == isci_stopped)) - status = SAS_DEVICE_UNKNOWN; - else - status = SAS_ABORTED_TASK; + case isci_perform_normal_io_completion: - complete_to_host = isci_perform_normal_io_completion; - break; + /* Normal notification (task_done) */ + dev_dbg(&host->pdev->dev, + "%s: Normal - task = %p, response=%d (%d), status=%d (%d)\n", + __func__, + task, + task->task_status.resp, response, + task->task_status.stat, status); + /* Add to the completed list. */ + list_add(&request->completed_node, + &host->requests_to_complete); - case SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR: + /* Take the request off the device's pending request list. */ + list_del_init(&request->dev_node); + break; - isci_request_handle_controller_specific_errors( - isci_device, request, task, &response, &status, - &complete_to_host); + case isci_perform_aborted_io_completion: + /* No notification to libsas because this request is + * already in the abort path. + */ + dev_warn(&host->pdev->dev, + "%s: Aborted - task = %p, response=%d (%d), status=%d (%d)\n", + __func__, + task, + task->task_status.resp, response, + task->task_status.stat, status); - break; + /* Wake up whatever process was waiting for this + * request to complete. 
+ */ + WARN_ON(request->io_request_completion == NULL); - case SCI_IO_FAILURE_REMOTE_DEVICE_RESET_REQUIRED: - /* This is a special case, in that the I/O completion - * is telling us that the device needs a reset. - * In order for the device reset condition to be - * noticed, the I/O has to be handled in the error - * handler. Set the reset flag and cause the - * SCSI error thread to be scheduled. - */ - spin_lock_irqsave(&task->task_state_lock, task_flags); - task->task_state_flags |= SAS_TASK_NEED_DEV_RESET; + if (request->io_request_completion != NULL) { + + /* Signal whoever is waiting that this + * request is complete. + */ + complete(request->io_request_completion); + } + break; + + case isci_perform_error_io_completion: + /* Use sas_task_abort */ + dev_warn(&host->pdev->dev, + "%s: Error - task = %p, response=%d (%d), status=%d (%d)\n", + __func__, + task, + task->task_status.resp, response, + task->task_status.stat, status); + /* Add to the aborted list. */ + list_add(&request->completed_node, + &host->requests_to_errorback); + break; + + default: + dev_warn(&host->pdev->dev, + "%s: Unknown - task = %p, response=%d (%d), status=%d (%d)\n", + __func__, + task, + task->task_status.resp, response, + task->task_status.stat, status); + + /* Add to the error to libsas list. */ + list_add(&request->completed_node, + &host->requests_to_errorback); + break; + } +} + +static void isci_request_io_request_complete(struct isci_host *isci_host, + struct isci_request *request, + enum sci_io_status completion_status) +{ + struct sas_task *task = isci_request_access_task(request); + struct ssp_response_iu *resp_iu; + void *resp_buf; + unsigned long task_flags; + struct isci_remote_device *isci_device = request->isci_device; + enum service_response response = SAS_TASK_UNDELIVERED; + enum exec_status status = SAS_ABORTED_TASK; + enum isci_request_status request_status; + enum isci_completion_selection complete_to_host + = isci_perform_normal_io_completion; + + dev_dbg(&isci_host->pdev->dev, + "%s: request = %p, task = %p,\n" + "task->data_dir = %d completion_status = 0x%x\n", + __func__, + request, + task, + task->data_dir, + completion_status); + + spin_lock(&request->state_lock); + request_status = isci_request_get_state(request); + + /* Decode the request status. Note that if the request has been + * aborted by a task management function, we don't care + * what the status is. + */ + switch (request_status) { + + case aborted: + /* "aborted" indicates that the request was aborted by a task + * management function, since once a task management request is + * perfomed by the device, the request only completes because + * of the subsequent driver terminate. + * + * Aborted also means an external thread is explicitly managing + * this request, so that we do not complete it up the stack. + * + * The target is still there (since the TMF was successful). + */ + request->complete_in_target = true; + response = SAS_TASK_COMPLETE; + + /* See if the device has been/is being stopped. Note + * that we ignore the quiesce state, since we are + * concerned about the actual device state. + */ + if ((isci_device->status == isci_stopping) + || (isci_device->status == isci_stopped) + ) + status = SAS_DEVICE_UNKNOWN; + else + status = SAS_ABORTED_TASK; + + complete_to_host = isci_perform_aborted_io_completion; + /* This was an aborted request. */ + + spin_unlock(&request->state_lock); + break; + + case aborting: + /* aborting means that the task management function tried and + * failed to abort the request. 
We need to note the request + * as SAS_TASK_UNDELIVERED, so that the scsi mid layer marks the + * target as down. + * + * Aborting also means an external thread is explicitly managing + * this request, so that we do not complete it up the stack. + */ + request->complete_in_target = true; + response = SAS_TASK_UNDELIVERED; + + if ((isci_device->status == isci_stopping) || + (isci_device->status == isci_stopped)) + /* The device has been /is being stopped. Note that + * we ignore the quiesce state, since we are + * concerned about the actual device state. + */ + status = SAS_DEVICE_UNKNOWN; + else + status = SAS_PHY_DOWN; + + complete_to_host = isci_perform_aborted_io_completion; + + /* This was an aborted request. */ + + spin_unlock(&request->state_lock); + break; + + case terminating: + + /* This was an terminated request. This happens when + * the I/O is being terminated because of an action on + * the device (reset, tear down, etc.), and the I/O needs + * to be completed up the stack. + */ + request->complete_in_target = true; + response = SAS_TASK_UNDELIVERED; + + /* See if the device has been/is being stopped. Note + * that we ignore the quiesce state, since we are + * concerned about the actual device state. + */ + if ((isci_device->status == isci_stopping) || + (isci_device->status == isci_stopped)) + status = SAS_DEVICE_UNKNOWN; + else + status = SAS_ABORTED_TASK; + + complete_to_host = isci_perform_aborted_io_completion; + + /* This was a terminated request. */ + + spin_unlock(&request->state_lock); + break; + + default: + + /* The request is done from an SCU HW perspective. */ + request->status = completed; + + spin_unlock(&request->state_lock); + + /* This is an active request being completed from the core. */ + switch (completion_status) { + + case SCI_IO_FAILURE_RESPONSE_VALID: + dev_dbg(&isci_host->pdev->dev, + "%s: SCI_IO_FAILURE_RESPONSE_VALID (%p/%p)\n", + __func__, + request, + task); + + if (sas_protocol_ata(task->task_proto)) { + resp_buf = &request->sci.stp.rsp; + isci_request_process_stp_response(task, + resp_buf); + } else if (SAS_PROTOCOL_SSP == task->task_proto) { + + /* crack the iu response buffer. */ + resp_iu = &request->sci.ssp.rsp; + isci_request_process_response_iu(task, resp_iu, + &isci_host->pdev->dev); + + } else if (SAS_PROTOCOL_SMP == task->task_proto) { + + dev_err(&isci_host->pdev->dev, + "%s: SCI_IO_FAILURE_RESPONSE_VALID: " + "SAS_PROTOCOL_SMP protocol\n", + __func__); + + } else + dev_err(&isci_host->pdev->dev, + "%s: unknown protocol\n", __func__); + + /* use the task status set in the task struct by the + * isci_request_process_response_iu call. + */ + request->complete_in_target = true; + response = task->task_status.resp; + status = task->task_status.stat; + break; + + case SCI_IO_SUCCESS: + case SCI_IO_SUCCESS_IO_DONE_EARLY: + + response = SAS_TASK_COMPLETE; + status = SAM_STAT_GOOD; + request->complete_in_target = true; + + if (task->task_proto == SAS_PROTOCOL_SMP) { + void *rsp = &request->sci.smp.rsp; + + dev_dbg(&isci_host->pdev->dev, + "%s: SMP protocol completion\n", + __func__); + + sg_copy_from_buffer( + &task->smp_task.smp_resp, 1, + rsp, sizeof(struct smp_resp)); + } else if (completion_status + == SCI_IO_SUCCESS_IO_DONE_EARLY) { + + /* This was an SSP / STP / SATA transfer. + * There is a possibility that less data than + * the maximum was transferred. 
+ */ + u32 transferred_length = sci_req_tx_bytes(&request->sci); + + task->task_status.residual + = task->total_xfer_len - transferred_length; + + /* If there were residual bytes, call this an + * underrun. + */ + if (task->task_status.residual != 0) + status = SAS_DATA_UNDERRUN; + + dev_dbg(&isci_host->pdev->dev, + "%s: SCI_IO_SUCCESS_IO_DONE_EARLY %d\n", + __func__, + status); + + } else + dev_dbg(&isci_host->pdev->dev, + "%s: SCI_IO_SUCCESS\n", + __func__); + + break; + + case SCI_IO_FAILURE_TERMINATED: + dev_dbg(&isci_host->pdev->dev, + "%s: SCI_IO_FAILURE_TERMINATED (%p/%p)\n", + __func__, + request, + task); + + /* The request was terminated explicitly. No handling + * is needed in the SCSI error handler path. + */ + request->complete_in_target = true; + response = SAS_TASK_UNDELIVERED; + + /* See if the device has been/is being stopped. Note + * that we ignore the quiesce state, since we are + * concerned about the actual device state. + */ + if ((isci_device->status == isci_stopping) || + (isci_device->status == isci_stopped)) + status = SAS_DEVICE_UNKNOWN; + else + status = SAS_ABORTED_TASK; + + complete_to_host = isci_perform_normal_io_completion; + break; + + case SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR: + + isci_request_handle_controller_specific_errors( + isci_device, request, task, &response, &status, + &complete_to_host); + + break; + + case SCI_IO_FAILURE_REMOTE_DEVICE_RESET_REQUIRED: + /* This is a special case, in that the I/O completion + * is telling us that the device needs a reset. + * In order for the device reset condition to be + * noticed, the I/O has to be handled in the error + * handler. Set the reset flag and cause the + * SCSI error thread to be scheduled. + */ + spin_lock_irqsave(&task->task_state_lock, task_flags); + task->task_state_flags |= SAS_TASK_NEED_DEV_RESET; spin_unlock_irqrestore(&task->task_state_lock, task_flags); - /* Fail the I/O. */ - response = SAS_TASK_UNDELIVERED; - status = SAM_STAT_TASK_ABORTED; + /* Fail the I/O. */ + response = SAS_TASK_UNDELIVERED; + status = SAM_STAT_TASK_ABORTED; + + complete_to_host = isci_perform_error_io_completion; + request->complete_in_target = false; + break; + + default: + /* Catch any otherwise unhandled error codes here. */ + dev_warn(&isci_host->pdev->dev, + "%s: invalid completion code: 0x%x - " + "isci_request = %p\n", + __func__, completion_status, request); + + response = SAS_TASK_UNDELIVERED; + + /* See if the device has been/is being stopped. Note + * that we ignore the quiesce state, since we are + * concerned about the actual device state. + */ + if ((isci_device->status == isci_stopping) || + (isci_device->status == isci_stopped)) + status = SAS_DEVICE_UNKNOWN; + else + status = SAS_ABORTED_TASK; + + complete_to_host = isci_perform_error_io_completion; + request->complete_in_target = false; + break; + } + break; + } + + isci_request_unmap_sgl(request, isci_host->pdev); + + /* Put the completed request on the correct list */ + isci_task_save_for_upper_layer_completion(isci_host, request, response, + status, complete_to_host + ); + + /* complete the io request to the core. */ + scic_controller_complete_io(&isci_host->sci, + &isci_device->sci, + &request->sci); + /* set terminated handle so it cannot be completed or + * terminated again, and to cause any calls into abort + * task to recognize the already completed case. 
+ */ + request->terminated = true; + + isci_host_can_dequeue(isci_host, 1); +} + +/** + * scic_sds_request_initial_state_enter() - + * @object: This parameter specifies the base object for which the state + * transition is occurring. + * + * This method implements the actions taken when entering the + * SCI_BASE_REQUEST_STATE_INITIAL state. This state is entered when the initial + * base request is constructed. Entry into the initial state sets all handlers + * for the io request object to their default handlers. none + */ +static void scic_sds_request_initial_state_enter(void *object) +{ + struct scic_sds_request *sci_req = object; + + SET_STATE_HANDLER( + sci_req, + scic_sds_request_state_handler_table, + SCI_BASE_REQUEST_STATE_INITIAL + ); +} + +/** + * scic_sds_request_constructed_state_enter() - + * @object: The io request object that is to enter the constructed state. + * + * This method implements the actions taken when entering the + * SCI_BASE_REQUEST_STATE_CONSTRUCTED state. The method sets the state handlers + * for the the constructed state. none + */ +static void scic_sds_request_constructed_state_enter(void *object) +{ + struct scic_sds_request *sci_req = object; + + SET_STATE_HANDLER( + sci_req, + scic_sds_request_state_handler_table, + SCI_BASE_REQUEST_STATE_CONSTRUCTED + ); +} + +/** + * scic_sds_request_started_state_enter() - + * @object: This parameter specifies the base object for which the state + * transition is occurring. This is cast into a SCIC_SDS_IO_REQUEST object. + * + * This method implements the actions taken when entering the + * SCI_BASE_REQUEST_STATE_STARTED state. If the io request object type is a + * SCSI Task request we must enter the started substate machine. none + */ +static void scic_sds_request_started_state_enter(void *object) +{ + struct scic_sds_request *sci_req = object; + + SET_STATE_HANDLER( + sci_req, + scic_sds_request_state_handler_table, + SCI_BASE_REQUEST_STATE_STARTED + ); + + /* + * Most of the request state machines have a started substate machine so + * start its execution on the entry to the started state. */ + if (sci_req->has_started_substate_machine == true) + sci_base_state_machine_start(&sci_req->started_substate_machine); +} + +/** + * scic_sds_request_started_state_exit() - + * @object: This parameter specifies the base object for which the state + * transition is occurring. This object is cast into a SCIC_SDS_IO_REQUEST + * object. + * + * This method implements the actions taken when exiting the + * SCI_BASE_REQUEST_STATE_STARTED state. For task requests the action will be + * to stop the started substate machine. none + */ +static void scic_sds_request_started_state_exit(void *object) +{ + struct scic_sds_request *sci_req = object; + + if (sci_req->has_started_substate_machine == true) + sci_base_state_machine_stop(&sci_req->started_substate_machine); +} + +/** + * scic_sds_request_completed_state_enter() - + * @object: This parameter specifies the base object for which the state + * transition is occurring. This object is cast into a SCIC_SDS_IO_REQUEST + * object. + * + * This method implements the actions taken when entering the + * SCI_BASE_REQUEST_STATE_COMPLETED state. This state is entered when the + * SCIC_SDS_IO_REQUEST has completed. The method will decode the request + * completion status and convert it to an enum sci_status to return in the + * completion callback function. 
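+ * Normal I/O completions are delivered to the upper layer through
+ * isci_request_io_request_complete(); task management requests are
+ * completed through isci_task_request_complete() instead.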
none + */ +static void scic_sds_request_completed_state_enter(void *object) +{ + struct scic_sds_request *sci_req = object; + struct scic_sds_controller *scic = + scic_sds_request_get_controller(sci_req); + struct isci_host *ihost = scic_to_ihost(scic); + struct isci_request *ireq = sci_req_to_ireq(sci_req); + + SET_STATE_HANDLER(sci_req, + scic_sds_request_state_handler_table, + SCI_BASE_REQUEST_STATE_COMPLETED); + + /* Tell the SCI_USER that the IO request is complete */ + if (sci_req->is_task_management_request == false) + isci_request_io_request_complete(ihost, ireq, + sci_req->sci_status); + else + isci_task_request_complete(ihost, ireq, sci_req->sci_status); +} + +/** + * scic_sds_request_aborting_state_enter() - + * @object: This parameter specifies the base object for which the state + * transition is occurring. This object is cast into a SCIC_SDS_IO_REQUEST + * object. + * + * This method implements the actions taken when entering the + * SCI_BASE_REQUEST_STATE_ABORTING state. none + */ +static void scic_sds_request_aborting_state_enter(void *object) +{ + struct scic_sds_request *sci_req = object; + + /* Setting the abort bit in the Task Context is required by the silicon. */ + sci_req->task_context_buffer->abort = 1; + + SET_STATE_HANDLER( + sci_req, + scic_sds_request_state_handler_table, + SCI_BASE_REQUEST_STATE_ABORTING + ); +} + +/** + * scic_sds_request_final_state_enter() - + * @object: This parameter specifies the base object for which the state + * transition is occurring. This is cast into a SCIC_SDS_IO_REQUEST object. + * + * This method implements the actions taken when entering the + * SCI_BASE_REQUEST_STATE_FINAL state. The only action required is to put the + * state handlers in place. none + */ +static void scic_sds_request_final_state_enter(void *object) +{ + struct scic_sds_request *sci_req = object; + + SET_STATE_HANDLER( + sci_req, + scic_sds_request_state_handler_table, + SCI_BASE_REQUEST_STATE_FINAL + ); +} + +static const struct sci_base_state scic_sds_request_state_table[] = { + [SCI_BASE_REQUEST_STATE_INITIAL] = { + .enter_state = scic_sds_request_initial_state_enter, + }, + [SCI_BASE_REQUEST_STATE_CONSTRUCTED] = { + .enter_state = scic_sds_request_constructed_state_enter, + }, + [SCI_BASE_REQUEST_STATE_STARTED] = { + .enter_state = scic_sds_request_started_state_enter, + .exit_state = scic_sds_request_started_state_exit + }, + [SCI_BASE_REQUEST_STATE_COMPLETED] = { + .enter_state = scic_sds_request_completed_state_enter, + }, + [SCI_BASE_REQUEST_STATE_ABORTING] = { + .enter_state = scic_sds_request_aborting_state_enter, + }, + [SCI_BASE_REQUEST_STATE_FINAL] = { + .enter_state = scic_sds_request_final_state_enter, + }, +}; + +static void scic_sds_general_request_construct(struct scic_sds_controller *scic, + struct scic_sds_remote_device *sci_dev, + u16 io_tag, struct scic_sds_request *sci_req) +{ + sci_base_state_machine_construct(&sci_req->state_machine, sci_req, + scic_sds_request_state_table, SCI_BASE_REQUEST_STATE_INITIAL); + sci_base_state_machine_start(&sci_req->state_machine); + + sci_req->io_tag = io_tag; + sci_req->owning_controller = scic; + sci_req->target_device = sci_dev; + sci_req->has_started_substate_machine = false; + sci_req->protocol = SCIC_NO_PROTOCOL; + sci_req->saved_rx_frame_index = SCU_INVALID_FRAME_INDEX; + sci_req->device_sequence = scic_sds_remote_device_get_sequence(sci_dev); + + sci_req->sci_status = SCI_SUCCESS; + sci_req->scu_status = 0; + sci_req->post_context = 0xFFFFFFFF; + + sci_req->is_task_management_request = false; + 
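+	/*
+	 * Tag handling: SCI_CONTROLLER_INVALID_IO_TAG means the caller is
+	 * letting the core assign the tag when the I/O is started (see
+	 * isci_request_execute()), so no task context buffer can be resolved
+	 * yet; a caller-supplied tag is used to look up the task context
+	 * buffer from the controller.
+	 */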
+ if (io_tag == SCI_CONTROLLER_INVALID_IO_TAG) { + sci_req->was_tag_assigned_by_user = false; + sci_req->task_context_buffer = NULL; + } else { + sci_req->was_tag_assigned_by_user = true; + + sci_req->task_context_buffer = + scic_sds_controller_get_task_context_buffer(scic, io_tag); + } +} + +static enum sci_status +scic_io_request_construct(struct scic_sds_controller *scic, + struct scic_sds_remote_device *sci_dev, + u16 io_tag, struct scic_sds_request *sci_req) +{ + struct domain_device *dev = sci_dev_to_domain(sci_dev); + enum sci_status status = SCI_SUCCESS; + + /* Build the common part of the request */ + scic_sds_general_request_construct(scic, sci_dev, io_tag, sci_req); + + if (sci_dev->rnc.remote_node_index == + SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) + return SCI_FAILURE_INVALID_REMOTE_DEVICE; + + if (dev->dev_type == SAS_END_DEV) + scic_sds_ssp_io_request_assign_buffers(sci_req); + else if ((dev->dev_type == SATA_DEV) || + (dev->tproto & SAS_PROTOCOL_STP)) { + scic_sds_stp_request_assign_buffers(sci_req); + memset(&sci_req->stp.cmd, 0, sizeof(sci_req->stp.cmd)); + } else if (dev_is_expander(dev)) { + scic_sds_smp_request_assign_buffers(sci_req); + memset(&sci_req->smp.cmd, 0, sizeof(sci_req->smp.cmd)); + } else + status = SCI_FAILURE_UNSUPPORTED_PROTOCOL; + + if (status == SCI_SUCCESS) { + memset(sci_req->task_context_buffer, 0, + offsetof(struct scu_task_context, sgl_pair_ab)); + } + + return status; +} + +enum sci_status scic_task_request_construct(struct scic_sds_controller *scic, + struct scic_sds_remote_device *sci_dev, + u16 io_tag, struct scic_sds_request *sci_req) +{ + struct domain_device *dev = sci_dev_to_domain(sci_dev); + enum sci_status status = SCI_SUCCESS; + + /* Build the common part of the request */ + scic_sds_general_request_construct(scic, sci_dev, io_tag, sci_req); + + if (dev->dev_type == SAS_END_DEV) { + scic_sds_ssp_task_request_assign_buffers(sci_req); + + sci_req->has_started_substate_machine = true; + + /* Construct the started sub-state machine. */ + sci_base_state_machine_construct( + &sci_req->started_substate_machine, + sci_req, + scic_sds_io_request_started_task_mgmt_substate_table, + SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_COMPLETION + ); + } else if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) + scic_sds_stp_request_assign_buffers(sci_req); + else + status = SCI_FAILURE_UNSUPPORTED_PROTOCOL; + + if (status == SCI_SUCCESS) { + sci_req->is_task_management_request = true; + memset(sci_req->task_context_buffer, 0, sizeof(struct scu_task_context)); + } + + return status; +} + +static enum sci_status isci_request_ssp_request_construct( + struct isci_request *request) +{ + enum sci_status status; + + dev_dbg(&request->isci_host->pdev->dev, + "%s: request = %p\n", + __func__, + request); + status = scic_io_request_construct_basic_ssp(&request->sci); + return status; +} + +static enum sci_status isci_request_stp_request_construct( + struct isci_request *request) +{ + struct sas_task *task = isci_request_access_task(request); + enum sci_status status; + struct host_to_dev_fis *register_fis; + + dev_dbg(&request->isci_host->pdev->dev, + "%s: request = %p\n", + __func__, + request); + + /* Get the host_to_dev_fis from the core and copy + * the fis from the task into it. + */ + register_fis = isci_sata_task_to_fis_copy(task); + + status = scic_io_request_construct_basic_sata(&request->sci); + + /* Set the ncq tag in the fis, from the queue + * command in the task. 
+ */ + if (isci_sata_is_task_ncq(task)) { + + isci_sata_set_ncq_tag( + register_fis, + task + ); + } + + return status; +} + +/* + * isci_smp_request_build() - This function builds the smp request. + * @ireq: This parameter points to the isci_request allocated in the + * request construct function. + * + * SCI_SUCCESS on successfull completion, or specific failure code. + */ +static enum sci_status isci_smp_request_build(struct isci_request *ireq) +{ + enum sci_status status = SCI_FAILURE; + struct sas_task *task = isci_request_access_task(ireq); + struct scic_sds_request *sci_req = &ireq->sci; + + dev_dbg(&ireq->isci_host->pdev->dev, + "%s: request = %p\n", __func__, ireq); + + dev_dbg(&ireq->isci_host->pdev->dev, + "%s: smp_req len = %d\n", + __func__, + task->smp_task.smp_req.length); + + /* copy the smp_command to the address; */ + sg_copy_to_buffer(&task->smp_task.smp_req, 1, + &sci_req->smp.cmd, + sizeof(struct smp_req)); + + status = scic_io_request_construct_smp(sci_req); + if (status != SCI_SUCCESS) + dev_warn(&ireq->isci_host->pdev->dev, + "%s: failed with status = %d\n", + __func__, + status); + + return status; +} + +/** + * isci_io_request_build() - This function builds the io request object. + * @isci_host: This parameter specifies the ISCI host object + * @request: This parameter points to the isci_request object allocated in the + * request construct function. + * @sci_device: This parameter is the handle for the sci core's remote device + * object that is the destination for this request. + * + * SCI_SUCCESS on successfull completion, or specific failure code. + */ +static enum sci_status isci_io_request_build( + struct isci_host *isci_host, + struct isci_request *request, + struct isci_remote_device *isci_device) +{ + enum sci_status status = SCI_SUCCESS; + struct sas_task *task = isci_request_access_task(request); + struct scic_sds_remote_device *sci_device = &isci_device->sci; + + dev_dbg(&isci_host->pdev->dev, + "%s: isci_device = 0x%p; request = %p, " + "num_scatter = %d\n", + __func__, + isci_device, + request, + task->num_scatter); + + /* map the sgl addresses, if present. + * libata does the mapping for sata devices + * before we get the request. + */ + if (task->num_scatter && + !sas_protocol_ata(task->task_proto) && + !(SAS_PROTOCOL_SMP & task->task_proto)) { + + request->num_sg_entries = dma_map_sg( + &isci_host->pdev->dev, + task->scatter, + task->num_scatter, + task->data_dir + ); + + if (request->num_sg_entries == 0) + return SCI_FAILURE_INSUFFICIENT_RESOURCES; + } + + /* build the common request object. For now, + * we will let the core allocate the IO tag. + */ + status = scic_io_request_construct(&isci_host->sci, sci_device, + SCI_CONTROLLER_INVALID_IO_TAG, + &request->sci); + + if (status != SCI_SUCCESS) { + dev_warn(&isci_host->pdev->dev, + "%s: failed request construct\n", + __func__); + return SCI_FAILURE; + } + + switch (task->task_proto) { + case SAS_PROTOCOL_SMP: + status = isci_smp_request_build(request); + break; + case SAS_PROTOCOL_SSP: + status = isci_request_ssp_request_construct(request); + break; + case SAS_PROTOCOL_SATA: + case SAS_PROTOCOL_STP: + case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: + status = isci_request_stp_request_construct(request); + break; + default: + dev_warn(&isci_host->pdev->dev, + "%s: unknown protocol\n", __func__); + return SCI_FAILURE; + } + + return SCI_SUCCESS; +} + +/** + * isci_request_alloc_core() - This function gets the request object from the + * isci_host dma cache. 
+ * @isci_host: This parameter specifies the ISCI host object + * @isci_request: This parameter will contain the pointer to the new + * isci_request object. + * @isci_device: This parameter is the pointer to the isci remote device object + * that is the destination for this request. + * @gfp_flags: This parameter specifies the os allocation flags. + * + * SCI_SUCCESS on successfull completion, or specific failure code. + */ +static int isci_request_alloc_core( + struct isci_host *isci_host, + struct isci_request **isci_request, + struct isci_remote_device *isci_device, + gfp_t gfp_flags) +{ + int ret = 0; + dma_addr_t handle; + struct isci_request *request; + - complete_to_host = isci_perform_error_io_completion; - request->complete_in_target = false; - break; + /* get pointer to dma memory. This actually points + * to both the isci_remote_device object and the + * sci object. The isci object is at the beginning + * of the memory allocated here. + */ + request = dma_pool_alloc(isci_host->dma_pool, gfp_flags, &handle); + if (!request) { + dev_warn(&isci_host->pdev->dev, + "%s: dma_pool_alloc returned NULL\n", __func__); + return -ENOMEM; + } - default: - /* Catch any otherwise unhandled error codes here. */ - dev_warn(&isci_host->pdev->dev, - "%s: invalid completion code: 0x%x - " - "isci_request = %p\n", - __func__, completion_status, request); + /* initialize the request object. */ + spin_lock_init(&request->state_lock); + request->request_daddr = handle; + request->isci_host = isci_host; + request->isci_device = isci_device; + request->io_request_completion = NULL; + request->terminated = false; - response = SAS_TASK_UNDELIVERED; + request->num_sg_entries = 0; - /* See if the device has been/is being stopped. Note - * that we ignore the quiesce state, since we are - * concerned about the actual device state. - */ - if ((isci_device->status == isci_stopping) || - (isci_device->status == isci_stopped)) - status = SAS_DEVICE_UNKNOWN; - else - status = SAS_ABORTED_TASK; + request->complete_in_target = false; - complete_to_host = isci_perform_error_io_completion; - request->complete_in_target = false; - break; - } - break; + INIT_LIST_HEAD(&request->completed_node); + INIT_LIST_HEAD(&request->dev_node); + + *isci_request = request; + isci_request_change_state(request, allocated); + + return ret; +} + +static int isci_request_alloc_io( + struct isci_host *isci_host, + struct sas_task *task, + struct isci_request **isci_request, + struct isci_remote_device *isci_device, + gfp_t gfp_flags) +{ + int retval = isci_request_alloc_core(isci_host, isci_request, + isci_device, gfp_flags); + + if (!retval) { + (*isci_request)->ttype_ptr.io_task_ptr = task; + (*isci_request)->ttype = io_task; + + task->lldd_task = *isci_request; } + return retval; +} - isci_request_unmap_sgl(request, isci_host->pdev); +/** + * isci_request_alloc_tmf() - This function gets the request object from the + * isci_host dma cache and initializes the relevant fields as a sas_task. + * @isci_host: This parameter specifies the ISCI host object + * @sas_task: This parameter is the task struct from the upper layer driver. + * @isci_request: This parameter will contain the pointer to the new + * isci_request object. + * @isci_device: This parameter is the pointer to the isci remote device object + * that is the destination for this request. + * @gfp_flags: This parameter specifies the os allocation flags. + * + * SCI_SUCCESS on successfull completion, or specific failure code. 
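+ * (The value returned here is the int result of isci_request_alloc_core():
+ * 0 on success or -ENOMEM on allocation failure.)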
+ */ +int isci_request_alloc_tmf( + struct isci_host *isci_host, + struct isci_tmf *isci_tmf, + struct isci_request **isci_request, + struct isci_remote_device *isci_device, + gfp_t gfp_flags) +{ + int retval = isci_request_alloc_core(isci_host, isci_request, + isci_device, gfp_flags); - /* Put the completed request on the correct list */ - isci_task_save_for_upper_layer_completion(isci_host, request, response, - status, complete_to_host - ); + if (!retval) { - /* complete the io request to the core. */ - scic_controller_complete_io(&isci_host->sci, - &isci_device->sci, - &request->sci); - /* set terminated handle so it cannot be completed or - * terminated again, and to cause any calls into abort - * task to recognize the already completed case. + (*isci_request)->ttype_ptr.tmf_task_ptr = isci_tmf; + (*isci_request)->ttype = tmf_task; + } + return retval; +} + +/** + * isci_request_execute() - This function allocates the isci_request object, + * all fills in some common fields. + * @isci_host: This parameter specifies the ISCI host object + * @sas_task: This parameter is the task struct from the upper layer driver. + * @isci_request: This parameter will contain the pointer to the new + * isci_request object. + * @gfp_flags: This parameter specifies the os allocation flags. + * + * SCI_SUCCESS on successfull completion, or specific failure code. + */ +int isci_request_execute( + struct isci_host *isci_host, + struct sas_task *task, + struct isci_request **isci_request, + gfp_t gfp_flags) +{ + int ret = 0; + struct scic_sds_remote_device *sci_device; + enum sci_status status = SCI_FAILURE_UNSUPPORTED_PROTOCOL; + struct isci_remote_device *isci_device; + struct isci_request *request; + unsigned long flags; + + isci_device = task->dev->lldd_dev; + sci_device = &isci_device->sci; + + /* do common allocation and init of request object. */ + ret = isci_request_alloc_io( + isci_host, + task, + &request, + isci_device, + gfp_flags + ); + + if (ret) + goto out; + + status = isci_io_request_build(isci_host, request, isci_device); + if (status != SCI_SUCCESS) { + dev_warn(&isci_host->pdev->dev, + "%s: request_construct failed - status = 0x%x\n", + __func__, + status); + goto out; + } + + spin_lock_irqsave(&isci_host->scic_lock, flags); + + /* send the request, let the core assign the IO TAG. */ + status = scic_controller_start_io(&isci_host->sci, sci_device, + &request->sci, + SCI_CONTROLLER_INVALID_IO_TAG); + if (status != SCI_SUCCESS && + status != SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) { + dev_warn(&isci_host->pdev->dev, + "%s: failed request start (0x%x)\n", + __func__, status); + spin_unlock_irqrestore(&isci_host->scic_lock, flags); + goto out; + } + + /* Either I/O started OK, or the core has signaled that + * the device needs a target reset. + * + * In either case, hold onto the I/O for later. + * + * Update it's status and add it to the list in the + * remote device object. */ - request->terminated = true; + isci_request_change_state(request, started); + list_add(&request->dev_node, &isci_device->reqs_in_process); - isci_host_can_dequeue(isci_host, 1); + if (status == SCI_SUCCESS) { + /* Save the tag for possible task mgmt later. */ + request->io_tag = request->sci.io_tag; + } else { + /* The request did not really start in the + * hardware, so clear the request handle + * here so no terminations will be done. 
+ */ + request->terminated = true; + } + spin_unlock_irqrestore(&isci_host->scic_lock, flags); + + if (status == + SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) { + /* Signal libsas that we need the SCSI error + * handler thread to work on this I/O and that + * we want a device reset. + */ + spin_lock_irqsave(&task->task_state_lock, flags); + task->task_state_flags |= SAS_TASK_NEED_DEV_RESET; + spin_unlock_irqrestore(&task->task_state_lock, flags); + + /* Cause this task to be scheduled in the SCSI error + * handler thread. + */ + isci_execpath_callback(isci_host, task, + sas_task_abort); + + /* Change the status, since we are holding + * the I/O until it is managed by the SCSI + * error handler. + */ + status = SCI_SUCCESS; + } + + out: + if (status != SCI_SUCCESS) { + /* release dma memory on failure. */ + isci_request_free(isci_host, request); + request = NULL; + ret = SCI_FAILURE; + } + + *isci_request = request; + return ret; } + + + diff --git a/drivers/scsi/isci/request.h b/drivers/scsi/isci/request.h index 06786ece9bb..932ea767c8c 100644 --- a/drivers/scsi/isci/request.h +++ b/drivers/scsi/isci/request.h @@ -58,7 +58,8 @@ #include "isci.h" #include "host.h" -#include "scic_sds_request.h" +#include "scu_task_context.h" +#include "stp_request.h" /** * struct isci_request_status - This enum defines the possible states of an I/O @@ -82,6 +83,151 @@ enum task_type { tmf_task = 1 }; +enum sci_request_protocol { + SCIC_NO_PROTOCOL, + SCIC_SMP_PROTOCOL, + SCIC_SSP_PROTOCOL, + SCIC_STP_PROTOCOL +}; /* XXX remove me, use sas_task.dev instead */; + +struct scic_sds_request { + /** + * This field contains the information for the base request state machine. + */ + struct sci_base_state_machine state_machine; + + /** + * This field simply points to the controller to which this IO request + * is associated. + */ + struct scic_sds_controller *owning_controller; + + /** + * This field simply points to the remote device to which this IO request + * is associated. + */ + struct scic_sds_remote_device *target_device; + + /** + * This field is utilized to determine if the SCI user is managing + * the IO tag for this request or if the core is managing it. + */ + bool was_tag_assigned_by_user; + + /** + * This field indicates the IO tag for this request. The IO tag is + * comprised of the task_index and a sequence count. The sequence count + * is utilized to help identify tasks from one life to another. + */ + u16 io_tag; + + /** + * This field specifies the protocol being utilized for this + * IO request. + */ + enum sci_request_protocol protocol; + + /** + * This field indicates the completion status taken from the SCUs + * completion code. It indicates the completion result for the SCU hardware. + */ + u32 scu_status; + + /** + * This field indicates the completion status returned to the SCI user. It + * indicates the users view of the io request completion. + */ + u32 sci_status; + + /** + * This field contains the value to be utilized when posting (e.g. Post_TC, + * Post_TC_Abort) this request to the silicon. + */ + u32 post_context; + + struct scu_task_context *task_context_buffer; + struct scu_task_context tc ____cacheline_aligned; + + /* could be larger with sg chaining */ + #define SCU_SGL_SIZE ((SCU_IO_REQUEST_SGE_COUNT + 1) / 2) + struct scu_sgl_element_pair sg_table[SCU_SGL_SIZE] __attribute__ ((aligned(32))); + + /** + * This field indicates if this request is a task management request or + * normal IO request. 
+ */ + bool is_task_management_request; + + /** + * This field indicates that this request contains an initialized started + * substate machine. + */ + bool has_started_substate_machine; + + /** + * This field is a pointer to the stored rx frame data. It is used in STP + * internal requests and SMP response frames. If this field is non-NULL the + * saved frame must be released on IO request completion. + * + * @todo In the future do we want to keep a list of RX frame buffers? + */ + u32 saved_rx_frame_index; + + /** + * This field specifies the data necessary to manage the sub-state + * machine executed while in the SCI_BASE_REQUEST_STATE_STARTED state. + */ + struct sci_base_state_machine started_substate_machine; + + /** + * This field specifies the current state handlers in place for this + * IO Request object. This field is updated each time the request + * changes state. + */ + const struct scic_sds_io_request_state_handler *state_handlers; + + /** + * This field in the recorded device sequence for the io request. This is + * recorded during the build operation and is compared in the start + * operation. If the sequence is different then there was a change of + * devices from the build to start operations. + */ + u8 device_sequence; + + union { + struct { + union { + struct ssp_cmd_iu cmd; + struct ssp_task_iu tmf; + }; + union { + struct ssp_response_iu rsp; + u8 rsp_buf[SSP_RESP_IU_MAX_SIZE]; + }; + } ssp; + + struct { + struct smp_req cmd; + struct smp_resp rsp; + } smp; + + struct { + struct scic_sds_stp_request req; + struct host_to_dev_fis cmd; + struct dev_to_host_fis rsp; + } stp; + }; + +}; + +static inline struct scic_sds_request *to_sci_req(struct scic_sds_stp_request *stp_req) +{ + struct scic_sds_request *sci_req; + + sci_req = container_of(stp_req, typeof(*sci_req), stp.req); + return sci_req; +} + struct isci_request { enum isci_request_status status; enum task_type ttype; @@ -125,6 +271,273 @@ static inline struct isci_request *sci_req_to_ireq(struct scic_sds_request *sci_ return ireq; } +/** + * enum sci_base_request_states - This enumeration depicts all the states for + * the common request state machine. + * + * + */ +enum sci_base_request_states { + /** + * Simply the initial state for the base request state machine. + */ + SCI_BASE_REQUEST_STATE_INITIAL, + + /** + * This state indicates that the request has been constructed. This state + * is entered from the INITIAL state. + */ + SCI_BASE_REQUEST_STATE_CONSTRUCTED, + + /** + * This state indicates that the request has been started. This state is + * entered from the CONSTRUCTED state. + */ + SCI_BASE_REQUEST_STATE_STARTED, + + /** + * This state indicates that the request has completed. + * This state is entered from the STARTED state. This state is entered from + * the ABORTING state. + */ + SCI_BASE_REQUEST_STATE_COMPLETED, + + /** + * This state indicates that the request is in the process of being + * terminated/aborted. + * This state is entered from the CONSTRUCTED state. + * This state is entered from the STARTED state. + */ + SCI_BASE_REQUEST_STATE_ABORTING, + + /** + * Simply the final state for the base request state machine. 
+ */ + SCI_BASE_REQUEST_STATE_FINAL, +}; + +typedef enum sci_status (*scic_sds_io_request_handler_t) + (struct scic_sds_request *request); +typedef enum sci_status (*scic_sds_io_request_frame_handler_t) + (struct scic_sds_request *req, u32 frame); +typedef enum sci_status (*scic_sds_io_request_event_handler_t) + (struct scic_sds_request *req, u32 event); +typedef enum sci_status (*scic_sds_io_request_task_completion_handler_t) + (struct scic_sds_request *req, u32 completion_code); + +/** + * struct scic_sds_io_request_state_handler - This is the SDS core definition + * of the state handlers. + * + * + */ +struct scic_sds_io_request_state_handler { + /** + * The start_handler specifies the method invoked when a user attempts to + * start a request. + */ + scic_sds_io_request_handler_t start_handler; + + /** + * The abort_handler specifies the method invoked when a user attempts to + * abort a request. + */ + scic_sds_io_request_handler_t abort_handler; + + /** + * The complete_handler specifies the method invoked when a user attempts to + * complete a request. + */ + scic_sds_io_request_handler_t complete_handler; + + scic_sds_io_request_task_completion_handler_t tc_completion_handler; + scic_sds_io_request_event_handler_t event_handler; + scic_sds_io_request_frame_handler_t frame_handler; + +}; + +extern const struct sci_base_state scic_sds_io_request_started_task_mgmt_substate_table[]; + +/** + * scic_sds_request_get_controller() - + * + * This macro will return the controller for this io request object + */ +#define scic_sds_request_get_controller(sci_req) \ + ((sci_req)->owning_controller) + +/** + * scic_sds_request_get_device() - + * + * This macro will return the device for this io request object + */ +#define scic_sds_request_get_device(sci_req) \ + ((sci_req)->target_device) + +/** + * scic_sds_request_get_port() - + * + * This macro will return the port for this io request object + */ +#define scic_sds_request_get_port(sci_req) \ + scic_sds_remote_device_get_port(scic_sds_request_get_device(sci_req)) + +/** + * scic_sds_request_get_post_context() - + * + * This macro returns the constructed post context result for the io request. + */ +#define scic_sds_request_get_post_context(sci_req) \ + ((sci_req)->post_context) + +/** + * scic_sds_request_get_task_context() - + * + * This is a helper macro to return the os handle for this request object. + */ +#define scic_sds_request_get_task_context(request) \ + ((request)->task_context_buffer) + +/** + * scic_sds_request_set_status() - + * + * This macro will set the scu hardware status and sci request completion + * status for an io request. 
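+ * Typical use, as in scic_sds_request_aborting_state_tc_completion_handler():
+ *
+ *	scic_sds_request_set_status(sci_req, SCU_TASK_DONE_TASK_ABORT,
+ *				    SCI_FAILURE_IO_TERMINATED);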
+ */ +#define scic_sds_request_set_status(request, scu_status_code, sci_status_code) \ + { \ + (request)->scu_status = (scu_status_code); \ + (request)->sci_status = (sci_status_code); \ + } + +#define scic_sds_request_complete(a_request) \ + ((a_request)->state_handlers->complete_handler(a_request)) + + +extern enum sci_status +scic_sds_io_request_tc_completion(struct scic_sds_request *request, u32 completion_code); + +/** + * SCU_SGL_ZERO() - + * + * This macro zeros the hardware SGL element data + */ +#define SCU_SGL_ZERO(scu_sge) \ + { \ + (scu_sge).length = 0; \ + (scu_sge).address_lower = 0; \ + (scu_sge).address_upper = 0; \ + (scu_sge).address_modifier = 0; \ + } + +/** + * SCU_SGL_COPY() - + * + * This macro copys the SGL Element data from the host os to the hardware SGL + * elment data + */ +#define SCU_SGL_COPY(scu_sge, os_sge) \ + { \ + (scu_sge).length = sg_dma_len(sg); \ + (scu_sge).address_upper = \ + upper_32_bits(sg_dma_address(sg)); \ + (scu_sge).address_lower = \ + lower_32_bits(sg_dma_address(sg)); \ + (scu_sge).address_modifier = 0; \ + } + +void scic_sds_request_build_sgl(struct scic_sds_request *sci_req); +void scic_sds_stp_request_assign_buffers(struct scic_sds_request *sci_req); +void scic_sds_smp_request_assign_buffers(struct scic_sds_request *sci_req); +enum sci_status scic_sds_request_start(struct scic_sds_request *sci_req); +enum sci_status scic_sds_io_request_terminate(struct scic_sds_request *sci_req); +void scic_sds_io_request_copy_response(struct scic_sds_request *sci_req); +enum sci_status scic_sds_io_request_event_handler(struct scic_sds_request *sci_req, + u32 event_code); +enum sci_status scic_sds_io_request_frame_handler(struct scic_sds_request *sci_req, + u32 frame_index); +enum sci_status scic_sds_task_request_terminate(struct scic_sds_request *sci_req); +enum sci_status scic_sds_request_started_state_abort_handler(struct scic_sds_request *sci_req); + +/** + * enum _scic_sds_io_request_started_task_mgmt_substates - This enumeration + * depicts all of the substates for a task management request to be + * performed in the STARTED super-state. + * + * + */ +enum scic_sds_raw_request_started_task_mgmt_substates { + /** + * The AWAIT_TC_COMPLETION sub-state indicates that the started raw + * task management request is waiting for the transmission of the + * initial frame (i.e. command, task, etc.). + */ + SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_COMPLETION, + + /** + * This sub-state indicates that the started task management request + * is waiting for the reception of an unsolicited frame + * (i.e. response IU). + */ + SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_RESPONSE, +}; + + +/** + * enum _scic_sds_smp_request_started_substates - This enumeration depicts all + * of the substates for a SMP request to be performed in the STARTED + * super-state. + * + * + */ +enum scic_sds_smp_request_started_substates { + /** + * This sub-state indicates that the started task management request + * is waiting for the reception of an unsolicited frame + * (i.e. response IU). + */ + SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_RESPONSE, + + /** + * The AWAIT_TC_COMPLETION sub-state indicates that the started SMP request is + * waiting for the transmission of the initial frame (i.e. command, task, etc.). 
+ */ + SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_TC_COMPLETION, +}; + + + +/* XXX open code in caller */ +static inline void *scic_request_get_virt_addr(struct scic_sds_request *sci_req, + dma_addr_t phys_addr) +{ + struct isci_request *ireq = sci_req_to_ireq(sci_req); + dma_addr_t offset; + + BUG_ON(phys_addr < ireq->request_daddr); + + offset = phys_addr - ireq->request_daddr; + + BUG_ON(offset >= sizeof(*ireq)); + + return (char *)ireq + offset; +} + +/* XXX open code in caller */ +static inline dma_addr_t scic_io_request_get_dma_addr(struct scic_sds_request *sci_req, + void *virt_addr) +{ + struct isci_request *ireq = sci_req_to_ireq(sci_req); + + char *requested_addr = (char *)virt_addr; + char *base_addr = (char *)ireq; + + BUG_ON(requested_addr < base_addr); + BUG_ON((requested_addr - base_addr) >= sizeof(*ireq)); + + return ireq->request_daddr + (requested_addr - base_addr); +} + /** * This function gets the status of the request object. * @request: This parameter points to the isci_request object @@ -337,12 +750,6 @@ static inline void isci_request_unmap_sgl( } } - -void isci_request_io_request_complete( - struct isci_host *isci_host, - struct isci_request *request, - enum sci_io_status completion_status); - /** * isci_request_io_request_get_next_sge() - This function is called by the sci * core to retrieve the next sge for a given request. @@ -385,13 +792,16 @@ static inline void *isci_request_io_request_get_next_sge( return ret; } - -void isci_terminate_pending_requests( - struct isci_host *isci_host, - struct isci_remote_device *isci_device, - enum isci_request_status new_request_state); - - - - +void isci_terminate_pending_requests(struct isci_host *isci_host, + struct isci_remote_device *isci_device, + enum isci_request_status new_request_state); +enum sci_status scic_task_request_construct(struct scic_sds_controller *scic, + struct scic_sds_remote_device *sci_dev, + u16 io_tag, + struct scic_sds_request *sci_req); +enum sci_status scic_task_request_construct_ssp(struct scic_sds_request *sci_req); +enum sci_status scic_task_request_construct_sata(struct scic_sds_request *sci_req); +enum sci_status scic_io_request_construct_smp(struct scic_sds_request *sci_req); +void scic_stp_io_request_set_ncq_tag(struct scic_sds_request *sci_req, u16 ncq_tag); +void scic_sds_smp_request_copy_response(struct scic_sds_request *sci_req); #endif /* !defined(_ISCI_REQUEST_H_) */ diff --git a/drivers/scsi/isci/sata.c b/drivers/scsi/isci/sata.c index 08dabf08f7d..b9b9271d473 100644 --- a/drivers/scsi/isci/sata.c +++ b/drivers/scsi/isci/sata.c @@ -56,8 +56,6 @@ #include <scsi/sas.h> #include "isci.h" #include "remote_device.h" -#include "scic_io_request.h" -#include "scic_task_request.h" #include "task.h" #include "request.h" #include "sata.h" diff --git a/drivers/scsi/isci/smp_request.c b/drivers/scsi/isci/smp_request.c new file mode 100644 index 00000000000..d4750a761c5 --- /dev/null +++ b/drivers/scsi/isci/smp_request.c @@ -0,0 +1,518 @@ +/* + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * BSD LICENSE + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include <scsi/sas.h> +#include "state_machine.h" +#include "remote_device.h" +#include "request.h" +#include "scu_completion_codes.h" +#include "scu_task_context.h" +#include "host.h" + +static void scu_smp_request_construct_task_context( + struct scic_sds_request *sci_req, + struct smp_req *smp_req); + +void scic_sds_smp_request_assign_buffers(struct scic_sds_request *sci_req) +{ + if (sci_req->was_tag_assigned_by_user == false) + sci_req->task_context_buffer = &sci_req->tc; +} + +/* + * This function will fill in the SCU Task Context for a SMP request. The + * following important settings are utilized: -# task_type == + * SCU_TASK_TYPE_SMP. This simply indicates that a normal request type + * (i.e. non-raw frame) is being utilized to perform task management. -# + * control_frame == 1. This ensures that the proper endianess is set so + * that the bytes are transmitted in the right order for a smp request frame. + * @sci_req: This parameter specifies the smp request object being + * constructed. 
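+ * @smp_req: This parameter specifies the SMP request frame (struct smp_req)
+ *    whose contents are byte swapped into the request's command IU storage
+ *    before the remaining task context fields are filled in.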
+ *
+ */
+static void
+scu_smp_request_construct_task_context(struct scic_sds_request *sci_req,
+				       struct smp_req *smp_req)
+{
+	dma_addr_t dma_addr;
+	struct scic_sds_controller *scic;
+	struct scic_sds_remote_device *sci_dev;
+	struct scic_sds_port *sci_port;
+	struct scu_task_context *task_context;
+	ssize_t word_cnt = sizeof(struct smp_req) / sizeof(u32);
+
+	/* byte swap the SMP request. */
+	sci_swab32_cpy(&sci_req->smp.cmd, smp_req,
+		       word_cnt);
+
+	task_context = scic_sds_request_get_task_context(sci_req);
+
+	scic = scic_sds_request_get_controller(sci_req);
+	sci_dev = scic_sds_request_get_device(sci_req);
+	sci_port = scic_sds_request_get_port(sci_req);
+
+	/*
+	 * Fill in the TC with its required data
+	 * 00h
+	 */
+	task_context->priority = 0;
+	task_context->initiator_request = 1;
+	task_context->connection_rate = sci_dev->connection_rate;
+	task_context->protocol_engine_index =
+		scic_sds_controller_get_protocol_engine_group(scic);
+	task_context->logical_port_index = scic_sds_port_get_index(sci_port);
+	task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SMP;
+	task_context->abort = 0;
+	task_context->valid = SCU_TASK_CONTEXT_VALID;
+	task_context->context_type = SCU_TASK_CONTEXT_TYPE;
+
+	/* 04h */
+	task_context->remote_node_index = sci_dev->rnc.remote_node_index;
+	task_context->command_code = 0;
+	task_context->task_type = SCU_TASK_TYPE_SMP_REQUEST;
+
+	/* 08h */
+	task_context->link_layer_control = 0;
+	task_context->do_not_dma_ssp_good_response = 1;
+	task_context->strict_ordering = 0;
+	task_context->control_frame = 1;
+	task_context->timeout_enable = 0;
+	task_context->block_guard_enable = 0;
+
+	/* 0ch */
+	task_context->address_modifier = 0;
+
+	/* 10h */
+	task_context->ssp_command_iu_length = smp_req->req_len;
+
+	/* 14h */
+	task_context->transfer_length_bytes = 0;
+
+	/*
+	 * 18h ~ 30h, protocol specific
+	 * since the command IU has been built by the framework at this point,
+	 * we just copy the first DWord from the command IU to this location. */
+	memcpy(&task_context->type.smp, &sci_req->smp.cmd, sizeof(u32));
+
+	/*
+	 * 40h
+	 * "For SMP you could program it to zero. We would prefer that way
+	 * so that done code will be consistent." - Venki
+	 */
+	task_context->task_phase = 0;
+
+	if (sci_req->was_tag_assigned_by_user) {
+		/*
+		 * Build the task context now since we have already read
+		 * the data
+		 */
+		sci_req->post_context =
+			(SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
+			 (scic_sds_controller_get_protocol_engine_group(scic) <<
+			  SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
+			 (scic_sds_port_get_index(sci_port) <<
+			  SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
+			 scic_sds_io_tag_get_index(sci_req->io_tag));
+	} else {
+		/*
+		 * Build the task context now since we have already read
+		 * the data.
+		 * I/O tag index is not assigned because we have to wait
+		 * until we get a TCi.
+		 */
+		sci_req->post_context =
+			(SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
+			 (scic_sds_controller_get_protocol_engine_group(scic) <<
+			  SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
+			 (scic_sds_port_get_index(sci_port) <<
+			  SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT));
+	}
+
+	/*
+	 * Copy the physical address of the command buffer to the SCU Task
+	 * Context; the command buffer should not contain the command header.
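+	 * The DMA address below skips the first DWord of the command IU: that
+	 * DWord was already copied into task_context->type.smp above and is
+	 * delivered as part of the TC itself (the STP/SATA construction later
+	 * in this patch applies the same offset for the same reason).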
+ */ + dma_addr = scic_io_request_get_dma_addr(sci_req, + ((char *) &sci_req->smp.cmd) + + sizeof(u32)); + + task_context->command_iu_upper = upper_32_bits(dma_addr); + task_context->command_iu_lower = lower_32_bits(dma_addr); + + /* SMP response comes as UF, so no need to set response IU address. */ + task_context->response_iu_upper = 0; + task_context->response_iu_lower = 0; +} + +/* + * This function processes an unsolicited frame while the SMP request is waiting + * for a response frame. It will copy the response data, release the + * unsolicited frame, and transition the request to the + * SCI_BASE_REQUEST_STATE_COMPLETED state. + * @sci_req: This parameter specifies the request for which the + * unsolicited frame was received. + * @frame_index: This parameter indicates the unsolicited frame index that + * should contain the response. + * + * This function returns an indication of whether the response frame was handled + * successfully or not. SCI_SUCCESS Currently this value is always returned and + * indicates successful processing of the TC response. + */ +static enum sci_status +scic_sds_smp_request_await_response_frame_handler(struct scic_sds_request *sci_req, + u32 frame_index) +{ + enum sci_status status; + void *frame_header; + struct smp_resp *rsp_hdr = &sci_req->smp.rsp; + ssize_t word_cnt = SMP_RESP_HDR_SZ / sizeof(u32); + + status = scic_sds_unsolicited_frame_control_get_header( + &(scic_sds_request_get_controller(sci_req)->uf_control), + frame_index, + &frame_header); + + /* byte swap the header. */ + sci_swab32_cpy(rsp_hdr, frame_header, word_cnt); + + if (rsp_hdr->frame_type == SMP_RESPONSE) { + void *smp_resp; + + status = scic_sds_unsolicited_frame_control_get_buffer( + &(scic_sds_request_get_controller(sci_req)->uf_control), + frame_index, + &smp_resp); + + word_cnt = (sizeof(struct smp_req) - SMP_RESP_HDR_SZ) / + sizeof(u32); + + sci_swab32_cpy(((u8 *) rsp_hdr) + SMP_RESP_HDR_SZ, + smp_resp, word_cnt); + + scic_sds_request_set_status( + sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS); + + sci_base_state_machine_change_state( + &sci_req->started_substate_machine, + SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_TC_COMPLETION); + } else { + /* This was not a response frame why did it get forwarded? */ + dev_err(scic_to_dev(sci_req->owning_controller), + "%s: SCIC SMP Request 0x%p received unexpected frame " + "%d type 0x%02x\n", + __func__, + sci_req, + frame_index, + rsp_hdr->frame_type); + + scic_sds_request_set_status( + sci_req, + SCU_TASK_DONE_SMP_FRM_TYPE_ERR, + SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); + + sci_base_state_machine_change_state( + &sci_req->state_machine, + SCI_BASE_REQUEST_STATE_COMPLETED); + } + + scic_sds_controller_release_frame(sci_req->owning_controller, + frame_index); + + return SCI_SUCCESS; +} + + +/** + * This method processes an abnormal TC completion while the SMP request is + * waiting for a response frame. It decides what happened to the IO based + * on TC completion status. + * @sci_req: This parameter specifies the request for which the TC + * completion was received. + * @completion_code: This parameter indicates the completion status information + * for the TC. + * + * Indicate if the tc completion handler was successful. SCI_SUCCESS currently + * this method always returns success. 
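+ *
+ * The SMP_RESP_TO_ERR, SMP_UFI_ERR, SMP_FRM_TYPE_ERR and SMP_LL_RX_ERR TC
+ * statuses are reported back as SCI_FAILURE_RETRY_REQUIRED so that the scic
+ * user can retry the request; any other abnormal status completes the IO
+ * with SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR.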
+ */
+static enum sci_status scic_sds_smp_request_await_response_tc_completion_handler(
+	struct scic_sds_request *sci_req,
+	u32 completion_code)
+{
+	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
+	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
+		/*
+		 * In the AWAIT RESPONSE state, any TC completion is unexpected,
+		 * but if the TC has success status, we complete the IO anyway. */
+		scic_sds_request_set_status(
+			sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS
+			);
+
+		sci_base_state_machine_change_state(
+			&sci_req->state_machine,
+			SCI_BASE_REQUEST_STATE_COMPLETED);
+		break;
+
+	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_RESP_TO_ERR):
+	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_UFI_ERR):
+	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_FRM_TYPE_ERR):
+	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_LL_RX_ERR):
+		/*
+		 * These statuses have been seen with a specific LSI expander, which
+		 * sometimes is not able to send an SMP response within 2 ms. This causes
+		 * our hardware to break the connection and set the TC completion to one
+		 * of these SMP_XXX_XX_ERR statuses. For this type of error, we ask the
+		 * scic user to retry the request. */
+		scic_sds_request_set_status(
+			sci_req, SCU_TASK_DONE_SMP_RESP_TO_ERR, SCI_FAILURE_RETRY_REQUIRED
+			);
+
+		sci_base_state_machine_change_state(
+			&sci_req->state_machine,
+			SCI_BASE_REQUEST_STATE_COMPLETED);
+		break;
+
+	default:
+		/*
+		 * All other completion statuses cause the IO to be complete. If a NAK
+		 * was received, then it is up to the user to retry the request. */
+		scic_sds_request_set_status(
+			sci_req,
+			SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
+			SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR
+			);
+
+		sci_base_state_machine_change_state(
+			&sci_req->state_machine,
+			SCI_BASE_REQUEST_STATE_COMPLETED);
+		break;
+	}
+
+	return SCI_SUCCESS;
+}
+
+
+/**
+ * This method processes the completion's transport layer (TL) status to
+ *    determine if the SMP request was sent successfully. If the SMP request
+ *    was sent successfully, then the state for the SMP request transits to
+ *    waiting for a response frame.
+ * @sci_req: This parameter specifies the request for which the TC
+ *    completion was received.
+ * @completion_code: This parameter indicates the completion status information
+ *    for the TC.
+ *
+ * Indicate if the TC completion handler was successful. SCI_SUCCESS: currently
+ * this method always returns success.
+ */
+static enum sci_status scic_sds_smp_request_await_tc_completion_tc_completion_handler(
+	struct scic_sds_request *sci_req,
+	u32 completion_code)
+{
+	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
+	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
+		scic_sds_request_set_status(
+			sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS
+			);
+
+		sci_base_state_machine_change_state(
+			&sci_req->state_machine,
+			SCI_BASE_REQUEST_STATE_COMPLETED);
+		break;
+
+	default:
+		/*
+		 * All other completion statuses cause the IO to be complete. If a NAK
+		 * was received, then it is up to the user to retry the request.
*/ + scic_sds_request_set_status( + sci_req, + SCU_NORMALIZE_COMPLETION_STATUS(completion_code), + SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR + ); + + sci_base_state_machine_change_state( + &sci_req->state_machine, + SCI_BASE_REQUEST_STATE_COMPLETED); + break; + } + + return SCI_SUCCESS; +} + + +static const struct scic_sds_io_request_state_handler scic_sds_smp_request_started_substate_handler_table[] = { + [SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_RESPONSE] = { + .abort_handler = scic_sds_request_started_state_abort_handler, + .tc_completion_handler = scic_sds_smp_request_await_response_tc_completion_handler, + .frame_handler = scic_sds_smp_request_await_response_frame_handler, + }, + [SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_TC_COMPLETION] = { + .abort_handler = scic_sds_request_started_state_abort_handler, + .tc_completion_handler = scic_sds_smp_request_await_tc_completion_tc_completion_handler, + } +}; + +/** + * This method performs the actions required when entering the + * SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_TC_RESPONSE sub-state. This + * includes setting the IO request state handlers for this sub-state. + * @object: This parameter specifies the request object for which the sub-state + * change is occurring. + * + * none. + */ +static void scic_sds_smp_request_started_await_response_substate_enter( + void *object) +{ + struct scic_sds_request *sci_req = object; + + SET_STATE_HANDLER( + sci_req, + scic_sds_smp_request_started_substate_handler_table, + SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_RESPONSE + ); +} + +/** + * This method performs the actions required when entering the + * SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_TC_COMPLETION sub-state. + * This includes setting the SMP request state handlers for this sub-state. + * @object: This parameter specifies the request object for which the sub-state + * change is occurring. + * + * none. + */ +static void scic_sds_smp_request_started_await_tc_completion_substate_enter( + void *object) +{ + struct scic_sds_request *sci_req = object; + + SET_STATE_HANDLER( + sci_req, + scic_sds_smp_request_started_substate_handler_table, + SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_TC_COMPLETION + ); +} + +static const struct sci_base_state scic_sds_smp_request_started_substate_table[] = { + [SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_RESPONSE] = { + .enter_state = scic_sds_smp_request_started_await_response_substate_enter, + }, + [SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_TC_COMPLETION] = { + .enter_state = scic_sds_smp_request_started_await_tc_completion_substate_enter, + }, +}; + +/** + * This method is called by the SCI user to build an SMP IO request. + * + * - The user must have previously called scic_io_request_construct() on the + * supplied IO request. Indicate if the controller successfully built the IO + * request. SCI_SUCCESS This value is returned if the IO request was + * successfully built. SCI_FAILURE_UNSUPPORTED_PROTOCOL This value is returned + * if the remote_device does not support the SMP protocol. + * SCI_FAILURE_INVALID_ASSOCIATION This value is returned if the user did not + * properly set the association between the SCIC IO request and the user's IO + * request. 
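+ *
+ * Illustrative call order only (arguments elided; not taken from a real
+ * caller in this patch):
+ *
+ *	scic_io_request_construct(...);          - general construction
+ *	scic_io_request_construct_smp(sci_req);  - SMP specific build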
+ */ +enum sci_status scic_io_request_construct_smp(struct scic_sds_request *sci_req) +{ + struct smp_req *smp_req = kmalloc(sizeof(*smp_req), GFP_KERNEL); + + if (!smp_req) + return SCI_FAILURE_INSUFFICIENT_RESOURCES; + + sci_req->protocol = SCIC_SMP_PROTOCOL; + sci_req->has_started_substate_machine = true; + + /* Construct the started sub-state machine. */ + sci_base_state_machine_construct( + &sci_req->started_substate_machine, + sci_req, + scic_sds_smp_request_started_substate_table, + SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_RESPONSE + ); + + /* Construct the SMP SCU Task Context */ + memcpy(smp_req, &sci_req->smp.cmd, sizeof(*smp_req)); + + /* + * Look at the SMP requests' header fields; for certain SAS 1.x SMP + * functions under SAS 2.0, a zero request length really indicates + * a non-zero default length. */ + if (smp_req->req_len == 0) { + switch (smp_req->func) { + case SMP_DISCOVER: + case SMP_REPORT_PHY_ERR_LOG: + case SMP_REPORT_PHY_SATA: + case SMP_REPORT_ROUTE_INFO: + smp_req->req_len = 2; + break; + case SMP_CONF_ROUTE_INFO: + case SMP_PHY_CONTROL: + case SMP_PHY_TEST_FUNCTION: + smp_req->req_len = 9; + break; + /* Default - zero is a valid default for 2.0. */ + } + } + + scu_smp_request_construct_task_context(sci_req, smp_req); + + sci_base_state_machine_change_state(&sci_req->state_machine, + SCI_BASE_REQUEST_STATE_CONSTRUCTED); + + kfree(smp_req); + + return SCI_SUCCESS; +} diff --git a/drivers/scsi/isci/ssp_request.c b/drivers/scsi/isci/ssp_request.c new file mode 100644 index 00000000000..4b6317aeef7 --- /dev/null +++ b/drivers/scsi/isci/ssp_request.c @@ -0,0 +1,240 @@ +/* + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * BSD LICENSE + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "host.h" +#include "request.h" +#include "state_machine.h" +#include "scu_task_context.h" +#include "scu_completion_codes.h" + +/** + * This method processes the completions transport layer (TL) status to + * determine if the RAW task management frame was sent successfully. If the + * raw frame was sent successfully, then the state for the task request + * transitions to waiting for a response frame. + * @sci_req: This parameter specifies the request for which the TC + * completion was received. + * @completion_code: This parameter indicates the completion status information + * for the TC. + * + * Indicate if the tc completion handler was successful. SCI_SUCCESS currently + * this method always returns success. + */ +static enum sci_status scic_sds_ssp_task_request_await_tc_completion_tc_completion_handler( + struct scic_sds_request *sci_req, + u32 completion_code) +{ + switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): + scic_sds_request_set_status( + sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS + ); + + sci_base_state_machine_change_state( + &sci_req->started_substate_machine, + SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_RESPONSE + ); + break; + + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO): + /* + * Currently, the decision is to simply allow the task request to + * timeout if the task IU wasn't received successfully. + * There is a potential for receiving multiple task responses if we + * decide to send the task IU again. */ + dev_warn(scic_to_dev(sci_req->owning_controller), + "%s: TaskRequest:0x%p CompletionCode:%x - " + "ACK/NAK timeout\n", + __func__, + sci_req, + completion_code); + + sci_base_state_machine_change_state( + &sci_req->started_substate_machine, + SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_RESPONSE + ); + break; + + default: + /* + * All other completion status cause the IO to be complete. If a NAK + * was received, then it is up to the user to retry the request. */ + scic_sds_request_set_status( + sci_req, + SCU_NORMALIZE_COMPLETION_STATUS(completion_code), + SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR + ); + + sci_base_state_machine_change_state(&sci_req->state_machine, + SCI_BASE_REQUEST_STATE_COMPLETED); + break; + } + + return SCI_SUCCESS; +} + +/** + * This method is responsible for processing a terminate/abort request for this + * TC while the request is waiting for the task management response + * unsolicited frame. + * @sci_req: This parameter specifies the request for which the + * termination was requested. + * + * This method returns an indication as to whether the abort request was + * successfully handled. 
need to update to ensure the received UF doesn't cause + * damage to subsequent requests (i.e. put the extended tag in a holding + * pattern for this particular device). + */ +static enum sci_status scic_sds_ssp_task_request_await_tc_response_abort_handler( + struct scic_sds_request *request) +{ + sci_base_state_machine_change_state(&request->state_machine, + SCI_BASE_REQUEST_STATE_ABORTING); + sci_base_state_machine_change_state(&request->state_machine, + SCI_BASE_REQUEST_STATE_COMPLETED); + return SCI_SUCCESS; +} + +/** + * This method processes an unsolicited frame while the task mgmt request is + * waiting for a response frame. It will copy the response data, release + * the unsolicited frame, and transition the request to the + * SCI_BASE_REQUEST_STATE_COMPLETED state. + * @sci_req: This parameter specifies the request for which the + * unsolicited frame was received. + * @frame_index: This parameter indicates the unsolicited frame index that + * should contain the response. + * + * This method returns an indication of whether the TC response frame was + * handled successfully or not. SCI_SUCCESS Currently this value is always + * returned and indicates successful processing of the TC response. Should + * probably update to check frame type and make sure it is a response frame. + */ +static enum sci_status scic_sds_ssp_task_request_await_tc_response_frame_handler( + struct scic_sds_request *request, + u32 frame_index) +{ + scic_sds_io_request_copy_response(request); + + sci_base_state_machine_change_state(&request->state_machine, + SCI_BASE_REQUEST_STATE_COMPLETED); + scic_sds_controller_release_frame(request->owning_controller, + frame_index); + return SCI_SUCCESS; +} + +static const struct scic_sds_io_request_state_handler scic_sds_ssp_task_request_started_substate_handler_table[] = { + [SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_COMPLETION] = { + .abort_handler = scic_sds_request_started_state_abort_handler, + .tc_completion_handler = scic_sds_ssp_task_request_await_tc_completion_tc_completion_handler, + }, + [SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_RESPONSE] = { + .abort_handler = scic_sds_ssp_task_request_await_tc_response_abort_handler, + .frame_handler = scic_sds_ssp_task_request_await_tc_response_frame_handler, + } +}; + +/** + * This method performs the actions required when entering the + * SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_COMPLETION + * sub-state. This includes setting the IO request state handlers for this + * sub-state. + * @object: This parameter specifies the request object for which the sub-state + * change is occurring. + * + * none. + */ +static void scic_sds_io_request_started_task_mgmt_await_tc_completion_substate_enter( + void *object) +{ + struct scic_sds_request *sci_req = object; + + SET_STATE_HANDLER( + sci_req, + scic_sds_ssp_task_request_started_substate_handler_table, + SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_COMPLETION + ); +} + +/** + * This method performs the actions required when entering the + * SCIC_SDS_IO_REQUEST_STARTED_SUBSTATE_AWAIT_TC_RESPONSE sub-state. This + * includes setting the IO request state handlers for this sub-state. + * @object: This parameter specifies the request object for which the sub-state + * change is occurring. + * + * none. 
+ */ +static void scic_sds_io_request_started_task_mgmt_await_task_response_substate_enter( + void *object) +{ + struct scic_sds_request *sci_req = object; + + SET_STATE_HANDLER( + sci_req, + scic_sds_ssp_task_request_started_substate_handler_table, + SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_RESPONSE + ); +} + +const struct sci_base_state scic_sds_io_request_started_task_mgmt_substate_table[] = { + [SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_COMPLETION] = { + .enter_state = scic_sds_io_request_started_task_mgmt_await_tc_completion_substate_enter, + }, + [SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_RESPONSE] = { + .enter_state = scic_sds_io_request_started_task_mgmt_await_task_response_substate_enter, + }, +}; + diff --git a/drivers/scsi/isci/stp_request.c b/drivers/scsi/isci/stp_request.c new file mode 100644 index 00000000000..298086afa5a --- /dev/null +++ b/drivers/scsi/isci/stp_request.c @@ -0,0 +1,1590 @@ +/* + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * BSD LICENSE + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include <scsi/sas.h> +#include "sas.h" +#include "state_machine.h" +#include "remote_device.h" +#include "stp_request.h" +#include "unsolicited_frame_control.h" +#include "scu_completion_codes.h" +#include "scu_event_codes.h" +#include "scu_task_context.h" +#include "request.h" + +void scic_sds_stp_request_assign_buffers(struct scic_sds_request *sci_req) +{ + if (sci_req->was_tag_assigned_by_user == false) + sci_req->task_context_buffer = &sci_req->tc; +} + +/** + * This method is will fill in the SCU Task Context for any type of SATA + * request. This is called from the various SATA constructors. + * @sci_req: The general IO request object which is to be used in + * constructing the SCU task context. + * @task_context: The buffer pointer for the SCU task context which is being + * constructed. + * + * The general io request construction is complete. The buffer assignment for + * the command buffer is complete. none Revisit task context construction to + * determine what is common for SSP/SMP/STP task context structures. + */ +static void scu_sata_reqeust_construct_task_context( + struct scic_sds_request *sci_req, + struct scu_task_context *task_context) +{ + dma_addr_t dma_addr; + struct scic_sds_controller *controller; + struct scic_sds_remote_device *target_device; + struct scic_sds_port *target_port; + + controller = scic_sds_request_get_controller(sci_req); + target_device = scic_sds_request_get_device(sci_req); + target_port = scic_sds_request_get_port(sci_req); + + /* Fill in the TC with the its required data */ + task_context->abort = 0; + task_context->priority = SCU_TASK_PRIORITY_NORMAL; + task_context->initiator_request = 1; + task_context->connection_rate = target_device->connection_rate; + task_context->protocol_engine_index = + scic_sds_controller_get_protocol_engine_group(controller); + task_context->logical_port_index = + scic_sds_port_get_index(target_port); + task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_STP; + task_context->valid = SCU_TASK_CONTEXT_VALID; + task_context->context_type = SCU_TASK_CONTEXT_TYPE; + + task_context->remote_node_index = + scic_sds_remote_device_get_index(sci_req->target_device); + task_context->command_code = 0; + + task_context->link_layer_control = 0; + task_context->do_not_dma_ssp_good_response = 1; + task_context->strict_ordering = 0; + task_context->control_frame = 0; + task_context->timeout_enable = 0; + task_context->block_guard_enable = 0; + + task_context->address_modifier = 0; + task_context->task_phase = 0x01; + + task_context->ssp_command_iu_length = + (sizeof(struct host_to_dev_fis) - sizeof(u32)) / sizeof(u32); + + /* Set the first word of the H2D REG FIS */ + task_context->type.words[0] = *(u32 *)&sci_req->stp.cmd; + + if (sci_req->was_tag_assigned_by_user) { + /* + * Build the task context now since we have already read + * the data + */ + sci_req->post_context = + (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC | + (scic_sds_controller_get_protocol_engine_group( + controller) << + 
SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) | + (scic_sds_port_get_index(target_port) << + SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) | + scic_sds_io_tag_get_index(sci_req->io_tag)); + } else { + /* + * Build the task context now since we have already read + * the data. + * I/O tag index is not assigned because we have to wait + * until we get a TCi. + */ + sci_req->post_context = + (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC | + (scic_sds_controller_get_protocol_engine_group( + controller) << + SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) | + (scic_sds_port_get_index(target_port) << + SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT)); + } + + /* + * Copy the physical address for the command buffer to the SCU Task + * Context. We must offset the command buffer by 4 bytes because the + * first 4 bytes are transfered in the body of the TC. + */ + dma_addr = scic_io_request_get_dma_addr(sci_req, + ((char *) &sci_req->stp.cmd) + + sizeof(u32)); + + task_context->command_iu_upper = upper_32_bits(dma_addr); + task_context->command_iu_lower = lower_32_bits(dma_addr); + + /* SATA Requests do not have a response buffer */ + task_context->response_iu_upper = 0; + task_context->response_iu_lower = 0; +} + +/** + * + * @sci_req: + * + * This method will perform any general sata request construction. What part of + * SATA IO request construction is general? none + */ +static void scic_sds_stp_non_ncq_request_construct( + struct scic_sds_request *sci_req) +{ + sci_req->has_started_substate_machine = true; +} + +/** + * + * @sci_req: This parameter specifies the request to be constructed as an + * optimized request. + * @optimized_task_type: This parameter specifies whether the request is to be + * an UDMA request or a NCQ request. - A value of 0 indicates UDMA. - A + * value of 1 indicates NCQ. + * + * This method will perform request construction common to all types of STP + * requests that are optimized by the silicon (i.e. UDMA, NCQ). This method + * returns an indication as to whether the construction was successful. + */ +static void scic_sds_stp_optimized_request_construct(struct scic_sds_request *sci_req, + u8 optimized_task_type, + u32 len, + enum dma_data_direction dir) +{ + struct scu_task_context *task_context = sci_req->task_context_buffer; + + /* Build the STP task context structure */ + scu_sata_reqeust_construct_task_context(sci_req, task_context); + + /* Copy over the SGL elements */ + scic_sds_request_build_sgl(sci_req); + + /* Copy over the number of bytes to be transfered */ + task_context->transfer_length_bytes = len; + + if (dir == DMA_TO_DEVICE) { + /* + * The difference between the DMA IN and DMA OUT request task type + * values are consistent with the difference between FPDMA READ + * and FPDMA WRITE values. Add the supplied task type parameter + * to this difference to set the task type properly for this + * DATA OUT (WRITE) case. */ + task_context->task_type = optimized_task_type + (SCU_TASK_TYPE_DMA_OUT + - SCU_TASK_TYPE_DMA_IN); + } else { + /* + * For the DATA IN (READ) case, simply save the supplied + * optimized task type. */ + task_context->task_type = optimized_task_type; + } +} + +/** + * + * @sci_req: This parameter specifies the request to be constructed. + * + * This method will construct the STP UDMA request and its associated TC data. + * This method returns an indication as to whether the construction was + * successful. SCI_SUCCESS Currently this method always returns this value. 
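+ *
+ * For a write (DMA_TO_DEVICE) the optimized construction helper above adds
+ * (SCU_TASK_TYPE_DMA_OUT - SCU_TASK_TYPE_DMA_IN) to the supplied task type,
+ * turning the FPDMA READ task type passed below into the corresponding
+ * FPDMA WRITE task type; for a read the supplied value is used unchanged.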
+ */ +enum sci_status scic_sds_stp_ncq_request_construct(struct scic_sds_request *sci_req, + u32 len, + enum dma_data_direction dir) +{ + scic_sds_stp_optimized_request_construct(sci_req, + SCU_TASK_TYPE_FPDMAQ_READ, + len, dir); + return SCI_SUCCESS; +} + +/** + * scu_stp_raw_request_construct_task_context - + * @sci_req: This parameter specifies the STP request object for which to + * construct a RAW command frame task context. + * @task_context: This parameter specifies the SCU specific task context buffer + * to construct. + * + * This method performs the operations common to all SATA/STP requests + * utilizing the raw frame method. none + */ +static void scu_stp_raw_request_construct_task_context( + struct scic_sds_stp_request *stp_req, + struct scu_task_context *task_context) +{ + struct scic_sds_request *sci_req = to_sci_req(stp_req); + + scu_sata_reqeust_construct_task_context(sci_req, task_context); + + task_context->control_frame = 0; + task_context->priority = SCU_TASK_PRIORITY_NORMAL; + task_context->task_type = SCU_TASK_TYPE_SATA_RAW_FRAME; + task_context->type.stp.fis_type = FIS_REGH2D; + task_context->transfer_length_bytes = sizeof(struct host_to_dev_fis) - sizeof(u32); +} + +void scic_stp_io_request_set_ncq_tag( + struct scic_sds_request *req, + u16 ncq_tag) +{ + /** + * @note This could be made to return an error to the user if the user + * attempts to set the NCQ tag in the wrong state. + */ + req->task_context_buffer->type.stp.ncq_tag = ncq_tag; +} + +/** + * + * @sci_req: + * + * Get the next SGL element from the request. - Check on which SGL element pair + * we are working - if working on SLG pair element A - advance to element B - + * else - check to see if there are more SGL element pairs for this IO request + * - if there are more SGL element pairs - advance to the next pair and return + * element A struct scu_sgl_element* + */ +static struct scu_sgl_element *scic_sds_stp_request_pio_get_next_sgl(struct scic_sds_stp_request *stp_req) +{ + struct scu_sgl_element *current_sgl; + struct scic_sds_request *sci_req = to_sci_req(stp_req); + struct scic_sds_request_pio_sgl *pio_sgl = &stp_req->type.pio.request_current; + + if (pio_sgl->sgl_set == SCU_SGL_ELEMENT_PAIR_A) { + if (pio_sgl->sgl_pair->B.address_lower == 0 && + pio_sgl->sgl_pair->B.address_upper == 0) { + current_sgl = NULL; + } else { + pio_sgl->sgl_set = SCU_SGL_ELEMENT_PAIR_B; + current_sgl = &pio_sgl->sgl_pair->B; + } + } else { + if (pio_sgl->sgl_pair->next_pair_lower == 0 && + pio_sgl->sgl_pair->next_pair_upper == 0) { + current_sgl = NULL; + } else { + u64 phys_addr; + + phys_addr = pio_sgl->sgl_pair->next_pair_upper; + phys_addr <<= 32; + phys_addr |= pio_sgl->sgl_pair->next_pair_lower; + + pio_sgl->sgl_pair = scic_request_get_virt_addr(sci_req, phys_addr); + pio_sgl->sgl_set = SCU_SGL_ELEMENT_PAIR_A; + current_sgl = &pio_sgl->sgl_pair->A; + } + } + + return current_sgl; +} + +/** + * + * @sci_req: + * @completion_code: + * + * This method processes a TC completion. The expected TC completion is for + * the transmission of the H2D register FIS containing the SATA/STP non-data + * request. This method always successfully processes the TC completion. + * SCI_SUCCESS This value is always returned. 
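+ *
+ * On SCU_TASK_DONE_GOOD the request advances to the AWAIT_D2H sub-state to
+ * wait for the device-to-host register FIS; any other completion status
+ * completes the request with a controller specific IO error.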
+ */ +static enum sci_status scic_sds_stp_request_non_data_await_h2d_tc_completion_handler( + struct scic_sds_request *sci_req, + u32 completion_code) +{ + switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): + scic_sds_request_set_status( + sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS + ); + + sci_base_state_machine_change_state( + &sci_req->started_substate_machine, + SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_D2H_SUBSTATE + ); + break; + + default: + /* + * All other completion status cause the IO to be complete. If a NAK + * was received, then it is up to the user to retry the request. */ + scic_sds_request_set_status( + sci_req, + SCU_NORMALIZE_COMPLETION_STATUS(completion_code), + SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR + ); + + sci_base_state_machine_change_state( + &sci_req->state_machine, SCI_BASE_REQUEST_STATE_COMPLETED); + break; + } + + return SCI_SUCCESS; +} + +/** + * + * @request: This parameter specifies the request for which a frame has been + * received. + * @frame_index: This parameter specifies the index of the frame that has been + * received. + * + * This method processes frames received from the target while waiting for a + * device to host register FIS. If a non-register FIS is received during this + * time, it is treated as a protocol violation from an IO perspective. Indicate + * if the received frame was processed successfully. + */ +static enum sci_status scic_sds_stp_request_non_data_await_d2h_frame_handler( + struct scic_sds_request *sci_req, + u32 frame_index) +{ + enum sci_status status; + struct dev_to_host_fis *frame_header; + u32 *frame_buffer; + struct scic_sds_stp_request *stp_req = &sci_req->stp.req; + struct scic_sds_controller *scic = sci_req->owning_controller; + + status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control, + frame_index, + (void **)&frame_header); + + if (status != SCI_SUCCESS) { + dev_err(scic_to_dev(sci_req->owning_controller), + "%s: SCIC IO Request 0x%p could not get frame header " + "for frame index %d, status %x\n", + __func__, stp_req, frame_index, status); + + return status; + } + + switch (frame_header->fis_type) { + case FIS_REGD2H: + scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control, + frame_index, + (void **)&frame_buffer); + + scic_sds_controller_copy_sata_response(&sci_req->stp.rsp, + frame_header, + frame_buffer); + + /* The command has completed with error */ + scic_sds_request_set_status(sci_req, SCU_TASK_DONE_CHECK_RESPONSE, + SCI_FAILURE_IO_RESPONSE_VALID); + break; + + default: + dev_warn(scic_to_dev(scic), + "%s: IO Request:0x%p Frame Id:%d protocol " + "violation occurred\n", __func__, stp_req, + frame_index); + + scic_sds_request_set_status(sci_req, SCU_TASK_DONE_UNEXP_FIS, + SCI_FAILURE_PROTOCOL_VIOLATION); + break; + } + + sci_base_state_machine_change_state(&sci_req->state_machine, + SCI_BASE_REQUEST_STATE_COMPLETED); + + /* Frame has been decoded return it to the controller */ + scic_sds_controller_release_frame(scic, frame_index); + + return status; +} + +/* --------------------------------------------------------------------------- */ + +static const struct scic_sds_io_request_state_handler scic_sds_stp_request_started_non_data_substate_handler_table[] = { + [SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_H2D_COMPLETION_SUBSTATE] = { + .abort_handler = scic_sds_request_started_state_abort_handler, + .tc_completion_handler = scic_sds_stp_request_non_data_await_h2d_tc_completion_handler, + }, + 
[SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_D2H_SUBSTATE] = { + .abort_handler = scic_sds_request_started_state_abort_handler, + .frame_handler = scic_sds_stp_request_non_data_await_d2h_frame_handler, + } +}; + +static void scic_sds_stp_request_started_non_data_await_h2d_completion_enter( + void *object) +{ + struct scic_sds_request *sci_req = object; + + SET_STATE_HANDLER( + sci_req, + scic_sds_stp_request_started_non_data_substate_handler_table, + SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_H2D_COMPLETION_SUBSTATE + ); + + scic_sds_remote_device_set_working_request( + sci_req->target_device, sci_req + ); +} + +static void scic_sds_stp_request_started_non_data_await_d2h_enter(void *object) +{ + struct scic_sds_request *sci_req = object; + + SET_STATE_HANDLER( + sci_req, + scic_sds_stp_request_started_non_data_substate_handler_table, + SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_D2H_SUBSTATE + ); +} + +/* --------------------------------------------------------------------------- */ + +static const struct sci_base_state scic_sds_stp_request_started_non_data_substate_table[] = { + [SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_H2D_COMPLETION_SUBSTATE] = { + .enter_state = scic_sds_stp_request_started_non_data_await_h2d_completion_enter, + }, + [SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_D2H_SUBSTATE] = { + .enter_state = scic_sds_stp_request_started_non_data_await_d2h_enter, + }, +}; + +enum sci_status scic_sds_stp_non_data_request_construct(struct scic_sds_request *sci_req) +{ + struct scic_sds_stp_request *stp_req = &sci_req->stp.req; + + scic_sds_stp_non_ncq_request_construct(sci_req); + + /* Build the STP task context structure */ + scu_stp_raw_request_construct_task_context(stp_req, sci_req->task_context_buffer); + + sci_base_state_machine_construct(&sci_req->started_substate_machine, + sci_req, + scic_sds_stp_request_started_non_data_substate_table, + SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_H2D_COMPLETION_SUBSTATE); + + return SCI_SUCCESS; +} + +#define SCU_MAX_FRAME_BUFFER_SIZE 0x400 /* 1K is the maximum SCU frame data payload */ + +/* transmit DATA_FIS from (current sgl + offset) for input + * parameter length. current sgl and offset is alreay stored in the IO request + */ +static enum sci_status scic_sds_stp_request_pio_data_out_trasmit_data_frame( + struct scic_sds_request *sci_req, + u32 length) +{ + struct scic_sds_controller *scic = sci_req->owning_controller; + struct scic_sds_stp_request *stp_req = &sci_req->stp.req; + struct scu_task_context *task_context; + struct scu_sgl_element *current_sgl; + + /* Recycle the TC and reconstruct it for sending out DATA FIS containing + * for the data from current_sgl+offset for the input length + */ + task_context = scic_sds_controller_get_task_context_buffer(scic, + sci_req->io_tag); + + if (stp_req->type.pio.request_current.sgl_set == SCU_SGL_ELEMENT_PAIR_A) + current_sgl = &stp_req->type.pio.request_current.sgl_pair->A; + else + current_sgl = &stp_req->type.pio.request_current.sgl_pair->B; + + /* update the TC */ + task_context->command_iu_upper = current_sgl->address_upper; + task_context->command_iu_lower = current_sgl->address_lower; + task_context->transfer_length_bytes = length; + task_context->type.stp.fis_type = FIS_DATA; + + /* send the new TC out. 
 */
+	return scic_controller_continue_io(sci_req);
+}
+
+static enum sci_status scic_sds_stp_request_pio_data_out_transmit_data(struct scic_sds_request *sci_req)
+{
+
+	struct scu_sgl_element *current_sgl;
+	u32 sgl_offset;
+	u32 remaining_bytes_in_current_sgl = 0;
+	enum sci_status status = SCI_SUCCESS;
+	struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
+
+	sgl_offset = stp_req->type.pio.request_current.sgl_offset;
+
+	if (stp_req->type.pio.request_current.sgl_set == SCU_SGL_ELEMENT_PAIR_A) {
+		current_sgl = &(stp_req->type.pio.request_current.sgl_pair->A);
+		remaining_bytes_in_current_sgl = stp_req->type.pio.request_current.sgl_pair->A.length - sgl_offset;
+	} else {
+		current_sgl = &(stp_req->type.pio.request_current.sgl_pair->B);
+		remaining_bytes_in_current_sgl = stp_req->type.pio.request_current.sgl_pair->B.length - sgl_offset;
+	}
+
+
+	if (stp_req->type.pio.pio_transfer_bytes > 0) {
+		if (stp_req->type.pio.pio_transfer_bytes >= remaining_bytes_in_current_sgl) {
+			/* recycle the TC and send the H2D Data FIS from (current sgl + sgl_offset) and length = remaining_bytes_in_current_sgl */
+			status = scic_sds_stp_request_pio_data_out_trasmit_data_frame(sci_req, remaining_bytes_in_current_sgl);
+			if (status == SCI_SUCCESS) {
+				stp_req->type.pio.pio_transfer_bytes -= remaining_bytes_in_current_sgl;
+
+				/* update the current sgl, sgl_offset and save for future */
+				current_sgl = scic_sds_stp_request_pio_get_next_sgl(stp_req);
+				sgl_offset = 0;
+			}
+		} else if (stp_req->type.pio.pio_transfer_bytes < remaining_bytes_in_current_sgl) {
+			/* recycle the TC and send the H2D Data FIS from (current sgl + sgl_offset) and length = type.pio.pio_transfer_bytes */
+			status = scic_sds_stp_request_pio_data_out_trasmit_data_frame(sci_req, stp_req->type.pio.pio_transfer_bytes);
+
+			if (status == SCI_SUCCESS) {
+				/* Sgl offset will be adjusted and saved for future */
+				sgl_offset += stp_req->type.pio.pio_transfer_bytes;
+				current_sgl->address_lower += stp_req->type.pio.pio_transfer_bytes;
+				stp_req->type.pio.pio_transfer_bytes = 0;
+			}
+		}
+	}
+
+	if (status == SCI_SUCCESS) {
+		stp_req->type.pio.request_current.sgl_offset = sgl_offset;
+	}
+
+	return status;
+}
+
+/**
+ *
+ * @stp_req: The request that is used for the SGL processing.
+ * @data_buf: The buffer of data to be copied.
+ * @len: The length of the data transfer.
+ *
+ * Copy the data from the buffer, for the length specified, to the data region
+ * described by the IO request's SGL. enum sci_status
+ */
+static enum sci_status
+scic_sds_stp_request_pio_data_in_copy_data_buffer(struct scic_sds_stp_request *stp_req,
+						  u8 *data_buf, u32 len)
+{
+	struct scic_sds_request *sci_req;
+	struct isci_request *ireq;
+	u8 *src_addr;
+	int copy_len;
+	struct sas_task *task;
+	struct scatterlist *sg;
+	void *kaddr;
+	int total_len = len;
+
+	sci_req = to_sci_req(stp_req);
+	ireq = sci_req_to_ireq(sci_req);
+	task = isci_request_access_task(ireq);
+	src_addr = data_buf;
+
+	if (task->num_scatter > 0) {
+		sg = task->scatter;
+
+		while (total_len > 0) {
+			struct page *page = sg_page(sg);
+
+			copy_len = min_t(int, total_len, sg_dma_len(sg));
+			kaddr = kmap_atomic(page, KM_IRQ0);
+			memcpy(kaddr + sg->offset, src_addr, copy_len);
+			kunmap_atomic(kaddr, KM_IRQ0);
+			total_len -= copy_len;
+			src_addr += copy_len;
+			sg = sg_next(sg);
+		}
+	} else {
+		BUG_ON(task->total_xfer_len < total_len);
+		memcpy(task->scatter, src_addr, total_len);
+	}
+
+	return SCI_SUCCESS;
+}
+
+/**
+ *
+ * @sci_req: The PIO DATA IN request that is to receive the data.
+ * @data_buffer: The buffer to copy from. + * + * Copy the data buffer to the io request data region. enum sci_status + */ +static enum sci_status scic_sds_stp_request_pio_data_in_copy_data( + struct scic_sds_stp_request *sci_req, + u8 *data_buffer) +{ + enum sci_status status; + + /* + * If there is less than 1K remaining in the transfer request + * copy just the data for the transfer */ + if (sci_req->type.pio.pio_transfer_bytes < SCU_MAX_FRAME_BUFFER_SIZE) { + status = scic_sds_stp_request_pio_data_in_copy_data_buffer( + sci_req, data_buffer, sci_req->type.pio.pio_transfer_bytes); + + if (status == SCI_SUCCESS) + sci_req->type.pio.pio_transfer_bytes = 0; + } else { + /* We are transfering the whole frame so copy */ + status = scic_sds_stp_request_pio_data_in_copy_data_buffer( + sci_req, data_buffer, SCU_MAX_FRAME_BUFFER_SIZE); + + if (status == SCI_SUCCESS) + sci_req->type.pio.pio_transfer_bytes -= SCU_MAX_FRAME_BUFFER_SIZE; + } + + return status; +} + +/** + * + * @sci_req: + * @completion_code: + * + * enum sci_status + */ +static enum sci_status scic_sds_stp_request_pio_await_h2d_completion_tc_completion_handler( + struct scic_sds_request *sci_req, + u32 completion_code) +{ + enum sci_status status = SCI_SUCCESS; + + switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): + scic_sds_request_set_status( + sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS + ); + + sci_base_state_machine_change_state( + &sci_req->started_substate_machine, + SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE + ); + break; + + default: + /* + * All other completion status cause the IO to be complete. If a NAK + * was received, then it is up to the user to retry the request. */ + scic_sds_request_set_status( + sci_req, + SCU_NORMALIZE_COMPLETION_STATUS(completion_code), + SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR + ); + + sci_base_state_machine_change_state( + &sci_req->state_machine, + SCI_BASE_REQUEST_STATE_COMPLETED + ); + break; + } + + return status; +} + +static enum sci_status scic_sds_stp_request_pio_await_frame_frame_handler(struct scic_sds_request *sci_req, + u32 frame_index) +{ + struct scic_sds_controller *scic = sci_req->owning_controller; + struct scic_sds_stp_request *stp_req = &sci_req->stp.req; + struct isci_request *ireq = sci_req_to_ireq(sci_req); + struct sas_task *task = isci_request_access_task(ireq); + struct dev_to_host_fis *frame_header; + enum sci_status status; + u32 *frame_buffer; + + status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control, + frame_index, + (void **)&frame_header); + + if (status != SCI_SUCCESS) { + dev_err(scic_to_dev(scic), + "%s: SCIC IO Request 0x%p could not get frame header " + "for frame index %d, status %x\n", + __func__, stp_req, frame_index, status); + return status; + } + + switch (frame_header->fis_type) { + case FIS_PIO_SETUP: + /* Get from the frame buffer the PIO Setup Data */ + scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control, + frame_index, + (void **)&frame_buffer); + + /* Get the data from the PIO Setup The SCU Hardware returns + * first word in the frame_header and the rest of the data is in + * the frame buffer so we need to back up one dword + */ + + /* transfer_count: first 16bits in the 4th dword */ + stp_req->type.pio.pio_transfer_bytes = frame_buffer[3] & 0xffff; + + /* ending_status: 4th byte in the 3rd dword */ + stp_req->type.pio.ending_status = (frame_buffer[2] >> 24) & 0xff; + + scic_sds_controller_copy_sata_response(&sci_req->stp.rsp, + frame_header, + 
frame_buffer); + + sci_req->stp.rsp.status = stp_req->type.pio.ending_status; + + /* The next state is dependent on whether the + * request was PIO Data-in or Data out + */ + if (task->data_dir == DMA_FROM_DEVICE) { + sci_base_state_machine_change_state(&sci_req->started_substate_machine, + SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_DATA_SUBSTATE); + } else if (task->data_dir == DMA_TO_DEVICE) { + /* Transmit data */ + status = scic_sds_stp_request_pio_data_out_transmit_data(sci_req); + if (status != SCI_SUCCESS) + break; + sci_base_state_machine_change_state(&sci_req->started_substate_machine, + SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_OUT_TRANSMIT_DATA_SUBSTATE); + } + break; + case FIS_SETDEVBITS: + sci_base_state_machine_change_state(&sci_req->started_substate_machine, + SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE); + break; + case FIS_REGD2H: + if (frame_header->status & ATA_BUSY) { + /* Now why is the drive sending a D2H Register FIS when + * it is still busy? Do nothing since we are still in + * the right state. + */ + dev_dbg(scic_to_dev(scic), + "%s: SCIC PIO Request 0x%p received " + "D2H Register FIS with BSY status " + "0x%x\n", __func__, stp_req, + frame_header->status); + break; + } + + scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control, + frame_index, + (void **)&frame_buffer); + + scic_sds_controller_copy_sata_response(&sci_req->stp.req, + frame_header, + frame_buffer); + + scic_sds_request_set_status(sci_req, + SCU_TASK_DONE_CHECK_RESPONSE, + SCI_FAILURE_IO_RESPONSE_VALID); + + sci_base_state_machine_change_state(&sci_req->state_machine, + SCI_BASE_REQUEST_STATE_COMPLETED); + break; + default: + /* FIXME: what do we do here? */ + break; + } + + /* Frame is decoded return it to the controller */ + scic_sds_controller_release_frame(scic, frame_index); + + return status; +} + +static enum sci_status scic_sds_stp_request_pio_data_in_await_data_frame_handler(struct scic_sds_request *sci_req, + u32 frame_index) +{ + enum sci_status status; + struct dev_to_host_fis *frame_header; + struct sata_fis_data *frame_buffer; + struct scic_sds_stp_request *stp_req = &sci_req->stp.req; + struct scic_sds_controller *scic = sci_req->owning_controller; + + status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control, + frame_index, + (void **)&frame_header); + + if (status != SCI_SUCCESS) { + dev_err(scic_to_dev(scic), + "%s: SCIC IO Request 0x%p could not get frame header " + "for frame index %d, status %x\n", + __func__, stp_req, frame_index, status); + return status; + } + + if (frame_header->fis_type == FIS_DATA) { + if (stp_req->type.pio.request_current.sgl_pair == NULL) { + sci_req->saved_rx_frame_index = frame_index; + stp_req->type.pio.pio_transfer_bytes = 0; + } else { + scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control, + frame_index, + (void **)&frame_buffer); + + status = scic_sds_stp_request_pio_data_in_copy_data(stp_req, + (u8 *)frame_buffer); + + /* Frame is decoded return it to the controller */ + scic_sds_controller_release_frame(scic, frame_index); + } + + /* Check for the end of the transfer, are there more + * bytes remaining for this data transfer + */ + if (status != SCI_SUCCESS || + stp_req->type.pio.pio_transfer_bytes != 0) + return status; + + if ((stp_req->type.pio.ending_status & ATA_BUSY) == 0) { + scic_sds_request_set_status(sci_req, + SCU_TASK_DONE_CHECK_RESPONSE, + SCI_FAILURE_IO_RESPONSE_VALID); + + sci_base_state_machine_change_state(&sci_req->state_machine, + SCI_BASE_REQUEST_STATE_COMPLETED); + } else { + 
sci_base_state_machine_change_state(&sci_req->started_substate_machine, + SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE); + } + } else { + dev_err(scic_to_dev(scic), + "%s: SCIC PIO Request 0x%p received frame %d " + "with fis type 0x%02x when expecting a data " + "fis.\n", __func__, stp_req, frame_index, + frame_header->fis_type); + + scic_sds_request_set_status(sci_req, + SCU_TASK_DONE_GOOD, + SCI_FAILURE_IO_REQUIRES_SCSI_ABORT); + + sci_base_state_machine_change_state(&sci_req->state_machine, + SCI_BASE_REQUEST_STATE_COMPLETED); + + /* Frame is decoded return it to the controller */ + scic_sds_controller_release_frame(scic, frame_index); + } + + return status; +} + + +/** + * + * @sci_req: + * @completion_code: + * + * enum sci_status + */ +static enum sci_status scic_sds_stp_request_pio_data_out_await_data_transmit_completion_tc_completion_handler( + + struct scic_sds_request *sci_req, + u32 completion_code) +{ + enum sci_status status = SCI_SUCCESS; + bool all_frames_transferred = false; + struct scic_sds_stp_request *stp_req = &sci_req->stp.req; + + switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): + /* Transmit data */ + if (stp_req->type.pio.pio_transfer_bytes != 0) { + status = scic_sds_stp_request_pio_data_out_transmit_data(sci_req); + if (status == SCI_SUCCESS) { + if (stp_req->type.pio.pio_transfer_bytes == 0) + all_frames_transferred = true; + } + } else if (stp_req->type.pio.pio_transfer_bytes == 0) { + /* + * this will happen if the all data is written at the + * first time after the pio setup fis is received + */ + all_frames_transferred = true; + } + + /* all data transferred. */ + if (all_frames_transferred) { + /* + * Change the state to SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_FRAME_SUBSTATE + * and wait for PIO_SETUP fis / or D2H REg fis. */ + sci_base_state_machine_change_state( + &sci_req->started_substate_machine, + SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE + ); + } + break; + + default: + /* + * All other completion status cause the IO to be complete. If a NAK + * was received, then it is up to the user to retry the request. */ + scic_sds_request_set_status( + sci_req, + SCU_NORMALIZE_COMPLETION_STATUS(completion_code), + SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR + ); + + sci_base_state_machine_change_state( + &sci_req->state_machine, + SCI_BASE_REQUEST_STATE_COMPLETED + ); + break; + } + + return status; +} + +/** + * + * @request: This is the request which is receiving the event. + * @event_code: This is the event code that the request on which the request is + * expected to take action. + * + * This method will handle any link layer events while waiting for the data + * frame. enum sci_status SCI_SUCCESS SCI_FAILURE + */ +static enum sci_status scic_sds_stp_request_pio_data_in_await_data_event_handler( + struct scic_sds_request *request, + u32 event_code) +{ + enum sci_status status; + + switch (scu_get_event_specifier(event_code)) { + case SCU_TASK_DONE_CRC_ERR << SCU_EVENT_SPECIFIC_CODE_SHIFT: + /* + * We are waiting for data and the SCU has R_ERR the data frame. 
+ * Go back to waiting for the D2H Register FIS */ + sci_base_state_machine_change_state( + &request->started_substate_machine, + SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE + ); + + status = SCI_SUCCESS; + break; + + default: + dev_err(scic_to_dev(request->owning_controller), + "%s: SCIC PIO Request 0x%p received unexpected " + "event 0x%08x\n", + __func__, request, event_code); + + /* / @todo Should we fail the PIO request when we get an unexpected event? */ + status = SCI_FAILURE; + break; + } + + return status; +} + +/* --------------------------------------------------------------------------- */ + +static const struct scic_sds_io_request_state_handler scic_sds_stp_request_started_pio_substate_handler_table[] = { + [SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_H2D_COMPLETION_SUBSTATE] = { + .abort_handler = scic_sds_request_started_state_abort_handler, + .tc_completion_handler = scic_sds_stp_request_pio_await_h2d_completion_tc_completion_handler, + }, + [SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE] = { + .abort_handler = scic_sds_request_started_state_abort_handler, + .frame_handler = scic_sds_stp_request_pio_await_frame_frame_handler + }, + [SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_DATA_SUBSTATE] = { + .abort_handler = scic_sds_request_started_state_abort_handler, + .event_handler = scic_sds_stp_request_pio_data_in_await_data_event_handler, + .frame_handler = scic_sds_stp_request_pio_data_in_await_data_frame_handler + }, + [SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_OUT_TRANSMIT_DATA_SUBSTATE] = { + .abort_handler = scic_sds_request_started_state_abort_handler, + .tc_completion_handler = scic_sds_stp_request_pio_data_out_await_data_transmit_completion_tc_completion_handler, + } +}; + +static void scic_sds_stp_request_started_pio_await_h2d_completion_enter( + void *object) +{ + struct scic_sds_request *sci_req = object; + + SET_STATE_HANDLER( + sci_req, + scic_sds_stp_request_started_pio_substate_handler_table, + SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_H2D_COMPLETION_SUBSTATE + ); + + scic_sds_remote_device_set_working_request( + sci_req->target_device, sci_req); +} + +static void scic_sds_stp_request_started_pio_await_frame_enter(void *object) +{ + struct scic_sds_request *sci_req = object; + + SET_STATE_HANDLER( + sci_req, + scic_sds_stp_request_started_pio_substate_handler_table, + SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE + ); +} + +static void scic_sds_stp_request_started_pio_data_in_await_data_enter( + void *object) +{ + struct scic_sds_request *sci_req = object; + + SET_STATE_HANDLER( + sci_req, + scic_sds_stp_request_started_pio_substate_handler_table, + SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_DATA_SUBSTATE + ); +} + +static void scic_sds_stp_request_started_pio_data_out_transmit_data_enter( + void *object) +{ + struct scic_sds_request *sci_req = object; + + SET_STATE_HANDLER( + sci_req, + scic_sds_stp_request_started_pio_substate_handler_table, + SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_OUT_TRANSMIT_DATA_SUBSTATE + ); +} + +/* --------------------------------------------------------------------------- */ + +static const struct sci_base_state scic_sds_stp_request_started_pio_substate_table[] = { + [SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_H2D_COMPLETION_SUBSTATE] = { + .enter_state = scic_sds_stp_request_started_pio_await_h2d_completion_enter, + }, + [SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE] = { + .enter_state = scic_sds_stp_request_started_pio_await_frame_enter, + }, + [SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_DATA_SUBSTATE] = { 
+ .enter_state = scic_sds_stp_request_started_pio_data_in_await_data_enter, + }, + [SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_OUT_TRANSMIT_DATA_SUBSTATE] = { + .enter_state = scic_sds_stp_request_started_pio_data_out_transmit_data_enter, + } +}; + +enum sci_status +scic_sds_stp_pio_request_construct(struct scic_sds_request *sci_req, + bool copy_rx_frame) +{ + struct scic_sds_stp_request *stp_req = &sci_req->stp.req; + struct scic_sds_stp_pio_request *pio = &stp_req->type.pio; + + scic_sds_stp_non_ncq_request_construct(sci_req); + + scu_stp_raw_request_construct_task_context(stp_req, + sci_req->task_context_buffer); + + pio->current_transfer_bytes = 0; + pio->ending_error = 0; + pio->ending_status = 0; + + pio->request_current.sgl_offset = 0; + pio->request_current.sgl_set = SCU_SGL_ELEMENT_PAIR_A; + + if (copy_rx_frame) { + scic_sds_request_build_sgl(sci_req); + /* Since the IO request copy of the TC contains the same data as + * the actual TC this pointer is vaild for either. + */ + pio->request_current.sgl_pair = &sci_req->task_context_buffer->sgl_pair_ab; + } else { + /* The user does not want the data copied to the SGL buffer location */ + pio->request_current.sgl_pair = NULL; + } + + sci_base_state_machine_construct(&sci_req->started_substate_machine, + sci_req, + scic_sds_stp_request_started_pio_substate_table, + SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_H2D_COMPLETION_SUBSTATE); + + return SCI_SUCCESS; +} + +static void scic_sds_stp_request_udma_complete_request( + struct scic_sds_request *request, + u32 scu_status, + enum sci_status sci_status) +{ + scic_sds_request_set_status(request, scu_status, sci_status); + sci_base_state_machine_change_state(&request->state_machine, + SCI_BASE_REQUEST_STATE_COMPLETED); +} + +static enum sci_status scic_sds_stp_request_udma_general_frame_handler(struct scic_sds_request *sci_req, + u32 frame_index) +{ + struct scic_sds_controller *scic = sci_req->owning_controller; + struct dev_to_host_fis *frame_header; + enum sci_status status; + u32 *frame_buffer; + + status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control, + frame_index, + (void **)&frame_header); + + if ((status == SCI_SUCCESS) && + (frame_header->fis_type == FIS_REGD2H)) { + scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control, + frame_index, + (void **)&frame_buffer); + + scic_sds_controller_copy_sata_response(&sci_req->stp.rsp, + frame_header, + frame_buffer); + } + + scic_sds_controller_release_frame(scic, frame_index); + + return status; +} + +static enum sci_status scic_sds_stp_request_udma_await_tc_completion_tc_completion_handler( + struct scic_sds_request *sci_req, + u32 completion_code) +{ + enum sci_status status = SCI_SUCCESS; + + switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): + scic_sds_stp_request_udma_complete_request(sci_req, + SCU_TASK_DONE_GOOD, + SCI_SUCCESS); + break; + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_FIS): + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR): + /* + * We must check ther response buffer to see if the D2H Register FIS was + * received before we got the TC completion. 
*/ + if (sci_req->stp.rsp.fis_type == FIS_REGD2H) { + scic_sds_remote_device_suspend(sci_req->target_device, + SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code))); + + scic_sds_stp_request_udma_complete_request(sci_req, + SCU_TASK_DONE_CHECK_RESPONSE, + SCI_FAILURE_IO_RESPONSE_VALID); + } else { + /* + * If we have an error completion status for the TC then we can expect a + * D2H register FIS from the device so we must change state to wait for it */ + sci_base_state_machine_change_state(&sci_req->started_substate_machine, + SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_D2H_REG_FIS_SUBSTATE); + } + break; + + /* + * / @todo Check to see if any of these completion status need to wait for + * / the device to host register fis. */ + /* / @todo We can retry the command for SCU_TASK_DONE_CMD_LL_R_ERR - this comes only for B0 */ + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_INV_FIS_LEN): + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_MAX_PLD_ERR): + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_R_ERR): + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CMD_LL_R_ERR): + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CRC_ERR): + scic_sds_remote_device_suspend(sci_req->target_device, + SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code))); + /* Fall through to the default case */ + default: + /* All other completion status cause the IO to be complete. */ + scic_sds_stp_request_udma_complete_request(sci_req, + SCU_NORMALIZE_COMPLETION_STATUS(completion_code), + SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); + break; + } + + return status; +} + +static enum sci_status scic_sds_stp_request_udma_await_d2h_reg_fis_frame_handler( + struct scic_sds_request *sci_req, + u32 frame_index) +{ + enum sci_status status; + + /* Use the general frame handler to copy the resposne data */ + status = scic_sds_stp_request_udma_general_frame_handler(sci_req, frame_index); + + if (status != SCI_SUCCESS) + return status; + + scic_sds_stp_request_udma_complete_request(sci_req, + SCU_TASK_DONE_CHECK_RESPONSE, + SCI_FAILURE_IO_RESPONSE_VALID); + + return status; +} + +/* --------------------------------------------------------------------------- */ + +static const struct scic_sds_io_request_state_handler scic_sds_stp_request_started_udma_substate_handler_table[] = { + [SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_TC_COMPLETION_SUBSTATE] = { + .abort_handler = scic_sds_request_started_state_abort_handler, + .tc_completion_handler = scic_sds_stp_request_udma_await_tc_completion_tc_completion_handler, + .frame_handler = scic_sds_stp_request_udma_general_frame_handler, + }, + [SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_D2H_REG_FIS_SUBSTATE] = { + .abort_handler = scic_sds_request_started_state_abort_handler, + .frame_handler = scic_sds_stp_request_udma_await_d2h_reg_fis_frame_handler, + }, +}; + +static void scic_sds_stp_request_started_udma_await_tc_completion_enter( + void *object) +{ + struct scic_sds_request *sci_req = object; + + SET_STATE_HANDLER( + sci_req, + scic_sds_stp_request_started_udma_substate_handler_table, + SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_TC_COMPLETION_SUBSTATE + ); +} + +/** + * + * + * This state is entered when there is an TC completion failure. The hardware + * received an unexpected condition while processing the IO request and now + * will UF the D2H register FIS to complete the IO. 
+ */ +static void scic_sds_stp_request_started_udma_await_d2h_reg_fis_enter( + void *object) +{ + struct scic_sds_request *sci_req = object; + + SET_STATE_HANDLER( + sci_req, + scic_sds_stp_request_started_udma_substate_handler_table, + SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_D2H_REG_FIS_SUBSTATE + ); +} + +/* --------------------------------------------------------------------------- */ + +static const struct sci_base_state scic_sds_stp_request_started_udma_substate_table[] = { + [SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_TC_COMPLETION_SUBSTATE] = { + .enter_state = scic_sds_stp_request_started_udma_await_tc_completion_enter, + }, + [SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_D2H_REG_FIS_SUBSTATE] = { + .enter_state = scic_sds_stp_request_started_udma_await_d2h_reg_fis_enter, + }, +}; + +enum sci_status scic_sds_stp_udma_request_construct(struct scic_sds_request *sci_req, + u32 len, + enum dma_data_direction dir) +{ + scic_sds_stp_non_ncq_request_construct(sci_req); + + scic_sds_stp_optimized_request_construct(sci_req, SCU_TASK_TYPE_DMA_IN, + len, dir); + + sci_base_state_machine_construct( + &sci_req->started_substate_machine, + sci_req, + scic_sds_stp_request_started_udma_substate_table, + SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_TC_COMPLETION_SUBSTATE + ); + + return SCI_SUCCESS; +} + +/** + * + * @sci_req: + * @completion_code: + * + * This method processes a TC completion. The expected TC completion is for + * the transmission of the H2D register FIS containing the SATA/STP non-data + * request. This method always successfully processes the TC completion. + * SCI_SUCCESS This value is always returned. + */ +static enum sci_status scic_sds_stp_request_soft_reset_await_h2d_asserted_tc_completion_handler( + struct scic_sds_request *sci_req, + u32 completion_code) +{ + switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): + scic_sds_request_set_status( + sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS + ); + + sci_base_state_machine_change_state( + &sci_req->started_substate_machine, + SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_DIAGNOSTIC_COMPLETION_SUBSTATE + ); + break; + + default: + /* + * All other completion status cause the IO to be complete. If a NAK + * was received, then it is up to the user to retry the request. */ + scic_sds_request_set_status( + sci_req, + SCU_NORMALIZE_COMPLETION_STATUS(completion_code), + SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR + ); + + sci_base_state_machine_change_state( + &sci_req->state_machine, SCI_BASE_REQUEST_STATE_COMPLETED); + break; + } + + return SCI_SUCCESS; +} + +/** + * + * @sci_req: + * @completion_code: + * + * This method processes a TC completion. The expected TC completion is for + * the transmission of the H2D register FIS containing the SATA/STP non-data + * request. This method always successfully processes the TC completion. + * SCI_SUCCESS This value is always returned. + */ +static enum sci_status scic_sds_stp_request_soft_reset_await_h2d_diagnostic_tc_completion_handler( + struct scic_sds_request *sci_req, + u32 completion_code) +{ + switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): + scic_sds_request_set_status( + sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS + ); + + sci_base_state_machine_change_state( + &sci_req->started_substate_machine, + SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_D2H_RESPONSE_FRAME_SUBSTATE + ); + break; + + default: + /* + * All other completion status cause the IO to be complete. 
If a NAK + * was received, then it is up to the user to retry the request. */ + scic_sds_request_set_status( + sci_req, + SCU_NORMALIZE_COMPLETION_STATUS(completion_code), + SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR + ); + + sci_base_state_machine_change_state(&sci_req->state_machine, + SCI_BASE_REQUEST_STATE_COMPLETED); + break; + } + + return SCI_SUCCESS; +} + +/** + * + * @request: This parameter specifies the request for which a frame has been + * received. + * @frame_index: This parameter specifies the index of the frame that has been + * received. + * + * This method processes frames received from the target while waiting for a + * device to host register FIS. If a non-register FIS is received during this + * time, it is treated as a protocol violation from an IO perspective. Indicate + * if the received frame was processed successfully. + */ +static enum sci_status scic_sds_stp_request_soft_reset_await_d2h_frame_handler( + struct scic_sds_request *sci_req, + u32 frame_index) +{ + enum sci_status status; + struct dev_to_host_fis *frame_header; + u32 *frame_buffer; + struct scic_sds_stp_request *stp_req = &sci_req->stp.req; + struct scic_sds_controller *scic = sci_req->owning_controller; + + status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control, + frame_index, + (void **)&frame_header); + if (status != SCI_SUCCESS) { + dev_err(scic_to_dev(scic), + "%s: SCIC IO Request 0x%p could not get frame header " + "for frame index %d, status %x\n", + __func__, stp_req, frame_index, status); + return status; + } + + switch (frame_header->fis_type) { + case FIS_REGD2H: + scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control, + frame_index, + (void **)&frame_buffer); + + scic_sds_controller_copy_sata_response(&sci_req->stp.rsp, + frame_header, + frame_buffer); + + /* The command has completed with error */ + scic_sds_request_set_status(sci_req, + SCU_TASK_DONE_CHECK_RESPONSE, + SCI_FAILURE_IO_RESPONSE_VALID); + break; + + default: + dev_warn(scic_to_dev(scic), + "%s: IO Request:0x%p Frame Id:%d protocol " + "violation occurred\n", __func__, stp_req, + frame_index); + + scic_sds_request_set_status(sci_req, SCU_TASK_DONE_UNEXP_FIS, + SCI_FAILURE_PROTOCOL_VIOLATION); + break; + } + + sci_base_state_machine_change_state(&sci_req->state_machine, + SCI_BASE_REQUEST_STATE_COMPLETED); + + /* Frame has been decoded return it to the controller */ + scic_sds_controller_release_frame(scic, frame_index); + + return status; +} + +/* --------------------------------------------------------------------------- */ + +static const struct scic_sds_io_request_state_handler scic_sds_stp_request_started_soft_reset_substate_handler_table[] = { + [SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_ASSERTED_COMPLETION_SUBSTATE] = { + .abort_handler = scic_sds_request_started_state_abort_handler, + .tc_completion_handler = scic_sds_stp_request_soft_reset_await_h2d_asserted_tc_completion_handler, + }, + [SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_DIAGNOSTIC_COMPLETION_SUBSTATE] = { + .abort_handler = scic_sds_request_started_state_abort_handler, + .tc_completion_handler = scic_sds_stp_request_soft_reset_await_h2d_diagnostic_tc_completion_handler, + }, + [SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_D2H_RESPONSE_FRAME_SUBSTATE] = { + .abort_handler = scic_sds_request_started_state_abort_handler, + .frame_handler = scic_sds_stp_request_soft_reset_await_d2h_frame_handler, + }, +}; + +static void scic_sds_stp_request_started_soft_reset_await_h2d_asserted_completion_enter( + void *object) +{ + 
struct scic_sds_request *sci_req = object; + + SET_STATE_HANDLER( + sci_req, + scic_sds_stp_request_started_soft_reset_substate_handler_table, + SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_ASSERTED_COMPLETION_SUBSTATE + ); + + scic_sds_remote_device_set_working_request( + sci_req->target_device, sci_req + ); +} + +static void scic_sds_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter( + void *object) +{ + struct scic_sds_request *sci_req = object; + struct scu_task_context *task_context; + struct host_to_dev_fis *h2d_fis; + enum sci_status status; + + /* Clear the SRST bit */ + h2d_fis = &sci_req->stp.cmd; + h2d_fis->control = 0; + + /* Clear the TC control bit */ + task_context = scic_sds_controller_get_task_context_buffer( + sci_req->owning_controller, sci_req->io_tag); + task_context->control_frame = 0; + + status = scic_controller_continue_io(sci_req); + if (status == SCI_SUCCESS) { + SET_STATE_HANDLER( + sci_req, + scic_sds_stp_request_started_soft_reset_substate_handler_table, + SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_DIAGNOSTIC_COMPLETION_SUBSTATE + ); + } +} + +static void scic_sds_stp_request_started_soft_reset_await_d2h_response_enter( + void *object) +{ + struct scic_sds_request *sci_req = object; + + SET_STATE_HANDLER( + sci_req, + scic_sds_stp_request_started_soft_reset_substate_handler_table, + SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_D2H_RESPONSE_FRAME_SUBSTATE + ); +} + +static const struct sci_base_state scic_sds_stp_request_started_soft_reset_substate_table[] = { + [SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_ASSERTED_COMPLETION_SUBSTATE] = { + .enter_state = scic_sds_stp_request_started_soft_reset_await_h2d_asserted_completion_enter, + }, + [SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_DIAGNOSTIC_COMPLETION_SUBSTATE] = { + .enter_state = scic_sds_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter, + }, + [SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_D2H_RESPONSE_FRAME_SUBSTATE] = { + .enter_state = scic_sds_stp_request_started_soft_reset_await_d2h_response_enter, + }, +}; + +enum sci_status scic_sds_stp_soft_reset_request_construct(struct scic_sds_request *sci_req) +{ + struct scic_sds_stp_request *stp_req = &sci_req->stp.req; + + scic_sds_stp_non_ncq_request_construct(sci_req); + + /* Build the STP task context structure */ + scu_stp_raw_request_construct_task_context(stp_req, sci_req->task_context_buffer); + + sci_base_state_machine_construct(&sci_req->started_substate_machine, + sci_req, + scic_sds_stp_request_started_soft_reset_substate_table, + SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_ASSERTED_COMPLETION_SUBSTATE); + + return SCI_SUCCESS; +} diff --git a/drivers/scsi/isci/stp_request.h b/drivers/scsi/isci/stp_request.h new file mode 100644 index 00000000000..eb14874ceda --- /dev/null +++ b/drivers/scsi/isci/stp_request.h @@ -0,0 +1,195 @@ +/* + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of Intel Corporation nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _SCIC_SDS_STP_REQUEST_T_
+#define _SCIC_SDS_STP_REQUEST_T_
+
+#include <linux/dma-mapping.h>
+#include <scsi/sas.h>
+
+struct scic_sds_stp_request {
+	union {
+		u32 ncq;
+
+		u32 udma;
+
+		struct scic_sds_stp_pio_request {
+			/**
+			 * Total transfer for the entire PIO request recorded at request construction
+			 * time.
+			 *
+			 * @todo Should we just decrement this value for each byte of data transmitted
+			 * or received to eliminate the current_transfer_bytes field?
+			 */
+			u32 total_transfer_bytes;
+
+			/**
+			 * Total number of bytes received/transmitted in data frames since the start
+			 * of the IO request.  At the end of the IO request this should equal the
+			 * total_transfer_bytes.
+			 */
+			u32 current_transfer_bytes;
+
+			/**
+			 * The number of bytes requested in the PIO setup.
+			 */
+			u32 pio_transfer_bytes;
+
+			/**
+			 * PIO Setup ending status value to tell us if we need to wait for another FIS
+			 * or if the transfer is complete.  On the receipt of a D2H FIS this will be
+			 * the status field of that FIS.
+			 */
+			u8 ending_status;
+
+			/**
+			 * On receipt of a D2H FIS this will be the ending error field if the
+			 * ending_status has the SATA_STATUS_ERR bit set.
+			 */
+			u8 ending_error;
+
+			struct scic_sds_request_pio_sgl {
+				struct scu_sgl_element_pair *sgl_pair;
+				u8 sgl_set;
+				u32 sgl_offset;
+			} request_current;
+		} pio;
+
+		struct {
+			/**
+			 * The number of bytes requested in the PIO setup before CDB data frame.
+			 */
+			u32 device_preferred_cdb_length;
+		} packet;
+	} type;
+};
+
+/**
+ * enum scic_sds_stp_request_started_udma_substates - This enumeration depicts
+ *    the various sub-states associated with a SATA/STP UDMA protocol operation.
+ *
+ *
+ */
+enum scic_sds_stp_request_started_udma_substates {
+	SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_TC_COMPLETION_SUBSTATE,
+	SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_D2H_REG_FIS_SUBSTATE,
+};
+
+/**
+ * enum scic_sds_stp_request_started_non_data_substates - This enumeration
+ *    depicts the various sub-states associated with a SATA/STP non-data
+ *    protocol operation.
+ *
+ *
+ */
+enum scic_sds_stp_request_started_non_data_substates {
+	SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_H2D_COMPLETION_SUBSTATE,
+	SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_D2H_SUBSTATE,
+};
+
+/**
+ * enum scic_sds_stp_request_started_soft_reset_substates - This enumeration
+ *    depicts the various sub-states associated with a SATA/STP soft reset
+ *    operation.
+ *
+ *
+ */
+enum scic_sds_stp_request_started_soft_reset_substates {
+	SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_ASSERTED_COMPLETION_SUBSTATE,
+	SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_DIAGNOSTIC_COMPLETION_SUBSTATE,
+	SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_D2H_RESPONSE_FRAME_SUBSTATE,
+};
+
+/* This is the enumeration of the SATA PIO DATA IN started substate machine. */
+enum _scic_sds_stp_request_started_pio_substates {
+	/**
+	 * While in this state the IO request object is waiting for the TC completion
+	 * notification for the H2D Register FIS
+	 */
+	SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_H2D_COMPLETION_SUBSTATE,
+
+	/**
+	 * While in this state the IO request object is waiting for either a PIO Setup
+	 * FIS or a D2H register FIS.  The type of frame received is based on the
+	 * result of the prior frame and line conditions.
+	 */
+	SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE,
+
+	/**
+	 * While in this state the IO request object is waiting for a DATA frame from
+	 * the device.
+	 */
+	SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_DATA_SUBSTATE,
+
+	/**
+	 * While in this state the IO request object is waiting to transmit the next data
+	 * frame to the device.
+ */ + SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_OUT_TRANSMIT_DATA_SUBSTATE, +}; + +struct scic_sds_request; + +enum sci_status scic_sds_stp_pio_request_construct(struct scic_sds_request *sci_req, + bool copy_rx_frame); +enum sci_status scic_sds_stp_udma_request_construct(struct scic_sds_request *sci_req, + u32 transfer_length, + enum dma_data_direction dir); +enum sci_status scic_sds_stp_non_data_request_construct(struct scic_sds_request *sci_req); +enum sci_status scic_sds_stp_soft_reset_request_construct(struct scic_sds_request *sci_req); +enum sci_status scic_sds_stp_ncq_request_construct(struct scic_sds_request *sci_req, + u32 transfer_length, + enum dma_data_direction dir); +#endif /* _SCIC_SDS_STP_REQUEST_T_ */ diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c index 2a860388192..078e2ee4f4b 100644 --- a/drivers/scsi/isci/task.c +++ b/drivers/scsi/isci/task.c @@ -56,15 +56,12 @@ #include <linux/completion.h> #include <linux/irqflags.h> #include "sas.h" -#include "scic_task_request.h" -#include "scic_io_request.h" #include "remote_device.h" #include "remote_node_context.h" #include "isci.h" #include "request.h" #include "sata.h" #include "task.h" -#include "scic_sds_request.h" #include "timers.h" /** diff --git a/drivers/scsi/isci/unsolicited_frame_control.c b/drivers/scsi/isci/unsolicited_frame_control.c index 8d68dcc52bc..12e67634638 100644 --- a/drivers/scsi/isci/unsolicited_frame_control.c +++ b/drivers/scsi/isci/unsolicited_frame_control.c @@ -56,7 +56,6 @@ #include "host.h" #include "unsolicited_frame_control.h" #include "registers.h" -#include "sci_util.h" /** * This method will program the unsolicited frames (UFs) into the UF address @@ -93,10 +92,8 @@ static void scic_sds_unsolicited_frame_control_construct_frames( for (index = 0; index < unused_uf_header_entries; index++) { uf = &uf_control->buffers.array[index]; - sci_cb_make_physical_address( - uf_control->address_table.array[index], 0, 0 - ); uf->buffer = NULL; + uf_control->address_table.array[index] = 0; uf->header = &uf_control->headers.array[index]; uf->state = UNSOLICITED_FRAME_EMPTY; } -- cgit v1.2.3-70-g09d2 From f139303d17c47eff4c5b5407dee0a6d43e8fd146 Mon Sep 17 00:00:00 2001 From: Dan Williams <dan.j.williams@intel.com> Date: Tue, 10 May 2011 02:28:47 -0700 Subject: isci: merge ssp task management substates into primary state machine Remove usage of the request substate machine for ssp task management requests identified by: ireq->ttype == tmf_task && dev->dev_type == SAS_END_DEV; The only routine that checks the base 'started' state is scic_sds_io_request_tc_completion which calls the substate machine handler if we are not in the 'started' state or we are 'started' and no substate machine is defined. This routine requires no conversion because we have transitioned out of 'started' and the substate routine will be called naturally as a result. There are also no side effects of this conversion on exiting the 'started', state because it only stops the substate machine, which is no longer relevant for this transaction type. 
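In condensed form, the conversion selects the task-management sub-states from the primary state machine when the 'started' state is entered, keyed off the request and device type named above. The sketch below is reduced from the request.c hunk later in this patch; unrelated handler setup is elided, and the helpers sci_req_to_ireq() and sci_dev_to_domain() are the ones the driver already provides.

static void scic_sds_request_started_state_enter(void *object)
{
	struct scic_sds_request *sci_req = object;
	struct isci_request *ireq = sci_req_to_ireq(sci_req);
	struct domain_device *dev = sci_dev_to_domain(sci_req->target_device);

	/* ... SET_STATE_HANDLER() and other entry setup elided ... */

	/* An SSP task management request now moves straight into its
	 * AWAIT_TC_COMPLETION sub-state within the primary state machine,
	 * replacing the separate started substate machine.
	 */
	if (ireq->ttype == tmf_task && dev->dev_type == SAS_END_DEV)
		sci_base_state_machine_change_state(&sci_req->state_machine,
			SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_COMPLETION);
}
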
Reported-by: Christoph Hellwig <hch@lst.de> Signed-off-by: Dan Williams <dan.j.williams@intel.com> --- drivers/scsi/isci/Makefile | 1 - drivers/scsi/isci/request.c | 185 +++++++++++++++++++++++++++---- drivers/scsi/isci/request.h | 38 +++---- drivers/scsi/isci/ssp_request.c | 240 ---------------------------------------- 4 files changed, 178 insertions(+), 286 deletions(-) delete mode 100644 drivers/scsi/isci/ssp_request.c (limited to 'drivers/scsi/isci/request.h') diff --git a/drivers/scsi/isci/Makefile b/drivers/scsi/isci/Makefile index 48218ca908f..43d2c050bd4 100644 --- a/drivers/scsi/isci/Makefile +++ b/drivers/scsi/isci/Makefile @@ -7,6 +7,5 @@ isci-objs := init.o phy.o request.o sata.o \ remote_node_table.o \ unsolicited_frame_control.o \ stp_request.o \ - ssp_request.o \ smp_request.o \ port_config.o \ diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c index 48e2dac7252..5f516811055 100644 --- a/drivers/scsi/isci/request.c +++ b/drivers/scsi/isci/request.c @@ -656,7 +656,7 @@ enum sci_status scic_sds_io_request_frame_handler( * @sci_req: This parameter specifies the request object for which to copy * the response data. */ -void scic_sds_io_request_copy_response(struct scic_sds_request *sci_req) +static void scic_sds_io_request_copy_response(struct scic_sds_request *sci_req) { void *resp_buf; u32 len; @@ -978,7 +978,6 @@ scic_sds_io_request_tc_completion(struct scic_sds_request *request, u32 completi sci_base_state_machine_get_state(&request->state_machine)); return SCI_FAILURE_INVALID_STATE; - } /* @@ -1151,9 +1150,119 @@ static enum sci_status scic_sds_request_aborting_state_frame_handler( return SCI_SUCCESS; } +/** + * This method processes the completions transport layer (TL) status to + * determine if the RAW task management frame was sent successfully. If the + * raw frame was sent successfully, then the state for the task request + * transitions to waiting for a response frame. + * @sci_req: This parameter specifies the request for which the TC + * completion was received. + * @completion_code: This parameter indicates the completion status information + * for the TC. + * + * Indicate if the tc completion handler was successful. SCI_SUCCESS currently + * this method always returns success. + */ +static enum sci_status scic_sds_ssp_task_request_await_tc_completion_tc_completion_handler( + struct scic_sds_request *sci_req, + u32 completion_code) +{ + switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): + scic_sds_request_set_status(sci_req, SCU_TASK_DONE_GOOD, + SCI_SUCCESS); + + sci_base_state_machine_change_state(&sci_req->state_machine, + SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_RESPONSE); + break; + + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO): + /* + * Currently, the decision is to simply allow the task request to + * timeout if the task IU wasn't received successfully. + * There is a potential for receiving multiple task responses if we + * decide to send the task IU again. */ + dev_warn(scic_to_dev(sci_req->owning_controller), + "%s: TaskRequest:0x%p CompletionCode:%x - " + "ACK/NAK timeout\n", + __func__, + sci_req, + completion_code); + + sci_base_state_machine_change_state(&sci_req->state_machine, + SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_RESPONSE); + break; + + default: + /* + * All other completion status cause the IO to be complete. If a NAK + * was received, then it is up to the user to retry the request. 
*/ + scic_sds_request_set_status( + sci_req, + SCU_NORMALIZE_COMPLETION_STATUS(completion_code), + SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR + ); + + sci_base_state_machine_change_state(&sci_req->state_machine, + SCI_BASE_REQUEST_STATE_COMPLETED); + break; + } + + return SCI_SUCCESS; +} + +/** + * This method is responsible for processing a terminate/abort request for this + * TC while the request is waiting for the task management response + * unsolicited frame. + * @sci_req: This parameter specifies the request for which the + * termination was requested. + * + * This method returns an indication as to whether the abort request was + * successfully handled. need to update to ensure the received UF doesn't cause + * damage to subsequent requests (i.e. put the extended tag in a holding + * pattern for this particular device). + */ +static enum sci_status scic_sds_ssp_task_request_await_tc_response_abort_handler( + struct scic_sds_request *request) +{ + sci_base_state_machine_change_state(&request->state_machine, + SCI_BASE_REQUEST_STATE_ABORTING); + sci_base_state_machine_change_state(&request->state_machine, + SCI_BASE_REQUEST_STATE_COMPLETED); + return SCI_SUCCESS; +} + +/** + * This method processes an unsolicited frame while the task mgmt request is + * waiting for a response frame. It will copy the response data, release + * the unsolicited frame, and transition the request to the + * SCI_BASE_REQUEST_STATE_COMPLETED state. + * @sci_req: This parameter specifies the request for which the + * unsolicited frame was received. + * @frame_index: This parameter indicates the unsolicited frame index that + * should contain the response. + * + * This method returns an indication of whether the TC response frame was + * handled successfully or not. SCI_SUCCESS Currently this value is always + * returned and indicates successful processing of the TC response. Should + * probably update to check frame type and make sure it is a response frame. 
+ */ +static enum sci_status scic_sds_ssp_task_request_await_tc_response_frame_handler( + struct scic_sds_request *request, + u32 frame_index) +{ + scic_sds_io_request_copy_response(request); + + sci_base_state_machine_change_state(&request->state_machine, + SCI_BASE_REQUEST_STATE_COMPLETED); + scic_sds_controller_release_frame(request->owning_controller, + frame_index); + return SCI_SUCCESS; +} + static const struct scic_sds_io_request_state_handler scic_sds_request_state_handler_table[] = { - [SCI_BASE_REQUEST_STATE_INITIAL] = { - }, + [SCI_BASE_REQUEST_STATE_INITIAL] = { }, [SCI_BASE_REQUEST_STATE_CONSTRUCTED] = { .start_handler = scic_sds_request_constructed_state_start_handler, .abort_handler = scic_sds_request_constructed_state_abort_handler, @@ -1163,6 +1272,14 @@ static const struct scic_sds_io_request_state_handler scic_sds_request_state_han .tc_completion_handler = scic_sds_request_started_state_tc_completion_handler, .frame_handler = scic_sds_request_started_state_frame_handler, }, + [SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_COMPLETION] = { + .abort_handler = scic_sds_request_started_state_abort_handler, + .tc_completion_handler = scic_sds_ssp_task_request_await_tc_completion_tc_completion_handler, + }, + [SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_RESPONSE] = { + .abort_handler = scic_sds_ssp_task_request_await_tc_response_abort_handler, + .frame_handler = scic_sds_ssp_task_request_await_tc_response_frame_handler, + }, [SCI_BASE_REQUEST_STATE_COMPLETED] = { .complete_handler = scic_sds_request_completed_state_complete_handler, }, @@ -1171,8 +1288,7 @@ static const struct scic_sds_io_request_state_handler scic_sds_request_state_han .tc_completion_handler = scic_sds_request_aborting_state_tc_completion_handler, .frame_handler = scic_sds_request_aborting_state_frame_handler, }, - [SCI_BASE_REQUEST_STATE_FINAL] = { - }, + [SCI_BASE_REQUEST_STATE_FINAL] = { }, }; @@ -1919,6 +2035,9 @@ static void scic_sds_request_constructed_state_enter(void *object) static void scic_sds_request_started_state_enter(void *object) { struct scic_sds_request *sci_req = object; + struct sci_base_state_machine *sm = &sci_req->state_machine; + struct isci_request *ireq = sci_req_to_ireq(sci_req); + struct domain_device *dev = sci_dev_to_domain(sci_req->target_device); SET_STATE_HANDLER( sci_req, @@ -1926,9 +2045,13 @@ static void scic_sds_request_started_state_enter(void *object) SCI_BASE_REQUEST_STATE_STARTED ); - /* - * Most of the request state machines have a started substate machine so - * start its execution on the entry to the started state. */ + if (ireq->ttype == tmf_task && dev->dev_type == SAS_END_DEV) + sci_base_state_machine_change_state(sm, + SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_COMPLETION); + + /* Most of the request state machines have a started substate machine so + * start its execution on the entry to the started state. 
+ */ if (sci_req->has_started_substate_machine == true) sci_base_state_machine_start(&sci_req->started_substate_machine); } @@ -2026,6 +2149,30 @@ static void scic_sds_request_final_state_enter(void *object) ); } +static void scic_sds_io_request_started_task_mgmt_await_tc_completion_substate_enter( + void *object) +{ + struct scic_sds_request *sci_req = object; + + SET_STATE_HANDLER( + sci_req, + scic_sds_request_state_handler_table, + SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_COMPLETION + ); +} + +static void scic_sds_io_request_started_task_mgmt_await_task_response_substate_enter( + void *object) +{ + struct scic_sds_request *sci_req = object; + + SET_STATE_HANDLER( + sci_req, + scic_sds_request_state_handler_table, + SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_RESPONSE + ); +} + static const struct sci_base_state scic_sds_request_state_table[] = { [SCI_BASE_REQUEST_STATE_INITIAL] = { .enter_state = scic_sds_request_initial_state_enter, @@ -2037,6 +2184,12 @@ static const struct sci_base_state scic_sds_request_state_table[] = { .enter_state = scic_sds_request_started_state_enter, .exit_state = scic_sds_request_started_state_exit }, + [SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_COMPLETION] = { + .enter_state = scic_sds_io_request_started_task_mgmt_await_tc_completion_substate_enter, + }, + [SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_RESPONSE] = { + .enter_state = scic_sds_io_request_started_task_mgmt_await_task_response_substate_enter, + }, [SCI_BASE_REQUEST_STATE_COMPLETED] = { .enter_state = scic_sds_request_completed_state_enter, }, @@ -2126,19 +2279,9 @@ enum sci_status scic_task_request_construct(struct scic_sds_controller *scic, /* Build the common part of the request */ scic_sds_general_request_construct(scic, sci_dev, io_tag, sci_req); - if (dev->dev_type == SAS_END_DEV) { + if (dev->dev_type == SAS_END_DEV) scic_sds_ssp_task_request_assign_buffers(sci_req); - - sci_req->has_started_substate_machine = true; - - /* Construct the started sub-state machine. */ - sci_base_state_machine_construct( - &sci_req->started_substate_machine, - sci_req, - scic_sds_io_request_started_task_mgmt_substate_table, - SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_COMPLETION - ); - } else if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) + else if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) scic_sds_stp_request_assign_buffers(sci_req); else status = SCI_FAILURE_UNSUPPORTED_PROTOCOL; diff --git a/drivers/scsi/isci/request.h b/drivers/scsi/isci/request.h index 932ea767c8c..b5c5b063a36 100644 --- a/drivers/scsi/isci/request.h +++ b/drivers/scsi/isci/request.h @@ -295,6 +295,20 @@ enum sci_base_request_states { */ SCI_BASE_REQUEST_STATE_STARTED, + /** + * The AWAIT_TC_COMPLETION sub-state indicates that the started raw + * task management request is waiting for the transmission of the + * initial frame (i.e. command, task, etc.). + */ + SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_COMPLETION, + + /** + * This sub-state indicates that the started task management request + * is waiting for the reception of an unsolicited frame + * (i.e. response IU). + */ + SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_RESPONSE, + /** * This state indicates that the request has completed. * This state is entered from the STARTED state. 
This state is entered from @@ -451,7 +465,6 @@ void scic_sds_stp_request_assign_buffers(struct scic_sds_request *sci_req); void scic_sds_smp_request_assign_buffers(struct scic_sds_request *sci_req); enum sci_status scic_sds_request_start(struct scic_sds_request *sci_req); enum sci_status scic_sds_io_request_terminate(struct scic_sds_request *sci_req); -void scic_sds_io_request_copy_response(struct scic_sds_request *sci_req); enum sci_status scic_sds_io_request_event_handler(struct scic_sds_request *sci_req, u32 event_code); enum sci_status scic_sds_io_request_frame_handler(struct scic_sds_request *sci_req, @@ -459,29 +472,6 @@ enum sci_status scic_sds_io_request_frame_handler(struct scic_sds_request *sci_r enum sci_status scic_sds_task_request_terminate(struct scic_sds_request *sci_req); enum sci_status scic_sds_request_started_state_abort_handler(struct scic_sds_request *sci_req); -/** - * enum _scic_sds_io_request_started_task_mgmt_substates - This enumeration - * depicts all of the substates for a task management request to be - * performed in the STARTED super-state. - * - * - */ -enum scic_sds_raw_request_started_task_mgmt_substates { - /** - * The AWAIT_TC_COMPLETION sub-state indicates that the started raw - * task management request is waiting for the transmission of the - * initial frame (i.e. command, task, etc.). - */ - SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_COMPLETION, - - /** - * This sub-state indicates that the started task management request - * is waiting for the reception of an unsolicited frame - * (i.e. response IU). - */ - SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_RESPONSE, -}; - /** * enum _scic_sds_smp_request_started_substates - This enumeration depicts all diff --git a/drivers/scsi/isci/ssp_request.c b/drivers/scsi/isci/ssp_request.c deleted file mode 100644 index 4b6317aeef7..00000000000 --- a/drivers/scsi/isci/ssp_request.c +++ /dev/null @@ -1,240 +0,0 @@ -/* - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - * The full GNU General Public License is included in this distribution - * in the file called LICENSE.GPL. - * - * BSD LICENSE - * - * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. 
- * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#include "host.h" -#include "request.h" -#include "state_machine.h" -#include "scu_task_context.h" -#include "scu_completion_codes.h" - -/** - * This method processes the completions transport layer (TL) status to - * determine if the RAW task management frame was sent successfully. If the - * raw frame was sent successfully, then the state for the task request - * transitions to waiting for a response frame. - * @sci_req: This parameter specifies the request for which the TC - * completion was received. - * @completion_code: This parameter indicates the completion status information - * for the TC. - * - * Indicate if the tc completion handler was successful. SCI_SUCCESS currently - * this method always returns success. - */ -static enum sci_status scic_sds_ssp_task_request_await_tc_completion_tc_completion_handler( - struct scic_sds_request *sci_req, - u32 completion_code) -{ - switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { - case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): - scic_sds_request_set_status( - sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS - ); - - sci_base_state_machine_change_state( - &sci_req->started_substate_machine, - SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_RESPONSE - ); - break; - - case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO): - /* - * Currently, the decision is to simply allow the task request to - * timeout if the task IU wasn't received successfully. - * There is a potential for receiving multiple task responses if we - * decide to send the task IU again. */ - dev_warn(scic_to_dev(sci_req->owning_controller), - "%s: TaskRequest:0x%p CompletionCode:%x - " - "ACK/NAK timeout\n", - __func__, - sci_req, - completion_code); - - sci_base_state_machine_change_state( - &sci_req->started_substate_machine, - SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_RESPONSE - ); - break; - - default: - /* - * All other completion status cause the IO to be complete. If a NAK - * was received, then it is up to the user to retry the request. 
*/ - scic_sds_request_set_status( - sci_req, - SCU_NORMALIZE_COMPLETION_STATUS(completion_code), - SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR - ); - - sci_base_state_machine_change_state(&sci_req->state_machine, - SCI_BASE_REQUEST_STATE_COMPLETED); - break; - } - - return SCI_SUCCESS; -} - -/** - * This method is responsible for processing a terminate/abort request for this - * TC while the request is waiting for the task management response - * unsolicited frame. - * @sci_req: This parameter specifies the request for which the - * termination was requested. - * - * This method returns an indication as to whether the abort request was - * successfully handled. need to update to ensure the received UF doesn't cause - * damage to subsequent requests (i.e. put the extended tag in a holding - * pattern for this particular device). - */ -static enum sci_status scic_sds_ssp_task_request_await_tc_response_abort_handler( - struct scic_sds_request *request) -{ - sci_base_state_machine_change_state(&request->state_machine, - SCI_BASE_REQUEST_STATE_ABORTING); - sci_base_state_machine_change_state(&request->state_machine, - SCI_BASE_REQUEST_STATE_COMPLETED); - return SCI_SUCCESS; -} - -/** - * This method processes an unsolicited frame while the task mgmt request is - * waiting for a response frame. It will copy the response data, release - * the unsolicited frame, and transition the request to the - * SCI_BASE_REQUEST_STATE_COMPLETED state. - * @sci_req: This parameter specifies the request for which the - * unsolicited frame was received. - * @frame_index: This parameter indicates the unsolicited frame index that - * should contain the response. - * - * This method returns an indication of whether the TC response frame was - * handled successfully or not. SCI_SUCCESS Currently this value is always - * returned and indicates successful processing of the TC response. Should - * probably update to check frame type and make sure it is a response frame. - */ -static enum sci_status scic_sds_ssp_task_request_await_tc_response_frame_handler( - struct scic_sds_request *request, - u32 frame_index) -{ - scic_sds_io_request_copy_response(request); - - sci_base_state_machine_change_state(&request->state_machine, - SCI_BASE_REQUEST_STATE_COMPLETED); - scic_sds_controller_release_frame(request->owning_controller, - frame_index); - return SCI_SUCCESS; -} - -static const struct scic_sds_io_request_state_handler scic_sds_ssp_task_request_started_substate_handler_table[] = { - [SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_COMPLETION] = { - .abort_handler = scic_sds_request_started_state_abort_handler, - .tc_completion_handler = scic_sds_ssp_task_request_await_tc_completion_tc_completion_handler, - }, - [SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_RESPONSE] = { - .abort_handler = scic_sds_ssp_task_request_await_tc_response_abort_handler, - .frame_handler = scic_sds_ssp_task_request_await_tc_response_frame_handler, - } -}; - -/** - * This method performs the actions required when entering the - * SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_COMPLETION - * sub-state. This includes setting the IO request state handlers for this - * sub-state. - * @object: This parameter specifies the request object for which the sub-state - * change is occurring. - * - * none. 
- */ -static void scic_sds_io_request_started_task_mgmt_await_tc_completion_substate_enter( - void *object) -{ - struct scic_sds_request *sci_req = object; - - SET_STATE_HANDLER( - sci_req, - scic_sds_ssp_task_request_started_substate_handler_table, - SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_COMPLETION - ); -} - -/** - * This method performs the actions required when entering the - * SCIC_SDS_IO_REQUEST_STARTED_SUBSTATE_AWAIT_TC_RESPONSE sub-state. This - * includes setting the IO request state handlers for this sub-state. - * @object: This parameter specifies the request object for which the sub-state - * change is occurring. - * - * none. - */ -static void scic_sds_io_request_started_task_mgmt_await_task_response_substate_enter( - void *object) -{ - struct scic_sds_request *sci_req = object; - - SET_STATE_HANDLER( - sci_req, - scic_sds_ssp_task_request_started_substate_handler_table, - SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_RESPONSE - ); -} - -const struct sci_base_state scic_sds_io_request_started_task_mgmt_substate_table[] = { - [SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_COMPLETION] = { - .enter_state = scic_sds_io_request_started_task_mgmt_await_tc_completion_substate_enter, - }, - [SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_RESPONSE] = { - .enter_state = scic_sds_io_request_started_task_mgmt_await_task_response_substate_enter, - }, -}; - -- cgit v1.2.3-70-g09d2 From c72086e3c2897eaca5b99c005dd9844fdc784981 Mon Sep 17 00:00:00 2001 From: Dan Williams <dan.j.williams@intel.com> Date: Tue, 10 May 2011 02:28:48 -0700 Subject: isci: merge smp request substates into primary state machine Remove usage of the request substate machine for smp requests identified by: task->task_proto == SAS_PROTOCOL_SMP While merging over the smp_request infrastructure noticed that all the assign buffer implementations are now equal, so moved it to scic_sds_general_request_construct. 
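The shape of that change can be pictured with the stand-alone sketch below. It is illustrative only: the enum, struct, and function names are invented for the example and are not the driver's symbols; in the real patch the routing happens in scic_sds_request_started_state_enter() via sci_base_state_machine_change_state(), as the diff further down shows.

    /*
     * Minimal sketch, assuming simplified types: one request state machine
     * whose "started" entry action picks the protocol-specific substate,
     * instead of starting a separate per-protocol substate machine.
     */
    #include <stdio.h>

    enum req_state {
    	REQ_CONSTRUCTED,
    	REQ_STARTED,
    	REQ_SMP_AWAIT_RESPONSE,           /* former SMP substate, now a peer state */
    	REQ_SMP_AWAIT_TC_COMPLETION,
    	REQ_TASK_MGMT_AWAIT_TC_COMPLETION,
    	REQ_COMPLETED,
    };

    enum req_proto { PROTO_SSP, PROTO_SMP, PROTO_TMF };

    struct request {
    	enum req_state state;
    	enum req_proto proto;
    };

    static void change_state(struct request *req, enum req_state next)
    {
    	req->state = next;
    }

    /* Entry action for REQ_STARTED: route to the protocol-specific substate. */
    static void started_state_enter(struct request *req)
    {
    	if (req->proto == PROTO_TMF)
    		change_state(req, REQ_TASK_MGMT_AWAIT_TC_COMPLETION);
    	else if (req->proto == PROTO_SMP)
    		change_state(req, REQ_SMP_AWAIT_RESPONSE);
    	/* ordinary SSP I/O simply stays in REQ_STARTED */
    }

    int main(void)
    {
    	struct request smp_req = { REQ_CONSTRUCTED, PROTO_SMP };

    	change_state(&smp_req, REQ_STARTED);
    	started_state_enter(&smp_req);
    	printf("smp request state: %d\n", smp_req.state);
    	return 0;
    }

Keeping the former substates as ordinary peers of the primary states means there is no second machine to construct, start, or stop when an SMP request begins.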
Reported-by: Christoph Hellwig <hch@lst.de> Signed-off-by: Dan Williams <dan.j.williams@intel.com> --- drivers/scsi/isci/Makefile | 1 - drivers/scsi/isci/request.c | 462 +++++++++++++++++++++++++++++++---- drivers/scsi/isci/request.h | 42 ++-- drivers/scsi/isci/smp_request.c | 518 ---------------------------------------- drivers/scsi/isci/stp_request.c | 6 - 5 files changed, 436 insertions(+), 593 deletions(-) delete mode 100644 drivers/scsi/isci/smp_request.c (limited to 'drivers/scsi/isci/request.h') diff --git a/drivers/scsi/isci/Makefile b/drivers/scsi/isci/Makefile index 43d2c050bd4..be2b67d0df8 100644 --- a/drivers/scsi/isci/Makefile +++ b/drivers/scsi/isci/Makefile @@ -7,5 +7,4 @@ isci-objs := init.o phy.o request.o sata.o \ remote_node_table.o \ unsolicited_frame_control.o \ stp_request.o \ - smp_request.o \ port_config.o \ diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c index 5f516811055..5201dc58a19 100644 --- a/drivers/scsi/isci/request.c +++ b/drivers/scsi/isci/request.c @@ -158,12 +158,6 @@ void scic_sds_request_build_sgl(struct scic_sds_request *sds_request) } } -static void scic_sds_ssp_io_request_assign_buffers(struct scic_sds_request *sci_req) -{ - if (sci_req->was_tag_assigned_by_user == false) - sci_req->task_context_buffer = &sci_req->tc; -} - static void scic_sds_io_request_build_ssp_command_iu(struct scic_sds_request *sci_req) { struct ssp_cmd_iu *cmd_iu; @@ -341,12 +335,6 @@ static void scu_ssp_io_request_construct_task_context( scic_sds_request_build_sgl(sci_req); } -static void scic_sds_ssp_task_request_assign_buffers(struct scic_sds_request *sci_req) -{ - if (sci_req->was_tag_assigned_by_user == false) - sci_req->task_context_buffer = &sci_req->tc; -} - /** * This method will fill in the SCU Task Context for a SSP Task request. The * following important settings are utilized: -# priority == @@ -1261,6 +1249,196 @@ static enum sci_status scic_sds_ssp_task_request_await_tc_response_frame_handler return SCI_SUCCESS; } +/** + * This method processes an abnormal TC completion while the SMP request is + * waiting for a response frame. It decides what happened to the IO based + * on TC completion status. + * @sci_req: This parameter specifies the request for which the TC + * completion was received. + * @completion_code: This parameter indicates the completion status information + * for the TC. + * + * Indicate if the tc completion handler was successful. SCI_SUCCESS currently + * this method always returns success. + */ +static enum sci_status scic_sds_smp_request_await_response_tc_completion_handler( + struct scic_sds_request *sci_req, + u32 completion_code) +{ + switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): + /* + * In the AWAIT RESPONSE state, any TC completion is unexpected. + * but if the TC has success status, we complete the IO anyway. */ + scic_sds_request_set_status( + sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS + ); + + sci_base_state_machine_change_state( + &sci_req->state_machine, + SCI_BASE_REQUEST_STATE_COMPLETED); + break; + + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_RESP_TO_ERR): + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_UFI_ERR): + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_FRM_TYPE_ERR): + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_LL_RX_ERR): + /* + * These status has been seen in a specific LSI expander, which sometimes + * is not able to send smp response within 2 ms. 
This causes our hardware + * break the connection and set TC completion with one of these SMP_XXX_XX_ERR + * status. For these type of error, we ask scic user to retry the request. */ + scic_sds_request_set_status( + sci_req, SCU_TASK_DONE_SMP_RESP_TO_ERR, SCI_FAILURE_RETRY_REQUIRED + ); + + sci_base_state_machine_change_state( + &sci_req->state_machine, + SCI_BASE_REQUEST_STATE_COMPLETED); + break; + + default: + /* + * All other completion status cause the IO to be complete. If a NAK + * was received, then it is up to the user to retry the request. */ + scic_sds_request_set_status( + sci_req, + SCU_NORMALIZE_COMPLETION_STATUS(completion_code), + SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR + ); + + sci_base_state_machine_change_state( + &sci_req->state_machine, + SCI_BASE_REQUEST_STATE_COMPLETED); + break; + } + + return SCI_SUCCESS; +} + +/* + * This function processes an unsolicited frame while the SMP request is waiting + * for a response frame. It will copy the response data, release the + * unsolicited frame, and transition the request to the + * SCI_BASE_REQUEST_STATE_COMPLETED state. + * @sci_req: This parameter specifies the request for which the + * unsolicited frame was received. + * @frame_index: This parameter indicates the unsolicited frame index that + * should contain the response. + * + * This function returns an indication of whether the response frame was handled + * successfully or not. SCI_SUCCESS Currently this value is always returned and + * indicates successful processing of the TC response. + */ +static enum sci_status +scic_sds_smp_request_await_response_frame_handler(struct scic_sds_request *sci_req, + u32 frame_index) +{ + enum sci_status status; + void *frame_header; + struct smp_resp *rsp_hdr = &sci_req->smp.rsp; + ssize_t word_cnt = SMP_RESP_HDR_SZ / sizeof(u32); + + status = scic_sds_unsolicited_frame_control_get_header( + &(scic_sds_request_get_controller(sci_req)->uf_control), + frame_index, + &frame_header); + + /* byte swap the header. */ + sci_swab32_cpy(rsp_hdr, frame_header, word_cnt); + + if (rsp_hdr->frame_type == SMP_RESPONSE) { + void *smp_resp; + + status = scic_sds_unsolicited_frame_control_get_buffer( + &(scic_sds_request_get_controller(sci_req)->uf_control), + frame_index, + &smp_resp); + + word_cnt = (sizeof(struct smp_req) - SMP_RESP_HDR_SZ) / + sizeof(u32); + + sci_swab32_cpy(((u8 *) rsp_hdr) + SMP_RESP_HDR_SZ, + smp_resp, word_cnt); + + scic_sds_request_set_status( + sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS); + + sci_base_state_machine_change_state(&sci_req->state_machine, + SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_TC_COMPLETION); + } else { + /* This was not a response frame why did it get forwarded? */ + dev_err(scic_to_dev(sci_req->owning_controller), + "%s: SCIC SMP Request 0x%p received unexpected frame " + "%d type 0x%02x\n", + __func__, + sci_req, + frame_index, + rsp_hdr->frame_type); + + scic_sds_request_set_status( + sci_req, + SCU_TASK_DONE_SMP_FRM_TYPE_ERR, + SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); + + sci_base_state_machine_change_state( + &sci_req->state_machine, + SCI_BASE_REQUEST_STATE_COMPLETED); + } + + scic_sds_controller_release_frame(sci_req->owning_controller, + frame_index); + + return SCI_SUCCESS; +} + +/** + * This method processes the completions transport layer (TL) status to + * determine if the SMP request was sent successfully. If the SMP request + * was sent successfully, then the state for the SMP request transits to + * waiting for a response frame. 
+ * @sci_req: This parameter specifies the request for which the TC + * completion was received. + * @completion_code: This parameter indicates the completion status information + * for the TC. + * + * Indicate if the tc completion handler was successful. SCI_SUCCESS currently + * this method always returns success. + */ +static enum sci_status scic_sds_smp_request_await_tc_completion_tc_completion_handler( + struct scic_sds_request *sci_req, + u32 completion_code) +{ + switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): + scic_sds_request_set_status( + sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS + ); + + sci_base_state_machine_change_state( + &sci_req->state_machine, + SCI_BASE_REQUEST_STATE_COMPLETED); + break; + + default: + /* + * All other completion status cause the IO to be complete. If a NAK + * was received, then it is up to the user to retry the request. */ + scic_sds_request_set_status( + sci_req, + SCU_NORMALIZE_COMPLETION_STATUS(completion_code), + SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR + ); + + sci_base_state_machine_change_state( + &sci_req->state_machine, + SCI_BASE_REQUEST_STATE_COMPLETED); + break; + } + + return SCI_SUCCESS; +} + static const struct scic_sds_io_request_state_handler scic_sds_request_state_handler_table[] = { [SCI_BASE_REQUEST_STATE_INITIAL] = { }, [SCI_BASE_REQUEST_STATE_CONSTRUCTED] = { @@ -1280,6 +1458,15 @@ static const struct scic_sds_io_request_state_handler scic_sds_request_state_han .abort_handler = scic_sds_ssp_task_request_await_tc_response_abort_handler, .frame_handler = scic_sds_ssp_task_request_await_tc_response_frame_handler, }, + [SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_RESPONSE] = { + .abort_handler = scic_sds_request_started_state_abort_handler, + .tc_completion_handler = scic_sds_smp_request_await_response_tc_completion_handler, + .frame_handler = scic_sds_smp_request_await_response_frame_handler, + }, + [SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_TC_COMPLETION] = { + .abort_handler = scic_sds_request_started_state_abort_handler, + .tc_completion_handler = scic_sds_smp_request_await_tc_completion_tc_completion_handler, + }, [SCI_BASE_REQUEST_STATE_COMPLETED] = { .complete_handler = scic_sds_request_completed_state_complete_handler, }, @@ -2038,6 +2225,12 @@ static void scic_sds_request_started_state_enter(void *object) struct sci_base_state_machine *sm = &sci_req->state_machine; struct isci_request *ireq = sci_req_to_ireq(sci_req); struct domain_device *dev = sci_dev_to_domain(sci_req->target_device); + struct sas_task *task; + + /* XXX as hch said always creating an internal sas_task for tmf + * requests would simplify the driver + */ + task = ireq->ttype == io_task ? isci_request_access_task(ireq) : NULL; SET_STATE_HANDLER( sci_req, @@ -2045,15 +2238,19 @@ static void scic_sds_request_started_state_enter(void *object) SCI_BASE_REQUEST_STATE_STARTED ); - if (ireq->ttype == tmf_task && dev->dev_type == SAS_END_DEV) - sci_base_state_machine_change_state(sm, - SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_COMPLETION); - /* Most of the request state machines have a started substate machine so * start its execution on the entry to the started state. 
*/ if (sci_req->has_started_substate_machine == true) sci_base_state_machine_start(&sci_req->started_substate_machine); + + if (!task && dev->dev_type == SAS_END_DEV) { + sci_base_state_machine_change_state(sm, + SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_COMPLETION); + } else if (task && task->task_proto == SAS_PROTOCOL_SMP) { + sci_base_state_machine_change_state(sm, + SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_RESPONSE); + } } /** @@ -2173,6 +2370,28 @@ static void scic_sds_io_request_started_task_mgmt_await_task_response_substate_e ); } +static void scic_sds_smp_request_started_await_response_substate_enter(void *object) +{ + struct scic_sds_request *sci_req = object; + + SET_STATE_HANDLER( + sci_req, + scic_sds_request_state_handler_table, + SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_RESPONSE + ); +} + +static void scic_sds_smp_request_started_await_tc_completion_substate_enter(void *object) +{ + struct scic_sds_request *sci_req = object; + + SET_STATE_HANDLER( + sci_req, + scic_sds_request_state_handler_table, + SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_TC_COMPLETION + ); +} + static const struct sci_base_state scic_sds_request_state_table[] = { [SCI_BASE_REQUEST_STATE_INITIAL] = { .enter_state = scic_sds_request_initial_state_enter, @@ -2190,6 +2409,12 @@ static const struct sci_base_state scic_sds_request_state_table[] = { [SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_RESPONSE] = { .enter_state = scic_sds_io_request_started_task_mgmt_await_task_response_substate_enter, }, + [SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_RESPONSE] = { + .enter_state = scic_sds_smp_request_started_await_response_substate_enter, + }, + [SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_TC_COMPLETION] = { + .enter_state = scic_sds_smp_request_started_await_tc_completion_substate_enter, + }, [SCI_BASE_REQUEST_STATE_COMPLETED] = { .enter_state = scic_sds_request_completed_state_enter, }, @@ -2225,7 +2450,7 @@ static void scic_sds_general_request_construct(struct scic_sds_controller *scic, if (io_tag == SCI_CONTROLLER_INVALID_IO_TAG) { sci_req->was_tag_assigned_by_user = false; - sci_req->task_context_buffer = NULL; + sci_req->task_context_buffer = &sci_req->tc; } else { sci_req->was_tag_assigned_by_user = true; @@ -2245,26 +2470,20 @@ scic_io_request_construct(struct scic_sds_controller *scic, /* Build the common part of the request */ scic_sds_general_request_construct(scic, sci_dev, io_tag, sci_req); - if (sci_dev->rnc.remote_node_index == - SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) + if (sci_dev->rnc.remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) return SCI_FAILURE_INVALID_REMOTE_DEVICE; if (dev->dev_type == SAS_END_DEV) - scic_sds_ssp_io_request_assign_buffers(sci_req); - else if ((dev->dev_type == SATA_DEV) || - (dev->tproto & SAS_PROTOCOL_STP)) { - scic_sds_stp_request_assign_buffers(sci_req); + /* pass */; + else if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) memset(&sci_req->stp.cmd, 0, sizeof(sci_req->stp.cmd)); - } else if (dev_is_expander(dev)) { - scic_sds_smp_request_assign_buffers(sci_req); + else if (dev_is_expander(dev)) memset(&sci_req->smp.cmd, 0, sizeof(sci_req->smp.cmd)); - } else - status = SCI_FAILURE_UNSUPPORTED_PROTOCOL; + else + return SCI_FAILURE_UNSUPPORTED_PROTOCOL; - if (status == SCI_SUCCESS) { - memset(sci_req->task_context_buffer, 0, - offsetof(struct scu_task_context, sgl_pair_ab)); - } + memset(sci_req->task_context_buffer, 0, + offsetof(struct scu_task_context, sgl_pair_ab)); return status; } @@ -2279,17 
+2498,12 @@ enum sci_status scic_task_request_construct(struct scic_sds_controller *scic, /* Build the common part of the request */ scic_sds_general_request_construct(scic, sci_dev, io_tag, sci_req); - if (dev->dev_type == SAS_END_DEV) - scic_sds_ssp_task_request_assign_buffers(sci_req); - else if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) - scic_sds_stp_request_assign_buffers(sci_req); - else - status = SCI_FAILURE_UNSUPPORTED_PROTOCOL; - - if (status == SCI_SUCCESS) { + if (dev->dev_type == SAS_END_DEV || + dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) { sci_req->is_task_management_request = true; memset(sci_req->task_context_buffer, 0, sizeof(struct scu_task_context)); - } + } else + status = SCI_FAILURE_UNSUPPORTED_PROTOCOL; return status; } @@ -2340,6 +2554,174 @@ static enum sci_status isci_request_stp_request_construct( return status; } +/* + * This function will fill in the SCU Task Context for a SMP request. The + * following important settings are utilized: -# task_type == + * SCU_TASK_TYPE_SMP. This simply indicates that a normal request type + * (i.e. non-raw frame) is being utilized to perform task management. -# + * control_frame == 1. This ensures that the proper endianess is set so + * that the bytes are transmitted in the right order for a smp request frame. + * @sci_req: This parameter specifies the smp request object being + * constructed. + * + */ +static void +scu_smp_request_construct_task_context(struct scic_sds_request *sci_req, + struct smp_req *smp_req) +{ + dma_addr_t dma_addr; + struct scic_sds_controller *scic; + struct scic_sds_remote_device *sci_dev; + struct scic_sds_port *sci_port; + struct scu_task_context *task_context; + ssize_t word_cnt = sizeof(struct smp_req) / sizeof(u32); + + /* byte swap the smp request. */ + sci_swab32_cpy(&sci_req->smp.cmd, smp_req, + word_cnt); + + task_context = scic_sds_request_get_task_context(sci_req); + + scic = scic_sds_request_get_controller(sci_req); + sci_dev = scic_sds_request_get_device(sci_req); + sci_port = scic_sds_request_get_port(sci_req); + + /* + * Fill in the TC with the its required data + * 00h + */ + task_context->priority = 0; + task_context->initiator_request = 1; + task_context->connection_rate = sci_dev->connection_rate; + task_context->protocol_engine_index = + scic_sds_controller_get_protocol_engine_group(scic); + task_context->logical_port_index = scic_sds_port_get_index(sci_port); + task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SMP; + task_context->abort = 0; + task_context->valid = SCU_TASK_CONTEXT_VALID; + task_context->context_type = SCU_TASK_CONTEXT_TYPE; + + /* 04h */ + task_context->remote_node_index = sci_dev->rnc.remote_node_index; + task_context->command_code = 0; + task_context->task_type = SCU_TASK_TYPE_SMP_REQUEST; + + /* 08h */ + task_context->link_layer_control = 0; + task_context->do_not_dma_ssp_good_response = 1; + task_context->strict_ordering = 0; + task_context->control_frame = 1; + task_context->timeout_enable = 0; + task_context->block_guard_enable = 0; + + /* 0ch */ + task_context->address_modifier = 0; + + /* 10h */ + task_context->ssp_command_iu_length = smp_req->req_len; + + /* 14h */ + task_context->transfer_length_bytes = 0; + + /* + * 18h ~ 30h, protocol specific + * since commandIU has been build by framework at this point, we just + * copy the frist DWord from command IU to this location. */ + memcpy(&task_context->type.smp, &sci_req->smp.cmd, sizeof(u32)); + + /* + * 40h + * "For SMP you could program it to zero. 
We would prefer that way + * so that done code will be consistent." - Venki + */ + task_context->task_phase = 0; + + if (sci_req->was_tag_assigned_by_user) { + /* + * Build the task context now since we have already read + * the data + */ + sci_req->post_context = + (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC | + (scic_sds_controller_get_protocol_engine_group(scic) << + SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) | + (scic_sds_port_get_index(sci_port) << + SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) | + scic_sds_io_tag_get_index(sci_req->io_tag)); + } else { + /* + * Build the task context now since we have already read + * the data. + * I/O tag index is not assigned because we have to wait + * until we get a TCi. + */ + sci_req->post_context = + (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC | + (scic_sds_controller_get_protocol_engine_group(scic) << + SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) | + (scic_sds_port_get_index(sci_port) << + SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT)); + } + + /* + * Copy the physical address for the command buffer to the SCU Task + * Context command buffer should not contain command header. + */ + dma_addr = scic_io_request_get_dma_addr(sci_req, + ((char *) &sci_req->smp.cmd) + + sizeof(u32)); + + task_context->command_iu_upper = upper_32_bits(dma_addr); + task_context->command_iu_lower = lower_32_bits(dma_addr); + + /* SMP response comes as UF, so no need to set response IU address. */ + task_context->response_iu_upper = 0; + task_context->response_iu_lower = 0; +} + +static enum sci_status scic_io_request_construct_smp(struct scic_sds_request *sci_req) +{ + struct smp_req *smp_req = kmalloc(sizeof(*smp_req), GFP_KERNEL); + + if (!smp_req) + return SCI_FAILURE_INSUFFICIENT_RESOURCES; + + sci_req->protocol = SCIC_SMP_PROTOCOL; + + /* Construct the SMP SCU Task Context */ + memcpy(smp_req, &sci_req->smp.cmd, sizeof(*smp_req)); + + /* + * Look at the SMP requests' header fields; for certain SAS 1.x SMP + * functions under SAS 2.0, a zero request length really indicates + * a non-zero default length. */ + if (smp_req->req_len == 0) { + switch (smp_req->func) { + case SMP_DISCOVER: + case SMP_REPORT_PHY_ERR_LOG: + case SMP_REPORT_PHY_SATA: + case SMP_REPORT_ROUTE_INFO: + smp_req->req_len = 2; + break; + case SMP_CONF_ROUTE_INFO: + case SMP_PHY_CONTROL: + case SMP_PHY_TEST_FUNCTION: + smp_req->req_len = 9; + break; + /* Default - zero is a valid default for 2.0. */ + } + } + + scu_smp_request_construct_task_context(sci_req, smp_req); + + sci_base_state_machine_change_state(&sci_req->state_machine, + SCI_BASE_REQUEST_STATE_CONSTRUCTED); + + kfree(smp_req); + + return SCI_SUCCESS; +} + /* * isci_smp_request_build() - This function builds the smp request. * @ireq: This parameter points to the isci_request allocated in the diff --git a/drivers/scsi/isci/request.h b/drivers/scsi/isci/request.h index b5c5b063a36..d090cb1a14d 100644 --- a/drivers/scsi/isci/request.h +++ b/drivers/scsi/isci/request.h @@ -88,7 +88,7 @@ enum sci_request_protocol { SCIC_SMP_PROTOCOL, SCIC_SSP_PROTOCOL, SCIC_STP_PROTOCOL -}; /* XXX remove me, use sas_task.dev instead */; +}; /* XXX remove me, use sas_task.{dev|task_proto} instead */; struct scic_sds_request { /** @@ -309,6 +309,19 @@ enum sci_base_request_states { */ SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_RESPONSE, + /** + * This sub-state indicates that the started task management request + * is waiting for the reception of an unsolicited frame + * (i.e. response IU). 
+ */ + SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_RESPONSE, + + /** + * The AWAIT_TC_COMPLETION sub-state indicates that the started SMP request is + * waiting for the transmission of the initial frame (i.e. command, task, etc.). + */ + SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_TC_COMPLETION, + /** * This state indicates that the request has completed. * This state is entered from the STARTED state. This state is entered from @@ -461,8 +474,6 @@ scic_sds_io_request_tc_completion(struct scic_sds_request *request, u32 completi } void scic_sds_request_build_sgl(struct scic_sds_request *sci_req); -void scic_sds_stp_request_assign_buffers(struct scic_sds_request *sci_req); -void scic_sds_smp_request_assign_buffers(struct scic_sds_request *sci_req); enum sci_status scic_sds_request_start(struct scic_sds_request *sci_req); enum sci_status scic_sds_io_request_terminate(struct scic_sds_request *sci_req); enum sci_status scic_sds_io_request_event_handler(struct scic_sds_request *sci_req, @@ -473,30 +484,6 @@ enum sci_status scic_sds_task_request_terminate(struct scic_sds_request *sci_req enum sci_status scic_sds_request_started_state_abort_handler(struct scic_sds_request *sci_req); -/** - * enum _scic_sds_smp_request_started_substates - This enumeration depicts all - * of the substates for a SMP request to be performed in the STARTED - * super-state. - * - * - */ -enum scic_sds_smp_request_started_substates { - /** - * This sub-state indicates that the started task management request - * is waiting for the reception of an unsolicited frame - * (i.e. response IU). - */ - SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_RESPONSE, - - /** - * The AWAIT_TC_COMPLETION sub-state indicates that the started SMP request is - * waiting for the transmission of the initial frame (i.e. command, task, etc.). - */ - SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_TC_COMPLETION, -}; - - - /* XXX open code in caller */ static inline void *scic_request_get_virt_addr(struct scic_sds_request *sci_req, dma_addr_t phys_addr) @@ -791,7 +778,6 @@ enum sci_status scic_task_request_construct(struct scic_sds_controller *scic, struct scic_sds_request *sci_req); enum sci_status scic_task_request_construct_ssp(struct scic_sds_request *sci_req); enum sci_status scic_task_request_construct_sata(struct scic_sds_request *sci_req); -enum sci_status scic_io_request_construct_smp(struct scic_sds_request *sci_req); void scic_stp_io_request_set_ncq_tag(struct scic_sds_request *sci_req, u16 ncq_tag); void scic_sds_smp_request_copy_response(struct scic_sds_request *sci_req); #endif /* !defined(_ISCI_REQUEST_H_) */ diff --git a/drivers/scsi/isci/smp_request.c b/drivers/scsi/isci/smp_request.c deleted file mode 100644 index d4750a761c5..00000000000 --- a/drivers/scsi/isci/smp_request.c +++ /dev/null @@ -1,518 +0,0 @@ -/* - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - * The full GNU General Public License is included in this distribution - * in the file called LICENSE.GPL. - * - * BSD LICENSE - * - * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#include <scsi/sas.h> -#include "state_machine.h" -#include "remote_device.h" -#include "request.h" -#include "scu_completion_codes.h" -#include "scu_task_context.h" -#include "host.h" - -static void scu_smp_request_construct_task_context( - struct scic_sds_request *sci_req, - struct smp_req *smp_req); - -void scic_sds_smp_request_assign_buffers(struct scic_sds_request *sci_req) -{ - if (sci_req->was_tag_assigned_by_user == false) - sci_req->task_context_buffer = &sci_req->tc; -} - -/* - * This function will fill in the SCU Task Context for a SMP request. The - * following important settings are utilized: -# task_type == - * SCU_TASK_TYPE_SMP. This simply indicates that a normal request type - * (i.e. non-raw frame) is being utilized to perform task management. -# - * control_frame == 1. This ensures that the proper endianess is set so - * that the bytes are transmitted in the right order for a smp request frame. - * @sci_req: This parameter specifies the smp request object being - * constructed. - * - */ -static void -scu_smp_request_construct_task_context(struct scic_sds_request *sci_req, - struct smp_req *smp_req) -{ - dma_addr_t dma_addr; - struct scic_sds_controller *scic; - struct scic_sds_remote_device *sci_dev; - struct scic_sds_port *sci_port; - struct scu_task_context *task_context; - ssize_t word_cnt = sizeof(struct smp_req) / sizeof(u32); - - /* byte swap the smp request. 
*/ - sci_swab32_cpy(&sci_req->smp.cmd, smp_req, - word_cnt); - - task_context = scic_sds_request_get_task_context(sci_req); - - scic = scic_sds_request_get_controller(sci_req); - sci_dev = scic_sds_request_get_device(sci_req); - sci_port = scic_sds_request_get_port(sci_req); - - /* - * Fill in the TC with the its required data - * 00h - */ - task_context->priority = 0; - task_context->initiator_request = 1; - task_context->connection_rate = sci_dev->connection_rate; - task_context->protocol_engine_index = - scic_sds_controller_get_protocol_engine_group(scic); - task_context->logical_port_index = scic_sds_port_get_index(sci_port); - task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SMP; - task_context->abort = 0; - task_context->valid = SCU_TASK_CONTEXT_VALID; - task_context->context_type = SCU_TASK_CONTEXT_TYPE; - - /* 04h */ - task_context->remote_node_index = sci_dev->rnc.remote_node_index; - task_context->command_code = 0; - task_context->task_type = SCU_TASK_TYPE_SMP_REQUEST; - - /* 08h */ - task_context->link_layer_control = 0; - task_context->do_not_dma_ssp_good_response = 1; - task_context->strict_ordering = 0; - task_context->control_frame = 1; - task_context->timeout_enable = 0; - task_context->block_guard_enable = 0; - - /* 0ch */ - task_context->address_modifier = 0; - - /* 10h */ - task_context->ssp_command_iu_length = smp_req->req_len; - - /* 14h */ - task_context->transfer_length_bytes = 0; - - /* - * 18h ~ 30h, protocol specific - * since commandIU has been build by framework at this point, we just - * copy the frist DWord from command IU to this location. */ - memcpy(&task_context->type.smp, &sci_req->smp.cmd, sizeof(u32)); - - /* - * 40h - * "For SMP you could program it to zero. We would prefer that way - * so that done code will be consistent." - Venki - */ - task_context->task_phase = 0; - - if (sci_req->was_tag_assigned_by_user) { - /* - * Build the task context now since we have already read - * the data - */ - sci_req->post_context = - (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC | - (scic_sds_controller_get_protocol_engine_group(scic) << - SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) | - (scic_sds_port_get_index(sci_port) << - SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) | - scic_sds_io_tag_get_index(sci_req->io_tag)); - } else { - /* - * Build the task context now since we have already read - * the data. - * I/O tag index is not assigned because we have to wait - * until we get a TCi. - */ - sci_req->post_context = - (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC | - (scic_sds_controller_get_protocol_engine_group(scic) << - SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) | - (scic_sds_port_get_index(sci_port) << - SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT)); - } - - /* - * Copy the physical address for the command buffer to the SCU Task - * Context command buffer should not contain command header. - */ - dma_addr = scic_io_request_get_dma_addr(sci_req, - ((char *) &sci_req->smp.cmd) + - sizeof(u32)); - - task_context->command_iu_upper = upper_32_bits(dma_addr); - task_context->command_iu_lower = lower_32_bits(dma_addr); - - /* SMP response comes as UF, so no need to set response IU address. */ - task_context->response_iu_upper = 0; - task_context->response_iu_lower = 0; -} - -/* - * This function processes an unsolicited frame while the SMP request is waiting - * for a response frame. It will copy the response data, release the - * unsolicited frame, and transition the request to the - * SCI_BASE_REQUEST_STATE_COMPLETED state. 
- * @sci_req: This parameter specifies the request for which the - * unsolicited frame was received. - * @frame_index: This parameter indicates the unsolicited frame index that - * should contain the response. - * - * This function returns an indication of whether the response frame was handled - * successfully or not. SCI_SUCCESS Currently this value is always returned and - * indicates successful processing of the TC response. - */ -static enum sci_status -scic_sds_smp_request_await_response_frame_handler(struct scic_sds_request *sci_req, - u32 frame_index) -{ - enum sci_status status; - void *frame_header; - struct smp_resp *rsp_hdr = &sci_req->smp.rsp; - ssize_t word_cnt = SMP_RESP_HDR_SZ / sizeof(u32); - - status = scic_sds_unsolicited_frame_control_get_header( - &(scic_sds_request_get_controller(sci_req)->uf_control), - frame_index, - &frame_header); - - /* byte swap the header. */ - sci_swab32_cpy(rsp_hdr, frame_header, word_cnt); - - if (rsp_hdr->frame_type == SMP_RESPONSE) { - void *smp_resp; - - status = scic_sds_unsolicited_frame_control_get_buffer( - &(scic_sds_request_get_controller(sci_req)->uf_control), - frame_index, - &smp_resp); - - word_cnt = (sizeof(struct smp_req) - SMP_RESP_HDR_SZ) / - sizeof(u32); - - sci_swab32_cpy(((u8 *) rsp_hdr) + SMP_RESP_HDR_SZ, - smp_resp, word_cnt); - - scic_sds_request_set_status( - sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS); - - sci_base_state_machine_change_state( - &sci_req->started_substate_machine, - SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_TC_COMPLETION); - } else { - /* This was not a response frame why did it get forwarded? */ - dev_err(scic_to_dev(sci_req->owning_controller), - "%s: SCIC SMP Request 0x%p received unexpected frame " - "%d type 0x%02x\n", - __func__, - sci_req, - frame_index, - rsp_hdr->frame_type); - - scic_sds_request_set_status( - sci_req, - SCU_TASK_DONE_SMP_FRM_TYPE_ERR, - SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); - - sci_base_state_machine_change_state( - &sci_req->state_machine, - SCI_BASE_REQUEST_STATE_COMPLETED); - } - - scic_sds_controller_release_frame(sci_req->owning_controller, - frame_index); - - return SCI_SUCCESS; -} - - -/** - * This method processes an abnormal TC completion while the SMP request is - * waiting for a response frame. It decides what happened to the IO based - * on TC completion status. - * @sci_req: This parameter specifies the request for which the TC - * completion was received. - * @completion_code: This parameter indicates the completion status information - * for the TC. - * - * Indicate if the tc completion handler was successful. SCI_SUCCESS currently - * this method always returns success. - */ -static enum sci_status scic_sds_smp_request_await_response_tc_completion_handler( - struct scic_sds_request *sci_req, - u32 completion_code) -{ - switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { - case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): - /* - * In the AWAIT RESPONSE state, any TC completion is unexpected. - * but if the TC has success status, we complete the IO anyway. 
*/ - scic_sds_request_set_status( - sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS - ); - - sci_base_state_machine_change_state( - &sci_req->state_machine, - SCI_BASE_REQUEST_STATE_COMPLETED); - break; - - case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_RESP_TO_ERR): - case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_UFI_ERR): - case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_FRM_TYPE_ERR): - case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_LL_RX_ERR): - /* - * These status has been seen in a specific LSI expander, which sometimes - * is not able to send smp response within 2 ms. This causes our hardware - * break the connection and set TC completion with one of these SMP_XXX_XX_ERR - * status. For these type of error, we ask scic user to retry the request. */ - scic_sds_request_set_status( - sci_req, SCU_TASK_DONE_SMP_RESP_TO_ERR, SCI_FAILURE_RETRY_REQUIRED - ); - - sci_base_state_machine_change_state( - &sci_req->state_machine, - SCI_BASE_REQUEST_STATE_COMPLETED); - break; - - default: - /* - * All other completion status cause the IO to be complete. If a NAK - * was received, then it is up to the user to retry the request. */ - scic_sds_request_set_status( - sci_req, - SCU_NORMALIZE_COMPLETION_STATUS(completion_code), - SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR - ); - - sci_base_state_machine_change_state( - &sci_req->state_machine, - SCI_BASE_REQUEST_STATE_COMPLETED); - break; - } - - return SCI_SUCCESS; -} - - -/** - * This method processes the completions transport layer (TL) status to - * determine if the SMP request was sent successfully. If the SMP request - * was sent successfully, then the state for the SMP request transits to - * waiting for a response frame. - * @sci_req: This parameter specifies the request for which the TC - * completion was received. - * @completion_code: This parameter indicates the completion status information - * for the TC. - * - * Indicate if the tc completion handler was successful. SCI_SUCCESS currently - * this method always returns success. - */ -static enum sci_status scic_sds_smp_request_await_tc_completion_tc_completion_handler( - struct scic_sds_request *sci_req, - u32 completion_code) -{ - switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { - case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): - scic_sds_request_set_status( - sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS - ); - - sci_base_state_machine_change_state( - &sci_req->state_machine, - SCI_BASE_REQUEST_STATE_COMPLETED); - break; - - default: - /* - * All other completion status cause the IO to be complete. If a NAK - * was received, then it is up to the user to retry the request. 
*/ - scic_sds_request_set_status( - sci_req, - SCU_NORMALIZE_COMPLETION_STATUS(completion_code), - SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR - ); - - sci_base_state_machine_change_state( - &sci_req->state_machine, - SCI_BASE_REQUEST_STATE_COMPLETED); - break; - } - - return SCI_SUCCESS; -} - - -static const struct scic_sds_io_request_state_handler scic_sds_smp_request_started_substate_handler_table[] = { - [SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_RESPONSE] = { - .abort_handler = scic_sds_request_started_state_abort_handler, - .tc_completion_handler = scic_sds_smp_request_await_response_tc_completion_handler, - .frame_handler = scic_sds_smp_request_await_response_frame_handler, - }, - [SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_TC_COMPLETION] = { - .abort_handler = scic_sds_request_started_state_abort_handler, - .tc_completion_handler = scic_sds_smp_request_await_tc_completion_tc_completion_handler, - } -}; - -/** - * This method performs the actions required when entering the - * SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_TC_RESPONSE sub-state. This - * includes setting the IO request state handlers for this sub-state. - * @object: This parameter specifies the request object for which the sub-state - * change is occurring. - * - * none. - */ -static void scic_sds_smp_request_started_await_response_substate_enter( - void *object) -{ - struct scic_sds_request *sci_req = object; - - SET_STATE_HANDLER( - sci_req, - scic_sds_smp_request_started_substate_handler_table, - SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_RESPONSE - ); -} - -/** - * This method performs the actions required when entering the - * SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_TC_COMPLETION sub-state. - * This includes setting the SMP request state handlers for this sub-state. - * @object: This parameter specifies the request object for which the sub-state - * change is occurring. - * - * none. - */ -static void scic_sds_smp_request_started_await_tc_completion_substate_enter( - void *object) -{ - struct scic_sds_request *sci_req = object; - - SET_STATE_HANDLER( - sci_req, - scic_sds_smp_request_started_substate_handler_table, - SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_TC_COMPLETION - ); -} - -static const struct sci_base_state scic_sds_smp_request_started_substate_table[] = { - [SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_RESPONSE] = { - .enter_state = scic_sds_smp_request_started_await_response_substate_enter, - }, - [SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_TC_COMPLETION] = { - .enter_state = scic_sds_smp_request_started_await_tc_completion_substate_enter, - }, -}; - -/** - * This method is called by the SCI user to build an SMP IO request. - * - * - The user must have previously called scic_io_request_construct() on the - * supplied IO request. Indicate if the controller successfully built the IO - * request. SCI_SUCCESS This value is returned if the IO request was - * successfully built. SCI_FAILURE_UNSUPPORTED_PROTOCOL This value is returned - * if the remote_device does not support the SMP protocol. - * SCI_FAILURE_INVALID_ASSOCIATION This value is returned if the user did not - * properly set the association between the SCIC IO request and the user's IO - * request. 
- */ -enum sci_status scic_io_request_construct_smp(struct scic_sds_request *sci_req) -{ - struct smp_req *smp_req = kmalloc(sizeof(*smp_req), GFP_KERNEL); - - if (!smp_req) - return SCI_FAILURE_INSUFFICIENT_RESOURCES; - - sci_req->protocol = SCIC_SMP_PROTOCOL; - sci_req->has_started_substate_machine = true; - - /* Construct the started sub-state machine. */ - sci_base_state_machine_construct( - &sci_req->started_substate_machine, - sci_req, - scic_sds_smp_request_started_substate_table, - SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_RESPONSE - ); - - /* Construct the SMP SCU Task Context */ - memcpy(smp_req, &sci_req->smp.cmd, sizeof(*smp_req)); - - /* - * Look at the SMP requests' header fields; for certain SAS 1.x SMP - * functions under SAS 2.0, a zero request length really indicates - * a non-zero default length. */ - if (smp_req->req_len == 0) { - switch (smp_req->func) { - case SMP_DISCOVER: - case SMP_REPORT_PHY_ERR_LOG: - case SMP_REPORT_PHY_SATA: - case SMP_REPORT_ROUTE_INFO: - smp_req->req_len = 2; - break; - case SMP_CONF_ROUTE_INFO: - case SMP_PHY_CONTROL: - case SMP_PHY_TEST_FUNCTION: - smp_req->req_len = 9; - break; - /* Default - zero is a valid default for 2.0. */ - } - } - - scu_smp_request_construct_task_context(sci_req, smp_req); - - sci_base_state_machine_change_state(&sci_req->state_machine, - SCI_BASE_REQUEST_STATE_CONSTRUCTED); - - kfree(smp_req); - - return SCI_SUCCESS; -} diff --git a/drivers/scsi/isci/stp_request.c b/drivers/scsi/isci/stp_request.c index 298086afa5a..e94ece81ed9 100644 --- a/drivers/scsi/isci/stp_request.c +++ b/drivers/scsi/isci/stp_request.c @@ -64,12 +64,6 @@ #include "scu_task_context.h" #include "request.h" -void scic_sds_stp_request_assign_buffers(struct scic_sds_request *sci_req) -{ - if (sci_req->was_tag_assigned_by_user == false) - sci_req->task_context_buffer = &sci_req->tc; -} - /** * This method is will fill in the SCU Task Context for any type of SATA * request. This is called from the various SATA constructors. -- cgit v1.2.3-70-g09d2 From 5dec6f4e41340196d223caf922578c44dfe2295a Mon Sep 17 00:00:00 2001 From: Dan Williams <dan.j.williams@intel.com> Date: Tue, 10 May 2011 02:28:49 -0700 Subject: isci: merge stp request substates into primary state machine Remove usage of the request substate machine for stp requests, and kill the request substate infrastructure. Similar to the previous conversions this adds the substates to the primary state machine and arranges for the 'started' state to transition to the proper stp substate. 
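The consolidation can be sketched as below. This is illustrative only, with invented names rather than the driver's symbols: every state, including the former STP substates, becomes an ordinary index into the single handler table, so no per-request substate machine has to be constructed, started, or stopped.

    /*
     * Minimal sketch, assuming simplified types: one handler table keyed by
     * the primary state machine's state covers the whole request lifetime.
     */
    #include <stdio.h>

    struct request;

    typedef int (*frame_handler_t)(struct request *req, unsigned int frame_index);

    enum req_state {
    	STATE_STARTED,
    	STATE_STP_PIO_WAIT_FRAME,	/* was a substate owned by stp_request.c */
    	STATE_COMPLETED,
    	STATE_MAX
    };

    struct request {
    	enum req_state state;
    };

    static int stp_pio_wait_frame_handler(struct request *req, unsigned int frame_index)
    {
    	printf("frame %u consumed, request completes\n", frame_index);
    	req->state = STATE_COMPLETED;
    	return 0;
    }

    /* One table for every state; states without a handler are simply NULL. */
    static const frame_handler_t frame_handlers[STATE_MAX] = {
    	[STATE_STP_PIO_WAIT_FRAME] = stp_pio_wait_frame_handler,
    };

    static int dispatch_frame(struct request *req, unsigned int frame_index)
    {
    	frame_handler_t handler = frame_handlers[req->state];

    	return handler ? handler(req, frame_index) : -1;	/* -1: unexpected frame */
    }

    int main(void)
    {
    	struct request req = { STATE_STP_PIO_WAIT_FRAME };

    	return dispatch_frame(&req, 3) == 0 ? 0 : 1;
    }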
Reported-by: Christoph Hellwig <hch@lst.de> Signed-off-by: Dan Williams <dan.j.williams@intel.com> --- drivers/scsi/isci/Makefile | 1 - drivers/scsi/isci/request.c | 1649 +++++++++++++++++++++++++++++++++++---- drivers/scsi/isci/request.h | 113 ++- drivers/scsi/isci/stp_request.c | 1584 ------------------------------------- drivers/scsi/isci/stp_request.h | 195 ----- 5 files changed, 1593 insertions(+), 1949 deletions(-) delete mode 100644 drivers/scsi/isci/stp_request.c delete mode 100644 drivers/scsi/isci/stp_request.h (limited to 'drivers/scsi/isci/request.h') diff --git a/drivers/scsi/isci/Makefile b/drivers/scsi/isci/Makefile index be2b67d0df8..86b0c88e91a 100644 --- a/drivers/scsi/isci/Makefile +++ b/drivers/scsi/isci/Makefile @@ -6,5 +6,4 @@ isci-objs := init.o phy.o request.o sata.o \ remote_node_context.o \ remote_node_table.o \ unsolicited_frame_control.o \ - stp_request.o \ port_config.o \ diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c index 5201dc58a19..f503e3e18d8 100644 --- a/drivers/scsi/isci/request.c +++ b/drivers/scsi/isci/request.c @@ -58,6 +58,7 @@ #include "request.h" #include "sata.h" #include "scu_completion_codes.h" +#include "scu_event_codes.h" #include "sas.h" /** @@ -92,7 +93,7 @@ static struct scu_sgl_element_pair *scic_sds_request_get_sgl_element_pair( * the Scatter-Gather List. * */ -void scic_sds_request_build_sgl(struct scic_sds_request *sds_request) +static void scic_sds_request_build_sgl(struct scic_sds_request *sds_request) { struct isci_request *isci_request = sci_req_to_ireq(sds_request); struct isci_host *isci_host = isci_request->isci_host; @@ -366,27 +367,214 @@ static void scu_ssp_task_request_construct_task_context( sizeof(struct ssp_task_iu) / sizeof(u32); } +/** + * This method is will fill in the SCU Task Context for any type of SATA + * request. This is called from the various SATA constructors. + * @sci_req: The general IO request object which is to be used in + * constructing the SCU task context. + * @task_context: The buffer pointer for the SCU task context which is being + * constructed. + * + * The general io request construction is complete. The buffer assignment for + * the command buffer is complete. none Revisit task context construction to + * determine what is common for SSP/SMP/STP task context structures. 
+ */ +static void scu_sata_reqeust_construct_task_context( + struct scic_sds_request *sci_req, + struct scu_task_context *task_context) +{ + dma_addr_t dma_addr; + struct scic_sds_controller *controller; + struct scic_sds_remote_device *target_device; + struct scic_sds_port *target_port; + + controller = scic_sds_request_get_controller(sci_req); + target_device = scic_sds_request_get_device(sci_req); + target_port = scic_sds_request_get_port(sci_req); + + /* Fill in the TC with the its required data */ + task_context->abort = 0; + task_context->priority = SCU_TASK_PRIORITY_NORMAL; + task_context->initiator_request = 1; + task_context->connection_rate = target_device->connection_rate; + task_context->protocol_engine_index = + scic_sds_controller_get_protocol_engine_group(controller); + task_context->logical_port_index = + scic_sds_port_get_index(target_port); + task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_STP; + task_context->valid = SCU_TASK_CONTEXT_VALID; + task_context->context_type = SCU_TASK_CONTEXT_TYPE; + + task_context->remote_node_index = + scic_sds_remote_device_get_index(sci_req->target_device); + task_context->command_code = 0; + + task_context->link_layer_control = 0; + task_context->do_not_dma_ssp_good_response = 1; + task_context->strict_ordering = 0; + task_context->control_frame = 0; + task_context->timeout_enable = 0; + task_context->block_guard_enable = 0; + + task_context->address_modifier = 0; + task_context->task_phase = 0x01; + + task_context->ssp_command_iu_length = + (sizeof(struct host_to_dev_fis) - sizeof(u32)) / sizeof(u32); + + /* Set the first word of the H2D REG FIS */ + task_context->type.words[0] = *(u32 *)&sci_req->stp.cmd; + + if (sci_req->was_tag_assigned_by_user) { + /* + * Build the task context now since we have already read + * the data + */ + sci_req->post_context = + (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC | + (scic_sds_controller_get_protocol_engine_group( + controller) << + SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) | + (scic_sds_port_get_index(target_port) << + SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) | + scic_sds_io_tag_get_index(sci_req->io_tag)); + } else { + /* + * Build the task context now since we have already read + * the data. + * I/O tag index is not assigned because we have to wait + * until we get a TCi. + */ + sci_req->post_context = + (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC | + (scic_sds_controller_get_protocol_engine_group( + controller) << + SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) | + (scic_sds_port_get_index(target_port) << + SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT)); + } + + /* + * Copy the physical address for the command buffer to the SCU Task + * Context. We must offset the command buffer by 4 bytes because the + * first 4 bytes are transfered in the body of the TC. + */ + dma_addr = scic_io_request_get_dma_addr(sci_req, + ((char *) &sci_req->stp.cmd) + + sizeof(u32)); + + task_context->command_iu_upper = upper_32_bits(dma_addr); + task_context->command_iu_lower = lower_32_bits(dma_addr); + + /* SATA Requests do not have a response buffer */ + task_context->response_iu_upper = 0; + task_context->response_iu_lower = 0; +} + + /** - * This method constructs the SSP Command IU data for this ssp passthrough - * comand request object. - * @sci_req: This parameter specifies the request object for which the SSP - * command information unit is being built. 
+ * scu_stp_raw_request_construct_task_context - + * @sci_req: This parameter specifies the STP request object for which to + * construct a RAW command frame task context. + * @task_context: This parameter specifies the SCU specific task context buffer + * to construct. * - * enum sci_status, returns invalid parameter is cdb > 16 + * This method performs the operations common to all SATA/STP requests + * utilizing the raw frame method. none */ +static void scu_stp_raw_request_construct_task_context(struct scic_sds_stp_request *stp_req, + struct scu_task_context *task_context) +{ + struct scic_sds_request *sci_req = to_sci_req(stp_req); + + scu_sata_reqeust_construct_task_context(sci_req, task_context); + + task_context->control_frame = 0; + task_context->priority = SCU_TASK_PRIORITY_NORMAL; + task_context->task_type = SCU_TASK_TYPE_SATA_RAW_FRAME; + task_context->type.stp.fis_type = FIS_REGH2D; + task_context->transfer_length_bytes = sizeof(struct host_to_dev_fis) - sizeof(u32); +} + +static enum sci_status +scic_sds_stp_pio_request_construct(struct scic_sds_request *sci_req, + bool copy_rx_frame) +{ + struct scic_sds_stp_request *stp_req = &sci_req->stp.req; + struct scic_sds_stp_pio_request *pio = &stp_req->type.pio; + + scu_stp_raw_request_construct_task_context(stp_req, + sci_req->task_context_buffer); + + pio->current_transfer_bytes = 0; + pio->ending_error = 0; + pio->ending_status = 0; + + pio->request_current.sgl_offset = 0; + pio->request_current.sgl_set = SCU_SGL_ELEMENT_PAIR_A; + + if (copy_rx_frame) { + scic_sds_request_build_sgl(sci_req); + /* Since the IO request copy of the TC contains the same data as + * the actual TC this pointer is vaild for either. + */ + pio->request_current.sgl_pair = &sci_req->task_context_buffer->sgl_pair_ab; + } else { + /* The user does not want the data copied to the SGL buffer location */ + pio->request_current.sgl_pair = NULL; + } + return SCI_SUCCESS; +} /** - * This method constructs the SATA request object. - * @sci_req: - * @sat_protocol: - * @transfer_length: - * @data_direction: - * @copy_rx_frame: * - * enum sci_status + * @sci_req: This parameter specifies the request to be constructed as an + * optimized request. + * @optimized_task_type: This parameter specifies whether the request is to be + * an UDMA request or a NCQ request. - A value of 0 indicates UDMA. - A + * value of 1 indicates NCQ. + * + * This method will perform request construction common to all types of STP + * requests that are optimized by the silicon (i.e. UDMA, NCQ). This method + * returns an indication as to whether the construction was successful. */ +static void scic_sds_stp_optimized_request_construct(struct scic_sds_request *sci_req, + u8 optimized_task_type, + u32 len, + enum dma_data_direction dir) +{ + struct scu_task_context *task_context = sci_req->task_context_buffer; + + /* Build the STP task context structure */ + scu_sata_reqeust_construct_task_context(sci_req, task_context); + + /* Copy over the SGL elements */ + scic_sds_request_build_sgl(sci_req); + + /* Copy over the number of bytes to be transfered */ + task_context->transfer_length_bytes = len; + + if (dir == DMA_TO_DEVICE) { + /* + * The difference between the DMA IN and DMA OUT request task type + * values are consistent with the difference between FPDMA READ + * and FPDMA WRITE values. Add the supplied task type parameter + * to this difference to set the task type properly for this + * DATA OUT (WRITE) case. 
*/ + task_context->task_type = optimized_task_type + (SCU_TASK_TYPE_DMA_OUT + - SCU_TASK_TYPE_DMA_IN); + } else { + /* + * For the DATA IN (READ) case, simply save the supplied + * optimized task type. */ + task_context->task_type = optimized_task_type; + } +} + + + static enum sci_status scic_io_request_construct_sata(struct scic_sds_request *sci_req, u32 len, @@ -402,9 +590,11 @@ scic_io_request_construct_sata(struct scic_sds_request *sci_req, struct isci_tmf *tmf = isci_request_access_tmf(ireq); if (tmf->tmf_code == isci_tmf_sata_srst_high || - tmf->tmf_code == isci_tmf_sata_srst_low) - return scic_sds_stp_soft_reset_request_construct(sci_req); - else { + tmf->tmf_code == isci_tmf_sata_srst_low) { + scu_stp_raw_request_construct_task_context(&sci_req->stp.req, + sci_req->task_context_buffer); + return SCI_SUCCESS; + } else { dev_err(scic_to_dev(sci_req->owning_controller), "%s: Request 0x%p received un-handled SAT " "management protocol 0x%x.\n", @@ -424,17 +614,27 @@ scic_io_request_construct_sata(struct scic_sds_request *sci_req, } /* non data */ - if (task->data_dir == DMA_NONE) - return scic_sds_stp_non_data_request_construct(sci_req); + if (task->data_dir == DMA_NONE) { + scu_stp_raw_request_construct_task_context(&sci_req->stp.req, + sci_req->task_context_buffer); + return SCI_SUCCESS; + } /* NCQ */ - if (task->ata_task.use_ncq) - return scic_sds_stp_ncq_request_construct(sci_req, len, dir); + if (task->ata_task.use_ncq) { + scic_sds_stp_optimized_request_construct(sci_req, + SCU_TASK_TYPE_FPDMAQ_READ, + len, dir); + return SCI_SUCCESS; + } /* DMA */ - if (task->ata_task.dma_xfer) - return scic_sds_stp_udma_request_construct(sci_req, len, dir); - else /* PIO */ + if (task->ata_task.dma_xfer) { + scic_sds_stp_optimized_request_construct(sci_req, + SCU_TASK_TYPE_DMA_IN, + len, dir); + return SCI_SUCCESS; + } else /* PIO */ return scic_sds_stp_pio_request_construct(sci_req, copy); return status; @@ -453,9 +653,8 @@ static enum sci_status scic_io_request_construct_basic_ssp(struct scic_sds_reque scic_sds_io_request_build_ssp_command_iu(sci_req); - sci_base_state_machine_change_state( - &sci_req->state_machine, - SCI_BASE_REQUEST_STATE_CONSTRUCTED); + sci_base_state_machine_change_state(&sci_req->state_machine, + SCI_BASE_REQUEST_STATE_CONSTRUCTED); return SCI_SUCCESS; } @@ -470,12 +669,11 @@ enum sci_status scic_task_request_construct_ssp( scic_sds_task_request_build_ssp_task_iu(sci_req); sci_base_state_machine_change_state(&sci_req->state_machine, - SCI_BASE_REQUEST_STATE_CONSTRUCTED); + SCI_BASE_REQUEST_STATE_CONSTRUCTED); return SCI_SUCCESS; } - static enum sci_status scic_io_request_construct_basic_sata(struct scic_sds_request *sci_req) { enum sci_status status; @@ -496,12 +694,11 @@ static enum sci_status scic_io_request_construct_basic_sata(struct scic_sds_requ if (status == SCI_SUCCESS) sci_base_state_machine_change_state(&sci_req->state_machine, - SCI_BASE_REQUEST_STATE_CONSTRUCTED); + SCI_BASE_REQUEST_STATE_CONSTRUCTED); return status; } - enum sci_status scic_task_request_construct_sata(struct scic_sds_request *sci_req) { enum sci_status status = SCI_SUCCESS; @@ -513,7 +710,8 @@ enum sci_status scic_task_request_construct_sata(struct scic_sds_request *sci_re if (tmf->tmf_code == isci_tmf_sata_srst_high || tmf->tmf_code == isci_tmf_sata_srst_low) { - status = scic_sds_stp_soft_reset_request_construct(sci_req); + scu_stp_raw_request_construct_task_context(&sci_req->stp.req, + sci_req->task_context_buffer); } else { dev_err(scic_to_dev(sci_req->owning_controller), "%s: Request 
0x%p received un-handled SAT " @@ -524,10 +722,10 @@ enum sci_status scic_task_request_construct_sata(struct scic_sds_request *sci_re } } - if (status == SCI_SUCCESS) - sci_base_state_machine_change_state( - &sci_req->state_machine, - SCI_BASE_REQUEST_STATE_CONSTRUCTED); + if (status != SCI_SUCCESS) + return status; + sci_base_state_machine_change_state(&sci_req->state_machine, + SCI_BASE_REQUEST_STATE_CONSTRUCTED); return status; } @@ -724,7 +922,7 @@ static enum sci_status scic_sds_request_constructed_state_start_handler( /* Everything is good go ahead and change state */ sci_base_state_machine_change_state(&request->state_machine, - SCI_BASE_REQUEST_STATE_STARTED); + SCI_BASE_REQUEST_STATE_STARTED); return SCI_SUCCESS; } @@ -749,29 +947,14 @@ static enum sci_status scic_sds_request_constructed_state_abort_handler( SCI_FAILURE_IO_TERMINATED); sci_base_state_machine_change_state(&request->state_machine, - SCI_BASE_REQUEST_STATE_COMPLETED); + SCI_BASE_REQUEST_STATE_COMPLETED); return SCI_SUCCESS; } -/* - * ***************************************************************************** - * * STARTED STATE HANDLERS - * ***************************************************************************** */ - -/* - * This method implements the action to be taken when an SCIC_SDS_IO_REQUEST_T - * object receives a scic_sds_request_terminate() request. Since the request - * has been posted to the hardware the io request state is changed to the - * aborting state. enum sci_status SCI_SUCCESS - */ -enum sci_status scic_sds_request_started_state_abort_handler( - struct scic_sds_request *request) +static enum sci_status scic_sds_request_started_state_abort_handler(struct scic_sds_request *sci_req) { - if (request->has_started_substate_machine) - sci_base_state_machine_stop(&request->started_substate_machine); - - sci_base_state_machine_change_state(&request->state_machine, - SCI_BASE_REQUEST_STATE_ABORTING); + sci_base_state_machine_change_state(&sci_req->state_machine, + SCI_BASE_REQUEST_STATE_ABORTING); return SCI_SUCCESS; } @@ -943,19 +1126,15 @@ scic_sds_request_started_state_tc_completion_handler(struct scic_sds_request *sc */ /* In all cases we will treat this as the completion of the IO req. 
*/ - sci_base_state_machine_change_state( - &sci_req->state_machine, - SCI_BASE_REQUEST_STATE_COMPLETED); + sci_base_state_machine_change_state(&sci_req->state_machine, + SCI_BASE_REQUEST_STATE_COMPLETED); return SCI_SUCCESS; } enum sci_status scic_sds_io_request_tc_completion(struct scic_sds_request *request, u32 completion_code) { - if (request->state_machine.current_state_id == SCI_BASE_REQUEST_STATE_STARTED && - request->has_started_substate_machine == false) - return scic_sds_request_started_state_tc_completion_handler(request, completion_code); - else if (request->state_handlers->tc_completion_handler) + if (request->state_handlers->tc_completion_handler) return request->state_handlers->tc_completion_handler(request, completion_code); dev_warn(scic_to_dev(request->owning_controller), @@ -1064,7 +1243,7 @@ static enum sci_status scic_sds_request_completed_state_complete_handler( } sci_base_state_machine_change_state(&request->state_machine, - SCI_BASE_REQUEST_STATE_FINAL); + SCI_BASE_REQUEST_STATE_FINAL); return SCI_SUCCESS; } @@ -1084,7 +1263,7 @@ static enum sci_status scic_sds_request_aborting_state_abort_handler( struct scic_sds_request *request) { sci_base_state_machine_change_state(&request->state_machine, - SCI_BASE_REQUEST_STATE_COMPLETED); + SCI_BASE_REQUEST_STATE_COMPLETED); return SCI_SUCCESS; } @@ -1107,7 +1286,7 @@ static enum sci_status scic_sds_request_aborting_state_tc_completion_handler( ); sci_base_state_machine_change_state(&sci_req->state_machine, - SCI_BASE_REQUEST_STATE_COMPLETED); + SCI_BASE_REQUEST_STATE_COMPLETED); break; default: @@ -1161,7 +1340,7 @@ static enum sci_status scic_sds_ssp_task_request_await_tc_completion_tc_completi SCI_SUCCESS); sci_base_state_machine_change_state(&sci_req->state_machine, - SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_RESPONSE); + SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_RESPONSE); break; case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO): @@ -1178,7 +1357,7 @@ static enum sci_status scic_sds_ssp_task_request_await_tc_completion_tc_completi completion_code); sci_base_state_machine_change_state(&sci_req->state_machine, - SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_RESPONSE); + SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_RESPONSE); break; default: @@ -1192,7 +1371,7 @@ static enum sci_status scic_sds_ssp_task_request_await_tc_completion_tc_completi ); sci_base_state_machine_change_state(&sci_req->state_machine, - SCI_BASE_REQUEST_STATE_COMPLETED); + SCI_BASE_REQUEST_STATE_COMPLETED); break; } @@ -1215,9 +1394,9 @@ static enum sci_status scic_sds_ssp_task_request_await_tc_response_abort_handler struct scic_sds_request *request) { sci_base_state_machine_change_state(&request->state_machine, - SCI_BASE_REQUEST_STATE_ABORTING); + SCI_BASE_REQUEST_STATE_ABORTING); sci_base_state_machine_change_state(&request->state_machine, - SCI_BASE_REQUEST_STATE_COMPLETED); + SCI_BASE_REQUEST_STATE_COMPLETED); return SCI_SUCCESS; } @@ -1243,7 +1422,7 @@ static enum sci_status scic_sds_ssp_task_request_await_tc_response_frame_handler scic_sds_io_request_copy_response(request); sci_base_state_machine_change_state(&request->state_machine, - SCI_BASE_REQUEST_STATE_COMPLETED); + SCI_BASE_REQUEST_STATE_COMPLETED); scic_sds_controller_release_frame(request->owning_controller, frame_index); return SCI_SUCCESS; @@ -1270,13 +1449,11 @@ static enum sci_status scic_sds_smp_request_await_response_tc_completion_handler /* * In the AWAIT RESPONSE state, any TC completion is unexpected. 
* but if the TC has success status, we complete the IO anyway. */ - scic_sds_request_set_status( - sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS - ); + scic_sds_request_set_status(sci_req, SCU_TASK_DONE_GOOD, + SCI_SUCCESS); - sci_base_state_machine_change_state( - &sci_req->state_machine, - SCI_BASE_REQUEST_STATE_COMPLETED); + sci_base_state_machine_change_state(&sci_req->state_machine, + SCI_BASE_REQUEST_STATE_COMPLETED); break; case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_RESP_TO_ERR): @@ -1288,13 +1465,11 @@ static enum sci_status scic_sds_smp_request_await_response_tc_completion_handler * is not able to send smp response within 2 ms. This causes our hardware * break the connection and set TC completion with one of these SMP_XXX_XX_ERR * status. For these type of error, we ask scic user to retry the request. */ - scic_sds_request_set_status( - sci_req, SCU_TASK_DONE_SMP_RESP_TO_ERR, SCI_FAILURE_RETRY_REQUIRED - ); + scic_sds_request_set_status(sci_req, SCU_TASK_DONE_SMP_RESP_TO_ERR, + SCI_FAILURE_RETRY_REQUIRED); - sci_base_state_machine_change_state( - &sci_req->state_machine, - SCI_BASE_REQUEST_STATE_COMPLETED); + sci_base_state_machine_change_state(&sci_req->state_machine, + SCI_BASE_REQUEST_STATE_COMPLETED); break; default: @@ -1307,9 +1482,8 @@ static enum sci_status scic_sds_smp_request_await_response_tc_completion_handler SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR ); - sci_base_state_machine_change_state( - &sci_req->state_machine, - SCI_BASE_REQUEST_STATE_COMPLETED); + sci_base_state_machine_change_state(&sci_req->state_machine, + SCI_BASE_REQUEST_STATE_COMPLETED); break; } @@ -1365,7 +1539,7 @@ scic_sds_smp_request_await_response_frame_handler(struct scic_sds_request *sci_r sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS); sci_base_state_machine_change_state(&sci_req->state_machine, - SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_TC_COMPLETION); + SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_TC_COMPLETION); } else { /* This was not a response frame why did it get forwarded? */ dev_err(scic_to_dev(sci_req->owning_controller), @@ -1378,46 +1552,921 @@ scic_sds_smp_request_await_response_frame_handler(struct scic_sds_request *sci_r scic_sds_request_set_status( sci_req, - SCU_TASK_DONE_SMP_FRM_TYPE_ERR, - SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); + SCU_TASK_DONE_SMP_FRM_TYPE_ERR, + SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); + + sci_base_state_machine_change_state(&sci_req->state_machine, + SCI_BASE_REQUEST_STATE_COMPLETED); + } + + scic_sds_controller_release_frame(sci_req->owning_controller, + frame_index); + + return SCI_SUCCESS; +} + +/** + * This method processes the completions transport layer (TL) status to + * determine if the SMP request was sent successfully. If the SMP request + * was sent successfully, then the state for the SMP request transits to + * waiting for a response frame. + * @sci_req: This parameter specifies the request for which the TC + * completion was received. + * @completion_code: This parameter indicates the completion status information + * for the TC. + * + * Indicate if the tc completion handler was successful. SCI_SUCCESS currently + * this method always returns success. 
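All of the tc_completion handlers in these hunks share one shape: decode the transport-layer status out of the hardware completion code, record a (SCU status, SCI status) pair on the request, and then move the request state machine, either to the next wait state on SCU_TASK_DONE_GOOD or straight to COMPLETED on anything else. The standalone C sketch below models only that shape; the type and enum names (request, request_state, tl_status) are invented for the illustration and are not the driver's.

#include <stdio.h>

enum request_state { REQ_STARTED, REQ_AWAIT_RESPONSE, REQ_COMPLETED };
enum tl_status     { TL_DONE_GOOD, TL_NAK_RECEIVED, TL_OTHER_ERROR };

struct request {
        enum request_state state;
        int scu_status;         /* raw hardware status kept for later inspection */
        int sci_status;         /* driver-level status reported to the submitter */
};

/* Mirror of the "set status, then change state" idiom used by the
 * tc_completion handlers: a good completion advances to the next wait
 * state, everything else completes the I/O with an error status.
 */
static void tc_completion(struct request *req, enum tl_status tl)
{
        switch (tl) {
        case TL_DONE_GOOD:
                req->scu_status = 0;
                req->sci_status = 0;
                req->state = REQ_AWAIT_RESPONSE;
                break;
        default:
                req->scu_status = (int)tl;
                req->sci_status = -1;   /* controller-specific I/O error */
                req->state = REQ_COMPLETED;
                break;
        }
}

int main(void)
{
        struct request req = { .state = REQ_STARTED };

        tc_completion(&req, TL_DONE_GOOD);
        printf("state after good TC: %d\n", req.state);
        tc_completion(&req, TL_NAK_RECEIVED);
        printf("state after NAK:     %d\n", req.state);
        return 0;
}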
+ */ +static enum sci_status scic_sds_smp_request_await_tc_completion_tc_completion_handler( + struct scic_sds_request *sci_req, + u32 completion_code) +{ + switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): + scic_sds_request_set_status(sci_req, SCU_TASK_DONE_GOOD, + SCI_SUCCESS); + + sci_base_state_machine_change_state(&sci_req->state_machine, + SCI_BASE_REQUEST_STATE_COMPLETED); + break; + + default: + /* + * All other completion status cause the IO to be complete. If a NAK + * was received, then it is up to the user to retry the request. */ + scic_sds_request_set_status( + sci_req, + SCU_NORMALIZE_COMPLETION_STATUS(completion_code), + SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR + ); + + sci_base_state_machine_change_state( + &sci_req->state_machine, + SCI_BASE_REQUEST_STATE_COMPLETED); + break; + } + + return SCI_SUCCESS; +} + +void scic_stp_io_request_set_ncq_tag(struct scic_sds_request *req, + u16 ncq_tag) +{ + /** + * @note This could be made to return an error to the user if the user + * attempts to set the NCQ tag in the wrong state. + */ + req->task_context_buffer->type.stp.ncq_tag = ncq_tag; +} + +/** + * + * @sci_req: + * + * Get the next SGL element from the request. - Check on which SGL element pair + * we are working - if working on SLG pair element A - advance to element B - + * else - check to see if there are more SGL element pairs for this IO request + * - if there are more SGL element pairs - advance to the next pair and return + * element A struct scu_sgl_element* + */ +static struct scu_sgl_element *scic_sds_stp_request_pio_get_next_sgl(struct scic_sds_stp_request *stp_req) +{ + struct scu_sgl_element *current_sgl; + struct scic_sds_request *sci_req = to_sci_req(stp_req); + struct scic_sds_request_pio_sgl *pio_sgl = &stp_req->type.pio.request_current; + + if (pio_sgl->sgl_set == SCU_SGL_ELEMENT_PAIR_A) { + if (pio_sgl->sgl_pair->B.address_lower == 0 && + pio_sgl->sgl_pair->B.address_upper == 0) { + current_sgl = NULL; + } else { + pio_sgl->sgl_set = SCU_SGL_ELEMENT_PAIR_B; + current_sgl = &pio_sgl->sgl_pair->B; + } + } else { + if (pio_sgl->sgl_pair->next_pair_lower == 0 && + pio_sgl->sgl_pair->next_pair_upper == 0) { + current_sgl = NULL; + } else { + u64 phys_addr; + + phys_addr = pio_sgl->sgl_pair->next_pair_upper; + phys_addr <<= 32; + phys_addr |= pio_sgl->sgl_pair->next_pair_lower; + + pio_sgl->sgl_pair = scic_request_get_virt_addr(sci_req, phys_addr); + pio_sgl->sgl_set = SCU_SGL_ELEMENT_PAIR_A; + current_sgl = &pio_sgl->sgl_pair->A; + } + } + + return current_sgl; +} + +/** + * + * @sci_req: + * @completion_code: + * + * This method processes a TC completion. The expected TC completion is for + * the transmission of the H2D register FIS containing the SATA/STP non-data + * request. This method always successfully processes the TC completion. + * SCI_SUCCESS This value is always returned. + */ +static enum sci_status scic_sds_stp_request_non_data_await_h2d_tc_completion_handler( + struct scic_sds_request *sci_req, + u32 completion_code) +{ + switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): + scic_sds_request_set_status( + sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS + ); + + sci_base_state_machine_change_state( + &sci_req->state_machine, + SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_D2H_SUBSTATE + ); + break; + + default: + /* + * All other completion status cause the IO to be complete. 
If a NAK + * was received, then it is up to the user to retry the request. */ + scic_sds_request_set_status( + sci_req, + SCU_NORMALIZE_COMPLETION_STATUS(completion_code), + SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR + ); + + sci_base_state_machine_change_state( + &sci_req->state_machine, SCI_BASE_REQUEST_STATE_COMPLETED); + break; + } + + return SCI_SUCCESS; +} + +/** + * + * @request: This parameter specifies the request for which a frame has been + * received. + * @frame_index: This parameter specifies the index of the frame that has been + * received. + * + * This method processes frames received from the target while waiting for a + * device to host register FIS. If a non-register FIS is received during this + * time, it is treated as a protocol violation from an IO perspective. Indicate + * if the received frame was processed successfully. + */ +static enum sci_status scic_sds_stp_request_non_data_await_d2h_frame_handler( + struct scic_sds_request *sci_req, + u32 frame_index) +{ + enum sci_status status; + struct dev_to_host_fis *frame_header; + u32 *frame_buffer; + struct scic_sds_stp_request *stp_req = &sci_req->stp.req; + struct scic_sds_controller *scic = sci_req->owning_controller; + + status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control, + frame_index, + (void **)&frame_header); + + if (status != SCI_SUCCESS) { + dev_err(scic_to_dev(sci_req->owning_controller), + "%s: SCIC IO Request 0x%p could not get frame header " + "for frame index %d, status %x\n", + __func__, stp_req, frame_index, status); + + return status; + } + + switch (frame_header->fis_type) { + case FIS_REGD2H: + scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control, + frame_index, + (void **)&frame_buffer); + + scic_sds_controller_copy_sata_response(&sci_req->stp.rsp, + frame_header, + frame_buffer); + + /* The command has completed with error */ + scic_sds_request_set_status(sci_req, SCU_TASK_DONE_CHECK_RESPONSE, + SCI_FAILURE_IO_RESPONSE_VALID); + break; + + default: + dev_warn(scic_to_dev(scic), + "%s: IO Request:0x%p Frame Id:%d protocol " + "violation occurred\n", __func__, stp_req, + frame_index); + + scic_sds_request_set_status(sci_req, SCU_TASK_DONE_UNEXP_FIS, + SCI_FAILURE_PROTOCOL_VIOLATION); + break; + } + + sci_base_state_machine_change_state(&sci_req->state_machine, + SCI_BASE_REQUEST_STATE_COMPLETED); + + /* Frame has been decoded return it to the controller */ + scic_sds_controller_release_frame(scic, frame_index); + + return status; +} + +#define SCU_MAX_FRAME_BUFFER_SIZE 0x400 /* 1K is the maximum SCU frame data payload */ + +/* transmit DATA_FIS from (current sgl + offset) for input + * parameter length. 
current sgl and offset is alreay stored in the IO request + */ +static enum sci_status scic_sds_stp_request_pio_data_out_trasmit_data_frame( + struct scic_sds_request *sci_req, + u32 length) +{ + struct scic_sds_controller *scic = sci_req->owning_controller; + struct scic_sds_stp_request *stp_req = &sci_req->stp.req; + struct scu_task_context *task_context; + struct scu_sgl_element *current_sgl; + + /* Recycle the TC and reconstruct it for sending out DATA FIS containing + * for the data from current_sgl+offset for the input length + */ + task_context = scic_sds_controller_get_task_context_buffer(scic, + sci_req->io_tag); + + if (stp_req->type.pio.request_current.sgl_set == SCU_SGL_ELEMENT_PAIR_A) + current_sgl = &stp_req->type.pio.request_current.sgl_pair->A; + else + current_sgl = &stp_req->type.pio.request_current.sgl_pair->B; + + /* update the TC */ + task_context->command_iu_upper = current_sgl->address_upper; + task_context->command_iu_lower = current_sgl->address_lower; + task_context->transfer_length_bytes = length; + task_context->type.stp.fis_type = FIS_DATA; + + /* send the new TC out. */ + return scic_controller_continue_io(sci_req); +} + +static enum sci_status scic_sds_stp_request_pio_data_out_transmit_data(struct scic_sds_request *sci_req) +{ + + struct scu_sgl_element *current_sgl; + u32 sgl_offset; + u32 remaining_bytes_in_current_sgl = 0; + enum sci_status status = SCI_SUCCESS; + struct scic_sds_stp_request *stp_req = &sci_req->stp.req; + + sgl_offset = stp_req->type.pio.request_current.sgl_offset; + + if (stp_req->type.pio.request_current.sgl_set == SCU_SGL_ELEMENT_PAIR_A) { + current_sgl = &(stp_req->type.pio.request_current.sgl_pair->A); + remaining_bytes_in_current_sgl = stp_req->type.pio.request_current.sgl_pair->A.length - sgl_offset; + } else { + current_sgl = &(stp_req->type.pio.request_current.sgl_pair->B); + remaining_bytes_in_current_sgl = stp_req->type.pio.request_current.sgl_pair->B.length - sgl_offset; + } + + + if (stp_req->type.pio.pio_transfer_bytes > 0) { + if (stp_req->type.pio.pio_transfer_bytes >= remaining_bytes_in_current_sgl) { + /* recycle the TC and send the H2D Data FIS from (current sgl + sgl_offset) and length = remaining_bytes_in_current_sgl */ + status = scic_sds_stp_request_pio_data_out_trasmit_data_frame(sci_req, remaining_bytes_in_current_sgl); + if (status == SCI_SUCCESS) { + stp_req->type.pio.pio_transfer_bytes -= remaining_bytes_in_current_sgl; + + /* update the current sgl, sgl_offset and save for future */ + current_sgl = scic_sds_stp_request_pio_get_next_sgl(stp_req); + sgl_offset = 0; + } + } else if (stp_req->type.pio.pio_transfer_bytes < remaining_bytes_in_current_sgl) { + /* recycle the TC and send the H2D Data FIS from (current sgl + sgl_offset) and length = type.pio.pio_transfer_bytes */ + scic_sds_stp_request_pio_data_out_trasmit_data_frame(sci_req, stp_req->type.pio.pio_transfer_bytes); + + if (status == SCI_SUCCESS) { + /* Sgl offset will be adjusted and saved for future */ + sgl_offset += stp_req->type.pio.pio_transfer_bytes; + current_sgl->address_lower += stp_req->type.pio.pio_transfer_bytes; + stp_req->type.pio.pio_transfer_bytes = 0; + } + } + } + + if (status == SCI_SUCCESS) { + stp_req->type.pio.request_current.sgl_offset = sgl_offset; + } + + return status; +} + +/** + * + * @stp_request: The request that is used for the SGL processing. + * @data_buffer: The buffer of data to be copied. + * @length: The length of the data transfer. 
+ * + * Copy the data from the buffer for the length specified to the IO reqeust SGL + * specified data region. enum sci_status + */ +static enum sci_status +scic_sds_stp_request_pio_data_in_copy_data_buffer(struct scic_sds_stp_request *stp_req, + u8 *data_buf, u32 len) +{ + struct scic_sds_request *sci_req; + struct isci_request *ireq; + u8 *src_addr; + int copy_len; + struct sas_task *task; + struct scatterlist *sg; + void *kaddr; + int total_len = len; + + sci_req = to_sci_req(stp_req); + ireq = sci_req_to_ireq(sci_req); + task = isci_request_access_task(ireq); + src_addr = data_buf; + + if (task->num_scatter > 0) { + sg = task->scatter; + + while (total_len > 0) { + struct page *page = sg_page(sg); + + copy_len = min_t(int, total_len, sg_dma_len(sg)); + kaddr = kmap_atomic(page, KM_IRQ0); + memcpy(kaddr + sg->offset, src_addr, copy_len); + kunmap_atomic(kaddr, KM_IRQ0); + total_len -= copy_len; + src_addr += copy_len; + sg = sg_next(sg); + } + } else { + BUG_ON(task->total_xfer_len < total_len); + memcpy(task->scatter, src_addr, total_len); + } + + return SCI_SUCCESS; +} + +/** + * + * @sci_req: The PIO DATA IN request that is to receive the data. + * @data_buffer: The buffer to copy from. + * + * Copy the data buffer to the io request data region. enum sci_status + */ +static enum sci_status scic_sds_stp_request_pio_data_in_copy_data( + struct scic_sds_stp_request *sci_req, + u8 *data_buffer) +{ + enum sci_status status; + + /* + * If there is less than 1K remaining in the transfer request + * copy just the data for the transfer */ + if (sci_req->type.pio.pio_transfer_bytes < SCU_MAX_FRAME_BUFFER_SIZE) { + status = scic_sds_stp_request_pio_data_in_copy_data_buffer( + sci_req, data_buffer, sci_req->type.pio.pio_transfer_bytes); + + if (status == SCI_SUCCESS) + sci_req->type.pio.pio_transfer_bytes = 0; + } else { + /* We are transfering the whole frame so copy */ + status = scic_sds_stp_request_pio_data_in_copy_data_buffer( + sci_req, data_buffer, SCU_MAX_FRAME_BUFFER_SIZE); + + if (status == SCI_SUCCESS) + sci_req->type.pio.pio_transfer_bytes -= SCU_MAX_FRAME_BUFFER_SIZE; + } + + return status; +} + +/** + * + * @sci_req: + * @completion_code: + * + * enum sci_status + */ +static enum sci_status scic_sds_stp_request_pio_await_h2d_completion_tc_completion_handler( + struct scic_sds_request *sci_req, + u32 completion_code) +{ + enum sci_status status = SCI_SUCCESS; + + switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): + scic_sds_request_set_status( + sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS + ); + + sci_base_state_machine_change_state( + &sci_req->state_machine, + SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE + ); + break; + + default: + /* + * All other completion status cause the IO to be complete. If a NAK + * was received, then it is up to the user to retry the request. 
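The copy helper above walks the sas_task scatterlist and memcpy()s received PIO data into it one segment at a time, never more than the 1K SCU frame payload per frame. Below is a self-contained userspace model of that walk using a plain segment array in place of struct scatterlist; the names seg and sg_copy_from_buf are made up for the sketch, and the kmap_atomic() step the real driver needs for highmem pages is omitted.

#include <string.h>
#include <stdio.h>

#define MAX_FRAME_PAYLOAD 0x400         /* 1K, as in SCU_MAX_FRAME_BUFFER_SIZE */

struct seg {
        unsigned char *buf;
        size_t len;
};

/* Copy 'len' bytes from 'src' across the segment list, advancing to the
 * next segment whenever the current one fills up -- roughly the walk the
 * PIO data-in copy performs over the task's scatterlist.
 */
static void sg_copy_from_buf(struct seg *sg, size_t nseg,
                             const unsigned char *src, size_t len)
{
        size_t i = 0, off = 0;

        while (len && i < nseg) {
                size_t chunk = sg[i].len - off;

                if (chunk > len)
                        chunk = len;
                memcpy(sg[i].buf + off, src, chunk);
                src += chunk;
                len -= chunk;
                off += chunk;
                if (off == sg[i].len) {
                        i++;
                        off = 0;
                }
        }
}

int main(void)
{
        unsigned char a[3], b[5], frame[MAX_FRAME_PAYLOAD] = "abcdefg";
        struct seg sg[] = { { a, sizeof(a) }, { b, sizeof(b) } };

        sg_copy_from_buf(sg, 2, frame, 8);      /* "abc" lands in a, "defg" in b */
        printf("%.3s %.5s\n", a, b);
        return 0;
}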
*/ + scic_sds_request_set_status( + sci_req, + SCU_NORMALIZE_COMPLETION_STATUS(completion_code), + SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR + ); + + sci_base_state_machine_change_state( + &sci_req->state_machine, + SCI_BASE_REQUEST_STATE_COMPLETED + ); + break; + } + + return status; +} + +static enum sci_status scic_sds_stp_request_pio_await_frame_frame_handler(struct scic_sds_request *sci_req, + u32 frame_index) +{ + struct scic_sds_controller *scic = sci_req->owning_controller; + struct scic_sds_stp_request *stp_req = &sci_req->stp.req; + struct isci_request *ireq = sci_req_to_ireq(sci_req); + struct sas_task *task = isci_request_access_task(ireq); + struct dev_to_host_fis *frame_header; + enum sci_status status; + u32 *frame_buffer; + + status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control, + frame_index, + (void **)&frame_header); + + if (status != SCI_SUCCESS) { + dev_err(scic_to_dev(scic), + "%s: SCIC IO Request 0x%p could not get frame header " + "for frame index %d, status %x\n", + __func__, stp_req, frame_index, status); + return status; + } + + switch (frame_header->fis_type) { + case FIS_PIO_SETUP: + /* Get from the frame buffer the PIO Setup Data */ + scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control, + frame_index, + (void **)&frame_buffer); + + /* Get the data from the PIO Setup The SCU Hardware returns + * first word in the frame_header and the rest of the data is in + * the frame buffer so we need to back up one dword + */ + + /* transfer_count: first 16bits in the 4th dword */ + stp_req->type.pio.pio_transfer_bytes = frame_buffer[3] & 0xffff; + + /* ending_status: 4th byte in the 3rd dword */ + stp_req->type.pio.ending_status = (frame_buffer[2] >> 24) & 0xff; + + scic_sds_controller_copy_sata_response(&sci_req->stp.rsp, + frame_header, + frame_buffer); + + sci_req->stp.rsp.status = stp_req->type.pio.ending_status; + + /* The next state is dependent on whether the + * request was PIO Data-in or Data out + */ + if (task->data_dir == DMA_FROM_DEVICE) { + sci_base_state_machine_change_state(&sci_req->state_machine, + SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_DATA_SUBSTATE); + } else if (task->data_dir == DMA_TO_DEVICE) { + /* Transmit data */ + status = scic_sds_stp_request_pio_data_out_transmit_data(sci_req); + if (status != SCI_SUCCESS) + break; + sci_base_state_machine_change_state(&sci_req->state_machine, + SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_OUT_TRANSMIT_DATA_SUBSTATE); + } + break; + case FIS_SETDEVBITS: + sci_base_state_machine_change_state(&sci_req->state_machine, + SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE); + break; + case FIS_REGD2H: + if (frame_header->status & ATA_BUSY) { + /* Now why is the drive sending a D2H Register FIS when + * it is still busy? Do nothing since we are still in + * the right state. + */ + dev_dbg(scic_to_dev(scic), + "%s: SCIC PIO Request 0x%p received " + "D2H Register FIS with BSY status " + "0x%x\n", __func__, stp_req, + frame_header->status); + break; + } + + scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control, + frame_index, + (void **)&frame_buffer); + + scic_sds_controller_copy_sata_response(&sci_req->stp.req, + frame_header, + frame_buffer); + + scic_sds_request_set_status(sci_req, + SCU_TASK_DONE_CHECK_RESPONSE, + SCI_FAILURE_IO_RESPONSE_VALID); + + sci_base_state_machine_change_state(&sci_req->state_machine, + SCI_BASE_REQUEST_STATE_COMPLETED); + break; + default: + /* FIXME: what do we do here? 
*/ + break; + } + + /* Frame is decoded return it to the controller */ + scic_sds_controller_release_frame(scic, frame_index); + + return status; +} + +static enum sci_status scic_sds_stp_request_pio_data_in_await_data_frame_handler(struct scic_sds_request *sci_req, + u32 frame_index) +{ + enum sci_status status; + struct dev_to_host_fis *frame_header; + struct sata_fis_data *frame_buffer; + struct scic_sds_stp_request *stp_req = &sci_req->stp.req; + struct scic_sds_controller *scic = sci_req->owning_controller; + + status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control, + frame_index, + (void **)&frame_header); + + if (status != SCI_SUCCESS) { + dev_err(scic_to_dev(scic), + "%s: SCIC IO Request 0x%p could not get frame header " + "for frame index %d, status %x\n", + __func__, stp_req, frame_index, status); + return status; + } + + if (frame_header->fis_type == FIS_DATA) { + if (stp_req->type.pio.request_current.sgl_pair == NULL) { + sci_req->saved_rx_frame_index = frame_index; + stp_req->type.pio.pio_transfer_bytes = 0; + } else { + scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control, + frame_index, + (void **)&frame_buffer); + + status = scic_sds_stp_request_pio_data_in_copy_data(stp_req, + (u8 *)frame_buffer); + + /* Frame is decoded return it to the controller */ + scic_sds_controller_release_frame(scic, frame_index); + } + + /* Check for the end of the transfer, are there more + * bytes remaining for this data transfer + */ + if (status != SCI_SUCCESS || + stp_req->type.pio.pio_transfer_bytes != 0) + return status; + + if ((stp_req->type.pio.ending_status & ATA_BUSY) == 0) { + scic_sds_request_set_status(sci_req, + SCU_TASK_DONE_CHECK_RESPONSE, + SCI_FAILURE_IO_RESPONSE_VALID); + + sci_base_state_machine_change_state(&sci_req->state_machine, + SCI_BASE_REQUEST_STATE_COMPLETED); + } else { + sci_base_state_machine_change_state(&sci_req->state_machine, + SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE); + } + } else { + dev_err(scic_to_dev(scic), + "%s: SCIC PIO Request 0x%p received frame %d " + "with fis type 0x%02x when expecting a data " + "fis.\n", __func__, stp_req, frame_index, + frame_header->fis_type); + + scic_sds_request_set_status(sci_req, + SCU_TASK_DONE_GOOD, + SCI_FAILURE_IO_REQUIRES_SCSI_ABORT); + + sci_base_state_machine_change_state(&sci_req->state_machine, + SCI_BASE_REQUEST_STATE_COMPLETED); + + /* Frame is decoded return it to the controller */ + scic_sds_controller_release_frame(scic, frame_index); + } + + return status; +} + + +/** + * + * @sci_req: + * @completion_code: + * + * enum sci_status + */ +static enum sci_status scic_sds_stp_request_pio_data_out_await_data_transmit_completion_tc_completion_handler( + + struct scic_sds_request *sci_req, + u32 completion_code) +{ + enum sci_status status = SCI_SUCCESS; + bool all_frames_transferred = false; + struct scic_sds_stp_request *stp_req = &sci_req->stp.req; + + switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): + /* Transmit data */ + if (stp_req->type.pio.pio_transfer_bytes != 0) { + status = scic_sds_stp_request_pio_data_out_transmit_data(sci_req); + if (status == SCI_SUCCESS) { + if (stp_req->type.pio.pio_transfer_bytes == 0) + all_frames_transferred = true; + } + } else if (stp_req->type.pio.pio_transfer_bytes == 0) { + /* + * this will happen if the all data is written at the + * first time after the pio setup fis is received + */ + all_frames_transferred = true; + } + + /* all data transferred. 
*/ + if (all_frames_transferred) { + /* + * Change the state to SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_FRAME_SUBSTATE + * and wait for PIO_SETUP fis / or D2H REg fis. */ + sci_base_state_machine_change_state( + &sci_req->state_machine, + SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE + ); + } + break; + + default: + /* + * All other completion status cause the IO to be complete. If a NAK + * was received, then it is up to the user to retry the request. */ + scic_sds_request_set_status( + sci_req, + SCU_NORMALIZE_COMPLETION_STATUS(completion_code), + SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR + ); + + sci_base_state_machine_change_state( + &sci_req->state_machine, + SCI_BASE_REQUEST_STATE_COMPLETED + ); + break; + } + + return status; +} + +/** + * + * @request: This is the request which is receiving the event. + * @event_code: This is the event code that the request on which the request is + * expected to take action. + * + * This method will handle any link layer events while waiting for the data + * frame. enum sci_status SCI_SUCCESS SCI_FAILURE + */ +static enum sci_status scic_sds_stp_request_pio_data_in_await_data_event_handler( + struct scic_sds_request *request, + u32 event_code) +{ + enum sci_status status; + + switch (scu_get_event_specifier(event_code)) { + case SCU_TASK_DONE_CRC_ERR << SCU_EVENT_SPECIFIC_CODE_SHIFT: + /* + * We are waiting for data and the SCU has R_ERR the data frame. + * Go back to waiting for the D2H Register FIS */ + sci_base_state_machine_change_state( + &request->state_machine, + SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE + ); + + status = SCI_SUCCESS; + break; + + default: + dev_err(scic_to_dev(request->owning_controller), + "%s: SCIC PIO Request 0x%p received unexpected " + "event 0x%08x\n", + __func__, request, event_code); + + /* / @todo Should we fail the PIO request when we get an unexpected event? 
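The data-out handlers above send one DATA FIS per task context, sized to whichever is smaller: the bytes left in the current SGL element or the bytes still owed for the PIO Setup, and they treat the transfer as finished once the PIO byte count reaches zero. A small model of that accounting follows; struct pio_xfer and next_data_fis_len() are illustrative names only.

#include <stdio.h>

struct pio_xfer {
        unsigned int transfer_bytes;    /* bytes requested by the PIO Setup FIS */
        unsigned int sgl_remaining;     /* bytes left in the current SGL element */
};

/* Return the payload length for the next DATA FIS and update the
 * bookkeeping: consume the rest of the current SGL element when the
 * request still needs at least that much, otherwise send only what the
 * PIO Setup asked for.
 */
static unsigned int next_data_fis_len(struct pio_xfer *x)
{
        unsigned int len;

        if (x->transfer_bytes >= x->sgl_remaining)
                len = x->sgl_remaining; /* advance to the next SGL element after this */
        else
                len = x->transfer_bytes;

        x->transfer_bytes -= len;
        x->sgl_remaining -= len;
        return len;
}

int main(void)
{
        struct pio_xfer x = { .transfer_bytes = 0x600, .sgl_remaining = 0x400 };

        while (x.transfer_bytes) {
                unsigned int len = next_data_fis_len(&x);

                printf("DATA FIS of 0x%x bytes, 0x%x remaining\n",
                       len, x.transfer_bytes);
                if (x.sgl_remaining == 0)
                        x.sgl_remaining = 0x400;        /* pretend next SGL element */
        }
        return 0;
}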
*/ + status = SCI_FAILURE; + break; + } + + return status; +} + +static void scic_sds_stp_request_udma_complete_request( + struct scic_sds_request *request, + u32 scu_status, + enum sci_status sci_status) +{ + scic_sds_request_set_status(request, scu_status, sci_status); + sci_base_state_machine_change_state(&request->state_machine, + SCI_BASE_REQUEST_STATE_COMPLETED); +} + +static enum sci_status scic_sds_stp_request_udma_general_frame_handler(struct scic_sds_request *sci_req, + u32 frame_index) +{ + struct scic_sds_controller *scic = sci_req->owning_controller; + struct dev_to_host_fis *frame_header; + enum sci_status status; + u32 *frame_buffer; + + status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control, + frame_index, + (void **)&frame_header); + + if ((status == SCI_SUCCESS) && + (frame_header->fis_type == FIS_REGD2H)) { + scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control, + frame_index, + (void **)&frame_buffer); + + scic_sds_controller_copy_sata_response(&sci_req->stp.rsp, + frame_header, + frame_buffer); + } + + scic_sds_controller_release_frame(scic, frame_index); + + return status; +} + +static enum sci_status scic_sds_stp_request_udma_await_tc_completion_tc_completion_handler( + struct scic_sds_request *sci_req, + u32 completion_code) +{ + enum sci_status status = SCI_SUCCESS; + + switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): + scic_sds_stp_request_udma_complete_request(sci_req, + SCU_TASK_DONE_GOOD, + SCI_SUCCESS); + break; + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_FIS): + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR): + /* + * We must check ther response buffer to see if the D2H Register FIS was + * received before we got the TC completion. */ + if (sci_req->stp.rsp.fis_type == FIS_REGD2H) { + scic_sds_remote_device_suspend(sci_req->target_device, + SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code))); + + scic_sds_stp_request_udma_complete_request(sci_req, + SCU_TASK_DONE_CHECK_RESPONSE, + SCI_FAILURE_IO_RESPONSE_VALID); + } else { + /* + * If we have an error completion status for the TC then we can expect a + * D2H register FIS from the device so we must change state to wait for it */ + sci_base_state_machine_change_state(&sci_req->state_machine, + SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_D2H_REG_FIS_SUBSTATE); + } + break; + + /* + * / @todo Check to see if any of these completion status need to wait for + * / the device to host register fis. */ + /* / @todo We can retry the command for SCU_TASK_DONE_CMD_LL_R_ERR - this comes only for B0 */ + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_INV_FIS_LEN): + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_MAX_PLD_ERR): + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_R_ERR): + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CMD_LL_R_ERR): + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CRC_ERR): + scic_sds_remote_device_suspend(sci_req->target_device, + SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code))); + /* Fall through to the default case */ + default: + /* All other completion status cause the IO to be complete. 
*/ + scic_sds_stp_request_udma_complete_request(sci_req, + SCU_NORMALIZE_COMPLETION_STATUS(completion_code), + SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); + break; + } + + return status; +} + +static enum sci_status scic_sds_stp_request_udma_await_d2h_reg_fis_frame_handler( + struct scic_sds_request *sci_req, + u32 frame_index) +{ + enum sci_status status; + + /* Use the general frame handler to copy the resposne data */ + status = scic_sds_stp_request_udma_general_frame_handler(sci_req, frame_index); + + if (status != SCI_SUCCESS) + return status; + + scic_sds_stp_request_udma_complete_request(sci_req, + SCU_TASK_DONE_CHECK_RESPONSE, + SCI_FAILURE_IO_RESPONSE_VALID); + + return status; +} + +enum sci_status scic_sds_stp_udma_request_construct(struct scic_sds_request *sci_req, + u32 len, + enum dma_data_direction dir) +{ + return SCI_SUCCESS; +} + +/** + * + * @sci_req: + * @completion_code: + * + * This method processes a TC completion. The expected TC completion is for + * the transmission of the H2D register FIS containing the SATA/STP non-data + * request. This method always successfully processes the TC completion. + * SCI_SUCCESS This value is always returned. + */ +static enum sci_status scic_sds_stp_request_soft_reset_await_h2d_asserted_tc_completion_handler( + struct scic_sds_request *sci_req, + u32 completion_code) +{ + switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): + scic_sds_request_set_status( + sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS + ); + + sci_base_state_machine_change_state( + &sci_req->state_machine, + SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_DIAGNOSTIC_COMPLETION_SUBSTATE + ); + break; + + default: + /* + * All other completion status cause the IO to be complete. If a NAK + * was received, then it is up to the user to retry the request. */ + scic_sds_request_set_status( + sci_req, + SCU_NORMALIZE_COMPLETION_STATUS(completion_code), + SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR + ); sci_base_state_machine_change_state( - &sci_req->state_machine, - SCI_BASE_REQUEST_STATE_COMPLETED); + &sci_req->state_machine, SCI_BASE_REQUEST_STATE_COMPLETED); + break; } - scic_sds_controller_release_frame(sci_req->owning_controller, - frame_index); - return SCI_SUCCESS; } /** - * This method processes the completions transport layer (TL) status to - * determine if the SMP request was sent successfully. If the SMP request - * was sent successfully, then the state for the SMP request transits to - * waiting for a response frame. - * @sci_req: This parameter specifies the request for which the TC - * completion was received. - * @completion_code: This parameter indicates the completion status information - * for the TC. * - * Indicate if the tc completion handler was successful. SCI_SUCCESS currently - * this method always returns success. + * @sci_req: + * @completion_code: + * + * This method processes a TC completion. The expected TC completion is for + * the transmission of the H2D register FIS containing the SATA/STP non-data + * request. This method always successfully processes the TC completion. + * SCI_SUCCESS This value is always returned. 
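On an UNEXP_FIS or REG_ERR completion the UDMA handler above consults the saved response FIS: if a D2H Register FIS was already copied into the response buffer the I/O can complete immediately with a check-response status, otherwise the request has to move to the substate that waits for that FIS. A compact model of just that decision; udma_error_path and the action enum are stand-ins, though 0x34 is the standard SATA Register Device-to-Host FIS type.

#include <stdio.h>

#define FIS_REGD2H 0x34         /* SATA Register Device-to-Host FIS type */

enum udma_error_action {
        COMPLETE_CHECK_RESPONSE,        /* D2H FIS already captured: finish now */
        AWAIT_D2H_REG_FIS,              /* error TC arrived first: wait for the FIS */
};

/* Models the UNEXP_FIS / REG_ERR branch of the UDMA TC-completion
 * handler: the presence of a captured D2H Register FIS decides whether
 * the request can be completed right away.
 */
static enum udma_error_action udma_error_path(unsigned char rsp_fis_type)
{
        return rsp_fis_type == FIS_REGD2H ? COMPLETE_CHECK_RESPONSE
                                          : AWAIT_D2H_REG_FIS;
}

int main(void)
{
        printf("%d %d\n",
               udma_error_path(FIS_REGD2H),     /* 0: complete with check-response */
               udma_error_path(0));             /* 1: wait for the D2H FIS */
        return 0;
}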
*/ -static enum sci_status scic_sds_smp_request_await_tc_completion_tc_completion_handler( +static enum sci_status scic_sds_stp_request_soft_reset_await_h2d_diagnostic_tc_completion_handler( struct scic_sds_request *sci_req, u32 completion_code) { switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): - scic_sds_request_set_status( - sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS - ); + scic_sds_request_set_status(sci_req, SCU_TASK_DONE_GOOD, + SCI_SUCCESS); - sci_base_state_machine_change_state( - &sci_req->state_machine, - SCI_BASE_REQUEST_STATE_COMPLETED); + sci_base_state_machine_change_state(&sci_req->state_machine, + SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_D2H_RESPONSE_FRAME_SUBSTATE); break; default: @@ -1430,15 +2479,83 @@ static enum sci_status scic_sds_smp_request_await_tc_completion_tc_completion_ha SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR ); - sci_base_state_machine_change_state( - &sci_req->state_machine, - SCI_BASE_REQUEST_STATE_COMPLETED); + sci_base_state_machine_change_state(&sci_req->state_machine, + SCI_BASE_REQUEST_STATE_COMPLETED); break; } return SCI_SUCCESS; } +/** + * + * @request: This parameter specifies the request for which a frame has been + * received. + * @frame_index: This parameter specifies the index of the frame that has been + * received. + * + * This method processes frames received from the target while waiting for a + * device to host register FIS. If a non-register FIS is received during this + * time, it is treated as a protocol violation from an IO perspective. Indicate + * if the received frame was processed successfully. + */ +static enum sci_status scic_sds_stp_request_soft_reset_await_d2h_frame_handler( + struct scic_sds_request *sci_req, + u32 frame_index) +{ + enum sci_status status; + struct dev_to_host_fis *frame_header; + u32 *frame_buffer; + struct scic_sds_stp_request *stp_req = &sci_req->stp.req; + struct scic_sds_controller *scic = sci_req->owning_controller; + + status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control, + frame_index, + (void **)&frame_header); + if (status != SCI_SUCCESS) { + dev_err(scic_to_dev(scic), + "%s: SCIC IO Request 0x%p could not get frame header " + "for frame index %d, status %x\n", + __func__, stp_req, frame_index, status); + return status; + } + + switch (frame_header->fis_type) { + case FIS_REGD2H: + scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control, + frame_index, + (void **)&frame_buffer); + + scic_sds_controller_copy_sata_response(&sci_req->stp.rsp, + frame_header, + frame_buffer); + + /* The command has completed with error */ + scic_sds_request_set_status(sci_req, + SCU_TASK_DONE_CHECK_RESPONSE, + SCI_FAILURE_IO_RESPONSE_VALID); + break; + + default: + dev_warn(scic_to_dev(scic), + "%s: IO Request:0x%p Frame Id:%d protocol " + "violation occurred\n", __func__, stp_req, + frame_index); + + scic_sds_request_set_status(sci_req, SCU_TASK_DONE_UNEXP_FIS, + SCI_FAILURE_PROTOCOL_VIOLATION); + break; + } + + sci_base_state_machine_change_state(&sci_req->state_machine, + SCI_BASE_REQUEST_STATE_COMPLETED); + + /* Frame has been decoded return it to the controller */ + scic_sds_controller_release_frame(scic, frame_index); + + return status; +} + static const struct scic_sds_io_request_state_handler scic_sds_request_state_handler_table[] = { [SCI_BASE_REQUEST_STATE_INITIAL] = { }, [SCI_BASE_REQUEST_STATE_CONSTRUCTED] = { @@ -1467,6 +2584,52 @@ static const struct scic_sds_io_request_state_handler scic_sds_request_state_han 
.abort_handler = scic_sds_request_started_state_abort_handler, .tc_completion_handler = scic_sds_smp_request_await_tc_completion_tc_completion_handler, }, + [SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_TC_COMPLETION_SUBSTATE] = { + .abort_handler = scic_sds_request_started_state_abort_handler, + .tc_completion_handler = scic_sds_stp_request_udma_await_tc_completion_tc_completion_handler, + .frame_handler = scic_sds_stp_request_udma_general_frame_handler, + }, + [SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_D2H_REG_FIS_SUBSTATE] = { + .abort_handler = scic_sds_request_started_state_abort_handler, + .frame_handler = scic_sds_stp_request_udma_await_d2h_reg_fis_frame_handler, + }, + [SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_H2D_COMPLETION_SUBSTATE] = { + .abort_handler = scic_sds_request_started_state_abort_handler, + .tc_completion_handler = scic_sds_stp_request_non_data_await_h2d_tc_completion_handler, + }, + [SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_D2H_SUBSTATE] = { + .abort_handler = scic_sds_request_started_state_abort_handler, + .frame_handler = scic_sds_stp_request_non_data_await_d2h_frame_handler, + }, + [SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_H2D_COMPLETION_SUBSTATE] = { + .abort_handler = scic_sds_request_started_state_abort_handler, + .tc_completion_handler = scic_sds_stp_request_pio_await_h2d_completion_tc_completion_handler, + }, + [SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE] = { + .abort_handler = scic_sds_request_started_state_abort_handler, + .frame_handler = scic_sds_stp_request_pio_await_frame_frame_handler + }, + [SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_DATA_SUBSTATE] = { + .abort_handler = scic_sds_request_started_state_abort_handler, + .event_handler = scic_sds_stp_request_pio_data_in_await_data_event_handler, + .frame_handler = scic_sds_stp_request_pio_data_in_await_data_frame_handler + }, + [SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_OUT_TRANSMIT_DATA_SUBSTATE] = { + .abort_handler = scic_sds_request_started_state_abort_handler, + .tc_completion_handler = scic_sds_stp_request_pio_data_out_await_data_transmit_completion_tc_completion_handler, + }, + [SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_ASSERTED_COMPLETION_SUBSTATE] = { + .abort_handler = scic_sds_request_started_state_abort_handler, + .tc_completion_handler = scic_sds_stp_request_soft_reset_await_h2d_asserted_tc_completion_handler, + }, + [SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_DIAGNOSTIC_COMPLETION_SUBSTATE] = { + .abort_handler = scic_sds_request_started_state_abort_handler, + .tc_completion_handler = scic_sds_stp_request_soft_reset_await_h2d_diagnostic_tc_completion_handler, + }, + [SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_D2H_RESPONSE_FRAME_SUBSTATE] = { + .abort_handler = scic_sds_request_started_state_abort_handler, + .frame_handler = scic_sds_stp_request_soft_reset_await_d2h_frame_handler, + }, [SCI_BASE_REQUEST_STATE_COMPLETED] = { .complete_handler = scic_sds_request_completed_state_complete_handler, }, @@ -2210,15 +3373,6 @@ static void scic_sds_request_constructed_state_enter(void *object) ); } -/** - * scic_sds_request_started_state_enter() - - * @object: This parameter specifies the base object for which the state - * transition is occurring. This is cast into a SCIC_SDS_IO_REQUEST object. - * - * This method implements the actions taken when entering the - * SCI_BASE_REQUEST_STATE_STARTED state. If the io request object type is a - * SCSI Task request we must enter the started substate machine. 
none - */ static void scic_sds_request_started_state_enter(void *object) { struct scic_sds_request *sci_req = object; @@ -2238,39 +3392,35 @@ static void scic_sds_request_started_state_enter(void *object) SCI_BASE_REQUEST_STATE_STARTED ); - /* Most of the request state machines have a started substate machine so - * start its execution on the entry to the started state. + /* all unaccelerated request types (non ssp or ncq) handled with + * substates */ - if (sci_req->has_started_substate_machine == true) - sci_base_state_machine_start(&sci_req->started_substate_machine); - if (!task && dev->dev_type == SAS_END_DEV) { sci_base_state_machine_change_state(sm, SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_COMPLETION); + } else if (!task && + (isci_request_access_tmf(ireq)->tmf_code == isci_tmf_sata_srst_high || + isci_request_access_tmf(ireq)->tmf_code == isci_tmf_sata_srst_low)) { + sci_base_state_machine_change_state(sm, + SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_ASSERTED_COMPLETION_SUBSTATE); } else if (task && task->task_proto == SAS_PROTOCOL_SMP) { sci_base_state_machine_change_state(sm, SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_RESPONSE); + } else if (task && sas_protocol_ata(task->task_proto) && + !task->ata_task.use_ncq) { + u32 state; + + if (task->data_dir == DMA_NONE) + state = SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_H2D_COMPLETION_SUBSTATE; + else if (task->ata_task.dma_xfer) + state = SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_TC_COMPLETION_SUBSTATE; + else /* PIO */ + state = SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_H2D_COMPLETION_SUBSTATE; + + sci_base_state_machine_change_state(sm, state); } } -/** - * scic_sds_request_started_state_exit() - - * @object: This parameter specifies the base object for which the state - * transition is occurring. This object is cast into a SCIC_SDS_IO_REQUEST - * object. - * - * This method implements the actions taken when exiting the - * SCI_BASE_REQUEST_STATE_STARTED state. For task requests the action will be - * to stop the started substate machine. 
none - */ -static void scic_sds_request_started_state_exit(void *object) -{ - struct scic_sds_request *sci_req = object; - - if (sci_req->has_started_substate_machine == true) - sci_base_state_machine_stop(&sci_req->started_substate_machine); -} - /** * scic_sds_request_completed_state_enter() - * @object: This parameter specifies the base object for which the state @@ -2392,6 +3542,175 @@ static void scic_sds_smp_request_started_await_tc_completion_substate_enter(void ); } +static void scic_sds_stp_request_started_non_data_await_h2d_completion_enter( + void *object) +{ + struct scic_sds_request *sci_req = object; + + SET_STATE_HANDLER( + sci_req, + scic_sds_request_state_handler_table, + SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_H2D_COMPLETION_SUBSTATE + ); + + scic_sds_remote_device_set_working_request( + sci_req->target_device, sci_req + ); +} + +static void scic_sds_stp_request_started_non_data_await_d2h_enter(void *object) +{ + struct scic_sds_request *sci_req = object; + + SET_STATE_HANDLER( + sci_req, + scic_sds_request_state_handler_table, + SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_D2H_SUBSTATE + ); +} + + + +static void scic_sds_stp_request_started_pio_await_h2d_completion_enter( + void *object) +{ + struct scic_sds_request *sci_req = object; + + SET_STATE_HANDLER( + sci_req, + scic_sds_request_state_handler_table, + SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_H2D_COMPLETION_SUBSTATE + ); + + scic_sds_remote_device_set_working_request( + sci_req->target_device, sci_req); +} + +static void scic_sds_stp_request_started_pio_await_frame_enter(void *object) +{ + struct scic_sds_request *sci_req = object; + + SET_STATE_HANDLER( + sci_req, + scic_sds_request_state_handler_table, + SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE + ); +} + +static void scic_sds_stp_request_started_pio_data_in_await_data_enter( + void *object) +{ + struct scic_sds_request *sci_req = object; + + SET_STATE_HANDLER( + sci_req, + scic_sds_request_state_handler_table, + SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_DATA_SUBSTATE + ); +} + +static void scic_sds_stp_request_started_pio_data_out_transmit_data_enter( + void *object) +{ + struct scic_sds_request *sci_req = object; + + SET_STATE_HANDLER( + sci_req, + scic_sds_request_state_handler_table, + SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_OUT_TRANSMIT_DATA_SUBSTATE + ); +} + + + +static void scic_sds_stp_request_started_udma_await_tc_completion_enter( + void *object) +{ + struct scic_sds_request *sci_req = object; + + SET_STATE_HANDLER( + sci_req, + scic_sds_request_state_handler_table, + SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_TC_COMPLETION_SUBSTATE + ); +} + +/** + * + * + * This state is entered when there is an TC completion failure. The hardware + * received an unexpected condition while processing the IO request and now + * will UF the D2H register FIS to complete the IO. 
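Every substate added here follows the same table-driven pattern: the state's enter routine installs a row of handler function pointers on the request, and the generic TC-completion/frame/abort entry points dispatch through whichever row is installed. The toy program below shows that mechanism in isolation; the state ids and handler names are invented, and the real change-state call also runs exit/enter actions that are not modeled.

#include <stdio.h>

enum state { ST_AWAIT_TC, ST_AWAIT_FRAME, ST_MAX };

struct req;

struct state_handlers {
        void (*tc_completion)(struct req *r);
        void (*frame)(struct req *r, unsigned int frame_index);
};

struct req {
        enum state state;
        const struct state_handlers *handlers;  /* installed on state entry */
};

static void enter_state(struct req *r, enum state s);

static void await_tc_completion(struct req *r)
{
        printf("TC done, now waiting for the response frame\n");
        enter_state(r, ST_AWAIT_FRAME);         /* like a change-state call */
}

static void await_frame_frame(struct req *r, unsigned int frame_index)
{
        printf("frame %u handled in state %d\n", frame_index, r->state);
}

static const struct state_handlers handler_table[ST_MAX] = {
        [ST_AWAIT_TC]    = { .tc_completion = await_tc_completion },
        [ST_AWAIT_FRAME] = { .frame = await_frame_frame },
};

/* Equivalent of the SET_STATE_HANDLER() calls in the enter routines. */
static void enter_state(struct req *r, enum state s)
{
        r->state = s;
        r->handlers = &handler_table[s];
}

int main(void)
{
        struct req r;

        enter_state(&r, ST_AWAIT_TC);
        if (r.handlers->tc_completion)
                r.handlers->tc_completion(&r);
        if (r.handlers->frame)
                r.handlers->frame(&r, 3);
        return 0;
}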
+ */ +static void scic_sds_stp_request_started_udma_await_d2h_reg_fis_enter( + void *object) +{ + struct scic_sds_request *sci_req = object; + + SET_STATE_HANDLER( + sci_req, + scic_sds_request_state_handler_table, + SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_D2H_REG_FIS_SUBSTATE + ); +} + + + +static void scic_sds_stp_request_started_soft_reset_await_h2d_asserted_completion_enter( + void *object) +{ + struct scic_sds_request *sci_req = object; + + SET_STATE_HANDLER( + sci_req, + scic_sds_request_state_handler_table, + SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_ASSERTED_COMPLETION_SUBSTATE + ); + + scic_sds_remote_device_set_working_request( + sci_req->target_device, sci_req + ); +} + +static void scic_sds_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter( + void *object) +{ + struct scic_sds_request *sci_req = object; + struct scu_task_context *task_context; + struct host_to_dev_fis *h2d_fis; + enum sci_status status; + + /* Clear the SRST bit */ + h2d_fis = &sci_req->stp.cmd; + h2d_fis->control = 0; + + /* Clear the TC control bit */ + task_context = scic_sds_controller_get_task_context_buffer( + sci_req->owning_controller, sci_req->io_tag); + task_context->control_frame = 0; + + status = scic_controller_continue_io(sci_req); + if (status == SCI_SUCCESS) { + SET_STATE_HANDLER( + sci_req, + scic_sds_request_state_handler_table, + SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_DIAGNOSTIC_COMPLETION_SUBSTATE + ); + } +} + +static void scic_sds_stp_request_started_soft_reset_await_d2h_response_enter( + void *object) +{ + struct scic_sds_request *sci_req = object; + + SET_STATE_HANDLER( + sci_req, + scic_sds_request_state_handler_table, + SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_D2H_RESPONSE_FRAME_SUBSTATE + ); +} + static const struct sci_base_state scic_sds_request_state_table[] = { [SCI_BASE_REQUEST_STATE_INITIAL] = { .enter_state = scic_sds_request_initial_state_enter, @@ -2401,7 +3720,39 @@ static const struct sci_base_state scic_sds_request_state_table[] = { }, [SCI_BASE_REQUEST_STATE_STARTED] = { .enter_state = scic_sds_request_started_state_enter, - .exit_state = scic_sds_request_started_state_exit + }, + [SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_H2D_COMPLETION_SUBSTATE] = { + .enter_state = scic_sds_stp_request_started_non_data_await_h2d_completion_enter, + }, + [SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_D2H_SUBSTATE] = { + .enter_state = scic_sds_stp_request_started_non_data_await_d2h_enter, + }, + [SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_H2D_COMPLETION_SUBSTATE] = { + .enter_state = scic_sds_stp_request_started_pio_await_h2d_completion_enter, + }, + [SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE] = { + .enter_state = scic_sds_stp_request_started_pio_await_frame_enter, + }, + [SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_DATA_SUBSTATE] = { + .enter_state = scic_sds_stp_request_started_pio_data_in_await_data_enter, + }, + [SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_OUT_TRANSMIT_DATA_SUBSTATE] = { + .enter_state = scic_sds_stp_request_started_pio_data_out_transmit_data_enter, + }, + [SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_TC_COMPLETION_SUBSTATE] = { + .enter_state = scic_sds_stp_request_started_udma_await_tc_completion_enter, + }, + [SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_D2H_REG_FIS_SUBSTATE] = { + .enter_state = scic_sds_stp_request_started_udma_await_d2h_reg_fis_enter, + }, + [SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_ASSERTED_COMPLETION_SUBSTATE] = { + .enter_state = 
scic_sds_stp_request_started_soft_reset_await_h2d_asserted_completion_enter, + }, + [SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_DIAGNOSTIC_COMPLETION_SUBSTATE] = { + .enter_state = scic_sds_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter, + }, + [SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_D2H_RESPONSE_FRAME_SUBSTATE] = { + .enter_state = scic_sds_stp_request_started_soft_reset_await_d2h_response_enter, }, [SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_COMPLETION] = { .enter_state = scic_sds_io_request_started_task_mgmt_await_tc_completion_substate_enter, @@ -2437,7 +3788,6 @@ static void scic_sds_general_request_construct(struct scic_sds_controller *scic, sci_req->io_tag = io_tag; sci_req->owning_controller = scic; sci_req->target_device = sci_dev; - sci_req->has_started_substate_machine = false; sci_req->protocol = SCIC_NO_PROTOCOL; sci_req->saved_rx_frame_index = SCU_INVALID_FRAME_INDEX; sci_req->device_sequence = scic_sds_remote_device_get_sequence(sci_dev); @@ -3065,6 +4415,3 @@ int isci_request_execute( *isci_request = request; return ret; } - - - diff --git a/drivers/scsi/isci/request.h b/drivers/scsi/isci/request.h index d090cb1a14d..95b65891fc4 100644 --- a/drivers/scsi/isci/request.h +++ b/drivers/scsi/isci/request.h @@ -59,7 +59,6 @@ #include "isci.h" #include "host.h" #include "scu_task_context.h" -#include "stp_request.h" /** * struct isci_request_status - This enum defines the possible states of an I/O @@ -90,6 +89,63 @@ enum sci_request_protocol { SCIC_STP_PROTOCOL }; /* XXX remove me, use sas_task.{dev|task_proto} instead */; +struct scic_sds_stp_request { + union { + u32 ncq; + + u32 udma; + + struct scic_sds_stp_pio_request { + /** + * Total transfer for the entire PIO request recorded at request constuction + * time. + * + * @todo Should we just decrement this value for each byte of data transitted + * or received to elemenate the current_transfer_bytes field? + */ + u32 total_transfer_bytes; + + /** + * Total number of bytes received/transmitted in data frames since the start + * of the IO request. At the end of the IO request this should equal the + * total_transfer_bytes. + */ + u32 current_transfer_bytes; + + /** + * The number of bytes requested in the in the PIO setup. + */ + u32 pio_transfer_bytes; + + /** + * PIO Setup ending status value to tell us if we need to wait for another FIS + * or if the transfer is complete. On the receipt of a D2H FIS this will be + * the status field of that FIS. + */ + u8 ending_status; + + /** + * On receipt of a D2H FIS this will be the ending error field if the + * ending_status has the SATA_STATUS_ERR bit set. + */ + u8 ending_error; + + struct scic_sds_request_pio_sgl { + struct scu_sgl_element_pair *sgl_pair; + u8 sgl_set; + u32 sgl_offset; + } request_current; + } pio; + + struct { + /** + * The number of bytes requested in the PIO setup before CDB data frame. + */ + u32 device_preferred_cdb_length; + } packet; + } type; +}; + struct scic_sds_request { /** * This field contains the information for the base request state machine. @@ -158,12 +214,6 @@ struct scic_sds_request { */ bool is_task_management_request; - /** - * This field indicates that this request contains an initialized started - * substate machine. - */ - bool has_started_substate_machine; - /** * This field is a pointer to the stored rx frame data. It is used in STP * internal requests and SMP response frames. 
If this field is non-NULL the @@ -173,12 +223,6 @@ struct scic_sds_request { */ u32 saved_rx_frame_index; - /** - * This field specifies the data necessary to manage the sub-state - * machine executed while in the SCI_BASE_REQUEST_STATE_STARTED state. - */ - struct sci_base_state_machine started_substate_machine; - /** * This field specifies the current state handlers in place for this * IO Request object. This field is updated each time the request @@ -295,6 +339,41 @@ enum sci_base_request_states { */ SCI_BASE_REQUEST_STATE_STARTED, + SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_TC_COMPLETION_SUBSTATE, + SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_D2H_REG_FIS_SUBSTATE, + + SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_H2D_COMPLETION_SUBSTATE, + SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_D2H_SUBSTATE, + + SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_ASSERTED_COMPLETION_SUBSTATE, + SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_DIAGNOSTIC_COMPLETION_SUBSTATE, + SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_D2H_RESPONSE_FRAME_SUBSTATE, + + /** + * While in this state the IO request object is waiting for the TC completion + * notification for the H2D Register FIS + */ + SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_H2D_COMPLETION_SUBSTATE, + + /** + * While in this state the IO request object is waiting for either a PIO Setup + * FIS or a D2H register FIS. The type of frame received is based on the + * result of the prior frame and line conditions. + */ + SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE, + + /** + * While in this state the IO request object is waiting for a DATA frame from + * the device. + */ + SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_DATA_SUBSTATE, + + /** + * While in this state the IO request object is waiting to transmit the next data + * frame to the device. 
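Given the substates declared above, the started-state enter routine picks the first substate purely from the request type: task management, SMP, or, for non-NCQ SATA, the non-data / UDMA / PIO split, while plain SSP I/O and NCQ stay in the generic started state. The pure function below models that routing; the enum and parameter names are simplified stand-ins, and the SATA soft-reset TMF branch is left out for brevity.

#include <stdio.h>
#include <stdbool.h>

enum proto { PROTO_SSP, PROTO_SMP, PROTO_STP };
enum data_dir { DIR_NONE, DIR_IN, DIR_OUT };

enum substate {
        SUB_NONE,               /* stays in the generic STARTED state */
        SUB_TASK_MGMT_AWAIT_TC,
        SUB_SMP_AWAIT_RESPONSE,
        SUB_STP_NON_DATA_AWAIT_H2D,
        SUB_STP_UDMA_AWAIT_TC,
        SUB_STP_PIO_AWAIT_H2D,
};

/* Models the decision tree of the started-state enter routine: task
 * management and SMP get their own substates, non-NCQ SATA splits by
 * non-data / DMA / PIO, and everything else needs no substate.
 */
static enum substate initial_substate(enum proto proto, bool is_tmf,
                                      enum data_dir dir, bool dma, bool ncq)
{
        if (is_tmf)
                return SUB_TASK_MGMT_AWAIT_TC;
        if (proto == PROTO_SMP)
                return SUB_SMP_AWAIT_RESPONSE;
        if (proto == PROTO_STP && !ncq) {
                if (dir == DIR_NONE)
                        return SUB_STP_NON_DATA_AWAIT_H2D;
                return dma ? SUB_STP_UDMA_AWAIT_TC : SUB_STP_PIO_AWAIT_H2D;
        }
        return SUB_NONE;
}

int main(void)
{
        printf("%d\n", initial_substate(PROTO_STP, false, DIR_IN, false, false));
        printf("%d\n", initial_substate(PROTO_SMP, false, DIR_NONE, false, false));
        return 0;
}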
+ */ + SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_OUT_TRANSMIT_DATA_SUBSTATE, + /** * The AWAIT_TC_COMPLETION sub-state indicates that the started raw * task management request is waiting for the transmission of the @@ -383,8 +462,6 @@ struct scic_sds_io_request_state_handler { }; -extern const struct sci_base_state scic_sds_io_request_started_task_mgmt_substate_table[]; - /** * scic_sds_request_get_controller() - * @@ -473,7 +550,6 @@ scic_sds_io_request_tc_completion(struct scic_sds_request *request, u32 completi (scu_sge).address_modifier = 0; \ } -void scic_sds_request_build_sgl(struct scic_sds_request *sci_req); enum sci_status scic_sds_request_start(struct scic_sds_request *sci_req); enum sci_status scic_sds_io_request_terminate(struct scic_sds_request *sci_req); enum sci_status scic_sds_io_request_event_handler(struct scic_sds_request *sci_req, @@ -481,8 +557,6 @@ enum sci_status scic_sds_io_request_event_handler(struct scic_sds_request *sci_r enum sci_status scic_sds_io_request_frame_handler(struct scic_sds_request *sci_req, u32 frame_index); enum sci_status scic_sds_task_request_terminate(struct scic_sds_request *sci_req); -enum sci_status scic_sds_request_started_state_abort_handler(struct scic_sds_request *sci_req); - /* XXX open code in caller */ static inline void *scic_request_get_virt_addr(struct scic_sds_request *sci_req, @@ -778,6 +852,9 @@ enum sci_status scic_task_request_construct(struct scic_sds_controller *scic, struct scic_sds_request *sci_req); enum sci_status scic_task_request_construct_ssp(struct scic_sds_request *sci_req); enum sci_status scic_task_request_construct_sata(struct scic_sds_request *sci_req); +enum sci_status scic_sds_stp_udma_request_construct(struct scic_sds_request *sci_req, + u32 transfer_length, + enum dma_data_direction dir); void scic_stp_io_request_set_ncq_tag(struct scic_sds_request *sci_req, u16 ncq_tag); void scic_sds_smp_request_copy_response(struct scic_sds_request *sci_req); #endif /* !defined(_ISCI_REQUEST_H_) */ diff --git a/drivers/scsi/isci/stp_request.c b/drivers/scsi/isci/stp_request.c deleted file mode 100644 index e94ece81ed9..00000000000 --- a/drivers/scsi/isci/stp_request.c +++ /dev/null @@ -1,1584 +0,0 @@ -/* - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - * The full GNU General Public License is included in this distribution - * in the file called LICENSE.GPL. - * - * BSD LICENSE - * - * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. - * All rights reserved. 
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#include <scsi/sas.h> -#include "sas.h" -#include "state_machine.h" -#include "remote_device.h" -#include "stp_request.h" -#include "unsolicited_frame_control.h" -#include "scu_completion_codes.h" -#include "scu_event_codes.h" -#include "scu_task_context.h" -#include "request.h" - -/** - * This method is will fill in the SCU Task Context for any type of SATA - * request. This is called from the various SATA constructors. - * @sci_req: The general IO request object which is to be used in - * constructing the SCU task context. - * @task_context: The buffer pointer for the SCU task context which is being - * constructed. - * - * The general io request construction is complete. The buffer assignment for - * the command buffer is complete. none Revisit task context construction to - * determine what is common for SSP/SMP/STP task context structures. 
- */ -static void scu_sata_reqeust_construct_task_context( - struct scic_sds_request *sci_req, - struct scu_task_context *task_context) -{ - dma_addr_t dma_addr; - struct scic_sds_controller *controller; - struct scic_sds_remote_device *target_device; - struct scic_sds_port *target_port; - - controller = scic_sds_request_get_controller(sci_req); - target_device = scic_sds_request_get_device(sci_req); - target_port = scic_sds_request_get_port(sci_req); - - /* Fill in the TC with the its required data */ - task_context->abort = 0; - task_context->priority = SCU_TASK_PRIORITY_NORMAL; - task_context->initiator_request = 1; - task_context->connection_rate = target_device->connection_rate; - task_context->protocol_engine_index = - scic_sds_controller_get_protocol_engine_group(controller); - task_context->logical_port_index = - scic_sds_port_get_index(target_port); - task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_STP; - task_context->valid = SCU_TASK_CONTEXT_VALID; - task_context->context_type = SCU_TASK_CONTEXT_TYPE; - - task_context->remote_node_index = - scic_sds_remote_device_get_index(sci_req->target_device); - task_context->command_code = 0; - - task_context->link_layer_control = 0; - task_context->do_not_dma_ssp_good_response = 1; - task_context->strict_ordering = 0; - task_context->control_frame = 0; - task_context->timeout_enable = 0; - task_context->block_guard_enable = 0; - - task_context->address_modifier = 0; - task_context->task_phase = 0x01; - - task_context->ssp_command_iu_length = - (sizeof(struct host_to_dev_fis) - sizeof(u32)) / sizeof(u32); - - /* Set the first word of the H2D REG FIS */ - task_context->type.words[0] = *(u32 *)&sci_req->stp.cmd; - - if (sci_req->was_tag_assigned_by_user) { - /* - * Build the task context now since we have already read - * the data - */ - sci_req->post_context = - (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC | - (scic_sds_controller_get_protocol_engine_group( - controller) << - SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) | - (scic_sds_port_get_index(target_port) << - SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) | - scic_sds_io_tag_get_index(sci_req->io_tag)); - } else { - /* - * Build the task context now since we have already read - * the data. - * I/O tag index is not assigned because we have to wait - * until we get a TCi. - */ - sci_req->post_context = - (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC | - (scic_sds_controller_get_protocol_engine_group( - controller) << - SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) | - (scic_sds_port_get_index(target_port) << - SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT)); - } - - /* - * Copy the physical address for the command buffer to the SCU Task - * Context. We must offset the command buffer by 4 bytes because the - * first 4 bytes are transfered in the body of the TC. - */ - dma_addr = scic_io_request_get_dma_addr(sci_req, - ((char *) &sci_req->stp.cmd) + - sizeof(u32)); - - task_context->command_iu_upper = upper_32_bits(dma_addr); - task_context->command_iu_lower = lower_32_bits(dma_addr); - - /* SATA Requests do not have a response buffer */ - task_context->response_iu_upper = 0; - task_context->response_iu_lower = 0; -} - -/** - * - * @sci_req: - * - * This method will perform any general sata request construction. What part of - * SATA IO request construction is general? 
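/* Editor's note: a hedged illustration (not from this patch) of the address
 * arithmetic used in the task context construction above.  The first 32-bit
 * word of the host-to-device FIS travels inside the task context itself, so
 * the DMA address programmed into command_iu_upper/lower is offset by one
 * dword.  The helper and types below are hypothetical.
 */
#include <stdint.h>

struct cmd_iu_addr {
	uint32_t upper;
	uint32_t lower;
};

static struct cmd_iu_addr cmd_iu_from_fis(uint64_t fis_dma_addr)
{
	/* skip the dword that is already embedded in the task context */
	uint64_t dma = fis_dma_addr + sizeof(uint32_t);

	return (struct cmd_iu_addr){
		.upper = (uint32_t)(dma >> 32),
		.lower = (uint32_t)(dma & 0xffffffffu),
	};
}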
none - */ -static void scic_sds_stp_non_ncq_request_construct( - struct scic_sds_request *sci_req) -{ - sci_req->has_started_substate_machine = true; -} - -/** - * - * @sci_req: This parameter specifies the request to be constructed as an - * optimized request. - * @optimized_task_type: This parameter specifies whether the request is to be - * an UDMA request or a NCQ request. - A value of 0 indicates UDMA. - A - * value of 1 indicates NCQ. - * - * This method will perform request construction common to all types of STP - * requests that are optimized by the silicon (i.e. UDMA, NCQ). This method - * returns an indication as to whether the construction was successful. - */ -static void scic_sds_stp_optimized_request_construct(struct scic_sds_request *sci_req, - u8 optimized_task_type, - u32 len, - enum dma_data_direction dir) -{ - struct scu_task_context *task_context = sci_req->task_context_buffer; - - /* Build the STP task context structure */ - scu_sata_reqeust_construct_task_context(sci_req, task_context); - - /* Copy over the SGL elements */ - scic_sds_request_build_sgl(sci_req); - - /* Copy over the number of bytes to be transfered */ - task_context->transfer_length_bytes = len; - - if (dir == DMA_TO_DEVICE) { - /* - * The difference between the DMA IN and DMA OUT request task type - * values are consistent with the difference between FPDMA READ - * and FPDMA WRITE values. Add the supplied task type parameter - * to this difference to set the task type properly for this - * DATA OUT (WRITE) case. */ - task_context->task_type = optimized_task_type + (SCU_TASK_TYPE_DMA_OUT - - SCU_TASK_TYPE_DMA_IN); - } else { - /* - * For the DATA IN (READ) case, simply save the supplied - * optimized task type. */ - task_context->task_type = optimized_task_type; - } -} - -/** - * - * @sci_req: This parameter specifies the request to be constructed. - * - * This method will construct the STP UDMA request and its associated TC data. - * This method returns an indication as to whether the construction was - * successful. SCI_SUCCESS Currently this method always returns this value. - */ -enum sci_status scic_sds_stp_ncq_request_construct(struct scic_sds_request *sci_req, - u32 len, - enum dma_data_direction dir) -{ - scic_sds_stp_optimized_request_construct(sci_req, - SCU_TASK_TYPE_FPDMAQ_READ, - len, dir); - return SCI_SUCCESS; -} - -/** - * scu_stp_raw_request_construct_task_context - - * @sci_req: This parameter specifies the STP request object for which to - * construct a RAW command frame task context. - * @task_context: This parameter specifies the SCU specific task context buffer - * to construct. - * - * This method performs the operations common to all SATA/STP requests - * utilizing the raw frame method. none - */ -static void scu_stp_raw_request_construct_task_context( - struct scic_sds_stp_request *stp_req, - struct scu_task_context *task_context) -{ - struct scic_sds_request *sci_req = to_sci_req(stp_req); - - scu_sata_reqeust_construct_task_context(sci_req, task_context); - - task_context->control_frame = 0; - task_context->priority = SCU_TASK_PRIORITY_NORMAL; - task_context->task_type = SCU_TASK_TYPE_SATA_RAW_FRAME; - task_context->type.stp.fis_type = FIS_REGH2D; - task_context->transfer_length_bytes = sizeof(struct host_to_dev_fis) - sizeof(u32); -} - -void scic_stp_io_request_set_ncq_tag( - struct scic_sds_request *req, - u16 ncq_tag) -{ - /** - * @note This could be made to return an error to the user if the user - * attempts to set the NCQ tag in the wrong state. 
- */ - req->task_context_buffer->type.stp.ncq_tag = ncq_tag; -} - -/** - * - * @sci_req: - * - * Get the next SGL element from the request. - Check on which SGL element pair - * we are working - if working on SLG pair element A - advance to element B - - * else - check to see if there are more SGL element pairs for this IO request - * - if there are more SGL element pairs - advance to the next pair and return - * element A struct scu_sgl_element* - */ -static struct scu_sgl_element *scic_sds_stp_request_pio_get_next_sgl(struct scic_sds_stp_request *stp_req) -{ - struct scu_sgl_element *current_sgl; - struct scic_sds_request *sci_req = to_sci_req(stp_req); - struct scic_sds_request_pio_sgl *pio_sgl = &stp_req->type.pio.request_current; - - if (pio_sgl->sgl_set == SCU_SGL_ELEMENT_PAIR_A) { - if (pio_sgl->sgl_pair->B.address_lower == 0 && - pio_sgl->sgl_pair->B.address_upper == 0) { - current_sgl = NULL; - } else { - pio_sgl->sgl_set = SCU_SGL_ELEMENT_PAIR_B; - current_sgl = &pio_sgl->sgl_pair->B; - } - } else { - if (pio_sgl->sgl_pair->next_pair_lower == 0 && - pio_sgl->sgl_pair->next_pair_upper == 0) { - current_sgl = NULL; - } else { - u64 phys_addr; - - phys_addr = pio_sgl->sgl_pair->next_pair_upper; - phys_addr <<= 32; - phys_addr |= pio_sgl->sgl_pair->next_pair_lower; - - pio_sgl->sgl_pair = scic_request_get_virt_addr(sci_req, phys_addr); - pio_sgl->sgl_set = SCU_SGL_ELEMENT_PAIR_A; - current_sgl = &pio_sgl->sgl_pair->A; - } - } - - return current_sgl; -} - -/** - * - * @sci_req: - * @completion_code: - * - * This method processes a TC completion. The expected TC completion is for - * the transmission of the H2D register FIS containing the SATA/STP non-data - * request. This method always successfully processes the TC completion. - * SCI_SUCCESS This value is always returned. - */ -static enum sci_status scic_sds_stp_request_non_data_await_h2d_tc_completion_handler( - struct scic_sds_request *sci_req, - u32 completion_code) -{ - switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { - case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): - scic_sds_request_set_status( - sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS - ); - - sci_base_state_machine_change_state( - &sci_req->started_substate_machine, - SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_D2H_SUBSTATE - ); - break; - - default: - /* - * All other completion status cause the IO to be complete. If a NAK - * was received, then it is up to the user to retry the request. */ - scic_sds_request_set_status( - sci_req, - SCU_NORMALIZE_COMPLETION_STATUS(completion_code), - SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR - ); - - sci_base_state_machine_change_state( - &sci_req->state_machine, SCI_BASE_REQUEST_STATE_COMPLETED); - break; - } - - return SCI_SUCCESS; -} - -/** - * - * @request: This parameter specifies the request for which a frame has been - * received. - * @frame_index: This parameter specifies the index of the frame that has been - * received. - * - * This method processes frames received from the target while waiting for a - * device to host register FIS. If a non-register FIS is received during this - * time, it is treated as a protocol violation from an IO perspective. Indicate - * if the received frame was processed successfully. 
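/* Editor's note: a simplified, standalone sketch (not code from this patch) of
 * the SGL walk implemented in scic_sds_stp_request_pio_get_next_sgl above.
 * Each pair holds elements A and B plus a link to the next pair; the walker
 * moves A -> B inside a pair and then follows the link to the next pair's A,
 * returning NULL when the list is exhausted.  The types below are hypothetical
 * stand-ins for the hardware-defined structures.
 */
#include <stddef.h>
#include <stdint.h>

struct sgl_elem { uint64_t addr; uint32_t len; };
struct sgl_pair { struct sgl_elem A, B; struct sgl_pair *next; };

struct sgl_cursor {
	struct sgl_pair *pair;
	int on_b;	/* 0: element A is current, 1: element B is current */
};

static struct sgl_elem *sgl_next(struct sgl_cursor *c)
{
	if (!c->on_b) {
		/* advance from A to B within the same pair, if B is populated */
		if (c->pair->B.addr == 0)
			return NULL;
		c->on_b = 1;
		return &c->pair->B;
	}

	/* element B exhausted: follow the link to the next pair's element A */
	if (c->pair->next == NULL)
		return NULL;
	c->pair = c->pair->next;
	c->on_b = 0;
	return &c->pair->A;
}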
- */ -static enum sci_status scic_sds_stp_request_non_data_await_d2h_frame_handler( - struct scic_sds_request *sci_req, - u32 frame_index) -{ - enum sci_status status; - struct dev_to_host_fis *frame_header; - u32 *frame_buffer; - struct scic_sds_stp_request *stp_req = &sci_req->stp.req; - struct scic_sds_controller *scic = sci_req->owning_controller; - - status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control, - frame_index, - (void **)&frame_header); - - if (status != SCI_SUCCESS) { - dev_err(scic_to_dev(sci_req->owning_controller), - "%s: SCIC IO Request 0x%p could not get frame header " - "for frame index %d, status %x\n", - __func__, stp_req, frame_index, status); - - return status; - } - - switch (frame_header->fis_type) { - case FIS_REGD2H: - scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control, - frame_index, - (void **)&frame_buffer); - - scic_sds_controller_copy_sata_response(&sci_req->stp.rsp, - frame_header, - frame_buffer); - - /* The command has completed with error */ - scic_sds_request_set_status(sci_req, SCU_TASK_DONE_CHECK_RESPONSE, - SCI_FAILURE_IO_RESPONSE_VALID); - break; - - default: - dev_warn(scic_to_dev(scic), - "%s: IO Request:0x%p Frame Id:%d protocol " - "violation occurred\n", __func__, stp_req, - frame_index); - - scic_sds_request_set_status(sci_req, SCU_TASK_DONE_UNEXP_FIS, - SCI_FAILURE_PROTOCOL_VIOLATION); - break; - } - - sci_base_state_machine_change_state(&sci_req->state_machine, - SCI_BASE_REQUEST_STATE_COMPLETED); - - /* Frame has been decoded return it to the controller */ - scic_sds_controller_release_frame(scic, frame_index); - - return status; -} - -/* --------------------------------------------------------------------------- */ - -static const struct scic_sds_io_request_state_handler scic_sds_stp_request_started_non_data_substate_handler_table[] = { - [SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_H2D_COMPLETION_SUBSTATE] = { - .abort_handler = scic_sds_request_started_state_abort_handler, - .tc_completion_handler = scic_sds_stp_request_non_data_await_h2d_tc_completion_handler, - }, - [SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_D2H_SUBSTATE] = { - .abort_handler = scic_sds_request_started_state_abort_handler, - .frame_handler = scic_sds_stp_request_non_data_await_d2h_frame_handler, - } -}; - -static void scic_sds_stp_request_started_non_data_await_h2d_completion_enter( - void *object) -{ - struct scic_sds_request *sci_req = object; - - SET_STATE_HANDLER( - sci_req, - scic_sds_stp_request_started_non_data_substate_handler_table, - SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_H2D_COMPLETION_SUBSTATE - ); - - scic_sds_remote_device_set_working_request( - sci_req->target_device, sci_req - ); -} - -static void scic_sds_stp_request_started_non_data_await_d2h_enter(void *object) -{ - struct scic_sds_request *sci_req = object; - - SET_STATE_HANDLER( - sci_req, - scic_sds_stp_request_started_non_data_substate_handler_table, - SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_D2H_SUBSTATE - ); -} - -/* --------------------------------------------------------------------------- */ - -static const struct sci_base_state scic_sds_stp_request_started_non_data_substate_table[] = { - [SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_H2D_COMPLETION_SUBSTATE] = { - .enter_state = scic_sds_stp_request_started_non_data_await_h2d_completion_enter, - }, - [SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_D2H_SUBSTATE] = { - .enter_state = scic_sds_stp_request_started_non_data_await_d2h_enter, - }, -}; - -enum sci_status 
scic_sds_stp_non_data_request_construct(struct scic_sds_request *sci_req) -{ - struct scic_sds_stp_request *stp_req = &sci_req->stp.req; - - scic_sds_stp_non_ncq_request_construct(sci_req); - - /* Build the STP task context structure */ - scu_stp_raw_request_construct_task_context(stp_req, sci_req->task_context_buffer); - - sci_base_state_machine_construct(&sci_req->started_substate_machine, - sci_req, - scic_sds_stp_request_started_non_data_substate_table, - SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_H2D_COMPLETION_SUBSTATE); - - return SCI_SUCCESS; -} - -#define SCU_MAX_FRAME_BUFFER_SIZE 0x400 /* 1K is the maximum SCU frame data payload */ - -/* transmit DATA_FIS from (current sgl + offset) for input - * parameter length. current sgl and offset is alreay stored in the IO request - */ -static enum sci_status scic_sds_stp_request_pio_data_out_trasmit_data_frame( - struct scic_sds_request *sci_req, - u32 length) -{ - struct scic_sds_controller *scic = sci_req->owning_controller; - struct scic_sds_stp_request *stp_req = &sci_req->stp.req; - struct scu_task_context *task_context; - struct scu_sgl_element *current_sgl; - - /* Recycle the TC and reconstruct it for sending out DATA FIS containing - * for the data from current_sgl+offset for the input length - */ - task_context = scic_sds_controller_get_task_context_buffer(scic, - sci_req->io_tag); - - if (stp_req->type.pio.request_current.sgl_set == SCU_SGL_ELEMENT_PAIR_A) - current_sgl = &stp_req->type.pio.request_current.sgl_pair->A; - else - current_sgl = &stp_req->type.pio.request_current.sgl_pair->B; - - /* update the TC */ - task_context->command_iu_upper = current_sgl->address_upper; - task_context->command_iu_lower = current_sgl->address_lower; - task_context->transfer_length_bytes = length; - task_context->type.stp.fis_type = FIS_DATA; - - /* send the new TC out. 
*/ - return scic_controller_continue_io(sci_req); -} - -static enum sci_status scic_sds_stp_request_pio_data_out_transmit_data(struct scic_sds_request *sci_req) -{ - - struct scu_sgl_element *current_sgl; - u32 sgl_offset; - u32 remaining_bytes_in_current_sgl = 0; - enum sci_status status = SCI_SUCCESS; - struct scic_sds_stp_request *stp_req = &sci_req->stp.req; - - sgl_offset = stp_req->type.pio.request_current.sgl_offset; - - if (stp_req->type.pio.request_current.sgl_set == SCU_SGL_ELEMENT_PAIR_A) { - current_sgl = &(stp_req->type.pio.request_current.sgl_pair->A); - remaining_bytes_in_current_sgl = stp_req->type.pio.request_current.sgl_pair->A.length - sgl_offset; - } else { - current_sgl = &(stp_req->type.pio.request_current.sgl_pair->B); - remaining_bytes_in_current_sgl = stp_req->type.pio.request_current.sgl_pair->B.length - sgl_offset; - } - - - if (stp_req->type.pio.pio_transfer_bytes > 0) { - if (stp_req->type.pio.pio_transfer_bytes >= remaining_bytes_in_current_sgl) { - /* recycle the TC and send the H2D Data FIS from (current sgl + sgl_offset) and length = remaining_bytes_in_current_sgl */ - status = scic_sds_stp_request_pio_data_out_trasmit_data_frame(sci_req, remaining_bytes_in_current_sgl); - if (status == SCI_SUCCESS) { - stp_req->type.pio.pio_transfer_bytes -= remaining_bytes_in_current_sgl; - - /* update the current sgl, sgl_offset and save for future */ - current_sgl = scic_sds_stp_request_pio_get_next_sgl(stp_req); - sgl_offset = 0; - } - } else if (stp_req->type.pio.pio_transfer_bytes < remaining_bytes_in_current_sgl) { - /* recycle the TC and send the H2D Data FIS from (current sgl + sgl_offset) and length = type.pio.pio_transfer_bytes */ - scic_sds_stp_request_pio_data_out_trasmit_data_frame(sci_req, stp_req->type.pio.pio_transfer_bytes); - - if (status == SCI_SUCCESS) { - /* Sgl offset will be adjusted and saved for future */ - sgl_offset += stp_req->type.pio.pio_transfer_bytes; - current_sgl->address_lower += stp_req->type.pio.pio_transfer_bytes; - stp_req->type.pio.pio_transfer_bytes = 0; - } - } - } - - if (status == SCI_SUCCESS) { - stp_req->type.pio.request_current.sgl_offset = sgl_offset; - } - - return status; -} - -/** - * - * @stp_request: The request that is used for the SGL processing. - * @data_buffer: The buffer of data to be copied. - * @length: The length of the data transfer. - * - * Copy the data from the buffer for the length specified to the IO reqeust SGL - * specified data region. enum sci_status - */ -static enum sci_status -scic_sds_stp_request_pio_data_in_copy_data_buffer(struct scic_sds_stp_request *stp_req, - u8 *data_buf, u32 len) -{ - struct scic_sds_request *sci_req; - struct isci_request *ireq; - u8 *src_addr; - int copy_len; - struct sas_task *task; - struct scatterlist *sg; - void *kaddr; - int total_len = len; - - sci_req = to_sci_req(stp_req); - ireq = sci_req_to_ireq(sci_req); - task = isci_request_access_task(ireq); - src_addr = data_buf; - - if (task->num_scatter > 0) { - sg = task->scatter; - - while (total_len > 0) { - struct page *page = sg_page(sg); - - copy_len = min_t(int, total_len, sg_dma_len(sg)); - kaddr = kmap_atomic(page, KM_IRQ0); - memcpy(kaddr + sg->offset, src_addr, copy_len); - kunmap_atomic(kaddr, KM_IRQ0); - total_len -= copy_len; - src_addr += copy_len; - sg = sg_next(sg); - } - } else { - BUG_ON(task->total_xfer_len < total_len); - memcpy(task->scatter, src_addr, total_len); - } - - return SCI_SUCCESS; -} - -/** - * - * @sci_req: The PIO DATA IN request that is to receive the data. 
- * @data_buffer: The buffer to copy from. - * - * Copy the data buffer to the io request data region. enum sci_status - */ -static enum sci_status scic_sds_stp_request_pio_data_in_copy_data( - struct scic_sds_stp_request *sci_req, - u8 *data_buffer) -{ - enum sci_status status; - - /* - * If there is less than 1K remaining in the transfer request - * copy just the data for the transfer */ - if (sci_req->type.pio.pio_transfer_bytes < SCU_MAX_FRAME_BUFFER_SIZE) { - status = scic_sds_stp_request_pio_data_in_copy_data_buffer( - sci_req, data_buffer, sci_req->type.pio.pio_transfer_bytes); - - if (status == SCI_SUCCESS) - sci_req->type.pio.pio_transfer_bytes = 0; - } else { - /* We are transfering the whole frame so copy */ - status = scic_sds_stp_request_pio_data_in_copy_data_buffer( - sci_req, data_buffer, SCU_MAX_FRAME_BUFFER_SIZE); - - if (status == SCI_SUCCESS) - sci_req->type.pio.pio_transfer_bytes -= SCU_MAX_FRAME_BUFFER_SIZE; - } - - return status; -} - -/** - * - * @sci_req: - * @completion_code: - * - * enum sci_status - */ -static enum sci_status scic_sds_stp_request_pio_await_h2d_completion_tc_completion_handler( - struct scic_sds_request *sci_req, - u32 completion_code) -{ - enum sci_status status = SCI_SUCCESS; - - switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { - case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): - scic_sds_request_set_status( - sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS - ); - - sci_base_state_machine_change_state( - &sci_req->started_substate_machine, - SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE - ); - break; - - default: - /* - * All other completion status cause the IO to be complete. If a NAK - * was received, then it is up to the user to retry the request. */ - scic_sds_request_set_status( - sci_req, - SCU_NORMALIZE_COMPLETION_STATUS(completion_code), - SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR - ); - - sci_base_state_machine_change_state( - &sci_req->state_machine, - SCI_BASE_REQUEST_STATE_COMPLETED - ); - break; - } - - return status; -} - -static enum sci_status scic_sds_stp_request_pio_await_frame_frame_handler(struct scic_sds_request *sci_req, - u32 frame_index) -{ - struct scic_sds_controller *scic = sci_req->owning_controller; - struct scic_sds_stp_request *stp_req = &sci_req->stp.req; - struct isci_request *ireq = sci_req_to_ireq(sci_req); - struct sas_task *task = isci_request_access_task(ireq); - struct dev_to_host_fis *frame_header; - enum sci_status status; - u32 *frame_buffer; - - status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control, - frame_index, - (void **)&frame_header); - - if (status != SCI_SUCCESS) { - dev_err(scic_to_dev(scic), - "%s: SCIC IO Request 0x%p could not get frame header " - "for frame index %d, status %x\n", - __func__, stp_req, frame_index, status); - return status; - } - - switch (frame_header->fis_type) { - case FIS_PIO_SETUP: - /* Get from the frame buffer the PIO Setup Data */ - scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control, - frame_index, - (void **)&frame_buffer); - - /* Get the data from the PIO Setup The SCU Hardware returns - * first word in the frame_header and the rest of the data is in - * the frame buffer so we need to back up one dword - */ - - /* transfer_count: first 16bits in the 4th dword */ - stp_req->type.pio.pio_transfer_bytes = frame_buffer[3] & 0xffff; - - /* ending_status: 4th byte in the 3rd dword */ - stp_req->type.pio.ending_status = (frame_buffer[2] >> 24) & 0xff; - - scic_sds_controller_copy_sata_response(&sci_req->stp.rsp, - frame_header, - 
frame_buffer); - - sci_req->stp.rsp.status = stp_req->type.pio.ending_status; - - /* The next state is dependent on whether the - * request was PIO Data-in or Data out - */ - if (task->data_dir == DMA_FROM_DEVICE) { - sci_base_state_machine_change_state(&sci_req->started_substate_machine, - SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_DATA_SUBSTATE); - } else if (task->data_dir == DMA_TO_DEVICE) { - /* Transmit data */ - status = scic_sds_stp_request_pio_data_out_transmit_data(sci_req); - if (status != SCI_SUCCESS) - break; - sci_base_state_machine_change_state(&sci_req->started_substate_machine, - SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_OUT_TRANSMIT_DATA_SUBSTATE); - } - break; - case FIS_SETDEVBITS: - sci_base_state_machine_change_state(&sci_req->started_substate_machine, - SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE); - break; - case FIS_REGD2H: - if (frame_header->status & ATA_BUSY) { - /* Now why is the drive sending a D2H Register FIS when - * it is still busy? Do nothing since we are still in - * the right state. - */ - dev_dbg(scic_to_dev(scic), - "%s: SCIC PIO Request 0x%p received " - "D2H Register FIS with BSY status " - "0x%x\n", __func__, stp_req, - frame_header->status); - break; - } - - scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control, - frame_index, - (void **)&frame_buffer); - - scic_sds_controller_copy_sata_response(&sci_req->stp.req, - frame_header, - frame_buffer); - - scic_sds_request_set_status(sci_req, - SCU_TASK_DONE_CHECK_RESPONSE, - SCI_FAILURE_IO_RESPONSE_VALID); - - sci_base_state_machine_change_state(&sci_req->state_machine, - SCI_BASE_REQUEST_STATE_COMPLETED); - break; - default: - /* FIXME: what do we do here? */ - break; - } - - /* Frame is decoded return it to the controller */ - scic_sds_controller_release_frame(scic, frame_index); - - return status; -} - -static enum sci_status scic_sds_stp_request_pio_data_in_await_data_frame_handler(struct scic_sds_request *sci_req, - u32 frame_index) -{ - enum sci_status status; - struct dev_to_host_fis *frame_header; - struct sata_fis_data *frame_buffer; - struct scic_sds_stp_request *stp_req = &sci_req->stp.req; - struct scic_sds_controller *scic = sci_req->owning_controller; - - status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control, - frame_index, - (void **)&frame_header); - - if (status != SCI_SUCCESS) { - dev_err(scic_to_dev(scic), - "%s: SCIC IO Request 0x%p could not get frame header " - "for frame index %d, status %x\n", - __func__, stp_req, frame_index, status); - return status; - } - - if (frame_header->fis_type == FIS_DATA) { - if (stp_req->type.pio.request_current.sgl_pair == NULL) { - sci_req->saved_rx_frame_index = frame_index; - stp_req->type.pio.pio_transfer_bytes = 0; - } else { - scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control, - frame_index, - (void **)&frame_buffer); - - status = scic_sds_stp_request_pio_data_in_copy_data(stp_req, - (u8 *)frame_buffer); - - /* Frame is decoded return it to the controller */ - scic_sds_controller_release_frame(scic, frame_index); - } - - /* Check for the end of the transfer, are there more - * bytes remaining for this data transfer - */ - if (status != SCI_SUCCESS || - stp_req->type.pio.pio_transfer_bytes != 0) - return status; - - if ((stp_req->type.pio.ending_status & ATA_BUSY) == 0) { - scic_sds_request_set_status(sci_req, - SCU_TASK_DONE_CHECK_RESPONSE, - SCI_FAILURE_IO_RESPONSE_VALID); - - sci_base_state_machine_change_state(&sci_req->state_machine, - SCI_BASE_REQUEST_STATE_COMPLETED); - } else { - 
sci_base_state_machine_change_state(&sci_req->started_substate_machine, - SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE); - } - } else { - dev_err(scic_to_dev(scic), - "%s: SCIC PIO Request 0x%p received frame %d " - "with fis type 0x%02x when expecting a data " - "fis.\n", __func__, stp_req, frame_index, - frame_header->fis_type); - - scic_sds_request_set_status(sci_req, - SCU_TASK_DONE_GOOD, - SCI_FAILURE_IO_REQUIRES_SCSI_ABORT); - - sci_base_state_machine_change_state(&sci_req->state_machine, - SCI_BASE_REQUEST_STATE_COMPLETED); - - /* Frame is decoded return it to the controller */ - scic_sds_controller_release_frame(scic, frame_index); - } - - return status; -} - - -/** - * - * @sci_req: - * @completion_code: - * - * enum sci_status - */ -static enum sci_status scic_sds_stp_request_pio_data_out_await_data_transmit_completion_tc_completion_handler( - - struct scic_sds_request *sci_req, - u32 completion_code) -{ - enum sci_status status = SCI_SUCCESS; - bool all_frames_transferred = false; - struct scic_sds_stp_request *stp_req = &sci_req->stp.req; - - switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { - case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): - /* Transmit data */ - if (stp_req->type.pio.pio_transfer_bytes != 0) { - status = scic_sds_stp_request_pio_data_out_transmit_data(sci_req); - if (status == SCI_SUCCESS) { - if (stp_req->type.pio.pio_transfer_bytes == 0) - all_frames_transferred = true; - } - } else if (stp_req->type.pio.pio_transfer_bytes == 0) { - /* - * this will happen if the all data is written at the - * first time after the pio setup fis is received - */ - all_frames_transferred = true; - } - - /* all data transferred. */ - if (all_frames_transferred) { - /* - * Change the state to SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_FRAME_SUBSTATE - * and wait for PIO_SETUP fis / or D2H REg fis. */ - sci_base_state_machine_change_state( - &sci_req->started_substate_machine, - SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE - ); - } - break; - - default: - /* - * All other completion status cause the IO to be complete. If a NAK - * was received, then it is up to the user to retry the request. */ - scic_sds_request_set_status( - sci_req, - SCU_NORMALIZE_COMPLETION_STATUS(completion_code), - SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR - ); - - sci_base_state_machine_change_state( - &sci_req->state_machine, - SCI_BASE_REQUEST_STATE_COMPLETED - ); - break; - } - - return status; -} - -/** - * - * @request: This is the request which is receiving the event. - * @event_code: This is the event code that the request on which the request is - * expected to take action. - * - * This method will handle any link layer events while waiting for the data - * frame. enum sci_status SCI_SUCCESS SCI_FAILURE - */ -static enum sci_status scic_sds_stp_request_pio_data_in_await_data_event_handler( - struct scic_sds_request *request, - u32 event_code) -{ - enum sci_status status; - - switch (scu_get_event_specifier(event_code)) { - case SCU_TASK_DONE_CRC_ERR << SCU_EVENT_SPECIFIC_CODE_SHIFT: - /* - * We are waiting for data and the SCU has R_ERR the data frame. 
- * Go back to waiting for the D2H Register FIS */ - sci_base_state_machine_change_state( - &request->started_substate_machine, - SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE - ); - - status = SCI_SUCCESS; - break; - - default: - dev_err(scic_to_dev(request->owning_controller), - "%s: SCIC PIO Request 0x%p received unexpected " - "event 0x%08x\n", - __func__, request, event_code); - - /* / @todo Should we fail the PIO request when we get an unexpected event? */ - status = SCI_FAILURE; - break; - } - - return status; -} - -/* --------------------------------------------------------------------------- */ - -static const struct scic_sds_io_request_state_handler scic_sds_stp_request_started_pio_substate_handler_table[] = { - [SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_H2D_COMPLETION_SUBSTATE] = { - .abort_handler = scic_sds_request_started_state_abort_handler, - .tc_completion_handler = scic_sds_stp_request_pio_await_h2d_completion_tc_completion_handler, - }, - [SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE] = { - .abort_handler = scic_sds_request_started_state_abort_handler, - .frame_handler = scic_sds_stp_request_pio_await_frame_frame_handler - }, - [SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_DATA_SUBSTATE] = { - .abort_handler = scic_sds_request_started_state_abort_handler, - .event_handler = scic_sds_stp_request_pio_data_in_await_data_event_handler, - .frame_handler = scic_sds_stp_request_pio_data_in_await_data_frame_handler - }, - [SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_OUT_TRANSMIT_DATA_SUBSTATE] = { - .abort_handler = scic_sds_request_started_state_abort_handler, - .tc_completion_handler = scic_sds_stp_request_pio_data_out_await_data_transmit_completion_tc_completion_handler, - } -}; - -static void scic_sds_stp_request_started_pio_await_h2d_completion_enter( - void *object) -{ - struct scic_sds_request *sci_req = object; - - SET_STATE_HANDLER( - sci_req, - scic_sds_stp_request_started_pio_substate_handler_table, - SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_H2D_COMPLETION_SUBSTATE - ); - - scic_sds_remote_device_set_working_request( - sci_req->target_device, sci_req); -} - -static void scic_sds_stp_request_started_pio_await_frame_enter(void *object) -{ - struct scic_sds_request *sci_req = object; - - SET_STATE_HANDLER( - sci_req, - scic_sds_stp_request_started_pio_substate_handler_table, - SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE - ); -} - -static void scic_sds_stp_request_started_pio_data_in_await_data_enter( - void *object) -{ - struct scic_sds_request *sci_req = object; - - SET_STATE_HANDLER( - sci_req, - scic_sds_stp_request_started_pio_substate_handler_table, - SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_DATA_SUBSTATE - ); -} - -static void scic_sds_stp_request_started_pio_data_out_transmit_data_enter( - void *object) -{ - struct scic_sds_request *sci_req = object; - - SET_STATE_HANDLER( - sci_req, - scic_sds_stp_request_started_pio_substate_handler_table, - SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_OUT_TRANSMIT_DATA_SUBSTATE - ); -} - -/* --------------------------------------------------------------------------- */ - -static const struct sci_base_state scic_sds_stp_request_started_pio_substate_table[] = { - [SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_H2D_COMPLETION_SUBSTATE] = { - .enter_state = scic_sds_stp_request_started_pio_await_h2d_completion_enter, - }, - [SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE] = { - .enter_state = scic_sds_stp_request_started_pio_await_frame_enter, - }, - [SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_DATA_SUBSTATE] = { 
- .enter_state = scic_sds_stp_request_started_pio_data_in_await_data_enter, - }, - [SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_OUT_TRANSMIT_DATA_SUBSTATE] = { - .enter_state = scic_sds_stp_request_started_pio_data_out_transmit_data_enter, - } -}; - -enum sci_status -scic_sds_stp_pio_request_construct(struct scic_sds_request *sci_req, - bool copy_rx_frame) -{ - struct scic_sds_stp_request *stp_req = &sci_req->stp.req; - struct scic_sds_stp_pio_request *pio = &stp_req->type.pio; - - scic_sds_stp_non_ncq_request_construct(sci_req); - - scu_stp_raw_request_construct_task_context(stp_req, - sci_req->task_context_buffer); - - pio->current_transfer_bytes = 0; - pio->ending_error = 0; - pio->ending_status = 0; - - pio->request_current.sgl_offset = 0; - pio->request_current.sgl_set = SCU_SGL_ELEMENT_PAIR_A; - - if (copy_rx_frame) { - scic_sds_request_build_sgl(sci_req); - /* Since the IO request copy of the TC contains the same data as - * the actual TC this pointer is vaild for either. - */ - pio->request_current.sgl_pair = &sci_req->task_context_buffer->sgl_pair_ab; - } else { - /* The user does not want the data copied to the SGL buffer location */ - pio->request_current.sgl_pair = NULL; - } - - sci_base_state_machine_construct(&sci_req->started_substate_machine, - sci_req, - scic_sds_stp_request_started_pio_substate_table, - SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_H2D_COMPLETION_SUBSTATE); - - return SCI_SUCCESS; -} - -static void scic_sds_stp_request_udma_complete_request( - struct scic_sds_request *request, - u32 scu_status, - enum sci_status sci_status) -{ - scic_sds_request_set_status(request, scu_status, sci_status); - sci_base_state_machine_change_state(&request->state_machine, - SCI_BASE_REQUEST_STATE_COMPLETED); -} - -static enum sci_status scic_sds_stp_request_udma_general_frame_handler(struct scic_sds_request *sci_req, - u32 frame_index) -{ - struct scic_sds_controller *scic = sci_req->owning_controller; - struct dev_to_host_fis *frame_header; - enum sci_status status; - u32 *frame_buffer; - - status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control, - frame_index, - (void **)&frame_header); - - if ((status == SCI_SUCCESS) && - (frame_header->fis_type == FIS_REGD2H)) { - scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control, - frame_index, - (void **)&frame_buffer); - - scic_sds_controller_copy_sata_response(&sci_req->stp.rsp, - frame_header, - frame_buffer); - } - - scic_sds_controller_release_frame(scic, frame_index); - - return status; -} - -static enum sci_status scic_sds_stp_request_udma_await_tc_completion_tc_completion_handler( - struct scic_sds_request *sci_req, - u32 completion_code) -{ - enum sci_status status = SCI_SUCCESS; - - switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { - case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): - scic_sds_stp_request_udma_complete_request(sci_req, - SCU_TASK_DONE_GOOD, - SCI_SUCCESS); - break; - case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_FIS): - case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR): - /* - * We must check ther response buffer to see if the D2H Register FIS was - * received before we got the TC completion. 
*/ - if (sci_req->stp.rsp.fis_type == FIS_REGD2H) { - scic_sds_remote_device_suspend(sci_req->target_device, - SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code))); - - scic_sds_stp_request_udma_complete_request(sci_req, - SCU_TASK_DONE_CHECK_RESPONSE, - SCI_FAILURE_IO_RESPONSE_VALID); - } else { - /* - * If we have an error completion status for the TC then we can expect a - * D2H register FIS from the device so we must change state to wait for it */ - sci_base_state_machine_change_state(&sci_req->started_substate_machine, - SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_D2H_REG_FIS_SUBSTATE); - } - break; - - /* - * / @todo Check to see if any of these completion status need to wait for - * / the device to host register fis. */ - /* / @todo We can retry the command for SCU_TASK_DONE_CMD_LL_R_ERR - this comes only for B0 */ - case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_INV_FIS_LEN): - case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_MAX_PLD_ERR): - case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_R_ERR): - case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CMD_LL_R_ERR): - case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CRC_ERR): - scic_sds_remote_device_suspend(sci_req->target_device, - SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code))); - /* Fall through to the default case */ - default: - /* All other completion status cause the IO to be complete. */ - scic_sds_stp_request_udma_complete_request(sci_req, - SCU_NORMALIZE_COMPLETION_STATUS(completion_code), - SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); - break; - } - - return status; -} - -static enum sci_status scic_sds_stp_request_udma_await_d2h_reg_fis_frame_handler( - struct scic_sds_request *sci_req, - u32 frame_index) -{ - enum sci_status status; - - /* Use the general frame handler to copy the resposne data */ - status = scic_sds_stp_request_udma_general_frame_handler(sci_req, frame_index); - - if (status != SCI_SUCCESS) - return status; - - scic_sds_stp_request_udma_complete_request(sci_req, - SCU_TASK_DONE_CHECK_RESPONSE, - SCI_FAILURE_IO_RESPONSE_VALID); - - return status; -} - -/* --------------------------------------------------------------------------- */ - -static const struct scic_sds_io_request_state_handler scic_sds_stp_request_started_udma_substate_handler_table[] = { - [SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_TC_COMPLETION_SUBSTATE] = { - .abort_handler = scic_sds_request_started_state_abort_handler, - .tc_completion_handler = scic_sds_stp_request_udma_await_tc_completion_tc_completion_handler, - .frame_handler = scic_sds_stp_request_udma_general_frame_handler, - }, - [SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_D2H_REG_FIS_SUBSTATE] = { - .abort_handler = scic_sds_request_started_state_abort_handler, - .frame_handler = scic_sds_stp_request_udma_await_d2h_reg_fis_frame_handler, - }, -}; - -static void scic_sds_stp_request_started_udma_await_tc_completion_enter( - void *object) -{ - struct scic_sds_request *sci_req = object; - - SET_STATE_HANDLER( - sci_req, - scic_sds_stp_request_started_udma_substate_handler_table, - SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_TC_COMPLETION_SUBSTATE - ); -} - -/** - * - * - * This state is entered when there is an TC completion failure. The hardware - * received an unexpected condition while processing the IO request and now - * will UF the D2H register FIS to complete the IO. 
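/* Editor's note: a hedged sketch (not code from this patch) of the decision the
 * UDMA error path above has to make.  If the D2H register FIS was already
 * captured in the response buffer, the request can be failed immediately with
 * a check-response status; otherwise the state machine must wait for that FIS
 * before completing.  The enum and helper below are illustrative only; 0x34 is
 * the standard SATA type value for a device-to-host register FIS.
 */
enum udma_error_action {
	UDMA_COMPLETE_WITH_RESPONSE,	/* response FIS already on hand */
	UDMA_WAIT_FOR_D2H_FIS,		/* error reported before the FIS arrived */
};

#define FIS_TYPE_REG_D2H 0x34

static enum udma_error_action udma_on_error(unsigned char captured_fis_type)
{
	return captured_fis_type == FIS_TYPE_REG_D2H ?
	       UDMA_COMPLETE_WITH_RESPONSE : UDMA_WAIT_FOR_D2H_FIS;
}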
- */ -static void scic_sds_stp_request_started_udma_await_d2h_reg_fis_enter( - void *object) -{ - struct scic_sds_request *sci_req = object; - - SET_STATE_HANDLER( - sci_req, - scic_sds_stp_request_started_udma_substate_handler_table, - SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_D2H_REG_FIS_SUBSTATE - ); -} - -/* --------------------------------------------------------------------------- */ - -static const struct sci_base_state scic_sds_stp_request_started_udma_substate_table[] = { - [SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_TC_COMPLETION_SUBSTATE] = { - .enter_state = scic_sds_stp_request_started_udma_await_tc_completion_enter, - }, - [SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_D2H_REG_FIS_SUBSTATE] = { - .enter_state = scic_sds_stp_request_started_udma_await_d2h_reg_fis_enter, - }, -}; - -enum sci_status scic_sds_stp_udma_request_construct(struct scic_sds_request *sci_req, - u32 len, - enum dma_data_direction dir) -{ - scic_sds_stp_non_ncq_request_construct(sci_req); - - scic_sds_stp_optimized_request_construct(sci_req, SCU_TASK_TYPE_DMA_IN, - len, dir); - - sci_base_state_machine_construct( - &sci_req->started_substate_machine, - sci_req, - scic_sds_stp_request_started_udma_substate_table, - SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_TC_COMPLETION_SUBSTATE - ); - - return SCI_SUCCESS; -} - -/** - * - * @sci_req: - * @completion_code: - * - * This method processes a TC completion. The expected TC completion is for - * the transmission of the H2D register FIS containing the SATA/STP non-data - * request. This method always successfully processes the TC completion. - * SCI_SUCCESS This value is always returned. - */ -static enum sci_status scic_sds_stp_request_soft_reset_await_h2d_asserted_tc_completion_handler( - struct scic_sds_request *sci_req, - u32 completion_code) -{ - switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { - case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): - scic_sds_request_set_status( - sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS - ); - - sci_base_state_machine_change_state( - &sci_req->started_substate_machine, - SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_DIAGNOSTIC_COMPLETION_SUBSTATE - ); - break; - - default: - /* - * All other completion status cause the IO to be complete. If a NAK - * was received, then it is up to the user to retry the request. */ - scic_sds_request_set_status( - sci_req, - SCU_NORMALIZE_COMPLETION_STATUS(completion_code), - SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR - ); - - sci_base_state_machine_change_state( - &sci_req->state_machine, SCI_BASE_REQUEST_STATE_COMPLETED); - break; - } - - return SCI_SUCCESS; -} - -/** - * - * @sci_req: - * @completion_code: - * - * This method processes a TC completion. The expected TC completion is for - * the transmission of the H2D register FIS containing the SATA/STP non-data - * request. This method always successfully processes the TC completion. - * SCI_SUCCESS This value is always returned. - */ -static enum sci_status scic_sds_stp_request_soft_reset_await_h2d_diagnostic_tc_completion_handler( - struct scic_sds_request *sci_req, - u32 completion_code) -{ - switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { - case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): - scic_sds_request_set_status( - sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS - ); - - sci_base_state_machine_change_state( - &sci_req->started_substate_machine, - SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_D2H_RESPONSE_FRAME_SUBSTATE - ); - break; - - default: - /* - * All other completion status cause the IO to be complete. 
If a NAK - * was received, then it is up to the user to retry the request. */ - scic_sds_request_set_status( - sci_req, - SCU_NORMALIZE_COMPLETION_STATUS(completion_code), - SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR - ); - - sci_base_state_machine_change_state(&sci_req->state_machine, - SCI_BASE_REQUEST_STATE_COMPLETED); - break; - } - - return SCI_SUCCESS; -} - -/** - * - * @request: This parameter specifies the request for which a frame has been - * received. - * @frame_index: This parameter specifies the index of the frame that has been - * received. - * - * This method processes frames received from the target while waiting for a - * device to host register FIS. If a non-register FIS is received during this - * time, it is treated as a protocol violation from an IO perspective. Indicate - * if the received frame was processed successfully. - */ -static enum sci_status scic_sds_stp_request_soft_reset_await_d2h_frame_handler( - struct scic_sds_request *sci_req, - u32 frame_index) -{ - enum sci_status status; - struct dev_to_host_fis *frame_header; - u32 *frame_buffer; - struct scic_sds_stp_request *stp_req = &sci_req->stp.req; - struct scic_sds_controller *scic = sci_req->owning_controller; - - status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control, - frame_index, - (void **)&frame_header); - if (status != SCI_SUCCESS) { - dev_err(scic_to_dev(scic), - "%s: SCIC IO Request 0x%p could not get frame header " - "for frame index %d, status %x\n", - __func__, stp_req, frame_index, status); - return status; - } - - switch (frame_header->fis_type) { - case FIS_REGD2H: - scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control, - frame_index, - (void **)&frame_buffer); - - scic_sds_controller_copy_sata_response(&sci_req->stp.rsp, - frame_header, - frame_buffer); - - /* The command has completed with error */ - scic_sds_request_set_status(sci_req, - SCU_TASK_DONE_CHECK_RESPONSE, - SCI_FAILURE_IO_RESPONSE_VALID); - break; - - default: - dev_warn(scic_to_dev(scic), - "%s: IO Request:0x%p Frame Id:%d protocol " - "violation occurred\n", __func__, stp_req, - frame_index); - - scic_sds_request_set_status(sci_req, SCU_TASK_DONE_UNEXP_FIS, - SCI_FAILURE_PROTOCOL_VIOLATION); - break; - } - - sci_base_state_machine_change_state(&sci_req->state_machine, - SCI_BASE_REQUEST_STATE_COMPLETED); - - /* Frame has been decoded return it to the controller */ - scic_sds_controller_release_frame(scic, frame_index); - - return status; -} - -/* --------------------------------------------------------------------------- */ - -static const struct scic_sds_io_request_state_handler scic_sds_stp_request_started_soft_reset_substate_handler_table[] = { - [SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_ASSERTED_COMPLETION_SUBSTATE] = { - .abort_handler = scic_sds_request_started_state_abort_handler, - .tc_completion_handler = scic_sds_stp_request_soft_reset_await_h2d_asserted_tc_completion_handler, - }, - [SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_DIAGNOSTIC_COMPLETION_SUBSTATE] = { - .abort_handler = scic_sds_request_started_state_abort_handler, - .tc_completion_handler = scic_sds_stp_request_soft_reset_await_h2d_diagnostic_tc_completion_handler, - }, - [SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_D2H_RESPONSE_FRAME_SUBSTATE] = { - .abort_handler = scic_sds_request_started_state_abort_handler, - .frame_handler = scic_sds_stp_request_soft_reset_await_d2h_frame_handler, - }, -}; - -static void scic_sds_stp_request_started_soft_reset_await_h2d_asserted_completion_enter( - void *object) -{ - 
struct scic_sds_request *sci_req = object; - - SET_STATE_HANDLER( - sci_req, - scic_sds_stp_request_started_soft_reset_substate_handler_table, - SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_ASSERTED_COMPLETION_SUBSTATE - ); - - scic_sds_remote_device_set_working_request( - sci_req->target_device, sci_req - ); -} - -static void scic_sds_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter( - void *object) -{ - struct scic_sds_request *sci_req = object; - struct scu_task_context *task_context; - struct host_to_dev_fis *h2d_fis; - enum sci_status status; - - /* Clear the SRST bit */ - h2d_fis = &sci_req->stp.cmd; - h2d_fis->control = 0; - - /* Clear the TC control bit */ - task_context = scic_sds_controller_get_task_context_buffer( - sci_req->owning_controller, sci_req->io_tag); - task_context->control_frame = 0; - - status = scic_controller_continue_io(sci_req); - if (status == SCI_SUCCESS) { - SET_STATE_HANDLER( - sci_req, - scic_sds_stp_request_started_soft_reset_substate_handler_table, - SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_DIAGNOSTIC_COMPLETION_SUBSTATE - ); - } -} - -static void scic_sds_stp_request_started_soft_reset_await_d2h_response_enter( - void *object) -{ - struct scic_sds_request *sci_req = object; - - SET_STATE_HANDLER( - sci_req, - scic_sds_stp_request_started_soft_reset_substate_handler_table, - SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_D2H_RESPONSE_FRAME_SUBSTATE - ); -} - -static const struct sci_base_state scic_sds_stp_request_started_soft_reset_substate_table[] = { - [SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_ASSERTED_COMPLETION_SUBSTATE] = { - .enter_state = scic_sds_stp_request_started_soft_reset_await_h2d_asserted_completion_enter, - }, - [SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_DIAGNOSTIC_COMPLETION_SUBSTATE] = { - .enter_state = scic_sds_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter, - }, - [SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_D2H_RESPONSE_FRAME_SUBSTATE] = { - .enter_state = scic_sds_stp_request_started_soft_reset_await_d2h_response_enter, - }, -}; - -enum sci_status scic_sds_stp_soft_reset_request_construct(struct scic_sds_request *sci_req) -{ - struct scic_sds_stp_request *stp_req = &sci_req->stp.req; - - scic_sds_stp_non_ncq_request_construct(sci_req); - - /* Build the STP task context structure */ - scu_stp_raw_request_construct_task_context(stp_req, sci_req->task_context_buffer); - - sci_base_state_machine_construct(&sci_req->started_substate_machine, - sci_req, - scic_sds_stp_request_started_soft_reset_substate_table, - SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_ASSERTED_COMPLETION_SUBSTATE); - - return SCI_SUCCESS; -} diff --git a/drivers/scsi/isci/stp_request.h b/drivers/scsi/isci/stp_request.h deleted file mode 100644 index eb14874ceda..00000000000 --- a/drivers/scsi/isci/stp_request.h +++ /dev/null @@ -1,195 +0,0 @@ -/* - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - * The full GNU General Public License is included in this distribution - * in the file called LICENSE.GPL. - * - * BSD LICENSE - * - * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#ifndef _SCIC_SDS_STP_REQUEST_T_ -#define _SCIC_SDS_STP_REQUEST_T_ - -#include <linux/dma-mapping.h> -#include <scsi/sas.h> - -struct scic_sds_stp_request { - union { - u32 ncq; - - u32 udma; - - struct scic_sds_stp_pio_request { - /** - * Total transfer for the entire PIO request recorded at request constuction - * time. - * - * @todo Should we just decrement this value for each byte of data transitted - * or received to elemenate the current_transfer_bytes field? - */ - u32 total_transfer_bytes; - - /** - * Total number of bytes received/transmitted in data frames since the start - * of the IO request. At the end of the IO request this should equal the - * total_transfer_bytes. - */ - u32 current_transfer_bytes; - - /** - * The number of bytes requested in the in the PIO setup. - */ - u32 pio_transfer_bytes; - - /** - * PIO Setup ending status value to tell us if we need to wait for another FIS - * or if the transfer is complete. On the receipt of a D2H FIS this will be - * the status field of that FIS. - */ - u8 ending_status; - - /** - * On receipt of a D2H FIS this will be the ending error field if the - * ending_status has the SATA_STATUS_ERR bit set. - */ - u8 ending_error; - - struct scic_sds_request_pio_sgl { - struct scu_sgl_element_pair *sgl_pair; - u8 sgl_set; - u32 sgl_offset; - } request_current; - } pio; - - struct { - /** - * The number of bytes requested in the PIO setup before CDB data frame. 
- */ - u32 device_preferred_cdb_length; - } packet; - } type; -}; - -/** - * enum scic_sds_stp_request_started_udma_substates - This enumeration depicts - * the various sub-states associated with a SATA/STP UDMA protocol operation. - * - * - */ -enum scic_sds_stp_request_started_udma_substates { - SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_TC_COMPLETION_SUBSTATE, - SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_D2H_REG_FIS_SUBSTATE, -}; - -/** - * enum scic_sds_stp_request_started_non_data_substates - This enumeration - * depicts the various sub-states associated with a SATA/STP non-data - * protocol operation. - * - * - */ -enum scic_sds_stp_request_started_non_data_substates { - SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_H2D_COMPLETION_SUBSTATE, - SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_D2H_SUBSTATE, -}; - -/** - * enum scic_sds_stp_request_started_soft_reset_substates - THis enumeration - * depicts the various sub-states associated with a SATA/STP soft reset - * operation. - * - * - */ -enum scic_sds_stp_request_started_soft_reset_substates { - SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_ASSERTED_COMPLETION_SUBSTATE, - SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_DIAGNOSTIC_COMPLETION_SUBSTATE, - SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_D2H_RESPONSE_FRAME_SUBSTATE, -}; - -/* This is the enumeration of the SATA PIO DATA IN started substate machine. */ -enum _scic_sds_stp_request_started_pio_substates { - /** - * While in this state the IO request object is waiting for the TC completion - * notification for the H2D Register FIS - */ - SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_H2D_COMPLETION_SUBSTATE, - - /** - * While in this state the IO request object is waiting for either a PIO Setup - * FIS or a D2H register FIS. The type of frame received is based on the - * result of the prior frame and line conditions. - */ - SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE, - - /** - * While in this state the IO request object is waiting for a DATA frame from - * the device. - */ - SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_DATA_SUBSTATE, - - /** - * While in this state the IO request object is waiting to transmit the next data - * frame to the device. - */ - SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_OUT_TRANSMIT_DATA_SUBSTATE, -}; - -struct scic_sds_request; - -enum sci_status scic_sds_stp_pio_request_construct(struct scic_sds_request *sci_req, - bool copy_rx_frame); -enum sci_status scic_sds_stp_udma_request_construct(struct scic_sds_request *sci_req, - u32 transfer_length, - enum dma_data_direction dir); -enum sci_status scic_sds_stp_non_data_request_construct(struct scic_sds_request *sci_req); -enum sci_status scic_sds_stp_soft_reset_request_construct(struct scic_sds_request *sci_req); -enum sci_status scic_sds_stp_ncq_request_construct(struct scic_sds_request *sci_req, - u32 transfer_length, - enum dma_data_direction dir); -#endif /* _SCIC_SDS_STP_REQUEST_T_ */ -- cgit v1.2.3-70-g09d2 From f00e6ba4996a34f098fe50c78077f0568fd838ec Mon Sep 17 00:00:00 2001 From: Dan Williams <dan.j.williams@intel.com> Date: Tue, 10 May 2011 02:39:11 -0700 Subject: isci: unify request abort handlers Unify the implementation in scic_sds_io_request_terminate and kill the state handler. 
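As a rough guide to the pattern this series applies, here is a minimal, self-contained C sketch (not the isci code; the type and state names are invented for illustration) of what "unifying" means here: the per-state abort_handler function pointers are replaced by one terminate routine that switches on the request's current state.

/*
 * Hypothetical sketch of the refactor pattern, not the driver itself:
 * one switch over the request state instead of a table of per-state
 * abort handlers.
 */
#include <stdio.h>

enum req_state { REQ_CONSTRUCTED, REQ_STARTED, REQ_ABORTING, REQ_COMPLETED };
enum status { STATUS_SUCCESS, STATUS_INVALID_STATE };

struct request { enum req_state state; };

static enum status request_terminate(struct request *req)
{
	switch (req->state) {
	case REQ_CONSTRUCTED:
		/* never posted to the hardware, so complete it immediately */
		req->state = REQ_COMPLETED;
		return STATUS_SUCCESS;
	case REQ_STARTED:
		/* in flight: mark it aborting and wait for the completion */
		req->state = REQ_ABORTING;
		return STATUS_SUCCESS;
	case REQ_ABORTING:
		/* a repeated terminate while aborting just finishes the request */
		req->state = REQ_COMPLETED;
		return STATUS_SUCCESS;
	default:
		fprintf(stderr, "terminate requested in wrong state %d\n", req->state);
		return STATUS_INVALID_STATE;
	}
}

int main(void)
{
	struct request req = { .state = REQ_STARTED };

	return request_terminate(&req) == STATUS_SUCCESS ? 0 : 1;
}

The real scic_sds_io_request_terminate in the hunk below works the same way, except that it drives a sci_base_state_machine and returns SCI_* status codes rather than assigning an enum directly.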
Reported-by: Christoph Hellwig <hch@lst.de> Signed-off-by: Dan Williams <dan.j.williams@intel.com> --- drivers/scsi/isci/request.c | 146 +++++++++++++++----------------------------- drivers/scsi/isci/request.h | 7 --- 2 files changed, 50 insertions(+), 103 deletions(-) (limited to 'drivers/scsi/isci/request.h') diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c index f503e3e18d8..69688636347 100644 --- a/drivers/scsi/isci/request.c +++ b/drivers/scsi/isci/request.c @@ -777,16 +777,58 @@ scic_sds_request_start(struct scic_sds_request *request) } enum sci_status -scic_sds_io_request_terminate(struct scic_sds_request *request) +scic_sds_io_request_terminate(struct scic_sds_request *sci_req) { - if (request->state_handlers->abort_handler) - return request->state_handlers->abort_handler(request); + enum sci_base_request_states state; - dev_warn(scic_to_dev(request->owning_controller), - "%s: SCIC IO Request requested to abort while in wrong " - "state %d\n", - __func__, - sci_base_state_machine_get_state(&request->state_machine)); + state = sci_req->state_machine.current_state_id; + + switch (state) { + case SCI_BASE_REQUEST_STATE_CONSTRUCTED: + scic_sds_request_set_status(sci_req, + SCU_TASK_DONE_TASK_ABORT, + SCI_FAILURE_IO_TERMINATED); + + sci_base_state_machine_change_state(&sci_req->state_machine, + SCI_BASE_REQUEST_STATE_COMPLETED); + return SCI_SUCCESS; + case SCI_BASE_REQUEST_STATE_STARTED: + case SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_COMPLETION: + case SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_RESPONSE: + case SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_TC_COMPLETION: + case SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_TC_COMPLETION_SUBSTATE: + case SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_D2H_REG_FIS_SUBSTATE: + case SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_H2D_COMPLETION_SUBSTATE: + case SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_D2H_SUBSTATE: + case SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_H2D_COMPLETION_SUBSTATE: + case SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE: + case SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_DATA_SUBSTATE: + case SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_OUT_TRANSMIT_DATA_SUBSTATE: + case SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_ASSERTED_COMPLETION_SUBSTATE: + case SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_DIAGNOSTIC_COMPLETION_SUBSTATE: + case SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_D2H_RESPONSE_FRAME_SUBSTATE: + sci_base_state_machine_change_state(&sci_req->state_machine, + SCI_BASE_REQUEST_STATE_ABORTING); + return SCI_SUCCESS; + case SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_RESPONSE: + sci_base_state_machine_change_state(&sci_req->state_machine, + SCI_BASE_REQUEST_STATE_ABORTING); + sci_base_state_machine_change_state(&sci_req->state_machine, + SCI_BASE_REQUEST_STATE_COMPLETED); + return SCI_SUCCESS; + case SCI_BASE_REQUEST_STATE_ABORTING: + sci_base_state_machine_change_state(&sci_req->state_machine, + SCI_BASE_REQUEST_STATE_COMPLETED); + return SCI_SUCCESS; + case SCI_BASE_REQUEST_STATE_COMPLETED: + default: + dev_warn(scic_to_dev(sci_req->owning_controller), + "%s: SCIC IO Request requested to abort while in wrong " + "state %d\n", + __func__, + sci_base_state_machine_get_state(&sci_req->state_machine)); + break; + } return SCI_FAILURE_INVALID_STATE; } @@ -930,34 +972,6 @@ static enum sci_status scic_sds_request_constructed_state_start_handler( return SCI_FAILURE_INSUFFICIENT_RESOURCES; } -/* - * This method implements the action to be taken when an SCIC_SDS_IO_REQUEST_T 
- * object receives a scic_sds_request_terminate() request. Since the request - * has not yet been posted to the hardware the request transitions to the - * completed state. enum sci_status SCI_SUCCESS - */ -static enum sci_status scic_sds_request_constructed_state_abort_handler( - struct scic_sds_request *request) -{ - /* - * This request has been terminated by the user make sure that the correct - * status code is returned */ - scic_sds_request_set_status(request, - SCU_TASK_DONE_TASK_ABORT, - SCI_FAILURE_IO_TERMINATED); - - sci_base_state_machine_change_state(&request->state_machine, - SCI_BASE_REQUEST_STATE_COMPLETED); - return SCI_SUCCESS; -} - -static enum sci_status scic_sds_request_started_state_abort_handler(struct scic_sds_request *sci_req) -{ - sci_base_state_machine_change_state(&sci_req->state_machine, - SCI_BASE_REQUEST_STATE_ABORTING); - return SCI_SUCCESS; -} - /* * scic_sds_request_started_state_tc_completion_handler() - This method process * TC (task context) completions for normal IO request (i.e. Task/Abort @@ -1247,26 +1261,6 @@ static enum sci_status scic_sds_request_completed_state_complete_handler( return SCI_SUCCESS; } -/* - * ***************************************************************************** - * * ABORTING STATE HANDLERS - * ***************************************************************************** */ - -/* - * This method implements the action to be taken when an SCIC_SDS_IO_REQUEST_T - * object receives a scic_sds_request_terminate() request. This method is the - * io request aborting state abort handlers. On receipt of a multiple - * terminate requests the io request will transition to the completed state. - * This should not happen in normal operation. enum sci_status SCI_SUCCESS - */ -static enum sci_status scic_sds_request_aborting_state_abort_handler( - struct scic_sds_request *request) -{ - sci_base_state_machine_change_state(&request->state_machine, - SCI_BASE_REQUEST_STATE_COMPLETED); - return SCI_SUCCESS; -} - /* * This method implements the action to be taken when an SCIC_SDS_IO_REQUEST_T * object receives a scic_sds_request_task_completion() request. This method @@ -1378,28 +1372,6 @@ static enum sci_status scic_sds_ssp_task_request_await_tc_completion_tc_completi return SCI_SUCCESS; } -/** - * This method is responsible for processing a terminate/abort request for this - * TC while the request is waiting for the task management response - * unsolicited frame. - * @sci_req: This parameter specifies the request for which the - * termination was requested. - * - * This method returns an indication as to whether the abort request was - * successfully handled. need to update to ensure the received UF doesn't cause - * damage to subsequent requests (i.e. put the extended tag in a holding - * pattern for this particular device). - */ -static enum sci_status scic_sds_ssp_task_request_await_tc_response_abort_handler( - struct scic_sds_request *request) -{ - sci_base_state_machine_change_state(&request->state_machine, - SCI_BASE_REQUEST_STATE_ABORTING); - sci_base_state_machine_change_state(&request->state_machine, - SCI_BASE_REQUEST_STATE_COMPLETED); - return SCI_SUCCESS; -} - /** * This method processes an unsolicited frame while the task mgmt request is * waiting for a response frame. 
It will copy the response data, release @@ -2560,81 +2532,63 @@ static const struct scic_sds_io_request_state_handler scic_sds_request_state_han [SCI_BASE_REQUEST_STATE_INITIAL] = { }, [SCI_BASE_REQUEST_STATE_CONSTRUCTED] = { .start_handler = scic_sds_request_constructed_state_start_handler, - .abort_handler = scic_sds_request_constructed_state_abort_handler, }, [SCI_BASE_REQUEST_STATE_STARTED] = { - .abort_handler = scic_sds_request_started_state_abort_handler, .tc_completion_handler = scic_sds_request_started_state_tc_completion_handler, .frame_handler = scic_sds_request_started_state_frame_handler, }, [SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_COMPLETION] = { - .abort_handler = scic_sds_request_started_state_abort_handler, .tc_completion_handler = scic_sds_ssp_task_request_await_tc_completion_tc_completion_handler, }, [SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_RESPONSE] = { - .abort_handler = scic_sds_ssp_task_request_await_tc_response_abort_handler, .frame_handler = scic_sds_ssp_task_request_await_tc_response_frame_handler, }, [SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_RESPONSE] = { - .abort_handler = scic_sds_request_started_state_abort_handler, .tc_completion_handler = scic_sds_smp_request_await_response_tc_completion_handler, .frame_handler = scic_sds_smp_request_await_response_frame_handler, }, [SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_TC_COMPLETION] = { - .abort_handler = scic_sds_request_started_state_abort_handler, .tc_completion_handler = scic_sds_smp_request_await_tc_completion_tc_completion_handler, }, [SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_TC_COMPLETION_SUBSTATE] = { - .abort_handler = scic_sds_request_started_state_abort_handler, .tc_completion_handler = scic_sds_stp_request_udma_await_tc_completion_tc_completion_handler, .frame_handler = scic_sds_stp_request_udma_general_frame_handler, }, [SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_D2H_REG_FIS_SUBSTATE] = { - .abort_handler = scic_sds_request_started_state_abort_handler, .frame_handler = scic_sds_stp_request_udma_await_d2h_reg_fis_frame_handler, }, [SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_H2D_COMPLETION_SUBSTATE] = { - .abort_handler = scic_sds_request_started_state_abort_handler, .tc_completion_handler = scic_sds_stp_request_non_data_await_h2d_tc_completion_handler, }, [SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_D2H_SUBSTATE] = { - .abort_handler = scic_sds_request_started_state_abort_handler, .frame_handler = scic_sds_stp_request_non_data_await_d2h_frame_handler, }, [SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_H2D_COMPLETION_SUBSTATE] = { - .abort_handler = scic_sds_request_started_state_abort_handler, .tc_completion_handler = scic_sds_stp_request_pio_await_h2d_completion_tc_completion_handler, }, [SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE] = { - .abort_handler = scic_sds_request_started_state_abort_handler, .frame_handler = scic_sds_stp_request_pio_await_frame_frame_handler }, [SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_DATA_SUBSTATE] = { - .abort_handler = scic_sds_request_started_state_abort_handler, .event_handler = scic_sds_stp_request_pio_data_in_await_data_event_handler, .frame_handler = scic_sds_stp_request_pio_data_in_await_data_frame_handler }, [SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_OUT_TRANSMIT_DATA_SUBSTATE] = { - .abort_handler = scic_sds_request_started_state_abort_handler, .tc_completion_handler = scic_sds_stp_request_pio_data_out_await_data_transmit_completion_tc_completion_handler, }, 
[SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_ASSERTED_COMPLETION_SUBSTATE] = { - .abort_handler = scic_sds_request_started_state_abort_handler, .tc_completion_handler = scic_sds_stp_request_soft_reset_await_h2d_asserted_tc_completion_handler, }, [SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_DIAGNOSTIC_COMPLETION_SUBSTATE] = { - .abort_handler = scic_sds_request_started_state_abort_handler, .tc_completion_handler = scic_sds_stp_request_soft_reset_await_h2d_diagnostic_tc_completion_handler, }, [SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_D2H_RESPONSE_FRAME_SUBSTATE] = { - .abort_handler = scic_sds_request_started_state_abort_handler, .frame_handler = scic_sds_stp_request_soft_reset_await_d2h_frame_handler, }, [SCI_BASE_REQUEST_STATE_COMPLETED] = { .complete_handler = scic_sds_request_completed_state_complete_handler, }, [SCI_BASE_REQUEST_STATE_ABORTING] = { - .abort_handler = scic_sds_request_aborting_state_abort_handler, .tc_completion_handler = scic_sds_request_aborting_state_tc_completion_handler, .frame_handler = scic_sds_request_aborting_state_frame_handler, }, diff --git a/drivers/scsi/isci/request.h b/drivers/scsi/isci/request.h index 95b65891fc4..b1733695231 100644 --- a/drivers/scsi/isci/request.h +++ b/drivers/scsi/isci/request.h @@ -444,12 +444,6 @@ struct scic_sds_io_request_state_handler { */ scic_sds_io_request_handler_t start_handler; - /** - * The abort_handler specifies the method invoked when a user attempts to - * abort a request. - */ - scic_sds_io_request_handler_t abort_handler; - /** * The complete_handler specifies the method invoked when a user attempts to * complete a request. @@ -459,7 +453,6 @@ struct scic_sds_io_request_state_handler { scic_sds_io_request_task_completion_handler_t tc_completion_handler; scic_sds_io_request_event_handler_t event_handler; scic_sds_io_request_frame_handler_t frame_handler; - }; /** -- cgit v1.2.3-70-g09d2 From f4636a7b2ab8288466b83a8459d47c43143a70dc Mon Sep 17 00:00:00 2001 From: Piotr Sawicki <piotr.sawicki@intel.com> Date: Tue, 10 May 2011 23:50:32 +0000 Subject: isci: unify request start handlers Unify the implementation in scic_sds_request_start and kill the state handler. 
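In the same spirit as the abort-handler change above, a minimal standalone C sketch (again with invented names, not the driver) of a unified start path: check the state up front, allocate a tag if the caller did not supply one, then transition to the started state.

/*
 * Hypothetical sketch of a unified start handler; tag_alloc() and the
 * field names are stand-ins, not driver APIs.
 */
#include <stdio.h>

enum req_state { REQ_CONSTRUCTED, REQ_STARTED, REQ_COMPLETED };
enum status { STATUS_SUCCESS, STATUS_INVALID_STATE, STATUS_NO_RESOURCES };

#define INVALID_TAG 0xffffu

struct request {
	enum req_state state;
	unsigned short io_tag;
};

static unsigned short tag_alloc(void)
{
	static unsigned short next_tag;

	return next_tag++;	/* pretend allocation always succeeds */
}

static enum status request_start(struct request *req)
{
	if (req->state != REQ_CONSTRUCTED) {
		fprintf(stderr, "start requested in wrong state %d\n", req->state);
		return STATUS_INVALID_STATE;
	}

	/* allocate a tag only when the caller has not assigned one */
	if (req->io_tag == INVALID_TAG)
		req->io_tag = tag_alloc();
	if (req->io_tag == INVALID_TAG)
		return STATUS_NO_RESOURCES;

	/* ...fill in the task context and post to the hardware here... */
	req->state = REQ_STARTED;
	return STATUS_SUCCESS;
}

int main(void)
{
	struct request req = { .state = REQ_CONSTRUCTED, .io_tag = INVALID_TAG };

	return request_start(&req) == STATUS_SUCCESS ? 0 : 1;
}

The driver's scic_sds_request_start in the hunk below performs the same check, but also fills the task context by protocol type (SSP/SMP/STP) before moving to SCI_BASE_REQUEST_STATE_STARTED.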
Reported-by: Christoph Hellwig <hch@lst.de> Signed-off-by: Piotr Sawicki <piotr.sawicki@intel.com> [remove scic_sds_request_constructed_state_start_handler] Signed-off-by: Dan Williams <dan.j.williams@intel.com> --- drivers/scsi/isci/remote_device.c | 6 +- drivers/scsi/isci/request.c | 156 +++++++++++++++++--------------------- drivers/scsi/isci/request.h | 6 -- 3 files changed, 74 insertions(+), 94 deletions(-) (limited to 'drivers/scsi/isci/request.h') diff --git a/drivers/scsi/isci/remote_device.c b/drivers/scsi/isci/remote_device.c index 0bb639dfbad..606ee2be154 100644 --- a/drivers/scsi/isci/remote_device.c +++ b/drivers/scsi/isci/remote_device.c @@ -515,7 +515,7 @@ enum sci_status scic_sds_remote_device_start_io(struct scic_sds_controller *scic if (status != SCI_SUCCESS) break; - status = sci_req->state_handlers->start_handler(sci_req); + status = scic_sds_request_start(sci_req); if (status != SCI_SUCCESS) break; @@ -540,7 +540,7 @@ enum sci_status scic_sds_remote_device_start_io(struct scic_sds_controller *scic if (status != SCI_SUCCESS) break; - status = sci_req->state_handlers->start_handler(sci_req); + status = scic_sds_request_start(sci_req); } else return SCI_FAILURE_INVALID_STATE; break; @@ -709,7 +709,7 @@ enum sci_status scic_sds_remote_device_start_task(struct scic_sds_controller *sc if (status != SCI_SUCCESS) goto out; - status = sci_req->state_handlers->start_handler(sci_req); + status = scic_sds_request_start(sci_req); if (status != SCI_SUCCESS) goto out; diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c index 69688636347..41a418d8b33 100644 --- a/drivers/scsi/isci/request.c +++ b/drivers/scsi/isci/request.c @@ -757,23 +757,80 @@ static u32 sci_req_tx_bytes(struct scic_sds_request *sci_req) return ret_val; } -enum sci_status -scic_sds_request_start(struct scic_sds_request *request) +enum sci_status scic_sds_request_start(struct scic_sds_request *sci_req) { - if (request->device_sequence != - scic_sds_remote_device_get_sequence(request->target_device)) + struct scic_sds_controller *scic = sci_req->owning_controller; + struct scu_task_context *task_context; + enum sci_base_request_states state; + + if (sci_req->device_sequence != + scic_sds_remote_device_get_sequence(sci_req->target_device)) return SCI_FAILURE; - if (request->state_handlers->start_handler) - return request->state_handlers->start_handler(request); + state = sci_req->state_machine.current_state_id; + if (state != SCI_BASE_REQUEST_STATE_CONSTRUCTED) { + dev_warn(scic_to_dev(scic), + "%s: SCIC IO Request requested to start while in wrong " + "state %d\n", __func__, state); + return SCI_FAILURE_INVALID_STATE; + } - dev_warn(scic_to_dev(request->owning_controller), - "%s: SCIC IO Request requested to start while in wrong " - "state %d\n", - __func__, - sci_base_state_machine_get_state(&request->state_machine)); + /* if necessary, allocate a TCi for the io request object and then will, + * if necessary, copy the constructed TC data into the actual TC buffer. + * If everything is successful the post context field is updated with + * the TCi so the controller can post the request to the hardware. 
+ */ + if (sci_req->io_tag == SCI_CONTROLLER_INVALID_IO_TAG) + sci_req->io_tag = scic_controller_allocate_io_tag(scic); - return SCI_FAILURE_INVALID_STATE; + /* Record the IO Tag in the request */ + if (sci_req->io_tag != SCI_CONTROLLER_INVALID_IO_TAG) { + task_context = sci_req->task_context_buffer; + + task_context->task_index = scic_sds_io_tag_get_index(sci_req->io_tag); + + switch (task_context->protocol_type) { + case SCU_TASK_CONTEXT_PROTOCOL_SMP: + case SCU_TASK_CONTEXT_PROTOCOL_SSP: + /* SSP/SMP Frame */ + task_context->type.ssp.tag = sci_req->io_tag; + task_context->type.ssp.target_port_transfer_tag = + 0xFFFF; + break; + + case SCU_TASK_CONTEXT_PROTOCOL_STP: + /* STP/SATA Frame + * task_context->type.stp.ncq_tag = sci_req->ncq_tag; + */ + break; + + case SCU_TASK_CONTEXT_PROTOCOL_NONE: + /* / @todo When do we set no protocol type? */ + break; + + default: + /* This should never happen since we build the IO + * requests */ + break; + } + + /* + * Check to see if we need to copy the task context buffer + * or have been building into the task context buffer */ + if (sci_req->was_tag_assigned_by_user == false) + scic_sds_controller_copy_task_context(scic, sci_req); + + /* Add to the post_context the io tag value */ + sci_req->post_context |= scic_sds_io_tag_get_index(sci_req->io_tag); + + /* Everything is good go ahead and change state */ + sci_base_state_machine_change_state(&sci_req->state_machine, + SCI_BASE_REQUEST_STATE_STARTED); + + return SCI_SUCCESS; + } + + return SCI_FAILURE_INSUFFICIENT_RESOURCES; } enum sci_status @@ -903,75 +960,6 @@ static void scic_sds_io_request_copy_response(struct scic_sds_request *sci_req) memcpy(resp_buf, ssp_response->resp_data, len); } -/* - * This method implements the action taken when a constructed - * SCIC_SDS_IO_REQUEST_T object receives a scic_sds_request_start() request. - * This method will, if necessary, allocate a TCi for the io request object and - * then will, if necessary, copy the constructed TC data into the actual TC - * buffer. If everything is successful the post context field is updated with - * the TCi so the controller can post the request to the hardware. enum sci_status - * SCI_SUCCESS SCI_FAILURE_INSUFFICIENT_RESOURCES - */ -static enum sci_status scic_sds_request_constructed_state_start_handler( - struct scic_sds_request *request) -{ - struct scu_task_context *task_context; - - if (request->io_tag == SCI_CONTROLLER_INVALID_IO_TAG) { - request->io_tag = - scic_controller_allocate_io_tag(request->owning_controller); - } - - /* Record the IO Tag in the request */ - if (request->io_tag != SCI_CONTROLLER_INVALID_IO_TAG) { - task_context = request->task_context_buffer; - - task_context->task_index = scic_sds_io_tag_get_index(request->io_tag); - - switch (task_context->protocol_type) { - case SCU_TASK_CONTEXT_PROTOCOL_SMP: - case SCU_TASK_CONTEXT_PROTOCOL_SSP: - /* SSP/SMP Frame */ - task_context->type.ssp.tag = request->io_tag; - task_context->type.ssp.target_port_transfer_tag = 0xFFFF; - break; - - case SCU_TASK_CONTEXT_PROTOCOL_STP: - /* - * STP/SATA Frame - * task_context->type.stp.ncq_tag = request->ncq_tag; */ - break; - - case SCU_TASK_CONTEXT_PROTOCOL_NONE: - /* / @todo When do we set no protocol type? 
*/ - break; - - default: - /* This should never happen since we build the IO requests */ - break; - } - - /* - * Check to see if we need to copy the task context buffer - * or have been building into the task context buffer */ - if (request->was_tag_assigned_by_user == false) { - scic_sds_controller_copy_task_context( - request->owning_controller, request); - } - - /* Add to the post_context the io tag value */ - request->post_context |= scic_sds_io_tag_get_index(request->io_tag); - - /* Everything is good go ahead and change state */ - sci_base_state_machine_change_state(&request->state_machine, - SCI_BASE_REQUEST_STATE_STARTED); - - return SCI_SUCCESS; - } - - return SCI_FAILURE_INSUFFICIENT_RESOURCES; -} - /* * scic_sds_request_started_state_tc_completion_handler() - This method process * TC (task context) completions for normal IO request (i.e. Task/Abort @@ -2529,10 +2517,8 @@ static enum sci_status scic_sds_stp_request_soft_reset_await_d2h_frame_handler( } static const struct scic_sds_io_request_state_handler scic_sds_request_state_handler_table[] = { - [SCI_BASE_REQUEST_STATE_INITIAL] = { }, - [SCI_BASE_REQUEST_STATE_CONSTRUCTED] = { - .start_handler = scic_sds_request_constructed_state_start_handler, - }, + [SCI_BASE_REQUEST_STATE_INITIAL] = {}, + [SCI_BASE_REQUEST_STATE_CONSTRUCTED] = {}, [SCI_BASE_REQUEST_STATE_STARTED] = { .tc_completion_handler = scic_sds_request_started_state_tc_completion_handler, .frame_handler = scic_sds_request_started_state_frame_handler, diff --git a/drivers/scsi/isci/request.h b/drivers/scsi/isci/request.h index b1733695231..bf8ac186085 100644 --- a/drivers/scsi/isci/request.h +++ b/drivers/scsi/isci/request.h @@ -438,12 +438,6 @@ typedef enum sci_status (*scic_sds_io_request_task_completion_handler_t) * */ struct scic_sds_io_request_state_handler { - /** - * The start_handler specifies the method invoked when a user attempts to - * start a request. - */ - scic_sds_io_request_handler_t start_handler; - /** * The complete_handler specifies the method invoked when a user attempts to * complete a request. -- cgit v1.2.3-70-g09d2 From d1c637c35b33ddd2b405956e04b50939bb10ed2a Mon Sep 17 00:00:00 2001 From: Dan Williams <dan.j.williams@intel.com> Date: Wed, 11 May 2011 08:27:47 -0700 Subject: isci: unify request frame handlers Unify the implementation in scic_sds_io_request_frame_handler and kill the state handler. 
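As with the abort and start paths, a small self-contained C sketch (invented names, not the driver) of a unified frame handler: one switch over the request state, with the unsolicited frame released back to its owner on every path.

/*
 * Hypothetical sketch of a unified frame handler; release_frame() is a
 * stand-in for returning the unsolicited-frame buffer to the controller.
 */
#include <stdio.h>

enum req_state { REQ_STARTED, REQ_TMF_AWAIT_RESPONSE, REQ_ABORTING, REQ_COMPLETED };
enum status { STATUS_SUCCESS, STATUS_INVALID_STATE };

struct request { enum req_state state; };

static void release_frame(unsigned int frame_index)
{
	printf("frame %u released\n", frame_index);
}

static enum status request_frame_handler(struct request *req, unsigned int frame_index)
{
	switch (req->state) {
	case REQ_STARTED:
		/* copy the response out of the frame; the request stays started */
		release_frame(frame_index);
		return STATUS_SUCCESS;
	case REQ_TMF_AWAIT_RESPONSE:
		/* task-management response received: the request is done */
		req->state = REQ_COMPLETED;
		release_frame(frame_index);
		return STATUS_SUCCESS;
	case REQ_ABORTING:
		/* nothing useful arrives while aborting; just drop the frame */
		release_frame(frame_index);
		return STATUS_SUCCESS;
	default:
		fprintf(stderr, "unexpected frame %u in state %d\n", frame_index, req->state);
		release_frame(frame_index);
		return STATUS_INVALID_STATE;
	}
}

int main(void)
{
	struct request req = { .state = REQ_TMF_AWAIT_RESPONSE };

	return request_frame_handler(&req, 7) == STATUS_SUCCESS ? 0 : 1;
}

The driver's scic_sds_io_request_frame_handler in the hunk below folds the per-state frame handlers (SSP response, SMP response, and the STP non-data/PIO/soft-reset paths) into one switch of this shape.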
Reported-by: Christoph Hellwig <hch@lst.de> Signed-off-by: Dan Williams <dan.j.williams@intel.com> --- drivers/scsi/isci/remote_device.c | 3 +- drivers/scsi/isci/request.c | 1106 ++++++++++++++++--------------------- drivers/scsi/isci/request.h | 6 - 3 files changed, 465 insertions(+), 650 deletions(-) (limited to 'drivers/scsi/isci/request.h') diff --git a/drivers/scsi/isci/remote_device.c b/drivers/scsi/isci/remote_device.c index 606ee2be154..b900e2c1b63 100644 --- a/drivers/scsi/isci/remote_device.c +++ b/drivers/scsi/isci/remote_device.c @@ -310,8 +310,7 @@ enum sci_status scic_sds_remote_device_frame_handler(struct scic_sds_remote_devi sci_req = scic_request_by_tag(scic, be16_to_cpu(hdr.tag)); if (sci_req && sci_req->target_device == sci_dev) { /* The IO request is now in charge of releasing the frame */ - status = sci_req->state_handlers->frame_handler(sci_req, - frame_index); + status = scic_sds_io_request_frame_handler(sci_req, frame_index); } else { /* We could not map this tag to a valid IO * request Just toss the frame and continue diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c index 41a418d8b33..b9f97e8cc5e 100644 --- a/drivers/scsi/isci/request.c +++ b/drivers/scsi/isci/request.c @@ -907,34 +907,6 @@ enum sci_status scic_sds_io_request_event_handler( return SCI_FAILURE_INVALID_STATE; } -/** - * - * @sci_req: The SCIC_SDS_IO_REQUEST_T object for which the start - * operation is to be executed. - * @frame_index: The frame index returned by the hardware for the reqeust - * object. - * - * This method invokes the core state frame handler for the - * SCIC_SDS_IO_REQUEST_T object. enum sci_status - */ -enum sci_status scic_sds_io_request_frame_handler( - struct scic_sds_request *request, - u32 frame_index) -{ - if (request->state_handlers->frame_handler) - return request->state_handlers->frame_handler(request, frame_index); - - dev_warn(scic_to_dev(request->owning_controller), - "%s: SCIC IO Request given unexpected frame %x while in " - "state %d\n", - __func__, - frame_index, - sci_base_state_machine_get_state(&request->state_machine)); - - scic_sds_controller_release_frame(request->owning_controller, frame_index); - return SCI_FAILURE_INVALID_STATE; -} - /* * This function copies response data for requests returning response data * instead of sense data. @@ -1149,81 +1121,6 @@ scic_sds_io_request_tc_completion(struct scic_sds_request *request, u32 completi return SCI_FAILURE_INVALID_STATE; } -/* - * This method implements the action to be taken when an SCIC_SDS_IO_REQUEST_T - * object receives a scic_sds_request_frame_handler() request. This method - * first determines the frame type received. If this is a response frame then - * the response data is copied to the io request response buffer for processing - * at completion time. If the frame type is not a response buffer an error is - * logged. 
enum sci_status SCI_SUCCESS SCI_FAILURE_INVALID_PARAMETER_VALUE - */ -static enum sci_status -scic_sds_request_started_state_frame_handler(struct scic_sds_request *sci_req, - u32 frame_index) -{ - enum sci_status status; - u32 *frame_header; - struct ssp_frame_hdr ssp_hdr; - ssize_t word_cnt; - - status = scic_sds_unsolicited_frame_control_get_header( - &(scic_sds_request_get_controller(sci_req)->uf_control), - frame_index, - (void **)&frame_header); - - word_cnt = sizeof(struct ssp_frame_hdr) / sizeof(u32); - sci_swab32_cpy(&ssp_hdr, frame_header, word_cnt); - - if (ssp_hdr.frame_type == SSP_RESPONSE) { - struct ssp_response_iu *resp_iu; - ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32); - - status = scic_sds_unsolicited_frame_control_get_buffer( - &(scic_sds_request_get_controller(sci_req)->uf_control), - frame_index, - (void **)&resp_iu); - - sci_swab32_cpy(&sci_req->ssp.rsp, - resp_iu, word_cnt); - - resp_iu = &sci_req->ssp.rsp; - - if ((resp_iu->datapres == 0x01) || - (resp_iu->datapres == 0x02)) { - scic_sds_request_set_status( - sci_req, - SCU_TASK_DONE_CHECK_RESPONSE, - SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); - } else - scic_sds_request_set_status( - sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS); - } else { - /* This was not a response frame why did it get forwarded? */ - dev_err(scic_to_dev(sci_req->owning_controller), - "%s: SCIC IO Request 0x%p received unexpected " - "frame %d type 0x%02x\n", - __func__, - sci_req, - frame_index, - ssp_hdr.frame_type); - } - - /* - * In any case we are done with this frame buffer return it to the - * controller - */ - scic_sds_controller_release_frame( - sci_req->owning_controller, frame_index); - - return SCI_SUCCESS; -} - -/* - * ***************************************************************************** - * * COMPLETED STATE HANDLERS - * ***************************************************************************** */ - - /* * This method implements the action to be taken when an SCIC_SDS_IO_REQUEST_T * object receives a scic_sds_request_complete() request. This method frees up @@ -1281,24 +1178,6 @@ static enum sci_status scic_sds_request_aborting_state_tc_completion_handler( return SCI_SUCCESS; } -/* - * This method implements the action to be taken when an SCIC_SDS_IO_REQUEST_T - * object receives a scic_sds_request_frame_handler() request. This method - * discards the unsolicited frame since we are waiting for the abort task - * completion. enum sci_status SCI_SUCCESS - */ -static enum sci_status scic_sds_request_aborting_state_frame_handler( - struct scic_sds_request *sci_req, - u32 frame_index) -{ - /* TODO: Is it even possible to get an unsolicited frame in the aborting state? */ - - scic_sds_controller_release_frame( - sci_req->owning_controller, frame_index); - - return SCI_SUCCESS; -} - /** * This method processes the completions transport layer (TL) status to * determine if the RAW task management frame was sent successfully. If the @@ -1360,34 +1239,6 @@ static enum sci_status scic_sds_ssp_task_request_await_tc_completion_tc_completi return SCI_SUCCESS; } -/** - * This method processes an unsolicited frame while the task mgmt request is - * waiting for a response frame. It will copy the response data, release - * the unsolicited frame, and transition the request to the - * SCI_BASE_REQUEST_STATE_COMPLETED state. - * @sci_req: This parameter specifies the request for which the - * unsolicited frame was received. - * @frame_index: This parameter indicates the unsolicited frame index that - * should contain the response. 
- * - * This method returns an indication of whether the TC response frame was - * handled successfully or not. SCI_SUCCESS Currently this value is always - * returned and indicates successful processing of the TC response. Should - * probably update to check frame type and make sure it is a response frame. - */ -static enum sci_status scic_sds_ssp_task_request_await_tc_response_frame_handler( - struct scic_sds_request *request, - u32 frame_index) -{ - scic_sds_io_request_copy_response(request); - - sci_base_state_machine_change_state(&request->state_machine, - SCI_BASE_REQUEST_STATE_COMPLETED); - scic_sds_controller_release_frame(request->owning_controller, - frame_index); - return SCI_SUCCESS; -} - /** * This method processes an abnormal TC completion while the SMP request is * waiting for a response frame. It decides what happened to the IO based @@ -1450,81 +1301,6 @@ static enum sci_status scic_sds_smp_request_await_response_tc_completion_handler return SCI_SUCCESS; } -/* - * This function processes an unsolicited frame while the SMP request is waiting - * for a response frame. It will copy the response data, release the - * unsolicited frame, and transition the request to the - * SCI_BASE_REQUEST_STATE_COMPLETED state. - * @sci_req: This parameter specifies the request for which the - * unsolicited frame was received. - * @frame_index: This parameter indicates the unsolicited frame index that - * should contain the response. - * - * This function returns an indication of whether the response frame was handled - * successfully or not. SCI_SUCCESS Currently this value is always returned and - * indicates successful processing of the TC response. - */ -static enum sci_status -scic_sds_smp_request_await_response_frame_handler(struct scic_sds_request *sci_req, - u32 frame_index) -{ - enum sci_status status; - void *frame_header; - struct smp_resp *rsp_hdr = &sci_req->smp.rsp; - ssize_t word_cnt = SMP_RESP_HDR_SZ / sizeof(u32); - - status = scic_sds_unsolicited_frame_control_get_header( - &(scic_sds_request_get_controller(sci_req)->uf_control), - frame_index, - &frame_header); - - /* byte swap the header. */ - sci_swab32_cpy(rsp_hdr, frame_header, word_cnt); - - if (rsp_hdr->frame_type == SMP_RESPONSE) { - void *smp_resp; - - status = scic_sds_unsolicited_frame_control_get_buffer( - &(scic_sds_request_get_controller(sci_req)->uf_control), - frame_index, - &smp_resp); - - word_cnt = (sizeof(struct smp_req) - SMP_RESP_HDR_SZ) / - sizeof(u32); - - sci_swab32_cpy(((u8 *) rsp_hdr) + SMP_RESP_HDR_SZ, - smp_resp, word_cnt); - - scic_sds_request_set_status( - sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS); - - sci_base_state_machine_change_state(&sci_req->state_machine, - SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_TC_COMPLETION); - } else { - /* This was not a response frame why did it get forwarded? */ - dev_err(scic_to_dev(sci_req->owning_controller), - "%s: SCIC SMP Request 0x%p received unexpected frame " - "%d type 0x%02x\n", - __func__, - sci_req, - frame_index, - rsp_hdr->frame_type); - - scic_sds_request_set_status( - sci_req, - SCU_TASK_DONE_SMP_FRM_TYPE_ERR, - SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); - - sci_base_state_machine_change_state(&sci_req->state_machine, - SCI_BASE_REQUEST_STATE_COMPLETED); - } - - scic_sds_controller_release_frame(sci_req->owning_controller, - frame_index); - - return SCI_SUCCESS; -} - /** * This method processes the completions transport layer (TL) status to * determine if the SMP request was sent successfully. 
If the SMP request @@ -1668,76 +1444,6 @@ static enum sci_status scic_sds_stp_request_non_data_await_h2d_tc_completion_han return SCI_SUCCESS; } -/** - * - * @request: This parameter specifies the request for which a frame has been - * received. - * @frame_index: This parameter specifies the index of the frame that has been - * received. - * - * This method processes frames received from the target while waiting for a - * device to host register FIS. If a non-register FIS is received during this - * time, it is treated as a protocol violation from an IO perspective. Indicate - * if the received frame was processed successfully. - */ -static enum sci_status scic_sds_stp_request_non_data_await_d2h_frame_handler( - struct scic_sds_request *sci_req, - u32 frame_index) -{ - enum sci_status status; - struct dev_to_host_fis *frame_header; - u32 *frame_buffer; - struct scic_sds_stp_request *stp_req = &sci_req->stp.req; - struct scic_sds_controller *scic = sci_req->owning_controller; - - status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control, - frame_index, - (void **)&frame_header); - - if (status != SCI_SUCCESS) { - dev_err(scic_to_dev(sci_req->owning_controller), - "%s: SCIC IO Request 0x%p could not get frame header " - "for frame index %d, status %x\n", - __func__, stp_req, frame_index, status); - - return status; - } - - switch (frame_header->fis_type) { - case FIS_REGD2H: - scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control, - frame_index, - (void **)&frame_buffer); - - scic_sds_controller_copy_sata_response(&sci_req->stp.rsp, - frame_header, - frame_buffer); - - /* The command has completed with error */ - scic_sds_request_set_status(sci_req, SCU_TASK_DONE_CHECK_RESPONSE, - SCI_FAILURE_IO_RESPONSE_VALID); - break; - - default: - dev_warn(scic_to_dev(scic), - "%s: IO Request:0x%p Frame Id:%d protocol " - "violation occurred\n", __func__, stp_req, - frame_index); - - scic_sds_request_set_status(sci_req, SCU_TASK_DONE_UNEXP_FIS, - SCI_FAILURE_PROTOCOL_VIOLATION); - break; - } - - sci_base_state_machine_change_state(&sci_req->state_machine, - SCI_BASE_REQUEST_STATE_COMPLETED); - - /* Frame has been decoded return it to the controller */ - scic_sds_controller_release_frame(scic, frame_index); - - return status; -} - #define SCU_MAX_FRAME_BUFFER_SIZE 0x400 /* 1K is the maximum SCU frame data payload */ /* transmit DATA_FIS from (current sgl + offset) for input @@ -1952,248 +1658,58 @@ static enum sci_status scic_sds_stp_request_pio_await_h2d_completion_tc_completi return status; } -static enum sci_status scic_sds_stp_request_pio_await_frame_frame_handler(struct scic_sds_request *sci_req, - u32 frame_index) +static enum sci_status scic_sds_stp_request_pio_data_out_await_data_transmit_completion_tc_completion_handler( + + struct scic_sds_request *sci_req, + u32 completion_code) { - struct scic_sds_controller *scic = sci_req->owning_controller; + enum sci_status status = SCI_SUCCESS; + bool all_frames_transferred = false; struct scic_sds_stp_request *stp_req = &sci_req->stp.req; - struct isci_request *ireq = sci_req_to_ireq(sci_req); - struct sas_task *task = isci_request_access_task(ireq); - struct dev_to_host_fis *frame_header; - enum sci_status status; - u32 *frame_buffer; - - status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control, - frame_index, - (void **)&frame_header); - - if (status != SCI_SUCCESS) { - dev_err(scic_to_dev(scic), - "%s: SCIC IO Request 0x%p could not get frame header " - "for frame index %d, status %x\n", - __func__, stp_req, 
frame_index, status); - return status; - } - switch (frame_header->fis_type) { - case FIS_PIO_SETUP: - /* Get from the frame buffer the PIO Setup Data */ - scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control, - frame_index, - (void **)&frame_buffer); - - /* Get the data from the PIO Setup The SCU Hardware returns - * first word in the frame_header and the rest of the data is in - * the frame buffer so we need to back up one dword - */ - - /* transfer_count: first 16bits in the 4th dword */ - stp_req->type.pio.pio_transfer_bytes = frame_buffer[3] & 0xffff; - - /* ending_status: 4th byte in the 3rd dword */ - stp_req->type.pio.ending_status = (frame_buffer[2] >> 24) & 0xff; - - scic_sds_controller_copy_sata_response(&sci_req->stp.rsp, - frame_header, - frame_buffer); - - sci_req->stp.rsp.status = stp_req->type.pio.ending_status; - - /* The next state is dependent on whether the - * request was PIO Data-in or Data out - */ - if (task->data_dir == DMA_FROM_DEVICE) { - sci_base_state_machine_change_state(&sci_req->state_machine, - SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_DATA_SUBSTATE); - } else if (task->data_dir == DMA_TO_DEVICE) { - /* Transmit data */ + switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): + /* Transmit data */ + if (stp_req->type.pio.pio_transfer_bytes != 0) { status = scic_sds_stp_request_pio_data_out_transmit_data(sci_req); - if (status != SCI_SUCCESS) - break; - sci_base_state_machine_change_state(&sci_req->state_machine, - SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_OUT_TRANSMIT_DATA_SUBSTATE); - } - break; - case FIS_SETDEVBITS: - sci_base_state_machine_change_state(&sci_req->state_machine, - SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE); - break; - case FIS_REGD2H: - if (frame_header->status & ATA_BUSY) { - /* Now why is the drive sending a D2H Register FIS when - * it is still busy? Do nothing since we are still in - * the right state. + if (status == SCI_SUCCESS) { + if (stp_req->type.pio.pio_transfer_bytes == 0) + all_frames_transferred = true; + } + } else if (stp_req->type.pio.pio_transfer_bytes == 0) { + /* + * this will happen if the all data is written at the + * first time after the pio setup fis is received */ - dev_dbg(scic_to_dev(scic), - "%s: SCIC PIO Request 0x%p received " - "D2H Register FIS with BSY status " - "0x%x\n", __func__, stp_req, - frame_header->status); - break; + all_frames_transferred = true; } - scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control, - frame_index, - (void **)&frame_buffer); - - scic_sds_controller_copy_sata_response(&sci_req->stp.req, - frame_header, - frame_buffer); - - scic_sds_request_set_status(sci_req, - SCU_TASK_DONE_CHECK_RESPONSE, - SCI_FAILURE_IO_RESPONSE_VALID); - - sci_base_state_machine_change_state(&sci_req->state_machine, - SCI_BASE_REQUEST_STATE_COMPLETED); + /* all data transferred. */ + if (all_frames_transferred) { + /* + * Change the state to SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_FRAME_SUBSTATE + * and wait for PIO_SETUP fis / or D2H REg fis. */ + sci_base_state_machine_change_state( + &sci_req->state_machine, + SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE + ); + } break; + default: - /* FIXME: what do we do here? 
*/ - break; - } - - /* Frame is decoded return it to the controller */ - scic_sds_controller_release_frame(scic, frame_index); - - return status; -} - -static enum sci_status scic_sds_stp_request_pio_data_in_await_data_frame_handler(struct scic_sds_request *sci_req, - u32 frame_index) -{ - enum sci_status status; - struct dev_to_host_fis *frame_header; - struct sata_fis_data *frame_buffer; - struct scic_sds_stp_request *stp_req = &sci_req->stp.req; - struct scic_sds_controller *scic = sci_req->owning_controller; - - status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control, - frame_index, - (void **)&frame_header); - - if (status != SCI_SUCCESS) { - dev_err(scic_to_dev(scic), - "%s: SCIC IO Request 0x%p could not get frame header " - "for frame index %d, status %x\n", - __func__, stp_req, frame_index, status); - return status; - } - - if (frame_header->fis_type == FIS_DATA) { - if (stp_req->type.pio.request_current.sgl_pair == NULL) { - sci_req->saved_rx_frame_index = frame_index; - stp_req->type.pio.pio_transfer_bytes = 0; - } else { - scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control, - frame_index, - (void **)&frame_buffer); - - status = scic_sds_stp_request_pio_data_in_copy_data(stp_req, - (u8 *)frame_buffer); - - /* Frame is decoded return it to the controller */ - scic_sds_controller_release_frame(scic, frame_index); - } - - /* Check for the end of the transfer, are there more - * bytes remaining for this data transfer - */ - if (status != SCI_SUCCESS || - stp_req->type.pio.pio_transfer_bytes != 0) - return status; - - if ((stp_req->type.pio.ending_status & ATA_BUSY) == 0) { - scic_sds_request_set_status(sci_req, - SCU_TASK_DONE_CHECK_RESPONSE, - SCI_FAILURE_IO_RESPONSE_VALID); - - sci_base_state_machine_change_state(&sci_req->state_machine, - SCI_BASE_REQUEST_STATE_COMPLETED); - } else { - sci_base_state_machine_change_state(&sci_req->state_machine, - SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE); - } - } else { - dev_err(scic_to_dev(scic), - "%s: SCIC PIO Request 0x%p received frame %d " - "with fis type 0x%02x when expecting a data " - "fis.\n", __func__, stp_req, frame_index, - frame_header->fis_type); - - scic_sds_request_set_status(sci_req, - SCU_TASK_DONE_GOOD, - SCI_FAILURE_IO_REQUIRES_SCSI_ABORT); - - sci_base_state_machine_change_state(&sci_req->state_machine, - SCI_BASE_REQUEST_STATE_COMPLETED); - - /* Frame is decoded return it to the controller */ - scic_sds_controller_release_frame(scic, frame_index); - } - - return status; -} - - -/** - * - * @sci_req: - * @completion_code: - * - * enum sci_status - */ -static enum sci_status scic_sds_stp_request_pio_data_out_await_data_transmit_completion_tc_completion_handler( - - struct scic_sds_request *sci_req, - u32 completion_code) -{ - enum sci_status status = SCI_SUCCESS; - bool all_frames_transferred = false; - struct scic_sds_stp_request *stp_req = &sci_req->stp.req; - - switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { - case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): - /* Transmit data */ - if (stp_req->type.pio.pio_transfer_bytes != 0) { - status = scic_sds_stp_request_pio_data_out_transmit_data(sci_req); - if (status == SCI_SUCCESS) { - if (stp_req->type.pio.pio_transfer_bytes == 0) - all_frames_transferred = true; - } - } else if (stp_req->type.pio.pio_transfer_bytes == 0) { - /* - * this will happen if the all data is written at the - * first time after the pio setup fis is received - */ - all_frames_transferred = true; - } - - /* all data transferred. 
*/ - if (all_frames_transferred) { - /* - * Change the state to SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_FRAME_SUBSTATE - * and wait for PIO_SETUP fis / or D2H REg fis. */ - sci_base_state_machine_change_state( - &sci_req->state_machine, - SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE - ); - } - break; - - default: - /* - * All other completion status cause the IO to be complete. If a NAK - * was received, then it is up to the user to retry the request. */ - scic_sds_request_set_status( - sci_req, - SCU_NORMALIZE_COMPLETION_STATUS(completion_code), - SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR - ); - - sci_base_state_machine_change_state( - &sci_req->state_machine, - SCI_BASE_REQUEST_STATE_COMPLETED - ); + /* + * All other completion status cause the IO to be complete. If a NAK + * was received, then it is up to the user to retry the request. */ + scic_sds_request_set_status( + sci_req, + SCU_NORMALIZE_COMPLETION_STATUS(completion_code), + SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR + ); + + sci_base_state_machine_change_state( + &sci_req->state_machine, + SCI_BASE_REQUEST_STATE_COMPLETED + ); break; } @@ -2280,6 +1796,422 @@ static enum sci_status scic_sds_stp_request_udma_general_frame_handler(struct sc return status; } +enum sci_status scic_sds_io_request_frame_handler(struct scic_sds_request *sci_req, + u32 frame_index) +{ + struct scic_sds_controller *scic = sci_req->owning_controller; + struct scic_sds_stp_request *stp_req = &sci_req->stp.req; + enum sci_base_request_states state; + enum sci_status status; + ssize_t word_cnt; + + state = sci_req->state_machine.current_state_id; + switch (state) { + case SCI_BASE_REQUEST_STATE_STARTED: { + struct ssp_frame_hdr ssp_hdr; + void *frame_header; + + scic_sds_unsolicited_frame_control_get_header(&scic->uf_control, + frame_index, + &frame_header); + + word_cnt = sizeof(struct ssp_frame_hdr) / sizeof(u32); + sci_swab32_cpy(&ssp_hdr, frame_header, word_cnt); + + if (ssp_hdr.frame_type == SSP_RESPONSE) { + struct ssp_response_iu *resp_iu; + ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32); + + scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control, + frame_index, + (void **)&resp_iu); + + sci_swab32_cpy(&sci_req->ssp.rsp, resp_iu, word_cnt); + + resp_iu = &sci_req->ssp.rsp; + + if (resp_iu->datapres == 0x01 || + resp_iu->datapres == 0x02) { + scic_sds_request_set_status(sci_req, + SCU_TASK_DONE_CHECK_RESPONSE, + SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); + } else + scic_sds_request_set_status(sci_req, + SCU_TASK_DONE_GOOD, + SCI_SUCCESS); + } else { + /* not a response frame, why did it get forwarded? */ + dev_err(scic_to_dev(scic), + "%s: SCIC IO Request 0x%p received unexpected " + "frame %d type 0x%02x\n", __func__, sci_req, + frame_index, ssp_hdr.frame_type); + } + + /* + * In any case we are done with this frame buffer return it to the + * controller + */ + scic_sds_controller_release_frame(scic, frame_index); + + return SCI_SUCCESS; + } + case SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_RESPONSE: + scic_sds_io_request_copy_response(sci_req); + sci_base_state_machine_change_state(&sci_req->state_machine, + SCI_BASE_REQUEST_STATE_COMPLETED); + scic_sds_controller_release_frame(scic,frame_index); + return SCI_SUCCESS; + case SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_RESPONSE: { + struct smp_resp *rsp_hdr = &sci_req->smp.rsp; + void *frame_header; + + scic_sds_unsolicited_frame_control_get_header(&scic->uf_control, + frame_index, + &frame_header); + + /* byte swap the header. 
*/ + word_cnt = SMP_RESP_HDR_SZ / sizeof(u32); + sci_swab32_cpy(rsp_hdr, frame_header, word_cnt); + + if (rsp_hdr->frame_type == SMP_RESPONSE) { + void *smp_resp; + + scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control, + frame_index, + &smp_resp); + + word_cnt = (sizeof(struct smp_req) - SMP_RESP_HDR_SZ) / + sizeof(u32); + + sci_swab32_cpy(((u8 *) rsp_hdr) + SMP_RESP_HDR_SZ, + smp_resp, word_cnt); + + scic_sds_request_set_status(sci_req, SCU_TASK_DONE_GOOD, + SCI_SUCCESS); + + sci_base_state_machine_change_state(&sci_req->state_machine, + SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_TC_COMPLETION); + } else { + /* This was not a response frame why did it get forwarded? */ + dev_err(scic_to_dev(scic), + "%s: SCIC SMP Request 0x%p received unexpected frame " + "%d type 0x%02x\n", __func__, sci_req, + frame_index, rsp_hdr->frame_type); + + scic_sds_request_set_status(sci_req, + SCU_TASK_DONE_SMP_FRM_TYPE_ERR, + SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); + + sci_base_state_machine_change_state(&sci_req->state_machine, + SCI_BASE_REQUEST_STATE_COMPLETED); + } + + scic_sds_controller_release_frame(scic, frame_index); + + return SCI_SUCCESS; + } + case SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_TC_COMPLETION_SUBSTATE: + return scic_sds_stp_request_udma_general_frame_handler(sci_req, frame_index); + case SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_D2H_REG_FIS_SUBSTATE: + /* Use the general frame handler to copy the resposne data */ + status = scic_sds_stp_request_udma_general_frame_handler(sci_req, frame_index); + + if (status != SCI_SUCCESS) + return status; + + scic_sds_stp_request_udma_complete_request(sci_req, + SCU_TASK_DONE_CHECK_RESPONSE, + SCI_FAILURE_IO_RESPONSE_VALID); + return SCI_SUCCESS; + case SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_D2H_SUBSTATE: { + struct dev_to_host_fis *frame_header; + u32 *frame_buffer; + + status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control, + frame_index, + (void **)&frame_header); + + if (status != SCI_SUCCESS) { + dev_err(scic_to_dev(scic), + "%s: SCIC IO Request 0x%p could not get frame header " + "for frame index %d, status %x\n", + __func__, stp_req, frame_index, status); + + return status; + } + + switch (frame_header->fis_type) { + case FIS_REGD2H: + scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control, + frame_index, + (void **)&frame_buffer); + + scic_sds_controller_copy_sata_response(&sci_req->stp.rsp, + frame_header, + frame_buffer); + + /* The command has completed with error */ + scic_sds_request_set_status(sci_req, SCU_TASK_DONE_CHECK_RESPONSE, + SCI_FAILURE_IO_RESPONSE_VALID); + break; + + default: + dev_warn(scic_to_dev(scic), + "%s: IO Request:0x%p Frame Id:%d protocol " + "violation occurred\n", __func__, stp_req, + frame_index); + + scic_sds_request_set_status(sci_req, SCU_TASK_DONE_UNEXP_FIS, + SCI_FAILURE_PROTOCOL_VIOLATION); + break; + } + + sci_base_state_machine_change_state(&sci_req->state_machine, + SCI_BASE_REQUEST_STATE_COMPLETED); + + /* Frame has been decoded return it to the controller */ + scic_sds_controller_release_frame(scic, frame_index); + + return status; + } + case SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE: { + struct isci_request *ireq = sci_req_to_ireq(sci_req); + struct sas_task *task = isci_request_access_task(ireq); + struct dev_to_host_fis *frame_header; + u32 *frame_buffer; + + status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control, + frame_index, + (void **)&frame_header); + + if (status != SCI_SUCCESS) { + dev_err(scic_to_dev(scic), + "%s: SCIC 
IO Request 0x%p could not get frame header " + "for frame index %d, status %x\n", + __func__, stp_req, frame_index, status); + return status; + } + + switch (frame_header->fis_type) { + case FIS_PIO_SETUP: + /* Get from the frame buffer the PIO Setup Data */ + scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control, + frame_index, + (void **)&frame_buffer); + + /* Get the data from the PIO Setup The SCU Hardware returns + * first word in the frame_header and the rest of the data is in + * the frame buffer so we need to back up one dword + */ + + /* transfer_count: first 16bits in the 4th dword */ + stp_req->type.pio.pio_transfer_bytes = frame_buffer[3] & 0xffff; + + /* ending_status: 4th byte in the 3rd dword */ + stp_req->type.pio.ending_status = (frame_buffer[2] >> 24) & 0xff; + + scic_sds_controller_copy_sata_response(&sci_req->stp.rsp, + frame_header, + frame_buffer); + + sci_req->stp.rsp.status = stp_req->type.pio.ending_status; + + /* The next state is dependent on whether the + * request was PIO Data-in or Data out + */ + if (task->data_dir == DMA_FROM_DEVICE) { + sci_base_state_machine_change_state(&sci_req->state_machine, + SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_DATA_SUBSTATE); + } else if (task->data_dir == DMA_TO_DEVICE) { + /* Transmit data */ + status = scic_sds_stp_request_pio_data_out_transmit_data(sci_req); + if (status != SCI_SUCCESS) + break; + sci_base_state_machine_change_state(&sci_req->state_machine, + SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_OUT_TRANSMIT_DATA_SUBSTATE); + } + break; + case FIS_SETDEVBITS: + sci_base_state_machine_change_state(&sci_req->state_machine, + SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE); + break; + case FIS_REGD2H: + if (frame_header->status & ATA_BUSY) { + /* Now why is the drive sending a D2H Register FIS when + * it is still busy? Do nothing since we are still in + * the right state. + */ + dev_dbg(scic_to_dev(scic), + "%s: SCIC PIO Request 0x%p received " + "D2H Register FIS with BSY status " + "0x%x\n", __func__, stp_req, + frame_header->status); + break; + } + + scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control, + frame_index, + (void **)&frame_buffer); + + scic_sds_controller_copy_sata_response(&sci_req->stp.req, + frame_header, + frame_buffer); + + scic_sds_request_set_status(sci_req, + SCU_TASK_DONE_CHECK_RESPONSE, + SCI_FAILURE_IO_RESPONSE_VALID); + + sci_base_state_machine_change_state(&sci_req->state_machine, + SCI_BASE_REQUEST_STATE_COMPLETED); + break; + default: + /* FIXME: what do we do here? 
*/ + break; + } + + /* Frame is decoded return it to the controller */ + scic_sds_controller_release_frame(scic, frame_index); + + return status; + } + case SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_DATA_SUBSTATE: { + struct dev_to_host_fis *frame_header; + struct sata_fis_data *frame_buffer; + + status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control, + frame_index, + (void **)&frame_header); + + if (status != SCI_SUCCESS) { + dev_err(scic_to_dev(scic), + "%s: SCIC IO Request 0x%p could not get frame header " + "for frame index %d, status %x\n", + __func__, stp_req, frame_index, status); + return status; + } + + if (frame_header->fis_type != FIS_DATA) { + dev_err(scic_to_dev(scic), + "%s: SCIC PIO Request 0x%p received frame %d " + "with fis type 0x%02x when expecting a data " + "fis.\n", __func__, stp_req, frame_index, + frame_header->fis_type); + + scic_sds_request_set_status(sci_req, + SCU_TASK_DONE_GOOD, + SCI_FAILURE_IO_REQUIRES_SCSI_ABORT); + + sci_base_state_machine_change_state(&sci_req->state_machine, + SCI_BASE_REQUEST_STATE_COMPLETED); + + /* Frame is decoded return it to the controller */ + scic_sds_controller_release_frame(scic, frame_index); + return status; + } + + if (stp_req->type.pio.request_current.sgl_pair == NULL) { + sci_req->saved_rx_frame_index = frame_index; + stp_req->type.pio.pio_transfer_bytes = 0; + } else { + scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control, + frame_index, + (void **)&frame_buffer); + + status = scic_sds_stp_request_pio_data_in_copy_data(stp_req, + (u8 *)frame_buffer); + + /* Frame is decoded return it to the controller */ + scic_sds_controller_release_frame(scic, frame_index); + } + + /* Check for the end of the transfer, are there more + * bytes remaining for this data transfer + */ + if (status != SCI_SUCCESS || + stp_req->type.pio.pio_transfer_bytes != 0) + return status; + + if ((stp_req->type.pio.ending_status & ATA_BUSY) == 0) { + scic_sds_request_set_status(sci_req, + SCU_TASK_DONE_CHECK_RESPONSE, + SCI_FAILURE_IO_RESPONSE_VALID); + + sci_base_state_machine_change_state(&sci_req->state_machine, + SCI_BASE_REQUEST_STATE_COMPLETED); + } else { + sci_base_state_machine_change_state(&sci_req->state_machine, + SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE); + } + return status; + } + case SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_D2H_RESPONSE_FRAME_SUBSTATE: { + struct dev_to_host_fis *frame_header; + u32 *frame_buffer; + + status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control, + frame_index, + (void **)&frame_header); + if (status != SCI_SUCCESS) { + dev_err(scic_to_dev(scic), + "%s: SCIC IO Request 0x%p could not get frame header " + "for frame index %d, status %x\n", + __func__, stp_req, frame_index, status); + return status; + } + + switch (frame_header->fis_type) { + case FIS_REGD2H: + scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control, + frame_index, + (void **)&frame_buffer); + + scic_sds_controller_copy_sata_response(&sci_req->stp.rsp, + frame_header, + frame_buffer); + + /* The command has completed with error */ + scic_sds_request_set_status(sci_req, + SCU_TASK_DONE_CHECK_RESPONSE, + SCI_FAILURE_IO_RESPONSE_VALID); + break; + default: + dev_warn(scic_to_dev(scic), + "%s: IO Request:0x%p Frame Id:%d protocol " + "violation occurred\n", __func__, stp_req, + frame_index); + + scic_sds_request_set_status(sci_req, SCU_TASK_DONE_UNEXP_FIS, + SCI_FAILURE_PROTOCOL_VIOLATION); + break; + } + + sci_base_state_machine_change_state(&sci_req->state_machine, + 
SCI_BASE_REQUEST_STATE_COMPLETED); + + /* Frame has been decoded return it to the controller */ + scic_sds_controller_release_frame(scic, frame_index); + + return status; + } + case SCI_BASE_REQUEST_STATE_ABORTING: + /* TODO: Is it even possible to get an unsolicited frame in the + * aborting state? + */ + scic_sds_controller_release_frame(scic, frame_index); + return SCI_SUCCESS; + default: + dev_warn(scic_to_dev(scic), + "%s: SCIC IO Request given unexpected frame %x while in " + "state %d\n", __func__, frame_index, state); + + scic_sds_controller_release_frame(scic, frame_index); + return SCI_FAILURE_INVALID_STATE; + } +} + + + + static enum sci_status scic_sds_stp_request_udma_await_tc_completion_tc_completion_handler( struct scic_sds_request *sci_req, u32 completion_code) @@ -2336,32 +2268,6 @@ static enum sci_status scic_sds_stp_request_udma_await_tc_completion_tc_completi return status; } -static enum sci_status scic_sds_stp_request_udma_await_d2h_reg_fis_frame_handler( - struct scic_sds_request *sci_req, - u32 frame_index) -{ - enum sci_status status; - - /* Use the general frame handler to copy the resposne data */ - status = scic_sds_stp_request_udma_general_frame_handler(sci_req, frame_index); - - if (status != SCI_SUCCESS) - return status; - - scic_sds_stp_request_udma_complete_request(sci_req, - SCU_TASK_DONE_CHECK_RESPONSE, - SCI_FAILURE_IO_RESPONSE_VALID); - - return status; -} - -enum sci_status scic_sds_stp_udma_request_construct(struct scic_sds_request *sci_req, - u32 len, - enum dma_data_direction dir) -{ - return SCI_SUCCESS; -} - /** * * @sci_req: @@ -2447,117 +2353,36 @@ static enum sci_status scic_sds_stp_request_soft_reset_await_h2d_diagnostic_tc_c return SCI_SUCCESS; } -/** - * - * @request: This parameter specifies the request for which a frame has been - * received. - * @frame_index: This parameter specifies the index of the frame that has been - * received. - * - * This method processes frames received from the target while waiting for a - * device to host register FIS. If a non-register FIS is received during this - * time, it is treated as a protocol violation from an IO perspective. Indicate - * if the received frame was processed successfully. 
- */ -static enum sci_status scic_sds_stp_request_soft_reset_await_d2h_frame_handler( - struct scic_sds_request *sci_req, - u32 frame_index) -{ - enum sci_status status; - struct dev_to_host_fis *frame_header; - u32 *frame_buffer; - struct scic_sds_stp_request *stp_req = &sci_req->stp.req; - struct scic_sds_controller *scic = sci_req->owning_controller; - - status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control, - frame_index, - (void **)&frame_header); - if (status != SCI_SUCCESS) { - dev_err(scic_to_dev(scic), - "%s: SCIC IO Request 0x%p could not get frame header " - "for frame index %d, status %x\n", - __func__, stp_req, frame_index, status); - return status; - } - - switch (frame_header->fis_type) { - case FIS_REGD2H: - scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control, - frame_index, - (void **)&frame_buffer); - - scic_sds_controller_copy_sata_response(&sci_req->stp.rsp, - frame_header, - frame_buffer); - - /* The command has completed with error */ - scic_sds_request_set_status(sci_req, - SCU_TASK_DONE_CHECK_RESPONSE, - SCI_FAILURE_IO_RESPONSE_VALID); - break; - - default: - dev_warn(scic_to_dev(scic), - "%s: IO Request:0x%p Frame Id:%d protocol " - "violation occurred\n", __func__, stp_req, - frame_index); - - scic_sds_request_set_status(sci_req, SCU_TASK_DONE_UNEXP_FIS, - SCI_FAILURE_PROTOCOL_VIOLATION); - break; - } - - sci_base_state_machine_change_state(&sci_req->state_machine, - SCI_BASE_REQUEST_STATE_COMPLETED); - - /* Frame has been decoded return it to the controller */ - scic_sds_controller_release_frame(scic, frame_index); - - return status; -} - static const struct scic_sds_io_request_state_handler scic_sds_request_state_handler_table[] = { [SCI_BASE_REQUEST_STATE_INITIAL] = {}, [SCI_BASE_REQUEST_STATE_CONSTRUCTED] = {}, [SCI_BASE_REQUEST_STATE_STARTED] = { .tc_completion_handler = scic_sds_request_started_state_tc_completion_handler, - .frame_handler = scic_sds_request_started_state_frame_handler, }, [SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_COMPLETION] = { .tc_completion_handler = scic_sds_ssp_task_request_await_tc_completion_tc_completion_handler, }, - [SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_RESPONSE] = { - .frame_handler = scic_sds_ssp_task_request_await_tc_response_frame_handler, - }, + [SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_RESPONSE] = { }, [SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_RESPONSE] = { .tc_completion_handler = scic_sds_smp_request_await_response_tc_completion_handler, - .frame_handler = scic_sds_smp_request_await_response_frame_handler, }, [SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_TC_COMPLETION] = { .tc_completion_handler = scic_sds_smp_request_await_tc_completion_tc_completion_handler, }, [SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_TC_COMPLETION_SUBSTATE] = { .tc_completion_handler = scic_sds_stp_request_udma_await_tc_completion_tc_completion_handler, - .frame_handler = scic_sds_stp_request_udma_general_frame_handler, - }, - [SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_D2H_REG_FIS_SUBSTATE] = { - .frame_handler = scic_sds_stp_request_udma_await_d2h_reg_fis_frame_handler, }, + [SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_D2H_REG_FIS_SUBSTATE] = { }, [SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_H2D_COMPLETION_SUBSTATE] = { .tc_completion_handler = scic_sds_stp_request_non_data_await_h2d_tc_completion_handler, }, - [SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_D2H_SUBSTATE] = { - .frame_handler = scic_sds_stp_request_non_data_await_d2h_frame_handler, - }, + 
[SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_D2H_SUBSTATE] = { }, [SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_H2D_COMPLETION_SUBSTATE] = { .tc_completion_handler = scic_sds_stp_request_pio_await_h2d_completion_tc_completion_handler, }, - [SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE] = { - .frame_handler = scic_sds_stp_request_pio_await_frame_frame_handler - }, + [SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE] = { }, [SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_DATA_SUBSTATE] = { .event_handler = scic_sds_stp_request_pio_data_in_await_data_event_handler, - .frame_handler = scic_sds_stp_request_pio_data_in_await_data_frame_handler }, [SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_OUT_TRANSMIT_DATA_SUBSTATE] = { .tc_completion_handler = scic_sds_stp_request_pio_data_out_await_data_transmit_completion_tc_completion_handler, @@ -2568,15 +2393,12 @@ static const struct scic_sds_io_request_state_handler scic_sds_request_state_han [SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_DIAGNOSTIC_COMPLETION_SUBSTATE] = { .tc_completion_handler = scic_sds_stp_request_soft_reset_await_h2d_diagnostic_tc_completion_handler, }, - [SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_D2H_RESPONSE_FRAME_SUBSTATE] = { - .frame_handler = scic_sds_stp_request_soft_reset_await_d2h_frame_handler, - }, + [SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_D2H_RESPONSE_FRAME_SUBSTATE] = { }, [SCI_BASE_REQUEST_STATE_COMPLETED] = { .complete_handler = scic_sds_request_completed_state_complete_handler, }, [SCI_BASE_REQUEST_STATE_ABORTING] = { .tc_completion_handler = scic_sds_request_aborting_state_tc_completion_handler, - .frame_handler = scic_sds_request_aborting_state_frame_handler, }, [SCI_BASE_REQUEST_STATE_FINAL] = { }, }; diff --git a/drivers/scsi/isci/request.h b/drivers/scsi/isci/request.h index bf8ac186085..c9070e70894 100644 --- a/drivers/scsi/isci/request.h +++ b/drivers/scsi/isci/request.h @@ -424,8 +424,6 @@ enum sci_base_request_states { typedef enum sci_status (*scic_sds_io_request_handler_t) (struct scic_sds_request *request); -typedef enum sci_status (*scic_sds_io_request_frame_handler_t) - (struct scic_sds_request *req, u32 frame); typedef enum sci_status (*scic_sds_io_request_event_handler_t) (struct scic_sds_request *req, u32 event); typedef enum sci_status (*scic_sds_io_request_task_completion_handler_t) @@ -446,7 +444,6 @@ struct scic_sds_io_request_state_handler { scic_sds_io_request_task_completion_handler_t tc_completion_handler; scic_sds_io_request_event_handler_t event_handler; - scic_sds_io_request_frame_handler_t frame_handler; }; /** @@ -839,9 +836,6 @@ enum sci_status scic_task_request_construct(struct scic_sds_controller *scic, struct scic_sds_request *sci_req); enum sci_status scic_task_request_construct_ssp(struct scic_sds_request *sci_req); enum sci_status scic_task_request_construct_sata(struct scic_sds_request *sci_req); -enum sci_status scic_sds_stp_udma_request_construct(struct scic_sds_request *sci_req, - u32 transfer_length, - enum dma_data_direction dir); void scic_stp_io_request_set_ncq_tag(struct scic_sds_request *sci_req, u16 ncq_tag); void scic_sds_smp_request_copy_response(struct scic_sds_request *sci_req); #endif /* !defined(_ISCI_REQUEST_H_) */ -- cgit v1.2.3-70-g09d2 From a7e255a34220ba57eeeb75637c911974e54c08e7 Mon Sep 17 00:00:00 2001 From: Dan Williams <dan.j.williams@intel.com> Date: Wed, 11 May 2011 08:27:47 -0700 Subject: isci: remove request task context completion state handler Unlike the other conversions this only updates scic_sds_io_request_tc_completion() to 
call the old state handlers directly (with less verbose names). This was done for future patch readability; the implementations have only minor differences for different completion codes, and without a reference to the function name it would be difficult to discern which state is being updated. Changing the order to look up the completion code before the state was considered, but that was not a clean conversion either. Reported-by: Christoph Hellwig <hch@lst.de> Signed-off-by: Dan Williams <dan.j.williams@intel.com> --- drivers/scsi/isci/request.c | 531 ++++++++++++++++---------------------------- drivers/scsi/isci/request.h | 3 - 2 files changed, 197 insertions(+), 337 deletions(-) (limited to 'drivers/scsi/isci/request.h') diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c index b9f97e8cc5e..cb13b78d802 100644 --- a/drivers/scsi/isci/request.c +++ b/drivers/scsi/isci/request.c @@ -932,27 +932,14 @@ static void scic_sds_io_request_copy_response(struct scic_sds_request *sci_req) memcpy(resp_buf, ssp_response->resp_data, len); } -/* - * scic_sds_request_started_state_tc_completion_handler() - This method process - * TC (task context) completions for normal IO request (i.e. Task/Abort - * Completions of type 0). This method will update the - * SCIC_SDS_IO_REQUEST_T::status field. - * @sci_req: This parameter specifies the request for which a completion - * occurred. - * @completion_code: This parameter specifies the completion code received from - * the SCU. - * - */ -static enum sci_status -scic_sds_request_started_state_tc_completion_handler(struct scic_sds_request *sci_req, - u32 completion_code) +static enum sci_status request_started_state_tc_event(struct scic_sds_request *sci_req, + u32 completion_code) { - u8 datapres; struct ssp_response_iu *resp_iu; + u8 datapres; - /* - * TODO: Any SDMA return code of other than 0 is bad - * decode 0x003C0000 to determine SDMA status + /* TODO: Any SDMA return code of other than 0 is bad decode 0x003C0000 + * to determine SDMA status */ switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): @@ -960,11 +947,8 @@ scic_sds_request_started_state_tc_completion_handler(struct scic_sds_request *sc SCU_TASK_DONE_GOOD, SCI_SUCCESS); break; - - case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_EARLY_RESP): - { - /* - * There are times when the SCU hardware will return an early + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_EARLY_RESP): { + /* There are times when the SCU hardware will return an early * response because the io request specified more data than is * returned by the target device (mode pages, inquiry data, * etc.).
We must check the response stats to see if this is @@ -979,21 +963,17 @@ scic_sds_request_started_state_tc_completion_handler(struct scic_sds_request *sc word_cnt); if (resp->status == 0) { - scic_sds_request_set_status( - sci_req, - SCU_TASK_DONE_GOOD, - SCI_SUCCESS_IO_DONE_EARLY); + scic_sds_request_set_status(sci_req, + SCU_TASK_DONE_GOOD, + SCI_SUCCESS_IO_DONE_EARLY); } else { - scic_sds_request_set_status( - sci_req, - SCU_TASK_DONE_CHECK_RESPONSE, - SCI_FAILURE_IO_RESPONSE_VALID); + scic_sds_request_set_status(sci_req, + SCU_TASK_DONE_CHECK_RESPONSE, + SCI_FAILURE_IO_RESPONSE_VALID); } + break; } - break; - - case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CHECK_RESPONSE): - { + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CHECK_RESPONSE): { ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32); sci_swab32_cpy(&sci_req->ssp.rsp, @@ -1007,24 +987,22 @@ scic_sds_request_started_state_tc_completion_handler(struct scic_sds_request *sc } case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RESP_LEN_ERR): - /* - * / @todo With TASK_DONE_RESP_LEN_ERR is the response frame + /* TODO With TASK_DONE_RESP_LEN_ERR is the response frame * guaranteed to be received before this completion status is * posted? */ resp_iu = &sci_req->ssp.rsp; datapres = resp_iu->datapres; - if ((datapres == 0x01) || (datapres == 0x02)) { - scic_sds_request_set_status( - sci_req, - SCU_TASK_DONE_CHECK_RESPONSE, - SCI_FAILURE_IO_RESPONSE_VALID); + if (datapres == 1 || datapres == 2) { + scic_sds_request_set_status(sci_req, + SCU_TASK_DONE_CHECK_RESPONSE, + SCI_FAILURE_IO_RESPONSE_VALID); } else - scic_sds_request_set_status( - sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS); + scic_sds_request_set_status(sci_req, + SCU_TASK_DONE_GOOD, + SCI_SUCCESS); break; - /* only stp device gets suspended. */ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO): case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_PERR): @@ -1038,14 +1016,12 @@ scic_sds_request_started_state_tc_completion_handler(struct scic_sds_request *sc case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR): case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDB_ERR): if (sci_req->protocol == SCIC_STP_PROTOCOL) { - scic_sds_request_set_status( - sci_req, + scic_sds_request_set_status(sci_req, SCU_GET_COMPLETION_TL_STATUS(completion_code) >> SCU_COMPLETION_TL_STATUS_SHIFT, SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED); } else { - scic_sds_request_set_status( - sci_req, + scic_sds_request_set_status(sci_req, SCU_GET_COMPLETION_TL_STATUS(completion_code) >> SCU_COMPLETION_TL_STATUS_SHIFT, SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); @@ -1063,11 +1039,10 @@ scic_sds_request_started_state_tc_completion_handler(struct scic_sds_request *sc case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY): case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED): case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED): - scic_sds_request_set_status( - sci_req, - SCU_GET_COMPLETION_TL_STATUS(completion_code) >> - SCU_COMPLETION_TL_STATUS_SHIFT, - SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED); + scic_sds_request_set_status(sci_req, + SCU_GET_COMPLETION_TL_STATUS(completion_code) >> + SCU_COMPLETION_TL_STATUS_SHIFT, + SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED); break; /* neither ssp nor stp gets suspended. 
*/ @@ -1105,22 +1080,6 @@ scic_sds_request_started_state_tc_completion_handler(struct scic_sds_request *sc return SCI_SUCCESS; } -enum sci_status -scic_sds_io_request_tc_completion(struct scic_sds_request *request, u32 completion_code) -{ - if (request->state_handlers->tc_completion_handler) - return request->state_handlers->tc_completion_handler(request, completion_code); - - dev_warn(scic_to_dev(request->owning_controller), - "%s: SCIC IO Request given task completion notification %x " - "while in wrong state %d\n", - __func__, - completion_code, - sci_base_state_machine_get_state(&request->state_machine)); - - return SCI_FAILURE_INVALID_STATE; -} - /* * This method implements the action to be taken when an SCIC_SDS_IO_REQUEST_T * object receives a scic_sds_request_complete() request. This method frees up @@ -1146,54 +1105,32 @@ static enum sci_status scic_sds_request_completed_state_complete_handler( return SCI_SUCCESS; } -/* - * This method implements the action to be taken when an SCIC_SDS_IO_REQUEST_T - * object receives a scic_sds_request_task_completion() request. This method - * decodes the completion type waiting for the abort task complete - * notification. When the abort task complete is received the io request - * transitions to the completed state. enum sci_status SCI_SUCCESS - */ -static enum sci_status scic_sds_request_aborting_state_tc_completion_handler( +static enum sci_status request_aborting_state_tc_event( struct scic_sds_request *sci_req, u32 completion_code) { switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { case (SCU_TASK_DONE_GOOD << SCU_COMPLETION_TL_STATUS_SHIFT): case (SCU_TASK_DONE_TASK_ABORT << SCU_COMPLETION_TL_STATUS_SHIFT): - scic_sds_request_set_status( - sci_req, SCU_TASK_DONE_TASK_ABORT, SCI_FAILURE_IO_TERMINATED - ); + scic_sds_request_set_status(sci_req, SCU_TASK_DONE_TASK_ABORT, + SCI_FAILURE_IO_TERMINATED); sci_base_state_machine_change_state(&sci_req->state_machine, SCI_BASE_REQUEST_STATE_COMPLETED); break; default: - /* - * Unless we get some strange error wait for the task abort to complete - * TODO: Should there be a state change for this completion? */ + /* Unless we get some strange error wait for the task abort to complete + * TODO: Should there be a state change for this completion? + */ break; } return SCI_SUCCESS; } -/** - * This method processes the completions transport layer (TL) status to - * determine if the RAW task management frame was sent successfully. If the - * raw frame was sent successfully, then the state for the task request - * transitions to waiting for a response frame. - * @sci_req: This parameter specifies the request for which the TC - * completion was received. - * @completion_code: This parameter indicates the completion status information - * for the TC. - * - * Indicate if the tc completion handler was successful. SCI_SUCCESS currently - * this method always returns success. 
- */ -static enum sci_status scic_sds_ssp_task_request_await_tc_completion_tc_completion_handler( - struct scic_sds_request *sci_req, - u32 completion_code) +static enum sci_status ssp_task_request_await_tc_event(struct scic_sds_request *sci_req, + u32 completion_code) { switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): @@ -1203,33 +1140,27 @@ static enum sci_status scic_sds_ssp_task_request_await_tc_completion_tc_completi sci_base_state_machine_change_state(&sci_req->state_machine, SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_RESPONSE); break; - case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO): - /* - * Currently, the decision is to simply allow the task request to - * timeout if the task IU wasn't received successfully. - * There is a potential for receiving multiple task responses if we - * decide to send the task IU again. */ + /* Currently, the decision is to simply allow the task request + * to timeout if the task IU wasn't received successfully. + * There is a potential for receiving multiple task responses if + * we decide to send the task IU again. + */ dev_warn(scic_to_dev(sci_req->owning_controller), "%s: TaskRequest:0x%p CompletionCode:%x - " - "ACK/NAK timeout\n", - __func__, - sci_req, + "ACK/NAK timeout\n", __func__, sci_req, completion_code); sci_base_state_machine_change_state(&sci_req->state_machine, SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_RESPONSE); break; - default: - /* - * All other completion status cause the IO to be complete. If a NAK - * was received, then it is up to the user to retry the request. */ - scic_sds_request_set_status( - sci_req, + /* All other completion status cause the IO to be complete. If a NAK + * was received, then it is up to the user to retry the request. + */ + scic_sds_request_set_status(sci_req, SCU_NORMALIZE_COMPLETION_STATUS(completion_code), - SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR - ); + SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); sci_base_state_machine_change_state(&sci_req->state_machine, SCI_BASE_REQUEST_STATE_COMPLETED); @@ -1239,27 +1170,15 @@ static enum sci_status scic_sds_ssp_task_request_await_tc_completion_tc_completi return SCI_SUCCESS; } -/** - * This method processes an abnormal TC completion while the SMP request is - * waiting for a response frame. It decides what happened to the IO based - * on TC completion status. - * @sci_req: This parameter specifies the request for which the TC - * completion was received. - * @completion_code: This parameter indicates the completion status information - * for the TC. - * - * Indicate if the tc completion handler was successful. SCI_SUCCESS currently - * this method always returns success. - */ -static enum sci_status scic_sds_smp_request_await_response_tc_completion_handler( - struct scic_sds_request *sci_req, - u32 completion_code) +static enum sci_status smp_request_await_response_tc_event(struct scic_sds_request *sci_req, + u32 completion_code) { switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): - /* - * In the AWAIT RESPONSE state, any TC completion is unexpected. - * but if the TC has success status, we complete the IO anyway. */ + /* In the AWAIT RESPONSE state, any TC completion is + * unexpected. but if the TC has success status, we + * complete the IO anyway. 
+ */ scic_sds_request_set_status(sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS); @@ -1271,11 +1190,13 @@ static enum sci_status scic_sds_smp_request_await_response_tc_completion_handler case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_UFI_ERR): case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_FRM_TYPE_ERR): case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_LL_RX_ERR): - /* - * These status has been seen in a specific LSI expander, which sometimes - * is not able to send smp response within 2 ms. This causes our hardware - * break the connection and set TC completion with one of these SMP_XXX_XX_ERR - * status. For these type of error, we ask scic user to retry the request. */ + /* These status has been seen in a specific LSI + * expander, which sometimes is not able to send smp + * response within 2 ms. This causes our hardware break + * the connection and set TC completion with one of + * these SMP_XXX_XX_ERR status. For these type of error, + * we ask scic user to retry the request. + */ scic_sds_request_set_status(sci_req, SCU_TASK_DONE_SMP_RESP_TO_ERR, SCI_FAILURE_RETRY_REQUIRED); @@ -1284,14 +1205,12 @@ static enum sci_status scic_sds_smp_request_await_response_tc_completion_handler break; default: - /* - * All other completion status cause the IO to be complete. If a NAK - * was received, then it is up to the user to retry the request. */ - scic_sds_request_set_status( - sci_req, - SCU_NORMALIZE_COMPLETION_STATUS(completion_code), - SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR - ); + /* All other completion status cause the IO to be complete. If a NAK + * was received, then it is up to the user to retry the request + */ + scic_sds_request_set_status(sci_req, + SCU_NORMALIZE_COMPLETION_STATUS(completion_code), + SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); sci_base_state_machine_change_state(&sci_req->state_machine, SCI_BASE_REQUEST_STATE_COMPLETED); @@ -1301,22 +1220,8 @@ static enum sci_status scic_sds_smp_request_await_response_tc_completion_handler return SCI_SUCCESS; } -/** - * This method processes the completions transport layer (TL) status to - * determine if the SMP request was sent successfully. If the SMP request - * was sent successfully, then the state for the SMP request transits to - * waiting for a response frame. - * @sci_req: This parameter specifies the request for which the TC - * completion was received. - * @completion_code: This parameter indicates the completion status information - * for the TC. - * - * Indicate if the tc completion handler was successful. SCI_SUCCESS currently - * this method always returns success. - */ -static enum sci_status scic_sds_smp_request_await_tc_completion_tc_completion_handler( - struct scic_sds_request *sci_req, - u32 completion_code) +static enum sci_status smp_request_await_tc_event(struct scic_sds_request *sci_req, + u32 completion_code) { switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): @@ -1326,20 +1231,17 @@ static enum sci_status scic_sds_smp_request_await_tc_completion_tc_completion_ha sci_base_state_machine_change_state(&sci_req->state_machine, SCI_BASE_REQUEST_STATE_COMPLETED); break; - default: - /* - * All other completion status cause the IO to be complete. If a NAK - * was received, then it is up to the user to retry the request. */ - scic_sds_request_set_status( - sci_req, - SCU_NORMALIZE_COMPLETION_STATUS(completion_code), - SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR - ); + /* All other completion status cause the IO to be + * complete. 
If a NAK was received, then it is up to + * the user to retry the request. + */ + scic_sds_request_set_status(sci_req, + SCU_NORMALIZE_COMPLETION_STATUS(completion_code), + SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); - sci_base_state_machine_change_state( - &sci_req->state_machine, - SCI_BASE_REQUEST_STATE_COMPLETED); + sci_base_state_machine_change_state(&sci_req->state_machine, + SCI_BASE_REQUEST_STATE_COMPLETED); break; } @@ -1400,44 +1302,29 @@ static struct scu_sgl_element *scic_sds_stp_request_pio_get_next_sgl(struct scic return current_sgl; } -/** - * - * @sci_req: - * @completion_code: - * - * This method processes a TC completion. The expected TC completion is for - * the transmission of the H2D register FIS containing the SATA/STP non-data - * request. This method always successfully processes the TC completion. - * SCI_SUCCESS This value is always returned. - */ -static enum sci_status scic_sds_stp_request_non_data_await_h2d_tc_completion_handler( - struct scic_sds_request *sci_req, - u32 completion_code) +static enum sci_status stp_request_non_data_await_h2d_tc_event(struct scic_sds_request *sci_req, + u32 completion_code) { switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): - scic_sds_request_set_status( - sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS - ); + scic_sds_request_set_status(sci_req, SCU_TASK_DONE_GOOD, + SCI_SUCCESS); - sci_base_state_machine_change_state( - &sci_req->state_machine, - SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_D2H_SUBSTATE - ); + sci_base_state_machine_change_state(&sci_req->state_machine, + SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_D2H_SUBSTATE); break; default: - /* - * All other completion status cause the IO to be complete. If a NAK - * was received, then it is up to the user to retry the request. */ - scic_sds_request_set_status( - sci_req, - SCU_NORMALIZE_COMPLETION_STATUS(completion_code), - SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR - ); + /* All other completion status cause the IO to be + * complete. If a NAK was received, then it is up to + * the user to retry the request. + */ + scic_sds_request_set_status(sci_req, + SCU_NORMALIZE_COMPLETION_STATUS(completion_code), + SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); - sci_base_state_machine_change_state( - &sci_req->state_machine, SCI_BASE_REQUEST_STATE_COMPLETED); + sci_base_state_machine_change_state(&sci_req->state_machine, + SCI_BASE_REQUEST_STATE_COMPLETED); break; } @@ -1613,55 +1500,38 @@ static enum sci_status scic_sds_stp_request_pio_data_in_copy_data( return status; } -/** - * - * @sci_req: - * @completion_code: - * - * enum sci_status - */ -static enum sci_status scic_sds_stp_request_pio_await_h2d_completion_tc_completion_handler( - struct scic_sds_request *sci_req, - u32 completion_code) +static enum sci_status stp_request_pio_await_h2d_completion_tc_event(struct scic_sds_request *sci_req, + u32 completion_code) { enum sci_status status = SCI_SUCCESS; switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): - scic_sds_request_set_status( - sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS - ); + scic_sds_request_set_status(sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS); - sci_base_state_machine_change_state( - &sci_req->state_machine, - SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE - ); + sci_base_state_machine_change_state(&sci_req->state_machine, + SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE); break; default: - /* - * All other completion status cause the IO to be complete. 
If a NAK - * was received, then it is up to the user to retry the request. */ - scic_sds_request_set_status( - sci_req, - SCU_NORMALIZE_COMPLETION_STATUS(completion_code), - SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR - ); + /* All other completion status cause the IO to be + * complete. If a NAK was received, then it is up to + * the user to retry the request. + */ + scic_sds_request_set_status(sci_req, + SCU_NORMALIZE_COMPLETION_STATUS(completion_code), + SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); - sci_base_state_machine_change_state( - &sci_req->state_machine, - SCI_BASE_REQUEST_STATE_COMPLETED - ); + sci_base_state_machine_change_state(&sci_req->state_machine, + SCI_BASE_REQUEST_STATE_COMPLETED); break; } return status; } -static enum sci_status scic_sds_stp_request_pio_data_out_await_data_transmit_completion_tc_completion_handler( - - struct scic_sds_request *sci_req, - u32 completion_code) +static enum sci_status pio_data_out_tx_done_tc_event(struct scic_sds_request *sci_req, + u32 completion_code) { enum sci_status status = SCI_SUCCESS; bool all_frames_transferred = false; @@ -1695,7 +1565,6 @@ static enum sci_status scic_sds_stp_request_pio_data_out_await_data_transmit_com ); } break; - default: /* * All other completion status cause the IO to be complete. If a NAK @@ -2209,12 +2078,8 @@ enum sci_status scic_sds_io_request_frame_handler(struct scic_sds_request *sci_r } } - - - -static enum sci_status scic_sds_stp_request_udma_await_tc_completion_tc_completion_handler( - struct scic_sds_request *sci_req, - u32 completion_code) +static enum sci_status stp_request_udma_await_tc_event(struct scic_sds_request *sci_req, + u32 completion_code) { enum sci_status status = SCI_SUCCESS; @@ -2226,9 +2091,10 @@ static enum sci_status scic_sds_stp_request_udma_await_tc_completion_tc_completi break; case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_FIS): case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR): - /* - * We must check ther response buffer to see if the D2H Register FIS was - * received before we got the TC completion. */ + /* We must check ther response buffer to see if the D2H + * Register FIS was received before we got the TC + * completion. + */ if (sci_req->stp.rsp.fis_type == FIS_REGD2H) { scic_sds_remote_device_suspend(sci_req->target_device, SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code))); @@ -2237,18 +2103,22 @@ static enum sci_status scic_sds_stp_request_udma_await_tc_completion_tc_completi SCU_TASK_DONE_CHECK_RESPONSE, SCI_FAILURE_IO_RESPONSE_VALID); } else { - /* - * If we have an error completion status for the TC then we can expect a - * D2H register FIS from the device so we must change state to wait for it */ + /* If we have an error completion status for the + * TC then we can expect a D2H register FIS from + * the device so we must change state to wait + * for it + */ sci_base_state_machine_change_state(&sci_req->state_machine, SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_D2H_REG_FIS_SUBSTATE); } break; - /* - * / @todo Check to see if any of these completion status need to wait for - * / the device to host register fis. */ - /* / @todo We can retry the command for SCU_TASK_DONE_CMD_LL_R_ERR - this comes only for B0 */ + /* TODO Check to see if any of these completion status need to + * wait for the device to host register fis. 
+ */ + /* TODO We can retry the command for SCU_TASK_DONE_CMD_LL_R_ERR + * - this comes only for B0 + */ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_INV_FIS_LEN): case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_MAX_PLD_ERR): case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_R_ERR): @@ -2268,61 +2138,35 @@ static enum sci_status scic_sds_stp_request_udma_await_tc_completion_tc_completi return status; } -/** - * - * @sci_req: - * @completion_code: - * - * This method processes a TC completion. The expected TC completion is for - * the transmission of the H2D register FIS containing the SATA/STP non-data - * request. This method always successfully processes the TC completion. - * SCI_SUCCESS This value is always returned. - */ -static enum sci_status scic_sds_stp_request_soft_reset_await_h2d_asserted_tc_completion_handler( - struct scic_sds_request *sci_req, - u32 completion_code) +static enum sci_status stp_request_soft_reset_await_h2d_asserted_tc_event(struct scic_sds_request *sci_req, + u32 completion_code) { switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): - scic_sds_request_set_status( - sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS - ); + scic_sds_request_set_status(sci_req, SCU_TASK_DONE_GOOD, + SCI_SUCCESS); - sci_base_state_machine_change_state( - &sci_req->state_machine, - SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_DIAGNOSTIC_COMPLETION_SUBSTATE - ); + sci_base_state_machine_change_state(&sci_req->state_machine, + SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_DIAGNOSTIC_COMPLETION_SUBSTATE); break; default: /* * All other completion status cause the IO to be complete. If a NAK * was received, then it is up to the user to retry the request. */ - scic_sds_request_set_status( - sci_req, + scic_sds_request_set_status(sci_req, SCU_NORMALIZE_COMPLETION_STATUS(completion_code), - SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR - ); + SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); - sci_base_state_machine_change_state( - &sci_req->state_machine, SCI_BASE_REQUEST_STATE_COMPLETED); + sci_base_state_machine_change_state(&sci_req->state_machine, + SCI_BASE_REQUEST_STATE_COMPLETED); break; } return SCI_SUCCESS; } -/** - * - * @sci_req: - * @completion_code: - * - * This method processes a TC completion. The expected TC completion is for - * the transmission of the H2D register FIS containing the SATA/STP non-data - * request. This method always successfully processes the TC completion. - * SCI_SUCCESS This value is always returned. - */ -static enum sci_status scic_sds_stp_request_soft_reset_await_h2d_diagnostic_tc_completion_handler( +static enum sci_status stp_request_soft_reset_await_h2d_diagnostic_tc_event( struct scic_sds_request *sci_req, u32 completion_code) { @@ -2336,70 +2180,89 @@ static enum sci_status scic_sds_stp_request_soft_reset_await_h2d_diagnostic_tc_c break; default: - /* - * All other completion status cause the IO to be complete. If a NAK - * was received, then it is up to the user to retry the request. */ - scic_sds_request_set_status( - sci_req, + /* All other completion status cause the IO to be complete. If + * a NAK was received, then it is up to the user to retry the + * request. 
+ */ + scic_sds_request_set_status(sci_req, SCU_NORMALIZE_COMPLETION_STATUS(completion_code), - SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR - ); + SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); sci_base_state_machine_change_state(&sci_req->state_machine, - SCI_BASE_REQUEST_STATE_COMPLETED); + SCI_BASE_REQUEST_STATE_COMPLETED); break; } return SCI_SUCCESS; } +enum sci_status +scic_sds_io_request_tc_completion(struct scic_sds_request *sci_req, u32 completion_code) +{ + enum sci_base_request_states state; + struct scic_sds_controller *scic = sci_req->owning_controller; + + state = sci_req->state_machine.current_state_id; + + switch (state) { + case SCI_BASE_REQUEST_STATE_STARTED: + return request_started_state_tc_event(sci_req, completion_code); + case SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_COMPLETION: + return ssp_task_request_await_tc_event(sci_req, completion_code); + case SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_RESPONSE: + return smp_request_await_response_tc_event(sci_req, completion_code); + case SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_TC_COMPLETION: + return smp_request_await_tc_event(sci_req, completion_code); + case SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_TC_COMPLETION_SUBSTATE: + return stp_request_udma_await_tc_event(sci_req, completion_code); + case SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_H2D_COMPLETION_SUBSTATE: + return stp_request_non_data_await_h2d_tc_event(sci_req, completion_code); + case SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_H2D_COMPLETION_SUBSTATE: + return stp_request_pio_await_h2d_completion_tc_event(sci_req, completion_code); + case SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_OUT_TRANSMIT_DATA_SUBSTATE: + return pio_data_out_tx_done_tc_event(sci_req, completion_code); + case SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_ASSERTED_COMPLETION_SUBSTATE: + return stp_request_soft_reset_await_h2d_asserted_tc_event(sci_req, completion_code); + case SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_DIAGNOSTIC_COMPLETION_SUBSTATE: + return stp_request_soft_reset_await_h2d_diagnostic_tc_event(sci_req, completion_code); + case SCI_BASE_REQUEST_STATE_ABORTING: + return request_aborting_state_tc_event(sci_req, completion_code); + default: + dev_warn(scic_to_dev(scic), + "%s: SCIC IO Request given task completion notification %x " + "while in wrong state %d\n", __func__, completion_code, + state); + return SCI_FAILURE_INVALID_STATE; + } +} + + + static const struct scic_sds_io_request_state_handler scic_sds_request_state_handler_table[] = { [SCI_BASE_REQUEST_STATE_INITIAL] = {}, [SCI_BASE_REQUEST_STATE_CONSTRUCTED] = {}, - [SCI_BASE_REQUEST_STATE_STARTED] = { - .tc_completion_handler = scic_sds_request_started_state_tc_completion_handler, - }, - [SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_COMPLETION] = { - .tc_completion_handler = scic_sds_ssp_task_request_await_tc_completion_tc_completion_handler, - }, + [SCI_BASE_REQUEST_STATE_STARTED] = { }, + [SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_COMPLETION] = { }, [SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_RESPONSE] = { }, - [SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_RESPONSE] = { - .tc_completion_handler = scic_sds_smp_request_await_response_tc_completion_handler, - }, - [SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_TC_COMPLETION] = { - .tc_completion_handler = scic_sds_smp_request_await_tc_completion_tc_completion_handler, - }, - [SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_TC_COMPLETION_SUBSTATE] = { - .tc_completion_handler = 
scic_sds_stp_request_udma_await_tc_completion_tc_completion_handler, - }, + [SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_RESPONSE] = { }, + [SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_TC_COMPLETION] = { }, + [SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_TC_COMPLETION_SUBSTATE] = { }, [SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_D2H_REG_FIS_SUBSTATE] = { }, - [SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_H2D_COMPLETION_SUBSTATE] = { - .tc_completion_handler = scic_sds_stp_request_non_data_await_h2d_tc_completion_handler, - }, + [SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_H2D_COMPLETION_SUBSTATE] = { }, [SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_D2H_SUBSTATE] = { }, - [SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_H2D_COMPLETION_SUBSTATE] = { - .tc_completion_handler = scic_sds_stp_request_pio_await_h2d_completion_tc_completion_handler, - }, + [SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_H2D_COMPLETION_SUBSTATE] = { }, [SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE] = { }, [SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_DATA_SUBSTATE] = { .event_handler = scic_sds_stp_request_pio_data_in_await_data_event_handler, }, - [SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_OUT_TRANSMIT_DATA_SUBSTATE] = { - .tc_completion_handler = scic_sds_stp_request_pio_data_out_await_data_transmit_completion_tc_completion_handler, - }, - [SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_ASSERTED_COMPLETION_SUBSTATE] = { - .tc_completion_handler = scic_sds_stp_request_soft_reset_await_h2d_asserted_tc_completion_handler, - }, - [SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_DIAGNOSTIC_COMPLETION_SUBSTATE] = { - .tc_completion_handler = scic_sds_stp_request_soft_reset_await_h2d_diagnostic_tc_completion_handler, - }, + [SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_OUT_TRANSMIT_DATA_SUBSTATE] = { }, + [SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_ASSERTED_COMPLETION_SUBSTATE] = { }, + [SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_DIAGNOSTIC_COMPLETION_SUBSTATE] = { }, [SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_D2H_RESPONSE_FRAME_SUBSTATE] = { }, [SCI_BASE_REQUEST_STATE_COMPLETED] = { .complete_handler = scic_sds_request_completed_state_complete_handler, }, - [SCI_BASE_REQUEST_STATE_ABORTING] = { - .tc_completion_handler = scic_sds_request_aborting_state_tc_completion_handler, - }, + [SCI_BASE_REQUEST_STATE_ABORTING] = { }, [SCI_BASE_REQUEST_STATE_FINAL] = { }, }; diff --git a/drivers/scsi/isci/request.h b/drivers/scsi/isci/request.h index c9070e70894..e13ca3f7c8d 100644 --- a/drivers/scsi/isci/request.h +++ b/drivers/scsi/isci/request.h @@ -426,8 +426,6 @@ typedef enum sci_status (*scic_sds_io_request_handler_t) (struct scic_sds_request *request); typedef enum sci_status (*scic_sds_io_request_event_handler_t) (struct scic_sds_request *req, u32 event); -typedef enum sci_status (*scic_sds_io_request_task_completion_handler_t) - (struct scic_sds_request *req, u32 completion_code); /** * struct scic_sds_io_request_state_handler - This is the SDS core definition @@ -442,7 +440,6 @@ struct scic_sds_io_request_state_handler { */ scic_sds_io_request_handler_t complete_handler; - scic_sds_io_request_task_completion_handler_t tc_completion_handler; scic_sds_io_request_event_handler_t event_handler; }; -- cgit v1.2.3-70-g09d2 From 79e2b6b27699c916e3c7cda18a26d47fea6017fb Mon Sep 17 00:00:00 2001 From: Dan Williams <dan.j.williams@intel.com> Date: Wed, 11 May 2011 08:29:56 -0700 Subject: isci: remove the completion and event state handlers With these handlers gone the rest of the state handler infrastructure is removed. 
Added some WARN_ONCEs where previously we would cause NULL pointer dereferences or silently run handlers from a previous state. Reported-by: Christoph Hellwig <hch@lst.de> Signed-off-by: Dan Williams <dan.j.williams@intel.com> --- drivers/scsi/isci/request.c | 514 +++++++------------------------------------- drivers/scsi/isci/request.h | 37 +--- 2 files changed, 82 insertions(+), 469 deletions(-) (limited to 'drivers/scsi/isci/request.h') diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c index cb13b78d802..c63064ede38 100644 --- a/drivers/scsi/isci/request.c +++ b/drivers/scsi/isci/request.c @@ -890,21 +890,62 @@ scic_sds_io_request_terminate(struct scic_sds_request *sci_req) return SCI_FAILURE_INVALID_STATE; } -enum sci_status scic_sds_io_request_event_handler( - struct scic_sds_request *request, - u32 event_code) +enum sci_status scic_sds_request_complete(struct scic_sds_request *sci_req) { - if (request->state_handlers->event_handler) - return request->state_handlers->event_handler(request, event_code); + enum sci_base_request_states state; + struct scic_sds_controller *scic = sci_req->owning_controller; + + state = sci_req->state_machine.current_state_id; + if (WARN_ONCE(state != SCI_BASE_REQUEST_STATE_COMPLETED, + "isci: request completion from wrong state (%d)\n", state)) + return SCI_FAILURE_INVALID_STATE; - dev_warn(scic_to_dev(request->owning_controller), - "%s: SCIC IO Request given event code notification %x while " - "in wrong state %d\n", - __func__, - event_code, - sci_base_state_machine_get_state(&request->state_machine)); + if (!sci_req->was_tag_assigned_by_user) + scic_controller_free_io_tag(scic, sci_req->io_tag); - return SCI_FAILURE_INVALID_STATE; + if (sci_req->saved_rx_frame_index != SCU_INVALID_FRAME_INDEX) + scic_sds_controller_release_frame(scic, + sci_req->saved_rx_frame_index); + + /* XXX can we just stop the machine and remove the 'final' state? */ + sci_base_state_machine_change_state(&sci_req->state_machine, + SCI_BASE_REQUEST_STATE_FINAL); + return SCI_SUCCESS; +} + +enum sci_status scic_sds_io_request_event_handler(struct scic_sds_request *sci_req, + u32 event_code) +{ + enum sci_base_request_states state; + struct scic_sds_controller *scic = sci_req->owning_controller; + + state = sci_req->state_machine.current_state_id; + + if (state != SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_DATA_SUBSTATE) { + dev_warn(scic_to_dev(scic), "%s: (%x) in wrong state %d\n", + __func__, event_code, state); + + return SCI_FAILURE_INVALID_STATE; + } + + switch (scu_get_event_specifier(event_code)) { + case SCU_TASK_DONE_CRC_ERR << SCU_EVENT_SPECIFIC_CODE_SHIFT: + /* We are waiting for data and the SCU has R_ERR the data frame. + * Go back to waiting for the D2H Register FIS + */ + sci_base_state_machine_change_state(&sci_req->state_machine, + SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE); + return SCI_SUCCESS; + default: + dev_err(scic_to_dev(scic), + "%s: pio request unexpected event %#x\n", + __func__, event_code); + + /* TODO Should we fail the PIO request when we get an + * unexpected event? + */ + return SCI_FAILURE; + } } /* @@ -1080,34 +1121,8 @@ static enum sci_status request_started_state_tc_event(struct scic_sds_request *s return SCI_SUCCESS; } -/* - * This method implements the action to be taken when an SCIC_SDS_IO_REQUEST_T - * object receives a scic_sds_request_complete() request. This method frees up - * any io request resources that have been allocated and transitions the - * request to its final state. 
Consider stopping the state machine instead of - * transitioning to the final state? enum sci_status SCI_SUCCESS - */ -static enum sci_status scic_sds_request_completed_state_complete_handler( - struct scic_sds_request *request) -{ - if (request->was_tag_assigned_by_user != true) { - scic_controller_free_io_tag( - request->owning_controller, request->io_tag); - } - - if (request->saved_rx_frame_index != SCU_INVALID_FRAME_INDEX) { - scic_sds_controller_release_frame( - request->owning_controller, request->saved_rx_frame_index); - } - - sci_base_state_machine_change_state(&request->state_machine, - SCI_BASE_REQUEST_STATE_FINAL); - return SCI_SUCCESS; -} - -static enum sci_status request_aborting_state_tc_event( - struct scic_sds_request *sci_req, - u32 completion_code) +static enum sci_status request_aborting_state_tc_event(struct scic_sds_request *sci_req, + u32 completion_code) { switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { case (SCU_TASK_DONE_GOOD << SCU_COMPLETION_TL_STATUS_SHIFT): @@ -1585,48 +1600,6 @@ static enum sci_status pio_data_out_tx_done_tc_event(struct scic_sds_request *sc return status; } -/** - * - * @request: This is the request which is receiving the event. - * @event_code: This is the event code that the request on which the request is - * expected to take action. - * - * This method will handle any link layer events while waiting for the data - * frame. enum sci_status SCI_SUCCESS SCI_FAILURE - */ -static enum sci_status scic_sds_stp_request_pio_data_in_await_data_event_handler( - struct scic_sds_request *request, - u32 event_code) -{ - enum sci_status status; - - switch (scu_get_event_specifier(event_code)) { - case SCU_TASK_DONE_CRC_ERR << SCU_EVENT_SPECIFIC_CODE_SHIFT: - /* - * We are waiting for data and the SCU has R_ERR the data frame. - * Go back to waiting for the D2H Register FIS */ - sci_base_state_machine_change_state( - &request->state_machine, - SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE - ); - - status = SCI_SUCCESS; - break; - - default: - dev_err(scic_to_dev(request->owning_controller), - "%s: SCIC PIO Request 0x%p received unexpected " - "event 0x%08x\n", - __func__, request, event_code); - - /* / @todo Should we fail the PIO request when we get an unexpected event? 
*/ - status = SCI_FAILURE; - break; - } - - return status; -} - static void scic_sds_stp_request_udma_complete_request( struct scic_sds_request *request, u32 scu_status, @@ -2236,37 +2209,6 @@ scic_sds_io_request_tc_completion(struct scic_sds_request *sci_req, u32 completi } } - - -static const struct scic_sds_io_request_state_handler scic_sds_request_state_handler_table[] = { - [SCI_BASE_REQUEST_STATE_INITIAL] = {}, - [SCI_BASE_REQUEST_STATE_CONSTRUCTED] = {}, - [SCI_BASE_REQUEST_STATE_STARTED] = { }, - [SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_COMPLETION] = { }, - [SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_RESPONSE] = { }, - [SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_RESPONSE] = { }, - [SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_TC_COMPLETION] = { }, - [SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_TC_COMPLETION_SUBSTATE] = { }, - [SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_D2H_REG_FIS_SUBSTATE] = { }, - [SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_H2D_COMPLETION_SUBSTATE] = { }, - [SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_D2H_SUBSTATE] = { }, - [SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_H2D_COMPLETION_SUBSTATE] = { }, - [SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE] = { }, - [SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_DATA_SUBSTATE] = { - .event_handler = scic_sds_stp_request_pio_data_in_await_data_event_handler, - }, - [SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_OUT_TRANSMIT_DATA_SUBSTATE] = { }, - [SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_ASSERTED_COMPLETION_SUBSTATE] = { }, - [SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_DIAGNOSTIC_COMPLETION_SUBSTATE] = { }, - [SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_D2H_RESPONSE_FRAME_SUBSTATE] = { }, - [SCI_BASE_REQUEST_STATE_COMPLETED] = { - .complete_handler = scic_sds_request_completed_state_complete_handler, - }, - [SCI_BASE_REQUEST_STATE_ABORTING] = { }, - [SCI_BASE_REQUEST_STATE_FINAL] = { }, -}; - - /** * isci_request_process_response_iu() - This function sets the status and * response iu, in the task struct, from the request object for the upper @@ -2958,46 +2900,6 @@ static void isci_request_io_request_complete(struct isci_host *isci_host, isci_host_can_dequeue(isci_host, 1); } -/** - * scic_sds_request_initial_state_enter() - - * @object: This parameter specifies the base object for which the state - * transition is occurring. - * - * This method implements the actions taken when entering the - * SCI_BASE_REQUEST_STATE_INITIAL state. This state is entered when the initial - * base request is constructed. Entry into the initial state sets all handlers - * for the io request object to their default handlers. none - */ -static void scic_sds_request_initial_state_enter(void *object) -{ - struct scic_sds_request *sci_req = object; - - SET_STATE_HANDLER( - sci_req, - scic_sds_request_state_handler_table, - SCI_BASE_REQUEST_STATE_INITIAL - ); -} - -/** - * scic_sds_request_constructed_state_enter() - - * @object: The io request object that is to enter the constructed state. - * - * This method implements the actions taken when entering the - * SCI_BASE_REQUEST_STATE_CONSTRUCTED state. The method sets the state handlers - * for the the constructed state. 
none - */ -static void scic_sds_request_constructed_state_enter(void *object) -{ - struct scic_sds_request *sci_req = object; - - SET_STATE_HANDLER( - sci_req, - scic_sds_request_state_handler_table, - SCI_BASE_REQUEST_STATE_CONSTRUCTED - ); -} - static void scic_sds_request_started_state_enter(void *object) { struct scic_sds_request *sci_req = object; @@ -3011,12 +2913,6 @@ static void scic_sds_request_started_state_enter(void *object) */ task = ireq->ttype == io_task ? isci_request_access_task(ireq) : NULL; - SET_STATE_HANDLER( - sci_req, - scic_sds_request_state_handler_table, - SCI_BASE_REQUEST_STATE_STARTED - ); - /* all unaccelerated request types (non ssp or ncq) handled with * substates */ @@ -3046,30 +2942,13 @@ static void scic_sds_request_started_state_enter(void *object) } } -/** - * scic_sds_request_completed_state_enter() - - * @object: This parameter specifies the base object for which the state - * transition is occurring. This object is cast into a SCIC_SDS_IO_REQUEST - * object. - * - * This method implements the actions taken when entering the - * SCI_BASE_REQUEST_STATE_COMPLETED state. This state is entered when the - * SCIC_SDS_IO_REQUEST has completed. The method will decode the request - * completion status and convert it to an enum sci_status to return in the - * completion callback function. none - */ static void scic_sds_request_completed_state_enter(void *object) { struct scic_sds_request *sci_req = object; - struct scic_sds_controller *scic = - scic_sds_request_get_controller(sci_req); + struct scic_sds_controller *scic = sci_req->owning_controller; struct isci_host *ihost = scic_to_ihost(scic); struct isci_request *ireq = sci_req_to_ireq(sci_req); - SET_STATE_HANDLER(sci_req, - scic_sds_request_state_handler_table, - SCI_BASE_REQUEST_STATE_COMPLETED); - /* Tell the SCI_USER that the IO request is complete */ if (sci_req->is_task_management_request == false) isci_request_io_request_complete(ihost, ireq, @@ -3078,93 +2957,12 @@ static void scic_sds_request_completed_state_enter(void *object) isci_task_request_complete(ihost, ireq, sci_req->sci_status); } -/** - * scic_sds_request_aborting_state_enter() - - * @object: This parameter specifies the base object for which the state - * transition is occurring. This object is cast into a SCIC_SDS_IO_REQUEST - * object. - * - * This method implements the actions taken when entering the - * SCI_BASE_REQUEST_STATE_ABORTING state. none - */ static void scic_sds_request_aborting_state_enter(void *object) { struct scic_sds_request *sci_req = object; /* Setting the abort bit in the Task Context is required by the silicon. */ sci_req->task_context_buffer->abort = 1; - - SET_STATE_HANDLER( - sci_req, - scic_sds_request_state_handler_table, - SCI_BASE_REQUEST_STATE_ABORTING - ); -} - -/** - * scic_sds_request_final_state_enter() - - * @object: This parameter specifies the base object for which the state - * transition is occurring. This is cast into a SCIC_SDS_IO_REQUEST object. - * - * This method implements the actions taken when entering the - * SCI_BASE_REQUEST_STATE_FINAL state. The only action required is to put the - * state handlers in place. 
none - */ -static void scic_sds_request_final_state_enter(void *object) -{ - struct scic_sds_request *sci_req = object; - - SET_STATE_HANDLER( - sci_req, - scic_sds_request_state_handler_table, - SCI_BASE_REQUEST_STATE_FINAL - ); -} - -static void scic_sds_io_request_started_task_mgmt_await_tc_completion_substate_enter( - void *object) -{ - struct scic_sds_request *sci_req = object; - - SET_STATE_HANDLER( - sci_req, - scic_sds_request_state_handler_table, - SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_COMPLETION - ); -} - -static void scic_sds_io_request_started_task_mgmt_await_task_response_substate_enter( - void *object) -{ - struct scic_sds_request *sci_req = object; - - SET_STATE_HANDLER( - sci_req, - scic_sds_request_state_handler_table, - SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_RESPONSE - ); -} - -static void scic_sds_smp_request_started_await_response_substate_enter(void *object) -{ - struct scic_sds_request *sci_req = object; - - SET_STATE_HANDLER( - sci_req, - scic_sds_request_state_handler_table, - SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_RESPONSE - ); -} - -static void scic_sds_smp_request_started_await_tc_completion_substate_enter(void *object) -{ - struct scic_sds_request *sci_req = object; - - SET_STATE_HANDLER( - sci_req, - scic_sds_request_state_handler_table, - SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_TC_COMPLETION - ); } static void scic_sds_stp_request_started_non_data_await_h2d_completion_enter( @@ -3172,133 +2970,27 @@ static void scic_sds_stp_request_started_non_data_await_h2d_completion_enter( { struct scic_sds_request *sci_req = object; - SET_STATE_HANDLER( - sci_req, - scic_sds_request_state_handler_table, - SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_H2D_COMPLETION_SUBSTATE - ); - - scic_sds_remote_device_set_working_request( - sci_req->target_device, sci_req - ); -} - -static void scic_sds_stp_request_started_non_data_await_d2h_enter(void *object) -{ - struct scic_sds_request *sci_req = object; - - SET_STATE_HANDLER( - sci_req, - scic_sds_request_state_handler_table, - SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_D2H_SUBSTATE - ); -} - - - -static void scic_sds_stp_request_started_pio_await_h2d_completion_enter( - void *object) -{ - struct scic_sds_request *sci_req = object; - - SET_STATE_HANDLER( - sci_req, - scic_sds_request_state_handler_table, - SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_H2D_COMPLETION_SUBSTATE - ); - - scic_sds_remote_device_set_working_request( - sci_req->target_device, sci_req); -} - -static void scic_sds_stp_request_started_pio_await_frame_enter(void *object) -{ - struct scic_sds_request *sci_req = object; - - SET_STATE_HANDLER( - sci_req, - scic_sds_request_state_handler_table, - SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE - ); + scic_sds_remote_device_set_working_request(sci_req->target_device, + sci_req); } -static void scic_sds_stp_request_started_pio_data_in_await_data_enter( - void *object) +static void scic_sds_stp_request_started_pio_await_h2d_completion_enter(void *object) { struct scic_sds_request *sci_req = object; - SET_STATE_HANDLER( - sci_req, - scic_sds_request_state_handler_table, - SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_DATA_SUBSTATE - ); + scic_sds_remote_device_set_working_request(sci_req->target_device, + sci_req); } -static void scic_sds_stp_request_started_pio_data_out_transmit_data_enter( - void *object) +static void scic_sds_stp_request_started_soft_reset_await_h2d_asserted_completion_enter(void *object) { struct scic_sds_request *sci_req = object; - SET_STATE_HANDLER( 
- sci_req, - scic_sds_request_state_handler_table, - SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_OUT_TRANSMIT_DATA_SUBSTATE - ); + scic_sds_remote_device_set_working_request(sci_req->target_device, + sci_req); } - - -static void scic_sds_stp_request_started_udma_await_tc_completion_enter( - void *object) -{ - struct scic_sds_request *sci_req = object; - - SET_STATE_HANDLER( - sci_req, - scic_sds_request_state_handler_table, - SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_TC_COMPLETION_SUBSTATE - ); -} - -/** - * - * - * This state is entered when there is an TC completion failure. The hardware - * received an unexpected condition while processing the IO request and now - * will UF the D2H register FIS to complete the IO. - */ -static void scic_sds_stp_request_started_udma_await_d2h_reg_fis_enter( - void *object) -{ - struct scic_sds_request *sci_req = object; - - SET_STATE_HANDLER( - sci_req, - scic_sds_request_state_handler_table, - SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_D2H_REG_FIS_SUBSTATE - ); -} - - - -static void scic_sds_stp_request_started_soft_reset_await_h2d_asserted_completion_enter( - void *object) -{ - struct scic_sds_request *sci_req = object; - - SET_STATE_HANDLER( - sci_req, - scic_sds_request_state_handler_table, - SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_ASSERTED_COMPLETION_SUBSTATE - ); - - scic_sds_remote_device_set_working_request( - sci_req->target_device, sci_req - ); -} - -static void scic_sds_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter( - void *object) +static void scic_sds_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter(void *object) { struct scic_sds_request *sci_req = object; struct scu_task_context *task_context; @@ -3315,91 +3007,45 @@ static void scic_sds_stp_request_started_soft_reset_await_h2d_diagnostic_complet task_context->control_frame = 0; status = scic_controller_continue_io(sci_req); - if (status == SCI_SUCCESS) { - SET_STATE_HANDLER( - sci_req, - scic_sds_request_state_handler_table, - SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_DIAGNOSTIC_COMPLETION_SUBSTATE - ); - } -} - -static void scic_sds_stp_request_started_soft_reset_await_d2h_response_enter( - void *object) -{ - struct scic_sds_request *sci_req = object; - - SET_STATE_HANDLER( - sci_req, - scic_sds_request_state_handler_table, - SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_D2H_RESPONSE_FRAME_SUBSTATE - ); + WARN_ONCE(status != SCI_SUCCESS, "isci: continue io failure\n"); } static const struct sci_base_state scic_sds_request_state_table[] = { - [SCI_BASE_REQUEST_STATE_INITIAL] = { - .enter_state = scic_sds_request_initial_state_enter, - }, - [SCI_BASE_REQUEST_STATE_CONSTRUCTED] = { - .enter_state = scic_sds_request_constructed_state_enter, - }, + [SCI_BASE_REQUEST_STATE_INITIAL] = { }, + [SCI_BASE_REQUEST_STATE_CONSTRUCTED] = { }, [SCI_BASE_REQUEST_STATE_STARTED] = { .enter_state = scic_sds_request_started_state_enter, }, [SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_H2D_COMPLETION_SUBSTATE] = { .enter_state = scic_sds_stp_request_started_non_data_await_h2d_completion_enter, }, - [SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_D2H_SUBSTATE] = { - .enter_state = scic_sds_stp_request_started_non_data_await_d2h_enter, - }, + [SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_D2H_SUBSTATE] = { }, [SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_H2D_COMPLETION_SUBSTATE] = { .enter_state = scic_sds_stp_request_started_pio_await_h2d_completion_enter, }, - [SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE] = { - .enter_state = 
scic_sds_stp_request_started_pio_await_frame_enter, - }, - [SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_DATA_SUBSTATE] = { - .enter_state = scic_sds_stp_request_started_pio_data_in_await_data_enter, - }, - [SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_OUT_TRANSMIT_DATA_SUBSTATE] = { - .enter_state = scic_sds_stp_request_started_pio_data_out_transmit_data_enter, - }, - [SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_TC_COMPLETION_SUBSTATE] = { - .enter_state = scic_sds_stp_request_started_udma_await_tc_completion_enter, - }, - [SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_D2H_REG_FIS_SUBSTATE] = { - .enter_state = scic_sds_stp_request_started_udma_await_d2h_reg_fis_enter, - }, + [SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE] = { }, + [SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_DATA_SUBSTATE] = { }, + [SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_OUT_TRANSMIT_DATA_SUBSTATE] = { }, + [SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_TC_COMPLETION_SUBSTATE] = { }, + [SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_D2H_REG_FIS_SUBSTATE] = { }, [SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_ASSERTED_COMPLETION_SUBSTATE] = { .enter_state = scic_sds_stp_request_started_soft_reset_await_h2d_asserted_completion_enter, }, [SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_DIAGNOSTIC_COMPLETION_SUBSTATE] = { .enter_state = scic_sds_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter, }, - [SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_D2H_RESPONSE_FRAME_SUBSTATE] = { - .enter_state = scic_sds_stp_request_started_soft_reset_await_d2h_response_enter, - }, - [SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_COMPLETION] = { - .enter_state = scic_sds_io_request_started_task_mgmt_await_tc_completion_substate_enter, - }, - [SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_RESPONSE] = { - .enter_state = scic_sds_io_request_started_task_mgmt_await_task_response_substate_enter, - }, - [SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_RESPONSE] = { - .enter_state = scic_sds_smp_request_started_await_response_substate_enter, - }, - [SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_TC_COMPLETION] = { - .enter_state = scic_sds_smp_request_started_await_tc_completion_substate_enter, - }, + [SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_D2H_RESPONSE_FRAME_SUBSTATE] = { }, + [SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_COMPLETION] = { }, + [SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_RESPONSE] = { }, + [SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_RESPONSE] = { }, + [SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_TC_COMPLETION] = { }, [SCI_BASE_REQUEST_STATE_COMPLETED] = { .enter_state = scic_sds_request_completed_state_enter, }, [SCI_BASE_REQUEST_STATE_ABORTING] = { .enter_state = scic_sds_request_aborting_state_enter, }, - [SCI_BASE_REQUEST_STATE_FINAL] = { - .enter_state = scic_sds_request_final_state_enter, - }, + [SCI_BASE_REQUEST_STATE_FINAL] = { }, }; static void scic_sds_general_request_construct(struct scic_sds_controller *scic, diff --git a/drivers/scsi/isci/request.h b/drivers/scsi/isci/request.h index e13ca3f7c8d..31d6d571747 100644 --- a/drivers/scsi/isci/request.h +++ b/drivers/scsi/isci/request.h @@ -223,13 +223,6 @@ struct scic_sds_request { */ u32 saved_rx_frame_index; - /** - * This field specifies the current state handlers in place for this - * IO Request object. This field is updated each time the request - * changes state. - */ - const struct scic_sds_io_request_state_handler *state_handlers; - /** * This field in the recorded device sequence for the io request. 
This is * recorded during the build operation and is compared in the start @@ -422,27 +415,6 @@ enum sci_base_request_states { SCI_BASE_REQUEST_STATE_FINAL, }; -typedef enum sci_status (*scic_sds_io_request_handler_t) - (struct scic_sds_request *request); -typedef enum sci_status (*scic_sds_io_request_event_handler_t) - (struct scic_sds_request *req, u32 event); - -/** - * struct scic_sds_io_request_state_handler - This is the SDS core definition - * of the state handlers. - * - * - */ -struct scic_sds_io_request_state_handler { - /** - * The complete_handler specifies the method invoked when a user attempts to - * complete a request. - */ - scic_sds_io_request_handler_t complete_handler; - - scic_sds_io_request_event_handler_t event_handler; -}; - /** * scic_sds_request_get_controller() - * @@ -495,13 +467,6 @@ struct scic_sds_io_request_state_handler { (request)->sci_status = (sci_status_code); \ } -#define scic_sds_request_complete(a_request) \ - ((a_request)->state_handlers->complete_handler(a_request)) - - -extern enum sci_status -scic_sds_io_request_tc_completion(struct scic_sds_request *request, u32 completion_code); - /** * SCU_SGL_ZERO() - * @@ -538,6 +503,8 @@ enum sci_status scic_sds_io_request_event_handler(struct scic_sds_request *sci_r enum sci_status scic_sds_io_request_frame_handler(struct scic_sds_request *sci_req, u32 frame_index); enum sci_status scic_sds_task_request_terminate(struct scic_sds_request *sci_req); +extern enum sci_status scic_sds_request_complete(struct scic_sds_request *sci_req); +extern enum sci_status scic_sds_io_request_tc_completion(struct scic_sds_request *sci_req, u32 code); /* XXX open code in caller */ static inline void *scic_request_get_virt_addr(struct scic_sds_request *sci_req, -- cgit v1.2.3-70-g09d2 From e301370ac553a9a0ac0d1d25e769b86cf60395b3 Mon Sep 17 00:00:00 2001 From: Edmund Nadolski <edmund.nadolski@intel.com> Date: Thu, 2 Jun 2011 00:10:43 +0000 Subject: isci: state machine cleanup This cleans up several areas of the state machine mechanism: o Rename sci_base_state_machine_change_state to sci_change_state o Remove sci_base_state_machine_get_state function o Rename 'state_machine' struct member to 'sm' in client structs o Shorten the name of request states o Shorten state machine state names as follows: SCI_BASE_CONTROLLER_STATE_xxx to SCIC_xxx SCI_BASE_PHY_STATE_xxx to SCI_PHY_xxx SCIC_SDS_PHY_STARTING_SUBSTATE_xxx to SCI_PHY_SUB_xxx SCI_BASE_PORT_STATE_xxx to SCI_PORT_xxx and SCIC_SDS_PORT_READY_SUBSTATE_xxx to SCI_PORT_SUB_xxx SCI_BASE_REMOTE_DEVICE_STATE_xxx to SCI_DEV_xxx SCIC_SDS_STP_REMOTE_DEVICE_READY_SUBSTATE_xxx to SCI_STP_DEV_xxx SCIC_SDS_SMP_REMOTE_DEVICE_READY_SUBSTATE_xxx to SCI_SMP_DEV_xxx SCIC_SDS_REMOTE_NODE_CONTEXT_xxx_STATE to SCI_RNC_xxx Signed-off-by: Edmund Nadolski <edmund.nadolski@intel.com> Signed-off-by: Dave Jiang <dave.jiang@intel.com> Signed-off-by: Dan Williams <dan.j.williams@intel.com> --- drivers/scsi/isci/host.c | 201 +++++------ drivers/scsi/isci/host.h | 25 +- drivers/scsi/isci/phy.c | 244 ++++++------- drivers/scsi/isci/phy.h | 34 +- drivers/scsi/isci/port.c | 186 +++++----- drivers/scsi/isci/port.h | 18 +- drivers/scsi/isci/port_config.c | 4 +- drivers/scsi/isci/remote_device.c | 349 ++++++++++--------- drivers/scsi/isci/remote_device.h | 34 +- drivers/scsi/isci/remote_node_context.c | 154 ++++----- drivers/scsi/isci/remote_node_context.h | 18 +- drivers/scsi/isci/request.c | 582 +++++++++++++++++--------------- drivers/scsi/isci/request.h | 357 ++++++++++---------- 
drivers/scsi/isci/state_machine.c | 26 +- drivers/scsi/isci/state_machine.h | 4 +- 15 files changed, 1076 insertions(+), 1160 deletions(-) (limited to 'drivers/scsi/isci/request.h') diff --git a/drivers/scsi/isci/host.c b/drivers/scsi/isci/host.c index 8801955be21..81ee64c0a4b 100644 --- a/drivers/scsi/isci/host.c +++ b/drivers/scsi/isci/host.c @@ -635,8 +635,7 @@ static void scic_sds_controller_error_handler(struct scic_sds_controller *scic) dev_err(scic_to_dev(scic), "%s: status: %#x\n", __func__, interrupt_status); - sci_base_state_machine_change_state(&scic->state_machine, - SCI_BASE_CONTROLLER_STATE_FAILED); + sci_change_state(&scic->sm, SCIC_FAILED); return; } @@ -895,14 +894,12 @@ static void scic_sds_controller_transition_to_ready( { struct isci_host *ihost = scic_to_ihost(scic); - if (scic->state_machine.current_state_id == - SCI_BASE_CONTROLLER_STATE_STARTING) { + if (scic->sm.current_state_id == SCIC_STARTING) { /* * We move into the ready state, because some of the phys/ports * may be up and operational. */ - sci_base_state_machine_change_state(&scic->state_machine, - SCI_BASE_CONTROLLER_STATE_READY); + sci_change_state(&scic->sm, SCIC_READY); isci_host_start_complete(ihost, status); } @@ -912,18 +909,18 @@ static bool is_phy_starting(struct scic_sds_phy *sci_phy) { enum scic_sds_phy_states state; - state = sci_phy->state_machine.current_state_id; + state = sci_phy->sm.current_state_id; switch (state) { - case SCI_BASE_PHY_STATE_STARTING: - case SCIC_SDS_PHY_STARTING_SUBSTATE_INITIAL: - case SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SAS_SPEED_EN: - case SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_IAF_UF: - case SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SAS_POWER: - case SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SATA_POWER: - case SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SATA_PHY_EN: - case SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SATA_SPEED_EN: - case SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SIG_FIS_UF: - case SCIC_SDS_PHY_STARTING_SUBSTATE_FINAL: + case SCI_PHY_STARTING: + case SCI_PHY_SUB_INITIAL: + case SCI_PHY_SUB_AWAIT_SAS_SPEED_EN: + case SCI_PHY_SUB_AWAIT_IAF_UF: + case SCI_PHY_SUB_AWAIT_SAS_POWER: + case SCI_PHY_SUB_AWAIT_SATA_POWER: + case SCI_PHY_SUB_AWAIT_SATA_PHY_EN: + case SCI_PHY_SUB_AWAIT_SATA_SPEED_EN: + case SCI_PHY_SUB_AWAIT_SIG_FIS_UF: + case SCI_PHY_SUB_FINAL: return true; default: return false; @@ -957,7 +954,7 @@ static enum sci_status scic_sds_controller_start_next_phy(struct scic_sds_contro for (index = 0; index < SCI_MAX_PHYS; index++) { sci_phy = &ihost->phys[index].sci; - state = sci_phy->state_machine.current_state_id; + state = sci_phy->sm.current_state_id; if (!phy_get_non_dummy_port(sci_phy)) continue; @@ -968,12 +965,9 @@ static enum sci_status scic_sds_controller_start_next_phy(struct scic_sds_contro * - have an indication of a connected device and it has * finished the link training process. 
*/ - if ((sci_phy->is_in_link_training == false && - state == SCI_BASE_PHY_STATE_INITIAL) || - (sci_phy->is_in_link_training == false && - state == SCI_BASE_PHY_STATE_STOPPED) || - (sci_phy->is_in_link_training == true && - is_phy_starting(sci_phy))) { + if ((sci_phy->is_in_link_training == false && state == SCI_PHY_INITIAL) || + (sci_phy->is_in_link_training == false && state == SCI_PHY_STOPPED) || + (sci_phy->is_in_link_training == true && is_phy_starting(sci_phy))) { is_controller_start_complete = false; break; } @@ -1059,8 +1053,7 @@ static enum sci_status scic_controller_start(struct scic_sds_controller *scic, enum sci_status result; u16 index; - if (scic->state_machine.current_state_id != - SCI_BASE_CONTROLLER_STATE_INITIALIZED) { + if (scic->sm.current_state_id != SCIC_INITIALIZED) { dev_warn(scic_to_dev(scic), "SCIC Controller start operation requested in " "invalid state\n"); @@ -1108,8 +1101,7 @@ static enum sci_status scic_controller_start(struct scic_sds_controller *scic, sci_mod_timer(&scic->timer, timeout); - sci_base_state_machine_change_state(&scic->state_machine, - SCI_BASE_CONTROLLER_STATE_STARTING); + sci_change_state(&scic->sm, SCIC_STARTING); return SCI_SUCCESS; } @@ -1279,8 +1271,7 @@ static void isci_host_completion_routine(unsigned long data) static enum sci_status scic_controller_stop(struct scic_sds_controller *scic, u32 timeout) { - if (scic->state_machine.current_state_id != - SCI_BASE_CONTROLLER_STATE_READY) { + if (scic->sm.current_state_id != SCIC_READY) { dev_warn(scic_to_dev(scic), "SCIC Controller stop operation requested in " "invalid state\n"); @@ -1288,8 +1279,7 @@ static enum sci_status scic_controller_stop(struct scic_sds_controller *scic, } sci_mod_timer(&scic->timer, timeout); - sci_base_state_machine_change_state(&scic->state_machine, - SCI_BASE_CONTROLLER_STATE_STOPPING); + sci_change_state(&scic->sm, SCIC_STOPPING); return SCI_SUCCESS; } @@ -1307,17 +1297,16 @@ static enum sci_status scic_controller_stop(struct scic_sds_controller *scic, */ static enum sci_status scic_controller_reset(struct scic_sds_controller *scic) { - switch (scic->state_machine.current_state_id) { - case SCI_BASE_CONTROLLER_STATE_RESET: - case SCI_BASE_CONTROLLER_STATE_READY: - case SCI_BASE_CONTROLLER_STATE_STOPPED: - case SCI_BASE_CONTROLLER_STATE_FAILED: + switch (scic->sm.current_state_id) { + case SCIC_RESET: + case SCIC_READY: + case SCIC_STOPPED: + case SCIC_FAILED: /* * The reset operation is not a graceful cleanup, just * perform the state transition. 
*/ - sci_base_state_machine_change_state(&scic->state_machine, - SCI_BASE_CONTROLLER_STATE_RESETTING); + sci_change_state(&scic->sm, SCIC_RESETTING); return SCI_SUCCESS; default: dev_warn(scic_to_dev(scic), @@ -1416,15 +1405,14 @@ static void isci_user_parameters_get( static void scic_sds_controller_initial_state_enter(struct sci_base_state_machine *sm) { - struct scic_sds_controller *scic = container_of(sm, typeof(*scic), state_machine); + struct scic_sds_controller *scic = container_of(sm, typeof(*scic), sm); - sci_base_state_machine_change_state(&scic->state_machine, - SCI_BASE_CONTROLLER_STATE_RESET); + sci_change_state(&scic->sm, SCIC_RESET); } static inline void scic_sds_controller_starting_state_exit(struct sci_base_state_machine *sm) { - struct scic_sds_controller *scic = container_of(sm, typeof(*scic), state_machine); + struct scic_sds_controller *scic = container_of(sm, typeof(*scic), sm); sci_del_timer(&scic->timer); } @@ -1551,7 +1539,7 @@ static enum sci_status scic_controller_set_interrupt_coalescence( static void scic_sds_controller_ready_state_enter(struct sci_base_state_machine *sm) { - struct scic_sds_controller *scic = container_of(sm, typeof(*scic), state_machine); + struct scic_sds_controller *scic = container_of(sm, typeof(*scic), sm); /* set the default interrupt coalescence number and timeout value. */ scic_controller_set_interrupt_coalescence(scic, 0x10, 250); @@ -1559,7 +1547,7 @@ static void scic_sds_controller_ready_state_enter(struct sci_base_state_machine static void scic_sds_controller_ready_state_exit(struct sci_base_state_machine *sm) { - struct scic_sds_controller *scic = container_of(sm, typeof(*scic), state_machine); + struct scic_sds_controller *scic = container_of(sm, typeof(*scic), sm); /* disable interrupt coalescence. 
*/ scic_controller_set_interrupt_coalescence(scic, 0, 0); @@ -1650,7 +1638,7 @@ static enum sci_status scic_sds_controller_stop_devices(struct scic_sds_controll static void scic_sds_controller_stopping_state_enter(struct sci_base_state_machine *sm) { - struct scic_sds_controller *scic = container_of(sm, typeof(*scic), state_machine); + struct scic_sds_controller *scic = container_of(sm, typeof(*scic), sm); /* Stop all of the components for this controller */ scic_sds_controller_stop_phys(scic); @@ -1660,7 +1648,7 @@ static void scic_sds_controller_stopping_state_enter(struct sci_base_state_machi static void scic_sds_controller_stopping_state_exit(struct sci_base_state_machine *sm) { - struct scic_sds_controller *scic = container_of(sm, typeof(*scic), state_machine); + struct scic_sds_controller *scic = container_of(sm, typeof(*scic), sm); sci_del_timer(&scic->timer); } @@ -1691,36 +1679,35 @@ static void scic_sds_controller_reset_hardware(struct scic_sds_controller *scic) static void scic_sds_controller_resetting_state_enter(struct sci_base_state_machine *sm) { - struct scic_sds_controller *scic = container_of(sm, typeof(*scic), state_machine); + struct scic_sds_controller *scic = container_of(sm, typeof(*scic), sm); scic_sds_controller_reset_hardware(scic); - sci_base_state_machine_change_state(&scic->state_machine, - SCI_BASE_CONTROLLER_STATE_RESET); + sci_change_state(&scic->sm, SCIC_RESET); } static const struct sci_base_state scic_sds_controller_state_table[] = { - [SCI_BASE_CONTROLLER_STATE_INITIAL] = { + [SCIC_INITIAL] = { .enter_state = scic_sds_controller_initial_state_enter, }, - [SCI_BASE_CONTROLLER_STATE_RESET] = {}, - [SCI_BASE_CONTROLLER_STATE_INITIALIZING] = {}, - [SCI_BASE_CONTROLLER_STATE_INITIALIZED] = {}, - [SCI_BASE_CONTROLLER_STATE_STARTING] = { + [SCIC_RESET] = {}, + [SCIC_INITIALIZING] = {}, + [SCIC_INITIALIZED] = {}, + [SCIC_STARTING] = { .exit_state = scic_sds_controller_starting_state_exit, }, - [SCI_BASE_CONTROLLER_STATE_READY] = { + [SCIC_READY] = { .enter_state = scic_sds_controller_ready_state_enter, .exit_state = scic_sds_controller_ready_state_exit, }, - [SCI_BASE_CONTROLLER_STATE_RESETTING] = { + [SCIC_RESETTING] = { .enter_state = scic_sds_controller_resetting_state_enter, }, - [SCI_BASE_CONTROLLER_STATE_STOPPING] = { + [SCIC_STOPPING] = { .enter_state = scic_sds_controller_stopping_state_enter, .exit_state = scic_sds_controller_stopping_state_exit, }, - [SCI_BASE_CONTROLLER_STATE_STOPPED] = {}, - [SCI_BASE_CONTROLLER_STATE_FAILED] = {} + [SCIC_STOPPED] = {}, + [SCIC_FAILED] = {} }; static void scic_sds_controller_set_default_config_parameters(struct scic_sds_controller *scic) @@ -1774,7 +1761,7 @@ static void controller_timeout(unsigned long data) struct sci_timer *tmr = (struct sci_timer *)data; struct scic_sds_controller *scic = container_of(tmr, typeof(*scic), timer); struct isci_host *ihost = scic_to_ihost(scic); - struct sci_base_state_machine *sm = &scic->state_machine; + struct sci_base_state_machine *sm = &scic->sm; unsigned long flags; spin_lock_irqsave(&ihost->scic_lock, flags); @@ -1782,10 +1769,10 @@ static void controller_timeout(unsigned long data) if (tmr->cancel) goto done; - if (sm->current_state_id == SCI_BASE_CONTROLLER_STATE_STARTING) + if (sm->current_state_id == SCIC_STARTING) scic_sds_controller_transition_to_ready(scic, SCI_FAILURE_TIMEOUT); - else if (sm->current_state_id == SCI_BASE_CONTROLLER_STATE_STOPPING) { - sci_base_state_machine_change_state(sm, SCI_BASE_CONTROLLER_STATE_FAILED); + else if (sm->current_state_id == 
SCIC_STOPPING) { + sci_change_state(sm, SCIC_FAILED); isci_host_stop_complete(ihost, SCI_FAILURE_TIMEOUT); } else /* / @todo Now what do we want to do in this case? */ dev_err(scic_to_dev(scic), @@ -1820,11 +1807,11 @@ static enum sci_status scic_controller_construct(struct scic_sds_controller *sci struct isci_host *ihost = scic_to_ihost(scic); u8 i; - sci_base_state_machine_construct(&scic->state_machine, + sci_base_state_machine_construct(&scic->sm, scic_sds_controller_state_table, - SCI_BASE_CONTROLLER_STATE_INITIAL); + SCIC_INITIAL); - sci_base_state_machine_start(&scic->state_machine); + sci_base_state_machine_start(&scic->sm); scic->scu_registers = scu_base; scic->smu_registers = smu_base; @@ -1899,11 +1886,11 @@ int scic_oem_parameters_validate(struct scic_sds_oem_params *oem) static enum sci_status scic_oem_parameters_set(struct scic_sds_controller *scic, union scic_oem_parameters *scic_parms) { - u32 state = scic->state_machine.current_state_id; + u32 state = scic->sm.current_state_id; - if (state == SCI_BASE_CONTROLLER_STATE_RESET || - state == SCI_BASE_CONTROLLER_STATE_INITIALIZING || - state == SCI_BASE_CONTROLLER_STATE_INITIALIZED) { + if (state == SCIC_RESET || + state == SCIC_INITIALIZING || + state == SCIC_INITIALIZED) { if (scic_oem_parameters_validate(&scic_parms->sds1)) return SCI_FAILURE_INVALID_PARAMETER_VALUE; @@ -2168,10 +2155,8 @@ static enum sci_status scic_controller_set_mode(struct scic_sds_controller *scic { enum sci_status status = SCI_SUCCESS; - if ((scic->state_machine.current_state_id == - SCI_BASE_CONTROLLER_STATE_INITIALIZING) || - (scic->state_machine.current_state_id == - SCI_BASE_CONTROLLER_STATE_INITIALIZED)) { + if ((scic->sm.current_state_id == SCIC_INITIALIZING) || + (scic->sm.current_state_id == SCIC_INITIALIZED)) { switch (operating_mode) { case SCI_MODE_SPEED: scic->remote_node_entries = SCI_MAX_REMOTE_DEVICES; @@ -2216,20 +2201,19 @@ static void scic_sds_controller_initialize_power_control(struct scic_sds_control static enum sci_status scic_controller_initialize(struct scic_sds_controller *scic) { - struct sci_base_state_machine *sm = &scic->state_machine; + struct sci_base_state_machine *sm = &scic->sm; enum sci_status result = SCI_SUCCESS; struct isci_host *ihost = scic_to_ihost(scic); u32 index, state; - if (scic->state_machine.current_state_id != - SCI_BASE_CONTROLLER_STATE_RESET) { + if (scic->sm.current_state_id != SCIC_RESET) { dev_warn(scic_to_dev(scic), "SCIC Controller initialize operation requested " "in invalid state\n"); return SCI_FAILURE_INVALID_STATE; } - sci_base_state_machine_change_state(sm, SCI_BASE_CONTROLLER_STATE_INITIALIZING); + sci_change_state(sm, SCIC_INITIALIZING); sci_init_timer(&scic->phy_timer, phy_startup_timeout); @@ -2374,10 +2358,10 @@ static enum sci_status scic_controller_initialize(struct scic_sds_controller *sc /* Advance the controller state machine */ if (result == SCI_SUCCESS) - state = SCI_BASE_CONTROLLER_STATE_INITIALIZED; + state = SCIC_INITIALIZED; else - state = SCI_BASE_CONTROLLER_STATE_FAILED; - sci_base_state_machine_change_state(sm, state); + state = SCIC_FAILED; + sci_change_state(sm, state); return result; } @@ -2386,11 +2370,11 @@ static enum sci_status scic_user_parameters_set( struct scic_sds_controller *scic, union scic_user_parameters *scic_parms) { - u32 state = scic->state_machine.current_state_id; + u32 state = scic->sm.current_state_id; - if (state == SCI_BASE_CONTROLLER_STATE_RESET || - state == SCI_BASE_CONTROLLER_STATE_INITIALIZING || - state == 
SCI_BASE_CONTROLLER_STATE_INITIALIZED) { + if (state == SCIC_RESET || + state == SCIC_INITIALIZING || + state == SCIC_INITIALIZED) { u16 index; /* @@ -2612,15 +2596,15 @@ int isci_host_init(struct isci_host *isci_host) void scic_sds_controller_link_up(struct scic_sds_controller *scic, struct scic_sds_port *port, struct scic_sds_phy *phy) { - switch (scic->state_machine.current_state_id) { - case SCI_BASE_CONTROLLER_STATE_STARTING: + switch (scic->sm.current_state_id) { + case SCIC_STARTING: sci_del_timer(&scic->phy_timer); scic->phy_startup_timer_pending = false; scic->port_agent.link_up_handler(scic, &scic->port_agent, port, phy); scic_sds_controller_start_next_phy(scic); break; - case SCI_BASE_CONTROLLER_STATE_READY: + case SCIC_READY: scic->port_agent.link_up_handler(scic, &scic->port_agent, port, phy); break; @@ -2628,16 +2612,16 @@ void scic_sds_controller_link_up(struct scic_sds_controller *scic, dev_dbg(scic_to_dev(scic), "%s: SCIC Controller linkup event from phy %d in " "unexpected state %d\n", __func__, phy->phy_index, - scic->state_machine.current_state_id); + scic->sm.current_state_id); } } void scic_sds_controller_link_down(struct scic_sds_controller *scic, struct scic_sds_port *port, struct scic_sds_phy *phy) { - switch (scic->state_machine.current_state_id) { - case SCI_BASE_CONTROLLER_STATE_STARTING: - case SCI_BASE_CONTROLLER_STATE_READY: + switch (scic->sm.current_state_id) { + case SCIC_STARTING: + case SCIC_READY: scic->port_agent.link_down_handler(scic, &scic->port_agent, port, phy); break; @@ -2647,7 +2631,7 @@ void scic_sds_controller_link_down(struct scic_sds_controller *scic, "unexpected state %d\n", __func__, phy->phy_index, - scic->state_machine.current_state_id); + scic->sm.current_state_id); } } @@ -2663,8 +2647,7 @@ static bool scic_sds_controller_has_remote_devices_stopping( for (index = 0; index < controller->remote_node_entries; index++) { if ((controller->device_table[index] != NULL) && - (controller->device_table[index]->state_machine.current_state_id - == SCI_BASE_REMOTE_DEVICE_STATE_STOPPING)) + (controller->device_table[index]->sm.current_state_id == SCI_DEV_STOPPING)) return true; } @@ -2678,19 +2661,17 @@ static bool scic_sds_controller_has_remote_devices_stopping( void scic_sds_controller_remote_device_stopped(struct scic_sds_controller *scic, struct scic_sds_remote_device *sci_dev) { - if (scic->state_machine.current_state_id != - SCI_BASE_CONTROLLER_STATE_STOPPING) { + if (scic->sm.current_state_id != SCIC_STOPPING) { dev_dbg(scic_to_dev(scic), "SCIC Controller 0x%p remote device stopped event " "from device 0x%p in unexpected state %d\n", scic, sci_dev, - scic->state_machine.current_state_id); + scic->sm.current_state_id); return; } if (!scic_sds_controller_has_remote_devices_stopping(scic)) { - sci_base_state_machine_change_state(&scic->state_machine, - SCI_BASE_CONTROLLER_STATE_STOPPED); + sci_change_state(&scic->sm, SCIC_STOPPED); } } @@ -2948,8 +2929,7 @@ enum sci_status scic_controller_start_io( { enum sci_status status; - if (scic->state_machine.current_state_id != - SCI_BASE_CONTROLLER_STATE_READY) { + if (scic->sm.current_state_id != SCIC_READY) { dev_warn(scic_to_dev(scic), "invalid state to start I/O"); return SCI_FAILURE_INVALID_STATE; } @@ -2986,8 +2966,7 @@ enum sci_status scic_controller_terminate_request( { enum sci_status status; - if (scic->state_machine.current_state_id != - SCI_BASE_CONTROLLER_STATE_READY) { + if (scic->sm.current_state_id != SCIC_READY) { dev_warn(scic_to_dev(scic), "invalid state to terminate request\n"); 
return SCI_FAILURE_INVALID_STATE; @@ -3037,11 +3016,11 @@ enum sci_status scic_controller_complete_io( enum sci_status status; u16 index; - switch (scic->state_machine.current_state_id) { - case SCI_BASE_CONTROLLER_STATE_STOPPING: + switch (scic->sm.current_state_id) { + case SCIC_STOPPING: /* XXX: Implement this function */ return SCI_FAILURE; - case SCI_BASE_CONTROLLER_STATE_READY: + case SCIC_READY: status = scic_sds_remote_device_complete_io(scic, rdev, request); if (status != SCI_SUCCESS) return status; @@ -3060,8 +3039,7 @@ enum sci_status scic_controller_continue_io(struct scic_sds_request *sci_req) { struct scic_sds_controller *scic = sci_req->owning_controller; - if (scic->state_machine.current_state_id != - SCI_BASE_CONTROLLER_STATE_READY) { + if (scic->sm.current_state_id != SCIC_READY) { dev_warn(scic_to_dev(scic), "invalid state to continue I/O"); return SCI_FAILURE_INVALID_STATE; } @@ -3107,8 +3085,7 @@ enum sci_task_status scic_controller_start_task( { enum sci_status status; - if (scic->state_machine.current_state_id != - SCI_BASE_CONTROLLER_STATE_READY) { + if (scic->sm.current_state_id != SCIC_READY) { dev_warn(scic_to_dev(scic), "%s: SCIC Controller starting task from invalid " "state\n", diff --git a/drivers/scsi/isci/host.h b/drivers/scsi/isci/host.h index 4ce39e1803f..be09765ee1d 100644 --- a/drivers/scsi/isci/host.h +++ b/drivers/scsi/isci/host.h @@ -134,7 +134,7 @@ struct scic_sds_controller { * This field contains the information for the base controller state * machine. */ - struct sci_base_state_machine state_machine; + struct sci_base_state_machine sm; /** * Timer for controller start/stop operations. @@ -359,7 +359,7 @@ enum scic_sds_controller_states { /** * Simply the initial state for the base controller state machine. */ - SCI_BASE_CONTROLLER_STATE_INITIAL = 0, + SCIC_INITIAL = 0, /** * This state indicates that the controller is reset. The memory for @@ -368,7 +368,7 @@ enum scic_sds_controller_states { * This state is entered from the INITIAL state. * This state is entered from the RESETTING state. */ - SCI_BASE_CONTROLLER_STATE_RESET, + SCIC_RESET, /** * This state is typically an action state that indicates the controller @@ -376,28 +376,28 @@ enum scic_sds_controller_states { * are permitted. * This state is entered from the RESET state. */ - SCI_BASE_CONTROLLER_STATE_INITIALIZING, + SCIC_INITIALIZING, /** * This state indicates that the controller has been successfully * initialized. In this state no new IO operations are permitted. * This state is entered from the INITIALIZING state. */ - SCI_BASE_CONTROLLER_STATE_INITIALIZED, + SCIC_INITIALIZED, /** * This state indicates the the controller is in the process of becoming * ready (i.e. starting). In this state no new IO operations are permitted. * This state is entered from the INITIALIZED state. */ - SCI_BASE_CONTROLLER_STATE_STARTING, + SCIC_STARTING, /** * This state indicates the controller is now ready. Thus, the user * is able to perform IO operations on the controller. * This state is entered from the STARTING state. */ - SCI_BASE_CONTROLLER_STATE_READY, + SCIC_READY, /** * This state is typically an action state that indicates the controller @@ -408,7 +408,7 @@ enum scic_sds_controller_states { * This state is entered from the FAILED state. * This state is entered from the STOPPED state. */ - SCI_BASE_CONTROLLER_STATE_RESETTING, + SCIC_RESETTING, /** * This state indicates that the controller is in the process of stopping. 
@@ -416,14 +416,14 @@ enum scic_sds_controller_states { * operations are allowed to complete. * This state is entered from the READY state. */ - SCI_BASE_CONTROLLER_STATE_STOPPING, + SCIC_STOPPING, /** * This state indicates that the controller has successfully been stopped. * In this state no new IO operations are permitted. * This state is entered from the STOPPING state. */ - SCI_BASE_CONTROLLER_STATE_STOPPED, + SCIC_STOPPED, /** * This state indicates that the controller could not successfully be @@ -433,10 +433,7 @@ enum scic_sds_controller_states { * This state is entered from the STOPPING state. * This state is entered from the RESETTING state. */ - SCI_BASE_CONTROLLER_STATE_FAILED, - - SCI_BASE_CONTROLLER_MAX_STATES - + SCIC_FAILED, }; diff --git a/drivers/scsi/isci/phy.c b/drivers/scsi/isci/phy.c index fc196e389ff..9de21c71935 100644 --- a/drivers/scsi/isci/phy.c +++ b/drivers/scsi/isci/phy.c @@ -249,8 +249,7 @@ scic_sds_phy_link_layer_initialization(struct scic_sds_phy *sci_phy, writel(0x1F4, &sci_phy->link_layer_registers->link_layer_hang_detection_timeout); /* We can exit the initial state to the stopped state */ - sci_base_state_machine_change_state(&sci_phy->state_machine, - SCI_BASE_PHY_STATE_STOPPED); + sci_change_state(&sci_phy->sm, SCI_PHY_STOPPED); return SCI_SUCCESS; } @@ -273,8 +272,7 @@ static void phy_sata_timeout(unsigned long data) __func__, sci_phy); - sci_base_state_machine_change_state(&sci_phy->state_machine, - SCI_BASE_PHY_STATE_STARTING); + sci_change_state(&sci_phy->sm, SCI_PHY_STARTING); done: spin_unlock_irqrestore(&ihost->scic_lock, flags); } @@ -342,8 +340,7 @@ enum sci_status scic_sds_phy_initialize( /* * There is nothing that needs to be done in this state just * transition to the stopped state. */ - sci_base_state_machine_change_state(&sci_phy->state_machine, - SCI_BASE_PHY_STATE_STOPPED); + sci_change_state(&sci_phy->sm, SCI_PHY_STOPPED); return SCI_SUCCESS; } @@ -436,34 +433,33 @@ void scic_sds_phy_get_protocols(struct scic_sds_phy *sci_phy, enum sci_status scic_sds_phy_start(struct scic_sds_phy *sci_phy) { - enum scic_sds_phy_states state = sci_phy->state_machine.current_state_id; + enum scic_sds_phy_states state = sci_phy->sm.current_state_id; - if (state != SCI_BASE_PHY_STATE_STOPPED) { + if (state != SCI_PHY_STOPPED) { dev_dbg(sciphy_to_dev(sci_phy), "%s: in wrong state: %d\n", __func__, state); return SCI_FAILURE_INVALID_STATE; } - sci_base_state_machine_change_state(&sci_phy->state_machine, - SCI_BASE_PHY_STATE_STARTING); + sci_change_state(&sci_phy->sm, SCI_PHY_STARTING); return SCI_SUCCESS; } enum sci_status scic_sds_phy_stop(struct scic_sds_phy *sci_phy) { - enum scic_sds_phy_states state = sci_phy->state_machine.current_state_id; + enum scic_sds_phy_states state = sci_phy->sm.current_state_id; switch (state) { - case SCIC_SDS_PHY_STARTING_SUBSTATE_INITIAL: - case SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_OSSP_EN: - case SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SAS_SPEED_EN: - case SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SAS_POWER: - case SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SATA_POWER: - case SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SATA_PHY_EN: - case SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SATA_SPEED_EN: - case SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SIG_FIS_UF: - case SCIC_SDS_PHY_STARTING_SUBSTATE_FINAL: - case SCI_BASE_PHY_STATE_READY: + case SCI_PHY_SUB_INITIAL: + case SCI_PHY_SUB_AWAIT_OSSP_EN: + case SCI_PHY_SUB_AWAIT_SAS_SPEED_EN: + case SCI_PHY_SUB_AWAIT_SAS_POWER: + case SCI_PHY_SUB_AWAIT_SATA_POWER: + case SCI_PHY_SUB_AWAIT_SATA_PHY_EN: + case 
SCI_PHY_SUB_AWAIT_SATA_SPEED_EN: + case SCI_PHY_SUB_AWAIT_SIG_FIS_UF: + case SCI_PHY_SUB_FINAL: + case SCI_PHY_READY: break; default: dev_dbg(sciphy_to_dev(sci_phy), @@ -471,32 +467,30 @@ enum sci_status scic_sds_phy_stop(struct scic_sds_phy *sci_phy) return SCI_FAILURE_INVALID_STATE; } - sci_base_state_machine_change_state(&sci_phy->state_machine, - SCI_BASE_PHY_STATE_STOPPED); + sci_change_state(&sci_phy->sm, SCI_PHY_STOPPED); return SCI_SUCCESS; } enum sci_status scic_sds_phy_reset(struct scic_sds_phy *sci_phy) { - enum scic_sds_phy_states state = sci_phy->state_machine.current_state_id; + enum scic_sds_phy_states state = sci_phy->sm.current_state_id; - if (state != SCI_BASE_PHY_STATE_READY) { + if (state != SCI_PHY_READY) { dev_dbg(sciphy_to_dev(sci_phy), "%s: in wrong state: %d\n", __func__, state); return SCI_FAILURE_INVALID_STATE; } - sci_base_state_machine_change_state(&sci_phy->state_machine, - SCI_BASE_PHY_STATE_RESETTING); + sci_change_state(&sci_phy->sm, SCI_PHY_RESETTING); return SCI_SUCCESS; } enum sci_status scic_sds_phy_consume_power_handler(struct scic_sds_phy *sci_phy) { - enum scic_sds_phy_states state = sci_phy->state_machine.current_state_id; + enum scic_sds_phy_states state = sci_phy->sm.current_state_id; switch (state) { - case SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SAS_POWER: { + case SCI_PHY_SUB_AWAIT_SAS_POWER: { u32 enable_spinup; enable_spinup = readl(&sci_phy->link_layer_registers->notify_enable_spinup_control); @@ -504,12 +498,11 @@ enum sci_status scic_sds_phy_consume_power_handler(struct scic_sds_phy *sci_phy) writel(enable_spinup, &sci_phy->link_layer_registers->notify_enable_spinup_control); /* Change state to the final state this substate machine has run to completion */ - sci_base_state_machine_change_state(&sci_phy->state_machine, - SCIC_SDS_PHY_STARTING_SUBSTATE_FINAL); + sci_change_state(&sci_phy->sm, SCI_PHY_SUB_FINAL); return SCI_SUCCESS; } - case SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SATA_POWER: { + case SCI_PHY_SUB_AWAIT_SATA_POWER: { u32 scu_sas_pcfg_value; /* Release the spinup hold state and reset the OOB state machine */ @@ -528,8 +521,7 @@ enum sci_status scic_sds_phy_consume_power_handler(struct scic_sds_phy *sci_phy) &sci_phy->link_layer_registers->phy_configuration); /* Change state to the final state this substate machine has run to completion */ - sci_base_state_machine_change_state(&sci_phy->state_machine, - SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SATA_PHY_EN); + sci_change_state(&sci_phy->sm, SCI_PHY_SUB_AWAIT_SATA_PHY_EN); return SCI_SUCCESS; } @@ -566,10 +558,7 @@ static void scic_sds_phy_start_sas_link_training( writel(phy_control, &sci_phy->link_layer_registers->phy_configuration); - sci_base_state_machine_change_state( - &sci_phy->state_machine, - SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SAS_SPEED_EN - ); + sci_change_state(&sci_phy->sm, SCI_PHY_SUB_AWAIT_SAS_SPEED_EN); sci_phy->protocol = SCIC_SDS_PHY_PROTOCOL_SAS; } @@ -585,10 +574,7 @@ static void scic_sds_phy_start_sas_link_training( static void scic_sds_phy_start_sata_link_training( struct scic_sds_phy *sci_phy) { - sci_base_state_machine_change_state( - &sci_phy->state_machine, - SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SATA_POWER - ); + sci_change_state(&sci_phy->sm, SCI_PHY_SUB_AWAIT_SATA_POWER); sci_phy->protocol = SCIC_SDS_PHY_PROTOCOL_SATA; } @@ -611,17 +597,16 @@ static void scic_sds_phy_complete_link_training( { sci_phy->max_negotiated_speed = max_link_rate; - sci_base_state_machine_change_state(&sci_phy->state_machine, - next_state); + sci_change_state(&sci_phy->sm, next_state); } 
enum sci_status scic_sds_phy_event_handler(struct scic_sds_phy *sci_phy, u32 event_code) { - enum scic_sds_phy_states state = sci_phy->state_machine.current_state_id; + enum scic_sds_phy_states state = sci_phy->sm.current_state_id; switch (state) { - case SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_OSSP_EN: + case SCI_PHY_SUB_AWAIT_OSSP_EN: switch (scu_get_event_code(event_code)) { case SCU_EVENT_SAS_PHY_DETECTED: scic_sds_phy_start_sas_link_training(sci_phy); @@ -640,7 +625,7 @@ enum sci_status scic_sds_phy_event_handler(struct scic_sds_phy *sci_phy, return SCI_FAILURE; } return SCI_SUCCESS; - case SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SAS_SPEED_EN: + case SCI_PHY_SUB_AWAIT_SAS_SPEED_EN: switch (scu_get_event_code(event_code)) { case SCU_EVENT_SAS_PHY_DETECTED: /* @@ -652,21 +637,21 @@ enum sci_status scic_sds_phy_event_handler(struct scic_sds_phy *sci_phy, scic_sds_phy_complete_link_training( sci_phy, SAS_LINK_RATE_1_5_GBPS, - SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_IAF_UF); + SCI_PHY_SUB_AWAIT_IAF_UF); break; case SCU_EVENT_SAS_30: case SCU_EVENT_SAS_30_SSC: scic_sds_phy_complete_link_training( sci_phy, SAS_LINK_RATE_3_0_GBPS, - SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_IAF_UF); + SCI_PHY_SUB_AWAIT_IAF_UF); break; case SCU_EVENT_SAS_60: case SCU_EVENT_SAS_60_SSC: scic_sds_phy_complete_link_training( sci_phy, SAS_LINK_RATE_6_0_GBPS, - SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_IAF_UF); + SCI_PHY_SUB_AWAIT_IAF_UF); break; case SCU_EVENT_SATA_SPINUP_HOLD: /* @@ -676,8 +661,7 @@ enum sci_status scic_sds_phy_event_handler(struct scic_sds_phy *sci_phy, break; case SCU_EVENT_LINK_FAILURE: /* Link failure change state back to the starting state */ - sci_base_state_machine_change_state(&sci_phy->state_machine, - SCI_BASE_PHY_STATE_STARTING); + sci_change_state(&sci_phy->sm, SCI_PHY_STARTING); break; default: dev_warn(sciphy_to_dev(sci_phy), @@ -689,7 +673,7 @@ enum sci_status scic_sds_phy_event_handler(struct scic_sds_phy *sci_phy, break; } return SCI_SUCCESS; - case SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_IAF_UF: + case SCI_PHY_SUB_AWAIT_IAF_UF: switch (scu_get_event_code(event_code)) { case SCU_EVENT_SAS_PHY_DETECTED: /* Backup the state machine */ @@ -706,8 +690,7 @@ enum sci_status scic_sds_phy_event_handler(struct scic_sds_phy *sci_phy, case SCU_EVENT_LINK_FAILURE: case SCU_EVENT_HARD_RESET_RECEIVED: /* Start the oob/sn state machine over again */ - sci_base_state_machine_change_state(&sci_phy->state_machine, - SCI_BASE_PHY_STATE_STARTING); + sci_change_state(&sci_phy->sm, SCI_PHY_STARTING); break; default: dev_warn(sciphy_to_dev(sci_phy), @@ -717,12 +700,11 @@ enum sci_status scic_sds_phy_event_handler(struct scic_sds_phy *sci_phy, return SCI_FAILURE; } return SCI_SUCCESS; - case SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SAS_POWER: + case SCI_PHY_SUB_AWAIT_SAS_POWER: switch (scu_get_event_code(event_code)) { case SCU_EVENT_LINK_FAILURE: /* Link failure change state back to the starting state */ - sci_base_state_machine_change_state(&sci_phy->state_machine, - SCI_BASE_PHY_STATE_STARTING); + sci_change_state(&sci_phy->sm, SCI_PHY_STARTING); break; default: dev_warn(sciphy_to_dev(sci_phy), @@ -733,12 +715,11 @@ enum sci_status scic_sds_phy_event_handler(struct scic_sds_phy *sci_phy, return SCI_FAILURE; } return SCI_SUCCESS; - case SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SATA_POWER: + case SCI_PHY_SUB_AWAIT_SATA_POWER: switch (scu_get_event_code(event_code)) { case SCU_EVENT_LINK_FAILURE: /* Link failure change state back to the starting state */ - sci_base_state_machine_change_state(&sci_phy->state_machine, - 
SCI_BASE_PHY_STATE_STARTING); + sci_change_state(&sci_phy->sm, SCI_PHY_STARTING); break; case SCU_EVENT_SATA_SPINUP_HOLD: /* These events are received every 10ms and are @@ -762,12 +743,11 @@ enum sci_status scic_sds_phy_event_handler(struct scic_sds_phy *sci_phy, return SCI_FAILURE; } return SCI_SUCCESS; - case SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SATA_PHY_EN: + case SCI_PHY_SUB_AWAIT_SATA_PHY_EN: switch (scu_get_event_code(event_code)) { case SCU_EVENT_LINK_FAILURE: /* Link failure change state back to the starting state */ - sci_base_state_machine_change_state(&sci_phy->state_machine, - SCI_BASE_PHY_STATE_STARTING); + sci_change_state(&sci_phy->sm, SCI_PHY_STARTING); break; case SCU_EVENT_SATA_SPINUP_HOLD: /* These events might be received since we dont know how many may be in @@ -778,8 +758,7 @@ enum sci_status scic_sds_phy_event_handler(struct scic_sds_phy *sci_phy, sci_phy->protocol = SCIC_SDS_PHY_PROTOCOL_SATA; /* We have received the SATA PHY notification change state */ - sci_base_state_machine_change_state(&sci_phy->state_machine, - SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SATA_SPEED_EN); + sci_change_state(&sci_phy->sm, SCI_PHY_SUB_AWAIT_SATA_SPEED_EN); break; case SCU_EVENT_SAS_PHY_DETECTED: /* There has been a change in the phy type before OOB/SN for the @@ -797,7 +776,7 @@ enum sci_status scic_sds_phy_event_handler(struct scic_sds_phy *sci_phy, return SCI_FAILURE;; } return SCI_SUCCESS; - case SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SATA_SPEED_EN: + case SCI_PHY_SUB_AWAIT_SATA_SPEED_EN: switch (scu_get_event_code(event_code)) { case SCU_EVENT_SATA_PHY_DETECTED: /* @@ -809,26 +788,25 @@ enum sci_status scic_sds_phy_event_handler(struct scic_sds_phy *sci_phy, scic_sds_phy_complete_link_training( sci_phy, SAS_LINK_RATE_1_5_GBPS, - SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SIG_FIS_UF); + SCI_PHY_SUB_AWAIT_SIG_FIS_UF); break; case SCU_EVENT_SATA_30: case SCU_EVENT_SATA_30_SSC: scic_sds_phy_complete_link_training( sci_phy, SAS_LINK_RATE_3_0_GBPS, - SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SIG_FIS_UF); + SCI_PHY_SUB_AWAIT_SIG_FIS_UF); break; case SCU_EVENT_SATA_60: case SCU_EVENT_SATA_60_SSC: scic_sds_phy_complete_link_training( sci_phy, SAS_LINK_RATE_6_0_GBPS, - SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SIG_FIS_UF); + SCI_PHY_SUB_AWAIT_SIG_FIS_UF); break; case SCU_EVENT_LINK_FAILURE: /* Link failure change state back to the starting state */ - sci_base_state_machine_change_state(&sci_phy->state_machine, - SCI_BASE_PHY_STATE_STARTING); + sci_change_state(&sci_phy->sm, SCI_PHY_STARTING); break; case SCU_EVENT_SAS_PHY_DETECTED: /* @@ -846,18 +824,16 @@ enum sci_status scic_sds_phy_event_handler(struct scic_sds_phy *sci_phy, } return SCI_SUCCESS; - case SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SIG_FIS_UF: + case SCI_PHY_SUB_AWAIT_SIG_FIS_UF: switch (scu_get_event_code(event_code)) { case SCU_EVENT_SATA_PHY_DETECTED: /* Backup the state machine */ - sci_base_state_machine_change_state(&sci_phy->state_machine, - SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SATA_SPEED_EN); + sci_change_state(&sci_phy->sm, SCI_PHY_SUB_AWAIT_SATA_SPEED_EN); break; case SCU_EVENT_LINK_FAILURE: /* Link failure change state back to the starting state */ - sci_base_state_machine_change_state(&sci_phy->state_machine, - SCI_BASE_PHY_STATE_STARTING); + sci_change_state(&sci_phy->sm, SCI_PHY_STARTING); break; default: @@ -870,12 +846,11 @@ enum sci_status scic_sds_phy_event_handler(struct scic_sds_phy *sci_phy, return SCI_FAILURE; } return SCI_SUCCESS; - case SCI_BASE_PHY_STATE_READY: + case SCI_PHY_READY: switch (scu_get_event_code(event_code)) { 
case SCU_EVENT_LINK_FAILURE: /* Link failure change state back to the starting state */ - sci_base_state_machine_change_state(&sci_phy->state_machine, - SCI_BASE_PHY_STATE_STARTING); + sci_change_state(&sci_phy->sm, SCI_PHY_STARTING); break; case SCU_EVENT_BROADCAST_CHANGE: /* Broadcast change received. Notify the port. */ @@ -892,12 +867,11 @@ enum sci_status scic_sds_phy_event_handler(struct scic_sds_phy *sci_phy, return SCI_FAILURE_INVALID_STATE; } return SCI_SUCCESS; - case SCI_BASE_PHY_STATE_RESETTING: + case SCI_PHY_RESETTING: switch (scu_get_event_code(event_code)) { case SCU_EVENT_HARD_RESET_TRANSMITTED: /* Link failure change state back to the starting state */ - sci_base_state_machine_change_state(&sci_phy->state_machine, - SCI_BASE_PHY_STATE_STARTING); + sci_change_state(&sci_phy->sm, SCI_PHY_STARTING); break; default: dev_warn(sciphy_to_dev(sci_phy), @@ -919,12 +893,12 @@ enum sci_status scic_sds_phy_event_handler(struct scic_sds_phy *sci_phy, enum sci_status scic_sds_phy_frame_handler(struct scic_sds_phy *sci_phy, u32 frame_index) { - enum scic_sds_phy_states state = sci_phy->state_machine.current_state_id; + enum scic_sds_phy_states state = sci_phy->sm.current_state_id; struct scic_sds_controller *scic = sci_phy->owning_port->owning_controller; enum sci_status result; switch (state) { - case SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_IAF_UF: { + case SCI_PHY_SUB_AWAIT_IAF_UF: { u32 *frame_words; struct sas_identify_frame iaf; struct isci_phy *iphy = sci_phy_to_iphy(sci_phy); @@ -946,15 +920,14 @@ enum sci_status scic_sds_phy_frame_handler(struct scic_sds_phy *sci_phy, * state since there are no power requirements for * expander phys. */ - state = SCIC_SDS_PHY_STARTING_SUBSTATE_FINAL; + state = SCI_PHY_SUB_FINAL; } else { /* We got the IAF we can now go to the await spinup * semaphore state */ - state = SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SAS_POWER; + state = SCI_PHY_SUB_AWAIT_SAS_POWER; } - sci_base_state_machine_change_state(&sci_phy->state_machine, - state); + sci_change_state(&sci_phy->sm, state); result = SCI_SUCCESS; } else dev_warn(sciphy_to_dev(sci_phy), @@ -965,7 +938,7 @@ enum sci_status scic_sds_phy_frame_handler(struct scic_sds_phy *sci_phy, scic_sds_controller_release_frame(scic, frame_index); return result; } - case SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SIG_FIS_UF: { + case SCI_PHY_SUB_AWAIT_SIG_FIS_UF: { struct dev_to_host_fis *frame_header; u32 *fis_frame_data; struct isci_phy *iphy = sci_phy_to_iphy(sci_phy); @@ -989,8 +962,7 @@ enum sci_status scic_sds_phy_frame_handler(struct scic_sds_phy *sci_phy, fis_frame_data); /* got IAF we can now go to the await spinup semaphore state */ - sci_base_state_machine_change_state(&sci_phy->state_machine, - SCIC_SDS_PHY_STARTING_SUBSTATE_FINAL); + sci_change_state(&sci_phy->sm, SCI_PHY_SUB_FINAL); result = SCI_SUCCESS; } else @@ -1014,16 +986,15 @@ enum sci_status scic_sds_phy_frame_handler(struct scic_sds_phy *sci_phy, static void scic_sds_phy_starting_initial_substate_enter(struct sci_base_state_machine *sm) { - struct scic_sds_phy *sci_phy = container_of(sm, typeof(*sci_phy), state_machine); + struct scic_sds_phy *sci_phy = container_of(sm, typeof(*sci_phy), sm); /* This is just an temporary state go off to the starting state */ - sci_base_state_machine_change_state(&sci_phy->state_machine, - SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_OSSP_EN); + sci_change_state(&sci_phy->sm, SCI_PHY_SUB_AWAIT_OSSP_EN); } static void scic_sds_phy_starting_await_sas_power_substate_enter(struct sci_base_state_machine *sm) { - struct scic_sds_phy 
*sci_phy = container_of(sm, typeof(*sci_phy), state_machine); + struct scic_sds_phy *sci_phy = container_of(sm, typeof(*sci_phy), sm); struct scic_sds_controller *scic = sci_phy->owning_port->owning_controller; scic_sds_controller_power_control_queue_insert(scic, sci_phy); @@ -1031,7 +1002,7 @@ static void scic_sds_phy_starting_await_sas_power_substate_enter(struct sci_base static void scic_sds_phy_starting_await_sas_power_substate_exit(struct sci_base_state_machine *sm) { - struct scic_sds_phy *sci_phy = container_of(sm, typeof(*sci_phy), state_machine); + struct scic_sds_phy *sci_phy = container_of(sm, typeof(*sci_phy), sm); struct scic_sds_controller *scic = sci_phy->owning_port->owning_controller; scic_sds_controller_power_control_queue_remove(scic, sci_phy); @@ -1039,7 +1010,7 @@ static void scic_sds_phy_starting_await_sas_power_substate_exit(struct sci_base_ static void scic_sds_phy_starting_await_sata_power_substate_enter(struct sci_base_state_machine *sm) { - struct scic_sds_phy *sci_phy = container_of(sm, typeof(*sci_phy), state_machine); + struct scic_sds_phy *sci_phy = container_of(sm, typeof(*sci_phy), sm); struct scic_sds_controller *scic = sci_phy->owning_port->owning_controller; scic_sds_controller_power_control_queue_insert(scic, sci_phy); @@ -1047,7 +1018,7 @@ static void scic_sds_phy_starting_await_sata_power_substate_enter(struct sci_bas static void scic_sds_phy_starting_await_sata_power_substate_exit(struct sci_base_state_machine *sm) { - struct scic_sds_phy *sci_phy = container_of(sm, typeof(*sci_phy), state_machine); + struct scic_sds_phy *sci_phy = container_of(sm, typeof(*sci_phy), sm); struct scic_sds_controller *scic = sci_phy->owning_port->owning_controller; scic_sds_controller_power_control_queue_remove(scic, sci_phy); @@ -1055,35 +1026,35 @@ static void scic_sds_phy_starting_await_sata_power_substate_exit(struct sci_base static void scic_sds_phy_starting_await_sata_phy_substate_enter(struct sci_base_state_machine *sm) { - struct scic_sds_phy *sci_phy = container_of(sm, typeof(*sci_phy), state_machine); + struct scic_sds_phy *sci_phy = container_of(sm, typeof(*sci_phy), sm); sci_mod_timer(&sci_phy->sata_timer, SCIC_SDS_SATA_LINK_TRAINING_TIMEOUT); } static void scic_sds_phy_starting_await_sata_phy_substate_exit(struct sci_base_state_machine *sm) { - struct scic_sds_phy *sci_phy = container_of(sm, typeof(*sci_phy), state_machine); + struct scic_sds_phy *sci_phy = container_of(sm, typeof(*sci_phy), sm); sci_del_timer(&sci_phy->sata_timer); } static void scic_sds_phy_starting_await_sata_speed_substate_enter(struct sci_base_state_machine *sm) { - struct scic_sds_phy *sci_phy = container_of(sm, typeof(*sci_phy), state_machine); + struct scic_sds_phy *sci_phy = container_of(sm, typeof(*sci_phy), sm); sci_mod_timer(&sci_phy->sata_timer, SCIC_SDS_SATA_LINK_TRAINING_TIMEOUT); } static void scic_sds_phy_starting_await_sata_speed_substate_exit(struct sci_base_state_machine *sm) { - struct scic_sds_phy *sci_phy = container_of(sm, typeof(*sci_phy), state_machine); + struct scic_sds_phy *sci_phy = container_of(sm, typeof(*sci_phy), sm); sci_del_timer(&sci_phy->sata_timer); } static void scic_sds_phy_starting_await_sig_fis_uf_substate_enter(struct sci_base_state_machine *sm) { - struct scic_sds_phy *sci_phy = container_of(sm, typeof(*sci_phy), state_machine); + struct scic_sds_phy *sci_phy = container_of(sm, typeof(*sci_phy), sm); if (scic_sds_port_link_detected(sci_phy->owning_port, sci_phy)) { @@ -1103,20 +1074,19 @@ static void 
scic_sds_phy_starting_await_sig_fis_uf_substate_enter(struct sci_bas static void scic_sds_phy_starting_await_sig_fis_uf_substate_exit(struct sci_base_state_machine *sm) { - struct scic_sds_phy *sci_phy = container_of(sm, typeof(*sci_phy), state_machine); + struct scic_sds_phy *sci_phy = container_of(sm, typeof(*sci_phy), sm); sci_del_timer(&sci_phy->sata_timer); } static void scic_sds_phy_starting_final_substate_enter(struct sci_base_state_machine *sm) { - struct scic_sds_phy *sci_phy = container_of(sm, typeof(*sci_phy), state_machine); + struct scic_sds_phy *sci_phy = container_of(sm, typeof(*sci_phy), sm); /* State machine has run to completion so exit out and change * the base state machine to the ready state */ - sci_base_state_machine_change_state(&sci_phy->state_machine, - SCI_BASE_PHY_STATE_READY); + sci_change_state(&sci_phy->sm, SCI_PHY_READY); } /** @@ -1202,7 +1172,7 @@ static void scu_link_layer_tx_hard_reset( static void scic_sds_phy_stopped_state_enter(struct sci_base_state_machine *sm) { - struct scic_sds_phy *sci_phy = container_of(sm, typeof(*sci_phy), state_machine); + struct scic_sds_phy *sci_phy = container_of(sm, typeof(*sci_phy), sm); /* * @todo We need to get to the controller to place this PE in a @@ -1212,7 +1182,7 @@ static void scic_sds_phy_stopped_state_enter(struct sci_base_state_machine *sm) scu_link_layer_stop_protocol_engine(sci_phy); - if (sci_phy->state_machine.previous_state_id != SCI_BASE_PHY_STATE_INITIAL) + if (sci_phy->sm.previous_state_id != SCI_PHY_INITIAL) scic_sds_controller_link_down(scic_sds_phy_get_controller(sci_phy), phy_get_non_dummy_port(sci_phy), sci_phy); @@ -1220,7 +1190,7 @@ static void scic_sds_phy_stopped_state_enter(struct sci_base_state_machine *sm) static void scic_sds_phy_starting_state_enter(struct sci_base_state_machine *sm) { - struct scic_sds_phy *sci_phy = container_of(sm, typeof(*sci_phy), state_machine); + struct scic_sds_phy *sci_phy = container_of(sm, typeof(*sci_phy), sm); scu_link_layer_stop_protocol_engine(sci_phy); scu_link_layer_start_oob(sci_phy); @@ -1229,18 +1199,17 @@ static void scic_sds_phy_starting_state_enter(struct sci_base_state_machine *sm) sci_phy->protocol = SCIC_SDS_PHY_PROTOCOL_UNKNOWN; sci_phy->bcn_received_while_port_unassigned = false; - if (sci_phy->state_machine.previous_state_id == SCI_BASE_PHY_STATE_READY) + if (sci_phy->sm.previous_state_id == SCI_PHY_READY) scic_sds_controller_link_down(scic_sds_phy_get_controller(sci_phy), phy_get_non_dummy_port(sci_phy), sci_phy); - sci_base_state_machine_change_state(&sci_phy->state_machine, - SCIC_SDS_PHY_STARTING_SUBSTATE_INITIAL); + sci_change_state(&sci_phy->sm, SCI_PHY_SUB_INITIAL); } static void scic_sds_phy_ready_state_enter(struct sci_base_state_machine *sm) { - struct scic_sds_phy *sci_phy = container_of(sm, typeof(*sci_phy), state_machine); + struct scic_sds_phy *sci_phy = container_of(sm, typeof(*sci_phy), sm); scic_sds_controller_link_up(scic_sds_phy_get_controller(sci_phy), phy_get_non_dummy_port(sci_phy), @@ -1250,14 +1219,14 @@ static void scic_sds_phy_ready_state_enter(struct sci_base_state_machine *sm) static void scic_sds_phy_ready_state_exit(struct sci_base_state_machine *sm) { - struct scic_sds_phy *sci_phy = container_of(sm, typeof(*sci_phy), state_machine); + struct scic_sds_phy *sci_phy = container_of(sm, typeof(*sci_phy), sm); scic_sds_phy_suspend(sci_phy); } static void scic_sds_phy_resetting_state_enter(struct sci_base_state_machine *sm) { - struct scic_sds_phy *sci_phy = container_of(sm, typeof(*sci_phy), state_machine); + struct 
scic_sds_phy *sci_phy = container_of(sm, typeof(*sci_phy), sm); /* The phy is being reset, therefore deactivate it from the port. In * the resetting state we don't notify the user regarding link up and @@ -1271,66 +1240,65 @@ static void scic_sds_phy_resetting_state_enter(struct sci_base_state_machine *sm /* The SCU does not need to have a discrete reset state so * just go back to the starting state. */ - sci_base_state_machine_change_state(&sci_phy->state_machine, - SCI_BASE_PHY_STATE_STARTING); + sci_change_state(&sci_phy->sm, SCI_PHY_STARTING); } } static const struct sci_base_state scic_sds_phy_state_table[] = { - [SCI_BASE_PHY_STATE_INITIAL] = { }, - [SCI_BASE_PHY_STATE_STOPPED] = { + [SCI_PHY_INITIAL] = { }, + [SCI_PHY_STOPPED] = { .enter_state = scic_sds_phy_stopped_state_enter, }, - [SCI_BASE_PHY_STATE_STARTING] = { + [SCI_PHY_STARTING] = { .enter_state = scic_sds_phy_starting_state_enter, }, - [SCIC_SDS_PHY_STARTING_SUBSTATE_INITIAL] = { + [SCI_PHY_SUB_INITIAL] = { .enter_state = scic_sds_phy_starting_initial_substate_enter, }, - [SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_OSSP_EN] = { }, - [SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SAS_SPEED_EN] = { }, - [SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_IAF_UF] = { }, - [SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SAS_POWER] = { + [SCI_PHY_SUB_AWAIT_OSSP_EN] = { }, + [SCI_PHY_SUB_AWAIT_SAS_SPEED_EN] = { }, + [SCI_PHY_SUB_AWAIT_IAF_UF] = { }, + [SCI_PHY_SUB_AWAIT_SAS_POWER] = { .enter_state = scic_sds_phy_starting_await_sas_power_substate_enter, .exit_state = scic_sds_phy_starting_await_sas_power_substate_exit, }, - [SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SATA_POWER] = { + [SCI_PHY_SUB_AWAIT_SATA_POWER] = { .enter_state = scic_sds_phy_starting_await_sata_power_substate_enter, .exit_state = scic_sds_phy_starting_await_sata_power_substate_exit }, - [SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SATA_PHY_EN] = { + [SCI_PHY_SUB_AWAIT_SATA_PHY_EN] = { .enter_state = scic_sds_phy_starting_await_sata_phy_substate_enter, .exit_state = scic_sds_phy_starting_await_sata_phy_substate_exit }, - [SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SATA_SPEED_EN] = { + [SCI_PHY_SUB_AWAIT_SATA_SPEED_EN] = { .enter_state = scic_sds_phy_starting_await_sata_speed_substate_enter, .exit_state = scic_sds_phy_starting_await_sata_speed_substate_exit }, - [SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SIG_FIS_UF] = { + [SCI_PHY_SUB_AWAIT_SIG_FIS_UF] = { .enter_state = scic_sds_phy_starting_await_sig_fis_uf_substate_enter, .exit_state = scic_sds_phy_starting_await_sig_fis_uf_substate_exit }, - [SCIC_SDS_PHY_STARTING_SUBSTATE_FINAL] = { + [SCI_PHY_SUB_FINAL] = { .enter_state = scic_sds_phy_starting_final_substate_enter, }, - [SCI_BASE_PHY_STATE_READY] = { + [SCI_PHY_READY] = { .enter_state = scic_sds_phy_ready_state_enter, .exit_state = scic_sds_phy_ready_state_exit, }, - [SCI_BASE_PHY_STATE_RESETTING] = { + [SCI_PHY_RESETTING] = { .enter_state = scic_sds_phy_resetting_state_enter, }, - [SCI_BASE_PHY_STATE_FINAL] = { }, + [SCI_PHY_FINAL] = { }, }; void scic_sds_phy_construct(struct scic_sds_phy *sci_phy, struct scic_sds_port *owning_port, u8 phy_index) { - sci_base_state_machine_construct(&sci_phy->state_machine, + sci_base_state_machine_construct(&sci_phy->sm, scic_sds_phy_state_table, - SCI_BASE_PHY_STATE_INITIAL); + SCI_PHY_INITIAL); - sci_base_state_machine_start(&sci_phy->state_machine); + sci_base_state_machine_start(&sci_phy->sm); /* Copy the rest of the input data to our locals */ sci_phy->owning_port = owning_port; diff --git a/drivers/scsi/isci/phy.h b/drivers/scsi/isci/phy.h index da3f0f1d56d..9d21d2754db 
100644 --- a/drivers/scsi/isci/phy.h +++ b/drivers/scsi/isci/phy.h @@ -94,7 +94,7 @@ struct scic_sds_phy { /** * This field contains the information for the base phy state machine. */ - struct sci_base_state_machine state_machine; + struct sci_base_state_machine sm; /** * This field specifies the port object that owns/contains this phy. @@ -410,7 +410,7 @@ enum scic_sds_phy_states { /** * Simply the initial state for the base domain state machine. */ - SCI_BASE_PHY_STATE_INITIAL, + SCI_PHY_INITIAL, /** * This state indicates that the phy has successfully been stopped. @@ -420,7 +420,7 @@ enum scic_sds_phy_states { * This state is entered from the READY state. * This state is entered from the RESETTING state. */ - SCI_BASE_PHY_STATE_STOPPED, + SCI_PHY_STOPPED, /** * This state indicates that the phy is in the process of becomming @@ -429,57 +429,57 @@ enum scic_sds_phy_states { * This state is entered from the READY state. * This state is entered from the RESETTING state. */ - SCI_BASE_PHY_STATE_STARTING, + SCI_PHY_STARTING, /** * Initial state */ - SCIC_SDS_PHY_STARTING_SUBSTATE_INITIAL, + SCI_PHY_SUB_INITIAL, /** * Wait state for the hardware OSSP event type notification */ - SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_OSSP_EN, + SCI_PHY_SUB_AWAIT_OSSP_EN, /** * Wait state for the PHY speed notification */ - SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SAS_SPEED_EN, + SCI_PHY_SUB_AWAIT_SAS_SPEED_EN, /** * Wait state for the IAF Unsolicited frame notification */ - SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_IAF_UF, + SCI_PHY_SUB_AWAIT_IAF_UF, /** * Wait state for the request to consume power */ - SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SAS_POWER, + SCI_PHY_SUB_AWAIT_SAS_POWER, /** * Wait state for request to consume power */ - SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SATA_POWER, + SCI_PHY_SUB_AWAIT_SATA_POWER, /** * Wait state for the SATA PHY notification */ - SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SATA_PHY_EN, + SCI_PHY_SUB_AWAIT_SATA_PHY_EN, /** * Wait for the SATA PHY speed notification */ - SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SATA_SPEED_EN, + SCI_PHY_SUB_AWAIT_SATA_SPEED_EN, /** * Wait state for the SIGNATURE FIS unsolicited frame notification */ - SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SIG_FIS_UF, + SCI_PHY_SUB_AWAIT_SIG_FIS_UF, /** * Exit state for this state machine */ - SCIC_SDS_PHY_STARTING_SUBSTATE_FINAL, + SCI_PHY_SUB_FINAL, /** * This state indicates the the phy is now ready. Thus, the user @@ -487,19 +487,19 @@ enum scic_sds_phy_states { * is currently part of a valid port. * This state is entered from the STARTING state. */ - SCI_BASE_PHY_STATE_READY, + SCI_PHY_READY, /** * This state indicates that the phy is in the process of being reset. * In this state no new IO operations are permitted on this phy. * This state is entered from the READY state. */ - SCI_BASE_PHY_STATE_RESETTING, + SCI_PHY_RESETTING, /** * Simply the final state for the base phy state machine. 
*/ - SCI_BASE_PHY_STATE_FINAL, + SCI_PHY_FINAL, }; /** diff --git a/drivers/scsi/isci/port.c b/drivers/scsi/isci/port.c index 8d88ca21a51..6370b93bd6a 100644 --- a/drivers/scsi/isci/port.c +++ b/drivers/scsi/isci/port.c @@ -807,10 +807,10 @@ static void scic_sds_port_invalid_link_up(struct scic_sds_port *sci_port, static bool is_port_ready_state(enum scic_sds_port_states state) { switch (state) { - case SCI_BASE_PORT_STATE_READY: - case SCIC_SDS_PORT_READY_SUBSTATE_WAITING: - case SCIC_SDS_PORT_READY_SUBSTATE_OPERATIONAL: - case SCIC_SDS_PORT_READY_SUBSTATE_CONFIGURING: + case SCI_PORT_READY: + case SCI_PORT_SUB_WAITING: + case SCI_PORT_SUB_OPERATIONAL: + case SCI_PORT_SUB_CONFIGURING: return true; default: return false; @@ -821,13 +821,13 @@ static bool is_port_ready_state(enum scic_sds_port_states state) static void port_state_machine_change(struct scic_sds_port *sci_port, enum scic_sds_port_states state) { - struct sci_base_state_machine *sm = &sci_port->state_machine; + struct sci_base_state_machine *sm = &sci_port->sm; enum scic_sds_port_states old_state = sm->current_state_id; if (is_port_ready_state(old_state) && !is_port_ready_state(state)) sci_port->ready_exit = true; - sci_base_state_machine_change_state(sm, state); + sci_change_state(sm, state); sci_port->ready_exit = false; } @@ -862,11 +862,11 @@ static void scic_sds_port_general_link_up_handler(struct scic_sds_port *sci_port if ((phy_sas_address.high == port_sas_address.high && phy_sas_address.low == port_sas_address.low) || sci_port->active_phy_mask == 0) { - struct sci_base_state_machine *sm = &sci_port->state_machine; + struct sci_base_state_machine *sm = &sci_port->sm; scic_sds_port_activate_phy(sci_port, sci_phy, do_notify_user); - if (sm->current_state_id == SCI_BASE_PORT_STATE_RESETTING) - port_state_machine_change(sci_port, SCI_BASE_PORT_STATE_READY); + if (sm->current_state_id == SCI_PORT_RESETTING) + port_state_machine_change(sci_port, SCI_PORT_READY); } else scic_sds_port_invalid_link_up(sci_port, sci_phy); } @@ -938,14 +938,14 @@ static void port_timeout(unsigned long data) if (tmr->cancel) goto done; - current_state = sci_base_state_machine_get_state(&sci_port->state_machine); + current_state = sci_port->sm.current_state_id; - if (current_state == SCI_BASE_PORT_STATE_RESETTING) { + if (current_state == SCI_PORT_RESETTING) { /* if the port is still in the resetting state then the timeout * fired before the reset completed. */ - port_state_machine_change(sci_port, SCI_BASE_PORT_STATE_FAILED); - } else if (current_state == SCI_BASE_PORT_STATE_STOPPED) { + port_state_machine_change(sci_port, SCI_PORT_FAILED); + } else if (current_state == SCI_PORT_STOPPED) { /* if the port is stopped then the start request failed In this * case stay in the stopped state. 
*/ @@ -953,7 +953,7 @@ static void port_timeout(unsigned long data) "%s: SCIC Port 0x%p failed to stop before tiemout.\n", __func__, sci_port); - } else if (current_state == SCI_BASE_PORT_STATE_STOPPING) { + } else if (current_state == SCI_PORT_STOPPING) { /* if the port is still stopping then the stop has not completed */ isci_port_stop_complete(sci_port->owning_controller, sci_port, @@ -1139,7 +1139,7 @@ scic_sds_port_resume_port_task_scheduler(struct scic_sds_port *port) static void scic_sds_port_ready_substate_waiting_enter(struct sci_base_state_machine *sm) { - struct scic_sds_port *sci_port = container_of(sm, typeof(*sci_port), state_machine); + struct scic_sds_port *sci_port = container_of(sm, typeof(*sci_port), sm); scic_sds_port_suspend_port_task_scheduler(sci_port); @@ -1148,14 +1148,14 @@ static void scic_sds_port_ready_substate_waiting_enter(struct sci_base_state_mac if (sci_port->active_phy_mask != 0) { /* At least one of the phys on the port is ready */ port_state_machine_change(sci_port, - SCIC_SDS_PORT_READY_SUBSTATE_OPERATIONAL); + SCI_PORT_SUB_OPERATIONAL); } } static void scic_sds_port_ready_substate_operational_enter(struct sci_base_state_machine *sm) { u32 index; - struct scic_sds_port *sci_port = container_of(sm, typeof(*sci_port), state_machine); + struct scic_sds_port *sci_port = container_of(sm, typeof(*sci_port), sm); struct scic_sds_controller *scic = sci_port->owning_controller; struct isci_host *ihost = scic_to_ihost(scic); struct isci_port *iport = sci_port_to_iport(sci_port); @@ -1211,12 +1211,12 @@ static void scic_sds_port_invalidate_dummy_remote_node(struct scic_sds_port *sci * @object: This is the object which is cast to a struct scic_sds_port object. * * This method will perform the actions required by the struct scic_sds_port on - * exiting the SCIC_SDS_PORT_READY_SUBSTATE_OPERATIONAL. This function reports + * exiting the SCI_PORT_SUB_OPERATIONAL. This function reports * the port not ready and suspends the port task scheduler. 
none */ static void scic_sds_port_ready_substate_operational_exit(struct sci_base_state_machine *sm) { - struct scic_sds_port *sci_port = container_of(sm, typeof(*sci_port), state_machine); + struct scic_sds_port *sci_port = container_of(sm, typeof(*sci_port), sm); struct scic_sds_controller *scic = sci_port->owning_controller; struct isci_host *ihost = scic_to_ihost(scic); struct isci_port *iport = sci_port_to_iport(sci_port); @@ -1236,7 +1236,7 @@ static void scic_sds_port_ready_substate_operational_exit(struct sci_base_state_ static void scic_sds_port_ready_substate_configuring_enter(struct sci_base_state_machine *sm) { - struct scic_sds_port *sci_port = container_of(sm, typeof(*sci_port), state_machine); + struct scic_sds_port *sci_port = container_of(sm, typeof(*sci_port), sm); struct scic_sds_controller *scic = sci_port->owning_controller; struct isci_host *ihost = scic_to_ihost(scic); struct isci_port *iport = sci_port_to_iport(sci_port); @@ -1245,15 +1245,15 @@ static void scic_sds_port_ready_substate_configuring_enter(struct sci_base_state isci_port_not_ready(ihost, iport); port_state_machine_change(sci_port, - SCIC_SDS_PORT_READY_SUBSTATE_WAITING); + SCI_PORT_SUB_WAITING); } else if (sci_port->started_request_count == 0) port_state_machine_change(sci_port, - SCIC_SDS_PORT_READY_SUBSTATE_OPERATIONAL); + SCI_PORT_SUB_OPERATIONAL); } static void scic_sds_port_ready_substate_configuring_exit(struct sci_base_state_machine *sm) { - struct scic_sds_port *sci_port = container_of(sm, typeof(*sci_port), state_machine); + struct scic_sds_port *sci_port = container_of(sm, typeof(*sci_port), sm); scic_sds_port_suspend_port_task_scheduler(sci_port); if (sci_port->ready_exit) @@ -1267,8 +1267,8 @@ enum sci_status scic_sds_port_start(struct scic_sds_port *sci_port) enum scic_sds_port_states state; u32 phy_mask; - state = sci_port->state_machine.current_state_id; - if (state != SCI_BASE_PORT_STATE_STOPPED) { + state = sci_port->sm.current_state_id; + if (state != SCI_PORT_STOPPED) { dev_warn(sciport_to_dev(sci_port), "%s: in wrong state: %d\n", __func__, state); return SCI_FAILURE_INVALID_STATE; @@ -1315,7 +1315,7 @@ enum sci_status scic_sds_port_start(struct scic_sds_port *sci_port) */ if (scic_sds_port_is_phy_mask_valid(sci_port, phy_mask) == true) { port_state_machine_change(sci_port, - SCI_BASE_PORT_STATE_READY); + SCI_PORT_READY); return SCI_SUCCESS; } @@ -1332,16 +1332,16 @@ enum sci_status scic_sds_port_stop(struct scic_sds_port *sci_port) { enum scic_sds_port_states state; - state = sci_port->state_machine.current_state_id; + state = sci_port->sm.current_state_id; switch (state) { - case SCI_BASE_PORT_STATE_STOPPED: + case SCI_PORT_STOPPED: return SCI_SUCCESS; - case SCIC_SDS_PORT_READY_SUBSTATE_WAITING: - case SCIC_SDS_PORT_READY_SUBSTATE_OPERATIONAL: - case SCIC_SDS_PORT_READY_SUBSTATE_CONFIGURING: - case SCI_BASE_PORT_STATE_RESETTING: + case SCI_PORT_SUB_WAITING: + case SCI_PORT_SUB_OPERATIONAL: + case SCI_PORT_SUB_CONFIGURING: + case SCI_PORT_RESETTING: port_state_machine_change(sci_port, - SCI_BASE_PORT_STATE_STOPPING); + SCI_PORT_STOPPING); return SCI_SUCCESS; default: dev_warn(sciport_to_dev(sci_port), @@ -1357,8 +1357,8 @@ static enum sci_status scic_port_hard_reset(struct scic_sds_port *sci_port, u32 enum scic_sds_port_states state; u32 phy_index; - state = sci_port->state_machine.current_state_id; - if (state != SCIC_SDS_PORT_READY_SUBSTATE_OPERATIONAL) { + state = sci_port->sm.current_state_id; + if (state != SCI_PORT_SUB_OPERATIONAL) { dev_warn(sciport_to_dev(sci_port), "%s: 
in wrong state: %d\n", __func__, state); return SCI_FAILURE_INVALID_STATE; @@ -1389,7 +1389,7 @@ static enum sci_status scic_port_hard_reset(struct scic_sds_port *sci_port, u32 sci_port->not_ready_reason = SCIC_PORT_NOT_READY_HARD_RESET_REQUESTED; port_state_machine_change(sci_port, - SCI_BASE_PORT_STATE_RESETTING); + SCI_PORT_RESETTING); return SCI_SUCCESS; } @@ -1408,9 +1408,9 @@ enum sci_status scic_sds_port_add_phy(struct scic_sds_port *sci_port, enum sci_status status; enum scic_sds_port_states state; - state = sci_port->state_machine.current_state_id; + state = sci_port->sm.current_state_id; switch (state) { - case SCI_BASE_PORT_STATE_STOPPED: { + case SCI_PORT_STOPPED: { struct sci_sas_address port_sas_address; /* Read the port assigned SAS Address if there is one */ @@ -1430,8 +1430,8 @@ enum sci_status scic_sds_port_add_phy(struct scic_sds_port *sci_port, } return scic_sds_port_set_phy(sci_port, sci_phy); } - case SCIC_SDS_PORT_READY_SUBSTATE_WAITING: - case SCIC_SDS_PORT_READY_SUBSTATE_OPERATIONAL: + case SCI_PORT_SUB_WAITING: + case SCI_PORT_SUB_OPERATIONAL: status = scic_sds_port_set_phy(sci_port, sci_phy); if (status != SCI_SUCCESS) @@ -1439,10 +1439,10 @@ enum sci_status scic_sds_port_add_phy(struct scic_sds_port *sci_port, scic_sds_port_general_link_up_handler(sci_port, sci_phy, true); sci_port->not_ready_reason = SCIC_PORT_NOT_READY_RECONFIGURING; - port_state_machine_change(sci_port, SCIC_SDS_PORT_READY_SUBSTATE_CONFIGURING); + port_state_machine_change(sci_port, SCI_PORT_SUB_CONFIGURING); return status; - case SCIC_SDS_PORT_READY_SUBSTATE_CONFIGURING: + case SCI_PORT_SUB_CONFIGURING: status = scic_sds_port_set_phy(sci_port, sci_phy); if (status != SCI_SUCCESS) @@ -1453,7 +1453,7 @@ enum sci_status scic_sds_port_add_phy(struct scic_sds_port *sci_port, * the port. 
*/ port_state_machine_change(sci_port, - SCIC_SDS_PORT_READY_SUBSTATE_CONFIGURING); + SCI_PORT_SUB_CONFIGURING); return SCI_SUCCESS; default: dev_warn(sciport_to_dev(sci_port), @@ -1477,12 +1477,12 @@ enum sci_status scic_sds_port_remove_phy(struct scic_sds_port *sci_port, enum sci_status status; enum scic_sds_port_states state; - state = sci_port->state_machine.current_state_id; + state = sci_port->sm.current_state_id; switch (state) { - case SCI_BASE_PORT_STATE_STOPPED: + case SCI_PORT_STOPPED: return scic_sds_port_clear_phy(sci_port, sci_phy); - case SCIC_SDS_PORT_READY_SUBSTATE_OPERATIONAL: + case SCI_PORT_SUB_OPERATIONAL: status = scic_sds_port_clear_phy(sci_port, sci_phy); if (status != SCI_SUCCESS) return status; @@ -1490,9 +1490,9 @@ enum sci_status scic_sds_port_remove_phy(struct scic_sds_port *sci_port, scic_sds_port_deactivate_phy(sci_port, sci_phy, true); sci_port->not_ready_reason = SCIC_PORT_NOT_READY_RECONFIGURING; port_state_machine_change(sci_port, - SCIC_SDS_PORT_READY_SUBSTATE_CONFIGURING); + SCI_PORT_SUB_CONFIGURING); return SCI_SUCCESS; - case SCIC_SDS_PORT_READY_SUBSTATE_CONFIGURING: + case SCI_PORT_SUB_CONFIGURING: status = scic_sds_port_clear_phy(sci_port, sci_phy); if (status != SCI_SUCCESS) @@ -1503,7 +1503,7 @@ enum sci_status scic_sds_port_remove_phy(struct scic_sds_port *sci_port, * the port */ port_state_machine_change(sci_port, - SCIC_SDS_PORT_READY_SUBSTATE_CONFIGURING); + SCI_PORT_SUB_CONFIGURING); return SCI_SUCCESS; default: dev_warn(sciport_to_dev(sci_port), @@ -1517,21 +1517,21 @@ enum sci_status scic_sds_port_link_up(struct scic_sds_port *sci_port, { enum scic_sds_port_states state; - state = sci_port->state_machine.current_state_id; + state = sci_port->sm.current_state_id; switch (state) { - case SCIC_SDS_PORT_READY_SUBSTATE_WAITING: + case SCI_PORT_SUB_WAITING: /* Since this is the first phy going link up for the port we * can just enable it and continue */ scic_sds_port_activate_phy(sci_port, sci_phy, true); port_state_machine_change(sci_port, - SCIC_SDS_PORT_READY_SUBSTATE_OPERATIONAL); + SCI_PORT_SUB_OPERATIONAL); return SCI_SUCCESS; - case SCIC_SDS_PORT_READY_SUBSTATE_OPERATIONAL: + case SCI_PORT_SUB_OPERATIONAL: scic_sds_port_general_link_up_handler(sci_port, sci_phy, true); return SCI_SUCCESS; - case SCI_BASE_PORT_STATE_RESETTING: + case SCI_PORT_RESETTING: /* TODO We should make sure that the phy that has gone * link up is the same one on which we sent the reset. It is * possible that the phy on which we sent the reset is not the @@ -1560,9 +1560,9 @@ enum sci_status scic_sds_port_link_down(struct scic_sds_port *sci_port, { enum scic_sds_port_states state; - state = sci_port->state_machine.current_state_id; + state = sci_port->sm.current_state_id; switch (state) { - case SCIC_SDS_PORT_READY_SUBSTATE_OPERATIONAL: + case SCI_PORT_SUB_OPERATIONAL: scic_sds_port_deactivate_phy(sci_port, sci_phy, true); /* If there are no active phys left in the port, then @@ -1571,9 +1571,9 @@ enum sci_status scic_sds_port_link_down(struct scic_sds_port *sci_port, */ if (sci_port->active_phy_mask == 0) port_state_machine_change(sci_port, - SCIC_SDS_PORT_READY_SUBSTATE_WAITING); + SCI_PORT_SUB_WAITING); return SCI_SUCCESS; - case SCI_BASE_PORT_STATE_RESETTING: + case SCI_PORT_RESETTING: /* In the resetting state we don't notify the user regarding * link up and link down notifications. 
*/ scic_sds_port_deactivate_phy(sci_port, sci_phy, false); @@ -1591,11 +1591,11 @@ enum sci_status scic_sds_port_start_io(struct scic_sds_port *sci_port, { enum scic_sds_port_states state; - state = sci_port->state_machine.current_state_id; + state = sci_port->sm.current_state_id; switch (state) { - case SCIC_SDS_PORT_READY_SUBSTATE_WAITING: + case SCI_PORT_SUB_WAITING: return SCI_FAILURE_INVALID_STATE; - case SCIC_SDS_PORT_READY_SUBSTATE_OPERATIONAL: + case SCI_PORT_SUB_OPERATIONAL: sci_port->started_request_count++; return SCI_SUCCESS; default: @@ -1611,31 +1611,31 @@ enum sci_status scic_sds_port_complete_io(struct scic_sds_port *sci_port, { enum scic_sds_port_states state; - state = sci_port->state_machine.current_state_id; + state = sci_port->sm.current_state_id; switch (state) { - case SCI_BASE_PORT_STATE_STOPPED: + case SCI_PORT_STOPPED: dev_warn(sciport_to_dev(sci_port), "%s: in wrong state: %d\n", __func__, state); return SCI_FAILURE_INVALID_STATE; - case SCI_BASE_PORT_STATE_STOPPING: + case SCI_PORT_STOPPING: scic_sds_port_decrement_request_count(sci_port); if (sci_port->started_request_count == 0) port_state_machine_change(sci_port, - SCI_BASE_PORT_STATE_STOPPED); + SCI_PORT_STOPPED); break; - case SCI_BASE_PORT_STATE_READY: - case SCI_BASE_PORT_STATE_RESETTING: - case SCI_BASE_PORT_STATE_FAILED: - case SCIC_SDS_PORT_READY_SUBSTATE_WAITING: - case SCIC_SDS_PORT_READY_SUBSTATE_OPERATIONAL: + case SCI_PORT_READY: + case SCI_PORT_RESETTING: + case SCI_PORT_FAILED: + case SCI_PORT_SUB_WAITING: + case SCI_PORT_SUB_OPERATIONAL: scic_sds_port_decrement_request_count(sci_port); break; - case SCIC_SDS_PORT_READY_SUBSTATE_CONFIGURING: + case SCI_PORT_SUB_CONFIGURING: scic_sds_port_decrement_request_count(sci_port); if (sci_port->started_request_count == 0) { port_state_machine_change(sci_port, - SCIC_SDS_PORT_READY_SUBSTATE_OPERATIONAL); + SCI_PORT_SUB_OPERATIONAL); } break; } @@ -1707,9 +1707,9 @@ static void scic_sds_port_post_dummy_remote_node(struct scic_sds_port *sci_port) static void scic_sds_port_stopped_state_enter(struct sci_base_state_machine *sm) { - struct scic_sds_port *sci_port = container_of(sm, typeof(*sci_port), state_machine); + struct scic_sds_port *sci_port = container_of(sm, typeof(*sci_port), sm); - if (sci_port->state_machine.previous_state_id == SCI_BASE_PORT_STATE_STOPPING) { + if (sci_port->sm.previous_state_id == SCI_PORT_STOPPING) { /* * If we enter this state becasuse of a request to stop * the port then we want to disable the hardwares port @@ -1720,7 +1720,7 @@ static void scic_sds_port_stopped_state_enter(struct sci_base_state_machine *sm) static void scic_sds_port_stopped_state_exit(struct sci_base_state_machine *sm) { - struct scic_sds_port *sci_port = container_of(sm, typeof(*sci_port), state_machine); + struct scic_sds_port *sci_port = container_of(sm, typeof(*sci_port), sm); /* Enable and suspend the port task scheduler */ scic_sds_port_enable_port_task_scheduler(sci_port); @@ -1728,14 +1728,14 @@ static void scic_sds_port_stopped_state_exit(struct sci_base_state_machine *sm) static void scic_sds_port_ready_state_enter(struct sci_base_state_machine *sm) { - struct scic_sds_port *sci_port = container_of(sm, typeof(*sci_port), state_machine); + struct scic_sds_port *sci_port = container_of(sm, typeof(*sci_port), sm); struct scic_sds_controller *scic = sci_port->owning_controller; struct isci_host *ihost = scic_to_ihost(scic); struct isci_port *iport = sci_port_to_iport(sci_port); u32 prev_state; - prev_state = sci_port->state_machine.previous_state_id; - 
if (prev_state == SCI_BASE_PORT_STATE_RESETTING) + prev_state = sci_port->sm.previous_state_id; + if (prev_state == SCI_PORT_RESETTING) isci_port_hard_reset_complete(iport, SCI_SUCCESS); else isci_port_not_ready(ihost, iport); @@ -1745,19 +1745,19 @@ static void scic_sds_port_ready_state_enter(struct sci_base_state_machine *sm) /* Start the ready substate machine */ port_state_machine_change(sci_port, - SCIC_SDS_PORT_READY_SUBSTATE_WAITING); + SCI_PORT_SUB_WAITING); } static void scic_sds_port_resetting_state_exit(struct sci_base_state_machine *sm) { - struct scic_sds_port *sci_port = container_of(sm, typeof(*sci_port), state_machine); + struct scic_sds_port *sci_port = container_of(sm, typeof(*sci_port), sm); sci_del_timer(&sci_port->timer); } static void scic_sds_port_stopping_state_exit(struct sci_base_state_machine *sm) { - struct scic_sds_port *sci_port = container_of(sm, typeof(*sci_port), state_machine); + struct scic_sds_port *sci_port = container_of(sm, typeof(*sci_port), sm); sci_del_timer(&sci_port->timer); @@ -1766,7 +1766,7 @@ static void scic_sds_port_stopping_state_exit(struct sci_base_state_machine *sm) static void scic_sds_port_failed_state_enter(struct sci_base_state_machine *sm) { - struct scic_sds_port *sci_port = container_of(sm, typeof(*sci_port), state_machine); + struct scic_sds_port *sci_port = container_of(sm, typeof(*sci_port), sm); struct isci_port *iport = sci_port_to_iport(sci_port); isci_port_hard_reset_complete(iport, SCI_FAILURE_TIMEOUT); @@ -1775,31 +1775,31 @@ static void scic_sds_port_failed_state_enter(struct sci_base_state_machine *sm) /* --------------------------------------------------------------------------- */ static const struct sci_base_state scic_sds_port_state_table[] = { - [SCI_BASE_PORT_STATE_STOPPED] = { + [SCI_PORT_STOPPED] = { .enter_state = scic_sds_port_stopped_state_enter, .exit_state = scic_sds_port_stopped_state_exit }, - [SCI_BASE_PORT_STATE_STOPPING] = { + [SCI_PORT_STOPPING] = { .exit_state = scic_sds_port_stopping_state_exit }, - [SCI_BASE_PORT_STATE_READY] = { + [SCI_PORT_READY] = { .enter_state = scic_sds_port_ready_state_enter, }, - [SCIC_SDS_PORT_READY_SUBSTATE_WAITING] = { + [SCI_PORT_SUB_WAITING] = { .enter_state = scic_sds_port_ready_substate_waiting_enter, }, - [SCIC_SDS_PORT_READY_SUBSTATE_OPERATIONAL] = { + [SCI_PORT_SUB_OPERATIONAL] = { .enter_state = scic_sds_port_ready_substate_operational_enter, .exit_state = scic_sds_port_ready_substate_operational_exit }, - [SCIC_SDS_PORT_READY_SUBSTATE_CONFIGURING] = { + [SCI_PORT_SUB_CONFIGURING] = { .enter_state = scic_sds_port_ready_substate_configuring_enter, .exit_state = scic_sds_port_ready_substate_configuring_exit }, - [SCI_BASE_PORT_STATE_RESETTING] = { + [SCI_PORT_RESETTING] = { .exit_state = scic_sds_port_resetting_state_exit }, - [SCI_BASE_PORT_STATE_FAILED] = { + [SCI_PORT_FAILED] = { .enter_state = scic_sds_port_failed_state_enter, } }; @@ -1807,11 +1807,11 @@ static const struct sci_base_state scic_sds_port_state_table[] = { void scic_sds_port_construct(struct scic_sds_port *sci_port, u8 index, struct scic_sds_controller *scic) { - sci_base_state_machine_construct(&sci_port->state_machine, + sci_base_state_machine_construct(&sci_port->sm, scic_sds_port_state_table, - SCI_BASE_PORT_STATE_STOPPED); + SCI_PORT_STOPPED); - sci_base_state_machine_start(&sci_port->state_machine); + sci_base_state_machine_start(&sci_port->sm); sci_port->logical_port_index = SCIC_SDS_DUMMY_PORT; sci_port->physical_port_index = index; diff --git a/drivers/scsi/isci/port.h 
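Throughout port.c above, transitions now go through sci_change_state() against scic_sds_port_state_table, a table of per-state enter/exit callbacks. A minimal model of that table-driven dispatch is sketched below, under the assumption (consistent with the handlers above, which read previous_state_id on entry) that a transition runs the old state's exit hook, records previous_state_id, then runs the new state's enter hook; the types and the sci_change_state() body here are illustrative, not the driver's actual implementation.

struct sci_base_state_machine;

typedef void (*sci_state_transition_t)(struct sci_base_state_machine *sm);

/* One row of a state table: optional enter/exit hooks for that state. */
struct sci_base_state {
	sci_state_transition_t enter_state;	/* may be NULL */
	sci_state_transition_t exit_state;	/* may be NULL */
};

struct sci_base_state_machine {
	const struct sci_base_state *state_table;
	unsigned int current_state_id;
	unsigned int previous_state_id;
};

/* Assumed transition semantics: exit the old state, remember it as
 * previous_state_id, then enter the new state. */
void sci_change_state(struct sci_base_state_machine *sm, unsigned int next)
{
	const struct sci_base_state *old = &sm->state_table[sm->current_state_id];
	const struct sci_base_state *new_state = &sm->state_table[next];

	if (old->exit_state)
		old->exit_state(sm);

	sm->previous_state_id = sm->current_state_id;
	sm->current_state_id = next;

	if (new_state->enter_state)
		new_state->enter_state(sm);
}

With this model, a call such as sci_change_state(&sci_port->sm, SCI_PORT_READY) lands in scic_sds_port_ready_state_enter(), which is exactly the path the port handlers above rely on.
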
b/drivers/scsi/isci/port.h index 9a6912855fc..fee6d80df76 100644 --- a/drivers/scsi/isci/port.h +++ b/drivers/scsi/isci/port.h @@ -84,7 +84,7 @@ struct scic_sds_port { /** * This field contains the information for the base port state machine. */ - struct sci_base_state_machine state_machine; + struct sci_base_state_machine sm; bool ready_exit; @@ -224,7 +224,7 @@ enum scic_sds_port_states { * In this state no new IO operations are permitted. * This state is entered from the STOPPING state. */ - SCI_BASE_PORT_STATE_STOPPED, + SCI_PORT_STOPPED, /** * This state indicates that the port is in the process of stopping. @@ -232,33 +232,33 @@ enum scic_sds_port_states { * operations are allowed to complete. * This state is entered from the READY state. */ - SCI_BASE_PORT_STATE_STOPPING, + SCI_PORT_STOPPING, /** * This state indicates the port is now ready. Thus, the user is * able to perform IO operations on this port. * This state is entered from the STARTING state. */ - SCI_BASE_PORT_STATE_READY, + SCI_PORT_READY, /** * The substate where the port is started and ready but has no * active phys. */ - SCIC_SDS_PORT_READY_SUBSTATE_WAITING, + SCI_PORT_SUB_WAITING, /** * The substate where the port is started and ready and there is * at least one phy operational. */ - SCIC_SDS_PORT_READY_SUBSTATE_OPERATIONAL, + SCI_PORT_SUB_OPERATIONAL, /** * The substate where the port is started and there was an * add/remove phy event. This state is only used in Automatic * Port Configuration Mode (APC) */ - SCIC_SDS_PORT_READY_SUBSTATE_CONFIGURING, + SCI_PORT_SUB_CONFIGURING, /** * This state indicates the port is in the process of performing a hard @@ -266,14 +266,14 @@ enum scic_sds_port_states { * port. * This state is entered from the READY state. */ - SCI_BASE_PORT_STATE_RESETTING, + SCI_PORT_RESETTING, /** * This state indicates the port has failed a reset request. This state * is entered when a port reset request times out. * This state is entered from the RESETTING state. 
*/ - SCI_BASE_PORT_STATE_FAILED, + SCI_PORT_FAILED, }; diff --git a/drivers/scsi/isci/port_config.c b/drivers/scsi/isci/port_config.c index 38401f644e1..fcb8f030b7a 100644 --- a/drivers/scsi/isci/port_config.c +++ b/drivers/scsi/isci/port_config.c @@ -661,13 +661,13 @@ static void scic_sds_apc_agent_link_up(struct scic_sds_controller *scic, scic_sds_apc_agent_configure_ports(scic, port_agent, sci_phy, true); } else { /* the phy is already the part of the port */ - u32 port_state = sci_port->state_machine.current_state_id; + u32 port_state = sci_port->sm.current_state_id; /* if the PORT'S state is resetting then the link up is from * port hard reset in this case, we need to tell the port * that link up is recieved */ - BUG_ON(port_state != SCI_BASE_PORT_STATE_RESETTING); + BUG_ON(port_state != SCI_PORT_RESETTING); port_agent->phy_ready_mask |= 1 << phy_index; scic_sds_port_link_up(sci_port, sci_phy); } diff --git a/drivers/scsi/isci/remote_device.c b/drivers/scsi/isci/remote_device.c index 68b63b04be1..6c93f20f3dd 100644 --- a/drivers/scsi/isci/remote_device.c +++ b/drivers/scsi/isci/remote_device.c @@ -126,8 +126,7 @@ static void rnc_destruct_done(void *_dev) struct scic_sds_remote_device *sci_dev = _dev; BUG_ON(sci_dev->started_request_count != 0); - sci_base_state_machine_change_state(&sci_dev->state_machine, - SCI_BASE_REMOTE_DEVICE_STATE_STOPPED); + sci_change_state(&sci_dev->sm, SCI_DEV_STOPPED); } static enum sci_status scic_sds_remote_device_terminate_requests(struct scic_sds_remote_device *sci_dev) @@ -154,20 +153,20 @@ static enum sci_status scic_sds_remote_device_terminate_requests(struct scic_sds enum sci_status scic_remote_device_stop(struct scic_sds_remote_device *sci_dev, u32 timeout) { - struct sci_base_state_machine *sm = &sci_dev->state_machine; + struct sci_base_state_machine *sm = &sci_dev->sm; enum scic_sds_remote_device_states state = sm->current_state_id; switch (state) { - case SCI_BASE_REMOTE_DEVICE_STATE_INITIAL: - case SCI_BASE_REMOTE_DEVICE_STATE_FAILED: - case SCI_BASE_REMOTE_DEVICE_STATE_FINAL: + case SCI_DEV_INITIAL: + case SCI_DEV_FAILED: + case SCI_DEV_FINAL: default: dev_warn(scirdev_to_dev(sci_dev), "%s: in wrong state: %d\n", __func__, state); return SCI_FAILURE_INVALID_STATE; - case SCI_BASE_REMOTE_DEVICE_STATE_STOPPED: + case SCI_DEV_STOPPED: return SCI_SUCCESS; - case SCI_BASE_REMOTE_DEVICE_STATE_STARTING: + case SCI_DEV_STARTING: /* device not started so there had better be no requests */ BUG_ON(sci_dev->started_request_count != 0); scic_sds_remote_node_context_destruct(&sci_dev->rnc, @@ -175,17 +174,17 @@ enum sci_status scic_remote_device_stop(struct scic_sds_remote_device *sci_dev, /* Transition to the stopping state and wait for the * remote node to complete being posted and invalidated. 
*/ - sci_base_state_machine_change_state(sm, SCI_BASE_REMOTE_DEVICE_STATE_STOPPING); + sci_change_state(sm, SCI_DEV_STOPPING); return SCI_SUCCESS; - case SCI_BASE_REMOTE_DEVICE_STATE_READY: - case SCIC_SDS_STP_REMOTE_DEVICE_READY_SUBSTATE_IDLE: - case SCIC_SDS_STP_REMOTE_DEVICE_READY_SUBSTATE_CMD: - case SCIC_SDS_STP_REMOTE_DEVICE_READY_SUBSTATE_NCQ: - case SCIC_SDS_STP_REMOTE_DEVICE_READY_SUBSTATE_NCQ_ERROR: - case SCIC_SDS_STP_REMOTE_DEVICE_READY_SUBSTATE_AWAIT_RESET: - case SCIC_SDS_SMP_REMOTE_DEVICE_READY_SUBSTATE_IDLE: - case SCIC_SDS_SMP_REMOTE_DEVICE_READY_SUBSTATE_CMD: - sci_base_state_machine_change_state(sm, SCI_BASE_REMOTE_DEVICE_STATE_STOPPING); + case SCI_DEV_READY: + case SCI_STP_DEV_IDLE: + case SCI_STP_DEV_CMD: + case SCI_STP_DEV_NCQ: + case SCI_STP_DEV_NCQ_ERROR: + case SCI_STP_DEV_AWAIT_RESET: + case SCI_SMP_DEV_IDLE: + case SCI_SMP_DEV_CMD: + sci_change_state(sm, SCI_DEV_STOPPING); if (sci_dev->started_request_count == 0) { scic_sds_remote_node_context_destruct(&sci_dev->rnc, rnc_destruct_done, sci_dev); @@ -193,70 +192,70 @@ enum sci_status scic_remote_device_stop(struct scic_sds_remote_device *sci_dev, } else return scic_sds_remote_device_terminate_requests(sci_dev); break; - case SCI_BASE_REMOTE_DEVICE_STATE_STOPPING: + case SCI_DEV_STOPPING: /* All requests should have been terminated, but if there is an * attempt to stop a device already in the stopping state, then * try again to terminate. */ return scic_sds_remote_device_terminate_requests(sci_dev); - case SCI_BASE_REMOTE_DEVICE_STATE_RESETTING: - sci_base_state_machine_change_state(sm, SCI_BASE_REMOTE_DEVICE_STATE_STOPPING); + case SCI_DEV_RESETTING: + sci_change_state(sm, SCI_DEV_STOPPING); return SCI_SUCCESS; } } enum sci_status scic_remote_device_reset(struct scic_sds_remote_device *sci_dev) { - struct sci_base_state_machine *sm = &sci_dev->state_machine; + struct sci_base_state_machine *sm = &sci_dev->sm; enum scic_sds_remote_device_states state = sm->current_state_id; switch (state) { - case SCI_BASE_REMOTE_DEVICE_STATE_INITIAL: - case SCI_BASE_REMOTE_DEVICE_STATE_STOPPED: - case SCI_BASE_REMOTE_DEVICE_STATE_STARTING: - case SCIC_SDS_SMP_REMOTE_DEVICE_READY_SUBSTATE_IDLE: - case SCIC_SDS_SMP_REMOTE_DEVICE_READY_SUBSTATE_CMD: - case SCI_BASE_REMOTE_DEVICE_STATE_STOPPING: - case SCI_BASE_REMOTE_DEVICE_STATE_FAILED: - case SCI_BASE_REMOTE_DEVICE_STATE_RESETTING: - case SCI_BASE_REMOTE_DEVICE_STATE_FINAL: + case SCI_DEV_INITIAL: + case SCI_DEV_STOPPED: + case SCI_DEV_STARTING: + case SCI_SMP_DEV_IDLE: + case SCI_SMP_DEV_CMD: + case SCI_DEV_STOPPING: + case SCI_DEV_FAILED: + case SCI_DEV_RESETTING: + case SCI_DEV_FINAL: default: dev_warn(scirdev_to_dev(sci_dev), "%s: in wrong state: %d\n", __func__, state); return SCI_FAILURE_INVALID_STATE; - case SCI_BASE_REMOTE_DEVICE_STATE_READY: - case SCIC_SDS_STP_REMOTE_DEVICE_READY_SUBSTATE_IDLE: - case SCIC_SDS_STP_REMOTE_DEVICE_READY_SUBSTATE_CMD: - case SCIC_SDS_STP_REMOTE_DEVICE_READY_SUBSTATE_NCQ: - case SCIC_SDS_STP_REMOTE_DEVICE_READY_SUBSTATE_NCQ_ERROR: - case SCIC_SDS_STP_REMOTE_DEVICE_READY_SUBSTATE_AWAIT_RESET: - sci_base_state_machine_change_state(sm, SCI_BASE_REMOTE_DEVICE_STATE_RESETTING); + case SCI_DEV_READY: + case SCI_STP_DEV_IDLE: + case SCI_STP_DEV_CMD: + case SCI_STP_DEV_NCQ: + case SCI_STP_DEV_NCQ_ERROR: + case SCI_STP_DEV_AWAIT_RESET: + sci_change_state(sm, SCI_DEV_RESETTING); return SCI_SUCCESS; } } enum sci_status scic_remote_device_reset_complete(struct scic_sds_remote_device *sci_dev) { - struct sci_base_state_machine *sm = 
&sci_dev->state_machine; + struct sci_base_state_machine *sm = &sci_dev->sm; enum scic_sds_remote_device_states state = sm->current_state_id; - if (state != SCI_BASE_REMOTE_DEVICE_STATE_RESETTING) { + if (state != SCI_DEV_RESETTING) { dev_warn(scirdev_to_dev(sci_dev), "%s: in wrong state: %d\n", __func__, state); return SCI_FAILURE_INVALID_STATE; } - sci_base_state_machine_change_state(sm, SCI_BASE_REMOTE_DEVICE_STATE_READY); + sci_change_state(sm, SCI_DEV_READY); return SCI_SUCCESS; } enum sci_status scic_sds_remote_device_suspend(struct scic_sds_remote_device *sci_dev, u32 suspend_type) { - struct sci_base_state_machine *sm = &sci_dev->state_machine; + struct sci_base_state_machine *sm = &sci_dev->sm; enum scic_sds_remote_device_states state = sm->current_state_id; - if (state != SCIC_SDS_STP_REMOTE_DEVICE_READY_SUBSTATE_CMD) { + if (state != SCI_STP_DEV_CMD) { dev_warn(scirdev_to_dev(sci_dev), "%s: in wrong state: %d\n", __func__, state); return SCI_FAILURE_INVALID_STATE; @@ -269,30 +268,30 @@ enum sci_status scic_sds_remote_device_suspend(struct scic_sds_remote_device *sc enum sci_status scic_sds_remote_device_frame_handler(struct scic_sds_remote_device *sci_dev, u32 frame_index) { - struct sci_base_state_machine *sm = &sci_dev->state_machine; + struct sci_base_state_machine *sm = &sci_dev->sm; enum scic_sds_remote_device_states state = sm->current_state_id; struct scic_sds_controller *scic = sci_dev->owning_port->owning_controller; enum sci_status status; switch (state) { - case SCI_BASE_REMOTE_DEVICE_STATE_INITIAL: - case SCI_BASE_REMOTE_DEVICE_STATE_STOPPED: - case SCI_BASE_REMOTE_DEVICE_STATE_STARTING: - case SCIC_SDS_STP_REMOTE_DEVICE_READY_SUBSTATE_IDLE: - case SCIC_SDS_SMP_REMOTE_DEVICE_READY_SUBSTATE_IDLE: - case SCI_BASE_REMOTE_DEVICE_STATE_FINAL: + case SCI_DEV_INITIAL: + case SCI_DEV_STOPPED: + case SCI_DEV_STARTING: + case SCI_STP_DEV_IDLE: + case SCI_SMP_DEV_IDLE: + case SCI_DEV_FINAL: default: dev_warn(scirdev_to_dev(sci_dev), "%s: in wrong state: %d\n", __func__, state); /* Return the frame back to the controller */ scic_sds_controller_release_frame(scic, frame_index); return SCI_FAILURE_INVALID_STATE; - case SCI_BASE_REMOTE_DEVICE_STATE_READY: - case SCIC_SDS_STP_REMOTE_DEVICE_READY_SUBSTATE_NCQ_ERROR: - case SCIC_SDS_STP_REMOTE_DEVICE_READY_SUBSTATE_AWAIT_RESET: - case SCI_BASE_REMOTE_DEVICE_STATE_STOPPING: - case SCI_BASE_REMOTE_DEVICE_STATE_FAILED: - case SCI_BASE_REMOTE_DEVICE_STATE_RESETTING: { + case SCI_DEV_READY: + case SCI_STP_DEV_NCQ_ERROR: + case SCI_STP_DEV_AWAIT_RESET: + case SCI_DEV_STOPPING: + case SCI_DEV_FAILED: + case SCI_DEV_RESETTING: { struct scic_sds_request *sci_req; struct ssp_frame_hdr hdr; void *frame_header; @@ -319,7 +318,7 @@ enum sci_status scic_sds_remote_device_frame_handler(struct scic_sds_remote_devi } break; } - case SCIC_SDS_STP_REMOTE_DEVICE_READY_SUBSTATE_NCQ: { + case SCI_STP_DEV_NCQ: { struct dev_to_host_fis *hdr; status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control, @@ -333,7 +332,7 @@ enum sci_status scic_sds_remote_device_frame_handler(struct scic_sds_remote_devi sci_dev->not_ready_reason = SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED; /* TODO Check sactive and complete associated IO if any. 
*/ - sci_base_state_machine_change_state(sm, SCIC_SDS_STP_REMOTE_DEVICE_READY_SUBSTATE_NCQ_ERROR); + sci_change_state(sm, SCI_STP_DEV_NCQ_ERROR); } else if (hdr->fis_type == FIS_REGD2H && (hdr->status & ATA_ERR)) { /* @@ -341,16 +340,15 @@ enum sci_status scic_sds_remote_device_frame_handler(struct scic_sds_remote_devi * Treat this like an SDB error FIS ready reason. */ sci_dev->not_ready_reason = SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED; - sci_base_state_machine_change_state(&sci_dev->state_machine, - SCIC_SDS_STP_REMOTE_DEVICE_READY_SUBSTATE_NCQ_ERROR); + sci_change_state(&sci_dev->sm, SCI_STP_DEV_NCQ_ERROR); } else status = SCI_FAILURE; scic_sds_controller_release_frame(scic, frame_index); break; } - case SCIC_SDS_STP_REMOTE_DEVICE_READY_SUBSTATE_CMD: - case SCIC_SDS_SMP_REMOTE_DEVICE_READY_SUBSTATE_CMD: + case SCI_STP_DEV_CMD: + case SCI_SMP_DEV_CMD: /* The device does not process any UF received from the hardware while * in this state. All unsolicited frames are forwarded to the io request * object. @@ -365,18 +363,18 @@ enum sci_status scic_sds_remote_device_frame_handler(struct scic_sds_remote_devi static bool is_remote_device_ready(struct scic_sds_remote_device *sci_dev) { - struct sci_base_state_machine *sm = &sci_dev->state_machine; + struct sci_base_state_machine *sm = &sci_dev->sm; enum scic_sds_remote_device_states state = sm->current_state_id; switch (state) { - case SCI_BASE_REMOTE_DEVICE_STATE_READY: - case SCIC_SDS_STP_REMOTE_DEVICE_READY_SUBSTATE_IDLE: - case SCIC_SDS_STP_REMOTE_DEVICE_READY_SUBSTATE_CMD: - case SCIC_SDS_STP_REMOTE_DEVICE_READY_SUBSTATE_NCQ: - case SCIC_SDS_STP_REMOTE_DEVICE_READY_SUBSTATE_NCQ_ERROR: - case SCIC_SDS_STP_REMOTE_DEVICE_READY_SUBSTATE_AWAIT_RESET: - case SCIC_SDS_SMP_REMOTE_DEVICE_READY_SUBSTATE_IDLE: - case SCIC_SDS_SMP_REMOTE_DEVICE_READY_SUBSTATE_CMD: + case SCI_DEV_READY: + case SCI_STP_DEV_IDLE: + case SCI_STP_DEV_CMD: + case SCI_STP_DEV_NCQ: + case SCI_STP_DEV_NCQ_ERROR: + case SCI_STP_DEV_AWAIT_RESET: + case SCI_SMP_DEV_IDLE: + case SCI_SMP_DEV_CMD: return true; default: return false; @@ -386,7 +384,7 @@ static bool is_remote_device_ready(struct scic_sds_remote_device *sci_dev) enum sci_status scic_sds_remote_device_event_handler(struct scic_sds_remote_device *sci_dev, u32 event_code) { - struct sci_base_state_machine *sm = &sci_dev->state_machine; + struct sci_base_state_machine *sm = &sci_dev->sm; enum scic_sds_remote_device_states state = sm->current_state_id; enum sci_status status; @@ -429,7 +427,7 @@ enum sci_status scic_sds_remote_device_event_handler(struct scic_sds_remote_devi if (status != SCI_SUCCESS) return status; - if (state == SCIC_SDS_STP_REMOTE_DEVICE_READY_SUBSTATE_IDLE) { + if (state == SCI_STP_DEV_IDLE) { /* We pick up suspension events to handle specifically to this * state. We resume the RNC right away. 
@@ -459,26 +457,26 @@ enum sci_status scic_sds_remote_device_start_io(struct scic_sds_controller *scic struct scic_sds_remote_device *sci_dev, struct scic_sds_request *sci_req) { - struct sci_base_state_machine *sm = &sci_dev->state_machine; + struct sci_base_state_machine *sm = &sci_dev->sm; enum scic_sds_remote_device_states state = sm->current_state_id; struct scic_sds_port *sci_port = sci_dev->owning_port; struct isci_request *ireq = sci_req_to_ireq(sci_req); enum sci_status status; switch (state) { - case SCI_BASE_REMOTE_DEVICE_STATE_INITIAL: - case SCI_BASE_REMOTE_DEVICE_STATE_STOPPED: - case SCI_BASE_REMOTE_DEVICE_STATE_STARTING: - case SCIC_SDS_STP_REMOTE_DEVICE_READY_SUBSTATE_NCQ_ERROR: - case SCI_BASE_REMOTE_DEVICE_STATE_STOPPING: - case SCI_BASE_REMOTE_DEVICE_STATE_FAILED: - case SCI_BASE_REMOTE_DEVICE_STATE_RESETTING: - case SCI_BASE_REMOTE_DEVICE_STATE_FINAL: + case SCI_DEV_INITIAL: + case SCI_DEV_STOPPED: + case SCI_DEV_STARTING: + case SCI_STP_DEV_NCQ_ERROR: + case SCI_DEV_STOPPING: + case SCI_DEV_FAILED: + case SCI_DEV_RESETTING: + case SCI_DEV_FINAL: default: dev_warn(scirdev_to_dev(sci_dev), "%s: in wrong state: %d\n", __func__, state); return SCI_FAILURE_INVALID_STATE; - case SCI_BASE_REMOTE_DEVICE_STATE_READY: + case SCI_DEV_READY: /* attempt to start an io request for this device object. The remote * device object will issue the start request for the io and if * successful it will start the request for the port object then @@ -494,7 +492,7 @@ enum sci_status scic_sds_remote_device_start_io(struct scic_sds_controller *scic status = scic_sds_request_start(sci_req); break; - case SCIC_SDS_STP_REMOTE_DEVICE_READY_SUBSTATE_IDLE: { + case SCI_STP_DEV_IDLE: { /* handle the start io operation for a sata device that is in * the command idle state. 
- Evalute the type of IO request to * be started - If its an NCQ request change to NCQ substate - @@ -519,15 +517,15 @@ enum sci_status scic_sds_remote_device_start_io(struct scic_sds_controller *scic break; if (task->ata_task.use_ncq) - new_state = SCIC_SDS_STP_REMOTE_DEVICE_READY_SUBSTATE_NCQ; + new_state = SCI_STP_DEV_NCQ; else { sci_dev->working_request = sci_req; - new_state = SCIC_SDS_STP_REMOTE_DEVICE_READY_SUBSTATE_CMD; + new_state = SCI_STP_DEV_CMD; } - sci_base_state_machine_change_state(sm, new_state); + sci_change_state(sm, new_state); break; } - case SCIC_SDS_STP_REMOTE_DEVICE_READY_SUBSTATE_NCQ: { + case SCI_STP_DEV_NCQ: { struct sas_task *task = isci_request_access_task(ireq); if (task->ata_task.use_ncq) { @@ -544,9 +542,9 @@ enum sci_status scic_sds_remote_device_start_io(struct scic_sds_controller *scic return SCI_FAILURE_INVALID_STATE; break; } - case SCIC_SDS_STP_REMOTE_DEVICE_READY_SUBSTATE_AWAIT_RESET: + case SCI_STP_DEV_AWAIT_RESET: return SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED; - case SCIC_SDS_SMP_REMOTE_DEVICE_READY_SUBSTATE_IDLE: + case SCI_SMP_DEV_IDLE: status = scic_sds_port_start_io(sci_port, sci_dev, sci_req); if (status != SCI_SUCCESS) return status; @@ -560,11 +558,10 @@ enum sci_status scic_sds_remote_device_start_io(struct scic_sds_controller *scic break; sci_dev->working_request = sci_req; - sci_base_state_machine_change_state(&sci_dev->state_machine, - SCIC_SDS_SMP_REMOTE_DEVICE_READY_SUBSTATE_CMD); + sci_change_state(&sci_dev->sm, SCI_SMP_DEV_CMD); break; - case SCIC_SDS_STP_REMOTE_DEVICE_READY_SUBSTATE_CMD: - case SCIC_SDS_SMP_REMOTE_DEVICE_READY_SUBSTATE_CMD: + case SCI_STP_DEV_CMD: + case SCI_SMP_DEV_CMD: /* device is already handling a command it can not accept new commands * until this one is complete. */ @@ -597,31 +594,31 @@ enum sci_status scic_sds_remote_device_complete_io(struct scic_sds_controller *s struct scic_sds_remote_device *sci_dev, struct scic_sds_request *sci_req) { - struct sci_base_state_machine *sm = &sci_dev->state_machine; + struct sci_base_state_machine *sm = &sci_dev->sm; enum scic_sds_remote_device_states state = sm->current_state_id; struct scic_sds_port *sci_port = sci_dev->owning_port; enum sci_status status; switch (state) { - case SCI_BASE_REMOTE_DEVICE_STATE_INITIAL: - case SCI_BASE_REMOTE_DEVICE_STATE_STOPPED: - case SCI_BASE_REMOTE_DEVICE_STATE_STARTING: - case SCIC_SDS_STP_REMOTE_DEVICE_READY_SUBSTATE_IDLE: - case SCIC_SDS_SMP_REMOTE_DEVICE_READY_SUBSTATE_IDLE: - case SCI_BASE_REMOTE_DEVICE_STATE_FAILED: - case SCI_BASE_REMOTE_DEVICE_STATE_FINAL: + case SCI_DEV_INITIAL: + case SCI_DEV_STOPPED: + case SCI_DEV_STARTING: + case SCI_STP_DEV_IDLE: + case SCI_SMP_DEV_IDLE: + case SCI_DEV_FAILED: + case SCI_DEV_FINAL: default: dev_warn(scirdev_to_dev(sci_dev), "%s: in wrong state: %d\n", __func__, state); return SCI_FAILURE_INVALID_STATE; - case SCI_BASE_REMOTE_DEVICE_STATE_READY: - case SCIC_SDS_STP_REMOTE_DEVICE_READY_SUBSTATE_AWAIT_RESET: - case SCI_BASE_REMOTE_DEVICE_STATE_RESETTING: + case SCI_DEV_READY: + case SCI_STP_DEV_AWAIT_RESET: + case SCI_DEV_RESETTING: status = common_complete_io(sci_port, sci_dev, sci_req); break; - case SCIC_SDS_STP_REMOTE_DEVICE_READY_SUBSTATE_CMD: - case SCIC_SDS_STP_REMOTE_DEVICE_READY_SUBSTATE_NCQ: - case SCIC_SDS_STP_REMOTE_DEVICE_READY_SUBSTATE_NCQ_ERROR: + case SCI_STP_DEV_CMD: + case SCI_STP_DEV_NCQ: + case SCI_STP_DEV_NCQ_ERROR: status = common_complete_io(sci_port, sci_dev, sci_req); if (status != SCI_SUCCESS) break; @@ -632,17 +629,17 @@ enum sci_status 
scic_sds_remote_device_complete_io(struct scic_sds_controller *s * can reach RNC state handler, these IOs will be completed by RNC with * status of "DEVICE_RESET_REQUIRED", instead of "INVALID STATE". */ - sci_base_state_machine_change_state(sm, SCIC_SDS_STP_REMOTE_DEVICE_READY_SUBSTATE_AWAIT_RESET); + sci_change_state(sm, SCI_STP_DEV_AWAIT_RESET); } else if (scic_sds_remote_device_get_request_count(sci_dev) == 0) - sci_base_state_machine_change_state(sm, SCIC_SDS_STP_REMOTE_DEVICE_READY_SUBSTATE_IDLE); + sci_change_state(sm, SCI_STP_DEV_IDLE); break; - case SCIC_SDS_SMP_REMOTE_DEVICE_READY_SUBSTATE_CMD: + case SCI_SMP_DEV_CMD: status = common_complete_io(sci_port, sci_dev, sci_req); if (status != SCI_SUCCESS) break; - sci_base_state_machine_change_state(sm, SCIC_SDS_SMP_REMOTE_DEVICE_READY_SUBSTATE_IDLE); + sci_change_state(sm, SCI_SMP_DEV_IDLE); break; - case SCI_BASE_REMOTE_DEVICE_STATE_STOPPING: + case SCI_DEV_STOPPING: status = common_complete_io(sci_port, sci_dev, sci_req); if (status != SCI_SUCCESS) break; @@ -676,30 +673,30 @@ enum sci_status scic_sds_remote_device_start_task(struct scic_sds_controller *sc struct scic_sds_remote_device *sci_dev, struct scic_sds_request *sci_req) { - struct sci_base_state_machine *sm = &sci_dev->state_machine; + struct sci_base_state_machine *sm = &sci_dev->sm; enum scic_sds_remote_device_states state = sm->current_state_id; struct scic_sds_port *sci_port = sci_dev->owning_port; enum sci_status status; switch (state) { - case SCI_BASE_REMOTE_DEVICE_STATE_INITIAL: - case SCI_BASE_REMOTE_DEVICE_STATE_STOPPED: - case SCI_BASE_REMOTE_DEVICE_STATE_STARTING: - case SCIC_SDS_SMP_REMOTE_DEVICE_READY_SUBSTATE_IDLE: - case SCIC_SDS_SMP_REMOTE_DEVICE_READY_SUBSTATE_CMD: - case SCI_BASE_REMOTE_DEVICE_STATE_STOPPING: - case SCI_BASE_REMOTE_DEVICE_STATE_FAILED: - case SCI_BASE_REMOTE_DEVICE_STATE_RESETTING: - case SCI_BASE_REMOTE_DEVICE_STATE_FINAL: + case SCI_DEV_INITIAL: + case SCI_DEV_STOPPED: + case SCI_DEV_STARTING: + case SCI_SMP_DEV_IDLE: + case SCI_SMP_DEV_CMD: + case SCI_DEV_STOPPING: + case SCI_DEV_FAILED: + case SCI_DEV_RESETTING: + case SCI_DEV_FINAL: default: dev_warn(scirdev_to_dev(sci_dev), "%s: in wrong state: %d\n", __func__, state); return SCI_FAILURE_INVALID_STATE; - case SCIC_SDS_STP_REMOTE_DEVICE_READY_SUBSTATE_IDLE: - case SCIC_SDS_STP_REMOTE_DEVICE_READY_SUBSTATE_CMD: - case SCIC_SDS_STP_REMOTE_DEVICE_READY_SUBSTATE_NCQ: - case SCIC_SDS_STP_REMOTE_DEVICE_READY_SUBSTATE_NCQ_ERROR: - case SCIC_SDS_STP_REMOTE_DEVICE_READY_SUBSTATE_AWAIT_RESET: + case SCI_STP_DEV_IDLE: + case SCI_STP_DEV_CMD: + case SCI_STP_DEV_NCQ: + case SCI_STP_DEV_NCQ_ERROR: + case SCI_STP_DEV_AWAIT_RESET: status = scic_sds_port_start_io(sci_port, sci_dev, sci_req); if (status != SCI_SUCCESS) return status; @@ -717,7 +714,7 @@ enum sci_status scic_sds_remote_device_start_task(struct scic_sds_controller *sc * management request. */ sci_dev->working_request = sci_req; - sci_base_state_machine_change_state(sm, SCIC_SDS_STP_REMOTE_DEVICE_READY_SUBSTATE_CMD); + sci_change_state(sm, SCI_STP_DEV_CMD); /* The remote node context must cleanup the TCi to NCQ mapping * table. The only way to do this correctly is to either write @@ -739,7 +736,7 @@ enum sci_status scic_sds_remote_device_start_task(struct scic_sds_controller *sc * post TC when RNC gets resumed. 
*/ return SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS; - case SCI_BASE_REMOTE_DEVICE_STATE_READY: + case SCI_DEV_READY: status = scic_sds_port_start_io(sci_port, sci_dev, sci_req); if (status != SCI_SUCCESS) return status; @@ -790,8 +787,7 @@ static void remote_device_resume_done(void *_dev) return; /* go 'ready' if we are not already in a ready state */ - sci_base_state_machine_change_state(&sci_dev->state_machine, - SCI_BASE_REMOTE_DEVICE_STATE_READY); + sci_change_state(&sci_dev->sm, SCI_DEV_READY); } static void scic_sds_stp_remote_device_ready_idle_substate_resume_complete_handler(void *_dev) @@ -803,17 +799,16 @@ static void scic_sds_stp_remote_device_ready_idle_substate_resume_complete_handl /* For NCQ operation we do not issue a isci_remote_device_not_ready(). * As a result, avoid sending the ready notification. */ - if (sci_dev->state_machine.previous_state_id != SCIC_SDS_STP_REMOTE_DEVICE_READY_SUBSTATE_NCQ) + if (sci_dev->sm.previous_state_id != SCI_STP_DEV_NCQ) isci_remote_device_ready(scic_to_ihost(scic), idev); } static void scic_sds_remote_device_initial_state_enter(struct sci_base_state_machine *sm) { - struct scic_sds_remote_device *sci_dev = container_of(sm, typeof(*sci_dev), state_machine); + struct scic_sds_remote_device *sci_dev = container_of(sm, typeof(*sci_dev), sm); /* Initial state is a transitional state to the stopped state */ - sci_base_state_machine_change_state(&sci_dev->state_machine, - SCI_BASE_REMOTE_DEVICE_STATE_STOPPED); + sci_change_state(&sci_dev->sm, SCI_DEV_STOPPED); } /** @@ -831,11 +826,11 @@ static void scic_sds_remote_device_initial_state_enter(struct sci_base_state_mac */ static enum sci_status scic_remote_device_destruct(struct scic_sds_remote_device *sci_dev) { - struct sci_base_state_machine *sm = &sci_dev->state_machine; + struct sci_base_state_machine *sm = &sci_dev->sm; enum scic_sds_remote_device_states state = sm->current_state_id; struct scic_sds_controller *scic; - if (state != SCI_BASE_REMOTE_DEVICE_STATE_STOPPED) { + if (state != SCI_DEV_STOPPED) { dev_warn(scirdev_to_dev(sci_dev), "%s: in wrong state: %d\n", __func__, state); return SCI_FAILURE_INVALID_STATE; @@ -845,7 +840,7 @@ static enum sci_status scic_remote_device_destruct(struct scic_sds_remote_device scic_sds_controller_free_remote_node_context(scic, sci_dev, sci_dev->rnc.remote_node_index); sci_dev->rnc.remote_node_index = SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX; - sci_base_state_machine_change_state(sm, SCI_BASE_REMOTE_DEVICE_STATE_FINAL); + sci_change_state(sm, SCI_DEV_FINAL); return SCI_SUCCESS; } @@ -906,7 +901,7 @@ static void isci_remote_device_stop_complete(struct isci_host *ihost, static void scic_sds_remote_device_stopped_state_enter(struct sci_base_state_machine *sm) { - struct scic_sds_remote_device *sci_dev = container_of(sm, typeof(*sci_dev), state_machine); + struct scic_sds_remote_device *sci_dev = container_of(sm, typeof(*sci_dev), sm); struct scic_sds_controller *scic = sci_dev->owning_port->owning_controller; struct isci_remote_device *idev = sci_dev_to_idev(sci_dev); u32 prev_state; @@ -914,8 +909,8 @@ static void scic_sds_remote_device_stopped_state_enter(struct sci_base_state_mac /* If we are entering from the stopping state let the SCI User know that * the stop operation has completed. 
*/ - prev_state = sci_dev->state_machine.previous_state_id; - if (prev_state == SCI_BASE_REMOTE_DEVICE_STATE_STOPPING) + prev_state = sci_dev->sm.previous_state_id; + if (prev_state == SCI_DEV_STOPPING) isci_remote_device_stop_complete(scic_to_ihost(scic), idev); scic_sds_controller_remote_device_stopped(scic, sci_dev); @@ -923,7 +918,7 @@ static void scic_sds_remote_device_stopped_state_enter(struct sci_base_state_mac static void scic_sds_remote_device_starting_state_enter(struct sci_base_state_machine *sm) { - struct scic_sds_remote_device *sci_dev = container_of(sm, typeof(*sci_dev), state_machine); + struct scic_sds_remote_device *sci_dev = container_of(sm, typeof(*sci_dev), sm); struct scic_sds_controller *scic = scic_sds_remote_device_get_controller(sci_dev); struct isci_host *ihost = scic_to_ihost(scic); struct isci_remote_device *idev = sci_dev_to_idev(sci_dev); @@ -934,7 +929,7 @@ static void scic_sds_remote_device_starting_state_enter(struct sci_base_state_ma static void scic_sds_remote_device_ready_state_enter(struct sci_base_state_machine *sm) { - struct scic_sds_remote_device *sci_dev = container_of(sm, typeof(*sci_dev), state_machine); + struct scic_sds_remote_device *sci_dev = container_of(sm, typeof(*sci_dev), sm); struct scic_sds_controller *scic = sci_dev->owning_port->owning_controller; struct isci_remote_device *idev = sci_dev_to_idev(sci_dev); struct domain_device *dev = idev->domain_dev; @@ -942,18 +937,16 @@ static void scic_sds_remote_device_ready_state_enter(struct sci_base_state_machi scic->remote_device_sequence[sci_dev->rnc.remote_node_index]++; if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_SATA)) { - sci_base_state_machine_change_state(&sci_dev->state_machine, - SCIC_SDS_STP_REMOTE_DEVICE_READY_SUBSTATE_IDLE); + sci_change_state(&sci_dev->sm, SCI_STP_DEV_IDLE); } else if (dev_is_expander(dev)) { - sci_base_state_machine_change_state(&sci_dev->state_machine, - SCIC_SDS_SMP_REMOTE_DEVICE_READY_SUBSTATE_IDLE); + sci_change_state(&sci_dev->sm, SCI_SMP_DEV_IDLE); } else isci_remote_device_ready(scic_to_ihost(scic), idev); } static void scic_sds_remote_device_ready_state_exit(struct sci_base_state_machine *sm) { - struct scic_sds_remote_device *sci_dev = container_of(sm, typeof(*sci_dev), state_machine); + struct scic_sds_remote_device *sci_dev = container_of(sm, typeof(*sci_dev), sm); struct domain_device *dev = sci_dev_to_domain(sci_dev); if (dev->dev_type == SAS_END_DEV) { @@ -967,7 +960,7 @@ static void scic_sds_remote_device_ready_state_exit(struct sci_base_state_machin static void scic_sds_remote_device_resetting_state_enter(struct sci_base_state_machine *sm) { - struct scic_sds_remote_device *sci_dev = container_of(sm, typeof(*sci_dev), state_machine); + struct scic_sds_remote_device *sci_dev = container_of(sm, typeof(*sci_dev), sm); scic_sds_remote_node_context_suspend( &sci_dev->rnc, SCI_SOFTWARE_SUSPENSION, NULL, NULL); @@ -975,14 +968,14 @@ static void scic_sds_remote_device_resetting_state_enter(struct sci_base_state_m static void scic_sds_remote_device_resetting_state_exit(struct sci_base_state_machine *sm) { - struct scic_sds_remote_device *sci_dev = container_of(sm, typeof(*sci_dev), state_machine); + struct scic_sds_remote_device *sci_dev = container_of(sm, typeof(*sci_dev), sm); scic_sds_remote_node_context_resume(&sci_dev->rnc, NULL, NULL); } static void scic_sds_stp_remote_device_ready_idle_substate_enter(struct sci_base_state_machine *sm) { - struct scic_sds_remote_device *sci_dev = container_of(sm, typeof(*sci_dev), state_machine); 
+ struct scic_sds_remote_device *sci_dev = container_of(sm, typeof(*sci_dev), sm); sci_dev->working_request = NULL; if (scic_sds_remote_node_context_is_ready(&sci_dev->rnc)) { @@ -999,7 +992,7 @@ static void scic_sds_stp_remote_device_ready_idle_substate_enter(struct sci_base static void scic_sds_stp_remote_device_ready_cmd_substate_enter(struct sci_base_state_machine *sm) { - struct scic_sds_remote_device *sci_dev = container_of(sm, typeof(*sci_dev), state_machine); + struct scic_sds_remote_device *sci_dev = container_of(sm, typeof(*sci_dev), sm); struct scic_sds_controller *scic = scic_sds_remote_device_get_controller(sci_dev); BUG_ON(sci_dev->working_request == NULL); @@ -1010,7 +1003,7 @@ static void scic_sds_stp_remote_device_ready_cmd_substate_enter(struct sci_base_ static void scic_sds_stp_remote_device_ready_ncq_error_substate_enter(struct sci_base_state_machine *sm) { - struct scic_sds_remote_device *sci_dev = container_of(sm, typeof(*sci_dev), state_machine); + struct scic_sds_remote_device *sci_dev = container_of(sm, typeof(*sci_dev), sm); struct scic_sds_controller *scic = scic_sds_remote_device_get_controller(sci_dev); struct isci_remote_device *idev = sci_dev_to_idev(sci_dev); @@ -1021,7 +1014,7 @@ static void scic_sds_stp_remote_device_ready_ncq_error_substate_enter(struct sci static void scic_sds_smp_remote_device_ready_idle_substate_enter(struct sci_base_state_machine *sm) { - struct scic_sds_remote_device *sci_dev = container_of(sm, typeof(*sci_dev), state_machine); + struct scic_sds_remote_device *sci_dev = container_of(sm, typeof(*sci_dev), sm); struct scic_sds_controller *scic = scic_sds_remote_device_get_controller(sci_dev); isci_remote_device_ready(scic_to_ihost(scic), sci_dev_to_idev(sci_dev)); @@ -1029,7 +1022,7 @@ static void scic_sds_smp_remote_device_ready_idle_substate_enter(struct sci_base static void scic_sds_smp_remote_device_ready_cmd_substate_enter(struct sci_base_state_machine *sm) { - struct scic_sds_remote_device *sci_dev = container_of(sm, typeof(*sci_dev), state_machine); + struct scic_sds_remote_device *sci_dev = container_of(sm, typeof(*sci_dev), sm); struct scic_sds_controller *scic = scic_sds_remote_device_get_controller(sci_dev); BUG_ON(sci_dev->working_request == NULL); @@ -1040,50 +1033,50 @@ static void scic_sds_smp_remote_device_ready_cmd_substate_enter(struct sci_base_ static void scic_sds_smp_remote_device_ready_cmd_substate_exit(struct sci_base_state_machine *sm) { - struct scic_sds_remote_device *sci_dev = container_of(sm, typeof(*sci_dev), state_machine); + struct scic_sds_remote_device *sci_dev = container_of(sm, typeof(*sci_dev), sm); sci_dev->working_request = NULL; } static const struct sci_base_state scic_sds_remote_device_state_table[] = { - [SCI_BASE_REMOTE_DEVICE_STATE_INITIAL] = { + [SCI_DEV_INITIAL] = { .enter_state = scic_sds_remote_device_initial_state_enter, }, - [SCI_BASE_REMOTE_DEVICE_STATE_STOPPED] = { + [SCI_DEV_STOPPED] = { .enter_state = scic_sds_remote_device_stopped_state_enter, }, - [SCI_BASE_REMOTE_DEVICE_STATE_STARTING] = { + [SCI_DEV_STARTING] = { .enter_state = scic_sds_remote_device_starting_state_enter, }, - [SCI_BASE_REMOTE_DEVICE_STATE_READY] = { + [SCI_DEV_READY] = { .enter_state = scic_sds_remote_device_ready_state_enter, .exit_state = scic_sds_remote_device_ready_state_exit }, - [SCIC_SDS_STP_REMOTE_DEVICE_READY_SUBSTATE_IDLE] = { + [SCI_STP_DEV_IDLE] = { .enter_state = scic_sds_stp_remote_device_ready_idle_substate_enter, }, - [SCIC_SDS_STP_REMOTE_DEVICE_READY_SUBSTATE_CMD] = { + [SCI_STP_DEV_CMD] = { 
.enter_state = scic_sds_stp_remote_device_ready_cmd_substate_enter, }, - [SCIC_SDS_STP_REMOTE_DEVICE_READY_SUBSTATE_NCQ] = { }, - [SCIC_SDS_STP_REMOTE_DEVICE_READY_SUBSTATE_NCQ_ERROR] = { + [SCI_STP_DEV_NCQ] = { }, + [SCI_STP_DEV_NCQ_ERROR] = { .enter_state = scic_sds_stp_remote_device_ready_ncq_error_substate_enter, }, - [SCIC_SDS_STP_REMOTE_DEVICE_READY_SUBSTATE_AWAIT_RESET] = { }, - [SCIC_SDS_SMP_REMOTE_DEVICE_READY_SUBSTATE_IDLE] = { + [SCI_STP_DEV_AWAIT_RESET] = { }, + [SCI_SMP_DEV_IDLE] = { .enter_state = scic_sds_smp_remote_device_ready_idle_substate_enter, }, - [SCIC_SDS_SMP_REMOTE_DEVICE_READY_SUBSTATE_CMD] = { + [SCI_SMP_DEV_CMD] = { .enter_state = scic_sds_smp_remote_device_ready_cmd_substate_enter, .exit_state = scic_sds_smp_remote_device_ready_cmd_substate_exit, }, - [SCI_BASE_REMOTE_DEVICE_STATE_STOPPING] = { }, - [SCI_BASE_REMOTE_DEVICE_STATE_FAILED] = { }, - [SCI_BASE_REMOTE_DEVICE_STATE_RESETTING] = { + [SCI_DEV_STOPPING] = { }, + [SCI_DEV_FAILED] = { }, + [SCI_DEV_RESETTING] = { .enter_state = scic_sds_remote_device_resetting_state_enter, .exit_state = scic_sds_remote_device_resetting_state_exit }, - [SCI_BASE_REMOTE_DEVICE_STATE_FINAL] = { }, + [SCI_DEV_FINAL] = { }, }; /** @@ -1102,11 +1095,11 @@ static void scic_remote_device_construct(struct scic_sds_port *sci_port, sci_dev->owning_port = sci_port; sci_dev->started_request_count = 0; - sci_base_state_machine_construct(&sci_dev->state_machine, + sci_base_state_machine_construct(&sci_dev->sm, scic_sds_remote_device_state_table, - SCI_BASE_REMOTE_DEVICE_STATE_INITIAL); + SCI_DEV_INITIAL); - sci_base_state_machine_start(&sci_dev->state_machine); + sci_base_state_machine_start(&sci_dev->sm); scic_sds_remote_node_context_construct(&sci_dev->rnc, SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX); @@ -1224,11 +1217,11 @@ static enum sci_status scic_remote_device_ea_construct(struct scic_sds_port *sci static enum sci_status scic_remote_device_start(struct scic_sds_remote_device *sci_dev, u32 timeout) { - struct sci_base_state_machine *sm = &sci_dev->state_machine; + struct sci_base_state_machine *sm = &sci_dev->sm; enum scic_sds_remote_device_states state = sm->current_state_id; enum sci_status status; - if (state != SCI_BASE_REMOTE_DEVICE_STATE_STOPPED) { + if (state != SCI_DEV_STOPPED) { dev_warn(scirdev_to_dev(sci_dev), "%s: in wrong state: %d\n", __func__, state); return SCI_FAILURE_INVALID_STATE; @@ -1240,7 +1233,7 @@ static enum sci_status scic_remote_device_start(struct scic_sds_remote_device *s if (status != SCI_SUCCESS) return status; - sci_base_state_machine_change_state(sm, SCI_BASE_REMOTE_DEVICE_STATE_STARTING); + sci_change_state(sm, SCI_DEV_STARTING); return SCI_SUCCESS; } diff --git a/drivers/scsi/isci/remote_device.h b/drivers/scsi/isci/remote_device.h index a118f5873f6..2b6a5bb7bd6 100644 --- a/drivers/scsi/isci/remote_device.h +++ b/drivers/scsi/isci/remote_device.h @@ -74,7 +74,7 @@ struct scic_sds_remote_device { * This field contains the information for the base remote device state * machine. */ - struct sci_base_state_machine state_machine; + struct sci_base_state_machine sm; /** * This field is the programmed device port width. This value is @@ -109,7 +109,7 @@ struct scic_sds_remote_device { /** * This field contains the stated request count for the remote device. The - * device can not reach the SCI_BASE_REMOTE_DEVICE_STATE_STOPPED until all + * device can not reach the SCI_DEV_STOPPED until all * requests are complete and the rnc_posted value is false. 
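The state table above associates each state id with optional .enter_state/.exit_state callbacks, and scic_remote_device_construct() just hands that table and an initial state to sci_base_state_machine_construct() before starting the machine. A rough standalone sketch of the same table-driven pattern (toy names and simplified semantics, not the sci_base_* API):

    #include <stdio.h>

    typedef void (*toy_hook)(void);

    struct toy_state {
        toy_hook enter;
        toy_hook exit;
    };

    struct toy_fsm {
        const struct toy_state *table;
        int current;
    };

    /* Leave the old state, record the new one, run its entry hook. */
    static void toy_change_state(struct toy_fsm *fsm, int next)
    {
        if (fsm->table[fsm->current].exit)
            fsm->table[fsm->current].exit();
        fsm->current = next;
        if (fsm->table[next].enter)
            fsm->table[next].enter();
    }

    static void ready_enter(void) { puts("ready: enter"); }
    static void ready_exit(void)  { puts("ready: exit"); }

    enum { TOY_STOPPED, TOY_READY, TOY_NR_STATES };

    static const struct toy_state toy_table[TOY_NR_STATES] = {
        [TOY_STOPPED] = { 0 },       /* no hooks for this state */
        [TOY_READY]   = { ready_enter, ready_exit },
    };

    int main(void)
    {
        struct toy_fsm fsm = { toy_table, TOY_STOPPED };

        toy_change_state(&fsm, TOY_READY);    /* sci_change_state() analogue */
        toy_change_state(&fsm, TOY_STOPPED);
        return 0;
    }

States that need no work on entry or exit simply get an empty table entry, which is why lines like [SCI_DEV_STOPPING] = { } are valid above.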
*/ u32 started_request_count; @@ -213,7 +213,7 @@ enum scic_sds_remote_device_states { /** * Simply the initial state for the base remote device state machine. */ - SCI_BASE_REMOTE_DEVICE_STATE_INITIAL, + SCI_DEV_INITIAL, /** * This state indicates that the remote device has successfully been @@ -221,7 +221,7 @@ enum scic_sds_remote_device_states { * This state is entered from the INITIAL state. * This state is entered from the STOPPING state. */ - SCI_BASE_REMOTE_DEVICE_STATE_STOPPED, + SCI_DEV_STOPPED, /** * This state indicates the the remote device is in the process of @@ -229,34 +229,34 @@ enum scic_sds_remote_device_states { * are permitted. * This state is entered from the STOPPED state. */ - SCI_BASE_REMOTE_DEVICE_STATE_STARTING, + SCI_DEV_STARTING, /** * This state indicates the remote device is now ready. Thus, the user * is able to perform IO operations on the remote device. * This state is entered from the STARTING state. */ - SCI_BASE_REMOTE_DEVICE_STATE_READY, + SCI_DEV_READY, /** * This is the idle substate for the stp remote device. When there are no * active IO for the device it is is in this state. */ - SCIC_SDS_STP_REMOTE_DEVICE_READY_SUBSTATE_IDLE, + SCI_STP_DEV_IDLE, /** * This is the command state for for the STP remote device. This state is * entered when the device is processing a non-NCQ command. The device object * will fail any new start IO requests until this command is complete. */ - SCIC_SDS_STP_REMOTE_DEVICE_READY_SUBSTATE_CMD, + SCI_STP_DEV_CMD, /** * This is the NCQ state for the STP remote device. This state is entered * when the device is processing an NCQ reuqest. It will remain in this state * so long as there is one or more NCQ requests being processed. */ - SCIC_SDS_STP_REMOTE_DEVICE_READY_SUBSTATE_NCQ, + SCI_STP_DEV_NCQ, /** * This is the NCQ error state for the STP remote device. This state is @@ -264,25 +264,25 @@ enum scic_sds_remote_device_states { * NCQ state. The device object will only accept a READ LOG command while in * this state. */ - SCIC_SDS_STP_REMOTE_DEVICE_READY_SUBSTATE_NCQ_ERROR, + SCI_STP_DEV_NCQ_ERROR, /** * This is the READY substate indicates the device is waiting for the RESET task * coming to be recovered from certain hardware specific error. */ - SCIC_SDS_STP_REMOTE_DEVICE_READY_SUBSTATE_AWAIT_RESET, + SCI_STP_DEV_AWAIT_RESET, /** * This is the ready operational substate for the remote device. This is the * normal operational state for a remote device. */ - SCIC_SDS_SMP_REMOTE_DEVICE_READY_SUBSTATE_IDLE, + SCI_SMP_DEV_IDLE, /** * This is the suspended state for the remote device. This is the state that * the device is placed in when a RNC suspend is received by the SCU hardware. */ - SCIC_SDS_SMP_REMOTE_DEVICE_READY_SUBSTATE_CMD, + SCI_SMP_DEV_CMD, /** * This state indicates that the remote device is in the process of @@ -291,7 +291,7 @@ enum scic_sds_remote_device_states { * This state is entered from the READY state. * This state is entered from the FAILED state. */ - SCI_BASE_REMOTE_DEVICE_STATE_STOPPING, + SCI_DEV_STOPPING, /** * This state indicates that the remote device has failed. @@ -299,19 +299,19 @@ enum scic_sds_remote_device_states { * This state is entered from the INITIALIZING state. * This state is entered from the READY state. */ - SCI_BASE_REMOTE_DEVICE_STATE_FAILED, + SCI_DEV_FAILED, /** * This state indicates the device is being reset. * In this state no new IO operations are permitted. * This state is entered from the READY state. 
*/ - SCI_BASE_REMOTE_DEVICE_STATE_RESETTING, + SCI_DEV_RESETTING, /** * Simply the final state for the base remote device state machine. */ - SCI_BASE_REMOTE_DEVICE_STATE_FINAL, + SCI_DEV_FINAL, }; static inline struct scic_sds_remote_device *rnc_to_dev(struct scic_sds_remote_node_context *rnc) diff --git a/drivers/scsi/isci/remote_node_context.c b/drivers/scsi/isci/remote_node_context.c index e7fa5bac7d5..24b1d8acf7b 100644 --- a/drivers/scsi/isci/remote_node_context.c +++ b/drivers/scsi/isci/remote_node_context.c @@ -84,9 +84,9 @@ bool scic_sds_remote_node_context_is_ready( struct scic_sds_remote_node_context *sci_rnc) { - u32 current_state = sci_base_state_machine_get_state(&sci_rnc->state_machine); + u32 current_state = sci_rnc->sm.current_state_id; - if (current_state == SCIC_SDS_REMOTE_NODE_CONTEXT_READY_STATE) { + if (current_state == SCI_RNC_READY) { return true; } @@ -268,12 +268,12 @@ static void scic_sds_remote_node_context_invalidate_context_buffer( static void scic_sds_remote_node_context_initial_state_enter(struct sci_base_state_machine *sm) { - struct scic_sds_remote_node_context *rnc = container_of(sm, typeof(*rnc), state_machine); + struct scic_sds_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm); /* Check to see if we have gotten back to the initial state because * someone requested to destroy the remote node context object. */ - if (sm->previous_state_id == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALIDATING_STATE) { + if (sm->previous_state_id == SCI_RNC_INVALIDATING) { rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_UNSPECIFIED; scic_sds_remote_node_context_notify_user(rnc); } @@ -281,21 +281,21 @@ static void scic_sds_remote_node_context_initial_state_enter(struct sci_base_sta static void scic_sds_remote_node_context_posting_state_enter(struct sci_base_state_machine *sm) { - struct scic_sds_remote_node_context *sci_rnc = container_of(sm, typeof(*sci_rnc), state_machine); + struct scic_sds_remote_node_context *sci_rnc = container_of(sm, typeof(*sci_rnc), sm); scic_sds_remote_node_context_validate_context_buffer(sci_rnc); } static void scic_sds_remote_node_context_invalidating_state_enter(struct sci_base_state_machine *sm) { - struct scic_sds_remote_node_context *rnc = container_of(sm, typeof(*rnc), state_machine); + struct scic_sds_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm); scic_sds_remote_node_context_invalidate_context_buffer(rnc); } static void scic_sds_remote_node_context_resuming_state_enter(struct sci_base_state_machine *sm) { - struct scic_sds_remote_node_context *rnc = container_of(sm, typeof(*rnc), state_machine); + struct scic_sds_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm); struct scic_sds_remote_device *sci_dev; struct domain_device *dev; @@ -318,7 +318,7 @@ static void scic_sds_remote_node_context_resuming_state_enter(struct sci_base_st static void scic_sds_remote_node_context_ready_state_enter(struct sci_base_state_machine *sm) { - struct scic_sds_remote_node_context *rnc = container_of(sm, typeof(*rnc), state_machine); + struct scic_sds_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm); rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_UNSPECIFIED; @@ -328,41 +328,41 @@ static void scic_sds_remote_node_context_ready_state_enter(struct sci_base_state static void scic_sds_remote_node_context_tx_suspended_state_enter(struct sci_base_state_machine *sm) { - struct scic_sds_remote_node_context *rnc = container_of(sm, typeof(*rnc), state_machine); + struct 
scic_sds_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm); scic_sds_remote_node_context_continue_state_transitions(rnc); } static void scic_sds_remote_node_context_tx_rx_suspended_state_enter(struct sci_base_state_machine *sm) { - struct scic_sds_remote_node_context *rnc = container_of(sm, typeof(*rnc), state_machine); + struct scic_sds_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm); scic_sds_remote_node_context_continue_state_transitions(rnc); } static const struct sci_base_state scic_sds_remote_node_context_state_table[] = { - [SCIC_SDS_REMOTE_NODE_CONTEXT_INITIAL_STATE] = { + [SCI_RNC_INITIAL] = { .enter_state = scic_sds_remote_node_context_initial_state_enter, }, - [SCIC_SDS_REMOTE_NODE_CONTEXT_POSTING_STATE] = { + [SCI_RNC_POSTING] = { .enter_state = scic_sds_remote_node_context_posting_state_enter, }, - [SCIC_SDS_REMOTE_NODE_CONTEXT_INVALIDATING_STATE] = { + [SCI_RNC_INVALIDATING] = { .enter_state = scic_sds_remote_node_context_invalidating_state_enter, }, - [SCIC_SDS_REMOTE_NODE_CONTEXT_RESUMING_STATE] = { + [SCI_RNC_RESUMING] = { .enter_state = scic_sds_remote_node_context_resuming_state_enter, }, - [SCIC_SDS_REMOTE_NODE_CONTEXT_READY_STATE] = { + [SCI_RNC_READY] = { .enter_state = scic_sds_remote_node_context_ready_state_enter, }, - [SCIC_SDS_REMOTE_NODE_CONTEXT_TX_SUSPENDED_STATE] = { + [SCI_RNC_TX_SUSPENDED] = { .enter_state = scic_sds_remote_node_context_tx_suspended_state_enter, }, - [SCIC_SDS_REMOTE_NODE_CONTEXT_TX_RX_SUSPENDED_STATE] = { + [SCI_RNC_TX_RX_SUSPENDED] = { .enter_state = scic_sds_remote_node_context_tx_rx_suspended_state_enter, }, - [SCIC_SDS_REMOTE_NODE_CONTEXT_AWAIT_SUSPENSION_STATE] = { }, + [SCI_RNC_AWAIT_SUSPENSION] = { }, }; void scic_sds_remote_node_context_construct(struct scic_sds_remote_node_context *rnc, @@ -373,11 +373,11 @@ void scic_sds_remote_node_context_construct(struct scic_sds_remote_node_context rnc->remote_node_index = remote_node_index; rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_UNSPECIFIED; - sci_base_state_machine_construct(&rnc->state_machine, + sci_base_state_machine_construct(&rnc->sm, scic_sds_remote_node_context_state_table, - SCIC_SDS_REMOTE_NODE_CONTEXT_INITIAL_STATE); + SCI_RNC_INITIAL); - sci_base_state_machine_start(&rnc->state_machine); + sci_base_state_machine_start(&rnc->sm); } enum sci_status scic_sds_remote_node_context_event_handler(struct scic_sds_remote_node_context *sci_rnc, @@ -385,26 +385,24 @@ enum sci_status scic_sds_remote_node_context_event_handler(struct scic_sds_remot { enum scis_sds_remote_node_context_states state; - state = sci_rnc->state_machine.current_state_id; + state = sci_rnc->sm.current_state_id; switch (state) { - case SCIC_SDS_REMOTE_NODE_CONTEXT_POSTING_STATE: + case SCI_RNC_POSTING: switch (scu_get_event_code(event_code)) { case SCU_EVENT_POST_RNC_COMPLETE: - sci_base_state_machine_change_state(&sci_rnc->state_machine, - SCIC_SDS_REMOTE_NODE_CONTEXT_READY_STATE); + sci_change_state(&sci_rnc->sm, SCI_RNC_READY); break; default: goto out; } break; - case SCIC_SDS_REMOTE_NODE_CONTEXT_INVALIDATING_STATE: + case SCI_RNC_INVALIDATING: if (scu_get_event_code(event_code) == SCU_EVENT_POST_RNC_INVALIDATE_COMPLETE) { if (sci_rnc->destination_state == SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_FINAL) - state = SCIC_SDS_REMOTE_NODE_CONTEXT_INITIAL_STATE; + state = SCI_RNC_INITIAL; else - state = SCIC_SDS_REMOTE_NODE_CONTEXT_POSTING_STATE; - sci_base_state_machine_change_state(&sci_rnc->state_machine, - state); + state = SCI_RNC_POSTING; + 
sci_change_state(&sci_rnc->sm, state); } else { switch (scu_get_event_type(event_code)) { case SCU_EVENT_TYPE_RNC_SUSPEND_TX: @@ -421,10 +419,9 @@ enum sci_status scic_sds_remote_node_context_event_handler(struct scic_sds_remot } } break; - case SCIC_SDS_REMOTE_NODE_CONTEXT_RESUMING_STATE: + case SCI_RNC_RESUMING: if (scu_get_event_code(event_code) == SCU_EVENT_POST_RCN_RELEASE) { - sci_base_state_machine_change_state(&sci_rnc->state_machine, - SCIC_SDS_REMOTE_NODE_CONTEXT_READY_STATE); + sci_change_state(&sci_rnc->sm, SCI_RNC_READY); } else { switch (scu_get_event_type(event_code)) { case SCU_EVENT_TYPE_RNC_SUSPEND_TX: @@ -441,32 +438,28 @@ enum sci_status scic_sds_remote_node_context_event_handler(struct scic_sds_remot } } break; - case SCIC_SDS_REMOTE_NODE_CONTEXT_READY_STATE: + case SCI_RNC_READY: switch (scu_get_event_type(event_code)) { case SCU_EVENT_TL_RNC_SUSPEND_TX: - sci_base_state_machine_change_state(&sci_rnc->state_machine, - SCIC_SDS_REMOTE_NODE_CONTEXT_TX_SUSPENDED_STATE); + sci_change_state(&sci_rnc->sm, SCI_RNC_TX_SUSPENDED); sci_rnc->suspension_code = scu_get_event_specifier(event_code); break; case SCU_EVENT_TL_RNC_SUSPEND_TX_RX: - sci_base_state_machine_change_state(&sci_rnc->state_machine, - SCIC_SDS_REMOTE_NODE_CONTEXT_TX_RX_SUSPENDED_STATE); + sci_change_state(&sci_rnc->sm, SCI_RNC_TX_RX_SUSPENDED); sci_rnc->suspension_code = scu_get_event_specifier(event_code); break; default: goto out; } break; - case SCIC_SDS_REMOTE_NODE_CONTEXT_AWAIT_SUSPENSION_STATE: + case SCI_RNC_AWAIT_SUSPENSION: switch (scu_get_event_type(event_code)) { case SCU_EVENT_TL_RNC_SUSPEND_TX: - sci_base_state_machine_change_state(&sci_rnc->state_machine, - SCIC_SDS_REMOTE_NODE_CONTEXT_TX_SUSPENDED_STATE); + sci_change_state(&sci_rnc->sm, SCI_RNC_TX_SUSPENDED); sci_rnc->suspension_code = scu_get_event_specifier(event_code); break; case SCU_EVENT_TL_RNC_SUSPEND_TX_RX: - sci_base_state_machine_change_state(&sci_rnc->state_machine, - SCIC_SDS_REMOTE_NODE_CONTEXT_TX_RX_SUSPENDED_STATE); + sci_change_state(&sci_rnc->sm, SCI_RNC_TX_RX_SUSPENDED); sci_rnc->suspension_code = scu_get_event_specifier(event_code); break; default: @@ -493,22 +486,21 @@ enum sci_status scic_sds_remote_node_context_destruct(struct scic_sds_remote_nod { enum scis_sds_remote_node_context_states state; - state = sci_rnc->state_machine.current_state_id; + state = sci_rnc->sm.current_state_id; switch (state) { - case SCIC_SDS_REMOTE_NODE_CONTEXT_INVALIDATING_STATE: + case SCI_RNC_INVALIDATING: scic_sds_remote_node_context_setup_to_destory(sci_rnc, cb_fn, cb_p); return SCI_SUCCESS; - case SCIC_SDS_REMOTE_NODE_CONTEXT_POSTING_STATE: - case SCIC_SDS_REMOTE_NODE_CONTEXT_RESUMING_STATE: - case SCIC_SDS_REMOTE_NODE_CONTEXT_READY_STATE: - case SCIC_SDS_REMOTE_NODE_CONTEXT_TX_SUSPENDED_STATE: - case SCIC_SDS_REMOTE_NODE_CONTEXT_TX_RX_SUSPENDED_STATE: - case SCIC_SDS_REMOTE_NODE_CONTEXT_AWAIT_SUSPENSION_STATE: + case SCI_RNC_POSTING: + case SCI_RNC_RESUMING: + case SCI_RNC_READY: + case SCI_RNC_TX_SUSPENDED: + case SCI_RNC_TX_RX_SUSPENDED: + case SCI_RNC_AWAIT_SUSPENSION: scic_sds_remote_node_context_setup_to_destory(sci_rnc, cb_fn, cb_p); - sci_base_state_machine_change_state(&sci_rnc->state_machine, - SCIC_SDS_REMOTE_NODE_CONTEXT_INVALIDATING_STATE); + sci_change_state(&sci_rnc->sm, SCI_RNC_INVALIDATING); return SCI_SUCCESS; - case SCIC_SDS_REMOTE_NODE_CONTEXT_INITIAL_STATE: + case SCI_RNC_INITIAL: dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), "%s: invalid state %d\n", __func__, state); /* We have decided that the destruct request on the 
remote node context @@ -530,8 +522,8 @@ enum sci_status scic_sds_remote_node_context_suspend(struct scic_sds_remote_node { enum scis_sds_remote_node_context_states state; - state = sci_rnc->state_machine.current_state_id; - if (state != SCIC_SDS_REMOTE_NODE_CONTEXT_READY_STATE) { + state = sci_rnc->sm.current_state_id; + if (state != SCI_RNC_READY) { dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), "%s: invalid state %d\n", __func__, state); return SCI_FAILURE_INVALID_STATE; @@ -546,8 +538,7 @@ enum sci_status scic_sds_remote_node_context_suspend(struct scic_sds_remote_node SCU_CONTEXT_COMMAND_POST_RNC_SUSPEND_TX); } - sci_base_state_machine_change_state(&sci_rnc->state_machine, - SCIC_SDS_REMOTE_NODE_CONTEXT_AWAIT_SUSPENSION_STATE); + sci_change_state(&sci_rnc->sm, SCI_RNC_AWAIT_SUSPENSION); return SCI_SUCCESS; } @@ -557,27 +548,26 @@ enum sci_status scic_sds_remote_node_context_resume(struct scic_sds_remote_node_ { enum scis_sds_remote_node_context_states state; - state = sci_rnc->state_machine.current_state_id; + state = sci_rnc->sm.current_state_id; switch (state) { - case SCIC_SDS_REMOTE_NODE_CONTEXT_INITIAL_STATE: + case SCI_RNC_INITIAL: if (sci_rnc->remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) return SCI_FAILURE_INVALID_STATE; scic_sds_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p); scic_sds_remote_node_context_construct_buffer(sci_rnc); - sci_base_state_machine_change_state(&sci_rnc->state_machine, - SCIC_SDS_REMOTE_NODE_CONTEXT_POSTING_STATE); + sci_change_state(&sci_rnc->sm, SCI_RNC_POSTING); return SCI_SUCCESS; - case SCIC_SDS_REMOTE_NODE_CONTEXT_POSTING_STATE: - case SCIC_SDS_REMOTE_NODE_CONTEXT_INVALIDATING_STATE: - case SCIC_SDS_REMOTE_NODE_CONTEXT_RESUMING_STATE: + case SCI_RNC_POSTING: + case SCI_RNC_INVALIDATING: + case SCI_RNC_RESUMING: if (sci_rnc->destination_state != SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_READY) return SCI_FAILURE_INVALID_STATE; sci_rnc->user_callback = cb_fn; sci_rnc->user_cookie = cb_p; return SCI_SUCCESS; - case SCIC_SDS_REMOTE_NODE_CONTEXT_TX_SUSPENDED_STATE: { + case SCI_RNC_TX_SUSPENDED: { struct scic_sds_remote_device *sci_dev = rnc_to_dev(sci_rnc); struct domain_device *dev = sci_dev_to_domain(sci_dev); @@ -585,27 +575,23 @@ enum sci_status scic_sds_remote_node_context_resume(struct scic_sds_remote_node_ /* TODO: consider adding a resume action of NONE, INVALIDATE, WRITE_TLCR */ if (dev->dev_type == SAS_END_DEV || dev_is_expander(dev)) - sci_base_state_machine_change_state(&sci_rnc->state_machine, - SCIC_SDS_REMOTE_NODE_CONTEXT_RESUMING_STATE); + sci_change_state(&sci_rnc->sm, SCI_RNC_RESUMING); else if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) { if (sci_dev->is_direct_attached) { /* @todo Fix this since I am being silly in writing to the STPTLDARNI register. 
*/ - sci_base_state_machine_change_state(&sci_rnc->state_machine, - SCIC_SDS_REMOTE_NODE_CONTEXT_RESUMING_STATE); + sci_change_state(&sci_rnc->sm, SCI_RNC_RESUMING); } else { - sci_base_state_machine_change_state(&sci_rnc->state_machine, - SCIC_SDS_REMOTE_NODE_CONTEXT_INVALIDATING_STATE); + sci_change_state(&sci_rnc->sm, SCI_RNC_INVALIDATING); } } else return SCI_FAILURE; return SCI_SUCCESS; } - case SCIC_SDS_REMOTE_NODE_CONTEXT_TX_RX_SUSPENDED_STATE: + case SCI_RNC_TX_RX_SUSPENDED: scic_sds_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p); - sci_base_state_machine_change_state(&sci_rnc->state_machine, - SCIC_SDS_REMOTE_NODE_CONTEXT_RESUMING_STATE); + sci_change_state(&sci_rnc->sm, SCI_RNC_RESUMING); return SCI_FAILURE_INVALID_STATE; - case SCIC_SDS_REMOTE_NODE_CONTEXT_AWAIT_SUSPENSION_STATE: + case SCI_RNC_AWAIT_SUSPENSION: scic_sds_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p); return SCI_SUCCESS; default: @@ -620,8 +606,8 @@ enum sci_status scic_sds_remote_node_context_start_io(struct scic_sds_remote_nod { enum scis_sds_remote_node_context_states state; - state = sci_rnc->state_machine.current_state_id; - if (state != SCIC_SDS_REMOTE_NODE_CONTEXT_READY_STATE) { + state = sci_rnc->sm.current_state_id; + if (state != SCI_RNC_READY) { dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), "%s: invalid state %d\n", __func__, state); return SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED; @@ -634,14 +620,14 @@ enum sci_status scic_sds_remote_node_context_start_task(struct scic_sds_remote_n { enum scis_sds_remote_node_context_states state; - state = sci_rnc->state_machine.current_state_id; + state = sci_rnc->sm.current_state_id; switch (state) { - case SCIC_SDS_REMOTE_NODE_CONTEXT_RESUMING_STATE: - case SCIC_SDS_REMOTE_NODE_CONTEXT_READY_STATE: - case SCIC_SDS_REMOTE_NODE_CONTEXT_AWAIT_SUSPENSION_STATE: + case SCI_RNC_RESUMING: + case SCI_RNC_READY: + case SCI_RNC_AWAIT_SUSPENSION: return SCI_SUCCESS; - case SCIC_SDS_REMOTE_NODE_CONTEXT_TX_SUSPENDED_STATE: - case SCIC_SDS_REMOTE_NODE_CONTEXT_TX_RX_SUSPENDED_STATE: + case SCI_RNC_TX_SUSPENDED: + case SCI_RNC_TX_RX_SUSPENDED: scic_sds_remote_node_context_resume(sci_rnc, NULL, NULL); return SCI_SUCCESS; default: diff --git a/drivers/scsi/isci/remote_node_context.h b/drivers/scsi/isci/remote_node_context.h index f53329f782c..e6c7248be3f 100644 --- a/drivers/scsi/isci/remote_node_context.h +++ b/drivers/scsi/isci/remote_node_context.h @@ -92,45 +92,45 @@ enum scis_sds_remote_node_context_states { * This state is the initial state for a remote node context. On a resume * request the remote node context will transition to the posting state. */ - SCIC_SDS_REMOTE_NODE_CONTEXT_INITIAL_STATE, + SCI_RNC_INITIAL, /** * This is a transition state that posts the RNi to the hardware. Once the RNC * is posted the remote node context will be made ready. */ - SCIC_SDS_REMOTE_NODE_CONTEXT_POSTING_STATE, + SCI_RNC_POSTING, /** * This is a transition state that will post an RNC invalidate to the * hardware. Once the invalidate is complete the remote node context will * transition to the posting state. */ - SCIC_SDS_REMOTE_NODE_CONTEXT_INVALIDATING_STATE, + SCI_RNC_INVALIDATING, /** * This is a transition state that will post an RNC resume to the hardare. * Once the event notification of resume complete is received the remote node * context will transition to the ready state. */ - SCIC_SDS_REMOTE_NODE_CONTEXT_RESUMING_STATE, + SCI_RNC_RESUMING, /** * This is the state that the remote node context must be in to accept io * request operations. 
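scic_sds_remote_node_context_start_io() above accepts new I/O only in SCI_RNC_READY, while start_task() also tolerates SCI_RNC_RESUMING and SCI_RNC_AWAIT_SUSPENSION and triggers a resume when the context is suspended. A condensed standalone sketch of that gating policy (toy names, simplified return codes):

    /* Illustrative only: which RNC states admit new I/O vs. task frames. */
    enum toy_rnc_state {
        TOY_RNC_INITIAL, TOY_RNC_POSTING, TOY_RNC_INVALIDATING,
        TOY_RNC_RESUMING, TOY_RNC_READY,
        TOY_RNC_TX_SUSPENDED, TOY_RNC_TX_RX_SUSPENDED,
        TOY_RNC_AWAIT_SUSPENSION,
    };

    static int toy_rnc_start_io(enum toy_rnc_state s)
    {
        return s == TOY_RNC_READY ? 0 : -1;    /* reject anything else */
    }

    static int toy_rnc_start_task(enum toy_rnc_state s, void (*resume)(void))
    {
        switch (s) {
        case TOY_RNC_RESUMING:
        case TOY_RNC_READY:
        case TOY_RNC_AWAIT_SUSPENSION:
            return 0;
        case TOY_RNC_TX_SUSPENDED:
        case TOY_RNC_TX_RX_SUSPENDED:
            resume();                          /* kick the context first */
            return 0;
        default:
            return -1;
        }
    }

Task management gets the wider window, presumably because it is the path used to recover a context the hardware has suspended.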
*/ - SCIC_SDS_REMOTE_NODE_CONTEXT_READY_STATE, + SCI_RNC_READY, /** * This is the state that the remote node context transitions to when it gets * a TX suspend notification from the hardware. */ - SCIC_SDS_REMOTE_NODE_CONTEXT_TX_SUSPENDED_STATE, + SCI_RNC_TX_SUSPENDED, /** * This is the state that the remote node context transitions to when it gets * a TX RX suspend notification from the hardware. */ - SCIC_SDS_REMOTE_NODE_CONTEXT_TX_RX_SUSPENDED_STATE, + SCI_RNC_TX_RX_SUSPENDED, /** * This state is a wait state for the remote node context that waits for a @@ -138,7 +138,7 @@ enum scis_sds_remote_node_context_states { * there is a request to supend the remote node context or when there is a TC * completion where the remote node will be suspended by the hardware. */ - SCIC_SDS_REMOTE_NODE_CONTEXT_AWAIT_SUSPENSION_STATE + SCI_RNC_AWAIT_SUSPENSION }; /** @@ -194,7 +194,7 @@ struct scic_sds_remote_node_context { /** * This field contains the data for the object's state machine. */ - struct sci_base_state_machine state_machine; + struct sci_base_state_machine sm; }; void scic_sds_remote_node_context_construct(struct scic_sds_remote_node_context *rnc, diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c index 31c9b2c3425..89f0ab925c2 100644 --- a/drivers/scsi/isci/request.c +++ b/drivers/scsi/isci/request.c @@ -649,8 +649,7 @@ static enum sci_status scic_io_request_construct_basic_ssp(struct scic_sds_reque scic_sds_io_request_build_ssp_command_iu(sci_req); - sci_base_state_machine_change_state(&sci_req->state_machine, - SCI_BASE_REQUEST_STATE_CONSTRUCTED); + sci_change_state(&sci_req->sm, SCI_REQ_CONSTRUCTED); return SCI_SUCCESS; } @@ -664,8 +663,7 @@ enum sci_status scic_task_request_construct_ssp( /* Fill in the SSP Task IU */ scic_sds_task_request_build_ssp_task_iu(sci_req); - sci_base_state_machine_change_state(&sci_req->state_machine, - SCI_BASE_REQUEST_STATE_CONSTRUCTED); + sci_change_state(&sci_req->sm, SCI_REQ_CONSTRUCTED); return SCI_SUCCESS; } @@ -687,8 +685,7 @@ static enum sci_status scic_io_request_construct_basic_sata(struct scic_sds_requ copy); if (status == SCI_SUCCESS) - sci_base_state_machine_change_state(&sci_req->state_machine, - SCI_BASE_REQUEST_STATE_CONSTRUCTED); + sci_change_state(&sci_req->sm, SCI_REQ_CONSTRUCTED); return status; } @@ -718,8 +715,7 @@ enum sci_status scic_task_request_construct_sata(struct scic_sds_request *sci_re if (status != SCI_SUCCESS) return status; - sci_base_state_machine_change_state(&sci_req->state_machine, - SCI_BASE_REQUEST_STATE_CONSTRUCTED); + sci_change_state(&sci_req->sm, SCI_REQ_CONSTRUCTED); return status; } @@ -761,8 +757,8 @@ enum sci_status scic_sds_request_start(struct scic_sds_request *sci_req) scic_sds_remote_device_get_sequence(sci_req->target_device)) return SCI_FAILURE; - state = sci_req->state_machine.current_state_id; - if (state != SCI_BASE_REQUEST_STATE_CONSTRUCTED) { + state = sci_req->sm.current_state_id; + if (state != SCI_REQ_CONSTRUCTED) { dev_warn(scic_to_dev(scic), "%s: SCIC IO Request requested to start while in wrong " "state %d\n", __func__, state); @@ -818,8 +814,7 @@ enum sci_status scic_sds_request_start(struct scic_sds_request *sci_req) sci_req->post_context |= scic_sds_io_tag_get_index(sci_req->io_tag); /* Everything is good go ahead and change state */ - sci_base_state_machine_change_state(&sci_req->state_machine, - SCI_BASE_REQUEST_STATE_STARTED); + sci_change_state(&sci_req->sm, SCI_REQ_STARTED); return SCI_SUCCESS; } @@ -832,52 +827,47 @@ scic_sds_io_request_terminate(struct 
scic_sds_request *sci_req) { enum sci_base_request_states state; - state = sci_req->state_machine.current_state_id; + state = sci_req->sm.current_state_id; switch (state) { - case SCI_BASE_REQUEST_STATE_CONSTRUCTED: + case SCI_REQ_CONSTRUCTED: scic_sds_request_set_status(sci_req, SCU_TASK_DONE_TASK_ABORT, SCI_FAILURE_IO_TERMINATED); - sci_base_state_machine_change_state(&sci_req->state_machine, - SCI_BASE_REQUEST_STATE_COMPLETED); + sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED); return SCI_SUCCESS; - case SCI_BASE_REQUEST_STATE_STARTED: - case SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_COMPLETION: - case SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_RESPONSE: - case SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_TC_COMPLETION: - case SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_TC_COMPLETION_SUBSTATE: - case SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_D2H_REG_FIS_SUBSTATE: - case SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_H2D_COMPLETION_SUBSTATE: - case SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_D2H_SUBSTATE: - case SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_H2D_COMPLETION_SUBSTATE: - case SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE: - case SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_DATA_SUBSTATE: - case SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_OUT_TRANSMIT_DATA_SUBSTATE: - case SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_ASSERTED_COMPLETION_SUBSTATE: - case SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_DIAGNOSTIC_COMPLETION_SUBSTATE: - case SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_D2H_RESPONSE_FRAME_SUBSTATE: - sci_base_state_machine_change_state(&sci_req->state_machine, - SCI_BASE_REQUEST_STATE_ABORTING); + case SCI_REQ_STARTED: + case SCI_REQ_TASK_WAIT_TC_COMP: + case SCI_REQ_SMP_WAIT_RESP: + case SCI_REQ_SMP_WAIT_TC_COMP: + case SCI_REQ_STP_UDMA_WAIT_TC_COMP: + case SCI_REQ_STP_UDMA_WAIT_D2H: + case SCI_REQ_STP_NON_DATA_WAIT_H2D: + case SCI_REQ_STP_NON_DATA_WAIT_D2H: + case SCI_REQ_STP_PIO_WAIT_H2D: + case SCI_REQ_STP_PIO_WAIT_FRAME: + case SCI_REQ_STP_PIO_DATA_IN: + case SCI_REQ_STP_PIO_DATA_OUT: + case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED: + case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG: + case SCI_REQ_STP_SOFT_RESET_WAIT_D2H: + sci_change_state(&sci_req->sm, SCI_REQ_ABORTING); return SCI_SUCCESS; - case SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_RESPONSE: - sci_base_state_machine_change_state(&sci_req->state_machine, - SCI_BASE_REQUEST_STATE_ABORTING); - sci_base_state_machine_change_state(&sci_req->state_machine, - SCI_BASE_REQUEST_STATE_COMPLETED); + case SCI_REQ_TASK_WAIT_TC_RESP: + sci_change_state(&sci_req->sm, SCI_REQ_ABORTING); + sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED); return SCI_SUCCESS; - case SCI_BASE_REQUEST_STATE_ABORTING: - sci_base_state_machine_change_state(&sci_req->state_machine, - SCI_BASE_REQUEST_STATE_COMPLETED); + case SCI_REQ_ABORTING: + sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED); return SCI_SUCCESS; - case SCI_BASE_REQUEST_STATE_COMPLETED: + case SCI_REQ_COMPLETED: default: dev_warn(scic_to_dev(sci_req->owning_controller), "%s: SCIC IO Request requested to abort while in wrong " "state %d\n", __func__, - sci_base_state_machine_get_state(&sci_req->state_machine)); + sci_req->sm.current_state_id); break; } @@ -889,8 +879,8 @@ enum sci_status scic_sds_request_complete(struct scic_sds_request *sci_req) enum sci_base_request_states state; struct scic_sds_controller *scic = sci_req->owning_controller; - state = sci_req->state_machine.current_state_id; - if (WARN_ONCE(state != SCI_BASE_REQUEST_STATE_COMPLETED, 
+ state = sci_req->sm.current_state_id; + if (WARN_ONCE(state != SCI_REQ_COMPLETED, "isci: request completion from wrong state (%d)\n", state)) return SCI_FAILURE_INVALID_STATE; @@ -902,8 +892,7 @@ enum sci_status scic_sds_request_complete(struct scic_sds_request *sci_req) sci_req->saved_rx_frame_index); /* XXX can we just stop the machine and remove the 'final' state? */ - sci_base_state_machine_change_state(&sci_req->state_machine, - SCI_BASE_REQUEST_STATE_FINAL); + sci_change_state(&sci_req->sm, SCI_REQ_FINAL); return SCI_SUCCESS; } @@ -913,9 +902,9 @@ enum sci_status scic_sds_io_request_event_handler(struct scic_sds_request *sci_r enum sci_base_request_states state; struct scic_sds_controller *scic = sci_req->owning_controller; - state = sci_req->state_machine.current_state_id; + state = sci_req->sm.current_state_id; - if (state != SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_DATA_SUBSTATE) { + if (state != SCI_REQ_STP_PIO_DATA_IN) { dev_warn(scic_to_dev(scic), "%s: (%x) in wrong state %d\n", __func__, event_code, state); @@ -927,8 +916,7 @@ enum sci_status scic_sds_io_request_event_handler(struct scic_sds_request *sci_r /* We are waiting for data and the SCU has R_ERR the data frame. * Go back to waiting for the D2H Register FIS */ - sci_base_state_machine_change_state(&sci_req->state_machine, - SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE); + sci_change_state(&sci_req->sm, SCI_REQ_STP_PIO_WAIT_FRAME); return SCI_SUCCESS; default: dev_err(scic_to_dev(scic), @@ -967,8 +955,9 @@ static void scic_sds_io_request_copy_response(struct scic_sds_request *sci_req) memcpy(resp_buf, ssp_response->resp_data, len); } -static enum sci_status request_started_state_tc_event(struct scic_sds_request *sci_req, - u32 completion_code) +static enum sci_status +request_started_state_tc_event(struct scic_sds_request *sci_req, + u32 completion_code) { struct ssp_response_iu *resp_iu; u8 datapres; @@ -1110,13 +1099,13 @@ static enum sci_status request_started_state_tc_event(struct scic_sds_request *s */ /* In all cases we will treat this as the completion of the IO req. 
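scic_sds_io_request_terminate() above fails a merely constructed request on the spot, funnels every in-flight substate through SCI_REQ_ABORTING, and moves an already-aborting request to SCI_REQ_COMPLETED; scic_sds_request_complete() then warns if it is called in any other state. A compact sketch of that funneling, with a handful of toy states standing in for the full list:

    /* Illustrative only: the shape of the terminate() switch above. */
    enum toy_req_state {
        TOY_REQ_CONSTRUCTED, TOY_REQ_STARTED,
        TOY_REQ_ABORTING, TOY_REQ_COMPLETED,
    };

    static int toy_req_terminate(enum toy_req_state *s)
    {
        switch (*s) {
        case TOY_REQ_CONSTRUCTED:
            *s = TOY_REQ_COMPLETED;    /* nothing was posted to hardware */
            return 0;
        case TOY_REQ_STARTED:          /* stands in for every started substate */
            *s = TOY_REQ_ABORTING;
            return 0;
        case TOY_REQ_ABORTING:
            *s = TOY_REQ_COMPLETED;
            return 0;
        default:
            return -1;                 /* already completed: invalid request */
        }
    }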
*/ - sci_base_state_machine_change_state(&sci_req->state_machine, - SCI_BASE_REQUEST_STATE_COMPLETED); + sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED); return SCI_SUCCESS; } -static enum sci_status request_aborting_state_tc_event(struct scic_sds_request *sci_req, - u32 completion_code) +static enum sci_status +request_aborting_state_tc_event(struct scic_sds_request *sci_req, + u32 completion_code) { switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { case (SCU_TASK_DONE_GOOD << SCU_COMPLETION_TL_STATUS_SHIFT): @@ -1124,8 +1113,7 @@ static enum sci_status request_aborting_state_tc_event(struct scic_sds_request * scic_sds_request_set_status(sci_req, SCU_TASK_DONE_TASK_ABORT, SCI_FAILURE_IO_TERMINATED); - sci_base_state_machine_change_state(&sci_req->state_machine, - SCI_BASE_REQUEST_STATE_COMPLETED); + sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED); break; default: @@ -1146,8 +1134,7 @@ static enum sci_status ssp_task_request_await_tc_event(struct scic_sds_request * scic_sds_request_set_status(sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS); - sci_base_state_machine_change_state(&sci_req->state_machine, - SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_RESPONSE); + sci_change_state(&sci_req->sm, SCI_REQ_TASK_WAIT_TC_RESP); break; case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO): /* Currently, the decision is to simply allow the task request @@ -1160,27 +1147,28 @@ static enum sci_status ssp_task_request_await_tc_event(struct scic_sds_request * "ACK/NAK timeout\n", __func__, sci_req, completion_code); - sci_base_state_machine_change_state(&sci_req->state_machine, - SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_RESPONSE); + sci_change_state(&sci_req->sm, SCI_REQ_TASK_WAIT_TC_RESP); break; default: - /* All other completion status cause the IO to be complete. If a NAK - * was received, then it is up to the user to retry the request. + /* + * All other completion status cause the IO to be complete. + * If a NAK was received, then it is up to the user to retry + * the request. 
*/ scic_sds_request_set_status(sci_req, SCU_NORMALIZE_COMPLETION_STATUS(completion_code), SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); - sci_base_state_machine_change_state(&sci_req->state_machine, - SCI_BASE_REQUEST_STATE_COMPLETED); + sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED); break; } return SCI_SUCCESS; } -static enum sci_status smp_request_await_response_tc_event(struct scic_sds_request *sci_req, - u32 completion_code) +static enum sci_status +smp_request_await_response_tc_event(struct scic_sds_request *sci_req, + u32 completion_code) { switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): @@ -1191,8 +1179,7 @@ static enum sci_status smp_request_await_response_tc_event(struct scic_sds_reque scic_sds_request_set_status(sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS); - sci_base_state_machine_change_state(&sci_req->state_machine, - SCI_BASE_REQUEST_STATE_COMPLETED); + sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED); break; case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_RESP_TO_ERR): @@ -1209,8 +1196,7 @@ static enum sci_status smp_request_await_response_tc_event(struct scic_sds_reque scic_sds_request_set_status(sci_req, SCU_TASK_DONE_SMP_RESP_TO_ERR, SCI_FAILURE_RETRY_REQUIRED); - sci_base_state_machine_change_state(&sci_req->state_machine, - SCI_BASE_REQUEST_STATE_COMPLETED); + sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED); break; default: @@ -1221,24 +1207,23 @@ static enum sci_status smp_request_await_response_tc_event(struct scic_sds_reque SCU_NORMALIZE_COMPLETION_STATUS(completion_code), SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); - sci_base_state_machine_change_state(&sci_req->state_machine, - SCI_BASE_REQUEST_STATE_COMPLETED); + sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED); break; } return SCI_SUCCESS; } -static enum sci_status smp_request_await_tc_event(struct scic_sds_request *sci_req, - u32 completion_code) +static enum sci_status +smp_request_await_tc_event(struct scic_sds_request *sci_req, + u32 completion_code) { switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): scic_sds_request_set_status(sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS); - sci_base_state_machine_change_state(&sci_req->state_machine, - SCI_BASE_REQUEST_STATE_COMPLETED); + sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED); break; default: /* All other completion status cause the IO to be @@ -1249,8 +1234,7 @@ static enum sci_status smp_request_await_tc_event(struct scic_sds_request *sci_r SCU_NORMALIZE_COMPLETION_STATUS(completion_code), SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); - sci_base_state_machine_change_state(&sci_req->state_machine, - SCI_BASE_REQUEST_STATE_COMPLETED); + sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED); break; } @@ -1311,16 +1295,16 @@ static struct scu_sgl_element *scic_sds_stp_request_pio_get_next_sgl(struct scic return current_sgl; } -static enum sci_status stp_request_non_data_await_h2d_tc_event(struct scic_sds_request *sci_req, - u32 completion_code) +static enum sci_status +stp_request_non_data_await_h2d_tc_event(struct scic_sds_request *sci_req, + u32 completion_code) { switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): scic_sds_request_set_status(sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS); - sci_base_state_machine_change_state(&sci_req->state_machine, - SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_D2H_SUBSTATE); + sci_change_state(&sci_req->sm, SCI_REQ_STP_NON_DATA_WAIT_D2H); break; default: @@ -1332,8 
+1316,7 @@ static enum sci_status stp_request_non_data_await_h2d_tc_event(struct scic_sds_r SCU_NORMALIZE_COMPLETION_STATUS(completion_code), SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); - sci_base_state_machine_change_state(&sci_req->state_machine, - SCI_BASE_REQUEST_STATE_COMPLETED); + sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED); break; } @@ -1509,17 +1492,19 @@ static enum sci_status scic_sds_stp_request_pio_data_in_copy_data( return status; } -static enum sci_status stp_request_pio_await_h2d_completion_tc_event(struct scic_sds_request *sci_req, - u32 completion_code) +static enum sci_status +stp_request_pio_await_h2d_completion_tc_event(struct scic_sds_request *sci_req, + u32 completion_code) { enum sci_status status = SCI_SUCCESS; switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): - scic_sds_request_set_status(sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS); + scic_sds_request_set_status(sci_req, + SCU_TASK_DONE_GOOD, + SCI_SUCCESS); - sci_base_state_machine_change_state(&sci_req->state_machine, - SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE); + sci_change_state(&sci_req->sm, SCI_REQ_STP_PIO_WAIT_FRAME); break; default: @@ -1531,16 +1516,16 @@ static enum sci_status stp_request_pio_await_h2d_completion_tc_event(struct scic SCU_NORMALIZE_COMPLETION_STATUS(completion_code), SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); - sci_base_state_machine_change_state(&sci_req->state_machine, - SCI_BASE_REQUEST_STATE_COMPLETED); + sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED); break; } return status; } -static enum sci_status pio_data_out_tx_done_tc_event(struct scic_sds_request *sci_req, - u32 completion_code) +static enum sci_status +pio_data_out_tx_done_tc_event(struct scic_sds_request *sci_req, + u32 completion_code) { enum sci_status status = SCI_SUCCESS; bool all_frames_transferred = false; @@ -1566,28 +1551,24 @@ static enum sci_status pio_data_out_tx_done_tc_event(struct scic_sds_request *sc /* all data transferred. */ if (all_frames_transferred) { /* - * Change the state to SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_FRAME_SUBSTATE + * Change the state to SCI_REQ_STP_PIO_DATA_IN * and wait for PIO_SETUP fis / or D2H REg fis. */ - sci_base_state_machine_change_state( - &sci_req->state_machine, - SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE - ); + sci_change_state(&sci_req->sm, SCI_REQ_STP_PIO_WAIT_FRAME); } break; + default: /* - * All other completion status cause the IO to be complete. If a NAK - * was received, then it is up to the user to retry the request. */ + * All other completion status cause the IO to be complete. + * If a NAK was received, then it is up to the user to retry + * the request. 
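The *_tc_event() handlers above all follow one pattern: a GOOD task-completion code records success and advances the request to that handler's next wait state, and any other code stores the normalized SCU status, flags a controller-specific error, and completes the request. Roughly, with toy fields standing in for the scic_sds_request bookkeeping:

    /* Illustrative only: the recurring shape of the *_tc_event() handlers. */
    struct toy_req {
        int state;
        int scu_status;
        int sci_status;
    };

    static void toy_tc_event(struct toy_req *r, int code, int good_code,
                             int next_state, int completed_state)
    {
        if (code == good_code) {
            r->scu_status = 0;             /* SCU_TASK_DONE_GOOD analogue */
            r->sci_status = 0;             /* SCI_SUCCESS analogue */
            r->state = next_state;
        } else {
            r->scu_status = code;          /* normalized completion status */
            r->sci_status = -1;            /* controller-specific I/O error */
            r->state = completed_state;
        }
    }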
+ */ scic_sds_request_set_status( sci_req, SCU_NORMALIZE_COMPLETION_STATUS(completion_code), - SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR - ); + SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); - sci_base_state_machine_change_state( - &sci_req->state_machine, - SCI_BASE_REQUEST_STATE_COMPLETED - ); + sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED); break; } @@ -1600,8 +1581,7 @@ static void scic_sds_stp_request_udma_complete_request( enum sci_status sci_status) { scic_sds_request_set_status(request, scu_status, sci_status); - sci_base_state_machine_change_state(&request->state_machine, - SCI_BASE_REQUEST_STATE_COMPLETED); + sci_change_state(&request->sm, SCI_REQ_COMPLETED); } static enum sci_status scic_sds_stp_request_udma_general_frame_handler(struct scic_sds_request *sci_req, @@ -1632,8 +1612,9 @@ static enum sci_status scic_sds_stp_request_udma_general_frame_handler(struct sc return status; } -enum sci_status scic_sds_io_request_frame_handler(struct scic_sds_request *sci_req, - u32 frame_index) +enum sci_status +scic_sds_io_request_frame_handler(struct scic_sds_request *sci_req, + u32 frame_index) { struct scic_sds_controller *scic = sci_req->owning_controller; struct scic_sds_stp_request *stp_req = &sci_req->stp.req; @@ -1641,9 +1622,9 @@ enum sci_status scic_sds_io_request_frame_handler(struct scic_sds_request *sci_r enum sci_status status; ssize_t word_cnt; - state = sci_req->state_machine.current_state_id; + state = sci_req->sm.current_state_id; switch (state) { - case SCI_BASE_REQUEST_STATE_STARTED: { + case SCI_REQ_STARTED: { struct ssp_frame_hdr ssp_hdr; void *frame_header; @@ -1684,20 +1665,21 @@ enum sci_status scic_sds_io_request_frame_handler(struct scic_sds_request *sci_r } /* - * In any case we are done with this frame buffer return it to the - * controller + * In any case we are done with this frame buffer return it to + * the controller */ scic_sds_controller_release_frame(scic, frame_index); return SCI_SUCCESS; } - case SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_RESPONSE: + + case SCI_REQ_TASK_WAIT_TC_RESP: scic_sds_io_request_copy_response(sci_req); - sci_base_state_machine_change_state(&sci_req->state_machine, - SCI_BASE_REQUEST_STATE_COMPLETED); + sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED); scic_sds_controller_release_frame(scic,frame_index); return SCI_SUCCESS; - case SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_RESPONSE: { + + case SCI_REQ_SMP_WAIT_RESP: { struct smp_resp *rsp_hdr = &sci_req->smp.rsp; void *frame_header; @@ -1725,32 +1707,40 @@ enum sci_status scic_sds_io_request_frame_handler(struct scic_sds_request *sci_r scic_sds_request_set_status(sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS); - sci_base_state_machine_change_state(&sci_req->state_machine, - SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_TC_COMPLETION); + sci_change_state(&sci_req->sm, SCI_REQ_SMP_WAIT_TC_COMP); } else { - /* This was not a response frame why did it get forwarded? */ + /* + * This was not a response frame why did it get + * forwarded? 
+ */ dev_err(scic_to_dev(scic), - "%s: SCIC SMP Request 0x%p received unexpected frame " - "%d type 0x%02x\n", __func__, sci_req, - frame_index, rsp_hdr->frame_type); + "%s: SCIC SMP Request 0x%p received unexpected " + "frame %d type 0x%02x\n", + __func__, + sci_req, + frame_index, + rsp_hdr->frame_type); scic_sds_request_set_status(sci_req, SCU_TASK_DONE_SMP_FRM_TYPE_ERR, SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); - sci_base_state_machine_change_state(&sci_req->state_machine, - SCI_BASE_REQUEST_STATE_COMPLETED); + sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED); } scic_sds_controller_release_frame(scic, frame_index); return SCI_SUCCESS; } - case SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_TC_COMPLETION_SUBSTATE: - return scic_sds_stp_request_udma_general_frame_handler(sci_req, frame_index); - case SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_D2H_REG_FIS_SUBSTATE: + + case SCI_REQ_STP_UDMA_WAIT_TC_COMP: + return scic_sds_stp_request_udma_general_frame_handler(sci_req, + frame_index); + + case SCI_REQ_STP_UDMA_WAIT_D2H: /* Use the general frame handler to copy the resposne data */ - status = scic_sds_stp_request_udma_general_frame_handler(sci_req, frame_index); + status = scic_sds_stp_request_udma_general_frame_handler(sci_req, + frame_index); if (status != SCI_SUCCESS) return status; @@ -1758,8 +1748,10 @@ enum sci_status scic_sds_io_request_frame_handler(struct scic_sds_request *sci_r scic_sds_stp_request_udma_complete_request(sci_req, SCU_TASK_DONE_CHECK_RESPONSE, SCI_FAILURE_IO_RESPONSE_VALID); + return SCI_SUCCESS; - case SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_D2H_SUBSTATE: { + + case SCI_REQ_STP_NON_DATA_WAIT_D2H: { struct dev_to_host_fis *frame_header; u32 *frame_buffer; @@ -1769,9 +1761,12 @@ enum sci_status scic_sds_io_request_frame_handler(struct scic_sds_request *sci_r if (status != SCI_SUCCESS) { dev_err(scic_to_dev(scic), - "%s: SCIC IO Request 0x%p could not get frame header " - "for frame index %d, status %x\n", - __func__, stp_req, frame_index, status); + "%s: SCIC IO Request 0x%p could not get frame " + "header for frame index %d, status %x\n", + __func__, + stp_req, + frame_index, + status); return status; } @@ -1802,15 +1797,15 @@ enum sci_status scic_sds_io_request_frame_handler(struct scic_sds_request *sci_r break; } - sci_base_state_machine_change_state(&sci_req->state_machine, - SCI_BASE_REQUEST_STATE_COMPLETED); + sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED); /* Frame has been decoded return it to the controller */ scic_sds_controller_release_frame(scic, frame_index); return status; } - case SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE: { + + case SCI_REQ_STP_PIO_WAIT_FRAME: { struct isci_request *ireq = sci_req_to_ireq(sci_req); struct sas_task *task = isci_request_access_task(ireq); struct dev_to_host_fis *frame_header; @@ -1822,8 +1817,8 @@ enum sci_status scic_sds_io_request_frame_handler(struct scic_sds_request *sci_r if (status != SCI_SUCCESS) { dev_err(scic_to_dev(scic), - "%s: SCIC IO Request 0x%p could not get frame header " - "for frame index %d, status %x\n", + "%s: SCIC IO Request 0x%p could not get frame " + "header for frame index %d, status %x\n", __func__, stp_req, frame_index, status); return status; } @@ -1835,9 +1830,10 @@ enum sci_status scic_sds_io_request_frame_handler(struct scic_sds_request *sci_r frame_index, (void **)&frame_buffer); - /* Get the data from the PIO Setup The SCU Hardware returns - * first word in the frame_header and the rest of the data is in - * the frame buffer so we need to back up one dword + /* Get the data 
from the PIO Setup The SCU Hardware + * returns first word in the frame_header and the rest + * of the data is in the frame buffer so we need to + * back up one dword */ /* transfer_count: first 16bits in the 4th dword */ @@ -1856,31 +1852,33 @@ enum sci_status scic_sds_io_request_frame_handler(struct scic_sds_request *sci_r * request was PIO Data-in or Data out */ if (task->data_dir == DMA_FROM_DEVICE) { - sci_base_state_machine_change_state(&sci_req->state_machine, - SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_DATA_SUBSTATE); + sci_change_state(&sci_req->sm, SCI_REQ_STP_PIO_DATA_IN); } else if (task->data_dir == DMA_TO_DEVICE) { /* Transmit data */ status = scic_sds_stp_request_pio_data_out_transmit_data(sci_req); if (status != SCI_SUCCESS) break; - sci_base_state_machine_change_state(&sci_req->state_machine, - SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_OUT_TRANSMIT_DATA_SUBSTATE); + sci_change_state(&sci_req->sm, SCI_REQ_STP_PIO_DATA_OUT); } break; + case FIS_SETDEVBITS: - sci_base_state_machine_change_state(&sci_req->state_machine, - SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE); + sci_change_state(&sci_req->sm, SCI_REQ_STP_PIO_WAIT_FRAME); break; + case FIS_REGD2H: if (frame_header->status & ATA_BUSY) { - /* Now why is the drive sending a D2H Register FIS when - * it is still busy? Do nothing since we are still in - * the right state. + /* + * Now why is the drive sending a D2H Register + * FIS when it is still busy? Do nothing since + * we are still in the right state. */ dev_dbg(scic_to_dev(scic), "%s: SCIC PIO Request 0x%p received " "D2H Register FIS with BSY status " - "0x%x\n", __func__, stp_req, + "0x%x\n", + __func__, + stp_req, frame_header->status); break; } @@ -1897,9 +1895,9 @@ enum sci_status scic_sds_io_request_frame_handler(struct scic_sds_request *sci_r SCU_TASK_DONE_CHECK_RESPONSE, SCI_FAILURE_IO_RESPONSE_VALID); - sci_base_state_machine_change_state(&sci_req->state_machine, - SCI_BASE_REQUEST_STATE_COMPLETED); + sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED); break; + default: /* FIXME: what do we do here? 
*/ break; @@ -1910,7 +1908,8 @@ enum sci_status scic_sds_io_request_frame_handler(struct scic_sds_request *sci_r return status; } - case SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_DATA_SUBSTATE: { + + case SCI_REQ_STP_PIO_DATA_IN: { struct dev_to_host_fis *frame_header; struct sata_fis_data *frame_buffer; @@ -1920,9 +1919,12 @@ enum sci_status scic_sds_io_request_frame_handler(struct scic_sds_request *sci_r if (status != SCI_SUCCESS) { dev_err(scic_to_dev(scic), - "%s: SCIC IO Request 0x%p could not get frame header " - "for frame index %d, status %x\n", - __func__, stp_req, frame_index, status); + "%s: SCIC IO Request 0x%p could not get frame " + "header for frame index %d, status %x\n", + __func__, + stp_req, + frame_index, + status); return status; } @@ -1930,15 +1932,17 @@ enum sci_status scic_sds_io_request_frame_handler(struct scic_sds_request *sci_r dev_err(scic_to_dev(scic), "%s: SCIC PIO Request 0x%p received frame %d " "with fis type 0x%02x when expecting a data " - "fis.\n", __func__, stp_req, frame_index, + "fis.\n", + __func__, + stp_req, + frame_index, frame_header->fis_type); scic_sds_request_set_status(sci_req, SCU_TASK_DONE_GOOD, SCI_FAILURE_IO_REQUIRES_SCSI_ABORT); - sci_base_state_machine_change_state(&sci_req->state_machine, - SCI_BASE_REQUEST_STATE_COMPLETED); + sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED); /* Frame is decoded return it to the controller */ scic_sds_controller_release_frame(scic, frame_index); @@ -1972,15 +1976,14 @@ enum sci_status scic_sds_io_request_frame_handler(struct scic_sds_request *sci_r SCU_TASK_DONE_CHECK_RESPONSE, SCI_FAILURE_IO_RESPONSE_VALID); - sci_base_state_machine_change_state(&sci_req->state_machine, - SCI_BASE_REQUEST_STATE_COMPLETED); + sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED); } else { - sci_base_state_machine_change_state(&sci_req->state_machine, - SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE); + sci_change_state(&sci_req->sm, SCI_REQ_STP_PIO_WAIT_FRAME); } return status; } - case SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_D2H_RESPONSE_FRAME_SUBSTATE: { + + case SCI_REQ_STP_SOFT_RESET_WAIT_D2H: { struct dev_to_host_fis *frame_header; u32 *frame_buffer; @@ -1989,9 +1992,12 @@ enum sci_status scic_sds_io_request_frame_handler(struct scic_sds_request *sci_r (void **)&frame_header); if (status != SCI_SUCCESS) { dev_err(scic_to_dev(scic), - "%s: SCIC IO Request 0x%p could not get frame header " - "for frame index %d, status %x\n", - __func__, stp_req, frame_index, status); + "%s: SCIC IO Request 0x%p could not get frame " + "header for frame index %d, status %x\n", + __func__, + stp_req, + frame_index, + status); return status; } @@ -2010,35 +2016,43 @@ enum sci_status scic_sds_io_request_frame_handler(struct scic_sds_request *sci_r SCU_TASK_DONE_CHECK_RESPONSE, SCI_FAILURE_IO_RESPONSE_VALID); break; + default: dev_warn(scic_to_dev(scic), "%s: IO Request:0x%p Frame Id:%d protocol " - "violation occurred\n", __func__, stp_req, + "violation occurred\n", + __func__, + stp_req, frame_index); - scic_sds_request_set_status(sci_req, SCU_TASK_DONE_UNEXP_FIS, + scic_sds_request_set_status(sci_req, + SCU_TASK_DONE_UNEXP_FIS, SCI_FAILURE_PROTOCOL_VIOLATION); break; } - sci_base_state_machine_change_state(&sci_req->state_machine, - SCI_BASE_REQUEST_STATE_COMPLETED); + sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED); /* Frame has been decoded return it to the controller */ scic_sds_controller_release_frame(scic, frame_index); return status; } - case SCI_BASE_REQUEST_STATE_ABORTING: - /* TODO: Is it even possible to 
get an unsolicited frame in the + case SCI_REQ_ABORTING: + /* + * TODO: Is it even possible to get an unsolicited frame in the * aborting state? */ scic_sds_controller_release_frame(scic, frame_index); return SCI_SUCCESS; + default: dev_warn(scic_to_dev(scic), - "%s: SCIC IO Request given unexpected frame %x while in " - "state %d\n", __func__, frame_index, state); + "%s: SCIC IO Request given unexpected frame %x while " + "in state %d\n", + __func__, + frame_index, + state); scic_sds_controller_release_frame(scic, frame_index); return SCI_FAILURE_INVALID_STATE; @@ -2075,8 +2089,7 @@ static enum sci_status stp_request_udma_await_tc_event(struct scic_sds_request * * the device so we must change state to wait * for it */ - sci_base_state_machine_change_state(&sci_req->state_machine, - SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_D2H_REG_FIS_SUBSTATE); + sci_change_state(&sci_req->sm, SCI_REQ_STP_UDMA_WAIT_D2H); } break; @@ -2105,45 +2118,45 @@ static enum sci_status stp_request_udma_await_tc_event(struct scic_sds_request * return status; } -static enum sci_status stp_request_soft_reset_await_h2d_asserted_tc_event(struct scic_sds_request *sci_req, - u32 completion_code) +static enum sci_status +stp_request_soft_reset_await_h2d_asserted_tc_event(struct scic_sds_request *sci_req, + u32 completion_code) { switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): scic_sds_request_set_status(sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS); - sci_base_state_machine_change_state(&sci_req->state_machine, - SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_DIAGNOSTIC_COMPLETION_SUBSTATE); + sci_change_state(&sci_req->sm, SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG); break; default: /* - * All other completion status cause the IO to be complete. If a NAK - * was received, then it is up to the user to retry the request. */ + * All other completion status cause the IO to be complete. + * If a NAK was received, then it is up to the user to retry + * the request. 
+ */ scic_sds_request_set_status(sci_req, - SCU_NORMALIZE_COMPLETION_STATUS(completion_code), - SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); + SCU_NORMALIZE_COMPLETION_STATUS(completion_code), + SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); - sci_base_state_machine_change_state(&sci_req->state_machine, - SCI_BASE_REQUEST_STATE_COMPLETED); + sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED); break; } return SCI_SUCCESS; } -static enum sci_status stp_request_soft_reset_await_h2d_diagnostic_tc_event( - struct scic_sds_request *sci_req, - u32 completion_code) +static enum sci_status +stp_request_soft_reset_await_h2d_diagnostic_tc_event(struct scic_sds_request *sci_req, + u32 completion_code) { switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): scic_sds_request_set_status(sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS); - sci_base_state_machine_change_state(&sci_req->state_machine, - SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_D2H_RESPONSE_FRAME_SUBSTATE); + sci_change_state(&sci_req->sm, SCI_REQ_STP_SOFT_RESET_WAIT_D2H); break; default: @@ -2155,8 +2168,7 @@ static enum sci_status stp_request_soft_reset_await_h2d_diagnostic_tc_event( SCU_NORMALIZE_COMPLETION_STATUS(completion_code), SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); - sci_base_state_machine_change_state(&sci_req->state_machine, - SCI_BASE_REQUEST_STATE_COMPLETED); + sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED); break; } @@ -2164,42 +2176,64 @@ static enum sci_status stp_request_soft_reset_await_h2d_diagnostic_tc_event( } enum sci_status -scic_sds_io_request_tc_completion(struct scic_sds_request *sci_req, u32 completion_code) +scic_sds_io_request_tc_completion(struct scic_sds_request *sci_req, + u32 completion_code) { enum sci_base_request_states state; struct scic_sds_controller *scic = sci_req->owning_controller; - state = sci_req->state_machine.current_state_id; + state = sci_req->sm.current_state_id; switch (state) { - case SCI_BASE_REQUEST_STATE_STARTED: - return request_started_state_tc_event(sci_req, completion_code); - case SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_COMPLETION: - return ssp_task_request_await_tc_event(sci_req, completion_code); - case SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_RESPONSE: - return smp_request_await_response_tc_event(sci_req, completion_code); - case SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_TC_COMPLETION: - return smp_request_await_tc_event(sci_req, completion_code); - case SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_TC_COMPLETION_SUBSTATE: - return stp_request_udma_await_tc_event(sci_req, completion_code); - case SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_H2D_COMPLETION_SUBSTATE: - return stp_request_non_data_await_h2d_tc_event(sci_req, completion_code); - case SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_H2D_COMPLETION_SUBSTATE: - return stp_request_pio_await_h2d_completion_tc_event(sci_req, completion_code); - case SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_OUT_TRANSMIT_DATA_SUBSTATE: - return pio_data_out_tx_done_tc_event(sci_req, completion_code); - case SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_ASSERTED_COMPLETION_SUBSTATE: - return stp_request_soft_reset_await_h2d_asserted_tc_event(sci_req, completion_code); - case SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_DIAGNOSTIC_COMPLETION_SUBSTATE: - return stp_request_soft_reset_await_h2d_diagnostic_tc_event(sci_req, completion_code); - case SCI_BASE_REQUEST_STATE_ABORTING: - return request_aborting_state_tc_event(sci_req, completion_code); - default: - dev_warn(scic_to_dev(scic), - 
"%s: SCIC IO Request given task completion notification %x " - "while in wrong state %d\n", __func__, completion_code, - state); - return SCI_FAILURE_INVALID_STATE; + case SCI_REQ_STARTED: + return request_started_state_tc_event(sci_req, completion_code); + + case SCI_REQ_TASK_WAIT_TC_COMP: + return ssp_task_request_await_tc_event(sci_req, + completion_code); + + case SCI_REQ_SMP_WAIT_RESP: + return smp_request_await_response_tc_event(sci_req, + completion_code); + + case SCI_REQ_SMP_WAIT_TC_COMP: + return smp_request_await_tc_event(sci_req, completion_code); + + case SCI_REQ_STP_UDMA_WAIT_TC_COMP: + return stp_request_udma_await_tc_event(sci_req, + completion_code); + + case SCI_REQ_STP_NON_DATA_WAIT_H2D: + return stp_request_non_data_await_h2d_tc_event(sci_req, + completion_code); + + case SCI_REQ_STP_PIO_WAIT_H2D: + return stp_request_pio_await_h2d_completion_tc_event(sci_req, + completion_code); + + case SCI_REQ_STP_PIO_DATA_OUT: + return pio_data_out_tx_done_tc_event(sci_req, completion_code); + + case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED: + return stp_request_soft_reset_await_h2d_asserted_tc_event(sci_req, + completion_code); + + case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG: + return stp_request_soft_reset_await_h2d_diagnostic_tc_event(sci_req, + completion_code); + + case SCI_REQ_ABORTING: + return request_aborting_state_tc_event(sci_req, + completion_code); + + default: + dev_warn(scic_to_dev(scic), + "%s: SCIC IO Request given task completion " + "notification %x while in wrong state %d\n", + __func__, + completion_code, + state); + return SCI_FAILURE_INVALID_STATE; } } @@ -2896,7 +2930,7 @@ static void isci_request_io_request_complete(struct isci_host *isci_host, static void scic_sds_request_started_state_enter(struct sci_base_state_machine *sm) { - struct scic_sds_request *sci_req = container_of(sm, typeof(*sci_req), state_machine); + struct scic_sds_request *sci_req = container_of(sm, typeof(*sci_req), sm); struct isci_request *ireq = sci_req_to_ireq(sci_req); struct domain_device *dev = sci_dev_to_domain(sci_req->target_device); struct sas_task *task; @@ -2910,34 +2944,31 @@ static void scic_sds_request_started_state_enter(struct sci_base_state_machine * * substates */ if (!task && dev->dev_type == SAS_END_DEV) { - sci_base_state_machine_change_state(sm, - SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_COMPLETION); + sci_change_state(sm, SCI_REQ_TASK_WAIT_TC_COMP); } else if (!task && (isci_request_access_tmf(ireq)->tmf_code == isci_tmf_sata_srst_high || isci_request_access_tmf(ireq)->tmf_code == isci_tmf_sata_srst_low)) { - sci_base_state_machine_change_state(sm, - SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_ASSERTED_COMPLETION_SUBSTATE); + sci_change_state(sm, SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED); } else if (task && task->task_proto == SAS_PROTOCOL_SMP) { - sci_base_state_machine_change_state(sm, - SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_RESPONSE); + sci_change_state(sm, SCI_REQ_SMP_WAIT_RESP); } else if (task && sas_protocol_ata(task->task_proto) && !task->ata_task.use_ncq) { u32 state; if (task->data_dir == DMA_NONE) - state = SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_H2D_COMPLETION_SUBSTATE; + state = SCI_REQ_STP_NON_DATA_WAIT_H2D; else if (task->ata_task.dma_xfer) - state = SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_TC_COMPLETION_SUBSTATE; + state = SCI_REQ_STP_UDMA_WAIT_TC_COMP; else /* PIO */ - state = SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_H2D_COMPLETION_SUBSTATE; + state = SCI_REQ_STP_PIO_WAIT_H2D; - sci_base_state_machine_change_state(sm, 
state); + sci_change_state(sm, state); } } static void scic_sds_request_completed_state_enter(struct sci_base_state_machine *sm) { - struct scic_sds_request *sci_req = container_of(sm, typeof(*sci_req), state_machine); + struct scic_sds_request *sci_req = container_of(sm, typeof(*sci_req), sm); struct scic_sds_controller *scic = sci_req->owning_controller; struct isci_host *ihost = scic_to_ihost(scic); struct isci_request *ireq = sci_req_to_ireq(sci_req); @@ -2952,7 +2983,7 @@ static void scic_sds_request_completed_state_enter(struct sci_base_state_machine static void scic_sds_request_aborting_state_enter(struct sci_base_state_machine *sm) { - struct scic_sds_request *sci_req = container_of(sm, typeof(*sci_req), state_machine); + struct scic_sds_request *sci_req = container_of(sm, typeof(*sci_req), sm); /* Setting the abort bit in the Task Context is required by the silicon. */ sci_req->task_context_buffer->abort = 1; @@ -2960,7 +2991,7 @@ static void scic_sds_request_aborting_state_enter(struct sci_base_state_machine static void scic_sds_stp_request_started_non_data_await_h2d_completion_enter(struct sci_base_state_machine *sm) { - struct scic_sds_request *sci_req = container_of(sm, typeof(*sci_req), state_machine); + struct scic_sds_request *sci_req = container_of(sm, typeof(*sci_req), sm); scic_sds_remote_device_set_working_request(sci_req->target_device, sci_req); @@ -2968,7 +2999,7 @@ static void scic_sds_stp_request_started_non_data_await_h2d_completion_enter(str static void scic_sds_stp_request_started_pio_await_h2d_completion_enter(struct sci_base_state_machine *sm) { - struct scic_sds_request *sci_req = container_of(sm, typeof(*sci_req), state_machine); + struct scic_sds_request *sci_req = container_of(sm, typeof(*sci_req), sm); scic_sds_remote_device_set_working_request(sci_req->target_device, sci_req); @@ -2976,7 +3007,7 @@ static void scic_sds_stp_request_started_pio_await_h2d_completion_enter(struct s static void scic_sds_stp_request_started_soft_reset_await_h2d_asserted_completion_enter(struct sci_base_state_machine *sm) { - struct scic_sds_request *sci_req = container_of(sm, typeof(*sci_req), state_machine); + struct scic_sds_request *sci_req = container_of(sm, typeof(*sci_req), sm); scic_sds_remote_device_set_working_request(sci_req->target_device, sci_req); @@ -2984,7 +3015,7 @@ static void scic_sds_stp_request_started_soft_reset_await_h2d_asserted_completio static void scic_sds_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter(struct sci_base_state_machine *sm) { - struct scic_sds_request *sci_req = container_of(sm, typeof(*sci_req), state_machine); + struct scic_sds_request *sci_req = container_of(sm, typeof(*sci_req), sm); struct scu_task_context *task_context; struct host_to_dev_fis *h2d_fis; enum sci_status status; @@ -3003,51 +3034,53 @@ static void scic_sds_stp_request_started_soft_reset_await_h2d_diagnostic_complet } static const struct sci_base_state scic_sds_request_state_table[] = { - [SCI_BASE_REQUEST_STATE_INITIAL] = { }, - [SCI_BASE_REQUEST_STATE_CONSTRUCTED] = { }, - [SCI_BASE_REQUEST_STATE_STARTED] = { + [SCI_REQ_INIT] = { }, + [SCI_REQ_CONSTRUCTED] = { }, + [SCI_REQ_STARTED] = { .enter_state = scic_sds_request_started_state_enter, }, - [SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_H2D_COMPLETION_SUBSTATE] = { + [SCI_REQ_STP_NON_DATA_WAIT_H2D] = { .enter_state = scic_sds_stp_request_started_non_data_await_h2d_completion_enter, }, - [SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_D2H_SUBSTATE] = { }, - 
[SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_H2D_COMPLETION_SUBSTATE] = { + [SCI_REQ_STP_NON_DATA_WAIT_D2H] = { }, + [SCI_REQ_STP_PIO_WAIT_H2D] = { .enter_state = scic_sds_stp_request_started_pio_await_h2d_completion_enter, }, - [SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE] = { }, - [SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_DATA_SUBSTATE] = { }, - [SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_OUT_TRANSMIT_DATA_SUBSTATE] = { }, - [SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_TC_COMPLETION_SUBSTATE] = { }, - [SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_D2H_REG_FIS_SUBSTATE] = { }, - [SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_ASSERTED_COMPLETION_SUBSTATE] = { + [SCI_REQ_STP_PIO_WAIT_FRAME] = { }, + [SCI_REQ_STP_PIO_DATA_IN] = { }, + [SCI_REQ_STP_PIO_DATA_OUT] = { }, + [SCI_REQ_STP_UDMA_WAIT_TC_COMP] = { }, + [SCI_REQ_STP_UDMA_WAIT_D2H] = { }, + [SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED] = { .enter_state = scic_sds_stp_request_started_soft_reset_await_h2d_asserted_completion_enter, }, - [SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_DIAGNOSTIC_COMPLETION_SUBSTATE] = { + [SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG] = { .enter_state = scic_sds_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter, }, - [SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_D2H_RESPONSE_FRAME_SUBSTATE] = { }, - [SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_COMPLETION] = { }, - [SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_RESPONSE] = { }, - [SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_RESPONSE] = { }, - [SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_TC_COMPLETION] = { }, - [SCI_BASE_REQUEST_STATE_COMPLETED] = { + [SCI_REQ_STP_SOFT_RESET_WAIT_D2H] = { }, + [SCI_REQ_TASK_WAIT_TC_COMP] = { }, + [SCI_REQ_TASK_WAIT_TC_RESP] = { }, + [SCI_REQ_SMP_WAIT_RESP] = { }, + [SCI_REQ_SMP_WAIT_TC_COMP] = { }, + [SCI_REQ_COMPLETED] = { .enter_state = scic_sds_request_completed_state_enter, }, - [SCI_BASE_REQUEST_STATE_ABORTING] = { + [SCI_REQ_ABORTING] = { .enter_state = scic_sds_request_aborting_state_enter, }, - [SCI_BASE_REQUEST_STATE_FINAL] = { }, + [SCI_REQ_FINAL] = { }, }; -static void scic_sds_general_request_construct(struct scic_sds_controller *scic, - struct scic_sds_remote_device *sci_dev, - u16 io_tag, struct scic_sds_request *sci_req) +static void +scic_sds_general_request_construct(struct scic_sds_controller *scic, + struct scic_sds_remote_device *sci_dev, + u16 io_tag, + struct scic_sds_request *sci_req) { - sci_base_state_machine_construct(&sci_req->state_machine, + sci_base_state_machine_construct(&sci_req->sm, scic_sds_request_state_table, - SCI_BASE_REQUEST_STATE_INITIAL); - sci_base_state_machine_start(&sci_req->state_machine); + SCI_REQ_INIT); + sci_base_state_machine_start(&sci_req->sm); sci_req->io_tag = io_tag; sci_req->owning_controller = scic; @@ -3322,8 +3355,7 @@ scic_io_request_construct_smp(struct scic_sds_request *sci_req) scu_smp_request_construct_task_context(sci_req, smp_req->req_len); - sci_base_state_machine_change_state(&sci_req->state_machine, - SCI_BASE_REQUEST_STATE_CONSTRUCTED); + sci_change_state(&sci_req->sm, SCI_REQ_CONSTRUCTED); return SCI_SUCCESS; } diff --git a/drivers/scsi/isci/request.h b/drivers/scsi/isci/request.h index 31d6d571747..757cd99ae2e 100644 --- a/drivers/scsi/isci/request.h +++ b/drivers/scsi/isci/request.h @@ -96,37 +96,42 @@ struct scic_sds_stp_request { u32 udma; struct scic_sds_stp_pio_request { - /** - * Total transfer for the entire PIO request recorded at request constuction - * time. 
+ /* + * Total transfer for the entire PIO request recorded + * at request constuction time. * - * @todo Should we just decrement this value for each byte of data transitted - * or received to elemenate the current_transfer_bytes field? + * @todo Should we just decrement this value for each + * byte of data transitted or received to elemenate + * the current_transfer_bytes field? */ u32 total_transfer_bytes; - /** - * Total number of bytes received/transmitted in data frames since the start - * of the IO request. At the end of the IO request this should equal the + /* + * Total number of bytes received/transmitted in data + * frames since the start of the IO request. At the + * end of the IO request this should equal the * total_transfer_bytes. */ u32 current_transfer_bytes; - /** - * The number of bytes requested in the in the PIO setup. + /* + * The number of bytes requested in the in the PIO + * setup. */ u32 pio_transfer_bytes; - /** - * PIO Setup ending status value to tell us if we need to wait for another FIS - * or if the transfer is complete. On the receipt of a D2H FIS this will be + /* + * PIO Setup ending status value to tell us if we need + * to wait for another FIS or if the transfer is + * complete. On the receipt of a D2H FIS this will be * the status field of that FIS. */ u8 ending_status; - /** - * On receipt of a D2H FIS this will be the ending error field if the - * ending_status has the SATA_STATUS_ERR bit set. + /* + * On receipt of a D2H FIS this will be the ending + * error field if the ending_status has the + * SATA_STATUS_ERR bit set. */ u8 ending_error; @@ -138,8 +143,9 @@ struct scic_sds_stp_request { } pio; struct { - /** - * The number of bytes requested in the PIO setup before CDB data frame. + /* + * The number of bytes requested in the PIO setup + * before CDB data frame. */ u32 device_preferred_cdb_length; } packet; @@ -147,57 +153,59 @@ struct scic_sds_stp_request { }; struct scic_sds_request { - /** - * This field contains the information for the base request state machine. + /* + * This field contains the information for the base request state + * machine. */ - struct sci_base_state_machine state_machine; + struct sci_base_state_machine sm; - /** + /* * This field simply points to the controller to which this IO request * is associated. */ struct scic_sds_controller *owning_controller; - /** - * This field simply points to the remote device to which this IO request - * is associated. + /* + * This field simply points to the remote device to which this IO + * request is associated. */ struct scic_sds_remote_device *target_device; - /** + /* * This field is utilized to determine if the SCI user is managing * the IO tag for this request or if the core is managing it. */ bool was_tag_assigned_by_user; - /** + /* * This field indicates the IO tag for this request. The IO tag is * comprised of the task_index and a sequence count. The sequence count * is utilized to help identify tasks from one life to another. */ u16 io_tag; - /** + /* * This field specifies the protocol being utilized for this * IO request. */ enum sci_request_protocol protocol; - /** + /* * This field indicates the completion status taken from the SCUs - * completion code. It indicates the completion result for the SCU hardware. + * completion code. It indicates the completion result for the SCU + * hardware. */ u32 scu_status; - /** - * This field indicates the completion status returned to the SCI user. It - * indicates the users view of the io request completion. 
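
The sm member renamed just above is the state machine that the request-state enter handlers in request.c recover with container_of(sm, typeof(*sci_req), sm), and that sci_change_state() drives through the scic_sds_request_state_table entries shown earlier in this patch. The following is a minimal, self-contained sketch of that pattern; the types, state names and handlers are simplified stand-ins for illustration, not the driver's real definitions.

/*
 * Userspace sketch of the pattern above: a state machine embedded in a
 * request object, a table of per-state enter handlers, and container_of()
 * to recover the outer request from the embedded member.  Simplified
 * stand-in types only.
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

enum req_state { REQ_CONSTRUCTED, REQ_STARTED, REQ_COMPLETED, REQ_MAX };

struct state_machine;

struct state_entry {
	void (*enter_state)(struct state_machine *sm);
};

struct state_machine {
	unsigned int current_state_id;
	const struct state_entry *table;
};

struct request {
	int io_tag;
	struct state_machine sm;	/* embedded, like scic_sds_request::sm */
};

static void req_started_enter(struct state_machine *sm)
{
	/* Recover the outer request from the embedded state machine. */
	struct request *req = container_of(sm, struct request, sm);

	printf("request %d: entered STARTED\n", req->io_tag);
}

static void req_completed_enter(struct state_machine *sm)
{
	struct request *req = container_of(sm, struct request, sm);

	printf("request %d: entered COMPLETED\n", req->io_tag);
}

/* States without an enter handler stay zero-initialized, as in the patch. */
static const struct state_entry req_state_table[REQ_MAX] = {
	[REQ_STARTED]	= { .enter_state = req_started_enter },
	[REQ_COMPLETED]	= { .enter_state = req_completed_enter },
};

/* Analogue of sci_change_state(); exit hooks elided for brevity. */
static void change_state(struct state_machine *sm, unsigned int next)
{
	sm->current_state_id = next;
	if (sm->table[next].enter_state)
		sm->table[next].enter_state(sm);
}

int main(void)
{
	struct request req = {
		.io_tag = 42,
		.sm = { .current_state_id = REQ_CONSTRUCTED,
			.table = req_state_table },
	};

	change_state(&req.sm, REQ_STARTED);
	change_state(&req.sm, REQ_COMPLETED);
	return 0;
}
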
+ /* + * This field indicates the completion status returned to the SCI user. + * It indicates the users view of the io request completion. */ u32 sci_status; - /** - * This field contains the value to be utilized when posting (e.g. Post_TC, - * Post_TC_Abort) this request to the silicon. + /* + * This field contains the value to be utilized when posting + * (e.g. Post_TC, * Post_TC_Abort) this request to the silicon. */ u32 post_context; @@ -208,26 +216,26 @@ struct scic_sds_request { #define SCU_SGL_SIZE ((SCU_IO_REQUEST_SGE_COUNT + 1) / 2) struct scu_sgl_element_pair sg_table[SCU_SGL_SIZE] __attribute__ ((aligned(32))); - /** + /* * This field indicates if this request is a task management request or * normal IO request. */ bool is_task_management_request; - /** - * This field is a pointer to the stored rx frame data. It is used in STP - * internal requests and SMP response frames. If this field is non-NULL the - * saved frame must be released on IO request completion. + /* + * This field is a pointer to the stored rx frame data. It is used in + * STP internal requests and SMP response frames. If this field is + * non-NULL the saved frame must be released on IO request completion. * * @todo In the future do we want to keep a list of RX frame buffers? */ u32 saved_rx_frame_index; - /** - * This field in the recorded device sequence for the io request. This is - * recorded during the build operation and is compared in the start - * operation. If the sequence is different then there was a change of - * devices from the build to start operations. + /* + * This field in the recorded device sequence for the io request. + * This is recorded during the build operation and is compared in the + * start operation. If the sequence is different then there was a + * change of devices from the build to start operations. */ u8 device_sequence; @@ -286,7 +294,7 @@ struct isci_request { dma_addr_t request_daddr; dma_addr_t zero_scatter_daddr; - unsigned int num_sg_entries; /* returned by pci_alloc_sg */ + unsigned int num_sg_entries; /* returned by pci_alloc_sg */ /** Note: "io_request_completion" is completed in two different ways * depending on whether this is a TMF or regular request. @@ -315,104 +323,105 @@ static inline struct isci_request *sci_req_to_ireq(struct scic_sds_request *sci_ * */ enum sci_base_request_states { - /** + /* * Simply the initial state for the base request state machine. */ - SCI_BASE_REQUEST_STATE_INITIAL, + SCI_REQ_INIT, - /** - * This state indicates that the request has been constructed. This state - * is entered from the INITIAL state. + /* + * This state indicates that the request has been constructed. + * This state is entered from the INITIAL state. */ - SCI_BASE_REQUEST_STATE_CONSTRUCTED, + SCI_REQ_CONSTRUCTED, - /** - * This state indicates that the request has been started. This state is - * entered from the CONSTRUCTED state. + /* + * This state indicates that the request has been started. This state + * is entered from the CONSTRUCTED state. 
*/ - SCI_BASE_REQUEST_STATE_STARTED, + SCI_REQ_STARTED, - SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_TC_COMPLETION_SUBSTATE, - SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_D2H_REG_FIS_SUBSTATE, + SCI_REQ_STP_UDMA_WAIT_TC_COMP, + SCI_REQ_STP_UDMA_WAIT_D2H, - SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_H2D_COMPLETION_SUBSTATE, - SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_D2H_SUBSTATE, + SCI_REQ_STP_NON_DATA_WAIT_H2D, + SCI_REQ_STP_NON_DATA_WAIT_D2H, - SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_ASSERTED_COMPLETION_SUBSTATE, - SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_DIAGNOSTIC_COMPLETION_SUBSTATE, - SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_D2H_RESPONSE_FRAME_SUBSTATE, + SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED, + SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG, + SCI_REQ_STP_SOFT_RESET_WAIT_D2H, - /** - * While in this state the IO request object is waiting for the TC completion - * notification for the H2D Register FIS + /* + * While in this state the IO request object is waiting for the TC + * completion notification for the H2D Register FIS */ - SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_H2D_COMPLETION_SUBSTATE, + SCI_REQ_STP_PIO_WAIT_H2D, - /** - * While in this state the IO request object is waiting for either a PIO Setup - * FIS or a D2H register FIS. The type of frame received is based on the - * result of the prior frame and line conditions. + /* + * While in this state the IO request object is waiting for either a + * PIO Setup FIS or a D2H register FIS. The type of frame received is + * based on the result of the prior frame and line conditions. */ - SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE, + SCI_REQ_STP_PIO_WAIT_FRAME, - /** - * While in this state the IO request object is waiting for a DATA frame from - * the device. + /* + * While in this state the IO request object is waiting for a DATA + * frame from the device. */ - SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_DATA_SUBSTATE, + SCI_REQ_STP_PIO_DATA_IN, - /** - * While in this state the IO request object is waiting to transmit the next data - * frame to the device. + /* + * While in this state the IO request object is waiting to transmit + * the next data frame to the device. */ - SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_OUT_TRANSMIT_DATA_SUBSTATE, + SCI_REQ_STP_PIO_DATA_OUT, - /** + /* * The AWAIT_TC_COMPLETION sub-state indicates that the started raw * task management request is waiting for the transmission of the * initial frame (i.e. command, task, etc.). */ - SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_COMPLETION, + SCI_REQ_TASK_WAIT_TC_COMP, - /** + /* * This sub-state indicates that the started task management request * is waiting for the reception of an unsolicited frame * (i.e. response IU). */ - SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_RESPONSE, + SCI_REQ_TASK_WAIT_TC_RESP, - /** + /* * This sub-state indicates that the started task management request * is waiting for the reception of an unsolicited frame * (i.e. response IU). */ - SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_RESPONSE, + SCI_REQ_SMP_WAIT_RESP, - /** - * The AWAIT_TC_COMPLETION sub-state indicates that the started SMP request is - * waiting for the transmission of the initial frame (i.e. command, task, etc.). + /* + * The AWAIT_TC_COMPLETION sub-state indicates that the started SMP + * request is waiting for the transmission of the initial frame + * (i.e. command, task, etc.). 
*/ - SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_TC_COMPLETION, + SCI_REQ_SMP_WAIT_TC_COMP, - /** + /* * This state indicates that the request has completed. - * This state is entered from the STARTED state. This state is entered from - * the ABORTING state. + * This state is entered from the STARTED state. This state is entered + * from the ABORTING state. */ - SCI_BASE_REQUEST_STATE_COMPLETED, + SCI_REQ_COMPLETED, - /** + /* * This state indicates that the request is in the process of being * terminated/aborted. * This state is entered from the CONSTRUCTED state. * This state is entered from the STARTED state. */ - SCI_BASE_REQUEST_STATE_ABORTING, + SCI_REQ_ABORTING, - /** + /* * Simply the final state for the base request state machine. */ - SCI_BASE_REQUEST_STATE_FINAL, + SCI_REQ_FINAL, }; /** @@ -498,13 +507,18 @@ enum sci_base_request_states { enum sci_status scic_sds_request_start(struct scic_sds_request *sci_req); enum sci_status scic_sds_io_request_terminate(struct scic_sds_request *sci_req); -enum sci_status scic_sds_io_request_event_handler(struct scic_sds_request *sci_req, - u32 event_code); -enum sci_status scic_sds_io_request_frame_handler(struct scic_sds_request *sci_req, - u32 frame_index); -enum sci_status scic_sds_task_request_terminate(struct scic_sds_request *sci_req); -extern enum sci_status scic_sds_request_complete(struct scic_sds_request *sci_req); -extern enum sci_status scic_sds_io_request_tc_completion(struct scic_sds_request *sci_req, u32 code); +enum sci_status +scic_sds_io_request_event_handler(struct scic_sds_request *sci_req, + u32 event_code); +enum sci_status +scic_sds_io_request_frame_handler(struct scic_sds_request *sci_req, + u32 frame_index); +enum sci_status +scic_sds_task_request_terminate(struct scic_sds_request *sci_req); +extern enum sci_status +scic_sds_request_complete(struct scic_sds_request *sci_req); +extern enum sci_status +scic_sds_io_request_tc_completion(struct scic_sds_request *sci_req, u32 code); /* XXX open code in caller */ static inline void *scic_request_get_virt_addr(struct scic_sds_request *sci_req, @@ -523,8 +537,8 @@ static inline void *scic_request_get_virt_addr(struct scic_sds_request *sci_req, } /* XXX open code in caller */ -static inline dma_addr_t scic_io_request_get_dma_addr(struct scic_sds_request *sci_req, - void *virt_addr) +static inline dma_addr_t +scic_io_request_get_dma_addr(struct scic_sds_request *sci_req, void *virt_addr) { struct isci_request *ireq = sci_req_to_ireq(sci_req); @@ -543,9 +557,8 @@ static inline dma_addr_t scic_io_request_get_dma_addr(struct scic_sds_request *s * * status of the object as a isci_request_status enum. */ -static inline -enum isci_request_status isci_request_get_state( - struct isci_request *isci_request) +static inline enum isci_request_status +isci_request_get_state(struct isci_request *isci_request) { BUG_ON(isci_request == NULL); @@ -566,9 +579,9 @@ enum isci_request_status isci_request_get_state( * @status: This Parameter is the new status of the object * */ -static inline enum isci_request_status isci_request_change_state( - struct isci_request *isci_request, - enum isci_request_status status) +static inline enum isci_request_status +isci_request_change_state(struct isci_request *isci_request, + enum isci_request_status status) { enum isci_request_status old_state; unsigned long flags; @@ -597,10 +610,10 @@ static inline enum isci_request_status isci_request_change_state( * * state previous to any change. 
*/ -static inline enum isci_request_status isci_request_change_started_to_newstate( - struct isci_request *isci_request, - struct completion *completion_ptr, - enum isci_request_status newstate) +static inline enum isci_request_status +isci_request_change_started_to_newstate(struct isci_request *isci_request, + struct completion *completion_ptr, + enum isci_request_status newstate) { enum isci_request_status old_state; unsigned long flags; @@ -615,6 +628,7 @@ static inline enum isci_request_status isci_request_change_started_to_newstate( isci_request->io_request_completion = completion_ptr; isci_request->status = newstate; } + spin_unlock_irqrestore(&isci_request->state_lock, flags); dev_dbg(&isci_request->isci_host->pdev->dev, @@ -635,13 +649,13 @@ static inline enum isci_request_status isci_request_change_started_to_newstate( * * state previous to any change. */ -static inline enum isci_request_status isci_request_change_started_to_aborted( - struct isci_request *isci_request, - struct completion *completion_ptr) +static inline enum isci_request_status +isci_request_change_started_to_aborted(struct isci_request *isci_request, + struct completion *completion_ptr) { - return isci_request_change_started_to_newstate( - isci_request, completion_ptr, aborted - ); + return isci_request_change_started_to_newstate(isci_request, + completion_ptr, + aborted); } /** * isci_request_free() - This function frees the request object. @@ -649,62 +663,33 @@ static inline enum isci_request_status isci_request_change_started_to_aborted( * @isci_request: This parameter points to the isci_request object * */ -static inline void isci_request_free( - struct isci_host *isci_host, - struct isci_request *isci_request) +static inline void isci_request_free(struct isci_host *isci_host, + struct isci_request *isci_request) { if (!isci_request) return; /* release the dma memory if we fail. 
*/ - dma_pool_free(isci_host->dma_pool, isci_request, + dma_pool_free(isci_host->dma_pool, + isci_request, isci_request->request_daddr); } +#define isci_request_access_task(req) ((req)->ttype_ptr.io_task_ptr) -/* #define ISCI_REQUEST_VALIDATE_ACCESS - */ - -#ifdef ISCI_REQUEST_VALIDATE_ACCESS - -static inline -struct sas_task *isci_request_access_task(struct isci_request *isci_request) -{ - BUG_ON(isci_request->ttype != io_task); - return isci_request->ttype_ptr.io_task_ptr; -} - -static inline -struct isci_tmf *isci_request_access_tmf(struct isci_request *isci_request) -{ - BUG_ON(isci_request->ttype != tmf_task); - return isci_request->ttype_ptr.tmf_task_ptr; -} - -#else /* not ISCI_REQUEST_VALIDATE_ACCESS */ - -#define isci_request_access_task(RequestPtr) \ - ((RequestPtr)->ttype_ptr.io_task_ptr) - -#define isci_request_access_tmf(RequestPtr) \ - ((RequestPtr)->ttype_ptr.tmf_task_ptr) - -#endif /* not ISCI_REQUEST_VALIDATE_ACCESS */ - +#define isci_request_access_tmf(req) ((req)->ttype_ptr.tmf_task_ptr) -int isci_request_alloc_tmf( - struct isci_host *isci_host, - struct isci_tmf *isci_tmf, - struct isci_request **isci_request, - struct isci_remote_device *isci_device, - gfp_t gfp_flags); +int isci_request_alloc_tmf(struct isci_host *isci_host, + struct isci_tmf *isci_tmf, + struct isci_request **isci_request, + struct isci_remote_device *isci_device, + gfp_t gfp_flags); -int isci_request_execute( - struct isci_host *isci_host, - struct sas_task *task, - struct isci_request **request, - gfp_t gfp_flags); +int isci_request_execute(struct isci_host *isci_host, + struct sas_task *task, + struct isci_request **request, + gfp_t gfp_flags); /** * isci_request_unmap_sgl() - This function unmaps the DMA address of a given @@ -713,9 +698,8 @@ int isci_request_execute( * @*pdev: This Parameter is the pci_device struct for the controller * */ -static inline void isci_request_unmap_sgl( - struct isci_request *request, - struct pci_dev *pdev) +static inline void +isci_request_unmap_sgl(struct isci_request *request, struct pci_dev *pdev) { struct sas_task *task = isci_request_access_task(request); @@ -758,9 +742,9 @@ static inline void isci_request_unmap_sgl( * * pointer to the next sge for specified request. 
*/ -static inline void *isci_request_io_request_get_next_sge( - struct isci_request *request, - void *current_sge_address) +static inline void * +isci_request_io_request_get_next_sge(struct isci_request *request, + void *current_sge_address) { struct sas_task *task = isci_request_access_task(request); void *ret = NULL; @@ -791,15 +775,20 @@ static inline void *isci_request_io_request_get_next_sge( return ret; } -void isci_terminate_pending_requests(struct isci_host *isci_host, - struct isci_remote_device *isci_device, - enum isci_request_status new_request_state); -enum sci_status scic_task_request_construct(struct scic_sds_controller *scic, - struct scic_sds_remote_device *sci_dev, - u16 io_tag, - struct scic_sds_request *sci_req); -enum sci_status scic_task_request_construct_ssp(struct scic_sds_request *sci_req); -enum sci_status scic_task_request_construct_sata(struct scic_sds_request *sci_req); -void scic_stp_io_request_set_ncq_tag(struct scic_sds_request *sci_req, u16 ncq_tag); +void +isci_terminate_pending_requests(struct isci_host *isci_host, + struct isci_remote_device *isci_device, + enum isci_request_status new_request_state); +enum sci_status +scic_task_request_construct(struct scic_sds_controller *scic, + struct scic_sds_remote_device *sci_dev, + u16 io_tag, + struct scic_sds_request *sci_req); +enum sci_status +scic_task_request_construct_ssp(struct scic_sds_request *sci_req); +enum sci_status +scic_task_request_construct_sata(struct scic_sds_request *sci_req); +void +scic_stp_io_request_set_ncq_tag(struct scic_sds_request *sci_req, u16 ncq_tag); void scic_sds_smp_request_copy_response(struct scic_sds_request *sci_req); #endif /* !defined(_ISCI_REQUEST_H_) */ diff --git a/drivers/scsi/isci/state_machine.c b/drivers/scsi/isci/state_machine.c index 1bcd925e502..8cfefb959f2 100644 --- a/drivers/scsi/isci/state_machine.c +++ b/drivers/scsi/isci/state_machine.c @@ -127,16 +127,7 @@ void sci_base_state_machine_stop( sci_state_machine_exit_state(sm); } -/** - * This method performs an update to the current state of the state machine. - * @sm: This parameter specifies the state machine for which - * the caller wishes to perform a state change. - * @next_state: This parameter specifies the new state for the state machine. - * - */ -void sci_base_state_machine_change_state( - struct sci_base_state_machine *sm, - u32 next_state) +void sci_change_state(struct sci_base_state_machine *sm, u32 next_state) { sci_state_machine_exit_state(sm); @@ -145,18 +136,3 @@ void sci_base_state_machine_change_state( sci_state_machine_enter_state(sm); } - -/** - * This method simply returns the current state of the state machine to the - * caller. - * @sm: This parameter specifies the state machine for which to - * retrieve the current state. - * - * This method returns a u32 value indicating the current state for the - * supplied state machine. 
- */ -u32 sci_base_state_machine_get_state(struct sci_base_state_machine *sm) -{ - return sm->current_state_id; -} - diff --git a/drivers/scsi/isci/state_machine.h b/drivers/scsi/isci/state_machine.h index 067ed9126bf..6cb55a0adc5 100644 --- a/drivers/scsi/isci/state_machine.h +++ b/drivers/scsi/isci/state_machine.h @@ -117,8 +117,6 @@ void sci_base_state_machine_construct(struct sci_base_state_machine *sm, u32 initial_state); void sci_base_state_machine_start(struct sci_base_state_machine *sm); void sci_base_state_machine_stop(struct sci_base_state_machine *sm); -void sci_base_state_machine_change_state(struct sci_base_state_machine *sm, - u32 next_state); -u32 sci_base_state_machine_get_state(struct sci_base_state_machine *sm); +void sci_change_state(struct sci_base_state_machine *sm, u32 next_state); #endif /* _SCI_BASE_STATE_MACHINE_H_ */ -- cgit v1.2.3-70-g09d2 From 7c78da3175177c905a75c54b5830029c778494ea Mon Sep 17 00:00:00 2001 From: Dan Williams <dan.j.williams@intel.com> Date: Wed, 1 Jun 2011 16:00:01 -0700 Subject: isci: remove 'min memory' infrastructure The old 'core' had aspirations of running in severely memory constrained environments like bios option-rom, it's not needed for Linux and gets in the way of other cleanups (like unifying/reducing the number of structure members in scic_sds_controller/isci_host). This also fixes a theoretical bug in that the driver would blindly override the silicon advertised limits for number of ports, task contexts, and remote node contexts. Signed-off-by: Dan Williams <dan.j.williams@intel.com> --- drivers/scsi/isci/host.c | 305 ++++++++------------------ drivers/scsi/isci/host.h | 13 -- drivers/scsi/isci/isci.h | 90 +------- drivers/scsi/isci/request.h | 2 +- drivers/scsi/isci/unsolicited_frame_control.c | 186 ++++------------ drivers/scsi/isci/unsolicited_frame_control.h | 20 +- 6 files changed, 144 insertions(+), 472 deletions(-) (limited to 'drivers/scsi/isci/request.h') diff --git a/drivers/scsi/isci/host.c b/drivers/scsi/isci/host.c index 009c0ee83ed..41a7c5099de 100644 --- a/drivers/scsi/isci/host.c +++ b/drivers/scsi/isci/host.c @@ -70,46 +70,24 @@ #define SCU_CONTEXT_RAM_INIT_STALL_TIME 200 -/** - * smu_dcc_get_max_ports() - - * - * This macro returns the maximum number of logical ports supported by the - * hardware. The caller passes in the value read from the device context - * capacity register and this macro will mash and shift the value appropriately. - */ -#define smu_dcc_get_max_ports(dcc_value) \ +#define smu_max_ports(dcc_value) \ (\ (((dcc_value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_LP_MASK) \ >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_LP_SHIFT) + 1 \ ) -/** - * smu_dcc_get_max_task_context() - - * - * This macro returns the maximum number of task contexts supported by the - * hardware. The caller passes in the value read from the device context - * capacity register and this macro will mash and shift the value appropriately. - */ -#define smu_dcc_get_max_task_context(dcc_value) \ +#define smu_max_task_contexts(dcc_value) \ (\ (((dcc_value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_TC_MASK) \ >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_TC_SHIFT) + 1 \ ) -/** - * smu_dcc_get_max_remote_node_context() - - * - * This macro returns the maximum number of remote node contexts supported by - * the hardware. The caller passes in the value read from the device context - * capacity register and this macro will mash and shift the value appropriately. 
- */ -#define smu_dcc_get_max_remote_node_context(dcc_value) \ +#define smu_max_rncs(dcc_value) \ (\ (((dcc_value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_MASK) \ >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_SHIFT) + 1 \ ) - #define SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT 100 /** @@ -153,9 +131,8 @@ INCREMENT_QUEUE_GET(\ (index), \ (cycle), \ - (controller)->completion_queue_entries, \ - SMU_CQGR_CYCLE_BIT \ - ) + SCU_MAX_COMPLETION_QUEUE_ENTRIES, \ + SMU_CQGR_CYCLE_BIT) /** * INCREMENT_EVENT_QUEUE_GET() - @@ -167,7 +144,7 @@ INCREMENT_QUEUE_GET(\ (index), \ (cycle), \ - (controller)->completion_event_entries, \ + SCU_MAX_EVENTS, \ SMU_CQGR_EVENT_CYCLE_BIT \ ) @@ -843,10 +820,9 @@ static void scic_sds_controller_initialize_completion_queue(struct scic_sds_cont scic->completion_queue_get = 0; - completion_queue_control_value = ( - SMU_CQC_QUEUE_LIMIT_SET(scic->completion_queue_entries - 1) - | SMU_CQC_EVENT_LIMIT_SET(scic->completion_event_entries - 1) - ); + completion_queue_control_value = + (SMU_CQC_QUEUE_LIMIT_SET(SCU_MAX_COMPLETION_QUEUE_ENTRIES - 1) | + SMU_CQC_EVENT_LIMIT_SET(SCU_MAX_EVENTS - 1)); writel(completion_queue_control_value, &scic->smu_registers->completion_queue_control); @@ -873,7 +849,7 @@ static void scic_sds_controller_initialize_completion_queue(struct scic_sds_cont &scic->smu_registers->completion_queue_put); /* Initialize the cycle bit of the completion queue entries */ - for (index = 0; index < scic->completion_queue_entries; index++) { + for (index = 0; index < SCU_MAX_COMPLETION_QUEUE_ENTRIES; index++) { /* * If get.cycle_bit != completion_queue.cycle_bit * its not a valid completion queue entry @@ -890,8 +866,7 @@ static void scic_sds_controller_initialize_unsolicited_frame_queue(struct scic_s /* Write the queue size */ frame_queue_control_value = - SCU_UFQC_GEN_VAL(QUEUE_SIZE, - scic->uf_control.address_table.count); + SCU_UFQC_GEN_VAL(QUEUE_SIZE, SCU_MAX_UNSOLICITED_FRAMES); writel(frame_queue_control_value, &scic->scu_registers->sdma.unsolicited_frame_queue_control); @@ -1863,15 +1838,6 @@ static enum sci_status scic_controller_construct(struct scic_sds_controller *sci sci_init_timer(&scic->timer, controller_timeout); - /* Set the default maximum values */ - scic->completion_event_entries = SCU_EVENT_COUNT; - scic->completion_queue_entries = SCU_COMPLETION_QUEUE_COUNT; - scic->remote_node_entries = SCI_MAX_REMOTE_DEVICES; - scic->logical_port_entries = SCI_MAX_PORTS; - scic->task_context_entries = SCU_IO_REQUEST_COUNT; - scic->uf_control.buffers.count = SCU_UNSOLICITED_FRAME_COUNT; - scic->uf_control.address_table.count = SCU_UNSOLICITED_FRAME_COUNT; - /* Initialize the User and OEM parameters to default values. 
*/ scic_sds_controller_set_default_config_parameters(scic); @@ -2207,44 +2173,6 @@ static void scic_sds_controller_afe_initialization(struct scic_sds_controller *s udelay(AFE_REGISTER_WRITE_DELAY); } -static enum sci_status scic_controller_set_mode(struct scic_sds_controller *scic, - enum sci_controller_mode operating_mode) -{ - enum sci_status status = SCI_SUCCESS; - - if ((scic->sm.current_state_id == SCIC_INITIALIZING) || - (scic->sm.current_state_id == SCIC_INITIALIZED)) { - switch (operating_mode) { - case SCI_MODE_SPEED: - scic->remote_node_entries = SCI_MAX_REMOTE_DEVICES; - scic->task_context_entries = SCU_IO_REQUEST_COUNT; - scic->uf_control.buffers.count = - SCU_UNSOLICITED_FRAME_COUNT; - scic->completion_event_entries = SCU_EVENT_COUNT; - scic->completion_queue_entries = - SCU_COMPLETION_QUEUE_COUNT; - break; - - case SCI_MODE_SIZE: - scic->remote_node_entries = SCI_MIN_REMOTE_DEVICES; - scic->task_context_entries = SCI_MIN_IO_REQUESTS; - scic->uf_control.buffers.count = - SCU_MIN_UNSOLICITED_FRAMES; - scic->completion_event_entries = SCU_MIN_EVENTS; - scic->completion_queue_entries = - SCU_MIN_COMPLETION_QUEUE_ENTRIES; - break; - - default: - status = SCI_FAILURE_INVALID_PARAMETER_VALUE; - break; - } - } else - status = SCI_FAILURE_INVALID_STATE; - - return status; -} - static void scic_sds_controller_initialize_power_control(struct scic_sds_controller *scic) { sci_init_timer(&scic->power_control.timer, power_control_timeout); @@ -2259,9 +2187,9 @@ static void scic_sds_controller_initialize_power_control(struct scic_sds_control static enum sci_status scic_controller_initialize(struct scic_sds_controller *scic) { struct sci_base_state_machine *sm = &scic->sm; - enum sci_status result = SCI_SUCCESS; struct isci_host *ihost = scic_to_ihost(scic); - u32 index, state; + enum sci_status result = SCI_FAILURE; + unsigned long i, state, val; if (scic->sm.current_state_id != SCIC_RESET) { dev_warn(scic_to_dev(scic), @@ -2286,133 +2214,81 @@ static enum sci_status scic_controller_initialize(struct scic_sds_controller *sc * / presently they seem to be wrong. 
*/ scic_sds_controller_afe_initialization(scic); - if (result == SCI_SUCCESS) { - u32 status; - u32 terminate_loop; - - /* Take the hardware out of reset */ - writel(0, &scic->smu_registers->soft_reset_control); - /* - * / @todo Provide meaningfull error code for hardware failure - * result = SCI_FAILURE_CONTROLLER_HARDWARE; */ - result = SCI_FAILURE; - terminate_loop = 100; - - while (terminate_loop-- && (result != SCI_SUCCESS)) { - /* Loop until the hardware reports success */ - udelay(SCU_CONTEXT_RAM_INIT_STALL_TIME); - status = readl(&scic->smu_registers->control_status); - - if ((status & SCU_RAM_INIT_COMPLETED) == - SCU_RAM_INIT_COMPLETED) - result = SCI_SUCCESS; - } - } - - if (result == SCI_SUCCESS) { - u32 max_supported_ports; - u32 max_supported_devices; - u32 max_supported_io_requests; - u32 device_context_capacity; + /* Take the hardware out of reset */ + writel(0, &scic->smu_registers->soft_reset_control); - /* - * Determine what are the actaul device capacities that the - * hardware will support */ - device_context_capacity = - readl(&scic->smu_registers->device_context_capacity); - - - max_supported_ports = smu_dcc_get_max_ports(device_context_capacity); - max_supported_devices = smu_dcc_get_max_remote_node_context(device_context_capacity); - max_supported_io_requests = smu_dcc_get_max_task_context(device_context_capacity); + /* + * / @todo Provide meaningfull error code for hardware failure + * result = SCI_FAILURE_CONTROLLER_HARDWARE; */ + for (i = 100; i >= 1; i--) { + u32 status; - /* - * Make all PEs that are unassigned match up with the - * logical ports - */ - for (index = 0; index < max_supported_ports; index++) { - struct scu_port_task_scheduler_group_registers __iomem - *ptsg = &scic->scu_registers->peg0.ptsg; + /* Loop until the hardware reports success */ + udelay(SCU_CONTEXT_RAM_INIT_STALL_TIME); + status = readl(&scic->smu_registers->control_status); - writel(index, &ptsg->protocol_engine[index]); - } + if ((status & SCU_RAM_INIT_COMPLETED) == SCU_RAM_INIT_COMPLETED) + break; + } + if (i == 0) + goto out; - /* Record the smaller of the two capacity values */ - scic->logical_port_entries = - min(max_supported_ports, scic->logical_port_entries); + /* + * Determine what are the actaul device capacities that the + * hardware will support */ + val = readl(&scic->smu_registers->device_context_capacity); - scic->task_context_entries = - min(max_supported_io_requests, - scic->task_context_entries); + /* Record the smaller of the two capacity values */ + scic->logical_port_entries = min(smu_max_ports(val), SCI_MAX_PORTS); + scic->task_context_entries = min(smu_max_task_contexts(val), SCI_MAX_IO_REQUESTS); + scic->remote_node_entries = min(smu_max_rncs(val), SCI_MAX_REMOTE_DEVICES); - scic->remote_node_entries = - min(max_supported_devices, scic->remote_node_entries); + /* + * Make all PEs that are unassigned match up with the + * logical ports + */ + for (i = 0; i < scic->logical_port_entries; i++) { + struct scu_port_task_scheduler_group_registers __iomem + *ptsg = &scic->scu_registers->peg0.ptsg; - /* - * Now that we have the correct hardware reported minimum values - * build the MDL for the controller. Default to a performance - * configuration. 
- */ - scic_controller_set_mode(scic, SCI_MODE_SPEED); + writel(i, &ptsg->protocol_engine[i]); } /* Initialize hardware PCI Relaxed ordering in DMA engines */ - if (result == SCI_SUCCESS) { - u32 dma_configuration; - - /* Configure the payload DMA */ - dma_configuration = - readl(&scic->scu_registers->sdma.pdma_configuration); - dma_configuration |= - SCU_PDMACR_GEN_BIT(PCI_RELAXED_ORDERING_ENABLE); - writel(dma_configuration, - &scic->scu_registers->sdma.pdma_configuration); - - /* Configure the control DMA */ - dma_configuration = - readl(&scic->scu_registers->sdma.cdma_configuration); - dma_configuration |= - SCU_CDMACR_GEN_BIT(PCI_RELAXED_ORDERING_ENABLE); - writel(dma_configuration, - &scic->scu_registers->sdma.cdma_configuration); - } + val = readl(&scic->scu_registers->sdma.pdma_configuration); + val |= SCU_PDMACR_GEN_BIT(PCI_RELAXED_ORDERING_ENABLE); + writel(val, &scic->scu_registers->sdma.pdma_configuration); + + val = readl(&scic->scu_registers->sdma.cdma_configuration); + val |= SCU_CDMACR_GEN_BIT(PCI_RELAXED_ORDERING_ENABLE); + writel(val, &scic->scu_registers->sdma.cdma_configuration); /* * Initialize the PHYs before the PORTs because the PHY registers * are accessed during the port initialization. */ - if (result == SCI_SUCCESS) { - /* Initialize the phys */ - for (index = 0; - (result == SCI_SUCCESS) && (index < SCI_MAX_PHYS); - index++) { - result = scic_sds_phy_initialize( - &ihost->phys[index].sci, - &scic->scu_registers->peg0.pe[index].tl, - &scic->scu_registers->peg0.pe[index].ll); - } + for (i = 0; i < SCI_MAX_PHYS; i++) { + result = scic_sds_phy_initialize(&ihost->phys[i].sci, + &scic->scu_registers->peg0.pe[i].tl, + &scic->scu_registers->peg0.pe[i].ll); + if (result != SCI_SUCCESS) + goto out; } - if (result == SCI_SUCCESS) { - /* Initialize the logical ports */ - for (index = 0; - (index < scic->logical_port_entries) && - (result == SCI_SUCCESS); - index++) { - result = scic_sds_port_initialize( - &ihost->ports[index].sci, - &scic->scu_registers->peg0.ptsg.port[index], - &scic->scu_registers->peg0.ptsg.protocol_engine, - &scic->scu_registers->peg0.viit[index]); - } + for (i = 0; i < scic->logical_port_entries; i++) { + result = scic_sds_port_initialize(&ihost->ports[i].sci, + &scic->scu_registers->peg0.ptsg.port[i], + &scic->scu_registers->peg0.ptsg.protocol_engine, + &scic->scu_registers->peg0.viit[i]); + + if (result != SCI_SUCCESS) + goto out; } - if (result == SCI_SUCCESS) - result = scic_sds_port_configuration_agent_initialize( - scic, - &scic->port_agent); + result = scic_sds_port_configuration_agent_initialize(scic, &scic->port_agent); + out: /* Advance the controller state machine */ if (result == SCI_SUCCESS) state = SCIC_INITIALIZED; @@ -2480,47 +2356,38 @@ static enum sci_status scic_user_parameters_set( static int scic_controller_mem_init(struct scic_sds_controller *scic) { struct device *dev = scic_to_dev(scic); - dma_addr_t dma_handle; - enum sci_status result; + dma_addr_t dma; + size_t size; + int err; - scic->completion_queue = dmam_alloc_coherent(dev, - scic->completion_queue_entries * sizeof(u32), - &dma_handle, GFP_KERNEL); + size = SCU_MAX_COMPLETION_QUEUE_ENTRIES * sizeof(u32); + scic->completion_queue = dmam_alloc_coherent(dev, size, &dma, GFP_KERNEL); if (!scic->completion_queue) return -ENOMEM; - writel(lower_32_bits(dma_handle), - &scic->smu_registers->completion_queue_lower); - writel(upper_32_bits(dma_handle), - &scic->smu_registers->completion_queue_upper); + writel(lower_32_bits(dma), &scic->smu_registers->completion_queue_lower); + 
writel(upper_32_bits(dma), &scic->smu_registers->completion_queue_upper); - scic->remote_node_context_table = dmam_alloc_coherent(dev, - scic->remote_node_entries * - sizeof(union scu_remote_node_context), - &dma_handle, GFP_KERNEL); + size = scic->remote_node_entries * sizeof(union scu_remote_node_context); + scic->remote_node_context_table = dmam_alloc_coherent(dev, size, &dma, + GFP_KERNEL); if (!scic->remote_node_context_table) return -ENOMEM; - writel(lower_32_bits(dma_handle), - &scic->smu_registers->remote_node_context_lower); - writel(upper_32_bits(dma_handle), - &scic->smu_registers->remote_node_context_upper); + writel(lower_32_bits(dma), &scic->smu_registers->remote_node_context_lower); + writel(upper_32_bits(dma), &scic->smu_registers->remote_node_context_upper); - scic->task_context_table = dmam_alloc_coherent(dev, - scic->task_context_entries * - sizeof(struct scu_task_context), - &dma_handle, GFP_KERNEL); + size = scic->task_context_entries * sizeof(struct scu_task_context), + scic->task_context_table = dmam_alloc_coherent(dev, size, &dma, GFP_KERNEL); if (!scic->task_context_table) return -ENOMEM; - writel(lower_32_bits(dma_handle), - &scic->smu_registers->host_task_table_lower); - writel(upper_32_bits(dma_handle), - &scic->smu_registers->host_task_table_upper); + writel(lower_32_bits(dma), &scic->smu_registers->host_task_table_lower); + writel(upper_32_bits(dma), &scic->smu_registers->host_task_table_upper); - result = scic_sds_unsolicited_frame_control_construct(scic); - if (result) - return result; + err = scic_sds_unsolicited_frame_control_construct(scic); + if (err) + return err; /* * Inform the silicon as to the location of the UF headers and diff --git a/drivers/scsi/isci/host.h b/drivers/scsi/isci/host.h index 04698dd75ad..740350043d8 100644 --- a/drivers/scsi/isci/host.h +++ b/drivers/scsi/isci/host.h @@ -239,18 +239,6 @@ struct scic_sds_controller { */ u32 logical_port_entries; - /** - * This field is the minimum number of hardware supported completion queue - * entries and the software requested completion queue entries. - */ - u32 completion_queue_entries; - - /** - * This field is the minimum number of hardware supported event entries and - * the software requested event entries. - */ - u32 completion_event_entries; - /** * This field is the minimum number of devices supported by the hardware and * the number of devices requested by the software. 
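
The smu_max_ports(), smu_max_task_contexts() and smu_max_rncs() macros earlier in this patch, together with the min() clamping in scic_controller_initialize(), replace the removed per-mode tables: the driver now decodes the silicon-advertised maxima from the device context capacity register and caps them at its own compile-time limits. The standalone sketch below shows that decode-and-clamp pattern; the register layout and constants are made up for illustration and are not the real SMU encoding.

/*
 * Sketch of the capacity-clamping pattern: decode "maximum minus one"
 * fields from a capability register value and clamp them against the
 * driver's compile-time limits.  Field layout is hypothetical.
 */
#include <stdio.h>

#define DRV_MAX_PORTS		4u
#define DRV_MAX_TASK_CONTEXTS	256u
#define DRV_MAX_RNCS		256u

/* Hypothetical layout: [3:0] ports-1, [15:4] TCs-1, [27:16] RNCs-1 */
#define CAP_MAX_PORTS_MASK	0x0000000fu
#define CAP_MAX_PORTS_SHIFT	0
#define CAP_MAX_TC_MASK		0x0000fff0u
#define CAP_MAX_TC_SHIFT	4
#define CAP_MAX_RNC_MASK	0x0fff0000u
#define CAP_MAX_RNC_SHIFT	16

#define cap_max_ports(v) ((((v) & CAP_MAX_PORTS_MASK) >> CAP_MAX_PORTS_SHIFT) + 1)
#define cap_max_tcs(v)   ((((v) & CAP_MAX_TC_MASK) >> CAP_MAX_TC_SHIFT) + 1)
#define cap_max_rncs(v)  ((((v) & CAP_MAX_RNC_MASK) >> CAP_MAX_RNC_SHIFT) + 1)

static unsigned int min_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

int main(void)
{
	/* Pretend this value was read from the capability register. */
	unsigned int val = (3u << CAP_MAX_PORTS_SHIFT) |
			   (511u << CAP_MAX_TC_SHIFT) |
			   (255u << CAP_MAX_RNC_SHIFT);

	unsigned int ports = min_u(cap_max_ports(val), DRV_MAX_PORTS);
	unsigned int tcs   = min_u(cap_max_tcs(val), DRV_MAX_TASK_CONTEXTS);
	unsigned int rncs  = min_u(cap_max_rncs(val), DRV_MAX_RNCS);

	printf("ports=%u task_contexts=%u rncs=%u\n", ports, tcs, rncs);
	return 0;
}
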
@@ -325,7 +313,6 @@ struct isci_host { union scic_oem_parameters oem_parameters; int id; /* unique within a given pci device */ - void *core_ctrl_memory; struct dma_pool *dma_pool; struct isci_phy phys[SCI_MAX_PHYS]; struct isci_port ports[SCI_MAX_PORTS + 1]; /* includes dummy port */ diff --git a/drivers/scsi/isci/isci.h b/drivers/scsi/isci/isci.h index 80cfb45f8da..714ed926171 100644 --- a/drivers/scsi/isci/isci.h +++ b/drivers/scsi/isci/isci.h @@ -78,39 +78,16 @@ enum sci_controller_mode { SCI_MODE_SIZE /* deprecated */ }; -#define SCI_MAX_PHYS (4) +#define SCI_MAX_PHYS (4UL) #define SCI_MAX_PORTS SCI_MAX_PHYS -#define SCI_MIN_SMP_PHYS (38) #define SCI_MAX_SMP_PHYS (384) /* not silicon constrained */ -#define SCI_MAX_REMOTE_DEVICES (256) -#define SCI_MIN_REMOTE_DEVICES (16) -#define SCI_MAX_IO_REQUESTS (256) -#define SCI_MIN_IO_REQUESTS (1) +#define SCI_MAX_REMOTE_DEVICES (256UL) +#define SCI_MAX_IO_REQUESTS (256UL) #define SCI_MAX_MSIX_MESSAGES (2) #define SCI_MAX_SCATTER_GATHER_ELEMENTS 130 /* not silicon constrained */ -#define SCI_MIN_SCATTER_GATHER_ELEMENTS 1 #define SCI_MAX_CONTROLLERS 2 #define SCI_MAX_DOMAINS SCI_MAX_PORTS -/* 2 indicates the maximum number of UFs that can occur for a given IO request. - * The hardware handles reception of additional unsolicited frames while all - * UFs are in use, by holding off the transmitting device. This number could - * be theoretically reduced to 1, but 2 provides for more reliable operation. - * During SATA PIO operation, it is possible under some conditions for there to - * be 3 separate FISes received, back to back to back (PIO Setup, Data, D2H - * Register). It is unlikely to have all 3 pending all at once without some of - * them already being processed. - */ -#define SCU_MIN_UNSOLICITED_FRAMES (1) -#define SCU_MIN_CRITICAL_NOTIFICATIONS (24) -#define SCU_MIN_EVENTS (4) -#define SCU_MIN_COMPLETION_QUEUE_SCRATCH (2) -#define SCU_MIN_COMPLETION_QUEUE_ENTRIES (SCU_MIN_CRITICAL_NOTIFICATIONS \ - + SCU_MIN_EVENTS \ - + SCU_MIN_UNSOLICITED_FRAMES \ - + SCI_MIN_IO_REQUESTS \ - + SCU_MIN_COMPLETION_QUEUE_SCRATCH) - #define SCU_MAX_CRITICAL_NOTIFICATIONS (384) #define SCU_MAX_EVENTS (128) #define SCU_MAX_UNSOLICITED_FRAMES (128) @@ -121,51 +98,6 @@ enum sci_controller_mode { + SCI_MAX_IO_REQUESTS \ + SCU_MAX_COMPLETION_QUEUE_SCRATCH) -#if !defined(ENABLE_MINIMUM_MEMORY_MODE) -#define SCU_UNSOLICITED_FRAME_COUNT SCU_MAX_UNSOLICITED_FRAMES -#define SCU_CRITICAL_NOTIFICATION_COUNT SCU_MAX_CRITICAL_NOTIFICATIONS -#define SCU_EVENT_COUNT SCU_MAX_EVENTS -#define SCU_COMPLETION_QUEUE_SCRATCH SCU_MAX_COMPLETION_QUEUE_SCRATCH -#define SCU_IO_REQUEST_COUNT SCI_MAX_IO_REQUESTS -#define SCU_IO_REQUEST_SGE_COUNT SCI_MAX_SCATTER_GATHER_ELEMENTS -#define SCU_COMPLETION_QUEUE_COUNT SCU_MAX_COMPLETION_QUEUE_ENTRIES -#else -#define SCU_UNSOLICITED_FRAME_COUNT SCU_MIN_UNSOLICITED_FRAMES -#define SCU_CRITICAL_NOTIFICATION_COUNT SCU_MIN_CRITICAL_NOTIFICATIONS -#define SCU_EVENT_COUNT SCU_MIN_EVENTS -#define SCU_COMPLETION_QUEUE_SCRATCH SCU_MIN_COMPLETION_QUEUE_SCRATCH -#define SCU_IO_REQUEST_COUNT SCI_MIN_IO_REQUESTS -#define SCU_IO_REQUEST_SGE_COUNT SCI_MIN_SCATTER_GATHER_ELEMENTS -#define SCU_COMPLETION_QUEUE_COUNT SCU_MIN_COMPLETION_QUEUE_ENTRIES -#endif /* !defined(ENABLE_MINIMUM_MEMORY_OPERATION) */ - -/** - * - * - * The SCU_COMPLETION_QUEUE_COUNT constant indicates the size of the completion - * queue into which the hardware DMAs 32-bit quantas (completion entries). - */ - -/** - * - * - * This queue must be programmed to a power of 2 size (e.g. 
32, 64, 1024, etc.). - */ -#if (SCU_COMPLETION_QUEUE_COUNT != 16) && \ - (SCU_COMPLETION_QUEUE_COUNT != 32) && \ - (SCU_COMPLETION_QUEUE_COUNT != 64) && \ - (SCU_COMPLETION_QUEUE_COUNT != 128) && \ - (SCU_COMPLETION_QUEUE_COUNT != 256) && \ - (SCU_COMPLETION_QUEUE_COUNT != 512) && \ - (SCU_COMPLETION_QUEUE_COUNT != 1024) -#error "SCU_COMPLETION_QUEUE_COUNT must be set to a power of 2." -#endif - -#if SCU_MIN_UNSOLICITED_FRAMES > SCU_MAX_UNSOLICITED_FRAMES -#error "Invalid configuration of unsolicited frame constants" -#endif /* SCU_MIN_UNSOLICITED_FRAMES > SCU_MAX_UNSOLICITED_FRAMES */ - -#define SCU_MIN_UF_TABLE_ENTRIES (8) #define SCU_ABSOLUTE_MAX_UNSOLICITED_FRAMES (4096) #define SCU_UNSOLICITED_FRAME_BUFFER_SIZE (1024) #define SCU_INVALID_FRAME_INDEX (0xFFFF) @@ -173,14 +105,14 @@ enum sci_controller_mode { #define SCU_IO_REQUEST_MAX_SGE_SIZE (0x00FFFFFF) #define SCU_IO_REQUEST_MAX_TRANSFER_LENGTH (0x00FFFFFF) -/* - * Determine the size of the unsolicited frame array including - * unused buffers. */ -#if SCU_UNSOLICITED_FRAME_COUNT <= SCU_MIN_UF_TABLE_ENTRIES -#define SCU_UNSOLICITED_FRAME_CONTROL_ARRAY_SIZE SCU_MIN_UF_TABLE_ENTRIES -#else -#define SCU_UNSOLICITED_FRAME_CONTROL_ARRAY_SIZE SCU_MAX_UNSOLICITED_FRAMES -#endif /* SCU_UNSOLICITED_FRAME_COUNT <= SCU_MIN_UF_TABLE_ENTRIES */ +static inline void check_sizes(void) +{ + BUILD_BUG_ON_NOT_POWER_OF_2(SCU_MAX_EVENTS); + BUILD_BUG_ON(SCU_MAX_UNSOLICITED_FRAMES <= 8); + BUILD_BUG_ON_NOT_POWER_OF_2(SCU_MAX_UNSOLICITED_FRAMES); + BUILD_BUG_ON_NOT_POWER_OF_2(SCU_MAX_COMPLETION_QUEUE_ENTRIES); + BUILD_BUG_ON(SCU_MAX_UNSOLICITED_FRAMES > SCU_ABSOLUTE_MAX_UNSOLICITED_FRAMES); +} /** * enum sci_status - This is the general return status enumeration for non-IO, diff --git a/drivers/scsi/isci/request.h b/drivers/scsi/isci/request.h index 757cd99ae2e..547c35cbe45 100644 --- a/drivers/scsi/isci/request.h +++ b/drivers/scsi/isci/request.h @@ -213,7 +213,7 @@ struct scic_sds_request { struct scu_task_context tc ____cacheline_aligned; /* could be larger with sg chaining */ - #define SCU_SGL_SIZE ((SCU_IO_REQUEST_SGE_COUNT + 1) / 2) + #define SCU_SGL_SIZE ((SCI_MAX_SCATTER_GATHER_ELEMENTS + 1) / 2) struct scu_sgl_element_pair sg_table[SCU_SGL_SIZE] __attribute__ ((aligned(32))); /* diff --git a/drivers/scsi/isci/unsolicited_frame_control.c b/drivers/scsi/isci/unsolicited_frame_control.c index 12e67634638..d89570700ff 100644 --- a/drivers/scsi/isci/unsolicited_frame_control.c +++ b/drivers/scsi/isci/unsolicited_frame_control.c @@ -57,120 +57,30 @@ #include "unsolicited_frame_control.h" #include "registers.h" -/** - * This method will program the unsolicited frames (UFs) into the UF address - * table and construct the UF frame structure being modeled in the core. It - * will handle the case where some of the UFs are not being used and thus - * should have entries programmed to zero in the address table. - * @uf_control: This parameter specifies the unsolicted frame control object - * for which to construct the unsolicited frames objects. - * @uf_buffer_phys_address: This parameter specifies the physical address for - * the first unsolicited frame buffer. - * @uf_buffer_virt_address: This parameter specifies the virtual address for - * the first unsolicited frame buffer. - * @unused_uf_header_entries: This parameter specifies the number of unused UF - * headers. This value can be non-zero when there are a non-power of 2 - * number of unsolicited frames being supported. 
- * @used_uf_header_entries: This parameter specifies the number of actually - * utilized UF headers. - * - */ -static void scic_sds_unsolicited_frame_control_construct_frames( - struct scic_sds_unsolicited_frame_control *uf_control, - dma_addr_t uf_buffer_phys_address, - void *uf_buffer_virt_address, - u32 unused_uf_header_entries, - u32 used_uf_header_entries) -{ - u32 index; - struct scic_sds_unsolicited_frame *uf; - - /* - * Program the unused buffers into the UF address table and the - * controller's array of UFs. - */ - for (index = 0; index < unused_uf_header_entries; index++) { - uf = &uf_control->buffers.array[index]; - - uf->buffer = NULL; - uf_control->address_table.array[index] = 0; - uf->header = &uf_control->headers.array[index]; - uf->state = UNSOLICITED_FRAME_EMPTY; - } - - /* - * Program the actual used UF buffers into the UF address table and - * the controller's array of UFs. - */ - for (index = unused_uf_header_entries; - index < unused_uf_header_entries + used_uf_header_entries; - index++) { - uf = &uf_control->buffers.array[index]; - - uf_control->address_table.array[index] = uf_buffer_phys_address; - - uf->buffer = uf_buffer_virt_address; - uf->header = &uf_control->headers.array[index]; - uf->state = UNSOLICITED_FRAME_EMPTY; - - /* - * Increment the address of the physical and virtual memory - * pointers. Everything is aligned on 1k boundary with an - * increment of 1k. - */ - uf_buffer_virt_address += SCU_UNSOLICITED_FRAME_BUFFER_SIZE; - uf_buffer_phys_address += SCU_UNSOLICITED_FRAME_BUFFER_SIZE; - } -} - int scic_sds_unsolicited_frame_control_construct(struct scic_sds_controller *scic) { struct scic_sds_unsolicited_frame_control *uf_control = &scic->uf_control; - u32 unused_uf_header_entries; - u32 used_uf_header_entries; - u32 used_uf_buffer_bytes; - u32 unused_uf_header_bytes; - u32 used_uf_header_bytes; - dma_addr_t uf_buffer_phys_address; - void *uf_buffer_virt_address; + struct scic_sds_unsolicited_frame *uf; + u32 buf_len, header_len, i; + dma_addr_t dma; size_t size; - - /* - * The UF buffer address table size must be programmed to a power - * of 2. Find the first power of 2 that is equal to or greater then - * the number of unsolicited frame buffers to be utilized. - */ - uf_control->address_table.count = SCU_MIN_UF_TABLE_ENTRIES; - while (uf_control->address_table.count < uf_control->buffers.count && - uf_control->address_table.count < SCU_ABSOLUTE_MAX_UNSOLICITED_FRAMES) - uf_control->address_table.count <<= 1; + void *virt; /* * Prepare all of the memory sizes for the UF headers, UF address * table, and UF buffers themselves. */ - used_uf_buffer_bytes = uf_control->buffers.count - * SCU_UNSOLICITED_FRAME_BUFFER_SIZE; - unused_uf_header_entries = uf_control->address_table.count - - uf_control->buffers.count; - used_uf_header_entries = uf_control->buffers.count; - unused_uf_header_bytes = unused_uf_header_entries - * sizeof(struct scu_unsolicited_frame_header); - used_uf_header_bytes = used_uf_header_entries - * sizeof(struct scu_unsolicited_frame_header); - - size = used_uf_buffer_bytes + used_uf_header_bytes + - uf_control->address_table.count * sizeof(dma_addr_t); - + buf_len = SCU_MAX_UNSOLICITED_FRAMES * SCU_UNSOLICITED_FRAME_BUFFER_SIZE; + header_len = SCU_MAX_UNSOLICITED_FRAMES * sizeof(struct scu_unsolicited_frame_header); + size = buf_len + header_len + SCU_MAX_UNSOLICITED_FRAMES * sizeof(dma_addr_t); /* * The Unsolicited Frame buffers are set at the start of the UF * memory descriptor entry. 
The headers and address table will be * placed after the buffers. */ - uf_buffer_virt_address = dmam_alloc_coherent(scic_to_dev(scic), size, - &uf_buffer_phys_address, GFP_KERNEL); - if (!uf_buffer_virt_address) + virt = dmam_alloc_coherent(scic_to_dev(scic), size, &dma, GFP_KERNEL); + if (!virt) return -ENOMEM; /* @@ -183,15 +93,8 @@ int scic_sds_unsolicited_frame_control_construct(struct scic_sds_controller *sci * headers, since we program the UF address table pointers to * NULL. */ - uf_control->headers.physical_address = - uf_buffer_phys_address + - used_uf_buffer_bytes - - unused_uf_header_bytes; - - uf_control->headers.array = - uf_buffer_virt_address + - used_uf_buffer_bytes - - unused_uf_header_bytes; + uf_control->headers.physical_address = dma + buf_len; + uf_control->headers.array = virt + buf_len; /* * Program the location of the UF address table into the SCU. @@ -200,16 +103,8 @@ int scic_sds_unsolicited_frame_control_construct(struct scic_sds_controller *sci * byte boundary already due to above programming headers being on a * 64-bit boundary and headers are on a 64-bytes in size. */ - uf_control->address_table.physical_address = - uf_buffer_phys_address + - used_uf_buffer_bytes + - used_uf_header_bytes; - - uf_control->address_table.array = - uf_buffer_virt_address + - used_uf_buffer_bytes + - used_uf_header_bytes; - + uf_control->address_table.physical_address = dma + buf_len + header_len; + uf_control->address_table.array = virt + buf_len + header_len; uf_control->get = 0; /* @@ -220,16 +115,26 @@ int scic_sds_unsolicited_frame_control_construct(struct scic_sds_controller *sci * - Aligned on a 1KB boundary. */ /* - * If the user provided less then the maximum amount of memory, - * then be sure that we programm the first entries in the UF - * address table to NULL. */ - scic_sds_unsolicited_frame_control_construct_frames( - uf_control, - uf_buffer_phys_address, - uf_buffer_virt_address, - unused_uf_header_entries, - used_uf_header_entries - ); + * Program the actual used UF buffers into the UF address table and + * the controller's array of UFs. + */ + for (i = 0; i < SCU_MAX_UNSOLICITED_FRAMES; i++) { + uf = &uf_control->buffers.array[i]; + + uf_control->address_table.array[i] = dma; + + uf->buffer = virt; + uf->header = &uf_control->headers.array[i]; + uf->state = UNSOLICITED_FRAME_EMPTY; + + /* + * Increment the address of the physical and virtual memory + * pointers. Everything is aligned on 1k boundary with an + * increment of 1k. + */ + virt += SCU_UNSOLICITED_FRAME_BUFFER_SIZE; + dma += SCU_UNSOLICITED_FRAME_BUFFER_SIZE; + } return 0; } @@ -247,7 +152,7 @@ enum sci_status scic_sds_unsolicited_frame_control_get_header( u32 frame_index, void **frame_header) { - if (frame_index < uf_control->address_table.count) { + if (frame_index < SCU_MAX_UNSOLICITED_FRAMES) { /* * Skip the first word in the frame since this is a controll word used * by the hardware. 
*/ @@ -272,7 +177,7 @@ enum sci_status scic_sds_unsolicited_frame_control_get_buffer( u32 frame_index, void **frame_buffer) { - if (frame_index < uf_control->address_table.count) { + if (frame_index < SCU_MAX_UNSOLICITED_FRAMES) { *frame_buffer = uf_control->buffers.array[frame_index].buffer; return SCI_SUCCESS; @@ -298,26 +203,24 @@ bool scic_sds_unsolicited_frame_control_release_frame( u32 frame_get; u32 frame_cycle; - frame_get = uf_control->get & (uf_control->address_table.count - 1); - frame_cycle = uf_control->get & uf_control->address_table.count; + frame_get = uf_control->get & (SCU_MAX_UNSOLICITED_FRAMES - 1); + frame_cycle = uf_control->get & SCU_MAX_UNSOLICITED_FRAMES; /* * In the event there are NULL entries in the UF table, we need to * advance the get pointer in order to find out if this frame should * be released (i.e. update the get pointer). */ - while (((lower_32_bits(uf_control->address_table.array[frame_get]) - == 0) && - (upper_32_bits(uf_control->address_table.array[frame_get]) - == 0)) && - (frame_get < uf_control->address_table.count)) + while (lower_32_bits(uf_control->address_table.array[frame_get]) == 0 && + upper_32_bits(uf_control->address_table.array[frame_get]) == 0 && + frame_get < SCU_MAX_UNSOLICITED_FRAMES) frame_get++; /* * The table has a NULL entry as it's last element. This is * illegal. */ - BUG_ON(frame_get >= uf_control->address_table.count); + BUG_ON(frame_get >= SCU_MAX_UNSOLICITED_FRAMES); - if (frame_index < uf_control->address_table.count) { + if (frame_index < SCU_MAX_UNSOLICITED_FRAMES) { uf_control->buffers.array[frame_index].state = UNSOLICITED_FRAME_RELEASED; /* @@ -333,9 +236,8 @@ bool scic_sds_unsolicited_frame_control_release_frame( INCREMENT_QUEUE_GET( frame_get, frame_cycle, - uf_control->address_table.count - 1, - uf_control->address_table.count - ); + SCU_MAX_UNSOLICITED_FRAMES - 1, + SCU_MAX_UNSOLICITED_FRAMES); } uf_control->get = diff --git a/drivers/scsi/isci/unsolicited_frame_control.h b/drivers/scsi/isci/unsolicited_frame_control.h index 0d8ca8c4770..2954904f025 100644 --- a/drivers/scsi/isci/unsolicited_frame_control.h +++ b/drivers/scsi/isci/unsolicited_frame_control.h @@ -144,25 +144,18 @@ struct scic_sds_uf_header_array { */ struct scic_sds_uf_buffer_array { /** - * This field is the minimum number of unsolicited frames supported by the - * hardware and the number of unsolicited frames requested by the software. - */ - u32 count; - - /** - * This field is the SCIC_UNSOLICITED_FRAME data its used to manage + * This field is the unsolicited frame data its used to manage * the data for the unsolicited frame requests. It also represents * the virtual address location that corresponds to the * physical_address field. */ - struct scic_sds_unsolicited_frame array[SCU_UNSOLICITED_FRAME_CONTROL_ARRAY_SIZE]; + struct scic_sds_unsolicited_frame array[SCU_MAX_UNSOLICITED_FRAMES]; /** * This field specifies the physical address location for the UF * buffer array. */ dma_addr_t physical_address; - }; /** @@ -173,15 +166,6 @@ struct scic_sds_uf_buffer_array { * 1KB buffers into which the silicon will DMA unsolicited frames. */ struct scic_sds_uf_address_table_array { - /** - * This field specifies the actual programmed size of the - * unsolicited frame buffer address table. The size of the table - * can be larger than the actual number of UF buffers, but it must - * be a power of 2 and the last entry in the table is not allowed - * to be NULL. 
- */ - u32 count; - /** * This field represents a virtual pointer that refers to the * starting address of the UF address table. -- cgit v1.2.3-70-g09d2 From 980d3aeb3828b0fdf2a0b2e893d238130b014575 Mon Sep 17 00:00:00 2001 From: Dan Williams <dan.j.williams@intel.com> Date: Mon, 20 Jun 2011 15:11:22 -0700 Subject: isci: fix isci_terminate_pending() list management Walk through the list of pending requests being careful to consider that multiple requests can be terminated when the lock is dropped (i.e. invalidating the 'next' reference established by list_for_each_entry_safe). Also noticed that all callers to isci_terminate_pending_requests() specifying terminating, so just drop the parameter. Signed-off-by: Dan Williams <dan.j.williams@intel.com> --- drivers/scsi/isci/remote_device.c | 2 +- drivers/scsi/isci/request.h | 5 +- drivers/scsi/isci/task.c | 131 +++++++++++++++++++------------------- 3 files changed, 67 insertions(+), 71 deletions(-) (limited to 'drivers/scsi/isci/request.h') diff --git a/drivers/scsi/isci/remote_device.c b/drivers/scsi/isci/remote_device.c index 3b555dcbe56..45592ad33c3 100644 --- a/drivers/scsi/isci/remote_device.c +++ b/drivers/scsi/isci/remote_device.c @@ -1272,7 +1272,7 @@ void isci_remote_device_nuke_requests(struct isci_host *ihost, struct isci_remot "%s: idev = %p\n", __func__, idev); /* Cleanup all requests pending for this device. */ - isci_terminate_pending_requests(ihost, idev, terminating); + isci_terminate_pending_requests(ihost, idev); dev_dbg(&ihost->pdev->dev, "%s: idev = %p, done\n", __func__, idev); diff --git a/drivers/scsi/isci/request.h b/drivers/scsi/isci/request.h index 547c35cbe45..ac9368c5a6b 100644 --- a/drivers/scsi/isci/request.h +++ b/drivers/scsi/isci/request.h @@ -776,9 +776,8 @@ isci_request_io_request_get_next_sge(struct isci_request *request, } void -isci_terminate_pending_requests(struct isci_host *isci_host, - struct isci_remote_device *isci_device, - enum isci_request_status new_request_state); +isci_terminate_pending_requests(struct isci_host *ihost, + struct isci_remote_device *idev); enum sci_status scic_task_request_construct(struct scic_sds_controller *scic, struct scic_sds_remote_device *sci_dev, diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c index 573cf1c9e81..01032dc2c11 100644 --- a/drivers/scsi/isci/task.c +++ b/drivers/scsi/isci/task.c @@ -796,81 +796,79 @@ static void isci_terminate_request_core( * @isci_host: This parameter specifies SCU. * @isci_device: This parameter specifies the target. * - * */ -void isci_terminate_pending_requests( - struct isci_host *isci_host, - struct isci_remote_device *isci_device, - enum isci_request_status new_request_state) +void isci_terminate_pending_requests(struct isci_host *ihost, + struct isci_remote_device *idev) { - struct isci_request *request; - struct isci_request *next_request; - unsigned long flags; + struct completion request_completion; enum isci_request_status old_state; - DECLARE_COMPLETION_ONSTACK(request_completion); - - dev_dbg(&isci_host->pdev->dev, - "%s: isci_device = %p (new request state = %d)\n", - __func__, isci_device, new_request_state); + unsigned long flags; + LIST_HEAD(list); - spin_lock_irqsave(&isci_host->scic_lock, flags); + spin_lock_irqsave(&ihost->scic_lock, flags); + list_splice_init(&idev->reqs_in_process, &list); - /* Iterate through the list. 
*/ - list_for_each_entry_safe(request, next_request, - &isci_device->reqs_in_process, dev_node) { + /* assumes that isci_terminate_request_core deletes from the list */ + while (!list_empty(&list)) { + struct isci_request *ireq = list_entry(list.next, typeof(*ireq), dev_node); - init_completion(&request_completion); + /* Change state to "terminating" if it is currently + * "started". + */ + old_state = isci_request_change_started_to_newstate(ireq, + &request_completion, + terminating); + switch (old_state) { + case started: + case completed: + case aborting: + break; + default: + /* termination in progress, or otherwise dispositioned. + * We know the request was on 'list' so should be safe + * to move it back to reqs_in_process + */ + list_move(&ireq->dev_node, &idev->reqs_in_process); + ireq = NULL; + break; + } - /* Change state to "new_request_state" if it is currently - * "started". - */ - old_state = isci_request_change_started_to_newstate( - request, - &request_completion, - new_request_state); + if (!ireq) + continue; + spin_unlock_irqrestore(&ihost->scic_lock, flags); - spin_unlock_irqrestore(&isci_host->scic_lock, flags); + init_completion(&request_completion); - if ((old_state == started) || - (old_state == completed) || - (old_state == aborting)) { - - dev_warn(&isci_host->pdev->dev, - "%s: isci_device=%p request=%p; task=%p " - "old_state=%d\n", - __func__, - isci_device, request, - ((request->ttype == io_task) - ? isci_request_access_task(request) - : NULL), - old_state); - - /* If the old_state is started: - * This request was not already being aborted. If it had been, - * then the aborting I/O (ie. the TMF request) would not be in - * the aborting state, and thus would be terminated here. Note - * that since the TMF completion's call to the kernel function - * "complete()" does not happen until the pending I/O request - * terminate fully completes, we do not have to implement a - * special wait here for already aborting requests - the - * termination of the TMF request will force the request - * to finish it's already started terminate. - * - * If old_state == completed: - * This request completed from the SCU hardware perspective - * and now just needs cleaning up in terms of freeing the - * request and potentially calling up to libsas. - * - * If old_state == aborting: - * This request has already gone through a TMF timeout, but may - * not have been terminated; needs cleaning up at least. - */ - isci_terminate_request_core(isci_host, isci_device, - request); - } - spin_lock_irqsave(&isci_host->scic_lock, flags); + dev_dbg(&ihost->pdev->dev, + "%s: idev=%p request=%p; task=%p old_state=%d\n", + __func__, idev, ireq, + ireq->ttype == io_task ? isci_request_access_task(ireq) : NULL, + old_state); + + /* If the old_state is started: + * This request was not already being aborted. If it had been, + * then the aborting I/O (ie. the TMF request) would not be in + * the aborting state, and thus would be terminated here. Note + * that since the TMF completion's call to the kernel function + * "complete()" does not happen until the pending I/O request + * terminate fully completes, we do not have to implement a + * special wait here for already aborting requests - the + * termination of the TMF request will force the request + * to finish it's already started terminate. + * + * If old_state == completed: + * This request completed from the SCU hardware perspective + * and now just needs cleaning up in terms of freeing the + * request and potentially calling up to libsas. 
+ * + * If old_state == aborting: + * This request has already gone through a TMF timeout, but may + * not have been terminated; needs cleaning up at least. + */ + isci_terminate_request_core(ihost, idev, ireq); + spin_lock_irqsave(&ihost->scic_lock, flags); } - spin_unlock_irqrestore(&isci_host->scic_lock, flags); + spin_unlock_irqrestore(&ihost->scic_lock, flags); } /** @@ -965,8 +963,7 @@ int isci_task_lu_reset(struct domain_device *domain_device, u8 *lun) if (ret == TMF_RESP_FUNC_COMPLETE) /* Terminate all I/O now. */ isci_terminate_pending_requests(isci_host, - isci_device, - terminating); + isci_device); return ret; } -- cgit v1.2.3-70-g09d2 From 0d0cf14c9bd2943ed5afd15df459f564d85eacde Mon Sep 17 00:00:00 2001 From: Dan Williams <dan.j.williams@intel.com> Date: Mon, 13 Jun 2011 00:51:30 -0700 Subject: isci: cleanup request allocation Rather than return an error code and update a pointer that was passed by reference just return the request object directly (or null if allocation failed). Signed-off-by: Dan Williams <dan.j.williams@intel.com> --- drivers/scsi/isci/request.c | 197 +++++++++++++++----------------------------- drivers/scsi/isci/request.h | 11 +-- drivers/scsi/isci/task.c | 136 +++++++++++------------------- 3 files changed, 121 insertions(+), 223 deletions(-) (limited to 'drivers/scsi/isci/request.h') diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c index 9d7531ad9a7..f0813d076c5 100644 --- a/drivers/scsi/isci/request.c +++ b/drivers/scsi/isci/request.c @@ -3510,172 +3510,110 @@ static enum sci_status isci_io_request_build( return SCI_SUCCESS; } -/** - * isci_request_alloc_core() - This function gets the request object from the - * isci_host dma cache. - * @isci_host: This parameter specifies the ISCI host object - * @isci_request: This parameter will contain the pointer to the new - * isci_request object. - * @isci_device: This parameter is the pointer to the isci remote device object - * that is the destination for this request. - * @gfp_flags: This parameter specifies the os allocation flags. - * - * SCI_SUCCESS on successfull completion, or specific failure code. - */ -static int isci_request_alloc_core( - struct isci_host *isci_host, - struct isci_request **isci_request, - struct isci_remote_device *isci_device, - gfp_t gfp_flags) +static struct isci_request *isci_request_alloc_core(struct isci_host *ihost, + struct isci_remote_device *idev, + gfp_t gfp_flags) { - int ret = 0; dma_addr_t handle; - struct isci_request *request; - + struct isci_request *ireq; - /* get pointer to dma memory. This actually points - * to both the isci_remote_device object and the - * sci object. The isci object is at the beginning - * of the memory allocated here. - */ - request = dma_pool_alloc(isci_host->dma_pool, gfp_flags, &handle); - if (!request) { - dev_warn(&isci_host->pdev->dev, + ireq = dma_pool_alloc(ihost->dma_pool, gfp_flags, &handle); + if (!ireq) { + dev_warn(&ihost->pdev->dev, "%s: dma_pool_alloc returned NULL\n", __func__); - return -ENOMEM; + return NULL; } /* initialize the request object. 
*/ - spin_lock_init(&request->state_lock); - request->request_daddr = handle; - request->isci_host = isci_host; - request->isci_device = isci_device; - request->io_request_completion = NULL; - request->terminated = false; + spin_lock_init(&ireq->state_lock); + ireq->request_daddr = handle; + ireq->isci_host = ihost; + ireq->isci_device = idev; + ireq->io_request_completion = NULL; + ireq->terminated = false; - request->num_sg_entries = 0; + ireq->num_sg_entries = 0; - request->complete_in_target = false; + ireq->complete_in_target = false; - INIT_LIST_HEAD(&request->completed_node); - INIT_LIST_HEAD(&request->dev_node); + INIT_LIST_HEAD(&ireq->completed_node); + INIT_LIST_HEAD(&ireq->dev_node); - *isci_request = request; - isci_request_change_state(request, allocated); + isci_request_change_state(ireq, allocated); - return ret; + return ireq; } -static int isci_request_alloc_io( - struct isci_host *isci_host, - struct sas_task *task, - struct isci_request **isci_request, - struct isci_remote_device *isci_device, - gfp_t gfp_flags) +static struct isci_request *isci_request_alloc_io(struct isci_host *ihost, + struct sas_task *task, + struct isci_remote_device *idev, + gfp_t gfp_flags) { - int retval = isci_request_alloc_core(isci_host, isci_request, - isci_device, gfp_flags); - - if (!retval) { - (*isci_request)->ttype_ptr.io_task_ptr = task; - (*isci_request)->ttype = io_task; + struct isci_request *ireq; - task->lldd_task = *isci_request; + ireq = isci_request_alloc_core(ihost, idev, gfp_flags); + if (ireq) { + ireq->ttype_ptr.io_task_ptr = task; + ireq->ttype = io_task; + task->lldd_task = ireq; } - return retval; + return ireq; } -/** - * isci_request_alloc_tmf() - This function gets the request object from the - * isci_host dma cache and initializes the relevant fields as a sas_task. - * @isci_host: This parameter specifies the ISCI host object - * @sas_task: This parameter is the task struct from the upper layer driver. - * @isci_request: This parameter will contain the pointer to the new - * isci_request object. - * @isci_device: This parameter is the pointer to the isci remote device object - * that is the destination for this request. - * @gfp_flags: This parameter specifies the os allocation flags. - * - * SCI_SUCCESS on successfull completion, or specific failure code. - */ -int isci_request_alloc_tmf( - struct isci_host *isci_host, - struct isci_tmf *isci_tmf, - struct isci_request **isci_request, - struct isci_remote_device *isci_device, - gfp_t gfp_flags) +struct isci_request *isci_request_alloc_tmf(struct isci_host *ihost, + struct isci_tmf *isci_tmf, + struct isci_remote_device *idev, + gfp_t gfp_flags) { - int retval = isci_request_alloc_core(isci_host, isci_request, - isci_device, gfp_flags); - - if (!retval) { + struct isci_request *ireq; - (*isci_request)->ttype_ptr.tmf_task_ptr = isci_tmf; - (*isci_request)->ttype = tmf_task; + ireq = isci_request_alloc_core(ihost, idev, gfp_flags); + if (ireq) { + ireq->ttype_ptr.tmf_task_ptr = isci_tmf; + ireq->ttype = tmf_task; } - return retval; + return ireq; } -/** - * isci_request_execute() - This function allocates the isci_request object, - * all fills in some common fields. - * @isci_host: This parameter specifies the ISCI host object - * @sas_task: This parameter is the task struct from the upper layer driver. - * @isci_request: This parameter will contain the pointer to the new - * isci_request object. - * @gfp_flags: This parameter specifies the os allocation flags. 
- * - * SCI_SUCCESS on successfull completion, or specific failure code. - */ -int isci_request_execute( - struct isci_host *isci_host, - struct sas_task *task, - struct isci_request **isci_request, - gfp_t gfp_flags) +int isci_request_execute(struct isci_host *ihost, struct sas_task *task, + gfp_t gfp_flags) { - int ret = 0; - struct scic_sds_remote_device *sci_device; enum sci_status status = SCI_FAILURE_UNSUPPORTED_PROTOCOL; - struct isci_remote_device *isci_device; - struct isci_request *request; + struct scic_sds_remote_device *sci_dev; + struct isci_remote_device *idev; + struct isci_request *ireq; unsigned long flags; + int ret = 0; - isci_device = task->dev->lldd_dev; - sci_device = &isci_device->sci; + idev = task->dev->lldd_dev; + sci_dev = &idev->sci; /* do common allocation and init of request object. */ - ret = isci_request_alloc_io( - isci_host, - task, - &request, - isci_device, - gfp_flags - ); - - if (ret) + ireq = isci_request_alloc_io(ihost, task, idev, gfp_flags); + if (!ireq) goto out; - status = isci_io_request_build(isci_host, request, isci_device); + status = isci_io_request_build(ihost, ireq, idev); if (status != SCI_SUCCESS) { - dev_warn(&isci_host->pdev->dev, + dev_warn(&ihost->pdev->dev, "%s: request_construct failed - status = 0x%x\n", __func__, status); goto out; } - spin_lock_irqsave(&isci_host->scic_lock, flags); + spin_lock_irqsave(&ihost->scic_lock, flags); /* send the request, let the core assign the IO TAG. */ - status = scic_controller_start_io(&isci_host->sci, sci_device, - &request->sci, + status = scic_controller_start_io(&ihost->sci, sci_dev, + &ireq->sci, SCI_CONTROLLER_INVALID_IO_TAG); if (status != SCI_SUCCESS && status != SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) { - dev_warn(&isci_host->pdev->dev, + dev_warn(&ihost->pdev->dev, "%s: failed request start (0x%x)\n", __func__, status); - spin_unlock_irqrestore(&isci_host->scic_lock, flags); + spin_unlock_irqrestore(&ihost->scic_lock, flags); goto out; } @@ -3687,21 +3625,21 @@ int isci_request_execute( * Update it's status and add it to the list in the * remote device object. */ - list_add(&request->dev_node, &isci_device->reqs_in_process); + list_add(&ireq->dev_node, &idev->reqs_in_process); if (status == SCI_SUCCESS) { /* Save the tag for possible task mgmt later. */ - request->io_tag = request->sci.io_tag; - isci_request_change_state(request, started); + ireq->io_tag = ireq->sci.io_tag; + isci_request_change_state(ireq, started); } else { /* The request did not really start in the * hardware, so clear the request handle * here so no terminations will be done. */ - request->terminated = true; - isci_request_change_state(request, completed); + ireq->terminated = true; + isci_request_change_state(ireq, completed); } - spin_unlock_irqrestore(&isci_host->scic_lock, flags); + spin_unlock_irqrestore(&ihost->scic_lock, flags); if (status == SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) { @@ -3716,7 +3654,7 @@ int isci_request_execute( /* Cause this task to be scheduled in the SCSI error * handler thread. */ - isci_execpath_callback(isci_host, task, + isci_execpath_callback(ihost, task, sas_task_abort); /* Change the status, since we are holding @@ -3729,11 +3667,10 @@ int isci_request_execute( out: if (status != SCI_SUCCESS) { /* release dma memory on failure. 
*/ - isci_request_free(isci_host, request); - request = NULL; + isci_request_free(ihost, ireq); + ireq = NULL; ret = SCI_FAILURE; } - *isci_request = request; return ret; } diff --git a/drivers/scsi/isci/request.h b/drivers/scsi/isci/request.h index ac9368c5a6b..8de2542f081 100644 --- a/drivers/scsi/isci/request.h +++ b/drivers/scsi/isci/request.h @@ -679,16 +679,13 @@ static inline void isci_request_free(struct isci_host *isci_host, #define isci_request_access_tmf(req) ((req)->ttype_ptr.tmf_task_ptr) -int isci_request_alloc_tmf(struct isci_host *isci_host, - struct isci_tmf *isci_tmf, - struct isci_request **isci_request, - struct isci_remote_device *isci_device, - gfp_t gfp_flags); - +struct isci_request *isci_request_alloc_tmf(struct isci_host *ihost, + struct isci_tmf *isci_tmf, + struct isci_remote_device *idev, + gfp_t gfp_flags); int isci_request_execute(struct isci_host *isci_host, struct sas_task *task, - struct isci_request **request, gfp_t gfp_flags); /** diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c index 01032dc2c11..ded81cd1a78 100644 --- a/drivers/scsi/isci/task.c +++ b/drivers/scsi/isci/task.c @@ -146,7 +146,6 @@ static void isci_task_refuse(struct isci_host *ihost, struct sas_task *task, int isci_task_execute_task(struct sas_task *task, int num, gfp_t gfp_flags) { struct isci_host *ihost = dev_to_ihost(task->dev); - struct isci_request *request = NULL; struct isci_remote_device *device; unsigned long flags; int ret; @@ -226,8 +225,7 @@ int isci_task_execute_task(struct sas_task *task, int num, gfp_t gfp_flags) spin_unlock_irqrestore(&task->task_state_lock, flags); /* build and send the request. */ - status = isci_request_execute(ihost, task, &request, - gfp_flags); + status = isci_request_execute(ihost, task, gfp_flags); if (status != SCI_SUCCESS) { @@ -254,54 +252,34 @@ int isci_task_execute_task(struct sas_task *task, int num, gfp_t gfp_flags) return 0; } - - -/** - * isci_task_request_build() - This function builds the task request object. - * @isci_host: This parameter specifies the ISCI host object - * @request: This parameter points to the isci_request object allocated in the - * request construct function. - * @tmf: This parameter is the task management struct to be built - * - * SCI_SUCCESS on successfull completion, or specific failure code. - */ -static enum sci_status isci_task_request_build( - struct isci_host *isci_host, - struct isci_request **isci_request, - struct isci_tmf *isci_tmf) +static struct isci_request *isci_task_request_build(struct isci_host *ihost, + struct isci_tmf *isci_tmf) { - struct scic_sds_remote_device *sci_device; + struct scic_sds_remote_device *sci_dev; enum sci_status status = SCI_FAILURE; - struct isci_request *request = NULL; - struct isci_remote_device *isci_device; + struct isci_request *ireq = NULL; + struct isci_remote_device *idev; struct domain_device *dev; - dev_dbg(&isci_host->pdev->dev, + dev_dbg(&ihost->pdev->dev, "%s: isci_tmf = %p\n", __func__, isci_tmf); - isci_device = isci_tmf->device; - sci_device = &isci_device->sci; - dev = isci_device->domain_dev; + idev = isci_tmf->device; + sci_dev = &idev->sci; + dev = idev->domain_dev; /* do common allocation and init of request object. */ - status = isci_request_alloc_tmf( - isci_host, - isci_tmf, - &request, - isci_device, - GFP_ATOMIC - ); - - if (status != SCI_SUCCESS) - goto out; + ireq = isci_request_alloc_tmf(ihost, isci_tmf, idev, GFP_ATOMIC); + if (!ireq) + return NULL; /* let the core do it's construct. 
*/ - status = scic_task_request_construct(&isci_host->sci, sci_device, + status = scic_task_request_construct(&ihost->sci, sci_dev, SCI_CONTROLLER_INVALID_IO_TAG, - &request->sci); + &ireq->sci); if (status != SCI_SUCCESS) { - dev_warn(&isci_host->pdev->dev, + dev_warn(&ihost->pdev->dev, "%s: scic_task_request_construct failed - " "status = 0x%x\n", __func__, @@ -312,30 +290,23 @@ static enum sci_status isci_task_request_build( /* XXX convert to get this from task->tproto like other drivers */ if (dev->dev_type == SAS_END_DEV) { isci_tmf->proto = SAS_PROTOCOL_SSP; - status = scic_task_request_construct_ssp(&request->sci); + status = scic_task_request_construct_ssp(&ireq->sci); if (status != SCI_SUCCESS) goto errout; } if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) { isci_tmf->proto = SAS_PROTOCOL_SATA; - status = isci_sata_management_task_request_build(request); + status = isci_sata_management_task_request_build(ireq); if (status != SCI_SUCCESS) goto errout; } - - goto out; - + return ireq; errout: - - /* release the dma memory if we fail. */ - isci_request_free(isci_host, request); - request = NULL; - - out: - *isci_request = request; - return status; + isci_request_free(ihost, ireq); + ireq = NULL; + return ireq; } /** @@ -350,16 +321,14 @@ static enum sci_status isci_task_request_build( * TMF_RESP_FUNC_COMPLETE on successful completion of the TMF (this includes * error conditions reported in the IU status), or TMF_RESP_FUNC_FAILED. */ -int isci_task_execute_tmf( - struct isci_host *isci_host, - struct isci_tmf *tmf, - unsigned long timeout_ms) +int isci_task_execute_tmf(struct isci_host *ihost, struct isci_tmf *tmf, + unsigned long timeout_ms) { DECLARE_COMPLETION_ONSTACK(completion); enum sci_task_status status = SCI_TASK_FAILURE; struct scic_sds_remote_device *sci_device; struct isci_remote_device *isci_device = tmf->device; - struct isci_request *request; + struct isci_request *ireq; int ret = TMF_RESP_FUNC_FAILED; unsigned long flags; unsigned long timeleft; @@ -368,13 +337,13 @@ int isci_task_execute_tmf( * if the device is not there and ready. */ if (!isci_device || isci_device->status != isci_ready_for_io) { - dev_dbg(&isci_host->pdev->dev, + dev_dbg(&ihost->pdev->dev, "%s: isci_device = %p not ready (%d)\n", __func__, isci_device, isci_device->status); return TMF_RESP_FUNC_FAILED; } else - dev_dbg(&isci_host->pdev->dev, + dev_dbg(&ihost->pdev->dev, "%s: isci_device = %p\n", __func__, isci_device); @@ -383,64 +352,59 @@ int isci_task_execute_tmf( /* Assign the pointer to the TMF's completion kernel wait structure. */ tmf->complete = &completion; - isci_task_request_build( - isci_host, - &request, - tmf - ); - - if (!request) { - dev_warn(&isci_host->pdev->dev, + ireq = isci_task_request_build(ihost, tmf); + if (!ireq) { + dev_warn(&ihost->pdev->dev, "%s: isci_task_request_build failed\n", __func__); return TMF_RESP_FUNC_FAILED; } - spin_lock_irqsave(&isci_host->scic_lock, flags); + spin_lock_irqsave(&ihost->scic_lock, flags); /* start the TMF io. 
*/ status = scic_controller_start_task( - &isci_host->sci, + &ihost->sci, sci_device, - &request->sci, + &ireq->sci, SCI_CONTROLLER_INVALID_IO_TAG); if (status != SCI_TASK_SUCCESS) { - dev_warn(&isci_host->pdev->dev, + dev_warn(&ihost->pdev->dev, "%s: start_io failed - status = 0x%x, request = %p\n", __func__, status, - request); - spin_unlock_irqrestore(&isci_host->scic_lock, flags); + ireq); + spin_unlock_irqrestore(&ihost->scic_lock, flags); goto cleanup_request; } if (tmf->cb_state_func != NULL) tmf->cb_state_func(isci_tmf_started, tmf, tmf->cb_data); - isci_request_change_state(request, started); + isci_request_change_state(ireq, started); /* add the request to the remote device request list. */ - list_add(&request->dev_node, &isci_device->reqs_in_process); + list_add(&ireq->dev_node, &isci_device->reqs_in_process); - spin_unlock_irqrestore(&isci_host->scic_lock, flags); + spin_unlock_irqrestore(&ihost->scic_lock, flags); /* Wait for the TMF to complete, or a timeout. */ timeleft = wait_for_completion_timeout(&completion, jiffies + msecs_to_jiffies(timeout_ms)); if (timeleft == 0) { - spin_lock_irqsave(&isci_host->scic_lock, flags); + spin_lock_irqsave(&ihost->scic_lock, flags); if (tmf->cb_state_func != NULL) tmf->cb_state_func(isci_tmf_timed_out, tmf, tmf->cb_data); status = scic_controller_terminate_request( - &request->isci_host->sci, - &request->isci_device->sci, - &request->sci); + &ireq->isci_host->sci, + &ireq->isci_device->sci, + &ireq->sci); - spin_unlock_irqrestore(&isci_host->scic_lock, flags); + spin_unlock_irqrestore(&ihost->scic_lock, flags); } isci_print_tmf(tmf); @@ -448,7 +412,7 @@ int isci_task_execute_tmf( if (tmf->status == SCI_SUCCESS) ret = TMF_RESP_FUNC_COMPLETE; else if (tmf->status == SCI_FAILURE_IO_RESPONSE_VALID) { - dev_dbg(&isci_host->pdev->dev, + dev_dbg(&ihost->pdev->dev, "%s: tmf.status == " "SCI_FAILURE_IO_RESPONSE_VALID\n", __func__); @@ -456,18 +420,18 @@ int isci_task_execute_tmf( } /* Else - leave the default "failed" status alone. */ - dev_dbg(&isci_host->pdev->dev, + dev_dbg(&ihost->pdev->dev, "%s: completed request = %p\n", __func__, - request); + ireq); - if (request->io_request_completion != NULL) { + if (ireq->io_request_completion != NULL) { /* A thread is waiting for this TMF to finish. */ - complete(request->io_request_completion); + complete(ireq->io_request_completion); } cleanup_request: - isci_request_free(isci_host, request); + isci_request_free(ihost, ireq); return ret; } -- cgit v1.2.3-70-g09d2 From 209fae14fabfd48525e5630bebbbd4ca15090c60 Mon Sep 17 00:00:00 2001 From: Dan Williams <dan.j.williams@intel.com> Date: Mon, 13 Jun 2011 17:39:44 -0700 Subject: isci: atomic device lookup and reference counting We have unsafe references to remote devices that are notified to disappear at lldd_dev_gone. In order to clean this up we need a single canonical source for device lookups and stable references once a lookup succeeds. Towards that end guarantee that domain_device.lldd_dev is NULL as soon as we start the process of stopping a device. Any code path that wants to safely lookup a remote device must do so through task->dev->lldd_dev (isci_lookup_device()). For in-flight references outside of scic_lock we need reference counting to ensure that the device is not recycled before we are done with it. Simplify device back references to just scic_sds_request.target_device which is now the only permissible internal reference that is maintained relative to the reference count. 
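To make the new discipline concrete, the safe pattern for any code path that needs a
device outside of scic_lock is: look the device up under scic_lock (isci_lookup_device()
takes a kref only when IDEV_GONE is not set), drop the lock to do the work, and put the
reference when finished. Below is a minimal sketch of that caller pattern, modeled on the
isci_task_lu_reset() hunk later in this patch; the wrapper some_eh_action() is a
hypothetical example, not part of the driver.

/* Sketch of the lookup/use/put pattern this patch introduces.
 * isci_lookup_device() returns NULL once IDEV_GONE is set or
 * domain_device.lldd_dev has been cleared, so every caller must
 * handle a missing device; on success it returns the device with
 * an extra kref held.
 */
static int some_eh_action(struct domain_device *dev)	/* hypothetical caller */
{
	struct isci_host *ihost = dev_to_ihost(dev);
	struct isci_remote_device *idev;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&ihost->scic_lock, flags);
	idev = isci_lookup_device(dev);		/* kref_get() taken under scic_lock */
	spin_unlock_irqrestore(&ihost->scic_lock, flags);

	if (!idev)
		return TMF_RESP_FUNC_FAILED;	/* device already gone */

	/* ... idev is safe to use here even if lldd_dev is cleared ... */
	ret = TMF_RESP_FUNC_COMPLETE;

	isci_put_device(idev);			/* drop the reference taken above */
	return ret;
}

The put here pairs with the kref_get() inside isci_lookup_device(); the final put lands in
isci_remote_device_release(), which clears the device flags and wakes any waiter on
ihost->eventq.
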
There were two occasions where we wanted new i/o's to be treated as SAS_TASK_UNDELIVERED but where the domain_dev->lldd_dev link is still intact. Introduce a 'gone' flag to prevent i/o while waiting for libsas to take action on the port down event. One 'core' leftover is that we currently call scic_remote_device_destruct() from isci_remote_device_deconstruct() which is called when the 'core' says the device is stopped. It would be more natural for the final put to trigger isci_remote_device_deconstruct() but this implementation is deferred as it requires other changes. Signed-off-by: Dan Williams <dan.j.williams@intel.com> --- drivers/scsi/isci/host.c | 4 +- drivers/scsi/isci/port.c | 3 +- drivers/scsi/isci/remote_device.c | 80 ++++++------- drivers/scsi/isci/remote_device.h | 23 ++++ drivers/scsi/isci/request.c | 60 ++++------ drivers/scsi/isci/request.h | 7 +- drivers/scsi/isci/sata.c | 7 +- drivers/scsi/isci/task.c | 231 +++++++++++++++----------------------- drivers/scsi/isci/task.h | 9 +- 9 files changed, 187 insertions(+), 237 deletions(-) (limited to 'drivers/scsi/isci/request.h') diff --git a/drivers/scsi/isci/host.c b/drivers/scsi/isci/host.c index ae9edae1d24..40f35fad244 100644 --- a/drivers/scsi/isci/host.c +++ b/drivers/scsi/isci/host.c @@ -1327,8 +1327,8 @@ void isci_host_deinit(struct isci_host *ihost) struct isci_remote_device *idev, *d; list_for_each_entry_safe(idev, d, &iport->remote_dev_list, node) { - isci_remote_device_change_state(idev, isci_stopping); - isci_remote_device_stop(ihost, idev); + if (test_bit(IDEV_ALLOCATED, &idev->flags)) + isci_remote_device_stop(ihost, idev); } } diff --git a/drivers/scsi/isci/port.c b/drivers/scsi/isci/port.c index fb66e30da07..5f4a4e3954d 100644 --- a/drivers/scsi/isci/port.c +++ b/drivers/scsi/isci/port.c @@ -321,8 +321,7 @@ static void isci_port_link_down(struct isci_host *isci_host, dev_dbg(&isci_host->pdev->dev, "%s: isci_device = %p\n", __func__, isci_device); - isci_remote_device_change_state(isci_device, - isci_stopping); + set_bit(IDEV_GONE, &isci_device->flags); } } isci_port_change_state(isci_port, isci_stopping); diff --git a/drivers/scsi/isci/remote_device.c b/drivers/scsi/isci/remote_device.c index 45592ad33c3..ab5f9868e4e 100644 --- a/drivers/scsi/isci/remote_device.c +++ b/drivers/scsi/isci/remote_device.c @@ -94,7 +94,7 @@ static void isci_remote_device_not_ready(struct isci_host *ihost, "%s: isci_device = %p\n", __func__, idev); if (reason == SCIC_REMOTE_DEVICE_NOT_READY_STOP_REQUESTED) - isci_remote_device_change_state(idev, isci_stopping); + set_bit(IDEV_GONE, &idev->flags); else /* device ready is actually a "not ready for io" state. 
*/ isci_remote_device_change_state(idev, isci_ready); @@ -449,8 +449,10 @@ static void scic_sds_remote_device_start_request(struct scic_sds_remote_device * /* cleanup requests that failed after starting on the port */ if (status != SCI_SUCCESS) scic_sds_port_complete_io(sci_port, sci_dev, sci_req); - else + else { + kref_get(&sci_dev_to_idev(sci_dev)->kref); scic_sds_remote_device_increment_request_count(sci_dev); + } } enum sci_status scic_sds_remote_device_start_io(struct scic_sds_controller *scic, @@ -656,6 +658,8 @@ enum sci_status scic_sds_remote_device_complete_io(struct scic_sds_controller *s "%s: Port:0x%p Device:0x%p Request:0x%p Status:0x%x " "could not complete\n", __func__, sci_port, sci_dev, sci_req, status); + else + isci_put_device(sci_dev_to_idev(sci_dev)); return status; } @@ -860,23 +864,11 @@ static void isci_remote_device_deconstruct(struct isci_host *ihost, struct isci_ * here should go through isci_remote_device_nuke_requests. * If we hit this condition, we will need a way to complete * io requests in process */ - while (!list_empty(&idev->reqs_in_process)) { - - dev_err(&ihost->pdev->dev, - "%s: ** request list not empty! **\n", __func__); - BUG(); - } + BUG_ON(!list_empty(&idev->reqs_in_process)); scic_remote_device_destruct(&idev->sci); - idev->domain_dev->lldd_dev = NULL; - idev->domain_dev = NULL; - idev->isci_port = NULL; list_del_init(&idev->node); - - clear_bit(IDEV_START_PENDING, &idev->flags); - clear_bit(IDEV_STOP_PENDING, &idev->flags); - clear_bit(IDEV_EH, &idev->flags); - wake_up(&ihost->eventq); + isci_put_device(idev); } /** @@ -1314,6 +1306,22 @@ isci_remote_device_alloc(struct isci_host *ihost, struct isci_port *iport) return idev; } +void isci_remote_device_release(struct kref *kref) +{ + struct isci_remote_device *idev = container_of(kref, typeof(*idev), kref); + struct isci_host *ihost = idev->isci_port->isci_host; + + idev->domain_dev = NULL; + idev->isci_port = NULL; + clear_bit(IDEV_START_PENDING, &idev->flags); + clear_bit(IDEV_STOP_PENDING, &idev->flags); + clear_bit(IDEV_GONE, &idev->flags); + clear_bit(IDEV_EH, &idev->flags); + smp_mb__before_clear_bit(); + clear_bit(IDEV_ALLOCATED, &idev->flags); + wake_up(&ihost->eventq); +} + /** * isci_remote_device_stop() - This function is called internally to stop the * remote device. @@ -1330,7 +1338,11 @@ enum sci_status isci_remote_device_stop(struct isci_host *ihost, struct isci_rem dev_dbg(&ihost->pdev->dev, "%s: isci_device = %p\n", __func__, idev); + spin_lock_irqsave(&ihost->scic_lock, flags); + idev->domain_dev->lldd_dev = NULL; /* disable new lookups */ + set_bit(IDEV_GONE, &idev->flags); isci_remote_device_change_state(idev, isci_stopping); + spin_unlock_irqrestore(&ihost->scic_lock, flags); /* Kill all outstanding requests. */ isci_remote_device_nuke_requests(ihost, idev); @@ -1342,14 +1354,10 @@ enum sci_status isci_remote_device_stop(struct isci_host *ihost, struct isci_rem spin_unlock_irqrestore(&ihost->scic_lock, flags); /* Wait for the stop complete callback. 
*/ - if (status == SCI_SUCCESS) { + if (WARN_ONCE(status != SCI_SUCCESS, "failed to stop device\n")) + /* nothing to wait for */; + else wait_for_device_stop(ihost, idev); - clear_bit(IDEV_ALLOCATED, &idev->flags); - } - - dev_dbg(&ihost->pdev->dev, - "%s: idev = %p - after completion wait\n", - __func__, idev); return status; } @@ -1416,39 +1424,33 @@ int isci_remote_device_found(struct domain_device *domain_dev) if (!isci_device) return -ENODEV; + kref_init(&isci_device->kref); INIT_LIST_HEAD(&isci_device->node); - domain_dev->lldd_dev = isci_device; + + spin_lock_irq(&isci_host->scic_lock); isci_device->domain_dev = domain_dev; isci_device->isci_port = isci_port; isci_remote_device_change_state(isci_device, isci_starting); - - - spin_lock_irq(&isci_host->scic_lock); list_add_tail(&isci_device->node, &isci_port->remote_dev_list); set_bit(IDEV_START_PENDING, &isci_device->flags); status = isci_remote_device_construct(isci_port, isci_device); - spin_unlock_irq(&isci_host->scic_lock); dev_dbg(&isci_host->pdev->dev, "%s: isci_device = %p\n", __func__, isci_device); - if (status != SCI_SUCCESS) { - - spin_lock_irq(&isci_host->scic_lock); - isci_remote_device_deconstruct( - isci_host, - isci_device - ); - spin_unlock_irq(&isci_host->scic_lock); - return -ENODEV; - } + if (status == SCI_SUCCESS) { + /* device came up, advertise it to the world */ + domain_dev->lldd_dev = isci_device; + } else + isci_put_device(isci_device); + spin_unlock_irq(&isci_host->scic_lock); /* wait for the device ready callback. */ wait_for_device_start(isci_host, isci_device); - return 0; + return status == SCI_SUCCESS ? 0 : -ENODEV; } /** * isci_device_is_reset_pending() - This function will check if there is any diff --git a/drivers/scsi/isci/remote_device.h b/drivers/scsi/isci/remote_device.h index 2b6a5bb7bd6..05842b5f1e3 100644 --- a/drivers/scsi/isci/remote_device.h +++ b/drivers/scsi/isci/remote_device.h @@ -56,6 +56,7 @@ #ifndef _ISCI_REMOTE_DEVICE_H_ #define _ISCI_REMOTE_DEVICE_H_ #include <scsi/libsas.h> +#include <linux/kref.h> #include "scu_remote_node_context.h" #include "remote_node_context.h" #include "port.h" @@ -134,7 +135,9 @@ struct isci_remote_device { #define IDEV_STOP_PENDING 1 #define IDEV_ALLOCATED 2 #define IDEV_EH 3 + #define IDEV_GONE 4 unsigned long flags; + struct kref kref; struct isci_port *isci_port; struct domain_device *domain_dev; struct list_head node; @@ -145,6 +148,26 @@ struct isci_remote_device { #define ISCI_REMOTE_DEVICE_START_TIMEOUT 5000 +/* device reference routines must be called under scic_lock */ +static inline struct isci_remote_device *isci_lookup_device(struct domain_device *dev) +{ + struct isci_remote_device *idev = dev->lldd_dev; + + if (idev && !test_bit(IDEV_GONE, &idev->flags)) { + kref_get(&idev->kref); + return idev; + } + + return NULL; +} + +void isci_remote_device_release(struct kref *kref); +static inline void isci_put_device(struct isci_remote_device *idev) +{ + if (idev) + kref_put(&idev->kref, isci_remote_device_release); +} + enum sci_status isci_remote_device_stop(struct isci_host *ihost, struct isci_remote_device *idev); void isci_remote_device_nuke_requests(struct isci_host *ihost, diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c index f0813d076c5..fd6314abeb0 100644 --- a/drivers/scsi/isci/request.c +++ b/drivers/scsi/isci/request.c @@ -2313,7 +2313,7 @@ static void isci_request_set_open_reject_status( * none. 
*/ static void isci_request_handle_controller_specific_errors( - struct isci_remote_device *isci_device, + struct isci_remote_device *idev, struct isci_request *request, struct sas_task *task, enum service_response *response_ptr, @@ -2353,8 +2353,7 @@ static void isci_request_handle_controller_specific_errors( * that we ignore the quiesce state, since we are * concerned about the actual device state. */ - if ((isci_device->status == isci_stopping) || - (isci_device->status == isci_stopped)) + if (!idev) *status_ptr = SAS_DEVICE_UNKNOWN; else *status_ptr = SAS_ABORTED_TASK; @@ -2367,8 +2366,7 @@ static void isci_request_handle_controller_specific_errors( /* Task in the target is not done. */ *response_ptr = SAS_TASK_UNDELIVERED; - if ((isci_device->status == isci_stopping) || - (isci_device->status == isci_stopped)) + if (!idev) *status_ptr = SAS_DEVICE_UNKNOWN; else *status_ptr = SAM_STAT_TASK_ABORTED; @@ -2399,8 +2397,7 @@ static void isci_request_handle_controller_specific_errors( * that we ignore the quiesce state, since we are * concerned about the actual device state. */ - if ((isci_device->status == isci_stopping) || - (isci_device->status == isci_stopped)) + if (!idev) *status_ptr = SAS_DEVICE_UNKNOWN; else *status_ptr = SAS_ABORTED_TASK; @@ -2629,7 +2626,7 @@ static void isci_request_io_request_complete(struct isci_host *isci_host, struct ssp_response_iu *resp_iu; void *resp_buf; unsigned long task_flags; - struct isci_remote_device *isci_device = request->isci_device; + struct isci_remote_device *idev = isci_lookup_device(task->dev); enum service_response response = SAS_TASK_UNDELIVERED; enum exec_status status = SAS_ABORTED_TASK; enum isci_request_status request_status; @@ -2672,9 +2669,7 @@ static void isci_request_io_request_complete(struct isci_host *isci_host, * that we ignore the quiesce state, since we are * concerned about the actual device state. */ - if ((isci_device->status == isci_stopping) - || (isci_device->status == isci_stopped) - ) + if (!idev) status = SAS_DEVICE_UNKNOWN; else status = SAS_ABORTED_TASK; @@ -2697,8 +2692,7 @@ static void isci_request_io_request_complete(struct isci_host *isci_host, request->complete_in_target = true; response = SAS_TASK_UNDELIVERED; - if ((isci_device->status == isci_stopping) || - (isci_device->status == isci_stopped)) + if (!idev) /* The device has been /is being stopped. Note that * we ignore the quiesce state, since we are * concerned about the actual device state. @@ -2728,8 +2722,7 @@ static void isci_request_io_request_complete(struct isci_host *isci_host, * that we ignore the quiesce state, since we are * concerned about the actual device state. */ - if ((isci_device->status == isci_stopping) || - (isci_device->status == isci_stopped)) + if (!idev) status = SAS_DEVICE_UNKNOWN; else status = SAS_ABORTED_TASK; @@ -2861,8 +2854,7 @@ static void isci_request_io_request_complete(struct isci_host *isci_host, * that we ignore the quiesce state, since we are * concerned about the actual device state. 
*/ - if ((isci_device->status == isci_stopping) || - (isci_device->status == isci_stopped)) + if (!idev) status = SAS_DEVICE_UNKNOWN; else status = SAS_ABORTED_TASK; @@ -2873,7 +2865,7 @@ static void isci_request_io_request_complete(struct isci_host *isci_host, case SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR: isci_request_handle_controller_specific_errors( - isci_device, request, task, &response, &status, + idev, request, task, &response, &status, &complete_to_host); break; @@ -2902,8 +2894,7 @@ static void isci_request_io_request_complete(struct isci_host *isci_host, /* Fail the I/O so it can be retried. */ response = SAS_TASK_UNDELIVERED; - if ((isci_device->status == isci_stopping) || - (isci_device->status == isci_stopped)) + if (!idev) status = SAS_DEVICE_UNKNOWN; else status = SAS_ABORTED_TASK; @@ -2926,8 +2917,7 @@ static void isci_request_io_request_complete(struct isci_host *isci_host, * that we ignore the quiesce state, since we are * concerned about the actual device state. */ - if ((isci_device->status == isci_stopping) || - (isci_device->status == isci_stopped)) + if (!idev) status = SAS_DEVICE_UNKNOWN; else status = SAS_ABORTED_TASK; @@ -2953,8 +2943,10 @@ static void isci_request_io_request_complete(struct isci_host *isci_host, /* complete the io request to the core. */ scic_controller_complete_io(&isci_host->sci, - &isci_device->sci, + request->sci.target_device, &request->sci); + isci_put_device(idev); + /* set terminated handle so it cannot be completed or * terminated again, and to cause any calls into abort * task to recognize the already completed case. @@ -3511,7 +3503,6 @@ static enum sci_status isci_io_request_build( } static struct isci_request *isci_request_alloc_core(struct isci_host *ihost, - struct isci_remote_device *idev, gfp_t gfp_flags) { dma_addr_t handle; @@ -3528,7 +3519,6 @@ static struct isci_request *isci_request_alloc_core(struct isci_host *ihost, spin_lock_init(&ireq->state_lock); ireq->request_daddr = handle; ireq->isci_host = ihost; - ireq->isci_device = idev; ireq->io_request_completion = NULL; ireq->terminated = false; @@ -3546,12 +3536,11 @@ static struct isci_request *isci_request_alloc_core(struct isci_host *ihost, static struct isci_request *isci_request_alloc_io(struct isci_host *ihost, struct sas_task *task, - struct isci_remote_device *idev, gfp_t gfp_flags) { struct isci_request *ireq; - ireq = isci_request_alloc_core(ihost, idev, gfp_flags); + ireq = isci_request_alloc_core(ihost, gfp_flags); if (ireq) { ireq->ttype_ptr.io_task_ptr = task; ireq->ttype = io_task; @@ -3562,12 +3551,11 @@ static struct isci_request *isci_request_alloc_io(struct isci_host *ihost, struct isci_request *isci_request_alloc_tmf(struct isci_host *ihost, struct isci_tmf *isci_tmf, - struct isci_remote_device *idev, gfp_t gfp_flags) { struct isci_request *ireq; - ireq = isci_request_alloc_core(ihost, idev, gfp_flags); + ireq = isci_request_alloc_core(ihost, gfp_flags); if (ireq) { ireq->ttype_ptr.tmf_task_ptr = isci_tmf; ireq->ttype = tmf_task; @@ -3575,21 +3563,16 @@ struct isci_request *isci_request_alloc_tmf(struct isci_host *ihost, return ireq; } -int isci_request_execute(struct isci_host *ihost, struct sas_task *task, - gfp_t gfp_flags) +int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *idev, + struct sas_task *task, gfp_t gfp_flags) { enum sci_status status = SCI_FAILURE_UNSUPPORTED_PROTOCOL; - struct scic_sds_remote_device *sci_dev; - struct isci_remote_device *idev; struct isci_request *ireq; unsigned long flags; int ret = 0; - idev = 
task->dev->lldd_dev; - sci_dev = &idev->sci; - /* do common allocation and init of request object. */ - ireq = isci_request_alloc_io(ihost, task, idev, gfp_flags); + ireq = isci_request_alloc_io(ihost, task, gfp_flags); if (!ireq) goto out; @@ -3605,8 +3588,7 @@ int isci_request_execute(struct isci_host *ihost, struct sas_task *task, spin_lock_irqsave(&ihost->scic_lock, flags); /* send the request, let the core assign the IO TAG. */ - status = scic_controller_start_io(&ihost->sci, sci_dev, - &ireq->sci, + status = scic_controller_start_io(&ihost->sci, &idev->sci, &ireq->sci, SCI_CONTROLLER_INVALID_IO_TAG); if (status != SCI_SUCCESS && status != SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) { diff --git a/drivers/scsi/isci/request.h b/drivers/scsi/isci/request.h index 8de2542f081..9bb7c36257f 100644 --- a/drivers/scsi/isci/request.h +++ b/drivers/scsi/isci/request.h @@ -285,7 +285,6 @@ struct isci_request { struct isci_tmf *tmf_task_ptr; /* When ttype==tmf_task */ } ttype_ptr; struct isci_host *isci_host; - struct isci_remote_device *isci_device; /* For use in the requests_to_{complete|abort} lists: */ struct list_head completed_node; /* For use in the reqs_in_process list: */ @@ -681,12 +680,10 @@ static inline void isci_request_free(struct isci_host *isci_host, struct isci_request *isci_request_alloc_tmf(struct isci_host *ihost, struct isci_tmf *isci_tmf, - struct isci_remote_device *idev, gfp_t gfp_flags); -int isci_request_execute(struct isci_host *isci_host, - struct sas_task *task, - gfp_t gfp_flags); +int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *idev, + struct sas_task *task, gfp_t gfp_flags); /** * isci_request_unmap_sgl() - This function unmaps the DMA address of a given diff --git a/drivers/scsi/isci/sata.c b/drivers/scsi/isci/sata.c index b9b9271d473..e7ce4692446 100644 --- a/drivers/scsi/isci/sata.c +++ b/drivers/scsi/isci/sata.c @@ -213,11 +213,10 @@ int isci_task_send_lu_reset_sata( /* Send the soft reset to the target */ #define ISCI_SRST_TIMEOUT_MS 25000 /* 25 second timeout. 
*/ - isci_task_build_tmf(&tmf, isci_device, isci_tmf_sata_srst_high, - NULL, NULL - ); + isci_task_build_tmf(&tmf, isci_tmf_sata_srst_high, NULL, NULL); - ret = isci_task_execute_tmf(isci_host, &tmf, ISCI_SRST_TIMEOUT_MS); + ret = isci_task_execute_tmf(isci_host, isci_device, &tmf, + ISCI_SRST_TIMEOUT_MS); if (ret != TMF_RESP_FUNC_COMPLETE) { dev_warn(&isci_host->pdev->dev, diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c index ded81cd1a78..dd5e9de1ffb 100644 --- a/drivers/scsi/isci/task.c +++ b/drivers/scsi/isci/task.c @@ -146,7 +146,7 @@ static void isci_task_refuse(struct isci_host *ihost, struct sas_task *task, int isci_task_execute_task(struct sas_task *task, int num, gfp_t gfp_flags) { struct isci_host *ihost = dev_to_ihost(task->dev); - struct isci_remote_device *device; + struct isci_remote_device *idev; unsigned long flags; int ret; enum sci_status status; @@ -166,11 +166,12 @@ int isci_task_execute_task(struct sas_task *task, int num, gfp_t gfp_flags) dev_dbg(&ihost->pdev->dev, "task = %p, num = %d; dev = %p; cmd = %p\n", task, num, task->dev, task->uldd_task); + spin_lock_irqsave(&ihost->scic_lock, flags); + idev = isci_lookup_device(task->dev); + spin_unlock_irqrestore(&ihost->scic_lock, flags); - device = task->dev->lldd_dev; - - if (device) - device_status = device->status; + if (idev) + device_status = idev->status; else device_status = isci_freed; @@ -188,7 +189,7 @@ int isci_task_execute_task(struct sas_task *task, int num, gfp_t gfp_flags) __func__, task, isci_host_get_state(ihost), - device, + idev, device_status); if (device_status == isci_ready) { @@ -225,7 +226,7 @@ int isci_task_execute_task(struct sas_task *task, int num, gfp_t gfp_flags) spin_unlock_irqrestore(&task->task_state_lock, flags); /* build and send the request. */ - status = isci_request_execute(ihost, task, gfp_flags); + status = isci_request_execute(ihost, idev, task, gfp_flags); if (status != SCI_SUCCESS) { @@ -248,33 +249,31 @@ int isci_task_execute_task(struct sas_task *task, int num, gfp_t gfp_flags) } } } + isci_put_device(idev); } return 0; } static struct isci_request *isci_task_request_build(struct isci_host *ihost, + struct isci_remote_device *idev, struct isci_tmf *isci_tmf) { - struct scic_sds_remote_device *sci_dev; enum sci_status status = SCI_FAILURE; struct isci_request *ireq = NULL; - struct isci_remote_device *idev; struct domain_device *dev; dev_dbg(&ihost->pdev->dev, "%s: isci_tmf = %p\n", __func__, isci_tmf); - idev = isci_tmf->device; - sci_dev = &idev->sci; dev = idev->domain_dev; /* do common allocation and init of request object. */ - ireq = isci_request_alloc_tmf(ihost, isci_tmf, idev, GFP_ATOMIC); + ireq = isci_request_alloc_tmf(ihost, isci_tmf, GFP_ATOMIC); if (!ireq) return NULL; /* let the core do it's construct. */ - status = scic_task_request_construct(&ihost->sci, sci_dev, + status = scic_task_request_construct(&ihost->sci, &idev->sci, SCI_CONTROLLER_INVALID_IO_TAG, &ireq->sci); @@ -309,25 +308,13 @@ static struct isci_request *isci_task_request_build(struct isci_host *ihost, return ireq; } -/** - * isci_task_execute_tmf() - This function builds and sends a task request, - * then waits for the completion. - * @isci_host: This parameter specifies the ISCI host object - * @tmf: This parameter is the pointer to the task management structure for - * this request. - * @timeout_ms: This parameter specifies the timeout period for the task - * management request. 
- * - * TMF_RESP_FUNC_COMPLETE on successful completion of the TMF (this includes - * error conditions reported in the IU status), or TMF_RESP_FUNC_FAILED. - */ -int isci_task_execute_tmf(struct isci_host *ihost, struct isci_tmf *tmf, - unsigned long timeout_ms) +int isci_task_execute_tmf(struct isci_host *ihost, + struct isci_remote_device *isci_device, + struct isci_tmf *tmf, unsigned long timeout_ms) { DECLARE_COMPLETION_ONSTACK(completion); enum sci_task_status status = SCI_TASK_FAILURE; struct scic_sds_remote_device *sci_device; - struct isci_remote_device *isci_device = tmf->device; struct isci_request *ireq; int ret = TMF_RESP_FUNC_FAILED; unsigned long flags; @@ -352,7 +339,7 @@ int isci_task_execute_tmf(struct isci_host *ihost, struct isci_tmf *tmf, /* Assign the pointer to the TMF's completion kernel wait structure. */ tmf->complete = &completion; - ireq = isci_task_request_build(ihost, tmf); + ireq = isci_task_request_build(ihost, isci_device, tmf); if (!ireq) { dev_warn(&ihost->pdev->dev, "%s: isci_task_request_build failed\n", @@ -399,10 +386,9 @@ int isci_task_execute_tmf(struct isci_host *ihost, struct isci_tmf *tmf, if (tmf->cb_state_func != NULL) tmf->cb_state_func(isci_tmf_timed_out, tmf, tmf->cb_data); - status = scic_controller_terminate_request( - &ireq->isci_host->sci, - &ireq->isci_device->sci, - &ireq->sci); + status = scic_controller_terminate_request(&ihost->sci, + &isci_device->sci, + &ireq->sci); spin_unlock_irqrestore(&ihost->scic_lock, flags); } @@ -437,65 +423,32 @@ int isci_task_execute_tmf(struct isci_host *ihost, struct isci_tmf *tmf, void isci_task_build_tmf( struct isci_tmf *tmf, - struct isci_remote_device *isci_device, enum isci_tmf_function_codes code, void (*tmf_sent_cb)(enum isci_tmf_cb_state, struct isci_tmf *, void *), void *cb_data) { - dev_dbg(&isci_device->isci_port->isci_host->pdev->dev, - "%s: isci_device = %p\n", __func__, isci_device); - memset(tmf, 0, sizeof(*tmf)); - tmf->device = isci_device; tmf->tmf_code = code; - tmf->cb_state_func = tmf_sent_cb; tmf->cb_data = cb_data; } static void isci_task_build_abort_task_tmf( struct isci_tmf *tmf, - struct isci_remote_device *isci_device, enum isci_tmf_function_codes code, void (*tmf_sent_cb)(enum isci_tmf_cb_state, struct isci_tmf *, void *), struct isci_request *old_request) { - isci_task_build_tmf(tmf, isci_device, code, tmf_sent_cb, + isci_task_build_tmf(tmf, code, tmf_sent_cb, (void *)old_request); tmf->io_tag = old_request->io_tag; } -static struct isci_request *isci_task_get_request_from_task( - struct sas_task *task, - struct isci_remote_device **isci_device) -{ - - struct isci_request *request = NULL; - unsigned long flags; - - spin_lock_irqsave(&task->task_state_lock, flags); - - request = task->lldd_task; - - /* If task is already done, the request isn't valid */ - if (!(task->task_state_flags & SAS_TASK_STATE_DONE) && - (task->task_state_flags & SAS_TASK_AT_INITIATOR) && - (request != NULL)) { - - if (isci_device != NULL) - *isci_device = request->isci_device; - } - - spin_unlock_irqrestore(&task->task_state_lock, flags); - - return request; -} - /** * isci_task_validate_request_to_abort() - This function checks the given I/O * against the "started" state. If the request is still "started", it's @@ -858,11 +811,10 @@ static int isci_task_send_lu_reset_sas( * value is "TMF_RESP_FUNC_COMPLETE", or the request timed-out (or * was otherwise unable to be executed ("TMF_RESP_FUNC_FAILED"). 
*/ - isci_task_build_tmf(&tmf, isci_device, isci_tmf_ssp_lun_reset, NULL, - NULL); + isci_task_build_tmf(&tmf, isci_tmf_ssp_lun_reset, NULL, NULL); #define ISCI_LU_RESET_TIMEOUT_MS 2000 /* 2 second timeout. */ - ret = isci_task_execute_tmf(isci_host, &tmf, ISCI_LU_RESET_TIMEOUT_MS); + ret = isci_task_execute_tmf(isci_host, isci_device, &tmf, ISCI_LU_RESET_TIMEOUT_MS); if (ret == TMF_RESP_FUNC_COMPLETE) dev_dbg(&isci_host->pdev->dev, @@ -888,33 +840,33 @@ static int isci_task_send_lu_reset_sas( int isci_task_lu_reset(struct domain_device *domain_device, u8 *lun) { struct isci_host *isci_host = dev_to_ihost(domain_device); - struct isci_remote_device *isci_device = NULL; + struct isci_remote_device *isci_device; + unsigned long flags; int ret; - bool device_stopping = false; - isci_device = domain_device->lldd_dev; + spin_lock_irqsave(&isci_host->scic_lock, flags); + isci_device = isci_lookup_device(domain_device); + spin_unlock_irqrestore(&isci_host->scic_lock, flags); dev_dbg(&isci_host->pdev->dev, "%s: domain_device=%p, isci_host=%p; isci_device=%p\n", __func__, domain_device, isci_host, isci_device); - if (isci_device != NULL) { - device_stopping = (isci_device->status == isci_stopping) - || (isci_device->status == isci_stopped); + if (isci_device) set_bit(IDEV_EH, &isci_device->flags); - } /* If there is a device reset pending on any request in the * device's list, fail this LUN reset request in order to * escalate to the device reset. */ - if (!isci_device || device_stopping || + if (!isci_device || isci_device_is_reset_pending(isci_host, isci_device)) { dev_warn(&isci_host->pdev->dev, "%s: No dev (%p), or " "RESET PENDING: domain_device=%p\n", __func__, isci_device, domain_device); - return TMF_RESP_FUNC_FAILED; + ret = TMF_RESP_FUNC_FAILED; + goto out; } /* Send the task management part of the reset. */ @@ -929,6 +881,8 @@ int isci_task_lu_reset(struct domain_device *domain_device, u8 *lun) isci_terminate_pending_requests(isci_host, isci_device); + out: + isci_put_device(isci_device); return ret; } @@ -1023,60 +977,54 @@ int isci_task_abort_task(struct sas_task *task) int ret = TMF_RESP_FUNC_FAILED; unsigned long flags; bool any_dev_reset = false; - bool device_stopping; /* Get the isci_request reference from the task. Note that * this check does not depend on the pending request list * in the device, because tasks driving resets may land here * after completion in the core. */ - old_request = isci_task_get_request_from_task(task, &isci_device); + spin_lock_irqsave(&isci_host->scic_lock, flags); + spin_lock(&task->task_state_lock); + + old_request = task->lldd_task; + + /* If task is already done, the request isn't valid */ + if (!(task->task_state_flags & SAS_TASK_STATE_DONE) && + (task->task_state_flags & SAS_TASK_AT_INITIATOR) && + old_request) + isci_device = isci_lookup_device(task->dev); + + spin_unlock(&task->task_state_lock); + spin_unlock_irqrestore(&isci_host->scic_lock, flags); dev_dbg(&isci_host->pdev->dev, "%s: task = %p\n", __func__, task); - /* Check if the device has been / is currently being removed. - * If so, no task management will be done, and the I/O will - * be terminated. 
- */ - device_stopping = (isci_device->status == isci_stopping) - || (isci_device->status == isci_stopped); + if (!isci_device || !old_request) + goto out; - /* XXX need to fix device lookup lifetime (needs to be done - * under scic_lock, among other things...), but for now assume - * the device is available like the above code - */ set_bit(IDEV_EH, &isci_device->flags); /* This version of the driver will fail abort requests for * SATA/STP. Failing the abort request this way will cause the * SCSI error handler thread to escalate to LUN reset */ - if (sas_protocol_ata(task->task_proto) && !device_stopping) { + if (sas_protocol_ata(task->task_proto)) { dev_warn(&isci_host->pdev->dev, " task %p is for a STP/SATA device;" " returning TMF_RESP_FUNC_FAILED\n" " to cause a LUN reset...\n", task); - return TMF_RESP_FUNC_FAILED; + goto out; } dev_dbg(&isci_host->pdev->dev, "%s: old_request == %p\n", __func__, old_request); - if (!device_stopping) - any_dev_reset = isci_device_is_reset_pending(isci_host,isci_device); + any_dev_reset = isci_device_is_reset_pending(isci_host,isci_device); spin_lock_irqsave(&task->task_state_lock, flags); - /* Don't do resets to stopping devices. */ - if (device_stopping) { - - task->task_state_flags &= ~SAS_TASK_NEED_DEV_RESET; - any_dev_reset = false; - - } else /* See if there is a pending device reset for this device. */ - any_dev_reset = any_dev_reset - || (task->task_state_flags & SAS_TASK_NEED_DEV_RESET); + any_dev_reset = any_dev_reset || (task->task_state_flags & SAS_TASK_NEED_DEV_RESET); /* If the extraction of the request reference from the task * failed, then the request has been completed (or if there is a @@ -1130,8 +1078,7 @@ int isci_task_abort_task(struct sas_task *task) "%s: abort task not needed for %p\n", __func__, task); } - - return ret; + goto out; } else spin_unlock_irqrestore(&task->task_state_lock, flags); @@ -1158,11 +1105,10 @@ int isci_task_abort_task(struct sas_task *task) "%s: device = %p; old_request %p already being aborted\n", __func__, isci_device, old_request); - - return TMF_RESP_FUNC_COMPLETE; + ret = TMF_RESP_FUNC_COMPLETE; + goto out; } if ((task->task_proto == SAS_PROTOCOL_SMP) - || device_stopping || old_request->complete_in_target ) { @@ -1170,10 +1116,9 @@ int isci_task_abort_task(struct sas_task *task) dev_dbg(&isci_host->pdev->dev, "%s: SMP request (%d)" - " or device is stopping (%d)" " or complete_in_target (%d), thus no TMF\n", __func__, (task->task_proto == SAS_PROTOCOL_SMP), - device_stopping, old_request->complete_in_target); + old_request->complete_in_target); /* Set the state on the task. */ isci_task_all_done(task); @@ -1185,15 +1130,14 @@ int isci_task_abort_task(struct sas_task *task) */ } else { /* Fill in the tmf stucture */ - isci_task_build_abort_task_tmf(&tmf, isci_device, - isci_tmf_ssp_task_abort, + isci_task_build_abort_task_tmf(&tmf, isci_tmf_ssp_task_abort, isci_abort_task_process_cb, old_request); spin_unlock_irqrestore(&isci_host->scic_lock, flags); #define ISCI_ABORT_TASK_TIMEOUT_MS 500 /* half second timeout. 
*/ - ret = isci_task_execute_tmf(isci_host, &tmf, + ret = isci_task_execute_tmf(isci_host, isci_device, &tmf, ISCI_ABORT_TASK_TIMEOUT_MS); if (ret != TMF_RESP_FUNC_COMPLETE) @@ -1212,6 +1156,8 @@ int isci_task_abort_task(struct sas_task *task) /* Make sure we do not leave a reference to aborted_io_completion */ old_request->io_request_completion = NULL; + out: + isci_put_device(isci_device); return ret; } @@ -1305,7 +1251,6 @@ isci_task_request_complete(struct isci_host *ihost, struct isci_request *ireq, enum sci_task_status completion_status) { - struct isci_remote_device *idev = ireq->isci_device; struct isci_tmf *tmf = isci_request_access_tmf(ireq); struct completion *tmf_complete; struct scic_sds_request *sci_req = &ireq->sci; @@ -1332,7 +1277,7 @@ isci_task_request_complete(struct isci_host *ihost, /* PRINT_TMF( ((struct isci_tmf *)request->task)); */ tmf_complete = tmf->complete; - scic_controller_complete_io(&ihost->sci, &idev->sci, &ireq->sci); + scic_controller_complete_io(&ihost->sci, ireq->sci.target_device, &ireq->sci); /* set the 'terminated' flag handle to make sure it cannot be terminated * or completed again. */ @@ -1583,11 +1528,10 @@ static void isci_wait_for_smp_phy_reset(struct isci_remote_device *idev, int phy dev_dbg(&ihost->pdev->dev, "%s: done\n", __func__); } -static int isci_reset_device(struct domain_device *dev, int hard_reset) +static int isci_reset_device(struct isci_host *ihost, + struct isci_remote_device *idev, int hard_reset) { - struct isci_remote_device *idev = dev->lldd_dev; - struct sas_phy *phy = sas_find_local_phy(dev); - struct isci_host *ihost = dev_to_ihost(dev); + struct sas_phy *phy = sas_find_local_phy(idev->domain_dev); struct isci_port *iport = idev->isci_port; enum sci_status status; unsigned long flags; @@ -1595,14 +1539,6 @@ static int isci_reset_device(struct domain_device *dev, int hard_reset) dev_dbg(&ihost->pdev->dev, "%s: idev %p\n", __func__, idev); - if (!idev) { - dev_warn(&ihost->pdev->dev, - "%s: idev is GONE!\n", - __func__); - - return TMF_RESP_FUNC_COMPLETE; /* Nothing to reset. */ - } - spin_lock_irqsave(&ihost->scic_lock, flags); status = scic_remote_device_reset(&idev->sci); if (status != SCI_SUCCESS) { @@ -1662,35 +1598,50 @@ static int isci_reset_device(struct domain_device *dev, int hard_reset) int isci_task_I_T_nexus_reset(struct domain_device *dev) { struct isci_host *ihost = dev_to_ihost(dev); - int ret = TMF_RESP_FUNC_FAILED, hard_reset = 1; struct isci_remote_device *idev; + int ret, hard_reset = 1; unsigned long flags; - /* XXX mvsas is not protecting against ->lldd_dev_gone(), are we - * being too paranoid, or is mvsas busted?! 
- */ spin_lock_irqsave(&ihost->scic_lock, flags); - idev = dev->lldd_dev; - if (!idev || !test_bit(IDEV_EH, &idev->flags)) - ret = TMF_RESP_FUNC_COMPLETE; + idev = isci_lookup_device(dev); spin_unlock_irqrestore(&ihost->scic_lock, flags); - if (ret == TMF_RESP_FUNC_COMPLETE) - return ret; + if (!idev || !test_bit(IDEV_EH, &idev->flags)) { + ret = TMF_RESP_FUNC_COMPLETE; + goto out; + } if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) hard_reset = 0; - return isci_reset_device(dev, hard_reset); + ret = isci_reset_device(ihost, idev, hard_reset); + out: + isci_put_device(idev); + return ret; } int isci_bus_reset_handler(struct scsi_cmnd *cmd) { struct domain_device *dev = sdev_to_domain_dev(cmd->device); - int hard_reset = 1; + struct isci_host *ihost = dev_to_ihost(dev); + struct isci_remote_device *idev; + int ret, hard_reset = 1; + unsigned long flags; if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) hard_reset = 0; - return isci_reset_device(dev, hard_reset); + spin_lock_irqsave(&ihost->scic_lock, flags); + idev = isci_lookup_device(dev); + spin_unlock_irqrestore(&ihost->scic_lock, flags); + + if (!idev) { + ret = TMF_RESP_FUNC_COMPLETE; + goto out; + } + + ret = isci_reset_device(ihost, idev, hard_reset); + out: + isci_put_device(idev); + return ret; } diff --git a/drivers/scsi/isci/task.h b/drivers/scsi/isci/task.h index d574a18956d..42019de2380 100644 --- a/drivers/scsi/isci/task.h +++ b/drivers/scsi/isci/task.h @@ -213,18 +213,15 @@ int isci_bus_reset_handler(struct scsi_cmnd *cmd); void isci_task_build_tmf( struct isci_tmf *tmf, - struct isci_remote_device *isci_device, enum isci_tmf_function_codes code, void (*tmf_sent_cb)(enum isci_tmf_cb_state, struct isci_tmf *, void *), void *cb_data); - -int isci_task_execute_tmf( - struct isci_host *isci_host, - struct isci_tmf *tmf, - unsigned long timeout_ms); +int isci_task_execute_tmf(struct isci_host *isci_host, + struct isci_remote_device *idev, + struct isci_tmf *tmf, unsigned long timeout_ms); /** * enum isci_completion_selection - This enum defines the possible actions to -- cgit v1.2.3-70-g09d2 From ff60639dc9a461883db9192d2da0674a00339f12 Mon Sep 17 00:00:00 2001 From: Dan Williams <dan.j.williams@intel.com> Date: Fri, 17 Jun 2011 13:34:43 -0700 Subject: isci: kill device_sequence Now that we have upleveled device reassignment protection to the isci_remote_device reference count we no longer need this level of self-defense. Signed-off-by: Dan Williams <dan.j.williams@intel.com> --- drivers/scsi/isci/host.h | 9 --------- drivers/scsi/isci/remote_device.c | 2 -- drivers/scsi/isci/remote_device.h | 11 ----------- drivers/scsi/isci/request.c | 5 ----- drivers/scsi/isci/request.h | 9 --------- 5 files changed, 36 deletions(-) (limited to 'drivers/scsi/isci/request.h') diff --git a/drivers/scsi/isci/host.h b/drivers/scsi/isci/host.h index 94fd54dc9f0..a54397e1bf1 100644 --- a/drivers/scsi/isci/host.h +++ b/drivers/scsi/isci/host.h @@ -187,15 +187,6 @@ struct scic_sds_controller { /* sequence number per tci */ u8 io_request_sequence[SCI_MAX_IO_REQUESTS]; - /** - * This field in the array of sequence values for the RNi. These are used - * to control io request build to io request start operations. The sequence - * value is recorded into an io request when it is built and is checked on - * the io request start operation to make sure that there was not a device - * hot plug between the build and start operation. 
- */ - u8 remote_device_sequence[SCI_MAX_REMOTE_DEVICES]; - /** * This field is a pointer to the memory allocated by the driver for the task * context table. This data is shared between the hardware and software. diff --git a/drivers/scsi/isci/remote_device.c b/drivers/scsi/isci/remote_device.c index c2e5c05be0c..9f45c2ba730 100644 --- a/drivers/scsi/isci/remote_device.c +++ b/drivers/scsi/isci/remote_device.c @@ -887,8 +887,6 @@ static void scic_sds_remote_device_ready_state_enter(struct sci_base_state_machi struct isci_remote_device *idev = sci_dev_to_idev(sci_dev); struct domain_device *dev = idev->domain_dev; - scic->remote_device_sequence[sci_dev->rnc.remote_node_index]++; - if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_SATA)) { sci_change_state(&sci_dev->sm, SCI_STP_DEV_IDLE); } else if (dev_is_expander(dev)) { diff --git a/drivers/scsi/isci/remote_device.h b/drivers/scsi/isci/remote_device.h index 33f01144731..cde595078f6 100644 --- a/drivers/scsi/isci/remote_device.h +++ b/drivers/scsi/isci/remote_device.h @@ -410,17 +410,6 @@ static inline bool dev_is_expander(struct domain_device *dev) #define scic_sds_remote_device_get_port(sci_dev) \ ((sci_dev)->owning_port) -/** - * scic_sds_remote_device_get_sequence() - - * - * This macro returns the remote device sequence value - */ -#define scic_sds_remote_device_get_sequence(sci_dev) \ - (\ - scic_sds_remote_device_get_controller(sci_dev)-> \ - remote_device_sequence[(sci_dev)->rnc.remote_node_index] \ - ) - /** * scic_sds_remote_device_get_controller_peg() - * diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c index fd6314abeb0..ebe160c83f9 100644 --- a/drivers/scsi/isci/request.c +++ b/drivers/scsi/isci/request.c @@ -753,10 +753,6 @@ enum sci_status scic_sds_request_start(struct scic_sds_request *sci_req) struct scu_task_context *task_context; enum sci_base_request_states state; - if (sci_req->device_sequence != - scic_sds_remote_device_get_sequence(sci_req->target_device)) - return SCI_FAILURE; - state = sci_req->sm.current_state_id; if (state != SCI_REQ_CONSTRUCTED) { dev_warn(scic_to_dev(scic), @@ -3112,7 +3108,6 @@ scic_sds_general_request_construct(struct scic_sds_controller *scic, sci_req->target_device = sci_dev; sci_req->protocol = SCIC_NO_PROTOCOL; sci_req->saved_rx_frame_index = SCU_INVALID_FRAME_INDEX; - sci_req->device_sequence = scic_sds_remote_device_get_sequence(sci_dev); sci_req->sci_status = SCI_SUCCESS; sci_req->scu_status = 0; diff --git a/drivers/scsi/isci/request.h b/drivers/scsi/isci/request.h index 9bb7c36257f..a91d1d6060c 100644 --- a/drivers/scsi/isci/request.h +++ b/drivers/scsi/isci/request.h @@ -231,14 +231,6 @@ struct scic_sds_request { */ u32 saved_rx_frame_index; - /* - * This field in the recorded device sequence for the io request. - * This is recorded during the build operation and is compared in the - * start operation. If the sequence is different then there was a - * change of devices from the build to start operations. 
- */ - u8 device_sequence; - union { struct { union { @@ -262,7 +254,6 @@ struct scic_sds_request { struct dev_to_host_fis rsp; } stp; }; - }; static inline struct scic_sds_request *to_sci_req(struct scic_sds_stp_request *stp_req) -- cgit v1.2.3-70-g09d2 From ddcc7e347a891937be65358b43f40b7f81185f8f Mon Sep 17 00:00:00 2001 From: Dan Williams <dan.j.williams@intel.com> Date: Fri, 17 Jun 2011 10:40:43 -0700 Subject: isci: fix dma_unmap_sg usage One bug and a cleanup: 1/ Fix cases where we were unmapping invalid addresses (smp requests were being unmapped) [ 604.662770] ------------[ cut here ]------------ [ 604.668026] WARNING: at lib/dma-debug.c:800 check_unmap+0x418/0x740() [ 604.675315] Hardware name: SandyBridge Platform [ 604.680465] isci 0000:03:00.0: DMA-API: device driver tries to free an invalid DMA memory address 2/ The unmap routine is too large to be an inline function, and isci_request_io_request_get_next_sge is unused. Signed-off-by: Dan Williams <dan.j.williams@intel.com> --- drivers/scsi/isci/request.c | 17 ++++++++- drivers/scsi/isci/request.h | 91 +-------------------------------------------- 2 files changed, 18 insertions(+), 90 deletions(-) (limited to 'drivers/scsi/isci/request.h') diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c index f4fbca7b1fa..39508495515 100644 --- a/drivers/scsi/isci/request.c +++ b/drivers/scsi/isci/request.c @@ -2930,7 +2930,22 @@ static void isci_request_io_request_complete(struct isci_host *isci_host, break; } - isci_request_unmap_sgl(request, isci_host->pdev); + switch (task->task_proto) { + case SAS_PROTOCOL_SSP: + if (task->data_dir == DMA_NONE) + break; + if (task->num_scatter == 0) + /* 0 indicates a single dma address */ + dma_unmap_single(&isci_host->pdev->dev, + request->zero_scatter_daddr, + task->total_xfer_len, task->data_dir); + else /* unmap the sgl dma addresses */ + dma_unmap_sg(&isci_host->pdev->dev, task->scatter, + request->num_sg_entries, task->data_dir); + break; + default: + break; + } /* Put the completed request on the correct list */ isci_task_save_for_upper_layer_completion(isci_host, request, response, diff --git a/drivers/scsi/isci/request.h b/drivers/scsi/isci/request.h index a91d1d6060c..324fb7b3ab4 100644 --- a/drivers/scsi/isci/request.h +++ b/drivers/scsi/isci/request.h @@ -672,97 +672,10 @@ static inline void isci_request_free(struct isci_host *isci_host, struct isci_request *isci_request_alloc_tmf(struct isci_host *ihost, struct isci_tmf *isci_tmf, gfp_t gfp_flags); - int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *idev, struct sas_task *task, gfp_t gfp_flags); - -/** - * isci_request_unmap_sgl() - This function unmaps the DMA address of a given - * sgl - * @request: This parameter points to the isci_request object - * @*pdev: This Parameter is the pci_device struct for the controller - * - */ -static inline void -isci_request_unmap_sgl(struct isci_request *request, struct pci_dev *pdev) -{ - struct sas_task *task = isci_request_access_task(request); - - dev_dbg(&request->isci_host->pdev->dev, - "%s: request = %p, task = %p,\n" - "task->data_dir = %d, is_sata = %d\n ", - __func__, - request, - task, - task->data_dir, - sas_protocol_ata(task->task_proto)); - - if ((task->data_dir != PCI_DMA_NONE) && - !sas_protocol_ata(task->task_proto)) { - if (task->num_scatter == 0) - /* 0 indicates a single dma address */ - dma_unmap_single( - &pdev->dev, - request->zero_scatter_daddr, - task->total_xfer_len, - task->data_dir - ); - - else /* unmap the sgl dma addresses */ 
- dma_unmap_sg( - &pdev->dev, - task->scatter, - request->num_sg_entries, - task->data_dir - ); - } -} - -/** - * isci_request_io_request_get_next_sge() - This function is called by the sci - * core to retrieve the next sge for a given request. - * @request: This parameter is the isci_request object. - * @current_sge_address: This parameter is the last sge retrieved by the sci - * core for this request. - * - * pointer to the next sge for specified request. - */ -static inline void * -isci_request_io_request_get_next_sge(struct isci_request *request, - void *current_sge_address) -{ - struct sas_task *task = isci_request_access_task(request); - void *ret = NULL; - - dev_dbg(&request->isci_host->pdev->dev, - "%s: request = %p, " - "current_sge_address = %p, " - "num_scatter = %d\n", - __func__, - request, - current_sge_address, - task->num_scatter); - - if (!current_sge_address) /* First time through.. */ - ret = task->scatter; /* always task->scatter */ - else if (task->num_scatter == 0) /* Next element, if num_scatter == 0 */ - ret = NULL; /* there is only one element. */ - else - ret = sg_next(current_sge_address); /* sg_next returns NULL - * for the last element - */ - - dev_dbg(&request->isci_host->pdev->dev, - "%s: next sge address = %p\n", - __func__, - ret); - - return ret; -} - -void -isci_terminate_pending_requests(struct isci_host *ihost, - struct isci_remote_device *idev); +void isci_terminate_pending_requests(struct isci_host *ihost, + struct isci_remote_device *idev); enum sci_status scic_task_request_construct(struct scic_sds_controller *scic, struct scic_sds_remote_device *sci_dev, -- cgit v1.2.3-70-g09d2 From e9bf709564e90abea25ca7aeae8c3de5cc6468d7 Mon Sep 17 00:00:00 2001 From: Dan Williams <dan.j.williams@intel.com> Date: Thu, 16 Jun 2011 16:59:56 -0700 Subject: isci: fix support for arbitrarily large smp requests Instead of duplicating the smp request buffer reuse the one provided by libsas. This future proofs the driver to support arbitrarily large smp requests, and shrinks the request structure size by ~700 bytes. 
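A minimal sketch of the buffer-reuse idea above, assuming the same kernel APIs the hunks below rely on (kmap_atomic() in its two-argument KM_IRQ0 form, dma_map_sg(), sg_dma_address()); the helper name and its error handling are illustrative only, not part of the patch:

/* Illustration: peek at the SMP header in the libsas-provided scatterlist
 * and DMA-map that same buffer, so the driver keeps no private copy of the
 * request payload.
 */
static int example_map_smp_req(struct device *dev, struct sas_task *task,
                               dma_addr_t *cmd_dma, u8 *smp_func)
{
        struct scatterlist *sg = &task->smp_task.smp_req;
        struct smp_req *hdr;
        void *kaddr;

        /* Read the header fields in place, without copying the payload. */
        kaddr = kmap_atomic(sg_page(sg), KM_IRQ0);
        hdr = kaddr + sg->offset;
        *smp_func = hdr->func;
        kunmap_atomic(kaddr, KM_IRQ0);

        /* Hand the libsas buffer straight to the hardware. */
        if (!dma_map_sg(dev, sg, 1, DMA_TO_DEVICE))
                return -ENOMEM;

        *cmd_dma = sg_dma_address(sg);
        return 0;
}
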
Signed-off-by: Dan Williams <dan.j.williams@intel.com> --- drivers/scsi/isci/request.c | 138 ++++++++++++++++++++------------------------ drivers/scsi/isci/request.h | 1 - drivers/scsi/isci/sas.h | 11 +--- 3 files changed, 65 insertions(+), 85 deletions(-) (limited to 'drivers/scsi/isci/request.h') diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c index 39508495515..1043fed2a40 100644 --- a/drivers/scsi/isci/request.c +++ b/drivers/scsi/isci/request.c @@ -2943,6 +2943,20 @@ static void isci_request_io_request_complete(struct isci_host *isci_host, dma_unmap_sg(&isci_host->pdev->dev, task->scatter, request->num_sg_entries, task->data_dir); break; + case SAS_PROTOCOL_SMP: { + struct scatterlist *sg = &task->smp_task.smp_req; + struct smp_req *smp_req; + void *kaddr; + + dma_unmap_sg(&isci_host->pdev->dev, sg, 1, DMA_TO_DEVICE); + + /* need to swab it back in case the command buffer is re-used */ + kaddr = kmap_atomic(sg_page(sg), KM_IRQ0); + smp_req = kaddr + sg->offset; + sci_swab32_cpy(smp_req, smp_req, sg->length / sizeof(u32)); + kunmap_atomic(kaddr, KM_IRQ0); + break; + } default: break; } @@ -3160,7 +3174,7 @@ scic_io_request_construct(struct scic_sds_controller *scic, else if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) memset(&sci_req->stp.cmd, 0, sizeof(sci_req->stp.cmd)); else if (dev_is_expander(dev)) - memset(&sci_req->smp.cmd, 0, sizeof(sci_req->smp.cmd)); + /* pass */; else return SCI_FAILURE_UNSUPPORTED_PROTOCOL; @@ -3236,30 +3250,54 @@ static enum sci_status isci_request_stp_request_construct( return status; } -/* - * This function will fill in the SCU Task Context for a SMP request. The - * following important settings are utilized: -# task_type == - * SCU_TASK_TYPE_SMP. This simply indicates that a normal request type - * (i.e. non-raw frame) is being utilized to perform task management. -# - * control_frame == 1. This ensures that the proper endianess is set so - * that the bytes are transmitted in the right order for a smp request frame. - * @sci_req: This parameter specifies the smp request object being - * constructed. - * - */ -static void -scu_smp_request_construct_task_context(struct scic_sds_request *sci_req, - ssize_t req_len) +static enum sci_status +scic_io_request_construct_smp(struct device *dev, + struct scic_sds_request *sci_req, + struct sas_task *task) { - dma_addr_t dma_addr; + struct scatterlist *sg = &task->smp_task.smp_req; struct scic_sds_remote_device *sci_dev; - struct scic_sds_port *sci_port; struct scu_task_context *task_context; - ssize_t word_cnt = sizeof(struct smp_req) / sizeof(u32); + struct scic_sds_port *sci_port; + struct smp_req *smp_req; + void *kaddr; + u8 req_len; + u32 cmd; + + kaddr = kmap_atomic(sg_page(sg), KM_IRQ0); + smp_req = kaddr + sg->offset; + /* + * Look at the SMP requests' header fields; for certain SAS 1.x SMP + * functions under SAS 2.0, a zero request length really indicates + * a non-zero default length. + */ + if (smp_req->req_len == 0) { + switch (smp_req->func) { + case SMP_DISCOVER: + case SMP_REPORT_PHY_ERR_LOG: + case SMP_REPORT_PHY_SATA: + case SMP_REPORT_ROUTE_INFO: + smp_req->req_len = 2; + break; + case SMP_CONF_ROUTE_INFO: + case SMP_PHY_CONTROL: + case SMP_PHY_TEST_FUNCTION: + smp_req->req_len = 9; + break; + /* Default - zero is a valid default for 2.0. 
*/ + } + } + req_len = smp_req->req_len; + sci_swab32_cpy(smp_req, smp_req, sg->length / sizeof(u32)); + cmd = *(u32 *) smp_req; + kunmap_atomic(kaddr, KM_IRQ0); + + if (!dma_map_sg(dev, sg, 1, DMA_TO_DEVICE)) + return SCI_FAILURE; + + sci_req->protocol = SCIC_SMP_PROTOCOL; /* byte swap the smp request. */ - sci_swab32_cpy(&sci_req->smp.cmd, &sci_req->smp.cmd, - word_cnt); task_context = scic_sds_request_get_task_context(sci_req); @@ -3307,7 +3345,7 @@ scu_smp_request_construct_task_context(struct scic_sds_request *sci_req, * 18h ~ 30h, protocol specific * since commandIU has been build by framework at this point, we just * copy the frist DWord from command IU to this location. */ - memcpy(&task_context->type.smp, &sci_req->smp.cmd, sizeof(u32)); + memcpy(&task_context->type.smp, &cmd, sizeof(u32)); /* * 40h @@ -3347,48 +3385,12 @@ scu_smp_request_construct_task_context(struct scic_sds_request *sci_req, * Copy the physical address for the command buffer to the SCU Task * Context command buffer should not contain command header. */ - dma_addr = scic_io_request_get_dma_addr(sci_req, - ((char *) &sci_req->smp.cmd) + - sizeof(u32)); - - task_context->command_iu_upper = upper_32_bits(dma_addr); - task_context->command_iu_lower = lower_32_bits(dma_addr); + task_context->command_iu_upper = upper_32_bits(sg_dma_address(sg)); + task_context->command_iu_lower = lower_32_bits(sg_dma_address(sg) + sizeof(u32)); /* SMP response comes as UF, so no need to set response IU address. */ task_context->response_iu_upper = 0; task_context->response_iu_lower = 0; -} - -static enum sci_status -scic_io_request_construct_smp(struct scic_sds_request *sci_req) -{ - struct smp_req *smp_req = &sci_req->smp.cmd; - - sci_req->protocol = SCIC_SMP_PROTOCOL; - - /* - * Look at the SMP requests' header fields; for certain SAS 1.x SMP - * functions under SAS 2.0, a zero request length really indicates - * a non-zero default length. - */ - if (smp_req->req_len == 0) { - switch (smp_req->func) { - case SMP_DISCOVER: - case SMP_REPORT_PHY_ERR_LOG: - case SMP_REPORT_PHY_SATA: - case SMP_REPORT_ROUTE_INFO: - smp_req->req_len = 2; - break; - case SMP_CONF_ROUTE_INFO: - case SMP_PHY_CONTROL: - case SMP_PHY_TEST_FUNCTION: - smp_req->req_len = 9; - break; - /* Default - zero is a valid default for 2.0. 
*/ - } - } - - scu_smp_request_construct_task_context(sci_req, smp_req->req_len); sci_change_state(&sci_req->sm, SCI_REQ_CONSTRUCTED); @@ -3404,24 +3406,12 @@ scic_io_request_construct_smp(struct scic_sds_request *sci_req) */ static enum sci_status isci_smp_request_build(struct isci_request *ireq) { - enum sci_status status = SCI_FAILURE; struct sas_task *task = isci_request_access_task(ireq); + struct device *dev = &ireq->isci_host->pdev->dev; struct scic_sds_request *sci_req = &ireq->sci; + enum sci_status status = SCI_FAILURE; - dev_dbg(&ireq->isci_host->pdev->dev, - "%s: request = %p\n", __func__, ireq); - - dev_dbg(&ireq->isci_host->pdev->dev, - "%s: smp_req len = %d\n", - __func__, - task->smp_task.smp_req.length); - - /* copy the smp_command to the address; */ - sg_copy_to_buffer(&task->smp_task.smp_req, 1, - &sci_req->smp.cmd, - sizeof(struct smp_req)); - - status = scic_io_request_construct_smp(sci_req); + status = scic_io_request_construct_smp(dev, sci_req, task); if (status != SCI_SUCCESS) dev_warn(&ireq->isci_host->pdev->dev, "%s: failed with status = %d\n", diff --git a/drivers/scsi/isci/request.h b/drivers/scsi/isci/request.h index 324fb7b3ab4..7c8b59a7c02 100644 --- a/drivers/scsi/isci/request.h +++ b/drivers/scsi/isci/request.h @@ -244,7 +244,6 @@ struct scic_sds_request { } ssp; struct { - struct smp_req cmd; struct smp_resp rsp; } smp; diff --git a/drivers/scsi/isci/sas.h b/drivers/scsi/isci/sas.h index 822a8dbd19c..462b15174d3 100644 --- a/drivers/scsi/isci/sas.h +++ b/drivers/scsi/isci/sas.h @@ -190,8 +190,6 @@ struct smp_req_phycntl { u8 _r_h[3]; /* bytes 37-39 */ } __packed; -#define SMP_REQ_VENDOR_SPECIFIC_MAX_LEN 1016 - /* * struct smp_req - This structure simply unionizes the existing request * structures into a common request type. @@ -203,14 +201,7 @@ struct smp_req { u8 func; /* byte 1 */ u8 alloc_resp_len; /* byte 2 */ u8 req_len; /* byte 3 */ - - union { /* bytes 4-N */ - u32 smp_req_gen; - struct smp_req_phy_id phy_id; - struct smp_req_phycntl phy_cntl; - struct smp_req_conf_rtinfo conf_rt_info; - u8 vendor[SMP_REQ_VENDOR_SPECIFIC_MAX_LEN]; - }; + u8 req_data[0]; } __packed; #define SMP_RESP_HDR_SZ 4 -- cgit v1.2.3-70-g09d2 From 9274f45ea551421cd3bf329de9dd8d1e6208285a Mon Sep 17 00:00:00 2001 From: Jeff Skirvin <jeffrey.d.skirvin@intel.com> Date: Thu, 23 Jun 2011 17:09:02 -0700 Subject: isci: Terminate dev requests on FIS err bit rx in NCQ When the remote device transitions to a not-ready state because of an NCQ error condition, all outstanding requests to that device are terminated and completed to libsas on the normal path. The device then waits for a READ LOG EXT command to issue on the task management path. Signed-off-by: Jeff Skirvin <jeffrey.d.skirvin@intel.com> Signed-off-by: Dan Williams <dan.j.williams@intel.com> --- drivers/scsi/isci/remote_device.c | 27 +++++++++++++++++++++++++-- drivers/scsi/isci/remote_device.h | 1 + drivers/scsi/isci/request.c | 27 ++++++++++++++++++++++++--- drivers/scsi/isci/request.h | 9 +++++++++ drivers/scsi/isci/task.c | 16 ++++++++++++++-- 5 files changed, 73 insertions(+), 7 deletions(-) (limited to 'drivers/scsi/isci/request.h') diff --git a/drivers/scsi/isci/remote_device.c b/drivers/scsi/isci/remote_device.c index 9f45c2ba730..c5ce0f0f364 100644 --- a/drivers/scsi/isci/remote_device.c +++ b/drivers/scsi/isci/remote_device.c @@ -68,17 +68,39 @@ * @isci_host: This parameter specifies the isci host object. * @isci_device: This parameter specifies the remote device * + * scic_lock is held on entrance to this function. 
*/ static void isci_remote_device_not_ready(struct isci_host *ihost, struct isci_remote_device *idev, u32 reason) { + struct isci_request * ireq; + dev_dbg(&ihost->pdev->dev, "%s: isci_device = %p\n", __func__, idev); - if (reason == SCIC_REMOTE_DEVICE_NOT_READY_STOP_REQUESTED) + switch (reason) { + case SCIC_REMOTE_DEVICE_NOT_READY_STOP_REQUESTED: set_bit(IDEV_GONE, &idev->flags); - else + break; + case SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED: + set_bit(IDEV_IO_NCQERROR, &idev->flags); + + /* Kill all outstanding requests for the device. */ + list_for_each_entry(ireq, &idev->reqs_in_process, dev_node) { + + dev_dbg(&ihost->pdev->dev, + "%s: isci_device = %p request = %p\n", + __func__, idev, ireq); + + scic_controller_terminate_request(&ihost->sci, + &idev->sci, + &ireq->sci); + } + /* Fall through into the default case... */ + default: clear_bit(IDEV_IO_READY, &idev->flags); + break; + } } /** @@ -94,6 +116,7 @@ static void isci_remote_device_ready(struct isci_host *ihost, struct isci_remote dev_dbg(&ihost->pdev->dev, "%s: idev = %p\n", __func__, idev); + clear_bit(IDEV_IO_NCQERROR, &idev->flags); set_bit(IDEV_IO_READY, &idev->flags); if (test_and_clear_bit(IDEV_START_PENDING, &idev->flags)) wake_up(&ihost->eventq); diff --git a/drivers/scsi/isci/remote_device.h b/drivers/scsi/isci/remote_device.h index cde595078f6..0d9e37fe734 100644 --- a/drivers/scsi/isci/remote_device.h +++ b/drivers/scsi/isci/remote_device.h @@ -136,6 +136,7 @@ struct isci_remote_device { #define IDEV_EH 3 #define IDEV_GONE 4 #define IDEV_IO_READY 5 + #define IDEV_IO_NCQERROR 6 unsigned long flags; struct kref kref; struct isci_port *isci_port; diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c index 1043fed2a40..08a7340b33b 100644 --- a/drivers/scsi/isci/request.c +++ b/drivers/scsi/isci/request.c @@ -3587,9 +3587,30 @@ int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *ide spin_lock_irqsave(&ihost->scic_lock, flags); - /* send the request, let the core assign the IO TAG. */ - status = scic_controller_start_io(&ihost->sci, &idev->sci, &ireq->sci, - SCI_CONTROLLER_INVALID_IO_TAG); + if (test_bit(IDEV_IO_NCQERROR, &idev->flags)) { + + if (isci_task_is_ncq_recovery(task)) { + + /* The device is in an NCQ recovery state. Issue the + * request on the task side. Note that it will + * complete on the I/O request side because the + * request was built that way (ie. + * ireq->is_task_management_request is false). + */ + status = scic_controller_start_task(&ihost->sci, + &idev->sci, + &ireq->sci, + SCI_CONTROLLER_INVALID_IO_TAG); + } else { + status = SCI_FAILURE; + } + } else { + + /* send the request, let the core assign the IO TAG. 
*/ + status = scic_controller_start_io(&ihost->sci, &idev->sci, + &ireq->sci, + SCI_CONTROLLER_INVALID_IO_TAG); + } if (status != SCI_SUCCESS && status != SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) { dev_warn(&ihost->pdev->dev, diff --git a/drivers/scsi/isci/request.h b/drivers/scsi/isci/request.h index 7c8b59a7c02..9130f22a63b 100644 --- a/drivers/scsi/isci/request.h +++ b/drivers/scsi/isci/request.h @@ -687,4 +687,13 @@ scic_task_request_construct_sata(struct scic_sds_request *sci_req); void scic_stp_io_request_set_ncq_tag(struct scic_sds_request *sci_req, u16 ncq_tag); void scic_sds_smp_request_copy_response(struct scic_sds_request *sci_req); + +static inline int isci_task_is_ncq_recovery(struct sas_task *task) +{ + return (sas_protocol_ata(task->task_proto) && + task->ata_task.fis.command == ATA_CMD_READ_LOG_EXT && + task->ata_task.fis.lbal == ATA_LOG_SATA_NCQ); + +} + #endif /* !defined(_ISCI_REQUEST_H_) */ diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c index 0835a2c2dc7..157e9978183 100644 --- a/drivers/scsi/isci/task.c +++ b/drivers/scsi/isci/task.c @@ -133,6 +133,15 @@ static void isci_task_refuse(struct isci_host *ihost, struct sas_task *task, for (; num > 0; num--,\ task = list_entry(task->list.next, struct sas_task, list)) + +static inline int isci_device_io_ready(struct isci_remote_device *idev, + struct sas_task *task) +{ + return idev ? test_bit(IDEV_IO_READY, &idev->flags) || + (test_bit(IDEV_IO_NCQERROR, &idev->flags) && + isci_task_is_ncq_recovery(task)) + : 0; +} /** * isci_task_execute_task() - This function is one of the SAS Domain Template * functions. This function is called by libsas to send a task down to @@ -165,7 +174,7 @@ int isci_task_execute_task(struct sas_task *task, int num, gfp_t gfp_flags) for_each_sas_task(num, task) { spin_lock_irqsave(&ihost->scic_lock, flags); idev = isci_lookup_device(task->dev); - io_ready = idev ? test_bit(IDEV_IO_READY, &idev->flags) : 0; + io_ready = isci_device_io_ready(idev, task); spin_unlock_irqrestore(&ihost->scic_lock, flags); dev_dbg(&ihost->pdev->dev, @@ -178,6 +187,7 @@ int isci_task_execute_task(struct sas_task *task, int num, gfp_t gfp_flags) SAS_DEVICE_UNKNOWN); isci_host_can_dequeue(ihost, 1); } else if (!io_ready) { + /* Indicate QUEUE_FULL so that the scsi midlayer * retries. */ @@ -299,7 +309,9 @@ int isci_task_execute_tmf(struct isci_host *ihost, /* sanity check, return TMF_RESP_FUNC_FAILED * if the device is not there and ready. */ - if (!isci_device || !test_bit(IDEV_IO_READY, &isci_device->flags)) { + if (!isci_device || + (!test_bit(IDEV_IO_READY, &isci_device->flags) && + !test_bit(IDEV_IO_NCQERROR, &isci_device->flags))) { dev_dbg(&ihost->pdev->dev, "%s: isci_device = %p not ready (%#lx)\n", __func__, -- cgit v1.2.3-70-g09d2 From 312e0c2455c18716cf640d4336dcb1e9e5053818 Mon Sep 17 00:00:00 2001 From: Dan Williams <dan.j.williams@intel.com> Date: Tue, 28 Jun 2011 13:47:09 -0700 Subject: isci: unify can_queue tracking on the tci_pool, uplevel tag assignment The tci_pool tracks our outstanding command slots which are also the 'index' portion of our tags. Grabbing the tag early in ->lldd_execute_task let's us drop the isci_host_can_queue() and ->was_tag_assigned_by_user infrastructure. ->was_tag_assigned_by_user required the task context to be duplicated in request-local buffer. With the tci established early we can build the task_context directly into its final location and skip a memcpy. 
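As a rough standalone model of the isci_alloc_tag()/isci_free_tag() helpers added below (the EX_* names, field widths and return codes are invented for illustration): a circular pool hands out task context indices for the low bits of the tag, and a per-index sequence number in the high bits lets a stale tag from an earlier generation be rejected when it is freed.

#define EX_MAX_REQS     256     /* stand-in for SCI_MAX_IO_REQUESTS */
#define EX_TAG(seq, tci)        ((unsigned short)(((seq) << 12) | (tci)))
#define EX_TAG_TCI(tag)         ((tag) & 0x0fff)
#define EX_TAG_SEQ(tag)         (((tag) >> 12) & 0xf)

struct ex_tag_pool {
        unsigned short free_tci[EX_MAX_REQS];   /* circular buffer of free TCIs */
        unsigned int head, tail, nr_free;
        unsigned char seq[EX_MAX_REQS];         /* per-TCI generation numbers */
};

static void ex_tag_pool_init(struct ex_tag_pool *p)
{
        unsigned int i;

        p->head = p->tail = 0;
        p->nr_free = EX_MAX_REQS;
        for (i = 0; i < EX_MAX_REQS; i++) {
                p->free_tci[i] = i;             /* every TCI starts out free */
                p->seq[i] = 0;
        }
}

/* Returns 0 and a tag on success, -1 when the pool (i.e. the queue) is full. */
static int ex_alloc_tag(struct ex_tag_pool *p, unsigned short *tag)
{
        unsigned short tci;

        if (!p->nr_free)
                return -1;
        tci = p->free_tci[p->head++ % EX_MAX_REQS];
        p->nr_free--;
        *tag = EX_TAG(p->seq[tci], tci);
        return 0;
}

/* Returns 0 on success, -1 if the tag's sequence number is stale. */
static int ex_free_tag(struct ex_tag_pool *p, unsigned short tag)
{
        unsigned short tci = EX_TAG_TCI(tag);

        if (EX_TAG_SEQ(tag) != p->seq[tci])
                return -1;
        p->seq[tci] = (p->seq[tci] + 1) & 0xf;  /* retire this generation */
        p->free_tci[p->tail++ % EX_MAX_REQS] = tci;
        p->nr_free++;
        return 0;
}
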
With the task context buffer at a known address at request construction we have the opportunity/obligation to also fix sgl handling. This rework feels like it belongs in another patch but the sgl handling and task_context are too intertwined. 1/ fix the 'ab' pair embedded in the task context to point to the 'cd' pair in the task context (previously we were prematurely linking to the staging buffer). 2/ fix the broken iteration of pio sgls that assumes all sgls are relative to the request, and does a dangerous looking reverse lookup of physical address to virtual address. Signed-off-by: Dan Williams <dan.j.williams@intel.com> --- drivers/scsi/isci/host.c | 265 ++++++------------------- drivers/scsi/isci/host.h | 55 +----- drivers/scsi/isci/port.c | 61 +++--- drivers/scsi/isci/port.h | 2 +- drivers/scsi/isci/request.c | 469 +++++++++++++++----------------------------- drivers/scsi/isci/request.h | 58 +----- drivers/scsi/isci/task.c | 80 ++++---- 7 files changed, 303 insertions(+), 687 deletions(-) (limited to 'drivers/scsi/isci/request.h') diff --git a/drivers/scsi/isci/host.c b/drivers/scsi/isci/host.c index b08455f0d35..c99fab53dd0 100644 --- a/drivers/scsi/isci/host.c +++ b/drivers/scsi/isci/host.c @@ -1018,33 +1018,11 @@ done: spin_unlock_irqrestore(&ihost->scic_lock, flags); } -static void isci_tci_free(struct isci_host *ihost, u16 tci) -{ - u16 tail = ihost->tci_tail & (SCI_MAX_IO_REQUESTS-1); - - ihost->tci_pool[tail] = tci; - ihost->tci_tail = tail + 1; -} - -static u16 isci_tci_alloc(struct isci_host *ihost) -{ - u16 head = ihost->tci_head & (SCI_MAX_IO_REQUESTS-1); - u16 tci = ihost->tci_pool[head]; - - ihost->tci_head = head + 1; - return tci; -} - static u16 isci_tci_active(struct isci_host *ihost) { return CIRC_CNT(ihost->tci_head, ihost->tci_tail, SCI_MAX_IO_REQUESTS); } -static u16 isci_tci_space(struct isci_host *ihost) -{ - return CIRC_SPACE(ihost->tci_head, ihost->tci_tail, SCI_MAX_IO_REQUESTS); -} - static enum sci_status scic_controller_start(struct scic_sds_controller *scic, u32 timeout) { @@ -1205,6 +1183,11 @@ static void isci_host_completion_routine(unsigned long data) task->task_done(task); } } + + spin_lock_irq(&isci_host->scic_lock); + isci_free_tag(isci_host, request->sci.io_tag); + spin_unlock_irq(&isci_host->scic_lock); + /* Free the request object. */ isci_request_free(isci_host, request); } @@ -1242,6 +1225,7 @@ static void isci_host_completion_routine(unsigned long data) * of pending requests. */ list_del_init(&request->dev_node); + isci_free_tag(isci_host, request->sci.io_tag); spin_unlock_irq(&isci_host->scic_lock); /* Free the request object. 
*/ @@ -2375,6 +2359,7 @@ static int scic_controller_mem_init(struct scic_sds_controller *scic) if (!scic->task_context_table) return -ENOMEM; + scic->task_context_dma = dma; writel(lower_32_bits(dma), &scic->smu_registers->host_task_table_lower); writel(upper_32_bits(dma), &scic->smu_registers->host_task_table_upper); @@ -2409,11 +2394,9 @@ int isci_host_init(struct isci_host *isci_host) spin_lock_init(&isci_host->state_lock); spin_lock_init(&isci_host->scic_lock); - spin_lock_init(&isci_host->queue_lock); init_waitqueue_head(&isci_host->eventq); isci_host_change_state(isci_host, isci_starting); - isci_host->can_queue = ISCI_CAN_QUEUE_VAL; status = scic_controller_construct(&isci_host->sci, scu_base(isci_host), smu_base(isci_host)); @@ -2611,51 +2594,6 @@ void scic_sds_controller_post_request( writel(request, &scic->smu_registers->post_context_port); } -/** - * This method will copy the soft copy of the task context into the physical - * memory accessible by the controller. - * @scic: This parameter specifies the controller for which to copy - * the task context. - * @sci_req: This parameter specifies the request for which the task - * context is being copied. - * - * After this call is made the SCIC_SDS_IO_REQUEST object will always point to - * the physical memory version of the task context. Thus, all subsequent - * updates to the task context are performed in the TC table (i.e. DMAable - * memory). none - */ -void scic_sds_controller_copy_task_context( - struct scic_sds_controller *scic, - struct scic_sds_request *sci_req) -{ - struct scu_task_context *task_context_buffer; - - task_context_buffer = scic_sds_controller_get_task_context_buffer( - scic, sci_req->io_tag); - - memcpy(task_context_buffer, - sci_req->task_context_buffer, - offsetof(struct scu_task_context, sgl_snapshot_ac)); - - /* - * Now that the soft copy of the TC has been copied into the TC - * table accessible by the silicon. Thus, any further changes to - * the TC (e.g. TC termination) occur in the appropriate location. 
*/ - sci_req->task_context_buffer = task_context_buffer; -} - -struct scu_task_context *scic_sds_controller_get_task_context_buffer(struct scic_sds_controller *scic, - u16 io_tag) -{ - u16 tci = ISCI_TAG_TCI(io_tag); - - if (tci < scic->task_context_entries) { - return &scic->task_context_table[tci]; - } - - return NULL; -} - struct scic_sds_request *scic_request_by_tag(struct scic_sds_controller *scic, u16 io_tag) { u16 task_index; @@ -2801,6 +2739,60 @@ void scic_sds_controller_release_frame( &scic->scu_registers->sdma.unsolicited_frame_get_pointer); } +void isci_tci_free(struct isci_host *ihost, u16 tci) +{ + u16 tail = ihost->tci_tail & (SCI_MAX_IO_REQUESTS-1); + + ihost->tci_pool[tail] = tci; + ihost->tci_tail = tail + 1; +} + +static u16 isci_tci_alloc(struct isci_host *ihost) +{ + u16 head = ihost->tci_head & (SCI_MAX_IO_REQUESTS-1); + u16 tci = ihost->tci_pool[head]; + + ihost->tci_head = head + 1; + return tci; +} + +static u16 isci_tci_space(struct isci_host *ihost) +{ + return CIRC_SPACE(ihost->tci_head, ihost->tci_tail, SCI_MAX_IO_REQUESTS); +} + +u16 isci_alloc_tag(struct isci_host *ihost) +{ + if (isci_tci_space(ihost)) { + u16 tci = isci_tci_alloc(ihost); + u8 seq = ihost->sci.io_request_sequence[tci]; + + return ISCI_TAG(seq, tci); + } + + return SCI_CONTROLLER_INVALID_IO_TAG; +} + +enum sci_status isci_free_tag(struct isci_host *ihost, u16 io_tag) +{ + struct scic_sds_controller *scic = &ihost->sci; + u16 tci = ISCI_TAG_TCI(io_tag); + u16 seq = ISCI_TAG_SEQ(io_tag); + + /* prevent tail from passing head */ + if (isci_tci_active(ihost) == 0) + return SCI_FAILURE_INVALID_IO_TAG; + + if (seq == scic->io_request_sequence[tci]) { + scic->io_request_sequence[tci] = (seq+1) & (SCI_MAX_SEQ-1); + + isci_tci_free(ihost, tci); + + return SCI_SUCCESS; + } + return SCI_FAILURE_INVALID_IO_TAG; +} + /** * scic_controller_start_io() - This method is called by the SCI user to * send/start an IO request. If the method invocation is successful, then @@ -2811,27 +2803,11 @@ void scic_sds_controller_release_frame( * IO request. * @io_request: the handle to the io request object to start. * @io_tag: This parameter specifies a previously allocated IO tag that the - * user desires to be utilized for this request. This parameter is optional. - * The user is allowed to supply SCI_CONTROLLER_INVALID_IO_TAG as the value - * for this parameter. - * - * - IO tags are a protected resource. It is incumbent upon the SCI Core user - * to ensure that each of the methods that may allocate or free available IO - * tags are handled in a mutually exclusive manner. This method is one of said - * methods requiring proper critical code section protection (e.g. semaphore, - * spin-lock, etc.). - For SATA, the user is required to manage NCQ tags. As a - * result, it is expected the user will have set the NCQ tag field in the host - * to device register FIS prior to calling this method. There is also a - * requirement for the user to call scic_stp_io_set_ncq_tag() prior to invoking - * the scic_controller_start_io() method. scic_controller_allocate_tag() for - * more information on allocating a tag. Indicate if the controller - * successfully started the IO request. SCI_SUCCESS if the IO request was - * successfully started. Determine the failure situations and return values. + * user desires to be utilized for this request. 
*/ enum sci_status scic_controller_start_io(struct scic_sds_controller *scic, struct scic_sds_remote_device *rdev, - struct scic_sds_request *req, - u16 io_tag) + struct scic_sds_request *req) { enum sci_status status; @@ -2902,17 +2878,6 @@ enum sci_status scic_controller_terminate_request( * @remote_device: The handle to the remote device object for which to complete * the IO request. * @io_request: the handle to the io request object to complete. - * - * - IO tags are a protected resource. It is incumbent upon the SCI Core user - * to ensure that each of the methods that may allocate or free available IO - * tags are handled in a mutually exclusive manner. This method is one of said - * methods requiring proper critical code section protection (e.g. semaphore, - * spin-lock, etc.). - If the IO tag for a request was allocated, by the SCI - * Core user, using the scic_controller_allocate_io_tag() method, then it is - * the responsibility of the caller to invoke the scic_controller_free_io_tag() - * method to free the tag (i.e. this method will not free the IO tag). Indicate - * if the controller successfully completed the IO request. SCI_SUCCESS if the - * completion process was successful. */ enum sci_status scic_controller_complete_io( struct scic_sds_controller *scic, @@ -2963,31 +2928,11 @@ enum sci_status scic_controller_continue_io(struct scic_sds_request *sci_req) * @remote_device: the handle to the remote device object for which to start * the task management request. * @task_request: the handle to the task request object to start. - * @io_tag: This parameter specifies a previously allocated IO tag that the - * user desires to be utilized for this request. Note this not the io_tag - * of the request being managed. It is to be utilized for the task request - * itself. This parameter is optional. The user is allowed to supply - * SCI_CONTROLLER_INVALID_IO_TAG as the value for this parameter. - * - * - IO tags are a protected resource. It is incumbent upon the SCI Core user - * to ensure that each of the methods that may allocate or free available IO - * tags are handled in a mutually exclusive manner. This method is one of said - * methods requiring proper critical code section protection (e.g. semaphore, - * spin-lock, etc.). - The user must synchronize this task with completion - * queue processing. If they are not synchronized then it is possible for the - * io requests that are being managed by the task request can complete before - * starting the task request. scic_controller_allocate_tag() for more - * information on allocating a tag. Indicate if the controller successfully - * started the IO request. SCI_TASK_SUCCESS if the task request was - * successfully started. SCI_TASK_FAILURE_REQUIRES_SCSI_ABORT This value is - * returned if there is/are task(s) outstanding that require termination or - * completion before this request can succeed. */ enum sci_task_status scic_controller_start_task( struct scic_sds_controller *scic, struct scic_sds_remote_device *rdev, - struct scic_sds_request *req, - u16 task_tag) + struct scic_sds_request *req) { enum sci_status status; @@ -3022,85 +2967,3 @@ enum sci_task_status scic_controller_start_task( return status; } - -/** - * scic_controller_allocate_io_tag() - This method will allocate a tag from the - * pool of free IO tags. Direct allocation of IO tags by the SCI Core user - * is optional. 
The scic_controller_start_io() method will allocate an IO - * tag if this method is not utilized and the tag is not supplied to the IO - * construct routine. Direct allocation of IO tags may provide additional - * performance improvements in environments capable of supporting this usage - * model. Additionally, direct allocation of IO tags also provides - * additional flexibility to the SCI Core user. Specifically, the user may - * retain IO tags across the lives of multiple IO requests. - * @controller: the handle to the controller object for which to allocate the - * tag. - * - * IO tags are a protected resource. It is incumbent upon the SCI Core user to - * ensure that each of the methods that may allocate or free available IO tags - * are handled in a mutually exclusive manner. This method is one of said - * methods requiring proper critical code section protection (e.g. semaphore, - * spin-lock, etc.). An unsigned integer representing an available IO tag. - * SCI_CONTROLLER_INVALID_IO_TAG This value is returned if there are no - * currently available tags to be allocated. All return other values indicate a - * legitimate tag. - */ -u16 scic_controller_allocate_io_tag(struct scic_sds_controller *scic) -{ - struct isci_host *ihost = scic_to_ihost(scic); - - if (isci_tci_space(ihost)) { - u16 tci = isci_tci_alloc(ihost); - u8 seq = scic->io_request_sequence[tci]; - - return ISCI_TAG(seq, tci); - } - - return SCI_CONTROLLER_INVALID_IO_TAG; -} - -/** - * scic_controller_free_io_tag() - This method will free an IO tag to the pool - * of free IO tags. This method provides the SCI Core user more flexibility - * with regards to IO tags. The user may desire to keep an IO tag after an - * IO request has completed, because they plan on re-using the tag for a - * subsequent IO request. This method is only legal if the tag was - * allocated via scic_controller_allocate_io_tag(). - * @controller: This parameter specifies the handle to the controller object - * for which to free/return the tag. - * @io_tag: This parameter represents the tag to be freed to the pool of - * available tags. - * - * - IO tags are a protected resource. It is incumbent upon the SCI Core user - * to ensure that each of the methods that may allocate or free available IO - * tags are handled in a mutually exclusive manner. This method is one of said - * methods requiring proper critical code section protection (e.g. semaphore, - * spin-lock, etc.). - If the IO tag for a request was allocated, by the SCI - * Core user, using the scic_controller_allocate_io_tag() method, then it is - * the responsibility of the caller to invoke this method to free the tag. This - * method returns an indication of whether the tag was successfully put back - * (freed) to the pool of available tags. SCI_SUCCESS This return value - * indicates the tag was successfully placed into the pool of available IO - * tags. SCI_FAILURE_INVALID_IO_TAG This value is returned if the supplied tag - * is not a valid IO tag value. 
- */ -enum sci_status scic_controller_free_io_tag(struct scic_sds_controller *scic, - u16 io_tag) -{ - struct isci_host *ihost = scic_to_ihost(scic); - u16 tci = ISCI_TAG_TCI(io_tag); - u16 seq = ISCI_TAG_SEQ(io_tag); - - /* prevent tail from passing head */ - if (isci_tci_active(ihost) == 0) - return SCI_FAILURE_INVALID_IO_TAG; - - if (seq == scic->io_request_sequence[tci]) { - scic->io_request_sequence[tci] = (seq+1) & (SCI_MAX_SEQ-1); - - isci_tci_free(ihost, ISCI_TAG_TCI(io_tag)); - - return SCI_SUCCESS; - } - return SCI_FAILURE_INVALID_IO_TAG; -} diff --git a/drivers/scsi/isci/host.h b/drivers/scsi/isci/host.h index a54397e1bf1..d8164f5d798 100644 --- a/drivers/scsi/isci/host.h +++ b/drivers/scsi/isci/host.h @@ -192,6 +192,7 @@ struct scic_sds_controller { * context table. This data is shared between the hardware and software. */ struct scu_task_context *task_context_table; + dma_addr_t task_context_dma; /** * This field is a pointer to the memory allocated by the driver for the @@ -302,12 +303,8 @@ struct isci_host { struct isci_port ports[SCI_MAX_PORTS + 1]; /* includes dummy port */ struct sas_ha_struct sas_ha; - int can_queue; - spinlock_t queue_lock; spinlock_t state_lock; - struct pci_dev *pdev; - enum isci_status status; #define IHOST_START_PENDING 0 #define IHOST_STOP_PENDING 1 @@ -451,36 +448,6 @@ static inline void isci_host_change_state(struct isci_host *isci_host, } -static inline int isci_host_can_queue(struct isci_host *isci_host, int num) -{ - int ret = 0; - unsigned long flags; - - spin_lock_irqsave(&isci_host->queue_lock, flags); - if ((isci_host->can_queue - num) < 0) { - dev_dbg(&isci_host->pdev->dev, - "%s: isci_host->can_queue = %d\n", - __func__, - isci_host->can_queue); - ret = -SAS_QUEUE_FULL; - - } else - isci_host->can_queue -= num; - - spin_unlock_irqrestore(&isci_host->queue_lock, flags); - - return ret; -} - -static inline void isci_host_can_dequeue(struct isci_host *isci_host, int num) -{ - unsigned long flags; - - spin_lock_irqsave(&isci_host->queue_lock, flags); - isci_host->can_queue += num; - spin_unlock_irqrestore(&isci_host->queue_lock, flags); -} - static inline void wait_for_start(struct isci_host *ihost) { wait_event(ihost->eventq, !test_bit(IHOST_START_PENDING, &ihost->flags)); @@ -646,10 +613,6 @@ union scu_remote_node_context *scic_sds_controller_get_remote_node_context_buffe struct scic_sds_request *scic_request_by_tag(struct scic_sds_controller *scic, u16 io_tag); -struct scu_task_context *scic_sds_controller_get_task_context_buffer( - struct scic_sds_controller *scic, - u16 io_tag); - void scic_sds_controller_power_control_queue_insert( struct scic_sds_controller *scic, struct scic_sds_phy *sci_phy); @@ -681,6 +644,9 @@ void scic_sds_controller_register_setup(struct scic_sds_controller *scic); enum sci_status scic_controller_continue_io(struct scic_sds_request *sci_req); int isci_host_scan_finished(struct Scsi_Host *, unsigned long); void isci_host_scan_start(struct Scsi_Host *); +u16 isci_alloc_tag(struct isci_host *ihost); +enum sci_status isci_free_tag(struct isci_host *ihost, u16 io_tag); +void isci_tci_free(struct isci_host *ihost, u16 tci); int isci_host_init(struct isci_host *); @@ -708,14 +674,12 @@ void scic_controller_disable_interrupts( enum sci_status scic_controller_start_io( struct scic_sds_controller *scic, struct scic_sds_remote_device *remote_device, - struct scic_sds_request *io_request, - u16 io_tag); + struct scic_sds_request *io_request); enum sci_task_status scic_controller_start_task( struct scic_sds_controller *scic, 
struct scic_sds_remote_device *remote_device, - struct scic_sds_request *task_request, - u16 io_tag); + struct scic_sds_request *task_request); enum sci_status scic_controller_terminate_request( struct scic_sds_controller *scic, @@ -727,13 +691,6 @@ enum sci_status scic_controller_complete_io( struct scic_sds_remote_device *remote_device, struct scic_sds_request *io_request); -u16 scic_controller_allocate_io_tag( - struct scic_sds_controller *scic); - -enum sci_status scic_controller_free_io_tag( - struct scic_sds_controller *scic, - u16 io_tag); - void scic_sds_port_configuration_agent_construct( struct scic_sds_port_configuration_agent *port_agent); diff --git a/drivers/scsi/isci/port.c b/drivers/scsi/isci/port.c index 5f4a4e3954d..0e84e29335d 100644 --- a/drivers/scsi/isci/port.c +++ b/drivers/scsi/isci/port.c @@ -695,35 +695,21 @@ static void scic_sds_port_construct_dummy_rnc(struct scic_sds_port *sci_port, u1 */ static void scic_sds_port_construct_dummy_task(struct scic_sds_port *sci_port, u16 tag) { + struct scic_sds_controller *scic = sci_port->owning_controller; struct scu_task_context *task_context; - task_context = scic_sds_controller_get_task_context_buffer(sci_port->owning_controller, tag); - + task_context = &scic->task_context_table[ISCI_TAG_TCI(tag)]; memset(task_context, 0, sizeof(struct scu_task_context)); - task_context->abort = 0; - task_context->priority = 0; task_context->initiator_request = 1; task_context->connection_rate = 1; - task_context->protocol_engine_index = 0; task_context->logical_port_index = sci_port->physical_port_index; task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SSP; task_context->task_index = ISCI_TAG_TCI(tag); task_context->valid = SCU_TASK_CONTEXT_VALID; task_context->context_type = SCU_TASK_CONTEXT_TYPE; - task_context->remote_node_index = sci_port->reserved_rni; - task_context->command_code = 0; - - task_context->link_layer_control = 0; task_context->do_not_dma_ssp_good_response = 1; - task_context->strict_ordering = 0; - task_context->control_frame = 0; - task_context->timeout_enable = 0; - task_context->block_guard_enable = 0; - - task_context->address_modifier = 0; - task_context->task_phase = 0x01; } @@ -731,15 +717,15 @@ static void scic_sds_port_destroy_dummy_resources(struct scic_sds_port *sci_port { struct scic_sds_controller *scic = sci_port->owning_controller; - if (sci_port->reserved_tci != SCU_DUMMY_INDEX) - scic_controller_free_io_tag(scic, sci_port->reserved_tci); + if (sci_port->reserved_tag != SCI_CONTROLLER_INVALID_IO_TAG) + isci_free_tag(scic_to_ihost(scic), sci_port->reserved_tag); if (sci_port->reserved_rni != SCU_DUMMY_INDEX) scic_sds_remote_node_table_release_remote_node_index(&scic->available_remote_nodes, 1, sci_port->reserved_rni); sci_port->reserved_rni = SCU_DUMMY_INDEX; - sci_port->reserved_tci = SCU_DUMMY_INDEX; + sci_port->reserved_tag = SCI_CONTROLLER_INVALID_IO_TAG; } /** @@ -1119,18 +1105,17 @@ scic_sds_port_suspend_port_task_scheduler(struct scic_sds_port *port) */ static void scic_sds_port_post_dummy_request(struct scic_sds_port *sci_port) { - u32 command; - struct scu_task_context *task_context; struct scic_sds_controller *scic = sci_port->owning_controller; - u16 tci = sci_port->reserved_tci; - - task_context = scic_sds_controller_get_task_context_buffer(scic, tci); + u16 tag = sci_port->reserved_tag; + struct scu_task_context *tc; + u32 command; - task_context->abort = 0; + tc = &scic->task_context_table[ISCI_TAG_TCI(tag)]; + tc->abort = 0; command = SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC | 
sci_port->physical_port_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT | - tci; + ISCI_TAG_TCI(tag); scic_sds_controller_post_request(scic, command); } @@ -1145,17 +1130,16 @@ static void scic_sds_port_post_dummy_request(struct scic_sds_port *sci_port) static void scic_sds_port_abort_dummy_request(struct scic_sds_port *sci_port) { struct scic_sds_controller *scic = sci_port->owning_controller; - u16 tci = sci_port->reserved_tci; + u16 tag = sci_port->reserved_tag; struct scu_task_context *tc; u32 command; - tc = scic_sds_controller_get_task_context_buffer(scic, tci); - + tc = &scic->task_context_table[ISCI_TAG_TCI(tag)]; tc->abort = 1; command = SCU_CONTEXT_COMMAND_REQUEST_POST_TC_ABORT | sci_port->physical_port_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT | - tci; + ISCI_TAG_TCI(tag); scic_sds_controller_post_request(scic, command); } @@ -1333,15 +1317,16 @@ enum sci_status scic_sds_port_start(struct scic_sds_port *sci_port) sci_port->reserved_rni = rni; } - if (sci_port->reserved_tci == SCU_DUMMY_INDEX) { - /* Allocate a TCI and remove the sequence nibble */ - u16 tci = scic_controller_allocate_io_tag(scic); + if (sci_port->reserved_tag == SCI_CONTROLLER_INVALID_IO_TAG) { + struct isci_host *ihost = scic_to_ihost(scic); + u16 tag; - if (tci != SCU_DUMMY_INDEX) - scic_sds_port_construct_dummy_task(sci_port, tci); - else + tag = isci_alloc_tag(ihost); + if (tag == SCI_CONTROLLER_INVALID_IO_TAG) status = SCI_FAILURE_INSUFFICIENT_RESOURCES; - sci_port->reserved_tci = tci; + else + scic_sds_port_construct_dummy_task(sci_port, tag); + sci_port->reserved_tag = tag; } if (status == SCI_SUCCESS) { @@ -1859,7 +1844,7 @@ void scic_sds_port_construct(struct scic_sds_port *sci_port, u8 index, sci_port->assigned_device_count = 0; sci_port->reserved_rni = SCU_DUMMY_INDEX; - sci_port->reserved_tci = SCU_DUMMY_INDEX; + sci_port->reserved_tag = SCI_CONTROLLER_INVALID_IO_TAG; sci_init_timer(&sci_port->timer, port_timeout); diff --git a/drivers/scsi/isci/port.h b/drivers/scsi/isci/port.h index 45c01f80bf8..a44e541914f 100644 --- a/drivers/scsi/isci/port.h +++ b/drivers/scsi/isci/port.h @@ -108,7 +108,7 @@ struct scic_sds_port { u8 active_phy_mask; u16 reserved_rni; - u16 reserved_tci; + u16 reserved_tag; /** * This field contains the count of the io requests started on this port diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c index 08a7340b33b..55859d5331b 100644 --- a/drivers/scsi/isci/request.c +++ b/drivers/scsi/isci/request.c @@ -61,42 +61,50 @@ #include "scu_event_codes.h" #include "sas.h" -/** - * This method returns the sgl element pair for the specificed sgl_pair index. - * @sci_req: This parameter specifies the IO request for which to retrieve - * the Scatter-Gather List element pair. - * @sgl_pair_index: This parameter specifies the index into the SGL element - * pair to be retrieved. - * - * This method returns a pointer to an struct scu_sgl_element_pair. 
- */ -static struct scu_sgl_element_pair *scic_sds_request_get_sgl_element_pair( - struct scic_sds_request *sci_req, - u32 sgl_pair_index - ) { - struct scu_task_context *task_context; +static struct scu_sgl_element_pair *to_sgl_element_pair(struct scic_sds_request *sci_req, + int idx) +{ + if (idx == 0) + return &sci_req->tc->sgl_pair_ab; + else if (idx == 1) + return &sci_req->tc->sgl_pair_cd; + else if (idx < 0) + return NULL; + else + return &sci_req->sg_table[idx - 2]; +} - task_context = (struct scu_task_context *)sci_req->task_context_buffer; +static dma_addr_t to_sgl_element_pair_dma(struct scic_sds_controller *scic, + struct scic_sds_request *sci_req, u32 idx) +{ + u32 offset; - if (sgl_pair_index == 0) { - return &task_context->sgl_pair_ab; - } else if (sgl_pair_index == 1) { - return &task_context->sgl_pair_cd; + if (idx == 0) { + offset = (void *) &sci_req->tc->sgl_pair_ab - + (void *) &scic->task_context_table[0]; + return scic->task_context_dma + offset; + } else if (idx == 1) { + offset = (void *) &sci_req->tc->sgl_pair_cd - + (void *) &scic->task_context_table[0]; + return scic->task_context_dma + offset; } - return &sci_req->sg_table[sgl_pair_index - 2]; + return scic_io_request_get_dma_addr(sci_req, &sci_req->sg_table[idx - 2]); +} + +static void init_sgl_element(struct scu_sgl_element *e, struct scatterlist *sg) +{ + e->length = sg_dma_len(sg); + e->address_upper = upper_32_bits(sg_dma_address(sg)); + e->address_lower = lower_32_bits(sg_dma_address(sg)); + e->address_modifier = 0; } -/** - * This function will build the SGL list for an IO request. - * @sci_req: This parameter specifies the IO request for which to build - * the Scatter-Gather List. - * - */ static void scic_sds_request_build_sgl(struct scic_sds_request *sds_request) { struct isci_request *isci_request = sci_req_to_ireq(sds_request); struct isci_host *isci_host = isci_request->isci_host; + struct scic_sds_controller *scic = &isci_host->sci; struct sas_task *task = isci_request_access_task(isci_request); struct scatterlist *sg = NULL; dma_addr_t dma_addr; @@ -108,25 +116,19 @@ static void scic_sds_request_build_sgl(struct scic_sds_request *sds_request) sg = task->scatter; while (sg) { - scu_sg = scic_sds_request_get_sgl_element_pair( - sds_request, - sg_idx); - - SCU_SGL_COPY(scu_sg->A, sg); - + scu_sg = to_sgl_element_pair(sds_request, sg_idx); + init_sgl_element(&scu_sg->A, sg); sg = sg_next(sg); - if (sg) { - SCU_SGL_COPY(scu_sg->B, sg); + init_sgl_element(&scu_sg->B, sg); sg = sg_next(sg); } else - SCU_SGL_ZERO(scu_sg->B); + memset(&scu_sg->B, 0, sizeof(scu_sg->B)); if (prev_sg) { - dma_addr = - scic_io_request_get_dma_addr( - sds_request, - scu_sg); + dma_addr = to_sgl_element_pair_dma(scic, + sds_request, + sg_idx); prev_sg->next_pair_upper = upper_32_bits(dma_addr); @@ -138,8 +140,7 @@ static void scic_sds_request_build_sgl(struct scic_sds_request *sds_request) sg_idx++; } } else { /* handle when no sg */ - scu_sg = scic_sds_request_get_sgl_element_pair(sds_request, - sg_idx); + scu_sg = to_sgl_element_pair(sds_request, sg_idx); dma_addr = dma_map_single(&isci_host->pdev->dev, task->scatter, @@ -246,35 +247,12 @@ static void scu_ssp_reqeust_construct_task_context( /* task_context->type.ssp.tag = sci_req->io_tag; */ task_context->task_phase = 0x01; - if (sds_request->was_tag_assigned_by_user) { - /* - * Build the task context now since we have already read - * the data - */ - sds_request->post_context = - (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC | - (scic_sds_controller_get_protocol_engine_group( - 
controller) << - SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) | - (scic_sds_port_get_index(target_port) << - SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) | - ISCI_TAG_TCI(sds_request->io_tag)); - } else { - /* - * Build the task context now since we have already read - * the data - * - * I/O tag index is not assigned because we have to wait - * until we get a TCi - */ - sds_request->post_context = - (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC | - (scic_sds_controller_get_protocol_engine_group( - owning_controller) << - SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) | - (scic_sds_port_get_index(target_port) << - SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT)); - } + sds_request->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC | + (scic_sds_controller_get_protocol_engine_group(controller) << + SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) | + (scic_sds_port_get_index(target_port) << + SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) | + ISCI_TAG_TCI(sds_request->io_tag)); /* * Copy the physical address for the command buffer to the @@ -302,14 +280,11 @@ static void scu_ssp_reqeust_construct_task_context( * @sci_req: * */ -static void scu_ssp_io_request_construct_task_context( - struct scic_sds_request *sci_req, - enum dma_data_direction dir, - u32 len) +static void scu_ssp_io_request_construct_task_context(struct scic_sds_request *sci_req, + enum dma_data_direction dir, + u32 len) { - struct scu_task_context *task_context; - - task_context = scic_sds_request_get_task_context(sci_req); + struct scu_task_context *task_context = sci_req->tc; scu_ssp_reqeust_construct_task_context(sci_req, task_context); @@ -347,12 +322,9 @@ static void scu_ssp_io_request_construct_task_context( * constructed. * */ -static void scu_ssp_task_request_construct_task_context( - struct scic_sds_request *sci_req) +static void scu_ssp_task_request_construct_task_context(struct scic_sds_request *sci_req) { - struct scu_task_context *task_context; - - task_context = scic_sds_request_get_task_context(sci_req); + struct scu_task_context *task_context = sci_req->tc; scu_ssp_reqeust_construct_task_context(sci_req, task_context); @@ -421,35 +393,12 @@ static void scu_sata_reqeust_construct_task_context( /* Set the first word of the H2D REG FIS */ task_context->type.words[0] = *(u32 *)&sci_req->stp.cmd; - if (sci_req->was_tag_assigned_by_user) { - /* - * Build the task context now since we have already read - * the data - */ - sci_req->post_context = - (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC | - (scic_sds_controller_get_protocol_engine_group( - controller) << - SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) | - (scic_sds_port_get_index(target_port) << - SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) | - ISCI_TAG_TCI(sci_req->io_tag)); - } else { - /* - * Build the task context now since we have already read - * the data. - * I/O tag index is not assigned because we have to wait - * until we get a TCi. 
- */ - sci_req->post_context = - (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC | - (scic_sds_controller_get_protocol_engine_group( - controller) << - SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) | - (scic_sds_port_get_index(target_port) << - SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT)); - } - + sci_req->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC | + (scic_sds_controller_get_protocol_engine_group(controller) << + SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) | + (scic_sds_port_get_index(target_port) << + SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) | + ISCI_TAG_TCI(sci_req->io_tag)); /* * Copy the physical address for the command buffer to the SCU Task * Context. We must offset the command buffer by 4 bytes because the @@ -467,22 +416,9 @@ static void scu_sata_reqeust_construct_task_context( task_context->response_iu_lower = 0; } - - -/** - * scu_stp_raw_request_construct_task_context - - * @sci_req: This parameter specifies the STP request object for which to - * construct a RAW command frame task context. - * @task_context: This parameter specifies the SCU specific task context buffer - * to construct. - * - * This method performs the operations common to all SATA/STP requests - * utilizing the raw frame method. none - */ -static void scu_stp_raw_request_construct_task_context(struct scic_sds_stp_request *stp_req, - struct scu_task_context *task_context) +static void scu_stp_raw_request_construct_task_context(struct scic_sds_request *sci_req) { - struct scic_sds_request *sci_req = to_sci_req(stp_req); + struct scu_task_context *task_context = sci_req->tc; scu_sata_reqeust_construct_task_context(sci_req, task_context); @@ -500,8 +436,7 @@ scic_sds_stp_pio_request_construct(struct scic_sds_request *sci_req, struct scic_sds_stp_request *stp_req = &sci_req->stp.req; struct scic_sds_stp_pio_request *pio = &stp_req->type.pio; - scu_stp_raw_request_construct_task_context(stp_req, - sci_req->task_context_buffer); + scu_stp_raw_request_construct_task_context(sci_req); pio->current_transfer_bytes = 0; pio->ending_error = 0; @@ -512,13 +447,10 @@ scic_sds_stp_pio_request_construct(struct scic_sds_request *sci_req, if (copy_rx_frame) { scic_sds_request_build_sgl(sci_req); - /* Since the IO request copy of the TC contains the same data as - * the actual TC this pointer is vaild for either. 
- */ - pio->request_current.sgl_pair = &sci_req->task_context_buffer->sgl_pair_ab; + pio->request_current.sgl_index = 0; } else { /* The user does not want the data copied to the SGL buffer location */ - pio->request_current.sgl_pair = NULL; + pio->request_current.sgl_index = -1; } return SCI_SUCCESS; @@ -541,7 +473,7 @@ static void scic_sds_stp_optimized_request_construct(struct scic_sds_request *sc u32 len, enum dma_data_direction dir) { - struct scu_task_context *task_context = sci_req->task_context_buffer; + struct scu_task_context *task_context = sci_req->tc; /* Build the STP task context structure */ scu_sata_reqeust_construct_task_context(sci_req, task_context); @@ -587,8 +519,7 @@ scic_io_request_construct_sata(struct scic_sds_request *sci_req, if (tmf->tmf_code == isci_tmf_sata_srst_high || tmf->tmf_code == isci_tmf_sata_srst_low) { - scu_stp_raw_request_construct_task_context(&sci_req->stp.req, - sci_req->task_context_buffer); + scu_stp_raw_request_construct_task_context(sci_req); return SCI_SUCCESS; } else { dev_err(scic_to_dev(sci_req->owning_controller), @@ -611,8 +542,7 @@ scic_io_request_construct_sata(struct scic_sds_request *sci_req, /* non data */ if (task->data_dir == DMA_NONE) { - scu_stp_raw_request_construct_task_context(&sci_req->stp.req, - sci_req->task_context_buffer); + scu_stp_raw_request_construct_task_context(sci_req); return SCI_SUCCESS; } @@ -701,8 +631,7 @@ enum sci_status scic_task_request_construct_sata(struct scic_sds_request *sci_re if (tmf->tmf_code == isci_tmf_sata_srst_high || tmf->tmf_code == isci_tmf_sata_srst_low) { - scu_stp_raw_request_construct_task_context(&sci_req->stp.req, - sci_req->task_context_buffer); + scu_stp_raw_request_construct_task_context(sci_req); } else { dev_err(scic_to_dev(sci_req->owning_controller), "%s: Request 0x%p received un-handled SAT " @@ -749,9 +678,9 @@ static u32 sci_req_tx_bytes(struct scic_sds_request *sci_req) enum sci_status scic_sds_request_start(struct scic_sds_request *sci_req) { - struct scic_sds_controller *scic = sci_req->owning_controller; - struct scu_task_context *task_context; enum sci_base_request_states state; + struct scu_task_context *tc = sci_req->tc; + struct scic_sds_controller *scic = sci_req->owning_controller; state = sci_req->sm.current_state_id; if (state != SCI_REQ_CONSTRUCTED) { @@ -761,61 +690,39 @@ enum sci_status scic_sds_request_start(struct scic_sds_request *sci_req) return SCI_FAILURE_INVALID_STATE; } - /* if necessary, allocate a TCi for the io request object and then will, - * if necessary, copy the constructed TC data into the actual TC buffer. - * If everything is successful the post context field is updated with - * the TCi so the controller can post the request to the hardware. 
- */ - if (sci_req->io_tag == SCI_CONTROLLER_INVALID_IO_TAG) - sci_req->io_tag = scic_controller_allocate_io_tag(scic); - - /* Record the IO Tag in the request */ - if (sci_req->io_tag != SCI_CONTROLLER_INVALID_IO_TAG) { - task_context = sci_req->task_context_buffer; - - task_context->task_index = ISCI_TAG_TCI(sci_req->io_tag); - - switch (task_context->protocol_type) { - case SCU_TASK_CONTEXT_PROTOCOL_SMP: - case SCU_TASK_CONTEXT_PROTOCOL_SSP: - /* SSP/SMP Frame */ - task_context->type.ssp.tag = sci_req->io_tag; - task_context->type.ssp.target_port_transfer_tag = - 0xFFFF; - break; + tc->task_index = ISCI_TAG_TCI(sci_req->io_tag); - case SCU_TASK_CONTEXT_PROTOCOL_STP: - /* STP/SATA Frame - * task_context->type.stp.ncq_tag = sci_req->ncq_tag; - */ - break; - - case SCU_TASK_CONTEXT_PROTOCOL_NONE: - /* / @todo When do we set no protocol type? */ - break; + switch (tc->protocol_type) { + case SCU_TASK_CONTEXT_PROTOCOL_SMP: + case SCU_TASK_CONTEXT_PROTOCOL_SSP: + /* SSP/SMP Frame */ + tc->type.ssp.tag = sci_req->io_tag; + tc->type.ssp.target_port_transfer_tag = 0xFFFF; + break; - default: - /* This should never happen since we build the IO - * requests */ - break; - } + case SCU_TASK_CONTEXT_PROTOCOL_STP: + /* STP/SATA Frame + * tc->type.stp.ncq_tag = sci_req->ncq_tag; + */ + break; - /* - * Check to see if we need to copy the task context buffer - * or have been building into the task context buffer */ - if (sci_req->was_tag_assigned_by_user == false) - scic_sds_controller_copy_task_context(scic, sci_req); + case SCU_TASK_CONTEXT_PROTOCOL_NONE: + /* / @todo When do we set no protocol type? */ + break; - /* Add to the post_context the io tag value */ - sci_req->post_context |= ISCI_TAG_TCI(sci_req->io_tag); + default: + /* This should never happen since we build the IO + * requests */ + break; + } - /* Everything is good go ahead and change state */ - sci_change_state(&sci_req->sm, SCI_REQ_STARTED); + /* Add to the post_context the io tag value */ + sci_req->post_context |= ISCI_TAG_TCI(sci_req->io_tag); - return SCI_SUCCESS; - } + /* Everything is good go ahead and change state */ + sci_change_state(&sci_req->sm, SCI_REQ_STARTED); - return SCI_FAILURE_INSUFFICIENT_RESOURCES; + return SCI_SUCCESS; } enum sci_status @@ -880,9 +787,6 @@ enum sci_status scic_sds_request_complete(struct scic_sds_request *sci_req) "isci: request completion from wrong state (%d)\n", state)) return SCI_FAILURE_INVALID_STATE; - if (!sci_req->was_tag_assigned_by_user) - scic_controller_free_io_tag(scic, sci_req->io_tag); - if (sci_req->saved_rx_frame_index != SCU_INVALID_FRAME_INDEX) scic_sds_controller_release_frame(scic, sci_req->saved_rx_frame_index); @@ -1244,51 +1148,40 @@ void scic_stp_io_request_set_ncq_tag(struct scic_sds_request *req, * @note This could be made to return an error to the user if the user * attempts to set the NCQ tag in the wrong state. */ - req->task_context_buffer->type.stp.ncq_tag = ncq_tag; + req->tc->type.stp.ncq_tag = ncq_tag; } -/** - * - * @sci_req: - * - * Get the next SGL element from the request. 
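An aside on the SGL conversion above: the first two element pairs of a request (sgl_pair_ab/sgl_pair_cd) are embedded in the hardware task context, which is why to_sgl_element_pair_dma() derives their bus addresses from the new task_context_dma field, while pairs two and up live in the request's own sg_table and are translated through scic_io_request_get_dma_addr(). The sketch below spells out the rebasing that helper is assumed to perform, based on the request_daddr handle the request carries; it is an illustration, not a quote of the driver.

static dma_addr_t sketch_io_request_get_dma_addr(struct scic_sds_request *sci_req,
                                                 void *virt_addr)
{
        /* Sketch only: struct scic_sds_request (and its sg_table) is
         * embedded in struct isci_request, which sits in a single coherent
         * allocation whose bus address is ireq->request_daddr.  Any object
         * inside that allocation is therefore reachable at request_daddr
         * plus its byte offset from the start of the request.
         */
        struct isci_request *ireq = sci_req_to_ireq(sci_req);

        return ireq->request_daddr + (virt_addr - (void *)ireq);
}

The same arithmetic cannot cover indices 0 and 1 because the task context table is a separate allocation, hence the two special cases at the top of to_sgl_element_pair_dma().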
- Check on which SGL element pair - * we are working - if working on SLG pair element A - advance to element B - - * else - check to see if there are more SGL element pairs for this IO request - * - if there are more SGL element pairs - advance to the next pair and return - * element A struct scu_sgl_element* - */ -static struct scu_sgl_element *scic_sds_stp_request_pio_get_next_sgl(struct scic_sds_stp_request *stp_req) +static struct scu_sgl_element *pio_sgl_next(struct scic_sds_stp_request *stp_req) { - struct scu_sgl_element *current_sgl; + struct scu_sgl_element *sgl; + struct scu_sgl_element_pair *sgl_pair; struct scic_sds_request *sci_req = to_sci_req(stp_req); struct scic_sds_request_pio_sgl *pio_sgl = &stp_req->type.pio.request_current; - if (pio_sgl->sgl_set == SCU_SGL_ELEMENT_PAIR_A) { - if (pio_sgl->sgl_pair->B.address_lower == 0 && - pio_sgl->sgl_pair->B.address_upper == 0) { - current_sgl = NULL; + sgl_pair = to_sgl_element_pair(sci_req, pio_sgl->sgl_index); + if (!sgl_pair) + sgl = NULL; + else if (pio_sgl->sgl_set == SCU_SGL_ELEMENT_PAIR_A) { + if (sgl_pair->B.address_lower == 0 && + sgl_pair->B.address_upper == 0) { + sgl = NULL; } else { pio_sgl->sgl_set = SCU_SGL_ELEMENT_PAIR_B; - current_sgl = &pio_sgl->sgl_pair->B; + sgl = &sgl_pair->B; } } else { - if (pio_sgl->sgl_pair->next_pair_lower == 0 && - pio_sgl->sgl_pair->next_pair_upper == 0) { - current_sgl = NULL; + if (sgl_pair->next_pair_lower == 0 && + sgl_pair->next_pair_upper == 0) { + sgl = NULL; } else { - u64 phys_addr; - - phys_addr = pio_sgl->sgl_pair->next_pair_upper; - phys_addr <<= 32; - phys_addr |= pio_sgl->sgl_pair->next_pair_lower; - - pio_sgl->sgl_pair = scic_request_get_virt_addr(sci_req, phys_addr); + pio_sgl->sgl_index++; pio_sgl->sgl_set = SCU_SGL_ELEMENT_PAIR_A; - current_sgl = &pio_sgl->sgl_pair->A; + sgl_pair = to_sgl_element_pair(sci_req, pio_sgl->sgl_index); + sgl = &sgl_pair->A; } } - return current_sgl; + return sgl; } static enum sci_status @@ -1328,21 +1221,19 @@ static enum sci_status scic_sds_stp_request_pio_data_out_trasmit_data_frame( struct scic_sds_request *sci_req, u32 length) { - struct scic_sds_controller *scic = sci_req->owning_controller; struct scic_sds_stp_request *stp_req = &sci_req->stp.req; - struct scu_task_context *task_context; + struct scu_task_context *task_context = sci_req->tc; + struct scu_sgl_element_pair *sgl_pair; struct scu_sgl_element *current_sgl; /* Recycle the TC and reconstruct it for sending out DATA FIS containing * for the data from current_sgl+offset for the input length */ - task_context = scic_sds_controller_get_task_context_buffer(scic, - sci_req->io_tag); - + sgl_pair = to_sgl_element_pair(sci_req, stp_req->type.pio.request_current.sgl_index); if (stp_req->type.pio.request_current.sgl_set == SCU_SGL_ELEMENT_PAIR_A) - current_sgl = &stp_req->type.pio.request_current.sgl_pair->A; + current_sgl = &sgl_pair->A; else - current_sgl = &stp_req->type.pio.request_current.sgl_pair->B; + current_sgl = &sgl_pair->B; /* update the TC */ task_context->command_iu_upper = current_sgl->address_upper; @@ -1362,18 +1253,21 @@ static enum sci_status scic_sds_stp_request_pio_data_out_transmit_data(struct sc u32 remaining_bytes_in_current_sgl = 0; enum sci_status status = SCI_SUCCESS; struct scic_sds_stp_request *stp_req = &sci_req->stp.req; + struct scu_sgl_element_pair *sgl_pair; sgl_offset = stp_req->type.pio.request_current.sgl_offset; + sgl_pair = to_sgl_element_pair(sci_req, stp_req->type.pio.request_current.sgl_index); + if (WARN_ONCE(!sgl_pair, "%s: null sgl 
element", __func__)) + return SCI_FAILURE; if (stp_req->type.pio.request_current.sgl_set == SCU_SGL_ELEMENT_PAIR_A) { - current_sgl = &(stp_req->type.pio.request_current.sgl_pair->A); - remaining_bytes_in_current_sgl = stp_req->type.pio.request_current.sgl_pair->A.length - sgl_offset; + current_sgl = &sgl_pair->A; + remaining_bytes_in_current_sgl = sgl_pair->A.length - sgl_offset; } else { - current_sgl = &(stp_req->type.pio.request_current.sgl_pair->B); - remaining_bytes_in_current_sgl = stp_req->type.pio.request_current.sgl_pair->B.length - sgl_offset; + current_sgl = &sgl_pair->B; + remaining_bytes_in_current_sgl = sgl_pair->B.length - sgl_offset; } - if (stp_req->type.pio.pio_transfer_bytes > 0) { if (stp_req->type.pio.pio_transfer_bytes >= remaining_bytes_in_current_sgl) { /* recycle the TC and send the H2D Data FIS from (current sgl + sgl_offset) and length = remaining_bytes_in_current_sgl */ @@ -1382,7 +1276,7 @@ static enum sci_status scic_sds_stp_request_pio_data_out_transmit_data(struct sc stp_req->type.pio.pio_transfer_bytes -= remaining_bytes_in_current_sgl; /* update the current sgl, sgl_offset and save for future */ - current_sgl = scic_sds_stp_request_pio_get_next_sgl(stp_req); + current_sgl = pio_sgl_next(stp_req); sgl_offset = 0; } } else if (stp_req->type.pio.pio_transfer_bytes < remaining_bytes_in_current_sgl) { @@ -1945,7 +1839,7 @@ scic_sds_io_request_frame_handler(struct scic_sds_request *sci_req, return status; } - if (stp_req->type.pio.request_current.sgl_pair == NULL) { + if (stp_req->type.pio.request_current.sgl_index < 0) { sci_req->saved_rx_frame_index = frame_index; stp_req->type.pio.pio_transfer_bytes = 0; } else { @@ -2977,8 +2871,6 @@ static void isci_request_io_request_complete(struct isci_host *isci_host, * task to recognize the already completed case. */ request->terminated = true; - - isci_host_can_dequeue(isci_host, 1); } static void scic_sds_request_started_state_enter(struct sci_base_state_machine *sm) @@ -3039,7 +2931,7 @@ static void scic_sds_request_aborting_state_enter(struct sci_base_state_machine struct scic_sds_request *sci_req = container_of(sm, typeof(*sci_req), sm); /* Setting the abort bit in the Task Context is required by the silicon. 
*/ - sci_req->task_context_buffer->abort = 1; + sci_req->tc->abort = 1; } static void scic_sds_stp_request_started_non_data_await_h2d_completion_enter(struct sci_base_state_machine *sm) @@ -3069,7 +2961,7 @@ static void scic_sds_stp_request_started_soft_reset_await_h2d_asserted_completio static void scic_sds_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter(struct sci_base_state_machine *sm) { struct scic_sds_request *sci_req = container_of(sm, typeof(*sci_req), sm); - struct scu_task_context *task_context; + struct scu_task_context *tc = sci_req->tc; struct host_to_dev_fis *h2d_fis; enum sci_status status; @@ -3078,9 +2970,7 @@ static void scic_sds_stp_request_started_soft_reset_await_h2d_diagnostic_complet h2d_fis->control = 0; /* Clear the TC control bit */ - task_context = scic_sds_controller_get_task_context_buffer( - sci_req->owning_controller, sci_req->io_tag); - task_context->control_frame = 0; + tc->control_frame = 0; status = scic_controller_continue_io(sci_req); WARN_ONCE(status != SCI_SUCCESS, "isci: continue io failure\n"); @@ -3141,18 +3031,10 @@ scic_sds_general_request_construct(struct scic_sds_controller *scic, sci_req->sci_status = SCI_SUCCESS; sci_req->scu_status = 0; sci_req->post_context = 0xFFFFFFFF; + sci_req->tc = &scic->task_context_table[ISCI_TAG_TCI(io_tag)]; sci_req->is_task_management_request = false; - - if (io_tag == SCI_CONTROLLER_INVALID_IO_TAG) { - sci_req->was_tag_assigned_by_user = false; - sci_req->task_context_buffer = &sci_req->tc; - } else { - sci_req->was_tag_assigned_by_user = true; - - sci_req->task_context_buffer = - scic_sds_controller_get_task_context_buffer(scic, io_tag); - } + WARN_ONCE(io_tag == SCI_CONTROLLER_INVALID_IO_TAG, "straggling invalid tag usage\n"); } static enum sci_status @@ -3178,8 +3060,7 @@ scic_io_request_construct(struct scic_sds_controller *scic, else return SCI_FAILURE_UNSUPPORTED_PROTOCOL; - memset(sci_req->task_context_buffer, 0, - offsetof(struct scu_task_context, sgl_pair_ab)); + memset(sci_req->tc, 0, offsetof(struct scu_task_context, sgl_pair_ab)); return status; } @@ -3197,7 +3078,7 @@ enum sci_status scic_task_request_construct(struct scic_sds_controller *scic, if (dev->dev_type == SAS_END_DEV || dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) { sci_req->is_task_management_request = true; - memset(sci_req->task_context_buffer, 0, sizeof(struct scu_task_context)); + memset(sci_req->tc, 0, sizeof(struct scu_task_context)); } else status = SCI_FAILURE_UNSUPPORTED_PROTOCOL; @@ -3299,7 +3180,7 @@ scic_io_request_construct_smp(struct device *dev, /* byte swap the smp request. */ - task_context = scic_sds_request_get_task_context(sci_req); + task_context = sci_req->tc; sci_dev = scic_sds_request_get_device(sci_req); sci_port = scic_sds_request_get_port(sci_req); @@ -3354,33 +3235,12 @@ scic_io_request_construct_smp(struct device *dev, */ task_context->task_phase = 0; - if (sci_req->was_tag_assigned_by_user) { - /* - * Build the task context now since we have already read - * the data - */ - sci_req->post_context = - (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC | - (scic_sds_controller_get_protocol_engine_group(scic) << - SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) | - (scic_sds_port_get_index(sci_port) << - SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) | - ISCI_TAG_TCI(sci_req->io_tag)); - } else { - /* - * Build the task context now since we have already read - * the data. - * I/O tag index is not assigned because we have to wait - * until we get a TCi. 
- */ - sci_req->post_context = - (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC | - (scic_sds_controller_get_protocol_engine_group(scic) << - SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) | - (scic_sds_port_get_index(sci_port) << - SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT)); - } - + sci_req->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC | + (scic_sds_controller_get_protocol_engine_group(scic) << + SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) | + (scic_sds_port_get_index(sci_port) << + SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) | + ISCI_TAG_TCI(sci_req->io_tag)); /* * Copy the physical address for the command buffer to the SCU Task * Context command buffer should not contain command header. @@ -3431,10 +3291,10 @@ static enum sci_status isci_smp_request_build(struct isci_request *ireq) * * SCI_SUCCESS on successfull completion, or specific failure code. */ -static enum sci_status isci_io_request_build( - struct isci_host *isci_host, - struct isci_request *request, - struct isci_remote_device *isci_device) +static enum sci_status isci_io_request_build(struct isci_host *isci_host, + struct isci_request *request, + struct isci_remote_device *isci_device, + u16 tag) { enum sci_status status = SCI_SUCCESS; struct sas_task *task = isci_request_access_task(request); @@ -3471,8 +3331,7 @@ static enum sci_status isci_io_request_build( * we will let the core allocate the IO tag. */ status = scic_io_request_construct(&isci_host->sci, sci_device, - SCI_CONTROLLER_INVALID_IO_TAG, - &request->sci); + tag, &request->sci); if (status != SCI_SUCCESS) { dev_warn(&isci_host->pdev->dev, @@ -3564,7 +3423,7 @@ struct isci_request *isci_request_alloc_tmf(struct isci_host *ihost, } int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *idev, - struct sas_task *task, gfp_t gfp_flags) + struct sas_task *task, u16 tag, gfp_t gfp_flags) { enum sci_status status = SCI_FAILURE_UNSUPPORTED_PROTOCOL; struct isci_request *ireq; @@ -3576,7 +3435,7 @@ int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *ide if (!ireq) goto out; - status = isci_io_request_build(ihost, ireq, idev); + status = isci_io_request_build(ihost, ireq, idev, tag); if (status != SCI_SUCCESS) { dev_warn(&ihost->pdev->dev, "%s: request_construct failed - status = 0x%x\n", @@ -3599,18 +3458,16 @@ int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *ide */ status = scic_controller_start_task(&ihost->sci, &idev->sci, - &ireq->sci, - SCI_CONTROLLER_INVALID_IO_TAG); + &ireq->sci); } else { status = SCI_FAILURE; } } else { - /* send the request, let the core assign the IO TAG. */ status = scic_controller_start_io(&ihost->sci, &idev->sci, - &ireq->sci, - SCI_CONTROLLER_INVALID_IO_TAG); + &ireq->sci); } + if (status != SCI_SUCCESS && status != SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) { dev_warn(&ihost->pdev->dev, @@ -3647,23 +3504,23 @@ int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *ide if (status == SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) { /* Signal libsas that we need the SCSI error - * handler thread to work on this I/O and that - * we want a device reset. - */ + * handler thread to work on this I/O and that + * we want a device reset. + */ spin_lock_irqsave(&task->task_state_lock, flags); task->task_state_flags |= SAS_TASK_NEED_DEV_RESET; spin_unlock_irqrestore(&task->task_state_lock, flags); /* Cause this task to be scheduled in the SCSI error - * handler thread. - */ + * handler thread. 
+ */ isci_execpath_callback(ihost, task, sas_task_abort); /* Change the status, since we are holding - * the I/O until it is managed by the SCSI - * error handler. - */ + * the I/O until it is managed by the SCSI + * error handler. + */ status = SCI_SUCCESS; } diff --git a/drivers/scsi/isci/request.h b/drivers/scsi/isci/request.h index 9130f22a63b..8c77c4cbe04 100644 --- a/drivers/scsi/isci/request.h +++ b/drivers/scsi/isci/request.h @@ -136,7 +136,7 @@ struct scic_sds_stp_request { u8 ending_error; struct scic_sds_request_pio_sgl { - struct scu_sgl_element_pair *sgl_pair; + int sgl_index; u8 sgl_set; u32 sgl_offset; } request_current; @@ -171,12 +171,6 @@ struct scic_sds_request { */ struct scic_sds_remote_device *target_device; - /* - * This field is utilized to determine if the SCI user is managing - * the IO tag for this request or if the core is managing it. - */ - bool was_tag_assigned_by_user; - /* * This field indicates the IO tag for this request. The IO tag is * comprised of the task_index and a sequence count. The sequence count @@ -209,8 +203,7 @@ struct scic_sds_request { */ u32 post_context; - struct scu_task_context *task_context_buffer; - struct scu_task_context tc ____cacheline_aligned; + struct scu_task_context *tc; /* could be larger with sg chaining */ #define SCU_SGL_SIZE ((SCI_MAX_SCATTER_GATHER_ELEMENTS + 1) / 2) @@ -465,35 +458,6 @@ enum sci_base_request_states { (request)->sci_status = (sci_status_code); \ } -/** - * SCU_SGL_ZERO() - - * - * This macro zeros the hardware SGL element data - */ -#define SCU_SGL_ZERO(scu_sge) \ - { \ - (scu_sge).length = 0; \ - (scu_sge).address_lower = 0; \ - (scu_sge).address_upper = 0; \ - (scu_sge).address_modifier = 0; \ - } - -/** - * SCU_SGL_COPY() - - * - * This macro copys the SGL Element data from the host os to the hardware SGL - * elment data - */ -#define SCU_SGL_COPY(scu_sge, os_sge) \ - { \ - (scu_sge).length = sg_dma_len(sg); \ - (scu_sge).address_upper = \ - upper_32_bits(sg_dma_address(sg)); \ - (scu_sge).address_lower = \ - lower_32_bits(sg_dma_address(sg)); \ - (scu_sge).address_modifier = 0; \ - } - enum sci_status scic_sds_request_start(struct scic_sds_request *sci_req); enum sci_status scic_sds_io_request_terminate(struct scic_sds_request *sci_req); enum sci_status @@ -509,22 +473,6 @@ scic_sds_request_complete(struct scic_sds_request *sci_req); extern enum sci_status scic_sds_io_request_tc_completion(struct scic_sds_request *sci_req, u32 code); -/* XXX open code in caller */ -static inline void *scic_request_get_virt_addr(struct scic_sds_request *sci_req, - dma_addr_t phys_addr) -{ - struct isci_request *ireq = sci_req_to_ireq(sci_req); - dma_addr_t offset; - - BUG_ON(phys_addr < ireq->request_daddr); - - offset = phys_addr - ireq->request_daddr; - - BUG_ON(offset >= sizeof(*ireq)); - - return (char *)ireq + offset; -} - /* XXX open code in caller */ static inline dma_addr_t scic_io_request_get_dma_addr(struct scic_sds_request *sci_req, void *virt_addr) @@ -672,7 +620,7 @@ struct isci_request *isci_request_alloc_tmf(struct isci_host *ihost, struct isci_tmf *isci_tmf, gfp_t gfp_flags); int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *idev, - struct sas_task *task, gfp_t gfp_flags); + struct sas_task *task, u16 tag, gfp_t gfp_flags); void isci_terminate_pending_requests(struct isci_host *ihost, struct isci_remote_device *idev); enum sci_status diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c index 157e9978183..22f6fe17111 100644 --- a/drivers/scsi/isci/task.c +++ 
b/drivers/scsi/isci/task.c @@ -63,6 +63,7 @@ #include "request.h" #include "sata.h" #include "task.h" +#include "host.h" /** * isci_task_refuse() - complete the request to the upper layer driver in @@ -156,25 +157,19 @@ int isci_task_execute_task(struct sas_task *task, int num, gfp_t gfp_flags) { struct isci_host *ihost = dev_to_ihost(task->dev); struct isci_remote_device *idev; - enum sci_status status; unsigned long flags; bool io_ready; - int ret; + u16 tag; dev_dbg(&ihost->pdev->dev, "%s: num=%d\n", __func__, num); - /* Check if we have room for more tasks */ - ret = isci_host_can_queue(ihost, num); - - if (ret) { - dev_warn(&ihost->pdev->dev, "%s: queue full\n", __func__); - return ret; - } - for_each_sas_task(num, task) { + enum sci_status status = SCI_FAILURE; + spin_lock_irqsave(&ihost->scic_lock, flags); idev = isci_lookup_device(task->dev); io_ready = isci_device_io_ready(idev, task); + tag = isci_alloc_tag(ihost); spin_unlock_irqrestore(&ihost->scic_lock, flags); dev_dbg(&ihost->pdev->dev, @@ -185,15 +180,12 @@ int isci_task_execute_task(struct sas_task *task, int num, gfp_t gfp_flags) if (!idev) { isci_task_refuse(ihost, task, SAS_TASK_UNDELIVERED, SAS_DEVICE_UNKNOWN); - isci_host_can_dequeue(ihost, 1); - } else if (!io_ready) { - + } else if (!io_ready || tag == SCI_CONTROLLER_INVALID_IO_TAG) { /* Indicate QUEUE_FULL so that the scsi midlayer * retries. */ isci_task_refuse(ihost, task, SAS_TASK_COMPLETE, SAS_QUEUE_FULL); - isci_host_can_dequeue(ihost, 1); } else { /* There is a device and it's ready for I/O. */ spin_lock_irqsave(&task->task_state_lock, flags); @@ -206,13 +198,12 @@ int isci_task_execute_task(struct sas_task *task, int num, gfp_t gfp_flags) isci_task_refuse(ihost, task, SAS_TASK_UNDELIVERED, SAM_STAT_TASK_ABORTED); - isci_host_can_dequeue(ihost, 1); } else { task->task_state_flags |= SAS_TASK_AT_INITIATOR; spin_unlock_irqrestore(&task->task_state_lock, flags); /* build and send the request. */ - status = isci_request_execute(ihost, idev, task, gfp_flags); + status = isci_request_execute(ihost, idev, task, tag, gfp_flags); if (status != SCI_SUCCESS) { @@ -231,10 +222,17 @@ int isci_task_execute_task(struct sas_task *task, int num, gfp_t gfp_flags) isci_task_refuse(ihost, task, SAS_TASK_COMPLETE, SAS_QUEUE_FULL); - isci_host_can_dequeue(ihost, 1); } } } + if (status != SCI_SUCCESS && tag != SCI_CONTROLLER_INVALID_IO_TAG) { + spin_lock_irqsave(&ihost->scic_lock, flags); + /* command never hit the device, so just free + * the tci and skip the sequence increment + */ + isci_tci_free(ihost, ISCI_TAG_TCI(tag)); + spin_unlock_irqrestore(&ihost->scic_lock, flags); + } isci_put_device(idev); } return 0; @@ -242,7 +240,7 @@ int isci_task_execute_task(struct sas_task *task, int num, gfp_t gfp_flags) static struct isci_request *isci_task_request_build(struct isci_host *ihost, struct isci_remote_device *idev, - struct isci_tmf *isci_tmf) + u16 tag, struct isci_tmf *isci_tmf) { enum sci_status status = SCI_FAILURE; struct isci_request *ireq = NULL; @@ -259,8 +257,7 @@ static struct isci_request *isci_task_request_build(struct isci_host *ihost, return NULL; /* let the core do it's construct. 
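For orientation, the tag helpers used above follow the same sequence-plus-TCI scheme as the scic_controller_free_io_tag() they replace (removed from host.c earlier in this patch). The sketch below illustrates that scheme; the exact bit split in the packing macro and the circular TCI-pool helpers (isci_tci_space()/isci_tci_alloc()) are assumptions for the illustration, not taken from these hunks. Callers are expected to hold ihost->scic_lock, as the task.c changes above do.

/* Illustrative sketch; bit widths and pool helpers are assumed. */
#define SKETCH_TAG(seq, tci)    ((u16)((seq) << 12 | (tci)))

u16 sketch_alloc_tag(struct isci_host *ihost)
{
        if (isci_tci_space(ihost)) {            /* free task contexts left? */
                u16 tci = isci_tci_alloc(ihost);        /* pop one from the pool */
                u8 seq = ihost->sci.io_request_sequence[tci];

                return SKETCH_TAG(seq, tci);
        }
        return SCI_CONTROLLER_INVALID_IO_TAG;
}

enum sci_status sketch_free_tag(struct isci_host *ihost, u16 io_tag)
{
        u16 tci = ISCI_TAG_TCI(io_tag);
        u16 seq = ISCI_TAG_SEQ(io_tag);

        if (isci_tci_active(ihost) == 0)        /* nothing outstanding */
                return SCI_FAILURE_INVALID_IO_TAG;

        if (seq != ihost->sci.io_request_sequence[tci])
                return SCI_FAILURE_INVALID_IO_TAG;      /* stale tag */

        /* Invalidate the old tag by bumping the per-TCI sequence, then
         * return the bare TCI to the pool.
         */
        ihost->sci.io_request_sequence[tci] = (seq + 1) & (SCI_MAX_SEQ - 1);
        isci_tci_free(ihost, tci);
        return SCI_SUCCESS;
}

This also shows why the error path above calls isci_tci_free() directly and "skips the sequence increment": presumably a command that never reached the hardware cannot leave a stale completion behind, so the same tag value may safely be handed out again.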
*/ - status = scic_task_request_construct(&ihost->sci, &idev->sci, - SCI_CONTROLLER_INVALID_IO_TAG, + status = scic_task_request_construct(&ihost->sci, &idev->sci, tag, &ireq->sci); if (status != SCI_SUCCESS) { @@ -290,8 +287,7 @@ static struct isci_request *isci_task_request_build(struct isci_host *ihost, return ireq; errout: isci_request_free(ihost, ireq); - ireq = NULL; - return ireq; + return NULL; } int isci_task_execute_tmf(struct isci_host *ihost, @@ -305,6 +301,14 @@ int isci_task_execute_tmf(struct isci_host *ihost, int ret = TMF_RESP_FUNC_FAILED; unsigned long flags; unsigned long timeleft; + u16 tag; + + spin_lock_irqsave(&ihost->scic_lock, flags); + tag = isci_alloc_tag(ihost); + spin_unlock_irqrestore(&ihost->scic_lock, flags); + + if (tag == SCI_CONTROLLER_INVALID_IO_TAG) + return ret; /* sanity check, return TMF_RESP_FUNC_FAILED * if the device is not there and ready. @@ -316,7 +320,7 @@ int isci_task_execute_tmf(struct isci_host *ihost, "%s: isci_device = %p not ready (%#lx)\n", __func__, isci_device, isci_device ? isci_device->flags : 0); - return TMF_RESP_FUNC_FAILED; + goto err_tci; } else dev_dbg(&ihost->pdev->dev, "%s: isci_device = %p\n", @@ -327,22 +331,16 @@ int isci_task_execute_tmf(struct isci_host *ihost, /* Assign the pointer to the TMF's completion kernel wait structure. */ tmf->complete = &completion; - ireq = isci_task_request_build(ihost, isci_device, tmf); - if (!ireq) { - dev_warn(&ihost->pdev->dev, - "%s: isci_task_request_build failed\n", - __func__); - return TMF_RESP_FUNC_FAILED; - } + ireq = isci_task_request_build(ihost, isci_device, tag, tmf); + if (!ireq) + goto err_tci; spin_lock_irqsave(&ihost->scic_lock, flags); /* start the TMF io. */ - status = scic_controller_start_task( - &ihost->sci, - sci_device, - &ireq->sci, - SCI_CONTROLLER_INVALID_IO_TAG); + status = scic_controller_start_task(&ihost->sci, + sci_device, + &ireq->sci); if (status != SCI_TASK_SUCCESS) { dev_warn(&ihost->pdev->dev, @@ -351,8 +349,7 @@ int isci_task_execute_tmf(struct isci_host *ihost, status, ireq); spin_unlock_irqrestore(&ihost->scic_lock, flags); - isci_request_free(ihost, ireq); - return ret; + goto err_ireq; } if (tmf->cb_state_func != NULL) @@ -403,6 +400,15 @@ int isci_task_execute_tmf(struct isci_host *ihost, ireq); return ret; + + err_ireq: + isci_request_free(ihost, ireq); + err_tci: + spin_lock_irqsave(&ihost->scic_lock, flags); + isci_tci_free(ihost, ISCI_TAG_TCI(tag)); + spin_unlock_irqrestore(&ihost->scic_lock, flags); + + return ret; } void isci_task_build_tmf( -- cgit v1.2.3-70-g09d2 From 38d8879baeb61b6946052739e7c03fa79b3a57f0 Mon Sep 17 00:00:00 2001 From: Dan Williams <dan.j.williams@intel.com> Date: Thu, 23 Jun 2011 14:33:48 -0700 Subject: isci: combine request flags Combine three bools into one unsigned long 'flags'. Doesn't increase the request size due to packing. (to do: optimize the structure layout). Signed-off-by: Dan Williams <dan.j.williams@intel.com> --- drivers/scsi/isci/request.c | 48 ++++++++++++++++++++------------------------- drivers/scsi/isci/request.h | 12 ++++-------- drivers/scsi/isci/task.c | 23 +++++++++++----------- 3 files changed, 36 insertions(+), 47 deletions(-) (limited to 'drivers/scsi/isci/request.h') diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c index 55859d5331b..27376ba2248 100644 --- a/drivers/scsi/isci/request.c +++ b/drivers/scsi/isci/request.c @@ -2183,7 +2183,7 @@ static void isci_request_set_open_reject_status( enum sas_open_rej_reason open_rej_reason) { /* Task in the target is done. 
*/ - request->complete_in_target = true; + set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); *response_ptr = SAS_TASK_UNDELIVERED; *status_ptr = SAS_OPEN_REJECT; *complete_to_host_ptr = isci_perform_normal_io_completion; @@ -2248,7 +2248,7 @@ static void isci_request_handle_controller_specific_errors( else *status_ptr = SAS_ABORTED_TASK; - request->complete_in_target = true; + set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); *complete_to_host_ptr = isci_perform_normal_io_completion; @@ -2261,7 +2261,7 @@ static void isci_request_handle_controller_specific_errors( else *status_ptr = SAM_STAT_TASK_ABORTED; - request->complete_in_target = false; + clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); *complete_to_host_ptr = isci_perform_error_io_completion; @@ -2292,7 +2292,7 @@ static void isci_request_handle_controller_specific_errors( else *status_ptr = SAS_ABORTED_TASK; - request->complete_in_target = true; + set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); *complete_to_host_ptr = isci_perform_normal_io_completion; break; @@ -2397,11 +2397,11 @@ static void isci_request_handle_controller_specific_errors( *status_ptr = SAM_STAT_TASK_ABORTED; if (task->task_proto == SAS_PROTOCOL_SMP) { - request->complete_in_target = true; + set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); *complete_to_host_ptr = isci_perform_normal_io_completion; } else { - request->complete_in_target = false; + clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); *complete_to_host_ptr = isci_perform_error_io_completion; } @@ -2552,7 +2552,7 @@ static void isci_request_io_request_complete(struct isci_host *isci_host, * * The target is still there (since the TMF was successful). */ - request->complete_in_target = true; + set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); response = SAS_TASK_COMPLETE; /* See if the device has been/is being stopped. Note @@ -2579,7 +2579,7 @@ static void isci_request_io_request_complete(struct isci_host *isci_host, * Aborting also means an external thread is explicitly managing * this request, so that we do not complete it up the stack. */ - request->complete_in_target = true; + set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); response = SAS_TASK_UNDELIVERED; if (!idev) @@ -2605,7 +2605,7 @@ static void isci_request_io_request_complete(struct isci_host *isci_host, * the device (reset, tear down, etc.), and the I/O needs * to be completed up the stack. */ - request->complete_in_target = true; + set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); response = SAS_TASK_UNDELIVERED; /* See if the device has been/is being stopped. Note @@ -2675,7 +2675,7 @@ static void isci_request_io_request_complete(struct isci_host *isci_host, /* use the task status set in the task struct by the * isci_request_process_response_iu call. */ - request->complete_in_target = true; + set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); response = task->task_status.resp; status = task->task_status.stat; break; @@ -2685,7 +2685,7 @@ static void isci_request_io_request_complete(struct isci_host *isci_host, response = SAS_TASK_COMPLETE; status = SAM_STAT_GOOD; - request->complete_in_target = true; + set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); if (task->task_proto == SAS_PROTOCOL_SMP) { void *rsp = &request->sci.smp.rsp; @@ -2737,7 +2737,7 @@ static void isci_request_io_request_complete(struct isci_host *isci_host, /* The request was terminated explicitly. No handling * is needed in the SCSI error handler path. 
*/ - request->complete_in_target = true; + set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); response = SAS_TASK_UNDELIVERED; /* See if the device has been/is being stopped. Note @@ -2777,7 +2777,7 @@ static void isci_request_io_request_complete(struct isci_host *isci_host, status = SAM_STAT_TASK_ABORTED; complete_to_host = isci_perform_error_io_completion; - request->complete_in_target = false; + clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); break; case SCI_FAILURE_RETRY_REQUIRED: @@ -2790,7 +2790,7 @@ static void isci_request_io_request_complete(struct isci_host *isci_host, status = SAS_ABORTED_TASK; complete_to_host = isci_perform_normal_io_completion; - request->complete_in_target = true; + set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); break; @@ -2813,10 +2813,10 @@ static void isci_request_io_request_complete(struct isci_host *isci_host, status = SAS_ABORTED_TASK; if (SAS_PROTOCOL_SMP == task->task_proto) { - request->complete_in_target = true; + set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); complete_to_host = isci_perform_normal_io_completion; } else { - request->complete_in_target = false; + clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); complete_to_host = isci_perform_error_io_completion; } break; @@ -2870,7 +2870,7 @@ static void isci_request_io_request_complete(struct isci_host *isci_host, * terminated again, and to cause any calls into abort * task to recognize the already completed case. */ - request->terminated = true; + set_bit(IREQ_TERMINATED, &request->flags); } static void scic_sds_request_started_state_enter(struct sci_base_state_machine *sm) @@ -2919,7 +2919,7 @@ static void scic_sds_request_completed_state_enter(struct sci_base_state_machine struct isci_request *ireq = sci_req_to_ireq(sci_req); /* Tell the SCI_USER that the IO request is complete */ - if (sci_req->is_task_management_request == false) + if (!test_bit(IREQ_TMF, &ireq->flags)) isci_request_io_request_complete(ihost, ireq, sci_req->sci_status); else @@ -3032,8 +3032,6 @@ scic_sds_general_request_construct(struct scic_sds_controller *scic, sci_req->scu_status = 0; sci_req->post_context = 0xFFFFFFFF; sci_req->tc = &scic->task_context_table[ISCI_TAG_TCI(io_tag)]; - - sci_req->is_task_management_request = false; WARN_ONCE(io_tag == SCI_CONTROLLER_INVALID_IO_TAG, "straggling invalid tag usage\n"); } @@ -3077,7 +3075,7 @@ enum sci_status scic_task_request_construct(struct scic_sds_controller *scic, if (dev->dev_type == SAS_END_DEV || dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) { - sci_req->is_task_management_request = true; + set_bit(IREQ_TMF, &sci_req_to_ireq(sci_req)->flags); memset(sci_req->tc, 0, sizeof(struct scu_task_context)); } else status = SCI_FAILURE_UNSUPPORTED_PROTOCOL; @@ -3379,12 +3377,8 @@ static struct isci_request *isci_request_alloc_core(struct isci_host *ihost, ireq->request_daddr = handle; ireq->isci_host = ihost; ireq->io_request_completion = NULL; - ireq->terminated = false; - + ireq->flags = 0; ireq->num_sg_entries = 0; - - ireq->complete_in_target = false; - INIT_LIST_HEAD(&ireq->completed_node); INIT_LIST_HEAD(&ireq->dev_node); @@ -3496,7 +3490,7 @@ int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *ide * hardware, so clear the request handle * here so no terminations will be done. 
*/ - ireq->terminated = true; + set_bit(IREQ_TERMINATED, &ireq->flags); isci_request_change_state(ireq, completed); } spin_unlock_irqrestore(&ihost->scic_lock, flags); diff --git a/drivers/scsi/isci/request.h b/drivers/scsi/isci/request.h index 8c77c4cbe04..f440e421ea0 100644 --- a/drivers/scsi/isci/request.h +++ b/drivers/scsi/isci/request.h @@ -209,12 +209,6 @@ struct scic_sds_request { #define SCU_SGL_SIZE ((SCI_MAX_SCATTER_GATHER_ELEMENTS + 1) / 2) struct scu_sgl_element_pair sg_table[SCU_SGL_SIZE] __attribute__ ((aligned(32))); - /* - * This field indicates if this request is a task management request or - * normal IO request. - */ - bool is_task_management_request; - /* * This field is a pointer to the stored rx frame data. It is used in * STP internal requests and SMP response frames. If this field is @@ -260,8 +254,10 @@ struct isci_request { enum isci_request_status status; enum task_type ttype; unsigned short io_tag; - bool complete_in_target; - bool terminated; + #define IREQ_COMPLETE_IN_TARGET 0 + #define IREQ_TERMINATED 1 + #define IREQ_TMF 2 + unsigned long flags; union ttype_ptr_union { struct sas_task *io_task_ptr; /* When ttype==io_task */ diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c index 22f6fe17111..d1a46710f4a 100644 --- a/drivers/scsi/isci/task.c +++ b/drivers/scsi/isci/task.c @@ -558,15 +558,15 @@ static void isci_terminate_request_core( : NULL; /* Note that we are not going to control - * the target to abort the request. - */ - isci_request->complete_in_target = true; + * the target to abort the request. + */ + set_bit(IREQ_COMPLETE_IN_TARGET, &isci_request->flags); /* Make sure the request wasn't just sitting around signalling * device condition (if the request handle is NULL, then the * request completed but needed additional handling here). */ - if (!isci_request->terminated) { + if (!test_bit(IREQ_TERMINATED, &isci_request->flags)) { was_terminated = true; needs_cleanup_handling = true; status = scic_controller_terminate_request( @@ -609,7 +609,7 @@ static void isci_terminate_request_core( flags); /* Check for state changes. */ - if (!isci_request->terminated) { + if (!test_bit(IREQ_TERMINATED, &isci_request->flags)) { /* The best we can do is to have the * request die a silent death if it @@ -1098,9 +1098,8 @@ int isci_task_abort_task(struct sas_task *task) ret = TMF_RESP_FUNC_COMPLETE; goto out; } - if ((task->task_proto == SAS_PROTOCOL_SMP) - || old_request->complete_in_target - ) { + if (task->task_proto == SAS_PROTOCOL_SMP || + test_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags)) { spin_unlock_irqrestore(&isci_host->scic_lock, flags); @@ -1108,7 +1107,7 @@ int isci_task_abort_task(struct sas_task *task) "%s: SMP request (%d)" " or complete_in_target (%d), thus no TMF\n", __func__, (task->task_proto == SAS_PROTOCOL_SMP), - old_request->complete_in_target); + test_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags)); /* Set the state on the task. */ isci_task_all_done(task); @@ -1136,7 +1135,7 @@ int isci_task_abort_task(struct sas_task *task) __func__); } if (ret == TMF_RESP_FUNC_COMPLETE) { - old_request->complete_in_target = true; + set_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags); /* Clean up the request on our side, and wait for the aborted * I/O to complete. 
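To make the conversion pattern explicit: each former bool becomes a bit number within the single unsigned long flags word and is manipulated with the kernel's set_bit()/clear_bit()/test_bit() helpers. A standalone sketch follows; the struct and function names are invented for illustration, while the IREQ_* values mirror the request.h hunk above.

#include <linux/bitops.h>
#include <linux/types.h>

#define IREQ_COMPLETE_IN_TARGET 0       /* bit numbers, not masks */
#define IREQ_TERMINATED         1
#define IREQ_TMF                2

struct sketch_request {
        unsigned long flags;            /* one word replaces three bools */
};

static void sketch_mark_terminated(struct sketch_request *ireq)
{
        set_bit(IREQ_TERMINATED, &ireq->flags);         /* was: ireq->terminated = true */
}

static bool sketch_is_tmf(struct sketch_request *ireq)
{
        return test_bit(IREQ_TMF, &ireq->flags);        /* was: is_task_management_request */
}

The commit message argues only the size point (the request does not grow thanks to packing); as a side note, set_bit()/clear_bit()/test_bit() are atomic on the word, so independent flags can be updated from different contexts without extending the existing locking.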
@@ -1252,7 +1251,7 @@ isci_task_request_complete(struct isci_host *ihost, isci_request_change_state(ireq, completed); tmf->status = completion_status; - ireq->complete_in_target = true; + set_bit(IREQ_COMPLETE_IN_TARGET, &ireq->flags); if (tmf->proto == SAS_PROTOCOL_SSP) { memcpy(&tmf->resp.resp_iu, @@ -1271,7 +1270,7 @@ isci_task_request_complete(struct isci_host *ihost, /* set the 'terminated' flag handle to make sure it cannot be terminated * or completed again. */ - ireq->terminated = true;; + set_bit(IREQ_TERMINATED, &ireq->flags); isci_request_change_state(ireq, unallocated); list_del_init(&ireq->dev_node); -- cgit v1.2.3-70-g09d2 From db0562509800a2d4cb5cb14a66413c30484f165c Mon Sep 17 00:00:00 2001 From: Dan Williams <dan.j.williams@intel.com> Date: Fri, 17 Jun 2011 14:18:39 -0700 Subject: isci: preallocate requests the dma_pool interface is optimized for object_size << page_size which is not the case with isci_request objects and the dma_pool routines show up in the top of the profile. The old io_request_table which tracked whether tci slots were in-flight or not is replaced with an IREQ_ACTIVE flag per request. Signed-off-by: Dan Williams <dan.j.williams@intel.com> --- drivers/scsi/isci/host.c | 70 ++++++++++++++++-------------- drivers/scsi/isci/host.h | 11 +---- drivers/scsi/isci/remote_device.c | 9 ++-- drivers/scsi/isci/request.c | 89 ++++++++++++--------------------------- drivers/scsi/isci/request.h | 26 +++--------- drivers/scsi/isci/task.c | 19 +++------ 6 files changed, 85 insertions(+), 139 deletions(-) (limited to 'drivers/scsi/isci/request.h') diff --git a/drivers/scsi/isci/host.c b/drivers/scsi/isci/host.c index c99fab53dd0..0884ae3253e 100644 --- a/drivers/scsi/isci/host.c +++ b/drivers/scsi/isci/host.c @@ -255,14 +255,14 @@ static bool scic_sds_controller_error_isr(struct scic_sds_controller *scic) static void scic_sds_controller_task_completion(struct scic_sds_controller *scic, u32 completion_entry) { - u32 index; - struct scic_sds_request *sci_req; - - index = SCU_GET_COMPLETION_INDEX(completion_entry); - sci_req = scic->io_request_table[index]; + u32 index = SCU_GET_COMPLETION_INDEX(completion_entry); + struct isci_host *ihost = scic_to_ihost(scic); + struct isci_request *ireq = ihost->reqs[index]; + struct scic_sds_request *sci_req = &ireq->sci; /* Make sure that we really want to process this IO request */ - if (sci_req && sci_req->io_tag != SCI_CONTROLLER_INVALID_IO_TAG && + if (test_bit(IREQ_ACTIVE, &ireq->flags) && + sci_req->io_tag != SCI_CONTROLLER_INVALID_IO_TAG && ISCI_TAG_SEQ(sci_req->io_tag) == scic->io_request_sequence[index]) /* Yep this is a valid io request pass it along to the io request handler */ scic_sds_io_request_tc_completion(sci_req, completion_entry); @@ -280,7 +280,7 @@ static void scic_sds_controller_sdma_completion(struct scic_sds_controller *scic switch (scu_get_command_request_type(completion_entry)) { case SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC: case SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_TC: - io_request = scic->io_request_table[index]; + io_request = &scic_to_ihost(scic)->reqs[index]->sci; dev_warn(scic_to_dev(scic), "%s: SCIC SDS Completion type SDMA %x for io request " "%p\n", @@ -418,7 +418,7 @@ static void scic_sds_controller_event_completion(struct scic_sds_controller *sci break; case SCU_EVENT_TYPE_TRANSPORT_ERROR: - io_request = scic->io_request_table[index]; + io_request = &ihost->reqs[index]->sci; scic_sds_io_request_event_handler(io_request, completion_entry); break; @@ -426,7 +426,7 @@ static void 
scic_sds_controller_event_completion(struct scic_sds_controller *sci switch (scu_get_event_specifier(completion_entry)) { case SCU_EVENT_SPECIFIC_SMP_RESPONSE_NO_PE: case SCU_EVENT_SPECIFIC_TASK_TIMEOUT: - io_request = scic->io_request_table[index]; + io_request = &ihost->reqs[index]->sci; if (io_request != NULL) scic_sds_io_request_event_handler(io_request, completion_entry); else @@ -1187,9 +1187,6 @@ static void isci_host_completion_routine(unsigned long data) spin_lock_irq(&isci_host->scic_lock); isci_free_tag(isci_host, request->sci.io_tag); spin_unlock_irq(&isci_host->scic_lock); - - /* Free the request object. */ - isci_request_free(isci_host, request); } list_for_each_entry_safe(request, next_request, &errored_request_list, completed_node) { @@ -1227,9 +1224,6 @@ static void isci_host_completion_routine(unsigned long data) list_del_init(&request->dev_node); isci_free_tag(isci_host, request->sci.io_tag); spin_unlock_irq(&isci_host->scic_lock); - - /* Free the request object. */ - isci_request_free(isci_host, request); } } @@ -2469,13 +2463,6 @@ int isci_host_init(struct isci_host *isci_host) if (err) return err; - isci_host->dma_pool = dmam_pool_create(DRV_NAME, &isci_host->pdev->dev, - sizeof(struct isci_request), - SLAB_HWCACHE_ALIGN, 0); - - if (!isci_host->dma_pool) - return -ENOMEM; - for (i = 0; i < SCI_MAX_PORTS; i++) isci_port_init(&isci_host->ports[i], isci_host, i); @@ -2489,6 +2476,25 @@ int isci_host_init(struct isci_host *isci_host) INIT_LIST_HEAD(&idev->node); } + for (i = 0; i < SCI_MAX_IO_REQUESTS; i++) { + struct isci_request *ireq; + dma_addr_t dma; + + ireq = dmam_alloc_coherent(&isci_host->pdev->dev, + sizeof(struct isci_request), &dma, + GFP_KERNEL); + if (!ireq) + return -ENOMEM; + + ireq->sci.tc = &isci_host->sci.task_context_table[i]; + ireq->sci.owning_controller = &isci_host->sci; + spin_lock_init(&ireq->state_lock); + ireq->request_daddr = dma; + ireq->isci_host = isci_host; + + isci_host->reqs[i] = ireq; + } + return 0; } @@ -2602,12 +2608,13 @@ struct scic_sds_request *scic_request_by_tag(struct scic_sds_controller *scic, u task_index = ISCI_TAG_TCI(io_tag); if (task_index < scic->task_context_entries) { - if (scic->io_request_table[task_index] != NULL) { + struct isci_request *ireq = scic_to_ihost(scic)->reqs[task_index]; + + if (test_bit(IREQ_ACTIVE, &ireq->flags)) { task_sequence = ISCI_TAG_SEQ(io_tag); - if (task_sequence == scic->io_request_sequence[task_index]) { - return scic->io_request_table[task_index]; - } + if (task_sequence == scic->io_request_sequence[task_index]) + return &ireq->sci; } } @@ -2820,7 +2827,7 @@ enum sci_status scic_controller_start_io(struct scic_sds_controller *scic, if (status != SCI_SUCCESS) return status; - scic->io_request_table[ISCI_TAG_TCI(req->io_tag)] = req; + set_bit(IREQ_ACTIVE, &sci_req_to_ireq(req)->flags); scic_sds_controller_post_request(scic, scic_sds_request_get_post_context(req)); return SCI_SUCCESS; } @@ -2897,7 +2904,7 @@ enum sci_status scic_controller_complete_io( return status; index = ISCI_TAG_TCI(request->io_tag); - scic->io_request_table[index] = NULL; + clear_bit(IREQ_ACTIVE, &sci_req_to_ireq(request)->flags); return SCI_SUCCESS; default: dev_warn(scic_to_dev(scic), "invalid state to complete I/O"); @@ -2915,7 +2922,7 @@ enum sci_status scic_controller_continue_io(struct scic_sds_request *sci_req) return SCI_FAILURE_INVALID_STATE; } - scic->io_request_table[ISCI_TAG_TCI(sci_req->io_tag)] = sci_req; + set_bit(IREQ_ACTIVE, &sci_req_to_ireq(sci_req)->flags); scic_sds_controller_post_request(scic, 
scic_sds_request_get_post_context(sci_req)); return SCI_SUCCESS; } @@ -2934,6 +2941,7 @@ enum sci_task_status scic_controller_start_task( struct scic_sds_remote_device *rdev, struct scic_sds_request *req) { + struct isci_request *ireq = sci_req_to_ireq(req); enum sci_status status; if (scic->sm.current_state_id != SCIC_READY) { @@ -2947,7 +2955,7 @@ enum sci_task_status scic_controller_start_task( status = scic_sds_remote_device_start_task(scic, rdev, req); switch (status) { case SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS: - scic->io_request_table[ISCI_TAG_TCI(req->io_tag)] = req; + set_bit(IREQ_ACTIVE, &ireq->flags); /* * We will let framework know this task request started successfully, @@ -2956,7 +2964,7 @@ enum sci_task_status scic_controller_start_task( */ return SCI_SUCCESS; case SCI_SUCCESS: - scic->io_request_table[ISCI_TAG_TCI(req->io_tag)] = req; + set_bit(IREQ_ACTIVE, &ireq->flags); scic_sds_controller_post_request(scic, scic_sds_request_get_post_context(req)); diff --git a/drivers/scsi/isci/host.h b/drivers/scsi/isci/host.h index d8164f5d798..446fade19b3 100644 --- a/drivers/scsi/isci/host.h +++ b/drivers/scsi/isci/host.h @@ -165,14 +165,6 @@ struct scic_sds_controller { */ struct scic_sds_remote_device *device_table[SCI_MAX_REMOTE_DEVICES]; - /** - * This field is the array of IO request objects that are currently active for - * this controller object. This table is used as a fast lookup of the io - * request object that need to handle completion queue notifications. The - * table is TCi based. - */ - struct scic_sds_request *io_request_table[SCI_MAX_IO_REQUESTS]; - /** * This field is the free RNi data structure */ @@ -298,7 +290,6 @@ struct isci_host { union scic_oem_parameters oem_parameters; int id; /* unique within a given pci device */ - struct dma_pool *dma_pool; struct isci_phy phys[SCI_MAX_PHYS]; struct isci_port ports[SCI_MAX_PORTS + 1]; /* includes dummy port */ struct sas_ha_struct sas_ha; @@ -315,7 +306,7 @@ struct isci_host { struct list_head requests_to_complete; struct list_head requests_to_errorback; spinlock_t scic_lock; - + struct isci_request *reqs[SCI_MAX_IO_REQUESTS]; struct isci_remote_device devices[SCI_MAX_REMOTE_DEVICES]; }; diff --git a/drivers/scsi/isci/remote_device.c b/drivers/scsi/isci/remote_device.c index c5ce0f0f364..5a86bb1e96d 100644 --- a/drivers/scsi/isci/remote_device.c +++ b/drivers/scsi/isci/remote_device.c @@ -136,16 +136,19 @@ static void rnc_destruct_done(void *_dev) static enum sci_status scic_sds_remote_device_terminate_requests(struct scic_sds_remote_device *sci_dev) { struct scic_sds_controller *scic = sci_dev->owning_port->owning_controller; + struct isci_host *ihost = scic_to_ihost(scic); u32 i, request_count = sci_dev->started_request_count; enum sci_status status = SCI_SUCCESS; for (i = 0; i < SCI_MAX_IO_REQUESTS && i < request_count; i++) { - struct scic_sds_request *sci_req; + struct isci_request *ireq = ihost->reqs[i]; + struct scic_sds_request *sci_req = &ireq->sci; enum sci_status s; - sci_req = scic->io_request_table[i]; - if (!sci_req || sci_req->target_device != sci_dev) + if (!test_bit(IREQ_ACTIVE, &ireq->flags) || + sci_req->target_device != sci_dev) continue; + s = scic_controller_terminate_request(scic, sci_dev, sci_req); if (s != SCI_SUCCESS) status = s; diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c index 27376ba2248..3c7ed4e61b4 100644 --- a/drivers/scsi/isci/request.c +++ b/drivers/scsi/isci/request.c @@ -3017,13 +3017,10 @@ static const struct sci_base_state 
scic_sds_request_state_table[] = { static void scic_sds_general_request_construct(struct scic_sds_controller *scic, struct scic_sds_remote_device *sci_dev, - u16 io_tag, struct scic_sds_request *sci_req) { sci_init_sm(&sci_req->sm, scic_sds_request_state_table, SCI_REQ_INIT); - sci_req->io_tag = io_tag; - sci_req->owning_controller = scic; sci_req->target_device = sci_dev; sci_req->protocol = SCIC_NO_PROTOCOL; sci_req->saved_rx_frame_index = SCU_INVALID_FRAME_INDEX; @@ -3031,20 +3028,18 @@ scic_sds_general_request_construct(struct scic_sds_controller *scic, sci_req->sci_status = SCI_SUCCESS; sci_req->scu_status = 0; sci_req->post_context = 0xFFFFFFFF; - sci_req->tc = &scic->task_context_table[ISCI_TAG_TCI(io_tag)]; - WARN_ONCE(io_tag == SCI_CONTROLLER_INVALID_IO_TAG, "straggling invalid tag usage\n"); } static enum sci_status scic_io_request_construct(struct scic_sds_controller *scic, struct scic_sds_remote_device *sci_dev, - u16 io_tag, struct scic_sds_request *sci_req) + struct scic_sds_request *sci_req) { struct domain_device *dev = sci_dev_to_domain(sci_dev); enum sci_status status = SCI_SUCCESS; /* Build the common part of the request */ - scic_sds_general_request_construct(scic, sci_dev, io_tag, sci_req); + scic_sds_general_request_construct(scic, sci_dev, sci_req); if (sci_dev->rnc.remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) return SCI_FAILURE_INVALID_REMOTE_DEVICE; @@ -3071,7 +3066,7 @@ enum sci_status scic_task_request_construct(struct scic_sds_controller *scic, enum sci_status status = SCI_SUCCESS; /* Build the common part of the request */ - scic_sds_general_request_construct(scic, sci_dev, io_tag, sci_req); + scic_sds_general_request_construct(scic, sci_dev, sci_req); if (dev->dev_type == SAS_END_DEV || dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) { @@ -3291,8 +3286,7 @@ static enum sci_status isci_smp_request_build(struct isci_request *ireq) */ static enum sci_status isci_io_request_build(struct isci_host *isci_host, struct isci_request *request, - struct isci_remote_device *isci_device, - u16 tag) + struct isci_remote_device *isci_device) { enum sci_status status = SCI_SUCCESS; struct sas_task *task = isci_request_access_task(request); @@ -3325,11 +3319,8 @@ static enum sci_status isci_io_request_build(struct isci_host *isci_host, return SCI_FAILURE_INSUFFICIENT_RESOURCES; } - /* build the common request object. For now, - * we will let the core allocate the IO tag. - */ status = scic_io_request_construct(&isci_host->sci, sci_device, - tag, &request->sci); + &request->sci); if (status != SCI_SUCCESS) { dev_warn(&isci_host->pdev->dev, @@ -3359,65 +3350,51 @@ static enum sci_status isci_io_request_build(struct isci_host *isci_host, return SCI_SUCCESS; } -static struct isci_request *isci_request_alloc_core(struct isci_host *ihost, - gfp_t gfp_flags) +static struct isci_request *isci_request_from_tag(struct isci_host *ihost, u16 tag) { - dma_addr_t handle; struct isci_request *ireq; - ireq = dma_pool_alloc(ihost->dma_pool, gfp_flags, &handle); - if (!ireq) { - dev_warn(&ihost->pdev->dev, - "%s: dma_pool_alloc returned NULL\n", __func__); - return NULL; - } - - /* initialize the request object. 
*/ - spin_lock_init(&ireq->state_lock); - ireq->request_daddr = handle; - ireq->isci_host = ihost; + ireq = ihost->reqs[ISCI_TAG_TCI(tag)]; + ireq->sci.io_tag = tag; ireq->io_request_completion = NULL; ireq->flags = 0; ireq->num_sg_entries = 0; INIT_LIST_HEAD(&ireq->completed_node); INIT_LIST_HEAD(&ireq->dev_node); - isci_request_change_state(ireq, allocated); return ireq; } -static struct isci_request *isci_request_alloc_io(struct isci_host *ihost, - struct sas_task *task, - gfp_t gfp_flags) +static struct isci_request *isci_io_request_from_tag(struct isci_host *ihost, + struct sas_task *task, + u16 tag) { struct isci_request *ireq; - ireq = isci_request_alloc_core(ihost, gfp_flags); - if (ireq) { - ireq->ttype_ptr.io_task_ptr = task; - ireq->ttype = io_task; - task->lldd_task = ireq; - } + ireq = isci_request_from_tag(ihost, tag); + ireq->ttype_ptr.io_task_ptr = task; + ireq->ttype = io_task; + task->lldd_task = ireq; + return ireq; } -struct isci_request *isci_request_alloc_tmf(struct isci_host *ihost, - struct isci_tmf *isci_tmf, - gfp_t gfp_flags) +struct isci_request *isci_tmf_request_from_tag(struct isci_host *ihost, + struct isci_tmf *isci_tmf, + u16 tag) { struct isci_request *ireq; - ireq = isci_request_alloc_core(ihost, gfp_flags); - if (ireq) { - ireq->ttype_ptr.tmf_task_ptr = isci_tmf; - ireq->ttype = tmf_task; - } + ireq = isci_request_from_tag(ihost, tag); + ireq->ttype_ptr.tmf_task_ptr = isci_tmf; + ireq->ttype = tmf_task; + return ireq; } int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *idev, - struct sas_task *task, u16 tag, gfp_t gfp_flags) + struct sas_task *task, u16 tag) { enum sci_status status = SCI_FAILURE_UNSUPPORTED_PROTOCOL; struct isci_request *ireq; @@ -3425,17 +3402,15 @@ int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *ide int ret = 0; /* do common allocation and init of request object. */ - ireq = isci_request_alloc_io(ihost, task, gfp_flags); - if (!ireq) - goto out; + ireq = isci_io_request_from_tag(ihost, task, tag); - status = isci_io_request_build(ihost, ireq, idev, tag); + status = isci_io_request_build(ihost, ireq, idev); if (status != SCI_SUCCESS) { dev_warn(&ihost->pdev->dev, "%s: request_construct failed - status = 0x%x\n", __func__, status); - goto out; + return status; } spin_lock_irqsave(&ihost->scic_lock, flags); @@ -3468,7 +3443,7 @@ int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *ide "%s: failed request start (0x%x)\n", __func__, status); spin_unlock_irqrestore(&ihost->scic_lock, flags); - goto out; + return status; } /* Either I/O started OK, or the core has signaled that @@ -3518,13 +3493,5 @@ int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *ide status = SCI_SUCCESS; } - out: - if (status != SCI_SUCCESS) { - /* release dma memory on failure. */ - isci_request_free(ihost, ireq); - ireq = NULL; - ret = SCI_FAILURE; - } - return ret; } diff --git a/drivers/scsi/isci/request.h b/drivers/scsi/isci/request.h index f440e421ea0..7628decbd53 100644 --- a/drivers/scsi/isci/request.h +++ b/drivers/scsi/isci/request.h @@ -257,6 +257,7 @@ struct isci_request { #define IREQ_COMPLETE_IN_TARGET 0 #define IREQ_TERMINATED 1 #define IREQ_TMF 2 + #define IREQ_ACTIVE 3 unsigned long flags; union ttype_ptr_union { @@ -590,33 +591,16 @@ isci_request_change_started_to_aborted(struct isci_request *isci_request, completion_ptr, aborted); } -/** - * isci_request_free() - This function frees the request object. 
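The pattern above, in brief: the per-I/O dma_pool allocation is gone, every task-context index owns a request preallocated at init time, and "is this slot in flight" becomes a bit in the request's flag word instead of a non-NULL entry in scic->io_request_table[]. A minimal standalone sketch of that arrangement, using hypothetical names (MY_MAX_REQS, struct my_req, MY_REQ_ACTIVE), not the driver's exact code:

#include <linux/bitops.h>
#include <linux/types.h>

#define MY_MAX_REQS	256
#define MY_REQ_ACTIVE	0	/* bit number, in the spirit of IREQ_ACTIVE */

struct my_req {
	u16 io_tag;
	unsigned long flags;	/* one word of per-request state bits */
};

struct my_host {
	struct my_req *reqs[MY_MAX_REQS];	/* filled once at init */
};

/* Replaces the old "table[tci] = req / NULL" bookkeeping. */
static void my_req_mark_active(struct my_req *req)
{
	set_bit(MY_REQ_ACTIVE, &req->flags);
}

static void my_req_mark_idle(struct my_req *req)
{
	clear_bit(MY_REQ_ACTIVE, &req->flags);
}

/* Fast completion-time lookup: index straight into the preallocated
 * array and test the active bit instead of checking a table slot. */
static struct my_req *my_req_by_tci(struct my_host *h, u16 tci)
{
	struct my_req *req = h->reqs[tci];

	return test_bit(MY_REQ_ACTIVE, &req->flags) ? req : NULL;
}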
- * @isci_host: This parameter specifies the ISCI host object - * @isci_request: This parameter points to the isci_request object - * - */ -static inline void isci_request_free(struct isci_host *isci_host, - struct isci_request *isci_request) -{ - if (!isci_request) - return; - - /* release the dma memory if we fail. */ - dma_pool_free(isci_host->dma_pool, - isci_request, - isci_request->request_daddr); -} #define isci_request_access_task(req) ((req)->ttype_ptr.io_task_ptr) #define isci_request_access_tmf(req) ((req)->ttype_ptr.tmf_task_ptr) -struct isci_request *isci_request_alloc_tmf(struct isci_host *ihost, - struct isci_tmf *isci_tmf, - gfp_t gfp_flags); +struct isci_request *isci_tmf_request_from_tag(struct isci_host *ihost, + struct isci_tmf *isci_tmf, + u16 tag); int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *idev, - struct sas_task *task, u16 tag, gfp_t gfp_flags); + struct sas_task *task, u16 tag); void isci_terminate_pending_requests(struct isci_host *ihost, struct isci_remote_device *idev); enum sci_status diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c index d1a46710f4a..d2dba835489 100644 --- a/drivers/scsi/isci/task.c +++ b/drivers/scsi/isci/task.c @@ -203,7 +203,7 @@ int isci_task_execute_task(struct sas_task *task, int num, gfp_t gfp_flags) spin_unlock_irqrestore(&task->task_state_lock, flags); /* build and send the request. */ - status = isci_request_execute(ihost, idev, task, tag, gfp_flags); + status = isci_request_execute(ihost, idev, task, tag); if (status != SCI_SUCCESS) { @@ -252,7 +252,7 @@ static struct isci_request *isci_task_request_build(struct isci_host *ihost, dev = idev->domain_dev; /* do common allocation and init of request object. */ - ireq = isci_request_alloc_tmf(ihost, isci_tmf, GFP_ATOMIC); + ireq = isci_tmf_request_from_tag(ihost, isci_tmf, tag); if (!ireq) return NULL; @@ -266,7 +266,7 @@ static struct isci_request *isci_task_request_build(struct isci_host *ihost, "status = 0x%x\n", __func__, status); - goto errout; + return NULL; } /* XXX convert to get this from task->tproto like other drivers */ @@ -274,7 +274,7 @@ static struct isci_request *isci_task_request_build(struct isci_host *ihost, isci_tmf->proto = SAS_PROTOCOL_SSP; status = scic_task_request_construct_ssp(&ireq->sci); if (status != SCI_SUCCESS) - goto errout; + return NULL; } if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) { @@ -282,12 +282,9 @@ static struct isci_request *isci_task_request_build(struct isci_host *ihost, status = isci_sata_management_task_request_build(ireq); if (status != SCI_SUCCESS) - goto errout; + return NULL; } return ireq; - errout: - isci_request_free(ihost, ireq); - return NULL; } int isci_task_execute_tmf(struct isci_host *ihost, @@ -349,7 +346,7 @@ int isci_task_execute_tmf(struct isci_host *ihost, status, ireq); spin_unlock_irqrestore(&ihost->scic_lock, flags); - goto err_ireq; + goto err_tci; } if (tmf->cb_state_func != NULL) @@ -401,8 +398,6 @@ int isci_task_execute_tmf(struct isci_host *ihost, return ret; - err_ireq: - isci_request_free(ihost, ireq); err_tci: spin_lock_irqsave(&ihost->scic_lock, flags); isci_tci_free(ihost, ISCI_TAG_TCI(tag)); @@ -516,8 +511,6 @@ static void isci_request_cleanup_completed_loiterer( spin_lock_irqsave(&isci_host->scic_lock, flags); list_del_init(&isci_request->dev_node); spin_unlock_irqrestore(&isci_host->scic_lock, flags); - - isci_request_free(isci_host, isci_request); } } -- cgit v1.2.3-70-g09d2 From ba7cb22342a66505a831bb7e4541fef90e0193c9 Mon Sep 17 
00:00:00 2001 From: Dan Williams <dan.j.williams@intel.com> Date: Mon, 27 Jun 2011 11:56:41 -0700 Subject: isci: rename / clean up scic_sds_stp_request * Rename scic_sds_stp_request to isci_stp_request * Remove the unused fields and union indirection Reported-by: Christoph Hellwig <hch@lst.de> Signed-off-by: Dan Williams <dan.j.williams@intel.com> --- drivers/scsi/isci/request.c | 147 ++++++++++++++++++++------------------------ drivers/scsi/isci/request.h | 84 +++++++------------------ 2 files changed, 89 insertions(+), 142 deletions(-) (limited to 'drivers/scsi/isci/request.h') diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c index 3c7ed4e61b4..8520626b02f 100644 --- a/drivers/scsi/isci/request.c +++ b/drivers/scsi/isci/request.c @@ -433,24 +433,20 @@ static enum sci_status scic_sds_stp_pio_request_construct(struct scic_sds_request *sci_req, bool copy_rx_frame) { - struct scic_sds_stp_request *stp_req = &sci_req->stp.req; - struct scic_sds_stp_pio_request *pio = &stp_req->type.pio; + struct isci_stp_request *stp_req = &sci_req->stp.req; scu_stp_raw_request_construct_task_context(sci_req); - pio->current_transfer_bytes = 0; - pio->ending_error = 0; - pio->ending_status = 0; - - pio->request_current.sgl_offset = 0; - pio->request_current.sgl_set = SCU_SGL_ELEMENT_PAIR_A; + stp_req->status = 0; + stp_req->sgl.offset = 0; + stp_req->sgl.set = SCU_SGL_ELEMENT_PAIR_A; if (copy_rx_frame) { scic_sds_request_build_sgl(sci_req); - pio->request_current.sgl_index = 0; + stp_req->sgl.index = 0; } else { /* The user does not want the data copied to the SGL buffer location */ - pio->request_current.sgl_index = -1; + stp_req->sgl.index = -1; } return SCI_SUCCESS; @@ -1151,22 +1147,22 @@ void scic_stp_io_request_set_ncq_tag(struct scic_sds_request *req, req->tc->type.stp.ncq_tag = ncq_tag; } -static struct scu_sgl_element *pio_sgl_next(struct scic_sds_stp_request *stp_req) +static struct scu_sgl_element *pio_sgl_next(struct isci_stp_request *stp_req) { struct scu_sgl_element *sgl; struct scu_sgl_element_pair *sgl_pair; struct scic_sds_request *sci_req = to_sci_req(stp_req); - struct scic_sds_request_pio_sgl *pio_sgl = &stp_req->type.pio.request_current; + struct isci_stp_pio_sgl *pio_sgl = &stp_req->sgl; - sgl_pair = to_sgl_element_pair(sci_req, pio_sgl->sgl_index); + sgl_pair = to_sgl_element_pair(sci_req, pio_sgl->index); if (!sgl_pair) sgl = NULL; - else if (pio_sgl->sgl_set == SCU_SGL_ELEMENT_PAIR_A) { + else if (pio_sgl->set == SCU_SGL_ELEMENT_PAIR_A) { if (sgl_pair->B.address_lower == 0 && sgl_pair->B.address_upper == 0) { sgl = NULL; } else { - pio_sgl->sgl_set = SCU_SGL_ELEMENT_PAIR_B; + pio_sgl->set = SCU_SGL_ELEMENT_PAIR_B; sgl = &sgl_pair->B; } } else { @@ -1174,9 +1170,9 @@ static struct scu_sgl_element *pio_sgl_next(struct scic_sds_stp_request *stp_req sgl_pair->next_pair_upper == 0) { sgl = NULL; } else { - pio_sgl->sgl_index++; - pio_sgl->sgl_set = SCU_SGL_ELEMENT_PAIR_A; - sgl_pair = to_sgl_element_pair(sci_req, pio_sgl->sgl_index); + pio_sgl->index++; + pio_sgl->set = SCU_SGL_ELEMENT_PAIR_A; + sgl_pair = to_sgl_element_pair(sci_req, pio_sgl->index); sgl = &sgl_pair->A; } } @@ -1221,7 +1217,7 @@ static enum sci_status scic_sds_stp_request_pio_data_out_trasmit_data_frame( struct scic_sds_request *sci_req, u32 length) { - struct scic_sds_stp_request *stp_req = &sci_req->stp.req; + struct isci_stp_request *stp_req = &sci_req->stp.req; struct scu_task_context *task_context = sci_req->tc; struct scu_sgl_element_pair *sgl_pair; struct scu_sgl_element *current_sgl; @@ 
-1229,8 +1225,8 @@ static enum sci_status scic_sds_stp_request_pio_data_out_trasmit_data_frame( /* Recycle the TC and reconstruct it for sending out DATA FIS containing * for the data from current_sgl+offset for the input length */ - sgl_pair = to_sgl_element_pair(sci_req, stp_req->type.pio.request_current.sgl_index); - if (stp_req->type.pio.request_current.sgl_set == SCU_SGL_ELEMENT_PAIR_A) + sgl_pair = to_sgl_element_pair(sci_req, stp_req->sgl.index); + if (stp_req->sgl.set == SCU_SGL_ELEMENT_PAIR_A) current_sgl = &sgl_pair->A; else current_sgl = &sgl_pair->B; @@ -1247,54 +1243,48 @@ static enum sci_status scic_sds_stp_request_pio_data_out_trasmit_data_frame( static enum sci_status scic_sds_stp_request_pio_data_out_transmit_data(struct scic_sds_request *sci_req) { - - struct scu_sgl_element *current_sgl; - u32 sgl_offset; - u32 remaining_bytes_in_current_sgl = 0; - enum sci_status status = SCI_SUCCESS; - struct scic_sds_stp_request *stp_req = &sci_req->stp.req; + struct isci_stp_request *stp_req = &sci_req->stp.req; struct scu_sgl_element_pair *sgl_pair; + struct scu_sgl_element *sgl; + enum sci_status status; + u32 offset; + u32 len = 0; - sgl_offset = stp_req->type.pio.request_current.sgl_offset; - sgl_pair = to_sgl_element_pair(sci_req, stp_req->type.pio.request_current.sgl_index); + offset = stp_req->sgl.offset; + sgl_pair = to_sgl_element_pair(sci_req, stp_req->sgl.index); if (WARN_ONCE(!sgl_pair, "%s: null sgl element", __func__)) return SCI_FAILURE; - if (stp_req->type.pio.request_current.sgl_set == SCU_SGL_ELEMENT_PAIR_A) { - current_sgl = &sgl_pair->A; - remaining_bytes_in_current_sgl = sgl_pair->A.length - sgl_offset; + if (stp_req->sgl.set == SCU_SGL_ELEMENT_PAIR_A) { + sgl = &sgl_pair->A; + len = sgl_pair->A.length - offset; } else { - current_sgl = &sgl_pair->B; - remaining_bytes_in_current_sgl = sgl_pair->B.length - sgl_offset; + sgl = &sgl_pair->B; + len = sgl_pair->B.length - offset; } - if (stp_req->type.pio.pio_transfer_bytes > 0) { - if (stp_req->type.pio.pio_transfer_bytes >= remaining_bytes_in_current_sgl) { - /* recycle the TC and send the H2D Data FIS from (current sgl + sgl_offset) and length = remaining_bytes_in_current_sgl */ - status = scic_sds_stp_request_pio_data_out_trasmit_data_frame(sci_req, remaining_bytes_in_current_sgl); - if (status == SCI_SUCCESS) { - stp_req->type.pio.pio_transfer_bytes -= remaining_bytes_in_current_sgl; - - /* update the current sgl, sgl_offset and save for future */ - current_sgl = pio_sgl_next(stp_req); - sgl_offset = 0; - } - } else if (stp_req->type.pio.pio_transfer_bytes < remaining_bytes_in_current_sgl) { - /* recycle the TC and send the H2D Data FIS from (current sgl + sgl_offset) and length = type.pio.pio_transfer_bytes */ - scic_sds_stp_request_pio_data_out_trasmit_data_frame(sci_req, stp_req->type.pio.pio_transfer_bytes); + if (stp_req->pio_len == 0) + return SCI_SUCCESS; - if (status == SCI_SUCCESS) { - /* Sgl offset will be adjusted and saved for future */ - sgl_offset += stp_req->type.pio.pio_transfer_bytes; - current_sgl->address_lower += stp_req->type.pio.pio_transfer_bytes; - stp_req->type.pio.pio_transfer_bytes = 0; - } - } + if (stp_req->pio_len >= len) { + status = scic_sds_stp_request_pio_data_out_trasmit_data_frame(sci_req, len); + if (status != SCI_SUCCESS) + return status; + stp_req->pio_len -= len; + + /* update the current sgl, offset and save for future */ + sgl = pio_sgl_next(stp_req); + offset = 0; + } else if (stp_req->pio_len < len) { + scic_sds_stp_request_pio_data_out_trasmit_data_frame(sci_req, 
stp_req->pio_len); + + /* Sgl offset will be adjusted and saved for future */ + offset += stp_req->pio_len; + sgl->address_lower += stp_req->pio_len; + stp_req->pio_len = 0; } - if (status == SCI_SUCCESS) { - stp_req->type.pio.request_current.sgl_offset = sgl_offset; - } + stp_req->sgl.offset = offset; return status; } @@ -1309,7 +1299,7 @@ static enum sci_status scic_sds_stp_request_pio_data_out_transmit_data(struct sc * specified data region. enum sci_status */ static enum sci_status -scic_sds_stp_request_pio_data_in_copy_data_buffer(struct scic_sds_stp_request *stp_req, +scic_sds_stp_request_pio_data_in_copy_data_buffer(struct isci_stp_request *stp_req, u8 *data_buf, u32 len) { struct scic_sds_request *sci_req; @@ -1356,7 +1346,7 @@ scic_sds_stp_request_pio_data_in_copy_data_buffer(struct scic_sds_stp_request *s * Copy the data buffer to the io request data region. enum sci_status */ static enum sci_status scic_sds_stp_request_pio_data_in_copy_data( - struct scic_sds_stp_request *sci_req, + struct isci_stp_request *stp_req, u8 *data_buffer) { enum sci_status status; @@ -1364,19 +1354,19 @@ static enum sci_status scic_sds_stp_request_pio_data_in_copy_data( /* * If there is less than 1K remaining in the transfer request * copy just the data for the transfer */ - if (sci_req->type.pio.pio_transfer_bytes < SCU_MAX_FRAME_BUFFER_SIZE) { + if (stp_req->pio_len < SCU_MAX_FRAME_BUFFER_SIZE) { status = scic_sds_stp_request_pio_data_in_copy_data_buffer( - sci_req, data_buffer, sci_req->type.pio.pio_transfer_bytes); + stp_req, data_buffer, stp_req->pio_len); if (status == SCI_SUCCESS) - sci_req->type.pio.pio_transfer_bytes = 0; + stp_req->pio_len = 0; } else { /* We are transfering the whole frame so copy */ status = scic_sds_stp_request_pio_data_in_copy_data_buffer( - sci_req, data_buffer, SCU_MAX_FRAME_BUFFER_SIZE); + stp_req, data_buffer, SCU_MAX_FRAME_BUFFER_SIZE); if (status == SCI_SUCCESS) - sci_req->type.pio.pio_transfer_bytes -= SCU_MAX_FRAME_BUFFER_SIZE; + stp_req->pio_len -= SCU_MAX_FRAME_BUFFER_SIZE; } return status; @@ -1419,18 +1409,18 @@ pio_data_out_tx_done_tc_event(struct scic_sds_request *sci_req, { enum sci_status status = SCI_SUCCESS; bool all_frames_transferred = false; - struct scic_sds_stp_request *stp_req = &sci_req->stp.req; + struct isci_stp_request *stp_req = &sci_req->stp.req; switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): /* Transmit data */ - if (stp_req->type.pio.pio_transfer_bytes != 0) { + if (stp_req->pio_len != 0) { status = scic_sds_stp_request_pio_data_out_transmit_data(sci_req); if (status == SCI_SUCCESS) { - if (stp_req->type.pio.pio_transfer_bytes == 0) + if (stp_req->pio_len == 0) all_frames_transferred = true; } - } else if (stp_req->type.pio.pio_transfer_bytes == 0) { + } else if (stp_req->pio_len == 0) { /* * this will happen if the all data is written at the * first time after the pio setup fis is received @@ -1507,7 +1497,7 @@ scic_sds_io_request_frame_handler(struct scic_sds_request *sci_req, u32 frame_index) { struct scic_sds_controller *scic = sci_req->owning_controller; - struct scic_sds_stp_request *stp_req = &sci_req->stp.req; + struct isci_stp_request *stp_req = &sci_req->stp.req; enum sci_base_request_states state; enum sci_status status; ssize_t word_cnt; @@ -1727,16 +1717,16 @@ scic_sds_io_request_frame_handler(struct scic_sds_request *sci_req, */ /* transfer_count: first 16bits in the 4th dword */ - stp_req->type.pio.pio_transfer_bytes = frame_buffer[3] & 0xffff; + stp_req->pio_len 
= frame_buffer[3] & 0xffff; - /* ending_status: 4th byte in the 3rd dword */ - stp_req->type.pio.ending_status = (frame_buffer[2] >> 24) & 0xff; + /* status: 4th byte in the 3rd dword */ + stp_req->status = (frame_buffer[2] >> 24) & 0xff; scic_sds_controller_copy_sata_response(&sci_req->stp.rsp, frame_header, frame_buffer); - sci_req->stp.rsp.status = stp_req->type.pio.ending_status; + sci_req->stp.rsp.status = stp_req->status; /* The next state is dependent on whether the * request was PIO Data-in or Data out @@ -1839,9 +1829,9 @@ scic_sds_io_request_frame_handler(struct scic_sds_request *sci_req, return status; } - if (stp_req->type.pio.request_current.sgl_index < 0) { + if (stp_req->sgl.index < 0) { sci_req->saved_rx_frame_index = frame_index; - stp_req->type.pio.pio_transfer_bytes = 0; + stp_req->pio_len = 0; } else { scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control, frame_index, @@ -1857,11 +1847,10 @@ scic_sds_io_request_frame_handler(struct scic_sds_request *sci_req, /* Check for the end of the transfer, are there more * bytes remaining for this data transfer */ - if (status != SCI_SUCCESS || - stp_req->type.pio.pio_transfer_bytes != 0) + if (status != SCI_SUCCESS || stp_req->pio_len != 0) return status; - if ((stp_req->type.pio.ending_status & ATA_BUSY) == 0) { + if ((stp_req->status & ATA_BUSY) == 0) { scic_sds_request_set_status(sci_req, SCU_TASK_DONE_CHECK_RESPONSE, SCI_FAILURE_IO_RESPONSE_VALID); diff --git a/drivers/scsi/isci/request.h b/drivers/scsi/isci/request.h index 7628decbd53..7fd98531d1f 100644 --- a/drivers/scsi/isci/request.h +++ b/drivers/scsi/isci/request.h @@ -89,67 +89,25 @@ enum sci_request_protocol { SCIC_STP_PROTOCOL }; /* XXX remove me, use sas_task.{dev|task_proto} instead */; -struct scic_sds_stp_request { - union { - u32 ncq; - - u32 udma; - - struct scic_sds_stp_pio_request { - /* - * Total transfer for the entire PIO request recorded - * at request constuction time. - * - * @todo Should we just decrement this value for each - * byte of data transitted or received to elemenate - * the current_transfer_bytes field? - */ - u32 total_transfer_bytes; - - /* - * Total number of bytes received/transmitted in data - * frames since the start of the IO request. At the - * end of the IO request this should equal the - * total_transfer_bytes. - */ - u32 current_transfer_bytes; - - /* - * The number of bytes requested in the in the PIO - * setup. - */ - u32 pio_transfer_bytes; - - /* - * PIO Setup ending status value to tell us if we need - * to wait for another FIS or if the transfer is - * complete. On the receipt of a D2H FIS this will be - * the status field of that FIS. - */ - u8 ending_status; - - /* - * On receipt of a D2H FIS this will be the ending - * error field if the ending_status has the - * SATA_STATUS_ERR bit set. - */ - u8 ending_error; - - struct scic_sds_request_pio_sgl { - int sgl_index; - u8 sgl_set; - u32 sgl_offset; - } request_current; - } pio; - - struct { - /* - * The number of bytes requested in the PIO setup - * before CDB data frame. - */ - u32 device_preferred_cdb_length; - } packet; - } type; +/** + * isci_stp_request - extra request infrastructure to handle pio/atapi protocol + * @pio_len - number of bytes requested at PIO setup + * @status - pio setup ending status value to tell us if we need + * to wait for another fis or if the transfer is complete. Upon + * receipt of a d2h fis this will be the status field of that fis. 
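For reference, the sgl cursor that pio_sgl_next() advances above is just {pair index, A-or-B selector, byte offset}: within a pair the walk steps from element A to element B, then moves to the next pair and starts again at A. A simplified sketch of that traversal over a plain array of pairs, with hypothetical types (struct my_sgl_pair and friends) rather than the hardware scu_sgl_element_pair layout:

#include <stddef.h>

enum my_sgl_set { MY_SGL_A, MY_SGL_B };

struct my_sgl_elem { unsigned long addr; unsigned int len; };
struct my_sgl_pair { struct my_sgl_elem A, B; };

struct my_pio_sgl {		/* mirrors the { index, set, offset } cursor */
	int index;
	enum my_sgl_set set;
	unsigned int offset;
};

/* Return the element the cursor currently selects and advance it:
 * A then B within a pair, then on to the next pair's A.  NULL ends the
 * walk (signalled here by a zero-length element or running off the end). */
static struct my_sgl_elem *my_pio_sgl_next(struct my_sgl_pair *pairs, int npairs,
					   struct my_pio_sgl *cur)
{
	struct my_sgl_elem *e;

	if (cur->index < 0 || cur->index >= npairs)
		return NULL;

	if (cur->set == MY_SGL_A) {
		e = &pairs[cur->index].A;
		cur->set = MY_SGL_B;
	} else {
		e = &pairs[cur->index].B;
		cur->set = MY_SGL_A;
		cur->index++;
	}
	cur->offset = 0;
	return e->len ? e : NULL;
}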
+ * @sgl - track pio transfer progress as we iterate through the sgl + * @device_cdb_len - atapi device advertises it's transfer constraints at setup + */ +struct isci_stp_request { + u32 pio_len; + u8 status; + + struct isci_stp_pio_sgl { + int index; + u8 set; + u32 offset; + } sgl; + u32 device_cdb_len; }; struct scic_sds_request { @@ -235,14 +193,14 @@ struct scic_sds_request { } smp; struct { - struct scic_sds_stp_request req; + struct isci_stp_request req; struct host_to_dev_fis cmd; struct dev_to_host_fis rsp; } stp; }; }; -static inline struct scic_sds_request *to_sci_req(struct scic_sds_stp_request *stp_req) +static inline struct scic_sds_request *to_sci_req(struct isci_stp_request *stp_req) { struct scic_sds_request *sci_req; -- cgit v1.2.3-70-g09d2 From 5076a1a97e2fa61c847a5fdd4b1991faf7716da6 Mon Sep 17 00:00:00 2001 From: Dan Williams <dan.j.williams@intel.com> Date: Mon, 27 Jun 2011 14:57:03 -0700 Subject: isci: unify isci_request and scic_sds_request They are one in the same object so remove the distinction. The near duplicate fields (owning_controller, and isci_host) will be cleaned up after the scic_sds_contoller isci_host unification. Reported-by: Christoph Hellwig <hch@lst.de> Signed-off-by: Dan Williams <dan.j.williams@intel.com> --- drivers/scsi/isci/host.c | 98 ++--- drivers/scsi/isci/host.h | 16 +- drivers/scsi/isci/phy.c | 2 +- drivers/scsi/isci/port.c | 4 +- drivers/scsi/isci/port.h | 6 +- drivers/scsi/isci/remote_device.c | 92 +++-- drivers/scsi/isci/remote_device.h | 8 +- drivers/scsi/isci/remote_node_context.c | 4 +- drivers/scsi/isci/remote_node_context.h | 6 +- drivers/scsi/isci/request.c | 683 ++++++++++++++++---------------- drivers/scsi/isci/request.h | 184 +++------ drivers/scsi/isci/sata.c | 9 +- drivers/scsi/isci/task.c | 17 +- 13 files changed, 514 insertions(+), 615 deletions(-) (limited to 'drivers/scsi/isci/request.h') diff --git a/drivers/scsi/isci/host.c b/drivers/scsi/isci/host.c index 0884ae3253e..d91cd6d8274 100644 --- a/drivers/scsi/isci/host.c +++ b/drivers/scsi/isci/host.c @@ -258,21 +258,20 @@ static void scic_sds_controller_task_completion(struct scic_sds_controller *scic u32 index = SCU_GET_COMPLETION_INDEX(completion_entry); struct isci_host *ihost = scic_to_ihost(scic); struct isci_request *ireq = ihost->reqs[index]; - struct scic_sds_request *sci_req = &ireq->sci; /* Make sure that we really want to process this IO request */ if (test_bit(IREQ_ACTIVE, &ireq->flags) && - sci_req->io_tag != SCI_CONTROLLER_INVALID_IO_TAG && - ISCI_TAG_SEQ(sci_req->io_tag) == scic->io_request_sequence[index]) + ireq->io_tag != SCI_CONTROLLER_INVALID_IO_TAG && + ISCI_TAG_SEQ(ireq->io_tag) == scic->io_request_sequence[index]) /* Yep this is a valid io request pass it along to the io request handler */ - scic_sds_io_request_tc_completion(sci_req, completion_entry); + scic_sds_io_request_tc_completion(ireq, completion_entry); } static void scic_sds_controller_sdma_completion(struct scic_sds_controller *scic, u32 completion_entry) { u32 index; - struct scic_sds_request *io_request; + struct isci_request *ireq; struct scic_sds_remote_device *device; index = SCU_GET_COMPLETION_INDEX(completion_entry); @@ -280,41 +279,27 @@ static void scic_sds_controller_sdma_completion(struct scic_sds_controller *scic switch (scu_get_command_request_type(completion_entry)) { case SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC: case SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_TC: - io_request = &scic_to_ihost(scic)->reqs[index]->sci; - dev_warn(scic_to_dev(scic), - "%s: SCIC SDS Completion 
type SDMA %x for io request " - "%p\n", - __func__, - completion_entry, - io_request); + ireq = scic_to_ihost(scic)->reqs[index]; + dev_warn(scic_to_dev(scic), "%s: %x for io request %p\n", + __func__, completion_entry, ireq); /* @todo For a post TC operation we need to fail the IO * request */ break; - case SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_RNC: case SCU_CONTEXT_COMMAND_REQUEST_TYPE_OTHER_RNC: case SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_RNC: device = scic->device_table[index]; - dev_warn(scic_to_dev(scic), - "%s: SCIC SDS Completion type SDMA %x for remote " - "device %p\n", - __func__, - completion_entry, - device); + dev_warn(scic_to_dev(scic), "%s: %x for device %p\n", + __func__, completion_entry, device); /* @todo For a port RNC operation we need to fail the * device */ break; - default: - dev_warn(scic_to_dev(scic), - "%s: SCIC SDS Completion unknown SDMA completion " - "type %x\n", - __func__, - completion_entry); + dev_warn(scic_to_dev(scic), "%s: unknown completion type %x\n", + __func__, completion_entry); break; - } } @@ -385,8 +370,8 @@ static void scic_sds_controller_event_completion(struct scic_sds_controller *sci u32 completion_entry) { struct isci_host *ihost = scic_to_ihost(scic); - struct scic_sds_request *io_request; struct scic_sds_remote_device *device; + struct isci_request *ireq; struct scic_sds_phy *phy; u32 index; @@ -418,17 +403,17 @@ static void scic_sds_controller_event_completion(struct scic_sds_controller *sci break; case SCU_EVENT_TYPE_TRANSPORT_ERROR: - io_request = &ihost->reqs[index]->sci; - scic_sds_io_request_event_handler(io_request, completion_entry); + ireq = ihost->reqs[index]; + scic_sds_io_request_event_handler(ireq, completion_entry); break; case SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT: switch (scu_get_event_specifier(completion_entry)) { case SCU_EVENT_SPECIFIC_SMP_RESPONSE_NO_PE: case SCU_EVENT_SPECIFIC_TASK_TIMEOUT: - io_request = &ihost->reqs[index]->sci; - if (io_request != NULL) - scic_sds_io_request_event_handler(io_request, completion_entry); + ireq = ihost->reqs[index]; + if (ireq != NULL) + scic_sds_io_request_event_handler(ireq, completion_entry); else dev_warn(scic_to_dev(scic), "%s: SCIC Controller 0x%p received " @@ -1185,7 +1170,7 @@ static void isci_host_completion_routine(unsigned long data) } spin_lock_irq(&isci_host->scic_lock); - isci_free_tag(isci_host, request->sci.io_tag); + isci_free_tag(isci_host, request->io_tag); spin_unlock_irq(&isci_host->scic_lock); } list_for_each_entry_safe(request, next_request, &errored_request_list, @@ -1222,7 +1207,7 @@ static void isci_host_completion_routine(unsigned long data) * of pending requests. 
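The ISCI_TAG_SEQ(ireq->io_tag) == scic->io_request_sequence[index] test above, and the isci_free_tag() calls in this hunk, rely on the tag carrying both a slot index and a sequence number, so a completion for a reused slot can be recognised as stale. A small sketch of that encoding with made-up widths (MY_TCI_BITS), not the driver's actual ISCI_TAG_* layout:

#include <linux/types.h>

#define MY_TCI_BITS	8
#define MY_TCI_MASK	((1u << MY_TCI_BITS) - 1)

static inline u16 my_tag_build(u8 seq, u8 tci)
{
	return ((u16)seq << MY_TCI_BITS) | tci;
}

static inline u8 my_tag_seq(u16 tag) { return tag >> MY_TCI_BITS; }
static inline u8 my_tag_tci(u16 tag) { return tag & MY_TCI_MASK; }

/* A tag is only honoured while the per-slot sequence still matches;
 * freeing a tag bumps slot_seq[tci], so stale references simply miss. */
static inline bool my_tag_is_current(const u8 *slot_seq, u16 tag)
{
	return slot_seq[my_tag_tci(tag)] == my_tag_seq(tag);
}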
*/ list_del_init(&request->dev_node); - isci_free_tag(isci_host, request->sci.io_tag); + isci_free_tag(isci_host, request->io_tag); spin_unlock_irq(&isci_host->scic_lock); } } @@ -2486,8 +2471,8 @@ int isci_host_init(struct isci_host *isci_host) if (!ireq) return -ENOMEM; - ireq->sci.tc = &isci_host->sci.task_context_table[i]; - ireq->sci.owning_controller = &isci_host->sci; + ireq->tc = &isci_host->sci.task_context_table[i]; + ireq->owning_controller = &isci_host->sci; spin_lock_init(&ireq->state_lock); ireq->request_daddr = dma; ireq->isci_host = isci_host; @@ -2600,7 +2585,7 @@ void scic_sds_controller_post_request( writel(request, &scic->smu_registers->post_context_port); } -struct scic_sds_request *scic_request_by_tag(struct scic_sds_controller *scic, u16 io_tag) +struct isci_request *scic_request_by_tag(struct scic_sds_controller *scic, u16 io_tag) { u16 task_index; u16 task_sequence; @@ -2614,7 +2599,7 @@ struct scic_sds_request *scic_request_by_tag(struct scic_sds_controller *scic, u task_sequence = ISCI_TAG_SEQ(io_tag); if (task_sequence == scic->io_request_sequence[task_index]) - return &ireq->sci; + return ireq; } } @@ -2814,7 +2799,7 @@ enum sci_status isci_free_tag(struct isci_host *ihost, u16 io_tag) */ enum sci_status scic_controller_start_io(struct scic_sds_controller *scic, struct scic_sds_remote_device *rdev, - struct scic_sds_request *req) + struct isci_request *ireq) { enum sci_status status; @@ -2823,12 +2808,12 @@ enum sci_status scic_controller_start_io(struct scic_sds_controller *scic, return SCI_FAILURE_INVALID_STATE; } - status = scic_sds_remote_device_start_io(scic, rdev, req); + status = scic_sds_remote_device_start_io(scic, rdev, ireq); if (status != SCI_SUCCESS) return status; - set_bit(IREQ_ACTIVE, &sci_req_to_ireq(req)->flags); - scic_sds_controller_post_request(scic, scic_sds_request_get_post_context(req)); + set_bit(IREQ_ACTIVE, &ireq->flags); + scic_sds_controller_post_request(scic, scic_sds_request_get_post_context(ireq)); return SCI_SUCCESS; } @@ -2851,7 +2836,7 @@ enum sci_status scic_controller_start_io(struct scic_sds_controller *scic, enum sci_status scic_controller_terminate_request( struct scic_sds_controller *scic, struct scic_sds_remote_device *rdev, - struct scic_sds_request *req) + struct isci_request *ireq) { enum sci_status status; @@ -2861,7 +2846,7 @@ enum sci_status scic_controller_terminate_request( return SCI_FAILURE_INVALID_STATE; } - status = scic_sds_io_request_terminate(req); + status = scic_sds_io_request_terminate(ireq); if (status != SCI_SUCCESS) return status; @@ -2870,7 +2855,7 @@ enum sci_status scic_controller_terminate_request( * request sub-type. 
*/ scic_sds_controller_post_request(scic, - scic_sds_request_get_post_context(req) | + scic_sds_request_get_post_context(ireq) | SCU_CONTEXT_COMMAND_REQUEST_POST_TC_ABORT); return SCI_SUCCESS; } @@ -2889,7 +2874,7 @@ enum sci_status scic_controller_terminate_request( enum sci_status scic_controller_complete_io( struct scic_sds_controller *scic, struct scic_sds_remote_device *rdev, - struct scic_sds_request *request) + struct isci_request *ireq) { enum sci_status status; u16 index; @@ -2899,12 +2884,12 @@ enum sci_status scic_controller_complete_io( /* XXX: Implement this function */ return SCI_FAILURE; case SCIC_READY: - status = scic_sds_remote_device_complete_io(scic, rdev, request); + status = scic_sds_remote_device_complete_io(scic, rdev, ireq); if (status != SCI_SUCCESS) return status; - index = ISCI_TAG_TCI(request->io_tag); - clear_bit(IREQ_ACTIVE, &sci_req_to_ireq(request)->flags); + index = ISCI_TAG_TCI(ireq->io_tag); + clear_bit(IREQ_ACTIVE, &ireq->flags); return SCI_SUCCESS; default: dev_warn(scic_to_dev(scic), "invalid state to complete I/O"); @@ -2913,17 +2898,17 @@ enum sci_status scic_controller_complete_io( } -enum sci_status scic_controller_continue_io(struct scic_sds_request *sci_req) +enum sci_status scic_controller_continue_io(struct isci_request *ireq) { - struct scic_sds_controller *scic = sci_req->owning_controller; + struct scic_sds_controller *scic = ireq->owning_controller; if (scic->sm.current_state_id != SCIC_READY) { dev_warn(scic_to_dev(scic), "invalid state to continue I/O"); return SCI_FAILURE_INVALID_STATE; } - set_bit(IREQ_ACTIVE, &sci_req_to_ireq(sci_req)->flags); - scic_sds_controller_post_request(scic, scic_sds_request_get_post_context(sci_req)); + set_bit(IREQ_ACTIVE, &ireq->flags); + scic_sds_controller_post_request(scic, scic_sds_request_get_post_context(ireq)); return SCI_SUCCESS; } @@ -2939,9 +2924,8 @@ enum sci_status scic_controller_continue_io(struct scic_sds_request *sci_req) enum sci_task_status scic_controller_start_task( struct scic_sds_controller *scic, struct scic_sds_remote_device *rdev, - struct scic_sds_request *req) + struct isci_request *ireq) { - struct isci_request *ireq = sci_req_to_ireq(req); enum sci_status status; if (scic->sm.current_state_id != SCIC_READY) { @@ -2952,7 +2936,7 @@ enum sci_task_status scic_controller_start_task( return SCI_TASK_FAILURE_INVALID_STATE; } - status = scic_sds_remote_device_start_task(scic, rdev, req); + status = scic_sds_remote_device_start_task(scic, rdev, ireq); switch (status) { case SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS: set_bit(IREQ_ACTIVE, &ireq->flags); @@ -2967,7 +2951,7 @@ enum sci_task_status scic_controller_start_task( set_bit(IREQ_ACTIVE, &ireq->flags); scic_sds_controller_post_request(scic, - scic_sds_request_get_post_context(req)); + scic_sds_request_get_post_context(ireq)); break; default: break; diff --git a/drivers/scsi/isci/host.h b/drivers/scsi/isci/host.h index 446fade19b3..0b26d25c19a 100644 --- a/drivers/scsi/isci/host.h +++ b/drivers/scsi/isci/host.h @@ -64,7 +64,7 @@ #include "unsolicited_frame_control.h" #include "probe_roms.h" -struct scic_sds_request; +struct isci_request; struct scu_task_context; @@ -601,7 +601,7 @@ union scu_remote_node_context *scic_sds_controller_get_remote_node_context_buffe struct scic_sds_controller *scic, u16 node_id); -struct scic_sds_request *scic_request_by_tag(struct scic_sds_controller *scic, +struct isci_request *scic_request_by_tag(struct scic_sds_controller *scic, u16 io_tag); void scic_sds_controller_power_control_queue_insert( @@ 
-628,11 +628,11 @@ void scic_sds_controller_remote_device_stopped( void scic_sds_controller_copy_task_context( struct scic_sds_controller *scic, - struct scic_sds_request *this_request); + struct isci_request *ireq); void scic_sds_controller_register_setup(struct scic_sds_controller *scic); -enum sci_status scic_controller_continue_io(struct scic_sds_request *sci_req); +enum sci_status scic_controller_continue_io(struct isci_request *ireq); int isci_host_scan_finished(struct Scsi_Host *, unsigned long); void isci_host_scan_start(struct Scsi_Host *); u16 isci_alloc_tag(struct isci_host *ihost); @@ -665,22 +665,22 @@ void scic_controller_disable_interrupts( enum sci_status scic_controller_start_io( struct scic_sds_controller *scic, struct scic_sds_remote_device *remote_device, - struct scic_sds_request *io_request); + struct isci_request *ireq); enum sci_task_status scic_controller_start_task( struct scic_sds_controller *scic, struct scic_sds_remote_device *remote_device, - struct scic_sds_request *task_request); + struct isci_request *ireq); enum sci_status scic_controller_terminate_request( struct scic_sds_controller *scic, struct scic_sds_remote_device *remote_device, - struct scic_sds_request *request); + struct isci_request *ireq); enum sci_status scic_controller_complete_io( struct scic_sds_controller *scic, struct scic_sds_remote_device *remote_device, - struct scic_sds_request *io_request); + struct isci_request *ireq); void scic_sds_port_configuration_agent_construct( struct scic_sds_port_configuration_agent *port_agent); diff --git a/drivers/scsi/isci/phy.c b/drivers/scsi/isci/phy.c index c01d76210aa..98d93aeea75 100644 --- a/drivers/scsi/isci/phy.c +++ b/drivers/scsi/isci/phy.c @@ -983,7 +983,7 @@ enum sci_status scic_sds_phy_frame_handler(struct scic_sds_phy *sci_phy, "%s: in wrong state: %d\n", __func__, state); return SCI_FAILURE_INVALID_STATE; } - + } static void scic_sds_phy_starting_initial_substate_enter(struct sci_base_state_machine *sm) diff --git a/drivers/scsi/isci/port.c b/drivers/scsi/isci/port.c index 0e84e29335d..bd091549b4f 100644 --- a/drivers/scsi/isci/port.c +++ b/drivers/scsi/isci/port.c @@ -1611,7 +1611,7 @@ enum sci_status scic_sds_port_link_down(struct scic_sds_port *sci_port, enum sci_status scic_sds_port_start_io(struct scic_sds_port *sci_port, struct scic_sds_remote_device *sci_dev, - struct scic_sds_request *sci_req) + struct isci_request *ireq) { enum scic_sds_port_states state; @@ -1631,7 +1631,7 @@ enum sci_status scic_sds_port_start_io(struct scic_sds_port *sci_port, enum sci_status scic_sds_port_complete_io(struct scic_sds_port *sci_port, struct scic_sds_remote_device *sci_dev, - struct scic_sds_request *sci_req) + struct isci_request *ireq) { enum scic_sds_port_states state; diff --git a/drivers/scsi/isci/port.h b/drivers/scsi/isci/port.h index a44e541914f..668f3a14cd7 100644 --- a/drivers/scsi/isci/port.h +++ b/drivers/scsi/isci/port.h @@ -354,17 +354,17 @@ enum sci_status scic_sds_port_link_up(struct scic_sds_port *sci_port, enum sci_status scic_sds_port_link_down(struct scic_sds_port *sci_port, struct scic_sds_phy *sci_phy); -struct scic_sds_request; +struct isci_request; struct scic_sds_remote_device; enum sci_status scic_sds_port_start_io( struct scic_sds_port *sci_port, struct scic_sds_remote_device *sci_dev, - struct scic_sds_request *sci_req); + struct isci_request *ireq); enum sci_status scic_sds_port_complete_io( struct scic_sds_port *sci_port, struct scic_sds_remote_device *sci_dev, - struct scic_sds_request *sci_req); + struct 
isci_request *ireq); enum sas_linkrate scic_sds_port_get_max_allowed_speed( struct scic_sds_port *sci_port); diff --git a/drivers/scsi/isci/remote_device.c b/drivers/scsi/isci/remote_device.c index 5a86bb1e96d..c7cb0c54df5 100644 --- a/drivers/scsi/isci/remote_device.c +++ b/drivers/scsi/isci/remote_device.c @@ -94,7 +94,7 @@ static void isci_remote_device_not_ready(struct isci_host *ihost, scic_controller_terminate_request(&ihost->sci, &idev->sci, - &ireq->sci); + ireq); } /* Fall through into the default case... */ default: @@ -142,14 +142,13 @@ static enum sci_status scic_sds_remote_device_terminate_requests(struct scic_sds for (i = 0; i < SCI_MAX_IO_REQUESTS && i < request_count; i++) { struct isci_request *ireq = ihost->reqs[i]; - struct scic_sds_request *sci_req = &ireq->sci; enum sci_status s; if (!test_bit(IREQ_ACTIVE, &ireq->flags) || - sci_req->target_device != sci_dev) + ireq->target_device != sci_dev) continue; - s = scic_controller_terminate_request(scic, sci_dev, sci_req); + s = scic_controller_terminate_request(scic, sci_dev, ireq); if (s != SCI_SUCCESS) status = s; } @@ -299,7 +298,7 @@ enum sci_status scic_sds_remote_device_frame_handler(struct scic_sds_remote_devi case SCI_DEV_STOPPING: case SCI_DEV_FAILED: case SCI_DEV_RESETTING: { - struct scic_sds_request *sci_req; + struct isci_request *ireq; struct ssp_frame_hdr hdr; void *frame_header; ssize_t word_cnt; @@ -313,10 +312,10 @@ enum sci_status scic_sds_remote_device_frame_handler(struct scic_sds_remote_devi word_cnt = sizeof(hdr) / sizeof(u32); sci_swab32_cpy(&hdr, frame_header, word_cnt); - sci_req = scic_request_by_tag(scic, be16_to_cpu(hdr.tag)); - if (sci_req && sci_req->target_device == sci_dev) { + ireq = scic_request_by_tag(scic, be16_to_cpu(hdr.tag)); + if (ireq && ireq->target_device == sci_dev) { /* The IO request is now in charge of releasing the frame */ - status = scic_sds_io_request_frame_handler(sci_req, frame_index); + status = scic_sds_io_request_frame_handler(ireq, frame_index); } else { /* We could not map this tag to a valid IO * request Just toss the frame and continue @@ -448,14 +447,14 @@ enum sci_status scic_sds_remote_device_event_handler(struct scic_sds_remote_devi } static void scic_sds_remote_device_start_request(struct scic_sds_remote_device *sci_dev, - struct scic_sds_request *sci_req, + struct isci_request *ireq, enum sci_status status) { struct scic_sds_port *sci_port = sci_dev->owning_port; /* cleanup requests that failed after starting on the port */ if (status != SCI_SUCCESS) - scic_sds_port_complete_io(sci_port, sci_dev, sci_req); + scic_sds_port_complete_io(sci_port, sci_dev, ireq); else { kref_get(&sci_dev_to_idev(sci_dev)->kref); scic_sds_remote_device_increment_request_count(sci_dev); @@ -464,12 +463,11 @@ static void scic_sds_remote_device_start_request(struct scic_sds_remote_device * enum sci_status scic_sds_remote_device_start_io(struct scic_sds_controller *scic, struct scic_sds_remote_device *sci_dev, - struct scic_sds_request *sci_req) + struct isci_request *ireq) { struct sci_base_state_machine *sm = &sci_dev->sm; enum scic_sds_remote_device_states state = sm->current_state_id; struct scic_sds_port *sci_port = sci_dev->owning_port; - struct isci_request *ireq = sci_req_to_ireq(sci_req); enum sci_status status; switch (state) { @@ -491,15 +489,15 @@ enum sci_status scic_sds_remote_device_start_io(struct scic_sds_controller *scic * successful it will start the request for the port object then * increment its own request count. 
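As the start-io and start-task paths below show, each case runs the same ordered steps: claim the port, start the remote node context, then start the request itself, and scic_sds_remote_device_start_request() at the end releases the port claim again if anything after the first step failed. A condensed, self-contained sketch of that control flow, with hypothetical my_* helpers standing in for the scic_sds_* calls:

#include <stdbool.h>

enum my_status { MY_OK, MY_FAIL };

struct my_rnc     { bool resumed; };
struct my_device  { struct my_rnc rnc; int started_count; };
struct my_port    { int started_count; };
struct my_request { bool posted; };

static enum my_status my_port_start_io(struct my_port *p, struct my_device *d,
				       struct my_request *r)
{
	(void)d; (void)r;
	p->started_count++;
	return MY_OK;
}

static void my_port_complete_io(struct my_port *p, struct my_device *d,
				struct my_request *r)
{
	(void)d; (void)r;
	p->started_count--;
}

static enum my_status my_rnc_start_io(struct my_rnc *rnc, struct my_request *r)
{
	(void)r;
	return rnc->resumed ? MY_OK : MY_FAIL;
}

static enum my_status my_request_start(struct my_request *r)
{
	r->posted = true;
	return MY_OK;
}

/* Claim the port, ready the remote node context, post the request; any
 * failure after the port claim must hand the port slot back, which is
 * the role of the *_start_request() helper at the end of each path. */
static enum my_status my_device_start_io(struct my_port *port,
					 struct my_device *dev,
					 struct my_request *req)
{
	enum my_status status;

	status = my_port_start_io(port, dev, req);
	if (status != MY_OK)
		return status;

	status = my_rnc_start_io(&dev->rnc, req);
	if (status == MY_OK)
		status = my_request_start(req);

	if (status != MY_OK)
		my_port_complete_io(port, dev, req);	/* undo the claim */
	else
		dev->started_count++;

	return status;
}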
*/ - status = scic_sds_port_start_io(sci_port, sci_dev, sci_req); + status = scic_sds_port_start_io(sci_port, sci_dev, ireq); if (status != SCI_SUCCESS) return status; - status = scic_sds_remote_node_context_start_io(&sci_dev->rnc, sci_req); + status = scic_sds_remote_node_context_start_io(&sci_dev->rnc, ireq); if (status != SCI_SUCCESS) break; - status = scic_sds_request_start(sci_req); + status = scic_sds_request_start(ireq); break; case SCI_STP_DEV_IDLE: { /* handle the start io operation for a sata device that is in @@ -513,22 +511,22 @@ enum sci_status scic_sds_remote_device_start_io(struct scic_sds_controller *scic enum scic_sds_remote_device_states new_state; struct sas_task *task = isci_request_access_task(ireq); - status = scic_sds_port_start_io(sci_port, sci_dev, sci_req); + status = scic_sds_port_start_io(sci_port, sci_dev, ireq); if (status != SCI_SUCCESS) return status; - status = scic_sds_remote_node_context_start_io(&sci_dev->rnc, sci_req); + status = scic_sds_remote_node_context_start_io(&sci_dev->rnc, ireq); if (status != SCI_SUCCESS) break; - status = scic_sds_request_start(sci_req); + status = scic_sds_request_start(ireq); if (status != SCI_SUCCESS) break; if (task->ata_task.use_ncq) new_state = SCI_STP_DEV_NCQ; else { - sci_dev->working_request = sci_req; + sci_dev->working_request = ireq; new_state = SCI_STP_DEV_CMD; } sci_change_state(sm, new_state); @@ -538,15 +536,15 @@ enum sci_status scic_sds_remote_device_start_io(struct scic_sds_controller *scic struct sas_task *task = isci_request_access_task(ireq); if (task->ata_task.use_ncq) { - status = scic_sds_port_start_io(sci_port, sci_dev, sci_req); + status = scic_sds_port_start_io(sci_port, sci_dev, ireq); if (status != SCI_SUCCESS) return status; - status = scic_sds_remote_node_context_start_io(&sci_dev->rnc, sci_req); + status = scic_sds_remote_node_context_start_io(&sci_dev->rnc, ireq); if (status != SCI_SUCCESS) break; - status = scic_sds_request_start(sci_req); + status = scic_sds_request_start(ireq); } else return SCI_FAILURE_INVALID_STATE; break; @@ -554,19 +552,19 @@ enum sci_status scic_sds_remote_device_start_io(struct scic_sds_controller *scic case SCI_STP_DEV_AWAIT_RESET: return SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED; case SCI_SMP_DEV_IDLE: - status = scic_sds_port_start_io(sci_port, sci_dev, sci_req); + status = scic_sds_port_start_io(sci_port, sci_dev, ireq); if (status != SCI_SUCCESS) return status; - status = scic_sds_remote_node_context_start_io(&sci_dev->rnc, sci_req); + status = scic_sds_remote_node_context_start_io(&sci_dev->rnc, ireq); if (status != SCI_SUCCESS) break; - status = scic_sds_request_start(sci_req); + status = scic_sds_request_start(ireq); if (status != SCI_SUCCESS) break; - sci_dev->working_request = sci_req; + sci_dev->working_request = ireq; sci_change_state(&sci_dev->sm, SCI_SMP_DEV_CMD); break; case SCI_STP_DEV_CMD: @@ -577,21 +575,21 @@ enum sci_status scic_sds_remote_device_start_io(struct scic_sds_controller *scic return SCI_FAILURE_INVALID_STATE; } - scic_sds_remote_device_start_request(sci_dev, sci_req, status); + scic_sds_remote_device_start_request(sci_dev, ireq, status); return status; } static enum sci_status common_complete_io(struct scic_sds_port *sci_port, struct scic_sds_remote_device *sci_dev, - struct scic_sds_request *sci_req) + struct isci_request *ireq) { enum sci_status status; - status = scic_sds_request_complete(sci_req); + status = scic_sds_request_complete(ireq); if (status != SCI_SUCCESS) return status; - status = scic_sds_port_complete_io(sci_port, 
sci_dev, sci_req); + status = scic_sds_port_complete_io(sci_port, sci_dev, ireq); if (status != SCI_SUCCESS) return status; @@ -601,7 +599,7 @@ static enum sci_status common_complete_io(struct scic_sds_port *sci_port, enum sci_status scic_sds_remote_device_complete_io(struct scic_sds_controller *scic, struct scic_sds_remote_device *sci_dev, - struct scic_sds_request *sci_req) + struct isci_request *ireq) { struct sci_base_state_machine *sm = &sci_dev->sm; enum scic_sds_remote_device_states state = sm->current_state_id; @@ -623,16 +621,16 @@ enum sci_status scic_sds_remote_device_complete_io(struct scic_sds_controller *s case SCI_DEV_READY: case SCI_STP_DEV_AWAIT_RESET: case SCI_DEV_RESETTING: - status = common_complete_io(sci_port, sci_dev, sci_req); + status = common_complete_io(sci_port, sci_dev, ireq); break; case SCI_STP_DEV_CMD: case SCI_STP_DEV_NCQ: case SCI_STP_DEV_NCQ_ERROR: - status = common_complete_io(sci_port, sci_dev, sci_req); + status = common_complete_io(sci_port, sci_dev, ireq); if (status != SCI_SUCCESS) break; - if (sci_req->sci_status == SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) { + if (ireq->sci_status == SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) { /* This request causes hardware error, device needs to be Lun Reset. * So here we force the state machine to IDLE state so the rest IOs * can reach RNC state handler, these IOs will be completed by RNC with @@ -643,13 +641,13 @@ enum sci_status scic_sds_remote_device_complete_io(struct scic_sds_controller *s sci_change_state(sm, SCI_STP_DEV_IDLE); break; case SCI_SMP_DEV_CMD: - status = common_complete_io(sci_port, sci_dev, sci_req); + status = common_complete_io(sci_port, sci_dev, ireq); if (status != SCI_SUCCESS) break; sci_change_state(sm, SCI_SMP_DEV_IDLE); break; case SCI_DEV_STOPPING: - status = common_complete_io(sci_port, sci_dev, sci_req); + status = common_complete_io(sci_port, sci_dev, ireq); if (status != SCI_SUCCESS) break; @@ -664,7 +662,7 @@ enum sci_status scic_sds_remote_device_complete_io(struct scic_sds_controller *s dev_err(scirdev_to_dev(sci_dev), "%s: Port:0x%p Device:0x%p Request:0x%p Status:0x%x " "could not complete\n", __func__, sci_port, - sci_dev, sci_req, status); + sci_dev, ireq, status); else isci_put_device(sci_dev_to_idev(sci_dev)); @@ -682,7 +680,7 @@ static void scic_sds_remote_device_continue_request(void *dev) enum sci_status scic_sds_remote_device_start_task(struct scic_sds_controller *scic, struct scic_sds_remote_device *sci_dev, - struct scic_sds_request *sci_req) + struct isci_request *ireq) { struct sci_base_state_machine *sm = &sci_dev->sm; enum scic_sds_remote_device_states state = sm->current_state_id; @@ -708,15 +706,15 @@ enum sci_status scic_sds_remote_device_start_task(struct scic_sds_controller *sc case SCI_STP_DEV_NCQ: case SCI_STP_DEV_NCQ_ERROR: case SCI_STP_DEV_AWAIT_RESET: - status = scic_sds_port_start_io(sci_port, sci_dev, sci_req); + status = scic_sds_port_start_io(sci_port, sci_dev, ireq); if (status != SCI_SUCCESS) return status; - status = scic_sds_remote_node_context_start_task(&sci_dev->rnc, sci_req); + status = scic_sds_remote_node_context_start_task(&sci_dev->rnc, ireq); if (status != SCI_SUCCESS) goto out; - status = scic_sds_request_start(sci_req); + status = scic_sds_request_start(ireq); if (status != SCI_SUCCESS) goto out; @@ -724,7 +722,7 @@ enum sci_status scic_sds_remote_device_start_task(struct scic_sds_controller *sc * replace the request that probably resulted in the task * management request. 
*/ - sci_dev->working_request = sci_req; + sci_dev->working_request = ireq; sci_change_state(sm, SCI_STP_DEV_CMD); /* The remote node context must cleanup the TCi to NCQ mapping @@ -741,25 +739,25 @@ enum sci_status scic_sds_remote_device_start_task(struct scic_sds_controller *sc sci_dev); out: - scic_sds_remote_device_start_request(sci_dev, sci_req, status); + scic_sds_remote_device_start_request(sci_dev, ireq, status); /* We need to let the controller start request handler know that * it can't post TC yet. We will provide a callback function to * post TC when RNC gets resumed. */ return SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS; case SCI_DEV_READY: - status = scic_sds_port_start_io(sci_port, sci_dev, sci_req); + status = scic_sds_port_start_io(sci_port, sci_dev, ireq); if (status != SCI_SUCCESS) return status; - status = scic_sds_remote_node_context_start_task(&sci_dev->rnc, sci_req); + status = scic_sds_remote_node_context_start_task(&sci_dev->rnc, ireq); if (status != SCI_SUCCESS) break; - status = scic_sds_request_start(sci_req); + status = scic_sds_request_start(ireq); break; } - scic_sds_remote_device_start_request(sci_dev, sci_req, status); + scic_sds_remote_device_start_request(sci_dev, ireq, status); return status; } diff --git a/drivers/scsi/isci/remote_device.h b/drivers/scsi/isci/remote_device.h index 0d9e37fe734..6ac5dfb7d1d 100644 --- a/drivers/scsi/isci/remote_device.h +++ b/drivers/scsi/isci/remote_device.h @@ -120,7 +120,7 @@ struct scic_sds_remote_device { * used only for SATA requests since the unsolicited frames we get from the * hardware have no Tag value to look up the io request object. */ - struct scic_sds_request *working_request; + struct isci_request *working_request; /** * This field contains the reason for the remote device going not_ready. 
It is @@ -466,17 +466,17 @@ enum sci_status scic_sds_remote_device_event_handler( enum sci_status scic_sds_remote_device_start_io( struct scic_sds_controller *controller, struct scic_sds_remote_device *sci_dev, - struct scic_sds_request *io_request); + struct isci_request *ireq); enum sci_status scic_sds_remote_device_start_task( struct scic_sds_controller *controller, struct scic_sds_remote_device *sci_dev, - struct scic_sds_request *io_request); + struct isci_request *ireq); enum sci_status scic_sds_remote_device_complete_io( struct scic_sds_controller *controller, struct scic_sds_remote_device *sci_dev, - struct scic_sds_request *io_request); + struct isci_request *ireq); enum sci_status scic_sds_remote_device_suspend( struct scic_sds_remote_device *sci_dev, diff --git a/drivers/scsi/isci/remote_node_context.c b/drivers/scsi/isci/remote_node_context.c index b6774bcdabd..1b51fe55314 100644 --- a/drivers/scsi/isci/remote_node_context.c +++ b/drivers/scsi/isci/remote_node_context.c @@ -598,7 +598,7 @@ enum sci_status scic_sds_remote_node_context_resume(struct scic_sds_remote_node_ } enum sci_status scic_sds_remote_node_context_start_io(struct scic_sds_remote_node_context *sci_rnc, - struct scic_sds_request *sci_req) + struct isci_request *ireq) { enum scis_sds_remote_node_context_states state; @@ -623,7 +623,7 @@ enum sci_status scic_sds_remote_node_context_start_io(struct scic_sds_remote_nod } enum sci_status scic_sds_remote_node_context_start_task(struct scic_sds_remote_node_context *sci_rnc, - struct scic_sds_request *sci_req) + struct isci_request *ireq) { enum scis_sds_remote_node_context_states state; diff --git a/drivers/scsi/isci/remote_node_context.h b/drivers/scsi/isci/remote_node_context.h index 67a45b686a9..35e6ae61690 100644 --- a/drivers/scsi/isci/remote_node_context.h +++ b/drivers/scsi/isci/remote_node_context.h @@ -78,7 +78,7 @@ #define SCU_HARDWARE_SUSPENSION (0) #define SCI_SOFTWARE_SUSPENSION (1) -struct scic_sds_request; +struct isci_request; struct scic_sds_remote_device; struct scic_sds_remote_node_context; @@ -220,8 +220,8 @@ enum sci_status scic_sds_remote_node_context_resume(struct scic_sds_remote_node_ scics_sds_remote_node_context_callback cb_fn, void *cb_p); enum sci_status scic_sds_remote_node_context_start_task(struct scic_sds_remote_node_context *sci_rnc, - struct scic_sds_request *sci_req); + struct isci_request *ireq); enum sci_status scic_sds_remote_node_context_start_io(struct scic_sds_remote_node_context *sci_rnc, - struct scic_sds_request *sci_req); + struct isci_request *ireq); #endif /* _SCIC_SDS_REMOTE_NODE_CONTEXT_H_ */ diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c index 8520626b02f..c544bc79ce1 100644 --- a/drivers/scsi/isci/request.c +++ b/drivers/scsi/isci/request.c @@ -61,35 +61,35 @@ #include "scu_event_codes.h" #include "sas.h" -static struct scu_sgl_element_pair *to_sgl_element_pair(struct scic_sds_request *sci_req, +static struct scu_sgl_element_pair *to_sgl_element_pair(struct isci_request *ireq, int idx) { if (idx == 0) - return &sci_req->tc->sgl_pair_ab; + return &ireq->tc->sgl_pair_ab; else if (idx == 1) - return &sci_req->tc->sgl_pair_cd; + return &ireq->tc->sgl_pair_cd; else if (idx < 0) return NULL; else - return &sci_req->sg_table[idx - 2]; + return &ireq->sg_table[idx - 2]; } static dma_addr_t to_sgl_element_pair_dma(struct scic_sds_controller *scic, - struct scic_sds_request *sci_req, u32 idx) + struct isci_request *ireq, u32 idx) { u32 offset; if (idx == 0) { - offset = (void *) &sci_req->tc->sgl_pair_ab 
- + offset = (void *) &ireq->tc->sgl_pair_ab - (void *) &scic->task_context_table[0]; return scic->task_context_dma + offset; } else if (idx == 1) { - offset = (void *) &sci_req->tc->sgl_pair_cd - + offset = (void *) &ireq->tc->sgl_pair_cd - (void *) &scic->task_context_table[0]; return scic->task_context_dma + offset; } - return scic_io_request_get_dma_addr(sci_req, &sci_req->sg_table[idx - 2]); + return scic_io_request_get_dma_addr(ireq, &ireq->sg_table[idx - 2]); } static void init_sgl_element(struct scu_sgl_element *e, struct scatterlist *sg) @@ -100,12 +100,11 @@ static void init_sgl_element(struct scu_sgl_element *e, struct scatterlist *sg) e->address_modifier = 0; } -static void scic_sds_request_build_sgl(struct scic_sds_request *sds_request) +static void scic_sds_request_build_sgl(struct isci_request *ireq) { - struct isci_request *isci_request = sci_req_to_ireq(sds_request); - struct isci_host *isci_host = isci_request->isci_host; + struct isci_host *isci_host = ireq->isci_host; struct scic_sds_controller *scic = &isci_host->sci; - struct sas_task *task = isci_request_access_task(isci_request); + struct sas_task *task = isci_request_access_task(ireq); struct scatterlist *sg = NULL; dma_addr_t dma_addr; u32 sg_idx = 0; @@ -116,7 +115,7 @@ static void scic_sds_request_build_sgl(struct scic_sds_request *sds_request) sg = task->scatter; while (sg) { - scu_sg = to_sgl_element_pair(sds_request, sg_idx); + scu_sg = to_sgl_element_pair(ireq, sg_idx); init_sgl_element(&scu_sg->A, sg); sg = sg_next(sg); if (sg) { @@ -127,7 +126,7 @@ static void scic_sds_request_build_sgl(struct scic_sds_request *sds_request) if (prev_sg) { dma_addr = to_sgl_element_pair_dma(scic, - sds_request, + ireq, sg_idx); prev_sg->next_pair_upper = @@ -140,14 +139,14 @@ static void scic_sds_request_build_sgl(struct scic_sds_request *sds_request) sg_idx++; } } else { /* handle when no sg */ - scu_sg = to_sgl_element_pair(sds_request, sg_idx); + scu_sg = to_sgl_element_pair(ireq, sg_idx); dma_addr = dma_map_single(&isci_host->pdev->dev, task->scatter, task->total_xfer_len, task->data_dir); - isci_request->zero_scatter_daddr = dma_addr; + ireq->zero_scatter_daddr = dma_addr; scu_sg->A.length = task->total_xfer_len; scu_sg->A.address_upper = upper_32_bits(dma_addr); @@ -160,13 +159,12 @@ static void scic_sds_request_build_sgl(struct scic_sds_request *sds_request) } } -static void scic_sds_io_request_build_ssp_command_iu(struct scic_sds_request *sci_req) +static void scic_sds_io_request_build_ssp_command_iu(struct isci_request *ireq) { struct ssp_cmd_iu *cmd_iu; - struct isci_request *ireq = sci_req_to_ireq(sci_req); struct sas_task *task = isci_request_access_task(ireq); - cmd_iu = &sci_req->ssp.cmd; + cmd_iu = &ireq->ssp.cmd; memcpy(cmd_iu->LUN, task->ssp_task.LUN, 8); cmd_iu->add_cdb_len = 0; @@ -181,14 +179,13 @@ static void scic_sds_io_request_build_ssp_command_iu(struct scic_sds_request *sc sizeof(task->ssp_task.cdb) / sizeof(u32)); } -static void scic_sds_task_request_build_ssp_task_iu(struct scic_sds_request *sci_req) +static void scic_sds_task_request_build_ssp_task_iu(struct isci_request *ireq) { struct ssp_task_iu *task_iu; - struct isci_request *ireq = sci_req_to_ireq(sci_req); struct sas_task *task = isci_request_access_task(ireq); struct isci_tmf *isci_tmf = isci_request_access_tmf(ireq); - task_iu = &sci_req->ssp.tmf; + task_iu = &ireq->ssp.tmf; memset(task_iu, 0, sizeof(struct ssp_task_iu)); @@ -208,15 +205,15 @@ static void scic_sds_task_request_build_ssp_task_iu(struct scic_sds_request *sci * */ static 
void scu_ssp_reqeust_construct_task_context( - struct scic_sds_request *sds_request, + struct isci_request *ireq, struct scu_task_context *task_context) { dma_addr_t dma_addr; struct scic_sds_remote_device *target_device; struct scic_sds_port *target_port; - target_device = scic_sds_request_get_device(sds_request); - target_port = scic_sds_request_get_port(sds_request); + target_device = scic_sds_request_get_device(ireq); + target_port = scic_sds_request_get_port(ireq); /* Fill in the TC with the its required data */ task_context->abort = 0; @@ -232,7 +229,7 @@ static void scu_ssp_reqeust_construct_task_context( task_context->context_type = SCU_TASK_CONTEXT_TYPE; task_context->remote_node_index = - scic_sds_remote_device_get_index(sds_request->target_device); + scic_sds_remote_device_get_index(ireq->target_device); task_context->command_code = 0; task_context->link_layer_control = 0; @@ -244,22 +241,21 @@ static void scu_ssp_reqeust_construct_task_context( task_context->address_modifier = 0; - /* task_context->type.ssp.tag = sci_req->io_tag; */ + /* task_context->type.ssp.tag = ireq->io_tag; */ task_context->task_phase = 0x01; - sds_request->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC | + ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC | (scic_sds_controller_get_protocol_engine_group(controller) << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) | (scic_sds_port_get_index(target_port) << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) | - ISCI_TAG_TCI(sds_request->io_tag)); + ISCI_TAG_TCI(ireq->io_tag)); /* * Copy the physical address for the command buffer to the * SCU Task Context */ - dma_addr = scic_io_request_get_dma_addr(sds_request, - &sds_request->ssp.cmd); + dma_addr = scic_io_request_get_dma_addr(ireq, &ireq->ssp.cmd); task_context->command_iu_upper = upper_32_bits(dma_addr); task_context->command_iu_lower = lower_32_bits(dma_addr); @@ -268,8 +264,7 @@ static void scu_ssp_reqeust_construct_task_context( * Copy the physical address for the response buffer to the * SCU Task Context */ - dma_addr = scic_io_request_get_dma_addr(sds_request, - &sds_request->ssp.rsp); + dma_addr = scic_io_request_get_dma_addr(ireq, &ireq->ssp.rsp); task_context->response_iu_upper = upper_32_bits(dma_addr); task_context->response_iu_lower = lower_32_bits(dma_addr); @@ -280,13 +275,13 @@ static void scu_ssp_reqeust_construct_task_context( * @sci_req: * */ -static void scu_ssp_io_request_construct_task_context(struct scic_sds_request *sci_req, +static void scu_ssp_io_request_construct_task_context(struct isci_request *ireq, enum dma_data_direction dir, u32 len) { - struct scu_task_context *task_context = sci_req->tc; + struct scu_task_context *task_context = ireq->tc; - scu_ssp_reqeust_construct_task_context(sci_req, task_context); + scu_ssp_reqeust_construct_task_context(ireq, task_context); task_context->ssp_command_iu_length = sizeof(struct ssp_cmd_iu) / sizeof(u32); @@ -306,7 +301,7 @@ static void scu_ssp_io_request_construct_task_context(struct scic_sds_request *s task_context->transfer_length_bytes = len; if (task_context->transfer_length_bytes > 0) - scic_sds_request_build_sgl(sci_req); + scic_sds_request_build_sgl(ireq); } /** @@ -322,11 +317,11 @@ static void scu_ssp_io_request_construct_task_context(struct scic_sds_request *s * constructed. 
* */ -static void scu_ssp_task_request_construct_task_context(struct scic_sds_request *sci_req) +static void scu_ssp_task_request_construct_task_context(struct isci_request *ireq) { - struct scu_task_context *task_context = sci_req->tc; + struct scu_task_context *task_context = ireq->tc; - scu_ssp_reqeust_construct_task_context(sci_req, task_context); + scu_ssp_reqeust_construct_task_context(ireq, task_context); task_context->control_frame = 1; task_context->priority = SCU_TASK_PRIORITY_HIGH; @@ -350,15 +345,15 @@ static void scu_ssp_task_request_construct_task_context(struct scic_sds_request * determine what is common for SSP/SMP/STP task context structures. */ static void scu_sata_reqeust_construct_task_context( - struct scic_sds_request *sci_req, + struct isci_request *ireq, struct scu_task_context *task_context) { dma_addr_t dma_addr; struct scic_sds_remote_device *target_device; struct scic_sds_port *target_port; - target_device = scic_sds_request_get_device(sci_req); - target_port = scic_sds_request_get_port(sci_req); + target_device = scic_sds_request_get_device(ireq); + target_port = scic_sds_request_get_port(ireq); /* Fill in the TC with the its required data */ task_context->abort = 0; @@ -374,7 +369,7 @@ static void scu_sata_reqeust_construct_task_context( task_context->context_type = SCU_TASK_CONTEXT_TYPE; task_context->remote_node_index = - scic_sds_remote_device_get_index(sci_req->target_device); + scic_sds_remote_device_get_index(ireq->target_device); task_context->command_code = 0; task_context->link_layer_control = 0; @@ -391,21 +386,21 @@ static void scu_sata_reqeust_construct_task_context( (sizeof(struct host_to_dev_fis) - sizeof(u32)) / sizeof(u32); /* Set the first word of the H2D REG FIS */ - task_context->type.words[0] = *(u32 *)&sci_req->stp.cmd; + task_context->type.words[0] = *(u32 *)&ireq->stp.cmd; - sci_req->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC | + ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC | (scic_sds_controller_get_protocol_engine_group(controller) << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) | (scic_sds_port_get_index(target_port) << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) | - ISCI_TAG_TCI(sci_req->io_tag)); + ISCI_TAG_TCI(ireq->io_tag)); /* * Copy the physical address for the command buffer to the SCU Task * Context. We must offset the command buffer by 4 bytes because the * first 4 bytes are transfered in the body of the TC. 
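The comment above, together with the address computation that follows it, captures the key detail of SATA command construction: dword 0 of the host-to-device register FIS is carried inside the task context itself (type.words[0]), and the DMA address programmed for the command buffer is advanced by sizeof(u32) so the hardware fetches only the remaining bytes. A minimal standalone sketch of that split, using simplified stand-in types rather than the driver's real structures:

#include <stdint.h>

typedef uint64_t dma_addr_t;           /* stand-in for the kernel type */

struct h2d_fis { uint32_t dword[5]; }; /* 20-byte register FIS, simplified */

struct toy_tc {                        /* only the fields this sketch needs */
	uint32_t type_words0;          /* holds FIS dword 0 */
	uint32_t command_iu_upper;
	uint32_t command_iu_lower;
};

/* Embed dword 0 in the task context and point the command IU at the FIS
 * payload starting 4 bytes in; fis_dma is the bus address of *fis. */
static void load_fis_into_tc(struct toy_tc *tc, const struct h2d_fis *fis,
			     dma_addr_t fis_dma)
{
	dma_addr_t rest = fis_dma + sizeof(uint32_t);

	tc->type_words0 = fis->dword[0];
	tc->command_iu_upper = (uint32_t)(rest >> 32);
	tc->command_iu_lower = (uint32_t)rest;
}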
*/ - dma_addr = scic_io_request_get_dma_addr(sci_req, - ((char *) &sci_req->stp.cmd) + + dma_addr = scic_io_request_get_dma_addr(ireq, + ((char *) &ireq->stp.cmd) + sizeof(u32)); task_context->command_iu_upper = upper_32_bits(dma_addr); @@ -416,11 +411,11 @@ static void scu_sata_reqeust_construct_task_context( task_context->response_iu_lower = 0; } -static void scu_stp_raw_request_construct_task_context(struct scic_sds_request *sci_req) +static void scu_stp_raw_request_construct_task_context(struct isci_request *ireq) { - struct scu_task_context *task_context = sci_req->tc; + struct scu_task_context *task_context = ireq->tc; - scu_sata_reqeust_construct_task_context(sci_req, task_context); + scu_sata_reqeust_construct_task_context(ireq, task_context); task_context->control_frame = 0; task_context->priority = SCU_TASK_PRIORITY_NORMAL; @@ -429,20 +424,19 @@ static void scu_stp_raw_request_construct_task_context(struct scic_sds_request * task_context->transfer_length_bytes = sizeof(struct host_to_dev_fis) - sizeof(u32); } -static enum sci_status -scic_sds_stp_pio_request_construct(struct scic_sds_request *sci_req, - bool copy_rx_frame) +static enum sci_status scic_sds_stp_pio_request_construct(struct isci_request *ireq, + bool copy_rx_frame) { - struct isci_stp_request *stp_req = &sci_req->stp.req; + struct isci_stp_request *stp_req = &ireq->stp.req; - scu_stp_raw_request_construct_task_context(sci_req); + scu_stp_raw_request_construct_task_context(ireq); stp_req->status = 0; stp_req->sgl.offset = 0; stp_req->sgl.set = SCU_SGL_ELEMENT_PAIR_A; if (copy_rx_frame) { - scic_sds_request_build_sgl(sci_req); + scic_sds_request_build_sgl(ireq); stp_req->sgl.index = 0; } else { /* The user does not want the data copied to the SGL buffer location */ @@ -464,18 +458,18 @@ scic_sds_stp_pio_request_construct(struct scic_sds_request *sci_req, * requests that are optimized by the silicon (i.e. UDMA, NCQ). This method * returns an indication as to whether the construction was successful. 
*/ -static void scic_sds_stp_optimized_request_construct(struct scic_sds_request *sci_req, +static void scic_sds_stp_optimized_request_construct(struct isci_request *ireq, u8 optimized_task_type, u32 len, enum dma_data_direction dir) { - struct scu_task_context *task_context = sci_req->tc; + struct scu_task_context *task_context = ireq->tc; /* Build the STP task context structure */ - scu_sata_reqeust_construct_task_context(sci_req, task_context); + scu_sata_reqeust_construct_task_context(ireq, task_context); /* Copy over the SGL elements */ - scic_sds_request_build_sgl(sci_req); + scic_sds_request_build_sgl(ireq); /* Copy over the number of bytes to be transfered */ task_context->transfer_length_bytes = len; @@ -500,13 +494,12 @@ static void scic_sds_stp_optimized_request_construct(struct scic_sds_request *sc static enum sci_status -scic_io_request_construct_sata(struct scic_sds_request *sci_req, +scic_io_request_construct_sata(struct isci_request *ireq, u32 len, enum dma_data_direction dir, bool copy) { enum sci_status status = SCI_SUCCESS; - struct isci_request *ireq = sci_req_to_ireq(sci_req); struct sas_task *task = isci_request_access_task(ireq); /* check for management protocols */ @@ -515,20 +508,20 @@ scic_io_request_construct_sata(struct scic_sds_request *sci_req, if (tmf->tmf_code == isci_tmf_sata_srst_high || tmf->tmf_code == isci_tmf_sata_srst_low) { - scu_stp_raw_request_construct_task_context(sci_req); + scu_stp_raw_request_construct_task_context(ireq); return SCI_SUCCESS; } else { - dev_err(scic_to_dev(sci_req->owning_controller), + dev_err(scic_to_dev(ireq->owning_controller), "%s: Request 0x%p received un-handled SAT " "management protocol 0x%x.\n", - __func__, sci_req, tmf->tmf_code); + __func__, ireq, tmf->tmf_code); return SCI_FAILURE; } } if (!sas_protocol_ata(task->task_proto)) { - dev_err(scic_to_dev(sci_req->owning_controller), + dev_err(scic_to_dev(ireq->owning_controller), "%s: Non-ATA protocol in SATA path: 0x%x\n", __func__, task->task_proto); @@ -538,13 +531,13 @@ scic_io_request_construct_sata(struct scic_sds_request *sci_req, /* non data */ if (task->data_dir == DMA_NONE) { - scu_stp_raw_request_construct_task_context(sci_req); + scu_stp_raw_request_construct_task_context(ireq); return SCI_SUCCESS; } /* NCQ */ if (task->ata_task.use_ncq) { - scic_sds_stp_optimized_request_construct(sci_req, + scic_sds_stp_optimized_request_construct(ireq, SCU_TASK_TYPE_FPDMAQ_READ, len, dir); return SCI_SUCCESS; @@ -552,74 +545,71 @@ scic_io_request_construct_sata(struct scic_sds_request *sci_req, /* DMA */ if (task->ata_task.dma_xfer) { - scic_sds_stp_optimized_request_construct(sci_req, + scic_sds_stp_optimized_request_construct(ireq, SCU_TASK_TYPE_DMA_IN, len, dir); return SCI_SUCCESS; } else /* PIO */ - return scic_sds_stp_pio_request_construct(sci_req, copy); + return scic_sds_stp_pio_request_construct(ireq, copy); return status; } -static enum sci_status scic_io_request_construct_basic_ssp(struct scic_sds_request *sci_req) +static enum sci_status scic_io_request_construct_basic_ssp(struct isci_request *ireq) { - struct isci_request *ireq = sci_req_to_ireq(sci_req); struct sas_task *task = isci_request_access_task(ireq); - sci_req->protocol = SCIC_SSP_PROTOCOL; + ireq->protocol = SCIC_SSP_PROTOCOL; - scu_ssp_io_request_construct_task_context(sci_req, + scu_ssp_io_request_construct_task_context(ireq, task->data_dir, task->total_xfer_len); - scic_sds_io_request_build_ssp_command_iu(sci_req); + scic_sds_io_request_build_ssp_command_iu(ireq); - 
sci_change_state(&sci_req->sm, SCI_REQ_CONSTRUCTED); + sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED); return SCI_SUCCESS; } enum sci_status scic_task_request_construct_ssp( - struct scic_sds_request *sci_req) + struct isci_request *ireq) { /* Construct the SSP Task SCU Task Context */ - scu_ssp_task_request_construct_task_context(sci_req); + scu_ssp_task_request_construct_task_context(ireq); /* Fill in the SSP Task IU */ - scic_sds_task_request_build_ssp_task_iu(sci_req); + scic_sds_task_request_build_ssp_task_iu(ireq); - sci_change_state(&sci_req->sm, SCI_REQ_CONSTRUCTED); + sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED); return SCI_SUCCESS; } -static enum sci_status scic_io_request_construct_basic_sata(struct scic_sds_request *sci_req) +static enum sci_status scic_io_request_construct_basic_sata(struct isci_request *ireq) { enum sci_status status; bool copy = false; - struct isci_request *isci_request = sci_req_to_ireq(sci_req); - struct sas_task *task = isci_request_access_task(isci_request); + struct sas_task *task = isci_request_access_task(ireq); - sci_req->protocol = SCIC_STP_PROTOCOL; + ireq->protocol = SCIC_STP_PROTOCOL; copy = (task->data_dir == DMA_NONE) ? false : true; - status = scic_io_request_construct_sata(sci_req, + status = scic_io_request_construct_sata(ireq, task->total_xfer_len, task->data_dir, copy); if (status == SCI_SUCCESS) - sci_change_state(&sci_req->sm, SCI_REQ_CONSTRUCTED); + sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED); return status; } -enum sci_status scic_task_request_construct_sata(struct scic_sds_request *sci_req) +enum sci_status scic_task_request_construct_sata(struct isci_request *ireq) { enum sci_status status = SCI_SUCCESS; - struct isci_request *ireq = sci_req_to_ireq(sci_req); /* check for management protocols */ if (ireq->ttype == tmf_task) { @@ -627,12 +617,12 @@ enum sci_status scic_task_request_construct_sata(struct scic_sds_request *sci_re if (tmf->tmf_code == isci_tmf_sata_srst_high || tmf->tmf_code == isci_tmf_sata_srst_low) { - scu_stp_raw_request_construct_task_context(sci_req); + scu_stp_raw_request_construct_task_context(ireq); } else { - dev_err(scic_to_dev(sci_req->owning_controller), + dev_err(scic_to_dev(ireq->owning_controller), "%s: Request 0x%p received un-handled SAT " "Protocol 0x%x.\n", - __func__, sci_req, tmf->tmf_code); + __func__, ireq, tmf->tmf_code); return SCI_FAILURE; } @@ -640,7 +630,7 @@ enum sci_status scic_task_request_construct_sata(struct scic_sds_request *sci_re if (status != SCI_SUCCESS) return status; - sci_change_state(&sci_req->sm, SCI_REQ_CONSTRUCTED); + sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED); return status; } @@ -650,9 +640,9 @@ enum sci_status scic_task_request_construct_sata(struct scic_sds_request *sci_re * @sci_req: request that was terminated early */ #define SCU_TASK_CONTEXT_SRAM 0x200000 -static u32 sci_req_tx_bytes(struct scic_sds_request *sci_req) +static u32 sci_req_tx_bytes(struct isci_request *ireq) { - struct scic_sds_controller *scic = sci_req->owning_controller; + struct scic_sds_controller *scic = ireq->owning_controller; u32 ret_val = 0; if (readl(&scic->smu_registers->address_modifier) == 0) { @@ -666,19 +656,19 @@ static u32 sci_req_tx_bytes(struct scic_sds_request *sci_req) */ ret_val = readl(scu_reg_base + (SCU_TASK_CONTEXT_SRAM + offsetof(struct scu_task_context, type.ssp.data_offset)) + - ((sizeof(struct scu_task_context)) * ISCI_TAG_TCI(sci_req->io_tag))); + ((sizeof(struct scu_task_context)) * ISCI_TAG_TCI(ireq->io_tag))); } return ret_val; } -enum sci_status 
scic_sds_request_start(struct scic_sds_request *sci_req) +enum sci_status scic_sds_request_start(struct isci_request *ireq) { enum sci_base_request_states state; - struct scu_task_context *tc = sci_req->tc; - struct scic_sds_controller *scic = sci_req->owning_controller; + struct scu_task_context *tc = ireq->tc; + struct scic_sds_controller *scic = ireq->owning_controller; - state = sci_req->sm.current_state_id; + state = ireq->sm.current_state_id; if (state != SCI_REQ_CONSTRUCTED) { dev_warn(scic_to_dev(scic), "%s: SCIC IO Request requested to start while in wrong " @@ -686,19 +676,19 @@ enum sci_status scic_sds_request_start(struct scic_sds_request *sci_req) return SCI_FAILURE_INVALID_STATE; } - tc->task_index = ISCI_TAG_TCI(sci_req->io_tag); + tc->task_index = ISCI_TAG_TCI(ireq->io_tag); switch (tc->protocol_type) { case SCU_TASK_CONTEXT_PROTOCOL_SMP: case SCU_TASK_CONTEXT_PROTOCOL_SSP: /* SSP/SMP Frame */ - tc->type.ssp.tag = sci_req->io_tag; + tc->type.ssp.tag = ireq->io_tag; tc->type.ssp.target_port_transfer_tag = 0xFFFF; break; case SCU_TASK_CONTEXT_PROTOCOL_STP: /* STP/SATA Frame - * tc->type.stp.ncq_tag = sci_req->ncq_tag; + * tc->type.stp.ncq_tag = ireq->ncq_tag; */ break; @@ -713,28 +703,28 @@ enum sci_status scic_sds_request_start(struct scic_sds_request *sci_req) } /* Add to the post_context the io tag value */ - sci_req->post_context |= ISCI_TAG_TCI(sci_req->io_tag); + ireq->post_context |= ISCI_TAG_TCI(ireq->io_tag); /* Everything is good go ahead and change state */ - sci_change_state(&sci_req->sm, SCI_REQ_STARTED); + sci_change_state(&ireq->sm, SCI_REQ_STARTED); return SCI_SUCCESS; } enum sci_status -scic_sds_io_request_terminate(struct scic_sds_request *sci_req) +scic_sds_io_request_terminate(struct isci_request *ireq) { enum sci_base_request_states state; - state = sci_req->sm.current_state_id; + state = ireq->sm.current_state_id; switch (state) { case SCI_REQ_CONSTRUCTED: - scic_sds_request_set_status(sci_req, + scic_sds_request_set_status(ireq, SCU_TASK_DONE_TASK_ABORT, SCI_FAILURE_IO_TERMINATED); - sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED); + sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); return SCI_SUCCESS; case SCI_REQ_STARTED: case SCI_REQ_TASK_WAIT_TC_COMP: @@ -751,54 +741,54 @@ scic_sds_io_request_terminate(struct scic_sds_request *sci_req) case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED: case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG: case SCI_REQ_STP_SOFT_RESET_WAIT_D2H: - sci_change_state(&sci_req->sm, SCI_REQ_ABORTING); + sci_change_state(&ireq->sm, SCI_REQ_ABORTING); return SCI_SUCCESS; case SCI_REQ_TASK_WAIT_TC_RESP: - sci_change_state(&sci_req->sm, SCI_REQ_ABORTING); - sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED); + sci_change_state(&ireq->sm, SCI_REQ_ABORTING); + sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); return SCI_SUCCESS; case SCI_REQ_ABORTING: - sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED); + sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); return SCI_SUCCESS; case SCI_REQ_COMPLETED: default: - dev_warn(scic_to_dev(sci_req->owning_controller), + dev_warn(scic_to_dev(ireq->owning_controller), "%s: SCIC IO Request requested to abort while in wrong " "state %d\n", __func__, - sci_req->sm.current_state_id); + ireq->sm.current_state_id); break; } return SCI_FAILURE_INVALID_STATE; } -enum sci_status scic_sds_request_complete(struct scic_sds_request *sci_req) +enum sci_status scic_sds_request_complete(struct isci_request *ireq) { enum sci_base_request_states state; - struct scic_sds_controller *scic = sci_req->owning_controller; + struct 
scic_sds_controller *scic = ireq->owning_controller; - state = sci_req->sm.current_state_id; + state = ireq->sm.current_state_id; if (WARN_ONCE(state != SCI_REQ_COMPLETED, "isci: request completion from wrong state (%d)\n", state)) return SCI_FAILURE_INVALID_STATE; - if (sci_req->saved_rx_frame_index != SCU_INVALID_FRAME_INDEX) + if (ireq->saved_rx_frame_index != SCU_INVALID_FRAME_INDEX) scic_sds_controller_release_frame(scic, - sci_req->saved_rx_frame_index); + ireq->saved_rx_frame_index); /* XXX can we just stop the machine and remove the 'final' state? */ - sci_change_state(&sci_req->sm, SCI_REQ_FINAL); + sci_change_state(&ireq->sm, SCI_REQ_FINAL); return SCI_SUCCESS; } -enum sci_status scic_sds_io_request_event_handler(struct scic_sds_request *sci_req, +enum sci_status scic_sds_io_request_event_handler(struct isci_request *ireq, u32 event_code) { enum sci_base_request_states state; - struct scic_sds_controller *scic = sci_req->owning_controller; + struct scic_sds_controller *scic = ireq->owning_controller; - state = sci_req->sm.current_state_id; + state = ireq->sm.current_state_id; if (state != SCI_REQ_STP_PIO_DATA_IN) { dev_warn(scic_to_dev(scic), "%s: (%x) in wrong state %d\n", @@ -812,7 +802,7 @@ enum sci_status scic_sds_io_request_event_handler(struct scic_sds_request *sci_r /* We are waiting for data and the SCU has R_ERR the data frame. * Go back to waiting for the D2H Register FIS */ - sci_change_state(&sci_req->sm, SCI_REQ_STP_PIO_WAIT_FRAME); + sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME); return SCI_SUCCESS; default: dev_err(scic_to_dev(scic), @@ -832,15 +822,14 @@ enum sci_status scic_sds_io_request_event_handler(struct scic_sds_request *sci_r * @sci_req: This parameter specifies the request object for which to copy * the response data. */ -static void scic_sds_io_request_copy_response(struct scic_sds_request *sci_req) +static void scic_sds_io_request_copy_response(struct isci_request *ireq) { void *resp_buf; u32 len; struct ssp_response_iu *ssp_response; - struct isci_request *ireq = sci_req_to_ireq(sci_req); struct isci_tmf *isci_tmf = isci_request_access_tmf(ireq); - ssp_response = &sci_req->ssp.rsp; + ssp_response = &ireq->ssp.rsp; resp_buf = &isci_tmf->resp.resp_iu; @@ -852,7 +841,7 @@ static void scic_sds_io_request_copy_response(struct scic_sds_request *sci_req) } static enum sci_status -request_started_state_tc_event(struct scic_sds_request *sci_req, +request_started_state_tc_event(struct isci_request *ireq, u32 completion_code) { struct ssp_response_iu *resp_iu; @@ -863,7 +852,7 @@ request_started_state_tc_event(struct scic_sds_request *sci_req, */ switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): - scic_sds_request_set_status(sci_req, + scic_sds_request_set_status(ireq, SCU_TASK_DONE_GOOD, SCI_SUCCESS); break; @@ -875,19 +864,19 @@ request_started_state_tc_event(struct scic_sds_request *sci_req, * truly a failed request or a good request that just got * completed early. 
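The switch above keys off SCU_GET_COMPLETION_TL_STATUS(), which is a mask over the 32-bit completion word posted by the hardware; the case labels keep the field in its shifted position and SCU_NORMALIZE_COMPLETION_STATUS() shifts it back down before it is stored. A generic sketch of that style of field extraction, with made-up shift and mask values rather than the real register layout:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical layout: transport-layer status in bits 31:16. */
#define TL_STATUS_SHIFT 16
#define TL_STATUS_MASK  (0xffffu << TL_STATUS_SHIFT)

#define GET_TL_STATUS(c)     ((c) & TL_STATUS_MASK)               /* still shifted */
#define NORMALIZE_STATUS(c)  (GET_TL_STATUS(c) >> TL_STATUS_SHIFT)

int main(void)
{
	uint32_t completion_code = 0x00120034; /* example completion word */

	printf("raw field 0x%x, normalized 0x%x\n",
	       (unsigned)GET_TL_STATUS(completion_code),
	       (unsigned)NORMALIZE_STATUS(completion_code));
	return 0;
}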
*/ - struct ssp_response_iu *resp = &sci_req->ssp.rsp; + struct ssp_response_iu *resp = &ireq->ssp.rsp; ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32); - sci_swab32_cpy(&sci_req->ssp.rsp, - &sci_req->ssp.rsp, + sci_swab32_cpy(&ireq->ssp.rsp, + &ireq->ssp.rsp, word_cnt); if (resp->status == 0) { - scic_sds_request_set_status(sci_req, + scic_sds_request_set_status(ireq, SCU_TASK_DONE_GOOD, SCI_SUCCESS_IO_DONE_EARLY); } else { - scic_sds_request_set_status(sci_req, + scic_sds_request_set_status(ireq, SCU_TASK_DONE_CHECK_RESPONSE, SCI_FAILURE_IO_RESPONSE_VALID); } @@ -896,11 +885,11 @@ request_started_state_tc_event(struct scic_sds_request *sci_req, case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CHECK_RESPONSE): { ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32); - sci_swab32_cpy(&sci_req->ssp.rsp, - &sci_req->ssp.rsp, + sci_swab32_cpy(&ireq->ssp.rsp, + &ireq->ssp.rsp, word_cnt); - scic_sds_request_set_status(sci_req, + scic_sds_request_set_status(ireq, SCU_TASK_DONE_CHECK_RESPONSE, SCI_FAILURE_IO_RESPONSE_VALID); break; @@ -911,15 +900,15 @@ request_started_state_tc_event(struct scic_sds_request *sci_req, * guaranteed to be received before this completion status is * posted? */ - resp_iu = &sci_req->ssp.rsp; + resp_iu = &ireq->ssp.rsp; datapres = resp_iu->datapres; if (datapres == 1 || datapres == 2) { - scic_sds_request_set_status(sci_req, + scic_sds_request_set_status(ireq, SCU_TASK_DONE_CHECK_RESPONSE, SCI_FAILURE_IO_RESPONSE_VALID); } else - scic_sds_request_set_status(sci_req, + scic_sds_request_set_status(ireq, SCU_TASK_DONE_GOOD, SCI_SUCCESS); break; @@ -935,13 +924,13 @@ request_started_state_tc_event(struct scic_sds_request *sci_req, case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_SDBFIS): case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR): case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDB_ERR): - if (sci_req->protocol == SCIC_STP_PROTOCOL) { - scic_sds_request_set_status(sci_req, + if (ireq->protocol == SCIC_STP_PROTOCOL) { + scic_sds_request_set_status(ireq, SCU_GET_COMPLETION_TL_STATUS(completion_code) >> SCU_COMPLETION_TL_STATUS_SHIFT, SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED); } else { - scic_sds_request_set_status(sci_req, + scic_sds_request_set_status(ireq, SCU_GET_COMPLETION_TL_STATUS(completion_code) >> SCU_COMPLETION_TL_STATUS_SHIFT, SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); @@ -959,7 +948,7 @@ request_started_state_tc_event(struct scic_sds_request *sci_req, case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY): case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED): case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED): - scic_sds_request_set_status(sci_req, + scic_sds_request_set_status(ireq, SCU_GET_COMPLETION_TL_STATUS(completion_code) >> SCU_COMPLETION_TL_STATUS_SHIFT, SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED); @@ -983,7 +972,7 @@ request_started_state_tc_event(struct scic_sds_request *sci_req, case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RNCNV_OUTBOUND): default: scic_sds_request_set_status( - sci_req, + ireq, SCU_GET_COMPLETION_TL_STATUS(completion_code) >> SCU_COMPLETION_TL_STATUS_SHIFT, SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); @@ -995,21 +984,21 @@ request_started_state_tc_event(struct scic_sds_request *sci_req, */ /* In all cases we will treat this as the completion of the IO req. 
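sci_swab32_cpy(), used above on ireq->ssp.rsp, swaps the response IU in place one 32-bit word at a time before the status and datapres fields are inspected. A rough user-space equivalent of that word-wise swap (names and signature are illustrative, not the driver's API):

#include <stddef.h>
#include <stdint.h>

static inline uint32_t swab32(uint32_t v)
{
	return (v >> 24) | ((v >> 8) & 0x0000ff00u) |
	       ((v << 8) & 0x00ff0000u) | (v << 24);
}

/* Swap word_cnt 32-bit words from src into dest; src == dest is allowed,
 * which is how the completion path uses it on the response IU buffer. */
static void swab32_cpy(void *dest, const void *src, size_t word_cnt)
{
	uint32_t *d = dest;
	const uint32_t *s = src;
	size_t i;

	for (i = 0; i < word_cnt; i++)
		d[i] = swab32(s[i]);
}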
*/ - sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED); + sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); return SCI_SUCCESS; } static enum sci_status -request_aborting_state_tc_event(struct scic_sds_request *sci_req, +request_aborting_state_tc_event(struct isci_request *ireq, u32 completion_code) { switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { case (SCU_TASK_DONE_GOOD << SCU_COMPLETION_TL_STATUS_SHIFT): case (SCU_TASK_DONE_TASK_ABORT << SCU_COMPLETION_TL_STATUS_SHIFT): - scic_sds_request_set_status(sci_req, SCU_TASK_DONE_TASK_ABORT, + scic_sds_request_set_status(ireq, SCU_TASK_DONE_TASK_ABORT, SCI_FAILURE_IO_TERMINATED); - sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED); + sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); break; default: @@ -1022,15 +1011,15 @@ request_aborting_state_tc_event(struct scic_sds_request *sci_req, return SCI_SUCCESS; } -static enum sci_status ssp_task_request_await_tc_event(struct scic_sds_request *sci_req, +static enum sci_status ssp_task_request_await_tc_event(struct isci_request *ireq, u32 completion_code) { switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): - scic_sds_request_set_status(sci_req, SCU_TASK_DONE_GOOD, + scic_sds_request_set_status(ireq, SCU_TASK_DONE_GOOD, SCI_SUCCESS); - sci_change_state(&sci_req->sm, SCI_REQ_TASK_WAIT_TC_RESP); + sci_change_state(&ireq->sm, SCI_REQ_TASK_WAIT_TC_RESP); break; case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO): /* Currently, the decision is to simply allow the task request @@ -1038,12 +1027,12 @@ static enum sci_status ssp_task_request_await_tc_event(struct scic_sds_request * * There is a potential for receiving multiple task responses if * we decide to send the task IU again. */ - dev_warn(scic_to_dev(sci_req->owning_controller), + dev_warn(scic_to_dev(ireq->owning_controller), "%s: TaskRequest:0x%p CompletionCode:%x - " - "ACK/NAK timeout\n", __func__, sci_req, + "ACK/NAK timeout\n", __func__, ireq, completion_code); - sci_change_state(&sci_req->sm, SCI_REQ_TASK_WAIT_TC_RESP); + sci_change_state(&ireq->sm, SCI_REQ_TASK_WAIT_TC_RESP); break; default: /* @@ -1051,11 +1040,11 @@ static enum sci_status ssp_task_request_await_tc_event(struct scic_sds_request * * If a NAK was received, then it is up to the user to retry * the request. */ - scic_sds_request_set_status(sci_req, + scic_sds_request_set_status(ireq, SCU_NORMALIZE_COMPLETION_STATUS(completion_code), SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); - sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED); + sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); break; } @@ -1063,7 +1052,7 @@ static enum sci_status ssp_task_request_await_tc_event(struct scic_sds_request * } static enum sci_status -smp_request_await_response_tc_event(struct scic_sds_request *sci_req, +smp_request_await_response_tc_event(struct isci_request *ireq, u32 completion_code) { switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { @@ -1072,10 +1061,10 @@ smp_request_await_response_tc_event(struct scic_sds_request *sci_req, * unexpected. but if the TC has success status, we * complete the IO anyway. */ - scic_sds_request_set_status(sci_req, SCU_TASK_DONE_GOOD, + scic_sds_request_set_status(ireq, SCU_TASK_DONE_GOOD, SCI_SUCCESS); - sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED); + sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); break; case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_RESP_TO_ERR): @@ -1089,21 +1078,21 @@ smp_request_await_response_tc_event(struct scic_sds_request *sci_req, * these SMP_XXX_XX_ERR status. 
For these type of error, * we ask scic user to retry the request. */ - scic_sds_request_set_status(sci_req, SCU_TASK_DONE_SMP_RESP_TO_ERR, + scic_sds_request_set_status(ireq, SCU_TASK_DONE_SMP_RESP_TO_ERR, SCI_FAILURE_RETRY_REQUIRED); - sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED); + sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); break; default: /* All other completion status cause the IO to be complete. If a NAK * was received, then it is up to the user to retry the request */ - scic_sds_request_set_status(sci_req, + scic_sds_request_set_status(ireq, SCU_NORMALIZE_COMPLETION_STATUS(completion_code), SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); - sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED); + sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); break; } @@ -1111,50 +1100,50 @@ smp_request_await_response_tc_event(struct scic_sds_request *sci_req, } static enum sci_status -smp_request_await_tc_event(struct scic_sds_request *sci_req, +smp_request_await_tc_event(struct isci_request *ireq, u32 completion_code) { switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): - scic_sds_request_set_status(sci_req, SCU_TASK_DONE_GOOD, + scic_sds_request_set_status(ireq, SCU_TASK_DONE_GOOD, SCI_SUCCESS); - sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED); + sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); break; default: /* All other completion status cause the IO to be * complete. If a NAK was received, then it is up to * the user to retry the request. */ - scic_sds_request_set_status(sci_req, + scic_sds_request_set_status(ireq, SCU_NORMALIZE_COMPLETION_STATUS(completion_code), SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); - sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED); + sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); break; } return SCI_SUCCESS; } -void scic_stp_io_request_set_ncq_tag(struct scic_sds_request *req, +void scic_stp_io_request_set_ncq_tag(struct isci_request *ireq, u16 ncq_tag) { /** * @note This could be made to return an error to the user if the user * attempts to set the NCQ tag in the wrong state. 
*/ - req->tc->type.stp.ncq_tag = ncq_tag; + ireq->tc->type.stp.ncq_tag = ncq_tag; } static struct scu_sgl_element *pio_sgl_next(struct isci_stp_request *stp_req) { struct scu_sgl_element *sgl; struct scu_sgl_element_pair *sgl_pair; - struct scic_sds_request *sci_req = to_sci_req(stp_req); + struct isci_request *ireq = to_ireq(stp_req); struct isci_stp_pio_sgl *pio_sgl = &stp_req->sgl; - sgl_pair = to_sgl_element_pair(sci_req, pio_sgl->index); + sgl_pair = to_sgl_element_pair(ireq, pio_sgl->index); if (!sgl_pair) sgl = NULL; else if (pio_sgl->set == SCU_SGL_ELEMENT_PAIR_A) { @@ -1172,7 +1161,7 @@ static struct scu_sgl_element *pio_sgl_next(struct isci_stp_request *stp_req) } else { pio_sgl->index++; pio_sgl->set = SCU_SGL_ELEMENT_PAIR_A; - sgl_pair = to_sgl_element_pair(sci_req, pio_sgl->index); + sgl_pair = to_sgl_element_pair(ireq, pio_sgl->index); sgl = &sgl_pair->A; } } @@ -1181,15 +1170,15 @@ static struct scu_sgl_element *pio_sgl_next(struct isci_stp_request *stp_req) } static enum sci_status -stp_request_non_data_await_h2d_tc_event(struct scic_sds_request *sci_req, +stp_request_non_data_await_h2d_tc_event(struct isci_request *ireq, u32 completion_code) { switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): - scic_sds_request_set_status(sci_req, SCU_TASK_DONE_GOOD, + scic_sds_request_set_status(ireq, SCU_TASK_DONE_GOOD, SCI_SUCCESS); - sci_change_state(&sci_req->sm, SCI_REQ_STP_NON_DATA_WAIT_D2H); + sci_change_state(&ireq->sm, SCI_REQ_STP_NON_DATA_WAIT_D2H); break; default: @@ -1197,11 +1186,11 @@ stp_request_non_data_await_h2d_tc_event(struct scic_sds_request *sci_req, * complete. If a NAK was received, then it is up to * the user to retry the request. */ - scic_sds_request_set_status(sci_req, + scic_sds_request_set_status(ireq, SCU_NORMALIZE_COMPLETION_STATUS(completion_code), SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); - sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED); + sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); break; } @@ -1214,18 +1203,18 @@ stp_request_non_data_await_h2d_tc_event(struct scic_sds_request *sci_req, * parameter length. current sgl and offset is alreay stored in the IO request */ static enum sci_status scic_sds_stp_request_pio_data_out_trasmit_data_frame( - struct scic_sds_request *sci_req, + struct isci_request *ireq, u32 length) { - struct isci_stp_request *stp_req = &sci_req->stp.req; - struct scu_task_context *task_context = sci_req->tc; + struct isci_stp_request *stp_req = &ireq->stp.req; + struct scu_task_context *task_context = ireq->tc; struct scu_sgl_element_pair *sgl_pair; struct scu_sgl_element *current_sgl; /* Recycle the TC and reconstruct it for sending out DATA FIS containing * for the data from current_sgl+offset for the input length */ - sgl_pair = to_sgl_element_pair(sci_req, stp_req->sgl.index); + sgl_pair = to_sgl_element_pair(ireq, stp_req->sgl.index); if (stp_req->sgl.set == SCU_SGL_ELEMENT_PAIR_A) current_sgl = &sgl_pair->A; else @@ -1238,12 +1227,12 @@ static enum sci_status scic_sds_stp_request_pio_data_out_trasmit_data_frame( task_context->type.stp.fis_type = FIS_DATA; /* send the new TC out. 
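pio_sgl_next() above advances through the scatter-gather list two elements at a time: each scu_sgl_element_pair holds an A slot and a B slot, and only after B has been consumed does the pair index move forward. A compact sketch of that A/B walk over a plain array of pairs, with simplified stand-in types:

#include <stddef.h>

struct sgl_elem { unsigned int len; };
struct sgl_pair { struct sgl_elem A, B; };

struct sgl_cursor {
	struct sgl_pair *pairs;
	size_t           npairs;
	size_t           index;   /* which pair */
	int              use_b;   /* 0 = next is A, 1 = next is B */
};

/* Return the next element, alternating A and B within a pair before moving
 * on to the next pair; NULL once the array is exhausted. */
static struct sgl_elem *sgl_next(struct sgl_cursor *c)
{
	struct sgl_elem *e;

	if (c->index >= c->npairs)
		return NULL;

	if (!c->use_b) {
		e = &c->pairs[c->index].A;
		c->use_b = 1;
	} else {
		e = &c->pairs[c->index].B;
		c->use_b = 0;
		c->index++;
	}
	return e;
}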
*/ - return scic_controller_continue_io(sci_req); + return scic_controller_continue_io(ireq); } -static enum sci_status scic_sds_stp_request_pio_data_out_transmit_data(struct scic_sds_request *sci_req) +static enum sci_status scic_sds_stp_request_pio_data_out_transmit_data(struct isci_request *ireq) { - struct isci_stp_request *stp_req = &sci_req->stp.req; + struct isci_stp_request *stp_req = &ireq->stp.req; struct scu_sgl_element_pair *sgl_pair; struct scu_sgl_element *sgl; enum sci_status status; @@ -1251,7 +1240,7 @@ static enum sci_status scic_sds_stp_request_pio_data_out_transmit_data(struct sc u32 len = 0; offset = stp_req->sgl.offset; - sgl_pair = to_sgl_element_pair(sci_req, stp_req->sgl.index); + sgl_pair = to_sgl_element_pair(ireq, stp_req->sgl.index); if (WARN_ONCE(!sgl_pair, "%s: null sgl element", __func__)) return SCI_FAILURE; @@ -1267,7 +1256,7 @@ static enum sci_status scic_sds_stp_request_pio_data_out_transmit_data(struct sc return SCI_SUCCESS; if (stp_req->pio_len >= len) { - status = scic_sds_stp_request_pio_data_out_trasmit_data_frame(sci_req, len); + status = scic_sds_stp_request_pio_data_out_trasmit_data_frame(ireq, len); if (status != SCI_SUCCESS) return status; stp_req->pio_len -= len; @@ -1276,7 +1265,7 @@ static enum sci_status scic_sds_stp_request_pio_data_out_transmit_data(struct sc sgl = pio_sgl_next(stp_req); offset = 0; } else if (stp_req->pio_len < len) { - scic_sds_stp_request_pio_data_out_trasmit_data_frame(sci_req, stp_req->pio_len); + scic_sds_stp_request_pio_data_out_trasmit_data_frame(ireq, stp_req->pio_len); /* Sgl offset will be adjusted and saved for future */ offset += stp_req->pio_len; @@ -1302,7 +1291,6 @@ static enum sci_status scic_sds_stp_request_pio_data_in_copy_data_buffer(struct isci_stp_request *stp_req, u8 *data_buf, u32 len) { - struct scic_sds_request *sci_req; struct isci_request *ireq; u8 *src_addr; int copy_len; @@ -1311,8 +1299,7 @@ scic_sds_stp_request_pio_data_in_copy_data_buffer(struct isci_stp_request *stp_r void *kaddr; int total_len = len; - sci_req = to_sci_req(stp_req); - ireq = sci_req_to_ireq(sci_req); + ireq = to_ireq(stp_req); task = isci_request_access_task(ireq); src_addr = data_buf; @@ -1373,18 +1360,18 @@ static enum sci_status scic_sds_stp_request_pio_data_in_copy_data( } static enum sci_status -stp_request_pio_await_h2d_completion_tc_event(struct scic_sds_request *sci_req, +stp_request_pio_await_h2d_completion_tc_event(struct isci_request *ireq, u32 completion_code) { enum sci_status status = SCI_SUCCESS; switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): - scic_sds_request_set_status(sci_req, + scic_sds_request_set_status(ireq, SCU_TASK_DONE_GOOD, SCI_SUCCESS); - sci_change_state(&sci_req->sm, SCI_REQ_STP_PIO_WAIT_FRAME); + sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME); break; default: @@ -1392,11 +1379,11 @@ stp_request_pio_await_h2d_completion_tc_event(struct scic_sds_request *sci_req, * complete. If a NAK was received, then it is up to * the user to retry the request. 
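The data-out path above sends at most one SGL element's worth of data per DATA FIS: when the bytes still owed cover the rest of the current element, the whole remainder goes out and the cursor moves to the next element; otherwise only the remaining request bytes go out and the offset within the element is saved so transmission can resume there. A simplified sketch of that bookkeeping, where transmit() stands in for recycling and posting the task context:

#include <stddef.h>

struct pio_state {
	size_t pio_len;  /* bytes still owed to the device */
	size_t sgl_len;  /* length of the current SGL element */
	size_t offset;   /* bytes of that element already sent */
};

static void transmit(size_t nbytes) { (void)nbytes; /* placeholder */ }

/* Emit one DATA frame and update the cursor; returns nonzero when the
 * current SGL element has been fully consumed and the caller should
 * advance to the next one. */
static int send_one_frame(struct pio_state *st)
{
	size_t elem_left = st->sgl_len - st->offset;

	if (st->pio_len >= elem_left) {
		transmit(elem_left);
		st->pio_len -= elem_left;
		st->offset = 0;
		return 1;
	}

	transmit(st->pio_len);
	st->offset += st->pio_len;  /* resume here on the next PIO setup */
	st->pio_len = 0;
	return 0;
}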
*/ - scic_sds_request_set_status(sci_req, + scic_sds_request_set_status(ireq, SCU_NORMALIZE_COMPLETION_STATUS(completion_code), SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); - sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED); + sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); break; } @@ -1404,18 +1391,18 @@ stp_request_pio_await_h2d_completion_tc_event(struct scic_sds_request *sci_req, } static enum sci_status -pio_data_out_tx_done_tc_event(struct scic_sds_request *sci_req, +pio_data_out_tx_done_tc_event(struct isci_request *ireq, u32 completion_code) { enum sci_status status = SCI_SUCCESS; bool all_frames_transferred = false; - struct isci_stp_request *stp_req = &sci_req->stp.req; + struct isci_stp_request *stp_req = &ireq->stp.req; switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): /* Transmit data */ if (stp_req->pio_len != 0) { - status = scic_sds_stp_request_pio_data_out_transmit_data(sci_req); + status = scic_sds_stp_request_pio_data_out_transmit_data(ireq); if (status == SCI_SUCCESS) { if (stp_req->pio_len == 0) all_frames_transferred = true; @@ -1433,7 +1420,7 @@ pio_data_out_tx_done_tc_event(struct scic_sds_request *sci_req, /* * Change the state to SCI_REQ_STP_PIO_DATA_IN * and wait for PIO_SETUP fis / or D2H REg fis. */ - sci_change_state(&sci_req->sm, SCI_REQ_STP_PIO_WAIT_FRAME); + sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME); } break; @@ -1444,11 +1431,11 @@ pio_data_out_tx_done_tc_event(struct scic_sds_request *sci_req, * the request. */ scic_sds_request_set_status( - sci_req, + ireq, SCU_NORMALIZE_COMPLETION_STATUS(completion_code), SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); - sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED); + sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); break; } @@ -1456,18 +1443,18 @@ pio_data_out_tx_done_tc_event(struct scic_sds_request *sci_req, } static void scic_sds_stp_request_udma_complete_request( - struct scic_sds_request *request, + struct isci_request *ireq, u32 scu_status, enum sci_status sci_status) { - scic_sds_request_set_status(request, scu_status, sci_status); - sci_change_state(&request->sm, SCI_REQ_COMPLETED); + scic_sds_request_set_status(ireq, scu_status, sci_status); + sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); } -static enum sci_status scic_sds_stp_request_udma_general_frame_handler(struct scic_sds_request *sci_req, +static enum sci_status scic_sds_stp_request_udma_general_frame_handler(struct isci_request *ireq, u32 frame_index) { - struct scic_sds_controller *scic = sci_req->owning_controller; + struct scic_sds_controller *scic = ireq->owning_controller; struct dev_to_host_fis *frame_header; enum sci_status status; u32 *frame_buffer; @@ -1482,7 +1469,7 @@ static enum sci_status scic_sds_stp_request_udma_general_frame_handler(struct sc frame_index, (void **)&frame_buffer); - scic_sds_controller_copy_sata_response(&sci_req->stp.rsp, + scic_sds_controller_copy_sata_response(&ireq->stp.rsp, frame_header, frame_buffer); } @@ -1493,16 +1480,16 @@ static enum sci_status scic_sds_stp_request_udma_general_frame_handler(struct sc } enum sci_status -scic_sds_io_request_frame_handler(struct scic_sds_request *sci_req, +scic_sds_io_request_frame_handler(struct isci_request *ireq, u32 frame_index) { - struct scic_sds_controller *scic = sci_req->owning_controller; - struct isci_stp_request *stp_req = &sci_req->stp.req; + struct scic_sds_controller *scic = ireq->owning_controller; + struct isci_stp_request *stp_req = &ireq->stp.req; enum sci_base_request_states state; enum 
sci_status status; ssize_t word_cnt; - state = sci_req->sm.current_state_id; + state = ireq->sm.current_state_id; switch (state) { case SCI_REQ_STARTED: { struct ssp_frame_hdr ssp_hdr; @@ -1523,24 +1510,24 @@ scic_sds_io_request_frame_handler(struct scic_sds_request *sci_req, frame_index, (void **)&resp_iu); - sci_swab32_cpy(&sci_req->ssp.rsp, resp_iu, word_cnt); + sci_swab32_cpy(&ireq->ssp.rsp, resp_iu, word_cnt); - resp_iu = &sci_req->ssp.rsp; + resp_iu = &ireq->ssp.rsp; if (resp_iu->datapres == 0x01 || resp_iu->datapres == 0x02) { - scic_sds_request_set_status(sci_req, + scic_sds_request_set_status(ireq, SCU_TASK_DONE_CHECK_RESPONSE, SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); } else - scic_sds_request_set_status(sci_req, + scic_sds_request_set_status(ireq, SCU_TASK_DONE_GOOD, SCI_SUCCESS); } else { /* not a response frame, why did it get forwarded? */ dev_err(scic_to_dev(scic), "%s: SCIC IO Request 0x%p received unexpected " - "frame %d type 0x%02x\n", __func__, sci_req, + "frame %d type 0x%02x\n", __func__, ireq, frame_index, ssp_hdr.frame_type); } @@ -1554,13 +1541,13 @@ scic_sds_io_request_frame_handler(struct scic_sds_request *sci_req, } case SCI_REQ_TASK_WAIT_TC_RESP: - scic_sds_io_request_copy_response(sci_req); - sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED); + scic_sds_io_request_copy_response(ireq); + sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); scic_sds_controller_release_frame(scic,frame_index); return SCI_SUCCESS; case SCI_REQ_SMP_WAIT_RESP: { - struct smp_resp *rsp_hdr = &sci_req->smp.rsp; + struct smp_resp *rsp_hdr = &ireq->smp.rsp; void *frame_header; scic_sds_unsolicited_frame_control_get_header(&scic->uf_control, @@ -1584,10 +1571,10 @@ scic_sds_io_request_frame_handler(struct scic_sds_request *sci_req, sci_swab32_cpy(((u8 *) rsp_hdr) + SMP_RESP_HDR_SZ, smp_resp, word_cnt); - scic_sds_request_set_status(sci_req, SCU_TASK_DONE_GOOD, + scic_sds_request_set_status(ireq, SCU_TASK_DONE_GOOD, SCI_SUCCESS); - sci_change_state(&sci_req->sm, SCI_REQ_SMP_WAIT_TC_COMP); + sci_change_state(&ireq->sm, SCI_REQ_SMP_WAIT_TC_COMP); } else { /* * This was not a response frame why did it get @@ -1597,15 +1584,15 @@ scic_sds_io_request_frame_handler(struct scic_sds_request *sci_req, "%s: SCIC SMP Request 0x%p received unexpected " "frame %d type 0x%02x\n", __func__, - sci_req, + ireq, frame_index, rsp_hdr->frame_type); - scic_sds_request_set_status(sci_req, + scic_sds_request_set_status(ireq, SCU_TASK_DONE_SMP_FRM_TYPE_ERR, SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); - sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED); + sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); } scic_sds_controller_release_frame(scic, frame_index); @@ -1614,18 +1601,18 @@ scic_sds_io_request_frame_handler(struct scic_sds_request *sci_req, } case SCI_REQ_STP_UDMA_WAIT_TC_COMP: - return scic_sds_stp_request_udma_general_frame_handler(sci_req, + return scic_sds_stp_request_udma_general_frame_handler(ireq, frame_index); case SCI_REQ_STP_UDMA_WAIT_D2H: /* Use the general frame handler to copy the resposne data */ - status = scic_sds_stp_request_udma_general_frame_handler(sci_req, + status = scic_sds_stp_request_udma_general_frame_handler(ireq, frame_index); if (status != SCI_SUCCESS) return status; - scic_sds_stp_request_udma_complete_request(sci_req, + scic_sds_stp_request_udma_complete_request(ireq, SCU_TASK_DONE_CHECK_RESPONSE, SCI_FAILURE_IO_RESPONSE_VALID); @@ -1657,12 +1644,12 @@ scic_sds_io_request_frame_handler(struct scic_sds_request *sci_req, frame_index, (void **)&frame_buffer); - 
scic_sds_controller_copy_sata_response(&sci_req->stp.rsp, + scic_sds_controller_copy_sata_response(&ireq->stp.rsp, frame_header, frame_buffer); /* The command has completed with error */ - scic_sds_request_set_status(sci_req, SCU_TASK_DONE_CHECK_RESPONSE, + scic_sds_request_set_status(ireq, SCU_TASK_DONE_CHECK_RESPONSE, SCI_FAILURE_IO_RESPONSE_VALID); break; @@ -1672,12 +1659,12 @@ scic_sds_io_request_frame_handler(struct scic_sds_request *sci_req, "violation occurred\n", __func__, stp_req, frame_index); - scic_sds_request_set_status(sci_req, SCU_TASK_DONE_UNEXP_FIS, + scic_sds_request_set_status(ireq, SCU_TASK_DONE_UNEXP_FIS, SCI_FAILURE_PROTOCOL_VIOLATION); break; } - sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED); + sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); /* Frame has been decoded return it to the controller */ scic_sds_controller_release_frame(scic, frame_index); @@ -1686,7 +1673,6 @@ scic_sds_io_request_frame_handler(struct scic_sds_request *sci_req, } case SCI_REQ_STP_PIO_WAIT_FRAME: { - struct isci_request *ireq = sci_req_to_ireq(sci_req); struct sas_task *task = isci_request_access_task(ireq); struct dev_to_host_fis *frame_header; u32 *frame_buffer; @@ -1722,28 +1708,28 @@ scic_sds_io_request_frame_handler(struct scic_sds_request *sci_req, /* status: 4th byte in the 3rd dword */ stp_req->status = (frame_buffer[2] >> 24) & 0xff; - scic_sds_controller_copy_sata_response(&sci_req->stp.rsp, + scic_sds_controller_copy_sata_response(&ireq->stp.rsp, frame_header, frame_buffer); - sci_req->stp.rsp.status = stp_req->status; + ireq->stp.rsp.status = stp_req->status; /* The next state is dependent on whether the * request was PIO Data-in or Data out */ if (task->data_dir == DMA_FROM_DEVICE) { - sci_change_state(&sci_req->sm, SCI_REQ_STP_PIO_DATA_IN); + sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_DATA_IN); } else if (task->data_dir == DMA_TO_DEVICE) { /* Transmit data */ - status = scic_sds_stp_request_pio_data_out_transmit_data(sci_req); + status = scic_sds_stp_request_pio_data_out_transmit_data(ireq); if (status != SCI_SUCCESS) break; - sci_change_state(&sci_req->sm, SCI_REQ_STP_PIO_DATA_OUT); + sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_DATA_OUT); } break; case FIS_SETDEVBITS: - sci_change_state(&sci_req->sm, SCI_REQ_STP_PIO_WAIT_FRAME); + sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME); break; case FIS_REGD2H: @@ -1767,15 +1753,15 @@ scic_sds_io_request_frame_handler(struct scic_sds_request *sci_req, frame_index, (void **)&frame_buffer); - scic_sds_controller_copy_sata_response(&sci_req->stp.req, + scic_sds_controller_copy_sata_response(&ireq->stp.req, frame_header, frame_buffer); - scic_sds_request_set_status(sci_req, + scic_sds_request_set_status(ireq, SCU_TASK_DONE_CHECK_RESPONSE, SCI_FAILURE_IO_RESPONSE_VALID); - sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED); + sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); break; default: @@ -1818,11 +1804,11 @@ scic_sds_io_request_frame_handler(struct scic_sds_request *sci_req, frame_index, frame_header->fis_type); - scic_sds_request_set_status(sci_req, + scic_sds_request_set_status(ireq, SCU_TASK_DONE_GOOD, SCI_FAILURE_IO_REQUIRES_SCSI_ABORT); - sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED); + sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); /* Frame is decoded return it to the controller */ scic_sds_controller_release_frame(scic, frame_index); @@ -1830,7 +1816,7 @@ scic_sds_io_request_frame_handler(struct scic_sds_request *sci_req, } if (stp_req->sgl.index < 0) { - sci_req->saved_rx_frame_index = frame_index; + 
ireq->saved_rx_frame_index = frame_index; stp_req->pio_len = 0; } else { scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control, @@ -1851,13 +1837,13 @@ scic_sds_io_request_frame_handler(struct scic_sds_request *sci_req, return status; if ((stp_req->status & ATA_BUSY) == 0) { - scic_sds_request_set_status(sci_req, + scic_sds_request_set_status(ireq, SCU_TASK_DONE_CHECK_RESPONSE, SCI_FAILURE_IO_RESPONSE_VALID); - sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED); + sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); } else { - sci_change_state(&sci_req->sm, SCI_REQ_STP_PIO_WAIT_FRAME); + sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME); } return status; } @@ -1886,12 +1872,12 @@ scic_sds_io_request_frame_handler(struct scic_sds_request *sci_req, frame_index, (void **)&frame_buffer); - scic_sds_controller_copy_sata_response(&sci_req->stp.rsp, + scic_sds_controller_copy_sata_response(&ireq->stp.rsp, frame_header, frame_buffer); /* The command has completed with error */ - scic_sds_request_set_status(sci_req, + scic_sds_request_set_status(ireq, SCU_TASK_DONE_CHECK_RESPONSE, SCI_FAILURE_IO_RESPONSE_VALID); break; @@ -1904,13 +1890,13 @@ scic_sds_io_request_frame_handler(struct scic_sds_request *sci_req, stp_req, frame_index); - scic_sds_request_set_status(sci_req, + scic_sds_request_set_status(ireq, SCU_TASK_DONE_UNEXP_FIS, SCI_FAILURE_PROTOCOL_VIOLATION); break; } - sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED); + sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); /* Frame has been decoded return it to the controller */ scic_sds_controller_release_frame(scic, frame_index); @@ -1938,14 +1924,14 @@ scic_sds_io_request_frame_handler(struct scic_sds_request *sci_req, } } -static enum sci_status stp_request_udma_await_tc_event(struct scic_sds_request *sci_req, +static enum sci_status stp_request_udma_await_tc_event(struct isci_request *ireq, u32 completion_code) { enum sci_status status = SCI_SUCCESS; switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): - scic_sds_stp_request_udma_complete_request(sci_req, + scic_sds_stp_request_udma_complete_request(ireq, SCU_TASK_DONE_GOOD, SCI_SUCCESS); break; @@ -1955,11 +1941,11 @@ static enum sci_status stp_request_udma_await_tc_event(struct scic_sds_request * * Register FIS was received before we got the TC * completion. 
*/ - if (sci_req->stp.rsp.fis_type == FIS_REGD2H) { - scic_sds_remote_device_suspend(sci_req->target_device, + if (ireq->stp.rsp.fis_type == FIS_REGD2H) { + scic_sds_remote_device_suspend(ireq->target_device, SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code))); - scic_sds_stp_request_udma_complete_request(sci_req, + scic_sds_stp_request_udma_complete_request(ireq, SCU_TASK_DONE_CHECK_RESPONSE, SCI_FAILURE_IO_RESPONSE_VALID); } else { @@ -1968,7 +1954,7 @@ static enum sci_status stp_request_udma_await_tc_event(struct scic_sds_request * * the device so we must change state to wait * for it */ - sci_change_state(&sci_req->sm, SCI_REQ_STP_UDMA_WAIT_D2H); + sci_change_state(&ireq->sm, SCI_REQ_STP_UDMA_WAIT_D2H); } break; @@ -1983,12 +1969,12 @@ static enum sci_status stp_request_udma_await_tc_event(struct scic_sds_request * case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_R_ERR): case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CMD_LL_R_ERR): case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CRC_ERR): - scic_sds_remote_device_suspend(sci_req->target_device, + scic_sds_remote_device_suspend(ireq->target_device, SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code))); /* Fall through to the default case */ default: /* All other completion status cause the IO to be complete. */ - scic_sds_stp_request_udma_complete_request(sci_req, + scic_sds_stp_request_udma_complete_request(ireq, SCU_NORMALIZE_COMPLETION_STATUS(completion_code), SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); break; @@ -1998,15 +1984,15 @@ static enum sci_status stp_request_udma_await_tc_event(struct scic_sds_request * } static enum sci_status -stp_request_soft_reset_await_h2d_asserted_tc_event(struct scic_sds_request *sci_req, +stp_request_soft_reset_await_h2d_asserted_tc_event(struct isci_request *ireq, u32 completion_code) { switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): - scic_sds_request_set_status(sci_req, SCU_TASK_DONE_GOOD, + scic_sds_request_set_status(ireq, SCU_TASK_DONE_GOOD, SCI_SUCCESS); - sci_change_state(&sci_req->sm, SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG); + sci_change_state(&ireq->sm, SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG); break; default: @@ -2015,11 +2001,11 @@ stp_request_soft_reset_await_h2d_asserted_tc_event(struct scic_sds_request *sci_ * If a NAK was received, then it is up to the user to retry * the request. 
*/ - scic_sds_request_set_status(sci_req, + scic_sds_request_set_status(ireq, SCU_NORMALIZE_COMPLETION_STATUS(completion_code), SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); - sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED); + sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); break; } @@ -2027,15 +2013,15 @@ stp_request_soft_reset_await_h2d_asserted_tc_event(struct scic_sds_request *sci_ } static enum sci_status -stp_request_soft_reset_await_h2d_diagnostic_tc_event(struct scic_sds_request *sci_req, +stp_request_soft_reset_await_h2d_diagnostic_tc_event(struct isci_request *ireq, u32 completion_code) { switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): - scic_sds_request_set_status(sci_req, SCU_TASK_DONE_GOOD, + scic_sds_request_set_status(ireq, SCU_TASK_DONE_GOOD, SCI_SUCCESS); - sci_change_state(&sci_req->sm, SCI_REQ_STP_SOFT_RESET_WAIT_D2H); + sci_change_state(&ireq->sm, SCI_REQ_STP_SOFT_RESET_WAIT_D2H); break; default: @@ -2043,11 +2029,11 @@ stp_request_soft_reset_await_h2d_diagnostic_tc_event(struct scic_sds_request *sc * a NAK was received, then it is up to the user to retry the * request. */ - scic_sds_request_set_status(sci_req, + scic_sds_request_set_status(ireq, SCU_NORMALIZE_COMPLETION_STATUS(completion_code), SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); - sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED); + sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); break; } @@ -2055,54 +2041,54 @@ stp_request_soft_reset_await_h2d_diagnostic_tc_event(struct scic_sds_request *sc } enum sci_status -scic_sds_io_request_tc_completion(struct scic_sds_request *sci_req, +scic_sds_io_request_tc_completion(struct isci_request *ireq, u32 completion_code) { enum sci_base_request_states state; - struct scic_sds_controller *scic = sci_req->owning_controller; + struct scic_sds_controller *scic = ireq->owning_controller; - state = sci_req->sm.current_state_id; + state = ireq->sm.current_state_id; switch (state) { case SCI_REQ_STARTED: - return request_started_state_tc_event(sci_req, completion_code); + return request_started_state_tc_event(ireq, completion_code); case SCI_REQ_TASK_WAIT_TC_COMP: - return ssp_task_request_await_tc_event(sci_req, + return ssp_task_request_await_tc_event(ireq, completion_code); case SCI_REQ_SMP_WAIT_RESP: - return smp_request_await_response_tc_event(sci_req, + return smp_request_await_response_tc_event(ireq, completion_code); case SCI_REQ_SMP_WAIT_TC_COMP: - return smp_request_await_tc_event(sci_req, completion_code); + return smp_request_await_tc_event(ireq, completion_code); case SCI_REQ_STP_UDMA_WAIT_TC_COMP: - return stp_request_udma_await_tc_event(sci_req, + return stp_request_udma_await_tc_event(ireq, completion_code); case SCI_REQ_STP_NON_DATA_WAIT_H2D: - return stp_request_non_data_await_h2d_tc_event(sci_req, + return stp_request_non_data_await_h2d_tc_event(ireq, completion_code); case SCI_REQ_STP_PIO_WAIT_H2D: - return stp_request_pio_await_h2d_completion_tc_event(sci_req, + return stp_request_pio_await_h2d_completion_tc_event(ireq, completion_code); case SCI_REQ_STP_PIO_DATA_OUT: - return pio_data_out_tx_done_tc_event(sci_req, completion_code); + return pio_data_out_tx_done_tc_event(ireq, completion_code); case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED: - return stp_request_soft_reset_await_h2d_asserted_tc_event(sci_req, + return stp_request_soft_reset_await_h2d_asserted_tc_event(ireq, completion_code); case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG: - return stp_request_soft_reset_await_h2d_diagnostic_tc_event(sci_req, + 
return stp_request_soft_reset_await_h2d_diagnostic_tc_event(ireq, completion_code); case SCI_REQ_ABORTING: - return request_aborting_state_tc_event(sci_req, + return request_aborting_state_tc_event(ireq, completion_code); default: @@ -2201,7 +2187,7 @@ static void isci_request_handle_controller_specific_errors( { unsigned int cstatus; - cstatus = request->sci.scu_status; + cstatus = request->scu_status; dev_dbg(&request->isci_host->pdev->dev, "%s: %p SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR " @@ -2640,13 +2626,13 @@ static void isci_request_io_request_complete(struct isci_host *isci_host, task); if (sas_protocol_ata(task->task_proto)) { - resp_buf = &request->sci.stp.rsp; + resp_buf = &request->stp.rsp; isci_request_process_stp_response(task, resp_buf); } else if (SAS_PROTOCOL_SSP == task->task_proto) { /* crack the iu response buffer. */ - resp_iu = &request->sci.ssp.rsp; + resp_iu = &request->ssp.rsp; isci_request_process_response_iu(task, resp_iu, &isci_host->pdev->dev); @@ -2677,7 +2663,7 @@ static void isci_request_io_request_complete(struct isci_host *isci_host, set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); if (task->task_proto == SAS_PROTOCOL_SMP) { - void *rsp = &request->sci.smp.rsp; + void *rsp = &request->smp.rsp; dev_dbg(&isci_host->pdev->dev, "%s: SMP protocol completion\n", @@ -2693,7 +2679,7 @@ static void isci_request_io_request_complete(struct isci_host *isci_host, * There is a possibility that less data than * the maximum was transferred. */ - u32 transferred_length = sci_req_tx_bytes(&request->sci); + u32 transferred_length = sci_req_tx_bytes(request); task->task_status.residual = task->total_xfer_len - transferred_length; @@ -2851,8 +2837,8 @@ static void isci_request_io_request_complete(struct isci_host *isci_host, /* complete the io request to the core. 
*/ scic_controller_complete_io(&isci_host->sci, - request->sci.target_device, - &request->sci); + request->target_device, + request); isci_put_device(idev); /* set terminated handle so it cannot be completed or @@ -2864,9 +2850,8 @@ static void isci_request_io_request_complete(struct isci_host *isci_host, static void scic_sds_request_started_state_enter(struct sci_base_state_machine *sm) { - struct scic_sds_request *sci_req = container_of(sm, typeof(*sci_req), sm); - struct isci_request *ireq = sci_req_to_ireq(sci_req); - struct domain_device *dev = sci_dev_to_domain(sci_req->target_device); + struct isci_request *ireq = container_of(sm, typeof(*ireq), sm); + struct domain_device *dev = sci_dev_to_domain(ireq->target_device); struct sas_task *task; /* XXX as hch said always creating an internal sas_task for tmf @@ -2902,66 +2887,65 @@ static void scic_sds_request_started_state_enter(struct sci_base_state_machine * static void scic_sds_request_completed_state_enter(struct sci_base_state_machine *sm) { - struct scic_sds_request *sci_req = container_of(sm, typeof(*sci_req), sm); - struct scic_sds_controller *scic = sci_req->owning_controller; + struct isci_request *ireq = container_of(sm, typeof(*ireq), sm); + struct scic_sds_controller *scic = ireq->owning_controller; struct isci_host *ihost = scic_to_ihost(scic); - struct isci_request *ireq = sci_req_to_ireq(sci_req); /* Tell the SCI_USER that the IO request is complete */ if (!test_bit(IREQ_TMF, &ireq->flags)) isci_request_io_request_complete(ihost, ireq, - sci_req->sci_status); + ireq->sci_status); else - isci_task_request_complete(ihost, ireq, sci_req->sci_status); + isci_task_request_complete(ihost, ireq, ireq->sci_status); } static void scic_sds_request_aborting_state_enter(struct sci_base_state_machine *sm) { - struct scic_sds_request *sci_req = container_of(sm, typeof(*sci_req), sm); + struct isci_request *ireq = container_of(sm, typeof(*ireq), sm); /* Setting the abort bit in the Task Context is required by the silicon. 
*/ - sci_req->tc->abort = 1; + ireq->tc->abort = 1; } static void scic_sds_stp_request_started_non_data_await_h2d_completion_enter(struct sci_base_state_machine *sm) { - struct scic_sds_request *sci_req = container_of(sm, typeof(*sci_req), sm); + struct isci_request *ireq = container_of(sm, typeof(*ireq), sm); - scic_sds_remote_device_set_working_request(sci_req->target_device, - sci_req); + scic_sds_remote_device_set_working_request(ireq->target_device, + ireq); } static void scic_sds_stp_request_started_pio_await_h2d_completion_enter(struct sci_base_state_machine *sm) { - struct scic_sds_request *sci_req = container_of(sm, typeof(*sci_req), sm); + struct isci_request *ireq = container_of(sm, typeof(*ireq), sm); - scic_sds_remote_device_set_working_request(sci_req->target_device, - sci_req); + scic_sds_remote_device_set_working_request(ireq->target_device, + ireq); } static void scic_sds_stp_request_started_soft_reset_await_h2d_asserted_completion_enter(struct sci_base_state_machine *sm) { - struct scic_sds_request *sci_req = container_of(sm, typeof(*sci_req), sm); + struct isci_request *ireq = container_of(sm, typeof(*ireq), sm); - scic_sds_remote_device_set_working_request(sci_req->target_device, - sci_req); + scic_sds_remote_device_set_working_request(ireq->target_device, + ireq); } static void scic_sds_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter(struct sci_base_state_machine *sm) { - struct scic_sds_request *sci_req = container_of(sm, typeof(*sci_req), sm); - struct scu_task_context *tc = sci_req->tc; + struct isci_request *ireq = container_of(sm, typeof(*ireq), sm); + struct scu_task_context *tc = ireq->tc; struct host_to_dev_fis *h2d_fis; enum sci_status status; /* Clear the SRST bit */ - h2d_fis = &sci_req->stp.cmd; + h2d_fis = &ireq->stp.cmd; h2d_fis->control = 0; /* Clear the TC control bit */ tc->control_frame = 0; - status = scic_controller_continue_io(sci_req); + status = scic_controller_continue_io(ireq); WARN_ONCE(status != SCI_SUCCESS, "isci: continue io failure\n"); } @@ -3006,29 +2990,29 @@ static const struct sci_base_state scic_sds_request_state_table[] = { static void scic_sds_general_request_construct(struct scic_sds_controller *scic, struct scic_sds_remote_device *sci_dev, - struct scic_sds_request *sci_req) + struct isci_request *ireq) { - sci_init_sm(&sci_req->sm, scic_sds_request_state_table, SCI_REQ_INIT); + sci_init_sm(&ireq->sm, scic_sds_request_state_table, SCI_REQ_INIT); - sci_req->target_device = sci_dev; - sci_req->protocol = SCIC_NO_PROTOCOL; - sci_req->saved_rx_frame_index = SCU_INVALID_FRAME_INDEX; + ireq->target_device = sci_dev; + ireq->protocol = SCIC_NO_PROTOCOL; + ireq->saved_rx_frame_index = SCU_INVALID_FRAME_INDEX; - sci_req->sci_status = SCI_SUCCESS; - sci_req->scu_status = 0; - sci_req->post_context = 0xFFFFFFFF; + ireq->sci_status = SCI_SUCCESS; + ireq->scu_status = 0; + ireq->post_context = 0xFFFFFFFF; } static enum sci_status scic_io_request_construct(struct scic_sds_controller *scic, struct scic_sds_remote_device *sci_dev, - struct scic_sds_request *sci_req) + struct isci_request *ireq) { struct domain_device *dev = sci_dev_to_domain(sci_dev); enum sci_status status = SCI_SUCCESS; /* Build the common part of the request */ - scic_sds_general_request_construct(scic, sci_dev, sci_req); + scic_sds_general_request_construct(scic, sci_dev, ireq); if (sci_dev->rnc.remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) return SCI_FAILURE_INVALID_REMOTE_DEVICE; @@ -3036,31 +3020,31 @@ 
scic_io_request_construct(struct scic_sds_controller *scic, if (dev->dev_type == SAS_END_DEV) /* pass */; else if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) - memset(&sci_req->stp.cmd, 0, sizeof(sci_req->stp.cmd)); + memset(&ireq->stp.cmd, 0, sizeof(ireq->stp.cmd)); else if (dev_is_expander(dev)) /* pass */; else return SCI_FAILURE_UNSUPPORTED_PROTOCOL; - memset(sci_req->tc, 0, offsetof(struct scu_task_context, sgl_pair_ab)); + memset(ireq->tc, 0, offsetof(struct scu_task_context, sgl_pair_ab)); return status; } enum sci_status scic_task_request_construct(struct scic_sds_controller *scic, struct scic_sds_remote_device *sci_dev, - u16 io_tag, struct scic_sds_request *sci_req) + u16 io_tag, struct isci_request *ireq) { struct domain_device *dev = sci_dev_to_domain(sci_dev); enum sci_status status = SCI_SUCCESS; /* Build the common part of the request */ - scic_sds_general_request_construct(scic, sci_dev, sci_req); + scic_sds_general_request_construct(scic, sci_dev, ireq); if (dev->dev_type == SAS_END_DEV || dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) { - set_bit(IREQ_TMF, &sci_req_to_ireq(sci_req)->flags); - memset(sci_req->tc, 0, sizeof(struct scu_task_context)); + set_bit(IREQ_TMF, &ireq->flags); + memset(ireq->tc, 0, sizeof(struct scu_task_context)); } else status = SCI_FAILURE_UNSUPPORTED_PROTOCOL; @@ -3076,7 +3060,7 @@ static enum sci_status isci_request_ssp_request_construct( "%s: request = %p\n", __func__, request); - status = scic_io_request_construct_basic_ssp(&request->sci); + status = scic_io_request_construct_basic_ssp(request); return status; } @@ -3097,7 +3081,7 @@ static enum sci_status isci_request_stp_request_construct( */ register_fis = isci_sata_task_to_fis_copy(task); - status = scic_io_request_construct_basic_sata(&request->sci); + status = scic_io_request_construct_basic_sata(request); /* Set the ncq tag in the fis, from the queue * command in the task. @@ -3115,7 +3099,7 @@ static enum sci_status isci_request_stp_request_construct( static enum sci_status scic_io_request_construct_smp(struct device *dev, - struct scic_sds_request *sci_req, + struct isci_request *ireq, struct sas_task *task) { struct scatterlist *sg = &task->smp_task.smp_req; @@ -3158,14 +3142,14 @@ scic_io_request_construct_smp(struct device *dev, if (!dma_map_sg(dev, sg, 1, DMA_TO_DEVICE)) return SCI_FAILURE; - sci_req->protocol = SCIC_SMP_PROTOCOL; + ireq->protocol = SCIC_SMP_PROTOCOL; /* byte swap the smp request. */ - task_context = sci_req->tc; + task_context = ireq->tc; - sci_dev = scic_sds_request_get_device(sci_req); - sci_port = scic_sds_request_get_port(sci_req); + sci_dev = scic_sds_request_get_device(ireq); + sci_port = scic_sds_request_get_port(ireq); /* * Fill in the TC with the its required data @@ -3217,12 +3201,12 @@ scic_io_request_construct_smp(struct device *dev, */ task_context->task_phase = 0; - sci_req->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC | + ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC | (scic_sds_controller_get_protocol_engine_group(scic) << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) | (scic_sds_port_get_index(sci_port) << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) | - ISCI_TAG_TCI(sci_req->io_tag)); + ISCI_TAG_TCI(ireq->io_tag)); /* * Copy the physical address for the command buffer to the SCU Task * Context command buffer should not contain command header. 
@@ -3234,7 +3218,7 @@ scic_io_request_construct_smp(struct device *dev, task_context->response_iu_upper = 0; task_context->response_iu_lower = 0; - sci_change_state(&sci_req->sm, SCI_REQ_CONSTRUCTED); + sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED); return SCI_SUCCESS; } @@ -3250,10 +3234,9 @@ static enum sci_status isci_smp_request_build(struct isci_request *ireq) { struct sas_task *task = isci_request_access_task(ireq); struct device *dev = &ireq->isci_host->pdev->dev; - struct scic_sds_request *sci_req = &ireq->sci; enum sci_status status = SCI_FAILURE; - status = scic_io_request_construct_smp(dev, sci_req, task); + status = scic_io_request_construct_smp(dev, ireq, task); if (status != SCI_SUCCESS) dev_warn(&ireq->isci_host->pdev->dev, "%s: failed with status = %d\n", @@ -3309,7 +3292,7 @@ static enum sci_status isci_io_request_build(struct isci_host *isci_host, } status = scic_io_request_construct(&isci_host->sci, sci_device, - &request->sci); + request); if (status != SCI_SUCCESS) { dev_warn(&isci_host->pdev->dev, @@ -3344,7 +3327,7 @@ static struct isci_request *isci_request_from_tag(struct isci_host *ihost, u16 t struct isci_request *ireq; ireq = ihost->reqs[ISCI_TAG_TCI(tag)]; - ireq->sci.io_tag = tag; + ireq->io_tag = tag; ireq->io_request_completion = NULL; ireq->flags = 0; ireq->num_sg_entries = 0; @@ -3416,14 +3399,14 @@ int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *ide */ status = scic_controller_start_task(&ihost->sci, &idev->sci, - &ireq->sci); + ireq); } else { status = SCI_FAILURE; } } else { /* send the request, let the core assign the IO TAG. */ status = scic_controller_start_io(&ihost->sci, &idev->sci, - &ireq->sci); + ireq); } if (status != SCI_SUCCESS && @@ -3446,8 +3429,6 @@ int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *ide list_add(&ireq->dev_node, &idev->reqs_in_process); if (status == SCI_SUCCESS) { - /* Save the tag for possible task mgmt later. */ - ireq->io_tag = ireq->sci.io_tag; isci_request_change_state(ireq, started); } else { /* The request did not really start in the diff --git a/drivers/scsi/isci/request.h b/drivers/scsi/isci/request.h index 7fd98531d1f..68d8a27357e 100644 --- a/drivers/scsi/isci/request.h +++ b/drivers/scsi/isci/request.h @@ -93,7 +93,7 @@ enum sci_request_protocol { * isci_stp_request - extra request infrastructure to handle pio/atapi protocol * @pio_len - number of bytes requested at PIO setup * @status - pio setup ending status value to tell us if we need - * to wait for another fis or if the transfer is complete. Upon + * to wait for another fis or if the transfer is complete. Upon * receipt of a d2h fis this will be the status field of that fis. * @sgl - track pio transfer progress as we iterate through the sgl * @device_cdb_len - atapi device advertises it's transfer constraints at setup @@ -110,69 +110,55 @@ struct isci_stp_request { u32 device_cdb_len; }; -struct scic_sds_request { - /* - * This field contains the information for the base request state - * machine. 
+struct isci_request { + enum isci_request_status status; + #define IREQ_COMPLETE_IN_TARGET 0 + #define IREQ_TERMINATED 1 + #define IREQ_TMF 2 + #define IREQ_ACTIVE 3 + unsigned long flags; + /* XXX kill ttype and ttype_ptr, allocate full sas_task */ + enum task_type ttype; + union ttype_ptr_union { + struct sas_task *io_task_ptr; /* When ttype==io_task */ + struct isci_tmf *tmf_task_ptr; /* When ttype==tmf_task */ + } ttype_ptr; + struct isci_host *isci_host; + /* For use in the requests_to_{complete|abort} lists: */ + struct list_head completed_node; + /* For use in the reqs_in_process list: */ + struct list_head dev_node; + spinlock_t state_lock; + dma_addr_t request_daddr; + dma_addr_t zero_scatter_daddr; + unsigned int num_sg_entries; + /* Note: "io_request_completion" is completed in two different ways + * depending on whether this is a TMF or regular request. + * - TMF requests are completed in the thread that started them; + * - regular requests are completed in the request completion callback + * function. + * This difference in operation allows the aborter of a TMF request + * to be sure that once the TMF request completes, the I/O that the + * TMF was aborting is guaranteed to have completed. + * + * XXX kill io_request_completion */ + struct completion *io_request_completion; struct sci_base_state_machine sm; - - /* - * This field simply points to the controller to which this IO request - * is associated. - */ struct scic_sds_controller *owning_controller; - - /* - * This field simply points to the remote device to which this IO - * request is associated. - */ struct scic_sds_remote_device *target_device; - - /* - * This field indicates the IO tag for this request. The IO tag is - * comprised of the task_index and a sequence count. The sequence count - * is utilized to help identify tasks from one life to another. - */ u16 io_tag; - - /* - * This field specifies the protocol being utilized for this - * IO request. - */ enum sci_request_protocol protocol; - - /* - * This field indicates the completion status taken from the SCUs - * completion code. It indicates the completion result for the SCU - * hardware. - */ - u32 scu_status; - - /* - * This field indicates the completion status returned to the SCI user. - * It indicates the users view of the io request completion. - */ - u32 sci_status; - - /* - * This field contains the value to be utilized when posting - * (e.g. Post_TC, * Post_TC_Abort) this request to the silicon. - */ + u32 scu_status; /* hardware result */ + u32 sci_status; /* upper layer disposition */ u32 post_context; - struct scu_task_context *tc; - /* could be larger with sg chaining */ #define SCU_SGL_SIZE ((SCI_MAX_SCATTER_GATHER_ELEMENTS + 1) / 2) struct scu_sgl_element_pair sg_table[SCU_SGL_SIZE] __attribute__ ((aligned(32))); - - /* - * This field is a pointer to the stored rx frame data. It is used in + /* This field is a pointer to the stored rx frame data. It is used in * STP internal requests and SMP response frames. If this field is * non-NULL the saved frame must be released on IO request completion. - * - * @todo In the future do we want to keep a list of RX frame buffers? 
*/ u32 saved_rx_frame_index; @@ -187,11 +173,9 @@ struct scic_sds_request { u8 rsp_buf[SSP_RESP_IU_MAX_SIZE]; }; } ssp; - struct { struct smp_resp rsp; } smp; - struct { struct isci_stp_request req; struct host_to_dev_fis cmd; @@ -200,56 +184,11 @@ struct scic_sds_request { }; }; -static inline struct scic_sds_request *to_sci_req(struct isci_stp_request *stp_req) -{ - struct scic_sds_request *sci_req; - - sci_req = container_of(stp_req, typeof(*sci_req), stp.req); - return sci_req; -} - -struct isci_request { - enum isci_request_status status; - enum task_type ttype; - unsigned short io_tag; - #define IREQ_COMPLETE_IN_TARGET 0 - #define IREQ_TERMINATED 1 - #define IREQ_TMF 2 - #define IREQ_ACTIVE 3 - unsigned long flags; - - union ttype_ptr_union { - struct sas_task *io_task_ptr; /* When ttype==io_task */ - struct isci_tmf *tmf_task_ptr; /* When ttype==tmf_task */ - } ttype_ptr; - struct isci_host *isci_host; - /* For use in the requests_to_{complete|abort} lists: */ - struct list_head completed_node; - /* For use in the reqs_in_process list: */ - struct list_head dev_node; - spinlock_t state_lock; - dma_addr_t request_daddr; - dma_addr_t zero_scatter_daddr; - - unsigned int num_sg_entries; /* returned by pci_alloc_sg */ - - /** Note: "io_request_completion" is completed in two different ways - * depending on whether this is a TMF or regular request. - * - TMF requests are completed in the thread that started them; - * - regular requests are completed in the request completion callback - * function. - * This difference in operation allows the aborter of a TMF request - * to be sure that once the TMF request completes, the I/O that the - * TMF was aborting is guaranteed to have completed. - */ - struct completion *io_request_completion; - struct scic_sds_request sci; -}; - -static inline struct isci_request *sci_req_to_ireq(struct scic_sds_request *sci_req) +static inline struct isci_request *to_ireq(struct isci_stp_request *stp_req) { - struct isci_request *ireq = container_of(sci_req, typeof(*ireq), sci); + struct isci_request *ireq; + ireq = container_of(stp_req, typeof(*ireq), stp.req); return ireq; } @@ -366,32 +305,32 @@ enum sci_base_request_states { * * This macro will return the controller for this io request object */ -#define scic_sds_request_get_controller(sci_req) \ - ((sci_req)->owning_controller) +#define scic_sds_request_get_controller(ireq) \ + ((ireq)->owning_controller) /** * scic_sds_request_get_device() - * * This macro will return the device for this io request object */ -#define scic_sds_request_get_device(sci_req) \ - ((sci_req)->target_device) +#define scic_sds_request_get_device(ireq) \ + ((ireq)->target_device) /** * scic_sds_request_get_port() - * * This macro will return the port for this io request object */ -#define scic_sds_request_get_port(sci_req) \ - scic_sds_remote_device_get_port(scic_sds_request_get_device(sci_req)) +#define scic_sds_request_get_port(ireq) \ + scic_sds_remote_device_get_port(scic_sds_request_get_device(ireq)) /** * scic_sds_request_get_post_context() - * * This macro returns the constructed post context result for the io request. 
*/ -#define scic_sds_request_get_post_context(sci_req) \ - ((sci_req)->post_context) +#define scic_sds_request_get_post_context(ireq) \ + ((ireq)->post_context) /** * scic_sds_request_get_task_context() - @@ -413,26 +352,25 @@ enum sci_base_request_states { (request)->sci_status = (sci_status_code); \ } -enum sci_status scic_sds_request_start(struct scic_sds_request *sci_req); -enum sci_status scic_sds_io_request_terminate(struct scic_sds_request *sci_req); +enum sci_status scic_sds_request_start(struct isci_request *ireq); +enum sci_status scic_sds_io_request_terminate(struct isci_request *ireq); enum sci_status -scic_sds_io_request_event_handler(struct scic_sds_request *sci_req, +scic_sds_io_request_event_handler(struct isci_request *ireq, u32 event_code); enum sci_status -scic_sds_io_request_frame_handler(struct scic_sds_request *sci_req, +scic_sds_io_request_frame_handler(struct isci_request *ireq, u32 frame_index); enum sci_status -scic_sds_task_request_terminate(struct scic_sds_request *sci_req); +scic_sds_task_request_terminate(struct isci_request *ireq); extern enum sci_status -scic_sds_request_complete(struct scic_sds_request *sci_req); +scic_sds_request_complete(struct isci_request *ireq); extern enum sci_status -scic_sds_io_request_tc_completion(struct scic_sds_request *sci_req, u32 code); +scic_sds_io_request_tc_completion(struct isci_request *ireq, u32 code); /* XXX open code in caller */ static inline dma_addr_t -scic_io_request_get_dma_addr(struct scic_sds_request *sci_req, void *virt_addr) +scic_io_request_get_dma_addr(struct isci_request *ireq, void *virt_addr) { - struct isci_request *ireq = sci_req_to_ireq(sci_req); char *requested_addr = (char *)virt_addr; char *base_addr = (char *)ireq; @@ -565,14 +503,14 @@ enum sci_status scic_task_request_construct(struct scic_sds_controller *scic, struct scic_sds_remote_device *sci_dev, u16 io_tag, - struct scic_sds_request *sci_req); + struct isci_request *ireq); enum sci_status -scic_task_request_construct_ssp(struct scic_sds_request *sci_req); +scic_task_request_construct_ssp(struct isci_request *ireq); enum sci_status -scic_task_request_construct_sata(struct scic_sds_request *sci_req); +scic_task_request_construct_sata(struct isci_request *ireq); void -scic_stp_io_request_set_ncq_tag(struct scic_sds_request *sci_req, u16 ncq_tag); -void scic_sds_smp_request_copy_response(struct scic_sds_request *sci_req); +scic_stp_io_request_set_ncq_tag(struct isci_request *ireq, u16 ncq_tag); +void scic_sds_smp_request_copy_response(struct isci_request *ireq); static inline int isci_task_is_ncq_recovery(struct sas_task *task) { diff --git a/drivers/scsi/isci/sata.c b/drivers/scsi/isci/sata.c index e7ce4692446..87d8cc1a6e3 100644 --- a/drivers/scsi/isci/sata.c +++ b/drivers/scsi/isci/sata.c @@ -70,7 +70,7 @@ struct host_to_dev_fis *isci_sata_task_to_fis_copy(struct sas_task *task) { struct isci_request *ireq = task->lldd_task; - struct host_to_dev_fis *fis = &ireq->sci.stp.cmd; + struct host_to_dev_fis *fis = &ireq->stp.cmd; memcpy(fis, &task->ata_task.fis, sizeof(struct host_to_dev_fis)); @@ -116,7 +116,7 @@ void isci_sata_set_ncq_tag( struct isci_request *request = task->lldd_task; register_fis->sector_count = qc->tag << 3; - scic_stp_io_request_set_ncq_tag(&request->sci, qc->tag); + scic_stp_io_request_set_ncq_tag(request, qc->tag); } /** @@ -154,7 +154,6 @@ void isci_request_process_stp_response(struct sas_task *task, enum sci_status isci_sata_management_task_request_build(struct isci_request *ireq) { - struct scic_sds_request *sci_req = 
&ireq->sci; struct isci_tmf *isci_tmf; enum sci_status status; @@ -167,7 +166,7 @@ enum sci_status isci_sata_management_task_request_build(struct isci_request *ire case isci_tmf_sata_srst_high: case isci_tmf_sata_srst_low: { - struct host_to_dev_fis *fis = &sci_req->stp.cmd; + struct host_to_dev_fis *fis = &ireq->stp.cmd; memset(fis, 0, sizeof(*fis)); @@ -188,7 +187,7 @@ enum sci_status isci_sata_management_task_request_build(struct isci_request *ire /* core builds the protocol specific request * based on the h2d fis. */ - status = scic_task_request_construct_sata(&ireq->sci); + status = scic_task_request_construct_sata(ireq); return status; } diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c index d2dba835489..700708c8267 100644 --- a/drivers/scsi/isci/task.c +++ b/drivers/scsi/isci/task.c @@ -258,7 +258,7 @@ static struct isci_request *isci_task_request_build(struct isci_host *ihost, /* let the core do it's construct. */ status = scic_task_request_construct(&ihost->sci, &idev->sci, tag, - &ireq->sci); + ireq); if (status != SCI_SUCCESS) { dev_warn(&ihost->pdev->dev, @@ -272,7 +272,7 @@ static struct isci_request *isci_task_request_build(struct isci_host *ihost, /* XXX convert to get this from task->tproto like other drivers */ if (dev->dev_type == SAS_END_DEV) { isci_tmf->proto = SAS_PROTOCOL_SSP; - status = scic_task_request_construct_ssp(&ireq->sci); + status = scic_task_request_construct_ssp(ireq); if (status != SCI_SUCCESS) return NULL; } @@ -337,7 +337,7 @@ int isci_task_execute_tmf(struct isci_host *ihost, /* start the TMF io. */ status = scic_controller_start_task(&ihost->sci, sci_device, - &ireq->sci); + ireq); if (status != SCI_TASK_SUCCESS) { dev_warn(&ihost->pdev->dev, @@ -371,7 +371,7 @@ int isci_task_execute_tmf(struct isci_host *ihost, scic_controller_terminate_request(&ihost->sci, &isci_device->sci, - &ireq->sci); + ireq); spin_unlock_irqrestore(&ihost->scic_lock, flags); @@ -565,7 +565,7 @@ static void isci_terminate_request_core( status = scic_controller_terminate_request( &isci_host->sci, &isci_device->sci, - &isci_request->sci); + isci_request); } spin_unlock_irqrestore(&isci_host->scic_lock, flags); @@ -1235,7 +1235,6 @@ isci_task_request_complete(struct isci_host *ihost, { struct isci_tmf *tmf = isci_request_access_tmf(ireq); struct completion *tmf_complete; - struct scic_sds_request *sci_req = &ireq->sci; dev_dbg(&ihost->pdev->dev, "%s: request = %p, status=%d\n", @@ -1248,18 +1247,18 @@ isci_task_request_complete(struct isci_host *ihost, if (tmf->proto == SAS_PROTOCOL_SSP) { memcpy(&tmf->resp.resp_iu, - &sci_req->ssp.rsp, + &ireq->ssp.rsp, SSP_RESP_IU_MAX_SIZE); } else if (tmf->proto == SAS_PROTOCOL_SATA) { memcpy(&tmf->resp.d2h_fis, - &sci_req->stp.rsp, + &ireq->stp.rsp, sizeof(struct dev_to_host_fis)); } /* PRINT_TMF( ((struct isci_tmf *)request->task)); */ tmf_complete = tmf->complete; - scic_controller_complete_io(&ihost->sci, ireq->sci.target_device, &ireq->sci); + scic_controller_complete_io(&ihost->sci, ireq->target_device, ireq); /* set the 'terminated' flag handle to make sure it cannot be terminated * or completed again. */ -- cgit v1.2.3-70-g09d2 From 78a6f06e0e82125787d7aa308fe28c2c8381540c Mon Sep 17 00:00:00 2001 From: Dan Williams <dan.j.williams@intel.com> Date: Thu, 30 Jun 2011 16:31:37 -0700 Subject: isci: unify isci_remote_device and scic_sds_remote_device Remove the distinction between these two implementations and unify on isci_remote_device (local instances named idev). 
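The refactoring removes the "embedded core object plus container_of back-pointer"
idiom: isci_remote_device previously wrapped a struct scic_sds_remote_device member
named 'sci', and helpers such as sci_dev_to_idev() recovered the wrapper from the
embedded core struct (visible in the hunks below, e.g. kref_get(&sci_dev_to_idev(sci_dev)->kref)
becoming kref_get(&idev->kref)). What follows is a minimal, stand-alone sketch of the
idiom being removed and of the flattened form it is replaced with; the type and field
names are illustrative stand-ins, not the driver's actual definitions.

#include <stdio.h>
#include <stddef.h>

/* Generic container_of, in the style used throughout the kernel sources. */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

/* Old layout: core state embedded inside the libsas-facing wrapper. */
struct core_dev {                   /* stand-in for scic_sds_remote_device */
        int remote_node_index;
};

struct wrapper_dev {                /* stand-in for isci_remote_device */
        int kref;                   /* placeholder reference count */
        struct core_dev sci;        /* embedded core object */
};

/* Conversion helper in the style of the removed sci_dev_to_idev(). */
static struct wrapper_dev *core_to_wrapper(struct core_dev *cdev)
{
        return container_of(cdev, struct wrapper_dev, sci);
}

int main(void)
{
        struct wrapper_dev idev = { .kref = 1, .sci = { .remote_node_index = 5 } };
        struct core_dev *cdev = &idev.sci;

        /* Old style: core code passed around cdev and converted back to the
         * wrapper whenever it needed wrapper state.
         */
        printf("kref via container_of: %d\n", core_to_wrapper(cdev)->kref);

        /* Unified style: one struct, one pointer, no conversion helper and no
         * ->sci hop at the call sites.
         */
        printf("kref direct:           %d\n", idev.kref);
        return 0;
}

Collapsing the two structures drops the conversion helpers and the extra ->sci
indirection at every call site, which is what the largely mechanical sci_dev to
idev renames in the hunks below reflect.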
Reported-by: Christoph Hellwig <hch@lst.de> Signed-off-by: Dan Williams <dan.j.williams@intel.com> --- drivers/scsi/isci/host.c | 60 ++--- drivers/scsi/isci/host.h | 27 +-- drivers/scsi/isci/port.c | 4 +- drivers/scsi/isci/port.h | 6 +- drivers/scsi/isci/remote_device.c | 396 ++++++++++++++++---------------- drivers/scsi/isci/remote_device.h | 148 ++++-------- drivers/scsi/isci/remote_node_context.c | 48 ++-- drivers/scsi/isci/remote_node_context.h | 2 +- drivers/scsi/isci/request.c | 60 +++-- drivers/scsi/isci/request.h | 4 +- drivers/scsi/isci/task.c | 47 ++-- 11 files changed, 365 insertions(+), 437 deletions(-) (limited to 'drivers/scsi/isci/request.h') diff --git a/drivers/scsi/isci/host.c b/drivers/scsi/isci/host.c index 4e11f9e6d76..45d7f71c609 100644 --- a/drivers/scsi/isci/host.c +++ b/drivers/scsi/isci/host.c @@ -272,7 +272,7 @@ static void scic_sds_controller_sdma_completion(struct scic_sds_controller *scic { u32 index; struct isci_request *ireq; - struct scic_sds_remote_device *device; + struct isci_remote_device *idev; index = SCU_GET_COMPLETION_INDEX(completion_entry); @@ -289,9 +289,9 @@ static void scic_sds_controller_sdma_completion(struct scic_sds_controller *scic case SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_RNC: case SCU_CONTEXT_COMMAND_REQUEST_TYPE_OTHER_RNC: case SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_RNC: - device = scic->device_table[index]; + idev = scic->device_table[index]; dev_warn(scic_to_dev(scic), "%s: %x for device %p\n", - __func__, completion_entry, device); + __func__, completion_entry, idev); /* @todo For a port RNC operation we need to fail the * device */ @@ -312,7 +312,7 @@ static void scic_sds_controller_unsolicited_frame(struct scic_sds_controller *sc struct isci_host *ihost = scic_to_ihost(scic); struct scu_unsolicited_frame_header *frame_header; struct isci_phy *iphy; - struct scic_sds_remote_device *device; + struct isci_remote_device *idev; enum sci_status result = SCI_FAILURE; @@ -348,12 +348,12 @@ static void scic_sds_controller_unsolicited_frame(struct scic_sds_controller *sc result = scic_sds_phy_frame_handler(iphy, frame_index); } else { if (index < scic->remote_node_entries) - device = scic->device_table[index]; + idev = scic->device_table[index]; else - device = NULL; + idev = NULL; - if (device != NULL) - result = scic_sds_remote_device_frame_handler(device, frame_index); + if (idev != NULL) + result = scic_sds_remote_device_frame_handler(idev, frame_index); else scic_sds_controller_release_frame(scic, frame_index); } @@ -370,7 +370,7 @@ static void scic_sds_controller_event_completion(struct scic_sds_controller *sci u32 completion_entry) { struct isci_host *ihost = scic_to_ihost(scic); - struct scic_sds_remote_device *device; + struct isci_remote_device *idev; struct isci_request *ireq; struct isci_phy *iphy; u32 index; @@ -426,9 +426,9 @@ static void scic_sds_controller_event_completion(struct scic_sds_controller *sci break; case SCU_EVENT_SPECIFIC_IT_NEXUS_TIMEOUT: - device = scic->device_table[index]; - if (device != NULL) - scic_sds_remote_device_event_handler(device, completion_entry); + idev = scic->device_table[index]; + if (idev != NULL) + scic_sds_remote_device_event_handler(idev, completion_entry); else dev_warn(scic_to_dev(scic), "%s: SCIC Controller 0x%p received " @@ -460,10 +460,10 @@ static void scic_sds_controller_event_completion(struct scic_sds_controller *sci case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX: case SCU_EVENT_TYPE_RNC_OPS_MISC: if (index < scic->remote_node_entries) { - device = scic->device_table[index]; + idev = 
scic->device_table[index]; - if (device != NULL) - scic_sds_remote_device_event_handler(device, completion_entry); + if (idev != NULL) + scic_sds_remote_device_event_handler(idev, completion_entry); } else dev_err(scic_to_dev(scic), "%s: SCIC Controller 0x%p received event 0x%x " @@ -2549,13 +2549,13 @@ static bool scic_sds_controller_has_remote_devices_stopping( * object that the remote device has stopped. */ void scic_sds_controller_remote_device_stopped(struct scic_sds_controller *scic, - struct scic_sds_remote_device *sci_dev) + struct isci_remote_device *idev) { if (scic->sm.current_state_id != SCIC_STOPPING) { dev_dbg(scic_to_dev(scic), "SCIC Controller 0x%p remote device stopped event " "from device 0x%p in unexpected state %d\n", - scic, sci_dev, + scic, idev, scic->sm.current_state_id); return; } @@ -2622,18 +2622,18 @@ struct isci_request *scic_request_by_tag(struct scic_sds_controller *scic, u16 i */ enum sci_status scic_sds_controller_allocate_remote_node_context( struct scic_sds_controller *scic, - struct scic_sds_remote_device *sci_dev, + struct isci_remote_device *idev, u16 *node_id) { u16 node_index; - u32 remote_node_count = scic_sds_remote_device_node_count(sci_dev); + u32 remote_node_count = scic_sds_remote_device_node_count(idev); node_index = scic_sds_remote_node_table_allocate_remote_node( &scic->available_remote_nodes, remote_node_count ); if (node_index != SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) { - scic->device_table[node_index] = sci_dev; + scic->device_table[node_index] = idev; *node_id = node_index; @@ -2654,12 +2654,12 @@ enum sci_status scic_sds_controller_allocate_remote_node_context( */ void scic_sds_controller_free_remote_node_context( struct scic_sds_controller *scic, - struct scic_sds_remote_device *sci_dev, + struct isci_remote_device *idev, u16 node_id) { - u32 remote_node_count = scic_sds_remote_device_node_count(sci_dev); + u32 remote_node_count = scic_sds_remote_device_node_count(idev); - if (scic->device_table[node_id] == sci_dev) { + if (scic->device_table[node_id] == idev) { scic->device_table[node_id] = NULL; scic_sds_remote_node_table_release_remote_node_index( @@ -2798,7 +2798,7 @@ enum sci_status isci_free_tag(struct isci_host *ihost, u16 io_tag) * user desires to be utilized for this request. 
*/ enum sci_status scic_controller_start_io(struct scic_sds_controller *scic, - struct scic_sds_remote_device *rdev, + struct isci_remote_device *idev, struct isci_request *ireq) { enum sci_status status; @@ -2808,7 +2808,7 @@ enum sci_status scic_controller_start_io(struct scic_sds_controller *scic, return SCI_FAILURE_INVALID_STATE; } - status = scic_sds_remote_device_start_io(scic, rdev, ireq); + status = scic_sds_remote_device_start_io(scic, idev, ireq); if (status != SCI_SUCCESS) return status; @@ -2835,7 +2835,7 @@ enum sci_status scic_controller_start_io(struct scic_sds_controller *scic, */ enum sci_status scic_controller_terminate_request( struct scic_sds_controller *scic, - struct scic_sds_remote_device *rdev, + struct isci_remote_device *idev, struct isci_request *ireq) { enum sci_status status; @@ -2873,7 +2873,7 @@ enum sci_status scic_controller_terminate_request( */ enum sci_status scic_controller_complete_io( struct scic_sds_controller *scic, - struct scic_sds_remote_device *rdev, + struct isci_remote_device *idev, struct isci_request *ireq) { enum sci_status status; @@ -2884,7 +2884,7 @@ enum sci_status scic_controller_complete_io( /* XXX: Implement this function */ return SCI_FAILURE; case SCIC_READY: - status = scic_sds_remote_device_complete_io(scic, rdev, ireq); + status = scic_sds_remote_device_complete_io(scic, idev, ireq); if (status != SCI_SUCCESS) return status; @@ -2923,7 +2923,7 @@ enum sci_status scic_controller_continue_io(struct isci_request *ireq) */ enum sci_task_status scic_controller_start_task( struct scic_sds_controller *scic, - struct scic_sds_remote_device *rdev, + struct isci_remote_device *idev, struct isci_request *ireq) { enum sci_status status; @@ -2936,7 +2936,7 @@ enum sci_task_status scic_controller_start_task( return SCI_TASK_FAILURE_INVALID_STATE; } - status = scic_sds_remote_device_start_task(scic, rdev, ireq); + status = scic_sds_remote_device_start_task(scic, idev, ireq); switch (status) { case SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS: set_bit(IREQ_ACTIVE, &ireq->flags); diff --git a/drivers/scsi/isci/host.h b/drivers/scsi/isci/host.h index fb8048e5fce..ca2e3b0ee0d 100644 --- a/drivers/scsi/isci/host.h +++ b/drivers/scsi/isci/host.h @@ -163,7 +163,7 @@ struct scic_sds_controller { * objects that need to handle device completion notifications from the * hardware. The table is RNi based. 
*/ - struct scic_sds_remote_device *device_table[SCI_MAX_REMOTE_DEVICES]; + struct isci_remote_device *device_table[SCI_MAX_REMOTE_DEVICES]; /** * This field is the free RNi data structure @@ -488,12 +488,12 @@ static inline struct isci_host *scic_to_ihost(struct scic_sds_controller *scic) #define ISCI_TAG_TCI(tag) ((tag) & (SCI_MAX_IO_REQUESTS-1)) /* expander attached sata devices require 3 rnc slots */ -static inline int scic_sds_remote_device_node_count(struct scic_sds_remote_device *sci_dev) +static inline int scic_sds_remote_device_node_count(struct isci_remote_device *idev) { - struct domain_device *dev = sci_dev_to_domain(sci_dev); + struct domain_device *dev = idev->domain_dev; if ((dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) && - !sci_dev->is_direct_attached) + !idev->is_direct_attached) return SCU_STP_REMOTE_NODE_COUNT; return SCU_SSP_REMOTE_NODE_COUNT; } @@ -541,11 +541,8 @@ static inline struct device *sciport_to_dev(struct isci_port *iport) return &iport->isci_host->pdev->dev; } -static inline struct device *scirdev_to_dev(struct scic_sds_remote_device *sci_dev) +static inline struct device *scirdev_to_dev(struct isci_remote_device *idev) { - struct isci_remote_device *idev = - container_of(sci_dev, typeof(*idev), sci); - if (!idev || !idev->isci_port || !idev->isci_port->isci_host) return NULL; @@ -589,11 +586,11 @@ void scic_sds_controller_copy_sata_response(void *response_buffer, void *frame_header, void *frame_buffer); enum sci_status scic_sds_controller_allocate_remote_node_context(struct scic_sds_controller *scic, - struct scic_sds_remote_device *sci_dev, + struct isci_remote_device *idev, u16 *node_id); void scic_sds_controller_free_remote_node_context( struct scic_sds_controller *scic, - struct scic_sds_remote_device *sci_dev, + struct isci_remote_device *idev, u16 node_id); union scu_remote_node_context *scic_sds_controller_get_remote_node_context_buffer( struct scic_sds_controller *scic, @@ -622,7 +619,7 @@ void scic_sds_controller_link_down( void scic_sds_controller_remote_device_stopped( struct scic_sds_controller *scic, - struct scic_sds_remote_device *sci_dev); + struct isci_remote_device *idev); void scic_sds_controller_copy_task_context( struct scic_sds_controller *scic, @@ -662,22 +659,22 @@ void scic_controller_disable_interrupts( enum sci_status scic_controller_start_io( struct scic_sds_controller *scic, - struct scic_sds_remote_device *remote_device, + struct isci_remote_device *idev, struct isci_request *ireq); enum sci_task_status scic_controller_start_task( struct scic_sds_controller *scic, - struct scic_sds_remote_device *remote_device, + struct isci_remote_device *idev, struct isci_request *ireq); enum sci_status scic_controller_terminate_request( struct scic_sds_controller *scic, - struct scic_sds_remote_device *remote_device, + struct isci_remote_device *idev, struct isci_request *ireq); enum sci_status scic_controller_complete_io( struct scic_sds_controller *scic, - struct scic_sds_remote_device *remote_device, + struct isci_remote_device *idev, struct isci_request *ireq); void scic_sds_port_configuration_agent_construct( diff --git a/drivers/scsi/isci/port.c b/drivers/scsi/isci/port.c index 04591882ee7..df37b1bf7d1 100644 --- a/drivers/scsi/isci/port.c +++ b/drivers/scsi/isci/port.c @@ -1541,7 +1541,7 @@ enum sci_status scic_sds_port_link_down(struct isci_port *iport, } enum sci_status scic_sds_port_start_io(struct isci_port *iport, - struct scic_sds_remote_device *sci_dev, + struct isci_remote_device *idev, struct isci_request 
*ireq) { enum scic_sds_port_states state; @@ -1561,7 +1561,7 @@ enum sci_status scic_sds_port_start_io(struct isci_port *iport, } enum sci_status scic_sds_port_complete_io(struct isci_port *iport, - struct scic_sds_remote_device *sci_dev, + struct isci_remote_device *idev, struct isci_request *ireq) { enum scic_sds_port_states state; diff --git a/drivers/scsi/isci/port.h b/drivers/scsi/isci/port.h index cdea48ece00..b9bc89bf651 100644 --- a/drivers/scsi/isci/port.h +++ b/drivers/scsi/isci/port.h @@ -283,15 +283,15 @@ enum sci_status scic_sds_port_link_down(struct isci_port *iport, struct isci_phy *iphy); struct isci_request; -struct scic_sds_remote_device; +struct isci_remote_device; enum sci_status scic_sds_port_start_io( struct isci_port *iport, - struct scic_sds_remote_device *sci_dev, + struct isci_remote_device *idev, struct isci_request *ireq); enum sci_status scic_sds_port_complete_io( struct isci_port *iport, - struct scic_sds_remote_device *sci_dev, + struct isci_remote_device *idev, struct isci_request *ireq); enum sas_linkrate scic_sds_port_get_max_allowed_speed( diff --git a/drivers/scsi/isci/remote_device.c b/drivers/scsi/isci/remote_device.c index 627cf731bad..3b0234049a3 100644 --- a/drivers/scsi/isci/remote_device.c +++ b/drivers/scsi/isci/remote_device.c @@ -93,7 +93,7 @@ static void isci_remote_device_not_ready(struct isci_host *ihost, __func__, idev, ireq); scic_controller_terminate_request(&ihost->sci, - &idev->sci, + idev, ireq); } /* Fall through into the default case... */ @@ -127,15 +127,15 @@ static void isci_remote_device_ready(struct isci_host *ihost, struct isci_remote */ static void rnc_destruct_done(void *_dev) { - struct scic_sds_remote_device *sci_dev = _dev; + struct isci_remote_device *idev = _dev; - BUG_ON(sci_dev->started_request_count != 0); - sci_change_state(&sci_dev->sm, SCI_DEV_STOPPED); + BUG_ON(idev->started_request_count != 0); + sci_change_state(&idev->sm, SCI_DEV_STOPPED); } -static enum sci_status scic_sds_remote_device_terminate_requests(struct scic_sds_remote_device *sci_dev) +static enum sci_status scic_sds_remote_device_terminate_requests(struct isci_remote_device *idev) { - struct scic_sds_controller *scic = sci_dev->owning_port->owning_controller; + struct scic_sds_controller *scic = idev->owning_port->owning_controller; struct isci_host *ihost = scic_to_ihost(scic); enum sci_status status = SCI_SUCCESS; u32 i; @@ -145,10 +145,10 @@ static enum sci_status scic_sds_remote_device_terminate_requests(struct scic_sds enum sci_status s; if (!test_bit(IREQ_ACTIVE, &ireq->flags) || - ireq->target_device != sci_dev) + ireq->target_device != idev) continue; - s = scic_controller_terminate_request(scic, sci_dev, ireq); + s = scic_controller_terminate_request(scic, idev, ireq); if (s != SCI_SUCCESS) status = s; } @@ -156,10 +156,10 @@ static enum sci_status scic_sds_remote_device_terminate_requests(struct scic_sds return status; } -enum sci_status scic_remote_device_stop(struct scic_sds_remote_device *sci_dev, +enum sci_status scic_remote_device_stop(struct isci_remote_device *idev, u32 timeout) { - struct sci_base_state_machine *sm = &sci_dev->sm; + struct sci_base_state_machine *sm = &idev->sm; enum scic_sds_remote_device_states state = sm->current_state_id; switch (state) { @@ -167,16 +167,16 @@ enum sci_status scic_remote_device_stop(struct scic_sds_remote_device *sci_dev, case SCI_DEV_FAILED: case SCI_DEV_FINAL: default: - dev_warn(scirdev_to_dev(sci_dev), "%s: in wrong state: %d\n", + dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n", 
__func__, state); return SCI_FAILURE_INVALID_STATE; case SCI_DEV_STOPPED: return SCI_SUCCESS; case SCI_DEV_STARTING: /* device not started so there had better be no requests */ - BUG_ON(sci_dev->started_request_count != 0); - scic_sds_remote_node_context_destruct(&sci_dev->rnc, - rnc_destruct_done, sci_dev); + BUG_ON(idev->started_request_count != 0); + scic_sds_remote_node_context_destruct(&idev->rnc, + rnc_destruct_done, idev); /* Transition to the stopping state and wait for the * remote node to complete being posted and invalidated. */ @@ -191,28 +191,28 @@ enum sci_status scic_remote_device_stop(struct scic_sds_remote_device *sci_dev, case SCI_SMP_DEV_IDLE: case SCI_SMP_DEV_CMD: sci_change_state(sm, SCI_DEV_STOPPING); - if (sci_dev->started_request_count == 0) { - scic_sds_remote_node_context_destruct(&sci_dev->rnc, - rnc_destruct_done, sci_dev); + if (idev->started_request_count == 0) { + scic_sds_remote_node_context_destruct(&idev->rnc, + rnc_destruct_done, idev); return SCI_SUCCESS; } else - return scic_sds_remote_device_terminate_requests(sci_dev); + return scic_sds_remote_device_terminate_requests(idev); break; case SCI_DEV_STOPPING: /* All requests should have been terminated, but if there is an * attempt to stop a device already in the stopping state, then * try again to terminate. */ - return scic_sds_remote_device_terminate_requests(sci_dev); + return scic_sds_remote_device_terminate_requests(idev); case SCI_DEV_RESETTING: sci_change_state(sm, SCI_DEV_STOPPING); return SCI_SUCCESS; } } -enum sci_status scic_remote_device_reset(struct scic_sds_remote_device *sci_dev) +enum sci_status scic_remote_device_reset(struct isci_remote_device *idev) { - struct sci_base_state_machine *sm = &sci_dev->sm; + struct sci_base_state_machine *sm = &idev->sm; enum scic_sds_remote_device_states state = sm->current_state_id; switch (state) { @@ -226,7 +226,7 @@ enum sci_status scic_remote_device_reset(struct scic_sds_remote_device *sci_dev) case SCI_DEV_RESETTING: case SCI_DEV_FINAL: default: - dev_warn(scirdev_to_dev(sci_dev), "%s: in wrong state: %d\n", + dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n", __func__, state); return SCI_FAILURE_INVALID_STATE; case SCI_DEV_READY: @@ -240,13 +240,13 @@ enum sci_status scic_remote_device_reset(struct scic_sds_remote_device *sci_dev) } } -enum sci_status scic_remote_device_reset_complete(struct scic_sds_remote_device *sci_dev) +enum sci_status scic_remote_device_reset_complete(struct isci_remote_device *idev) { - struct sci_base_state_machine *sm = &sci_dev->sm; + struct sci_base_state_machine *sm = &idev->sm; enum scic_sds_remote_device_states state = sm->current_state_id; if (state != SCI_DEV_RESETTING) { - dev_warn(scirdev_to_dev(sci_dev), "%s: in wrong state: %d\n", + dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n", __func__, state); return SCI_FAILURE_INVALID_STATE; } @@ -255,28 +255,28 @@ enum sci_status scic_remote_device_reset_complete(struct scic_sds_remote_device return SCI_SUCCESS; } -enum sci_status scic_sds_remote_device_suspend(struct scic_sds_remote_device *sci_dev, +enum sci_status scic_sds_remote_device_suspend(struct isci_remote_device *idev, u32 suspend_type) { - struct sci_base_state_machine *sm = &sci_dev->sm; + struct sci_base_state_machine *sm = &idev->sm; enum scic_sds_remote_device_states state = sm->current_state_id; if (state != SCI_STP_DEV_CMD) { - dev_warn(scirdev_to_dev(sci_dev), "%s: in wrong state: %d\n", + dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n", __func__, state); return 
SCI_FAILURE_INVALID_STATE; } - return scic_sds_remote_node_context_suspend(&sci_dev->rnc, + return scic_sds_remote_node_context_suspend(&idev->rnc, suspend_type, NULL, NULL); } -enum sci_status scic_sds_remote_device_frame_handler(struct scic_sds_remote_device *sci_dev, +enum sci_status scic_sds_remote_device_frame_handler(struct isci_remote_device *idev, u32 frame_index) { - struct sci_base_state_machine *sm = &sci_dev->sm; + struct sci_base_state_machine *sm = &idev->sm; enum scic_sds_remote_device_states state = sm->current_state_id; - struct scic_sds_controller *scic = sci_dev->owning_port->owning_controller; + struct scic_sds_controller *scic = idev->owning_port->owning_controller; enum sci_status status; switch (state) { @@ -287,7 +287,7 @@ enum sci_status scic_sds_remote_device_frame_handler(struct scic_sds_remote_devi case SCI_SMP_DEV_IDLE: case SCI_DEV_FINAL: default: - dev_warn(scirdev_to_dev(sci_dev), "%s: in wrong state: %d\n", + dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n", __func__, state); /* Return the frame back to the controller */ scic_sds_controller_release_frame(scic, frame_index); @@ -313,7 +313,7 @@ enum sci_status scic_sds_remote_device_frame_handler(struct scic_sds_remote_devi sci_swab32_cpy(&hdr, frame_header, word_cnt); ireq = scic_request_by_tag(scic, be16_to_cpu(hdr.tag)); - if (ireq && ireq->target_device == sci_dev) { + if (ireq && ireq->target_device == idev) { /* The IO request is now in charge of releasing the frame */ status = scic_sds_io_request_frame_handler(ireq, frame_index); } else { @@ -335,7 +335,7 @@ enum sci_status scic_sds_remote_device_frame_handler(struct scic_sds_remote_devi if (hdr->fis_type == FIS_SETDEVBITS && (hdr->status & ATA_ERR)) { - sci_dev->not_ready_reason = SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED; + idev->not_ready_reason = SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED; /* TODO Check sactive and complete associated IO if any. */ sci_change_state(sm, SCI_STP_DEV_NCQ_ERROR); @@ -345,8 +345,8 @@ enum sci_status scic_sds_remote_device_frame_handler(struct scic_sds_remote_devi * Some devices return D2H FIS when an NCQ error is detected. * Treat this like an SDB error FIS ready reason. */ - sci_dev->not_ready_reason = SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED; - sci_change_state(&sci_dev->sm, SCI_STP_DEV_NCQ_ERROR); + idev->not_ready_reason = SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED; + sci_change_state(&idev->sm, SCI_STP_DEV_NCQ_ERROR); } else status = SCI_FAILURE; @@ -359,17 +359,17 @@ enum sci_status scic_sds_remote_device_frame_handler(struct scic_sds_remote_devi * in this state. All unsolicited frames are forwarded to the io request * object. 
*/ - status = scic_sds_io_request_frame_handler(sci_dev->working_request, frame_index); + status = scic_sds_io_request_frame_handler(idev->working_request, frame_index); break; } return status; } -static bool is_remote_device_ready(struct scic_sds_remote_device *sci_dev) +static bool is_remote_device_ready(struct isci_remote_device *idev) { - struct sci_base_state_machine *sm = &sci_dev->sm; + struct sci_base_state_machine *sm = &idev->sm; enum scic_sds_remote_device_states state = sm->current_state_id; switch (state) { @@ -387,10 +387,10 @@ static bool is_remote_device_ready(struct scic_sds_remote_device *sci_dev) } } -enum sci_status scic_sds_remote_device_event_handler(struct scic_sds_remote_device *sci_dev, +enum sci_status scic_sds_remote_device_event_handler(struct isci_remote_device *idev, u32 event_code) { - struct sci_base_state_machine *sm = &sci_dev->sm; + struct sci_base_state_machine *sm = &idev->sm; enum scic_sds_remote_device_states state = sm->current_state_id; enum sci_status status; @@ -398,21 +398,21 @@ enum sci_status scic_sds_remote_device_event_handler(struct scic_sds_remote_devi case SCU_EVENT_TYPE_RNC_OPS_MISC: case SCU_EVENT_TYPE_RNC_SUSPEND_TX: case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX: - status = scic_sds_remote_node_context_event_handler(&sci_dev->rnc, event_code); + status = scic_sds_remote_node_context_event_handler(&idev->rnc, event_code); break; case SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT: if (scu_get_event_code(event_code) == SCU_EVENT_IT_NEXUS_TIMEOUT) { status = SCI_SUCCESS; /* Suspend the associated RNC */ - scic_sds_remote_node_context_suspend(&sci_dev->rnc, + scic_sds_remote_node_context_suspend(&idev->rnc, SCI_SOFTWARE_SUSPENSION, NULL, NULL); - dev_dbg(scirdev_to_dev(sci_dev), + dev_dbg(scirdev_to_dev(idev), "%s: device: %p event code: %x: %s\n", - __func__, sci_dev, event_code, - is_remote_device_ready(sci_dev) + __func__, idev, event_code, + is_remote_device_ready(idev) ? "I_T_Nexus_Timeout event" : "I_T_Nexus_Timeout event in wrong state"); @@ -420,10 +420,10 @@ enum sci_status scic_sds_remote_device_event_handler(struct scic_sds_remote_devi } /* Else, fall through and treat as unhandled... */ default: - dev_dbg(scirdev_to_dev(sci_dev), + dev_dbg(scirdev_to_dev(idev), "%s: device: %p event code: %x: %s\n", - __func__, sci_dev, event_code, - is_remote_device_ready(sci_dev) + __func__, idev, event_code, + is_remote_device_ready(idev) ? 
"unexpected event" : "unexpected event in wrong state"); status = SCI_FAILURE_INVALID_STATE; @@ -440,34 +440,34 @@ enum sci_status scic_sds_remote_device_event_handler(struct scic_sds_remote_devi */ if (scu_get_event_type(event_code) == SCU_EVENT_TYPE_RNC_SUSPEND_TX || scu_get_event_type(event_code) == SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX) - status = scic_sds_remote_node_context_resume(&sci_dev->rnc, NULL, NULL); + status = scic_sds_remote_node_context_resume(&idev->rnc, NULL, NULL); } return status; } -static void scic_sds_remote_device_start_request(struct scic_sds_remote_device *sci_dev, +static void scic_sds_remote_device_start_request(struct isci_remote_device *idev, struct isci_request *ireq, enum sci_status status) { - struct isci_port *iport = sci_dev->owning_port; + struct isci_port *iport = idev->owning_port; /* cleanup requests that failed after starting on the port */ if (status != SCI_SUCCESS) - scic_sds_port_complete_io(iport, sci_dev, ireq); + scic_sds_port_complete_io(iport, idev, ireq); else { - kref_get(&sci_dev_to_idev(sci_dev)->kref); - scic_sds_remote_device_increment_request_count(sci_dev); + kref_get(&idev->kref); + scic_sds_remote_device_increment_request_count(idev); } } enum sci_status scic_sds_remote_device_start_io(struct scic_sds_controller *scic, - struct scic_sds_remote_device *sci_dev, + struct isci_remote_device *idev, struct isci_request *ireq) { - struct sci_base_state_machine *sm = &sci_dev->sm; + struct sci_base_state_machine *sm = &idev->sm; enum scic_sds_remote_device_states state = sm->current_state_id; - struct isci_port *iport = sci_dev->owning_port; + struct isci_port *iport = idev->owning_port; enum sci_status status; switch (state) { @@ -480,7 +480,7 @@ enum sci_status scic_sds_remote_device_start_io(struct scic_sds_controller *scic case SCI_DEV_RESETTING: case SCI_DEV_FINAL: default: - dev_warn(scirdev_to_dev(sci_dev), "%s: in wrong state: %d\n", + dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n", __func__, state); return SCI_FAILURE_INVALID_STATE; case SCI_DEV_READY: @@ -489,11 +489,11 @@ enum sci_status scic_sds_remote_device_start_io(struct scic_sds_controller *scic * successful it will start the request for the port object then * increment its own request count. 
*/ - status = scic_sds_port_start_io(iport, sci_dev, ireq); + status = scic_sds_port_start_io(iport, idev, ireq); if (status != SCI_SUCCESS) return status; - status = scic_sds_remote_node_context_start_io(&sci_dev->rnc, ireq); + status = scic_sds_remote_node_context_start_io(&idev->rnc, ireq); if (status != SCI_SUCCESS) break; @@ -511,11 +511,11 @@ enum sci_status scic_sds_remote_device_start_io(struct scic_sds_controller *scic enum scic_sds_remote_device_states new_state; struct sas_task *task = isci_request_access_task(ireq); - status = scic_sds_port_start_io(iport, sci_dev, ireq); + status = scic_sds_port_start_io(iport, idev, ireq); if (status != SCI_SUCCESS) return status; - status = scic_sds_remote_node_context_start_io(&sci_dev->rnc, ireq); + status = scic_sds_remote_node_context_start_io(&idev->rnc, ireq); if (status != SCI_SUCCESS) break; @@ -526,7 +526,7 @@ enum sci_status scic_sds_remote_device_start_io(struct scic_sds_controller *scic if (task->ata_task.use_ncq) new_state = SCI_STP_DEV_NCQ; else { - sci_dev->working_request = ireq; + idev->working_request = ireq; new_state = SCI_STP_DEV_CMD; } sci_change_state(sm, new_state); @@ -536,11 +536,11 @@ enum sci_status scic_sds_remote_device_start_io(struct scic_sds_controller *scic struct sas_task *task = isci_request_access_task(ireq); if (task->ata_task.use_ncq) { - status = scic_sds_port_start_io(iport, sci_dev, ireq); + status = scic_sds_port_start_io(iport, idev, ireq); if (status != SCI_SUCCESS) return status; - status = scic_sds_remote_node_context_start_io(&sci_dev->rnc, ireq); + status = scic_sds_remote_node_context_start_io(&idev->rnc, ireq); if (status != SCI_SUCCESS) break; @@ -552,11 +552,11 @@ enum sci_status scic_sds_remote_device_start_io(struct scic_sds_controller *scic case SCI_STP_DEV_AWAIT_RESET: return SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED; case SCI_SMP_DEV_IDLE: - status = scic_sds_port_start_io(iport, sci_dev, ireq); + status = scic_sds_port_start_io(iport, idev, ireq); if (status != SCI_SUCCESS) return status; - status = scic_sds_remote_node_context_start_io(&sci_dev->rnc, ireq); + status = scic_sds_remote_node_context_start_io(&idev->rnc, ireq); if (status != SCI_SUCCESS) break; @@ -564,8 +564,8 @@ enum sci_status scic_sds_remote_device_start_io(struct scic_sds_controller *scic if (status != SCI_SUCCESS) break; - sci_dev->working_request = ireq; - sci_change_state(&sci_dev->sm, SCI_SMP_DEV_CMD); + idev->working_request = ireq; + sci_change_state(&idev->sm, SCI_SMP_DEV_CMD); break; case SCI_STP_DEV_CMD: case SCI_SMP_DEV_CMD: @@ -575,12 +575,12 @@ enum sci_status scic_sds_remote_device_start_io(struct scic_sds_controller *scic return SCI_FAILURE_INVALID_STATE; } - scic_sds_remote_device_start_request(sci_dev, ireq, status); + scic_sds_remote_device_start_request(idev, ireq, status); return status; } static enum sci_status common_complete_io(struct isci_port *iport, - struct scic_sds_remote_device *sci_dev, + struct isci_remote_device *idev, struct isci_request *ireq) { enum sci_status status; @@ -589,21 +589,21 @@ static enum sci_status common_complete_io(struct isci_port *iport, if (status != SCI_SUCCESS) return status; - status = scic_sds_port_complete_io(iport, sci_dev, ireq); + status = scic_sds_port_complete_io(iport, idev, ireq); if (status != SCI_SUCCESS) return status; - scic_sds_remote_device_decrement_request_count(sci_dev); + scic_sds_remote_device_decrement_request_count(idev); return status; } enum sci_status scic_sds_remote_device_complete_io(struct scic_sds_controller *scic, - struct 
scic_sds_remote_device *sci_dev, + struct isci_remote_device *idev, struct isci_request *ireq) { - struct sci_base_state_machine *sm = &sci_dev->sm; + struct sci_base_state_machine *sm = &idev->sm; enum scic_sds_remote_device_states state = sm->current_state_id; - struct isci_port *iport = sci_dev->owning_port; + struct isci_port *iport = idev->owning_port; enum sci_status status; switch (state) { @@ -615,18 +615,18 @@ enum sci_status scic_sds_remote_device_complete_io(struct scic_sds_controller *s case SCI_DEV_FAILED: case SCI_DEV_FINAL: default: - dev_warn(scirdev_to_dev(sci_dev), "%s: in wrong state: %d\n", + dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n", __func__, state); return SCI_FAILURE_INVALID_STATE; case SCI_DEV_READY: case SCI_STP_DEV_AWAIT_RESET: case SCI_DEV_RESETTING: - status = common_complete_io(iport, sci_dev, ireq); + status = common_complete_io(iport, idev, ireq); break; case SCI_STP_DEV_CMD: case SCI_STP_DEV_NCQ: case SCI_STP_DEV_NCQ_ERROR: - status = common_complete_io(iport, sci_dev, ireq); + status = common_complete_io(iport, idev, ireq); if (status != SCI_SUCCESS) break; @@ -637,54 +637,54 @@ enum sci_status scic_sds_remote_device_complete_io(struct scic_sds_controller *s * status of "DEVICE_RESET_REQUIRED", instead of "INVALID STATE". */ sci_change_state(sm, SCI_STP_DEV_AWAIT_RESET); - } else if (scic_sds_remote_device_get_request_count(sci_dev) == 0) + } else if (scic_sds_remote_device_get_request_count(idev) == 0) sci_change_state(sm, SCI_STP_DEV_IDLE); break; case SCI_SMP_DEV_CMD: - status = common_complete_io(iport, sci_dev, ireq); + status = common_complete_io(iport, idev, ireq); if (status != SCI_SUCCESS) break; sci_change_state(sm, SCI_SMP_DEV_IDLE); break; case SCI_DEV_STOPPING: - status = common_complete_io(iport, sci_dev, ireq); + status = common_complete_io(iport, idev, ireq); if (status != SCI_SUCCESS) break; - if (scic_sds_remote_device_get_request_count(sci_dev) == 0) - scic_sds_remote_node_context_destruct(&sci_dev->rnc, + if (scic_sds_remote_device_get_request_count(idev) == 0) + scic_sds_remote_node_context_destruct(&idev->rnc, rnc_destruct_done, - sci_dev); + idev); break; } if (status != SCI_SUCCESS) - dev_err(scirdev_to_dev(sci_dev), + dev_err(scirdev_to_dev(idev), "%s: Port:0x%p Device:0x%p Request:0x%p Status:0x%x " "could not complete\n", __func__, iport, - sci_dev, ireq, status); + idev, ireq, status); else - isci_put_device(sci_dev_to_idev(sci_dev)); + isci_put_device(idev); return status; } static void scic_sds_remote_device_continue_request(void *dev) { - struct scic_sds_remote_device *sci_dev = dev; + struct isci_remote_device *idev = dev; /* we need to check if this request is still valid to continue. 
*/ - if (sci_dev->working_request) - scic_controller_continue_io(sci_dev->working_request); + if (idev->working_request) + scic_controller_continue_io(idev->working_request); } enum sci_status scic_sds_remote_device_start_task(struct scic_sds_controller *scic, - struct scic_sds_remote_device *sci_dev, + struct isci_remote_device *idev, struct isci_request *ireq) { - struct sci_base_state_machine *sm = &sci_dev->sm; + struct sci_base_state_machine *sm = &idev->sm; enum scic_sds_remote_device_states state = sm->current_state_id; - struct isci_port *iport = sci_dev->owning_port; + struct isci_port *iport = idev->owning_port; enum sci_status status; switch (state) { @@ -698,7 +698,7 @@ enum sci_status scic_sds_remote_device_start_task(struct scic_sds_controller *sc case SCI_DEV_RESETTING: case SCI_DEV_FINAL: default: - dev_warn(scirdev_to_dev(sci_dev), "%s: in wrong state: %d\n", + dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n", __func__, state); return SCI_FAILURE_INVALID_STATE; case SCI_STP_DEV_IDLE: @@ -706,11 +706,11 @@ enum sci_status scic_sds_remote_device_start_task(struct scic_sds_controller *sc case SCI_STP_DEV_NCQ: case SCI_STP_DEV_NCQ_ERROR: case SCI_STP_DEV_AWAIT_RESET: - status = scic_sds_port_start_io(iport, sci_dev, ireq); + status = scic_sds_port_start_io(iport, idev, ireq); if (status != SCI_SUCCESS) return status; - status = scic_sds_remote_node_context_start_task(&sci_dev->rnc, ireq); + status = scic_sds_remote_node_context_start_task(&idev->rnc, ireq); if (status != SCI_SUCCESS) goto out; @@ -722,7 +722,7 @@ enum sci_status scic_sds_remote_device_start_task(struct scic_sds_controller *sc * replace the request that probably resulted in the task * management request. */ - sci_dev->working_request = ireq; + idev->working_request = ireq; sci_change_state(sm, SCI_STP_DEV_CMD); /* The remote node context must cleanup the TCi to NCQ mapping @@ -732,32 +732,32 @@ enum sci_status scic_sds_remote_device_start_task(struct scic_sds_controller *sc * the correct action when the remote node context is suspended * and later resumed. */ - scic_sds_remote_node_context_suspend(&sci_dev->rnc, + scic_sds_remote_node_context_suspend(&idev->rnc, SCI_SOFTWARE_SUSPENSION, NULL, NULL); - scic_sds_remote_node_context_resume(&sci_dev->rnc, + scic_sds_remote_node_context_resume(&idev->rnc, scic_sds_remote_device_continue_request, - sci_dev); + idev); out: - scic_sds_remote_device_start_request(sci_dev, ireq, status); + scic_sds_remote_device_start_request(idev, ireq, status); /* We need to let the controller start request handler know that * it can't post TC yet. We will provide a callback function to * post TC when RNC gets resumed. */ return SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS; case SCI_DEV_READY: - status = scic_sds_port_start_io(iport, sci_dev, ireq); + status = scic_sds_port_start_io(iport, idev, ireq); if (status != SCI_SUCCESS) return status; - status = scic_sds_remote_node_context_start_task(&sci_dev->rnc, ireq); + status = scic_sds_remote_node_context_start_task(&idev->rnc, ireq); if (status != SCI_SUCCESS) break; status = scic_sds_request_start(ireq); break; } - scic_sds_remote_device_start_request(sci_dev, ireq, status); + scic_sds_remote_device_start_request(idev, ireq, status); return status; } @@ -771,15 +771,15 @@ enum sci_status scic_sds_remote_device_start_task(struct scic_sds_controller *sc * request and then requests the controller to post the request. 
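The SATA task-management path above defers posting the task context until the remote node context has resumed, by handing the resume call a callback that later continues the held working request. The following stand-alone sketch shows only that callback pattern; all names here are hypothetical stand-ins, not the driver's API.

    #include <stdbool.h>

    typedef void (*resume_cb_t)(void *cookie);

    struct model_rnc {
            bool resumed;
            resume_cb_t cb;
            void *cookie;
    };

    struct stp_dev {
            struct model_rnc rnc;
            void *working_request;
    };

    /* placeholder for re-posting the task context to the hardware */
    static void continue_io(void *request) { (void)request; }

    /* remember who to call back once the hardware reports the resume */
    static void rnc_resume(struct model_rnc *rnc, resume_cb_t cb, void *cookie)
    {
            rnc->cb = cb;
            rnc->cookie = cookie;
    }

    /* resume-complete path: now it is safe to continue the held request */
    static void rnc_resume_complete(struct model_rnc *rnc)
    {
            rnc->resumed = true;
            if (rnc->cb)
                    rnc->cb(rnc->cookie);
    }

    /* the callback itself, equivalent in spirit to ..._continue_request() */
    static void model_continue_request(void *cookie)
    {
            struct stp_dev *dev = cookie;

            if (dev->working_request)
                    continue_io(dev->working_request);
    }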
none */ void scic_sds_remote_device_post_request( - struct scic_sds_remote_device *sci_dev, + struct isci_remote_device *idev, u32 request) { u32 context; - context = scic_sds_remote_device_build_command_context(sci_dev, request); + context = scic_sds_remote_device_build_command_context(idev, request); scic_sds_controller_post_request( - scic_sds_remote_device_get_controller(sci_dev), + scic_sds_remote_device_get_controller(idev), context ); } @@ -790,34 +790,33 @@ void scic_sds_remote_device_post_request( */ static void remote_device_resume_done(void *_dev) { - struct scic_sds_remote_device *sci_dev = _dev; + struct isci_remote_device *idev = _dev; - if (is_remote_device_ready(sci_dev)) + if (is_remote_device_ready(idev)) return; /* go 'ready' if we are not already in a ready state */ - sci_change_state(&sci_dev->sm, SCI_DEV_READY); + sci_change_state(&idev->sm, SCI_DEV_READY); } static void scic_sds_stp_remote_device_ready_idle_substate_resume_complete_handler(void *_dev) { - struct scic_sds_remote_device *sci_dev = _dev; - struct isci_remote_device *idev = sci_dev_to_idev(sci_dev); - struct scic_sds_controller *scic = sci_dev->owning_port->owning_controller; + struct isci_remote_device *idev = _dev; + struct scic_sds_controller *scic = idev->owning_port->owning_controller; /* For NCQ operation we do not issue a isci_remote_device_not_ready(). * As a result, avoid sending the ready notification. */ - if (sci_dev->sm.previous_state_id != SCI_STP_DEV_NCQ) + if (idev->sm.previous_state_id != SCI_STP_DEV_NCQ) isci_remote_device_ready(scic_to_ihost(scic), idev); } static void scic_sds_remote_device_initial_state_enter(struct sci_base_state_machine *sm) { - struct scic_sds_remote_device *sci_dev = container_of(sm, typeof(*sci_dev), sm); + struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); /* Initial state is a transitional state to the stopped state */ - sci_change_state(&sci_dev->sm, SCI_DEV_STOPPED); + sci_change_state(&idev->sm, SCI_DEV_STOPPED); } /** @@ -833,22 +832,22 @@ static void scic_sds_remote_device_initial_state_enter(struct sci_base_state_mac * device isn't valid (e.g. it's already been destoryed, the handle isn't * valid, etc.). 
*/ -static enum sci_status scic_remote_device_destruct(struct scic_sds_remote_device *sci_dev) +static enum sci_status scic_remote_device_destruct(struct isci_remote_device *idev) { - struct sci_base_state_machine *sm = &sci_dev->sm; + struct sci_base_state_machine *sm = &idev->sm; enum scic_sds_remote_device_states state = sm->current_state_id; struct scic_sds_controller *scic; if (state != SCI_DEV_STOPPED) { - dev_warn(scirdev_to_dev(sci_dev), "%s: in wrong state: %d\n", + dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n", __func__, state); return SCI_FAILURE_INVALID_STATE; } - scic = sci_dev->owning_port->owning_controller; - scic_sds_controller_free_remote_node_context(scic, sci_dev, - sci_dev->rnc.remote_node_index); - sci_dev->rnc.remote_node_index = SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX; + scic = idev->owning_port->owning_controller; + scic_sds_controller_free_remote_node_context(scic, idev, + idev->rnc.remote_node_index); + idev->rnc.remote_node_index = SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX; sci_change_state(sm, SCI_DEV_FINAL); return SCI_SUCCESS; @@ -871,34 +870,32 @@ static void isci_remote_device_deconstruct(struct isci_host *ihost, struct isci_ * io requests in process */ BUG_ON(!list_empty(&idev->reqs_in_process)); - scic_remote_device_destruct(&idev->sci); + scic_remote_device_destruct(idev); list_del_init(&idev->node); isci_put_device(idev); } static void scic_sds_remote_device_stopped_state_enter(struct sci_base_state_machine *sm) { - struct scic_sds_remote_device *sci_dev = container_of(sm, typeof(*sci_dev), sm); - struct scic_sds_controller *scic = sci_dev->owning_port->owning_controller; - struct isci_remote_device *idev = sci_dev_to_idev(sci_dev); + struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); + struct scic_sds_controller *scic = idev->owning_port->owning_controller; u32 prev_state; /* If we are entering from the stopping state let the SCI User know that * the stop operation has completed. 
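A brief sketch of the stopped-state entry logic described above: only an entry that came from the stopping state triggers the final teardown and the stopped notification. The helper names below are invented for illustration.

    enum dev_state { DEV_STOPPED, DEV_STOPPING, DEV_READY };

    struct model_sm {
            enum dev_state current_state;
            enum dev_state previous_state;
    };

    static void on_stopped_entry(struct model_sm *sm,
                                 void (*deconstruct)(void *), void *dev)
    {
            /* tear down only on a real STOPPING -> STOPPED transition */
            if (sm->previous_state == DEV_STOPPING)
                    deconstruct(dev);
            /* the driver then tells the controller the device has stopped */
    }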
*/ - prev_state = sci_dev->sm.previous_state_id; + prev_state = idev->sm.previous_state_id; if (prev_state == SCI_DEV_STOPPING) isci_remote_device_deconstruct(scic_to_ihost(scic), idev); - scic_sds_controller_remote_device_stopped(scic, sci_dev); + scic_sds_controller_remote_device_stopped(scic, idev); } static void scic_sds_remote_device_starting_state_enter(struct sci_base_state_machine *sm) { - struct scic_sds_remote_device *sci_dev = container_of(sm, typeof(*sci_dev), sm); - struct scic_sds_controller *scic = scic_sds_remote_device_get_controller(sci_dev); + struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); + struct scic_sds_controller *scic = scic_sds_remote_device_get_controller(idev); struct isci_host *ihost = scic_to_ihost(scic); - struct isci_remote_device *idev = sci_dev_to_idev(sci_dev); isci_remote_device_not_ready(ihost, idev, SCIC_REMOTE_DEVICE_NOT_READY_START_REQUESTED); @@ -906,27 +903,25 @@ static void scic_sds_remote_device_starting_state_enter(struct sci_base_state_ma static void scic_sds_remote_device_ready_state_enter(struct sci_base_state_machine *sm) { - struct scic_sds_remote_device *sci_dev = container_of(sm, typeof(*sci_dev), sm); - struct scic_sds_controller *scic = sci_dev->owning_port->owning_controller; - struct isci_remote_device *idev = sci_dev_to_idev(sci_dev); + struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); + struct scic_sds_controller *scic = idev->owning_port->owning_controller; struct domain_device *dev = idev->domain_dev; if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_SATA)) { - sci_change_state(&sci_dev->sm, SCI_STP_DEV_IDLE); + sci_change_state(&idev->sm, SCI_STP_DEV_IDLE); } else if (dev_is_expander(dev)) { - sci_change_state(&sci_dev->sm, SCI_SMP_DEV_IDLE); + sci_change_state(&idev->sm, SCI_SMP_DEV_IDLE); } else isci_remote_device_ready(scic_to_ihost(scic), idev); } static void scic_sds_remote_device_ready_state_exit(struct sci_base_state_machine *sm) { - struct scic_sds_remote_device *sci_dev = container_of(sm, typeof(*sci_dev), sm); - struct domain_device *dev = sci_dev_to_domain(sci_dev); + struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); + struct domain_device *dev = idev->domain_dev; if (dev->dev_type == SAS_END_DEV) { - struct scic_sds_controller *scic = sci_dev->owning_port->owning_controller; - struct isci_remote_device *idev = sci_dev_to_idev(sci_dev); + struct scic_sds_controller *scic = idev->owning_port->owning_controller; isci_remote_device_not_ready(scic_to_ihost(scic), idev, SCIC_REMOTE_DEVICE_NOT_READY_STOP_REQUESTED); @@ -935,82 +930,81 @@ static void scic_sds_remote_device_ready_state_exit(struct sci_base_state_machin static void scic_sds_remote_device_resetting_state_enter(struct sci_base_state_machine *sm) { - struct scic_sds_remote_device *sci_dev = container_of(sm, typeof(*sci_dev), sm); + struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); scic_sds_remote_node_context_suspend( - &sci_dev->rnc, SCI_SOFTWARE_SUSPENSION, NULL, NULL); + &idev->rnc, SCI_SOFTWARE_SUSPENSION, NULL, NULL); } static void scic_sds_remote_device_resetting_state_exit(struct sci_base_state_machine *sm) { - struct scic_sds_remote_device *sci_dev = container_of(sm, typeof(*sci_dev), sm); + struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); - scic_sds_remote_node_context_resume(&sci_dev->rnc, NULL, NULL); + scic_sds_remote_node_context_resume(&idev->rnc, NULL, NULL); } static void scic_sds_stp_remote_device_ready_idle_substate_enter(struct 
sci_base_state_machine *sm) { - struct scic_sds_remote_device *sci_dev = container_of(sm, typeof(*sci_dev), sm); + struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); - sci_dev->working_request = NULL; - if (scic_sds_remote_node_context_is_ready(&sci_dev->rnc)) { + idev->working_request = NULL; + if (scic_sds_remote_node_context_is_ready(&idev->rnc)) { /* * Since the RNC is ready, it's alright to finish completion * processing (e.g. signal the remote device is ready). */ - scic_sds_stp_remote_device_ready_idle_substate_resume_complete_handler(sci_dev); + scic_sds_stp_remote_device_ready_idle_substate_resume_complete_handler(idev); } else { - scic_sds_remote_node_context_resume(&sci_dev->rnc, + scic_sds_remote_node_context_resume(&idev->rnc, scic_sds_stp_remote_device_ready_idle_substate_resume_complete_handler, - sci_dev); + idev); } } static void scic_sds_stp_remote_device_ready_cmd_substate_enter(struct sci_base_state_machine *sm) { - struct scic_sds_remote_device *sci_dev = container_of(sm, typeof(*sci_dev), sm); - struct scic_sds_controller *scic = scic_sds_remote_device_get_controller(sci_dev); + struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); + struct scic_sds_controller *scic = scic_sds_remote_device_get_controller(idev); - BUG_ON(sci_dev->working_request == NULL); + BUG_ON(idev->working_request == NULL); - isci_remote_device_not_ready(scic_to_ihost(scic), sci_dev_to_idev(sci_dev), + isci_remote_device_not_ready(scic_to_ihost(scic), idev, SCIC_REMOTE_DEVICE_NOT_READY_SATA_REQUEST_STARTED); } static void scic_sds_stp_remote_device_ready_ncq_error_substate_enter(struct sci_base_state_machine *sm) { - struct scic_sds_remote_device *sci_dev = container_of(sm, typeof(*sci_dev), sm); - struct scic_sds_controller *scic = scic_sds_remote_device_get_controller(sci_dev); - struct isci_remote_device *idev = sci_dev_to_idev(sci_dev); + struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); + struct scic_sds_controller *scic = scic_sds_remote_device_get_controller(idev); - if (sci_dev->not_ready_reason == SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED) + if (idev->not_ready_reason == SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED) isci_remote_device_not_ready(scic_to_ihost(scic), idev, - sci_dev->not_ready_reason); + idev->not_ready_reason); } static void scic_sds_smp_remote_device_ready_idle_substate_enter(struct sci_base_state_machine *sm) { - struct scic_sds_remote_device *sci_dev = container_of(sm, typeof(*sci_dev), sm); - struct scic_sds_controller *scic = scic_sds_remote_device_get_controller(sci_dev); + struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); + struct scic_sds_controller *scic = scic_sds_remote_device_get_controller(idev); - isci_remote_device_ready(scic_to_ihost(scic), sci_dev_to_idev(sci_dev)); + isci_remote_device_ready(scic_to_ihost(scic), idev); } static void scic_sds_smp_remote_device_ready_cmd_substate_enter(struct sci_base_state_machine *sm) { - struct scic_sds_remote_device *sci_dev = container_of(sm, typeof(*sci_dev), sm); - struct scic_sds_controller *scic = scic_sds_remote_device_get_controller(sci_dev); + struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); + struct scic_sds_controller *scic = scic_sds_remote_device_get_controller(idev); - BUG_ON(sci_dev->working_request == NULL); + BUG_ON(idev->working_request == NULL); - isci_remote_device_not_ready(scic_to_ihost(scic), sci_dev_to_idev(sci_dev), + isci_remote_device_not_ready(scic_to_ihost(scic), 
idev, SCIC_REMOTE_DEVICE_NOT_READY_SMP_REQUEST_STARTED); } static void scic_sds_smp_remote_device_ready_cmd_substate_exit(struct sci_base_state_machine *sm) { - struct scic_sds_remote_device *sci_dev = container_of(sm, typeof(*sci_dev), sm); + struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); - sci_dev->working_request = NULL; + idev->working_request = NULL; } static const struct sci_base_state scic_sds_remote_device_state_table[] = { @@ -1065,14 +1059,14 @@ static const struct sci_base_state scic_sds_remote_device_state_table[] = { * frees the remote_node_context(s) for the device. */ static void scic_remote_device_construct(struct isci_port *iport, - struct scic_sds_remote_device *sci_dev) + struct isci_remote_device *idev) { - sci_dev->owning_port = iport; - sci_dev->started_request_count = 0; + idev->owning_port = iport; + idev->started_request_count = 0; - sci_init_sm(&sci_dev->sm, scic_sds_remote_device_state_table, SCI_DEV_INITIAL); + sci_init_sm(&idev->sm, scic_sds_remote_device_state_table, SCI_DEV_INITIAL); - scic_sds_remote_node_context_construct(&sci_dev->rnc, + scic_sds_remote_node_context_construct(&idev->rnc, SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX); } @@ -1091,21 +1085,21 @@ static void scic_remote_device_construct(struct isci_port *iport, * SCI_FAILURE_INSUFFICIENT_RESOURCES - remote node contexts exhausted. */ static enum sci_status scic_remote_device_da_construct(struct isci_port *iport, - struct scic_sds_remote_device *sci_dev) + struct isci_remote_device *idev) { enum sci_status status; - struct domain_device *dev = sci_dev_to_domain(sci_dev); + struct domain_device *dev = idev->domain_dev; - scic_remote_device_construct(iport, sci_dev); + scic_remote_device_construct(iport, idev); /* * This information is request to determine how many remote node context * entries will be needed to store the remote node. */ - sci_dev->is_direct_attached = true; + idev->is_direct_attached = true; status = scic_sds_controller_allocate_remote_node_context(iport->owning_controller, - sci_dev, - &sci_dev->rnc.remote_node_index); + idev, + &idev->rnc.remote_node_index); if (status != SCI_SUCCESS) return status; @@ -1116,10 +1110,10 @@ static enum sci_status scic_remote_device_da_construct(struct isci_port *iport, else return SCI_FAILURE_UNSUPPORTED_PROTOCOL; - sci_dev->connection_rate = scic_sds_port_get_max_allowed_speed(iport); + idev->connection_rate = scic_sds_port_get_max_allowed_speed(iport); /* / @todo Should I assign the port width by reading all of the phys on the port? */ - sci_dev->device_port_width = 1; + idev->device_port_width = 1; return SCI_SUCCESS; } @@ -1137,16 +1131,16 @@ static enum sci_status scic_remote_device_da_construct(struct isci_port *iport, * SCI_FAILURE_INSUFFICIENT_RESOURCES - remote node contexts exhausted. 
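The practical difference between the direct-attached and expander-attached construct paths is how the connection rate is chosen: a direct-attached device takes the port's maximum allowed speed, while an expander-attached device is clamped to the slower of the port maximum and its own negotiated link rate. A tiny sketch, with placeholder enum values rather than the real sas_linkrate constants:

    enum model_linkrate { RATE_1_5_GBPS, RATE_3_0_GBPS, RATE_6_0_GBPS };

    static enum model_linkrate pick_connection_rate(int direct_attached,
                                                    enum model_linkrate port_max,
                                                    enum model_linkrate dev_rate)
    {
            if (direct_attached)
                    return port_max;        /* da_construct: port speed only */

            /* ea_construct: never faster than the device's own link rate */
            return dev_rate < port_max ? dev_rate : port_max;
    }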
*/ static enum sci_status scic_remote_device_ea_construct(struct isci_port *iport, - struct scic_sds_remote_device *sci_dev) + struct isci_remote_device *idev) { - struct domain_device *dev = sci_dev_to_domain(sci_dev); + struct domain_device *dev = idev->domain_dev; enum sci_status status; - scic_remote_device_construct(iport, sci_dev); + scic_remote_device_construct(iport, idev); status = scic_sds_controller_allocate_remote_node_context(iport->owning_controller, - sci_dev, - &sci_dev->rnc.remote_node_index); + idev, + &idev->rnc.remote_node_index); if (status != SCI_SUCCESS) return status; @@ -1163,11 +1157,11 @@ static enum sci_status scic_remote_device_ea_construct(struct isci_port *iport, * connection the logical link rate is that same as the * physical. Furthermore, the SAS-2 and SAS-1.1 fields overlay * one another, so this code works for both situations. */ - sci_dev->connection_rate = min_t(u16, scic_sds_port_get_max_allowed_speed(iport), + idev->connection_rate = min_t(u16, scic_sds_port_get_max_allowed_speed(iport), dev->linkrate); /* / @todo Should I assign the port width by reading all of the phys on the port? */ - sci_dev->device_port_width = 1; + idev->device_port_width = 1; return SCI_SUCCESS; } @@ -1185,22 +1179,22 @@ static enum sci_status scic_remote_device_ea_construct(struct isci_port *iport, * SCI_FAILURE_INVALID_PHY This value is returned if the user attempts to start * the device when there have been no phys added to it. */ -static enum sci_status scic_remote_device_start(struct scic_sds_remote_device *sci_dev, +static enum sci_status scic_remote_device_start(struct isci_remote_device *idev, u32 timeout) { - struct sci_base_state_machine *sm = &sci_dev->sm; + struct sci_base_state_machine *sm = &idev->sm; enum scic_sds_remote_device_states state = sm->current_state_id; enum sci_status status; if (state != SCI_DEV_STOPPED) { - dev_warn(scirdev_to_dev(sci_dev), "%s: in wrong state: %d\n", + dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n", __func__, state); return SCI_FAILURE_INVALID_STATE; } - status = scic_sds_remote_node_context_resume(&sci_dev->rnc, + status = scic_sds_remote_node_context_resume(&idev->rnc, remote_device_resume_done, - sci_dev); + idev); if (status != SCI_SUCCESS) return status; @@ -1217,9 +1211,9 @@ static enum sci_status isci_remote_device_construct(struct isci_port *iport, enum sci_status status; if (dev->parent && dev_is_expander(dev->parent)) - status = scic_remote_device_ea_construct(iport, &idev->sci); + status = scic_remote_device_ea_construct(iport, idev); else - status = scic_remote_device_da_construct(iport, &idev->sci); + status = scic_remote_device_da_construct(iport, idev); if (status != SCI_SUCCESS) { dev_dbg(&ihost->pdev->dev, "%s: construct failed: %d\n", @@ -1229,7 +1223,7 @@ static enum sci_status isci_remote_device_construct(struct isci_port *iport, } /* start the device. */ - status = scic_remote_device_start(&idev->sci, ISCI_REMOTE_DEVICE_START_TIMEOUT); + status = scic_remote_device_start(idev, ISCI_REMOTE_DEVICE_START_TIMEOUT); if (status != SCI_SUCCESS) dev_warn(&ihost->pdev->dev, "remote device start failed: %d\n", @@ -1330,7 +1324,7 @@ enum sci_status isci_remote_device_stop(struct isci_host *ihost, struct isci_rem set_bit(IDEV_STOP_PENDING, &idev->flags); spin_lock_irqsave(&ihost->scic_lock, flags); - status = scic_remote_device_stop(&idev->sci, 50); + status = scic_remote_device_stop(idev, 50); spin_unlock_irqrestore(&ihost->scic_lock, flags); /* Wait for the stop complete callback. 
*/ diff --git a/drivers/scsi/isci/remote_device.h b/drivers/scsi/isci/remote_device.h index 578d75b8cd1..45798582fc1 100644 --- a/drivers/scsi/isci/remote_device.h +++ b/drivers/scsi/isci/remote_device.h @@ -70,65 +70,14 @@ enum scic_remote_device_not_ready_reason_code { SCIC_REMOTE_DEVICE_NOT_READY_REASON_CODE_MAX }; -struct scic_sds_remote_device { - /** - * This field contains the information for the base remote device state - * machine. - */ - struct sci_base_state_machine sm; - - /** - * This field is the programmed device port width. This value is - * written to the RCN data structure to tell the SCU how many open - * connections this device can have. - */ - u32 device_port_width; - - /** - * This field is the programmed connection rate for this remote device. It is - * used to program the TC with the maximum allowed connection rate. - */ - enum sas_linkrate connection_rate; - - /** - * This filed is assinged the value of true if the device is directly - * attached to the port. - */ - bool is_direct_attached; - - /** - * This filed contains a pointer back to the port to which this device - * is assigned. - */ - struct isci_port *owning_port; - - /** - * This field contains the SCU silicon remote node context specific - * information. - */ - struct scic_sds_remote_node_context rnc; - - /** - * This field contains the stated request count for the remote device. The - * device can not reach the SCI_DEV_STOPPED until all - * requests are complete and the rnc_posted value is false. - */ - u32 started_request_count; - - /** - * This field contains a pointer to the working request object. It is only - * used only for SATA requests since the unsolicited frames we get from the - * hardware have no Tag value to look up the io request object. - */ - struct isci_request *working_request; - - /** - * This field contains the reason for the remote device going not_ready. It is - * assigned in the state handlers and used in the state transition. - */ - u32 not_ready_reason; -}; - +/** + * isci_remote_device - isci representation of a sas expander / end point + * @device_port_width: hw setting for number of simultaneous connections + * @connection_rate: per-taskcontext connection rate for this device + * @working_request: SATA requests have no tag we for unaccelerated + * protocols we need a method to associate unsolicited + * frames with a pending request + */ struct isci_remote_device { #define IDEV_START_PENDING 0 #define IDEV_STOP_PENDING 1 @@ -143,7 +92,16 @@ struct isci_remote_device { struct domain_device *domain_dev; struct list_head node; struct list_head reqs_in_process; - struct scic_sds_remote_device sci; + struct sci_base_state_machine sm; + u32 device_port_width; + enum sas_linkrate connection_rate; + bool is_direct_attached; + struct isci_port *owning_port; + struct scic_sds_remote_node_context rnc; + /* XXX unify with device reference counting and delete */ + u32 started_request_count; + struct isci_request *working_request; + u32 not_ready_reason; }; #define ISCI_REMOTE_DEVICE_START_TIMEOUT 5000 @@ -191,7 +149,7 @@ void isci_device_clear_reset_pending(struct isci_host *ihost, * successfully stopped. */ enum sci_status scic_remote_device_stop( - struct scic_sds_remote_device *remote_device, + struct isci_remote_device *idev, u32 timeout); /** @@ -207,7 +165,7 @@ enum sci_status scic_remote_device_stop( * started. 
*/ enum sci_status scic_remote_device_reset( - struct scic_sds_remote_device *remote_device); + struct isci_remote_device *idev); /** * scic_remote_device_reset_complete() - This method informs the device object @@ -220,7 +178,7 @@ enum sci_status scic_remote_device_reset( * is resuming operation. */ enum sci_status scic_remote_device_reset_complete( - struct scic_sds_remote_device *remote_device); + struct isci_remote_device *idev); #define scic_remote_device_is_atapi(device_handle) false @@ -335,27 +293,15 @@ enum scic_sds_remote_device_states { SCI_DEV_FINAL, }; -static inline struct scic_sds_remote_device *rnc_to_dev(struct scic_sds_remote_node_context *rnc) +static inline struct isci_remote_device *rnc_to_dev(struct scic_sds_remote_node_context *rnc) { - struct scic_sds_remote_device *sci_dev; + struct isci_remote_device *idev; - sci_dev = container_of(rnc, typeof(*sci_dev), rnc); - - return sci_dev; -} - -static inline struct isci_remote_device *sci_dev_to_idev(struct scic_sds_remote_device *sci_dev) -{ - struct isci_remote_device *idev = container_of(sci_dev, typeof(*idev), sci); + idev = container_of(rnc, typeof(*idev), rnc); return idev; } -static inline struct domain_device *sci_dev_to_domain(struct scic_sds_remote_device *sci_dev) -{ - return sci_dev_to_idev(sci_dev)->domain_dev; -} - static inline bool dev_is_expander(struct domain_device *dev) { return dev->dev_type == EDGE_DEV || dev->dev_type == FANOUT_DEV; @@ -366,8 +312,8 @@ static inline bool dev_is_expander(struct domain_device *dev) * * This macro incrments the request count for this device */ -#define scic_sds_remote_device_increment_request_count(sci_dev) \ - ((sci_dev)->started_request_count++) +#define scic_sds_remote_device_increment_request_count(idev) \ + ((idev)->started_request_count++) /** * scic_sds_remote_device_decrement_request_count() - @@ -375,44 +321,44 @@ static inline bool dev_is_expander(struct domain_device *dev) * This macro decrements the request count for this device. This count will * never decrment past 0. */ -#define scic_sds_remote_device_decrement_request_count(sci_dev) \ - ((sci_dev)->started_request_count > 0 ? \ - (sci_dev)->started_request_count-- : 0) +#define scic_sds_remote_device_decrement_request_count(idev) \ + ((idev)->started_request_count > 0 ? \ + (idev)->started_request_count-- : 0) /** * scic_sds_remote_device_get_request_count() - * * This is a helper macro to return the current device request count. 
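The request-count macros above can be read as the following inline helpers; this is only an illustration of their semantics (in particular, the decrement clamps at zero rather than wrapping), not a proposed replacement.

    struct count_dev {
            unsigned int started_request_count;
    };

    static inline void model_inc_request_count(struct count_dev *idev)
    {
            idev->started_request_count++;
    }

    static inline void model_dec_request_count(struct count_dev *idev)
    {
            if (idev->started_request_count > 0)
                    idev->started_request_count--; /* never wraps below zero */
    }

    static inline unsigned int model_get_request_count(struct count_dev *idev)
    {
            return idev->started_request_count;
    }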
*/ -#define scic_sds_remote_device_get_request_count(sci_dev) \ - ((sci_dev)->started_request_count) +#define scic_sds_remote_device_get_request_count(idev) \ + ((idev)->started_request_count) /** * scic_sds_remote_device_get_controller() - * * This macro returns the controller object that contains this device object */ -#define scic_sds_remote_device_get_controller(sci_dev) \ - scic_sds_port_get_controller(scic_sds_remote_device_get_port(sci_dev)) +#define scic_sds_remote_device_get_controller(idev) \ + scic_sds_port_get_controller(scic_sds_remote_device_get_port(idev)) /** * scic_sds_remote_device_get_port() - * * This macro returns the owning port of this device */ -#define scic_sds_remote_device_get_port(sci_dev) \ - ((sci_dev)->owning_port) +#define scic_sds_remote_device_get_port(idev) \ + ((idev)->owning_port) /** * scic_sds_remote_device_get_controller_peg() - * * This macro returns the controllers protocol engine group */ -#define scic_sds_remote_device_get_controller_peg(sci_dev) \ +#define scic_sds_remote_device_get_controller_peg(idev) \ (\ scic_sds_controller_get_protocol_engine_group(\ scic_sds_port_get_controller(\ - scic_sds_remote_device_get_port(sci_dev) \ + scic_sds_remote_device_get_port(idev) \ ) \ ) \ ) @@ -422,8 +368,8 @@ static inline bool dev_is_expander(struct domain_device *dev) * * This macro returns the remote node index for this device object */ -#define scic_sds_remote_device_get_index(sci_dev) \ - ((sci_dev)->rnc.remote_node_index) +#define scic_sds_remote_device_get_index(idev) \ + ((idev)->rnc.remote_node_index) /** * scic_sds_remote_device_build_command_context() - @@ -448,36 +394,36 @@ static inline bool dev_is_expander(struct domain_device *dev) ((device)->working_request = (request)) enum sci_status scic_sds_remote_device_frame_handler( - struct scic_sds_remote_device *sci_dev, + struct isci_remote_device *idev, u32 frame_index); enum sci_status scic_sds_remote_device_event_handler( - struct scic_sds_remote_device *sci_dev, + struct isci_remote_device *idev, u32 event_code); enum sci_status scic_sds_remote_device_start_io( struct scic_sds_controller *controller, - struct scic_sds_remote_device *sci_dev, + struct isci_remote_device *idev, struct isci_request *ireq); enum sci_status scic_sds_remote_device_start_task( struct scic_sds_controller *controller, - struct scic_sds_remote_device *sci_dev, + struct isci_remote_device *idev, struct isci_request *ireq); enum sci_status scic_sds_remote_device_complete_io( struct scic_sds_controller *controller, - struct scic_sds_remote_device *sci_dev, + struct isci_remote_device *idev, struct isci_request *ireq); enum sci_status scic_sds_remote_device_suspend( - struct scic_sds_remote_device *sci_dev, + struct isci_remote_device *idev, u32 suspend_type); void scic_sds_remote_device_post_request( - struct scic_sds_remote_device *sci_dev, + struct isci_remote_device *idev, u32 request); -#define scic_sds_remote_device_is_atapi(sci_dev) false +#define scic_sds_remote_device_is_atapi(idev) false #endif /* !defined(_ISCI_REMOTE_DEVICE_H_) */ diff --git a/drivers/scsi/isci/remote_node_context.c b/drivers/scsi/isci/remote_node_context.c index 1b51fe55314..e485744e126 100644 --- a/drivers/scsi/isci/remote_node_context.c +++ b/drivers/scsi/isci/remote_node_context.c @@ -103,22 +103,22 @@ bool scic_sds_remote_node_context_is_ready( static void scic_sds_remote_node_context_construct_buffer( struct scic_sds_remote_node_context *sci_rnc) { - struct scic_sds_remote_device *sci_dev = rnc_to_dev(sci_rnc); - struct domain_device 
*dev = sci_dev_to_domain(sci_dev); + struct isci_remote_device *idev = rnc_to_dev(sci_rnc); + struct domain_device *dev = idev->domain_dev; int rni = sci_rnc->remote_node_index; union scu_remote_node_context *rnc; struct scic_sds_controller *scic; __le64 sas_addr; - scic = scic_sds_remote_device_get_controller(sci_dev); + scic = scic_sds_remote_device_get_controller(idev); rnc = scic_sds_controller_get_remote_node_context_buffer(scic, rni); memset(rnc, 0, sizeof(union scu_remote_node_context) - * scic_sds_remote_device_node_count(sci_dev)); + * scic_sds_remote_device_node_count(idev)); rnc->ssp.remote_node_index = rni; - rnc->ssp.remote_node_port_width = sci_dev->device_port_width; - rnc->ssp.logical_port_index = sci_dev->owning_port->physical_port_index; + rnc->ssp.remote_node_port_width = idev->device_port_width; + rnc->ssp.logical_port_index = idev->owning_port->physical_port_index; /* sas address is __be64, context ram format is __le64 */ sas_addr = cpu_to_le64(SAS_ADDR(dev->sas_addr)); @@ -148,7 +148,7 @@ static void scic_sds_remote_node_context_construct_buffer( rnc->ssp.initial_arbitration_wait_time = 0; /* Open Address Frame Parameters */ - rnc->ssp.oaf_connection_rate = sci_dev->connection_rate; + rnc->ssp.oaf_connection_rate = idev->connection_rate; rnc->ssp.oaf_features = 0; rnc->ssp.oaf_source_zone_group = 0; rnc->ssp.oaf_more_compatibility_features = 0; @@ -220,26 +220,26 @@ static void scic_sds_remote_node_context_continue_state_transitions(struct scic_ static void scic_sds_remote_node_context_validate_context_buffer( struct scic_sds_remote_node_context *sci_rnc) { - struct scic_sds_remote_device *sci_dev = rnc_to_dev(sci_rnc); - struct domain_device *dev = sci_dev_to_domain(sci_dev); + struct isci_remote_device *idev = rnc_to_dev(sci_rnc); + struct domain_device *dev = idev->domain_dev; union scu_remote_node_context *rnc_buffer; rnc_buffer = scic_sds_controller_get_remote_node_context_buffer( - scic_sds_remote_device_get_controller(sci_dev), + scic_sds_remote_device_get_controller(idev), sci_rnc->remote_node_index ); rnc_buffer->ssp.is_valid = true; - if (!sci_dev->is_direct_attached && + if (!idev->is_direct_attached && (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP))) { - scic_sds_remote_device_post_request(sci_dev, + scic_sds_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_96); } else { - scic_sds_remote_device_post_request(sci_dev, SCU_CONTEXT_COMMAND_POST_RNC_32); + scic_sds_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_32); - if (sci_dev->is_direct_attached) { - scic_sds_port_setup_transports(sci_dev->owning_port, + if (idev->is_direct_attached) { + scic_sds_port_setup_transports(idev->owning_port, sci_rnc->remote_node_index); } } @@ -296,11 +296,11 @@ static void scic_sds_remote_node_context_invalidating_state_enter(struct sci_bas static void scic_sds_remote_node_context_resuming_state_enter(struct sci_base_state_machine *sm) { struct scic_sds_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm); - struct scic_sds_remote_device *sci_dev; + struct isci_remote_device *idev; struct domain_device *dev; - sci_dev = rnc_to_dev(rnc); - dev = sci_dev_to_domain(sci_dev); + idev = rnc_to_dev(rnc); + dev = idev->domain_dev; /* * For direct attached SATA devices we need to clear the TLCR @@ -309,11 +309,11 @@ static void scic_sds_remote_node_context_resuming_state_enter(struct sci_base_st * the STPTLDARNI register with the RNi of the device */ if ((dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) && - 
sci_dev->is_direct_attached) - scic_sds_port_setup_transports(sci_dev->owning_port, + idev->is_direct_attached) + scic_sds_port_setup_transports(idev->owning_port, rnc->remote_node_index); - scic_sds_remote_device_post_request(sci_dev, SCU_CONTEXT_COMMAND_POST_RNC_RESUME); + scic_sds_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_RESUME); } static void scic_sds_remote_node_context_ready_state_enter(struct sci_base_state_machine *sm) @@ -564,8 +564,8 @@ enum sci_status scic_sds_remote_node_context_resume(struct scic_sds_remote_node_ sci_rnc->user_cookie = cb_p; return SCI_SUCCESS; case SCI_RNC_TX_SUSPENDED: { - struct scic_sds_remote_device *sci_dev = rnc_to_dev(sci_rnc); - struct domain_device *dev = sci_dev_to_domain(sci_dev); + struct isci_remote_device *idev = rnc_to_dev(sci_rnc); + struct domain_device *dev = idev->domain_dev; scic_sds_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p); @@ -573,7 +573,7 @@ enum sci_status scic_sds_remote_node_context_resume(struct scic_sds_remote_node_ if (dev->dev_type == SAS_END_DEV || dev_is_expander(dev)) sci_change_state(&sci_rnc->sm, SCI_RNC_RESUMING); else if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) { - if (sci_dev->is_direct_attached) { + if (idev->is_direct_attached) { /* @todo Fix this since I am being silly in writing to the STPTLDARNI register. */ sci_change_state(&sci_rnc->sm, SCI_RNC_RESUMING); } else { diff --git a/drivers/scsi/isci/remote_node_context.h b/drivers/scsi/isci/remote_node_context.h index 35e6ae61690..7a24c7a12de 100644 --- a/drivers/scsi/isci/remote_node_context.h +++ b/drivers/scsi/isci/remote_node_context.h @@ -79,7 +79,7 @@ #define SCI_SOFTWARE_SUSPENSION (1) struct isci_request; -struct scic_sds_remote_device; +struct isci_remote_device; struct scic_sds_remote_node_context; typedef void (*scics_sds_remote_node_context_callback)(void *); diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c index 2d29abf3ce1..90ead662828 100644 --- a/drivers/scsi/isci/request.c +++ b/drivers/scsi/isci/request.c @@ -209,17 +209,17 @@ static void scu_ssp_reqeust_construct_task_context( struct scu_task_context *task_context) { dma_addr_t dma_addr; - struct scic_sds_remote_device *target_device; + struct isci_remote_device *idev; struct isci_port *iport; - target_device = scic_sds_request_get_device(ireq); + idev = scic_sds_request_get_device(ireq); iport = scic_sds_request_get_port(ireq); /* Fill in the TC with the its required data */ task_context->abort = 0; task_context->priority = 0; task_context->initiator_request = 1; - task_context->connection_rate = target_device->connection_rate; + task_context->connection_rate = idev->connection_rate; task_context->protocol_engine_index = scic_sds_controller_get_protocol_engine_group(controller); task_context->logical_port_index = scic_sds_port_get_index(iport); @@ -227,8 +227,7 @@ static void scu_ssp_reqeust_construct_task_context( task_context->valid = SCU_TASK_CONTEXT_VALID; task_context->context_type = SCU_TASK_CONTEXT_TYPE; - task_context->remote_node_index = - scic_sds_remote_device_get_index(ireq->target_device); + task_context->remote_node_index = scic_sds_remote_device_get_index(idev); task_context->command_code = 0; task_context->link_layer_control = 0; @@ -348,17 +347,17 @@ static void scu_sata_reqeust_construct_task_context( struct scu_task_context *task_context) { dma_addr_t dma_addr; - struct scic_sds_remote_device *target_device; + struct isci_remote_device *idev; struct isci_port *iport; - target_device = 
scic_sds_request_get_device(ireq); + idev = scic_sds_request_get_device(ireq); iport = scic_sds_request_get_port(ireq); /* Fill in the TC with the its required data */ task_context->abort = 0; task_context->priority = SCU_TASK_PRIORITY_NORMAL; task_context->initiator_request = 1; - task_context->connection_rate = target_device->connection_rate; + task_context->connection_rate = idev->connection_rate; task_context->protocol_engine_index = scic_sds_controller_get_protocol_engine_group(controller); task_context->logical_port_index = @@ -367,8 +366,7 @@ static void scu_sata_reqeust_construct_task_context( task_context->valid = SCU_TASK_CONTEXT_VALID; task_context->context_type = SCU_TASK_CONTEXT_TYPE; - task_context->remote_node_index = - scic_sds_remote_device_get_index(ireq->target_device); + task_context->remote_node_index = scic_sds_remote_device_get_index(idev); task_context->command_code = 0; task_context->link_layer_control = 0; @@ -2850,7 +2848,7 @@ static void isci_request_io_request_complete(struct isci_host *isci_host, static void scic_sds_request_started_state_enter(struct sci_base_state_machine *sm) { struct isci_request *ireq = container_of(sm, typeof(*ireq), sm); - struct domain_device *dev = sci_dev_to_domain(ireq->target_device); + struct domain_device *dev = ireq->target_device->domain_dev; struct sas_task *task; /* XXX as hch said always creating an internal sas_task for tmf @@ -2988,12 +2986,12 @@ static const struct sci_base_state scic_sds_request_state_table[] = { static void scic_sds_general_request_construct(struct scic_sds_controller *scic, - struct scic_sds_remote_device *sci_dev, + struct isci_remote_device *idev, struct isci_request *ireq) { sci_init_sm(&ireq->sm, scic_sds_request_state_table, SCI_REQ_INIT); - ireq->target_device = sci_dev; + ireq->target_device = idev; ireq->protocol = SCIC_NO_PROTOCOL; ireq->saved_rx_frame_index = SCU_INVALID_FRAME_INDEX; @@ -3004,16 +3002,16 @@ scic_sds_general_request_construct(struct scic_sds_controller *scic, static enum sci_status scic_io_request_construct(struct scic_sds_controller *scic, - struct scic_sds_remote_device *sci_dev, + struct isci_remote_device *idev, struct isci_request *ireq) { - struct domain_device *dev = sci_dev_to_domain(sci_dev); + struct domain_device *dev = idev->domain_dev; enum sci_status status = SCI_SUCCESS; /* Build the common part of the request */ - scic_sds_general_request_construct(scic, sci_dev, ireq); + scic_sds_general_request_construct(scic, idev, ireq); - if (sci_dev->rnc.remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) + if (idev->rnc.remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) return SCI_FAILURE_INVALID_REMOTE_DEVICE; if (dev->dev_type == SAS_END_DEV) @@ -3031,14 +3029,14 @@ scic_io_request_construct(struct scic_sds_controller *scic, } enum sci_status scic_task_request_construct(struct scic_sds_controller *scic, - struct scic_sds_remote_device *sci_dev, + struct isci_remote_device *idev, u16 io_tag, struct isci_request *ireq) { - struct domain_device *dev = sci_dev_to_domain(sci_dev); + struct domain_device *dev = idev->domain_dev; enum sci_status status = SCI_SUCCESS; /* Build the common part of the request */ - scic_sds_general_request_construct(scic, sci_dev, ireq); + scic_sds_general_request_construct(scic, idev, ireq); if (dev->dev_type == SAS_END_DEV || dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) { @@ -3102,7 +3100,7 @@ scic_io_request_construct_smp(struct device *dev, struct sas_task *task) { struct scatterlist *sg = 
&task->smp_task.smp_req; - struct scic_sds_remote_device *sci_dev; + struct isci_remote_device *idev; struct scu_task_context *task_context; struct isci_port *iport; struct smp_req *smp_req; @@ -3147,7 +3145,7 @@ scic_io_request_construct_smp(struct device *dev, task_context = ireq->tc; - sci_dev = scic_sds_request_get_device(ireq); + idev = scic_sds_request_get_device(ireq); iport = scic_sds_request_get_port(ireq); /* @@ -3156,7 +3154,7 @@ scic_io_request_construct_smp(struct device *dev, */ task_context->priority = 0; task_context->initiator_request = 1; - task_context->connection_rate = sci_dev->connection_rate; + task_context->connection_rate = idev->connection_rate; task_context->protocol_engine_index = scic_sds_controller_get_protocol_engine_group(scic); task_context->logical_port_index = scic_sds_port_get_index(iport); @@ -3166,7 +3164,7 @@ scic_io_request_construct_smp(struct device *dev, task_context->context_type = SCU_TASK_CONTEXT_TYPE; /* 04h */ - task_context->remote_node_index = sci_dev->rnc.remote_node_index; + task_context->remote_node_index = idev->rnc.remote_node_index; task_context->command_code = 0; task_context->task_type = SCU_TASK_TYPE_SMP_REQUEST; @@ -3257,17 +3255,16 @@ static enum sci_status isci_smp_request_build(struct isci_request *ireq) */ static enum sci_status isci_io_request_build(struct isci_host *isci_host, struct isci_request *request, - struct isci_remote_device *isci_device) + struct isci_remote_device *idev) { enum sci_status status = SCI_SUCCESS; struct sas_task *task = isci_request_access_task(request); - struct scic_sds_remote_device *sci_device = &isci_device->sci; dev_dbg(&isci_host->pdev->dev, - "%s: isci_device = 0x%p; request = %p, " + "%s: idev = 0x%p; request = %p, " "num_scatter = %d\n", __func__, - isci_device, + idev, request, task->num_scatter); @@ -3290,8 +3287,7 @@ static enum sci_status isci_io_request_build(struct isci_host *isci_host, return SCI_FAILURE_INSUFFICIENT_RESOURCES; } - status = scic_io_request_construct(&isci_host->sci, sci_device, - request); + status = scic_io_request_construct(&isci_host->sci, idev, request); if (status != SCI_SUCCESS) { dev_warn(&isci_host->pdev->dev, @@ -3397,14 +3393,14 @@ int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *ide * ireq->is_task_management_request is false). */ status = scic_controller_start_task(&ihost->sci, - &idev->sci, + idev, ireq); } else { status = SCI_FAILURE; } } else { /* send the request, let the core assign the IO TAG. 
*/ - status = scic_controller_start_io(&ihost->sci, &idev->sci, + status = scic_controller_start_io(&ihost->sci, idev, ireq); } diff --git a/drivers/scsi/isci/request.h b/drivers/scsi/isci/request.h index 68d8a27357e..ca64ea207ac 100644 --- a/drivers/scsi/isci/request.h +++ b/drivers/scsi/isci/request.h @@ -146,7 +146,7 @@ struct isci_request { struct completion *io_request_completion; struct sci_base_state_machine sm; struct scic_sds_controller *owning_controller; - struct scic_sds_remote_device *target_device; + struct isci_remote_device *target_device; u16 io_tag; enum sci_request_protocol protocol; u32 scu_status; /* hardware result */ @@ -501,7 +501,7 @@ void isci_terminate_pending_requests(struct isci_host *ihost, struct isci_remote_device *idev); enum sci_status scic_task_request_construct(struct scic_sds_controller *scic, - struct scic_sds_remote_device *sci_dev, + struct isci_remote_device *idev, u16 io_tag, struct isci_request *ireq); enum sci_status diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c index 700708c8267..89b01eef44b 100644 --- a/drivers/scsi/isci/task.c +++ b/drivers/scsi/isci/task.c @@ -257,7 +257,7 @@ static struct isci_request *isci_task_request_build(struct isci_host *ihost, return NULL; /* let the core do it's construct. */ - status = scic_task_request_construct(&ihost->sci, &idev->sci, tag, + status = scic_task_request_construct(&ihost->sci, idev, tag, ireq); if (status != SCI_SUCCESS) { @@ -288,12 +288,11 @@ static struct isci_request *isci_task_request_build(struct isci_host *ihost, } int isci_task_execute_tmf(struct isci_host *ihost, - struct isci_remote_device *isci_device, + struct isci_remote_device *idev, struct isci_tmf *tmf, unsigned long timeout_ms) { DECLARE_COMPLETION_ONSTACK(completion); enum sci_task_status status = SCI_TASK_FAILURE; - struct scic_sds_remote_device *sci_device; struct isci_request *ireq; int ret = TMF_RESP_FUNC_FAILED; unsigned long flags; @@ -310,34 +309,30 @@ int isci_task_execute_tmf(struct isci_host *ihost, /* sanity check, return TMF_RESP_FUNC_FAILED * if the device is not there and ready. */ - if (!isci_device || - (!test_bit(IDEV_IO_READY, &isci_device->flags) && - !test_bit(IDEV_IO_NCQERROR, &isci_device->flags))) { + if (!idev || + (!test_bit(IDEV_IO_READY, &idev->flags) && + !test_bit(IDEV_IO_NCQERROR, &idev->flags))) { dev_dbg(&ihost->pdev->dev, - "%s: isci_device = %p not ready (%#lx)\n", + "%s: idev = %p not ready (%#lx)\n", __func__, - isci_device, isci_device ? isci_device->flags : 0); + idev, idev ? idev->flags : 0); goto err_tci; } else dev_dbg(&ihost->pdev->dev, - "%s: isci_device = %p\n", - __func__, isci_device); - - sci_device = &isci_device->sci; + "%s: idev = %p\n", + __func__, idev); /* Assign the pointer to the TMF's completion kernel wait structure. */ tmf->complete = &completion; - ireq = isci_task_request_build(ihost, isci_device, tag, tmf); + ireq = isci_task_request_build(ihost, idev, tag, tmf); if (!ireq) goto err_tci; spin_lock_irqsave(&ihost->scic_lock, flags); /* start the TMF io. */ - status = scic_controller_start_task(&ihost->sci, - sci_device, - ireq); + status = scic_controller_start_task(&ihost->sci, idev, ireq); if (status != SCI_TASK_SUCCESS) { dev_warn(&ihost->pdev->dev, @@ -355,7 +350,7 @@ int isci_task_execute_tmf(struct isci_host *ihost, isci_request_change_state(ireq, started); /* add the request to the remote device request list. 
*/ - list_add(&ireq->dev_node, &isci_device->reqs_in_process); + list_add(&ireq->dev_node, &idev->reqs_in_process); spin_unlock_irqrestore(&ihost->scic_lock, flags); @@ -370,7 +365,7 @@ int isci_task_execute_tmf(struct isci_host *ihost, tmf->cb_state_func(isci_tmf_timed_out, tmf, tmf->cb_data); scic_controller_terminate_request(&ihost->sci, - &isci_device->sci, + idev, ireq); spin_unlock_irqrestore(&ihost->scic_lock, flags); @@ -520,13 +515,13 @@ static void isci_request_cleanup_completed_loiterer( * from a thread that can wait. Note that the request is terminated and * completed (back to the host, if started there). * @isci_host: This SCU. - * @isci_device: The target. + * @idev: The target. * @isci_request: The I/O request to be terminated. * */ static void isci_terminate_request_core( struct isci_host *isci_host, - struct isci_remote_device *isci_device, + struct isci_remote_device *idev, struct isci_request *isci_request) { enum sci_status status = SCI_SUCCESS; @@ -540,7 +535,7 @@ static void isci_terminate_request_core( dev_dbg(&isci_host->pdev->dev, "%s: device = %p; request = %p\n", - __func__, isci_device, isci_request); + __func__, idev, isci_request); spin_lock_irqsave(&isci_host->scic_lock, flags); @@ -564,7 +559,7 @@ static void isci_terminate_request_core( needs_cleanup_handling = true; status = scic_controller_terminate_request( &isci_host->sci, - &isci_device->sci, + idev, isci_request); } spin_unlock_irqrestore(&isci_host->scic_lock, flags); @@ -683,7 +678,7 @@ static void isci_terminate_request_core( } if (needs_cleanup_handling) isci_request_cleanup_completed_loiterer( - isci_host, isci_device, isci_request, task); + isci_host, idev, isci_request, task); } } @@ -694,7 +689,7 @@ static void isci_terminate_request_core( * called from a thread that can wait. Note that the requests are all * terminated and completed (back to the host, if started there). * @isci_host: This parameter specifies SCU. - * @isci_device: This parameter specifies the target. + * @idev: This parameter specifies the target. * */ void isci_terminate_pending_requests(struct isci_host *ihost, @@ -1521,7 +1516,7 @@ static int isci_reset_device(struct isci_host *ihost, dev_dbg(&ihost->pdev->dev, "%s: idev %p\n", __func__, idev); spin_lock_irqsave(&ihost->scic_lock, flags); - status = scic_remote_device_reset(&idev->sci); + status = scic_remote_device_reset(idev); if (status != SCI_SUCCESS) { spin_unlock_irqrestore(&ihost->scic_lock, flags); @@ -1547,7 +1542,7 @@ static int isci_reset_device(struct isci_host *ihost, /* Since all pending TCs have been cleaned, resume the RNC. */ spin_lock_irqsave(&ihost->scic_lock, flags); - status = scic_remote_device_reset_complete(&idev->sci); + status = scic_remote_device_reset_complete(idev); spin_unlock_irqrestore(&ihost->scic_lock, flags); /* If this is a device on an expander, bring the phy back up. */ -- cgit v1.2.3-70-g09d2 From d9dcb4ba791de2a06b19ac47cd61601cf3d4e208 Mon Sep 17 00:00:00 2001 From: Dan Williams <dan.j.williams@intel.com> Date: Thu, 30 Jun 2011 17:38:32 -0700 Subject: isci: unify isci_host and scic_sds_controller Remove the distinction between these two implementations and unify on isci_host (local instances named ihost). Hmmm, we had two 'oem_parameters' instances, one was unused... nice. 
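Schematically, the unification looks like the sketch below; the struct and field names are made up to show the shape of the change, not the driver's real layouts.

    /* before: core state lived in a separate object embedded in the host,
     * with conversions (e.g. scic_to_ihost()) needed at every seam */
    struct core_before {
            unsigned int completion_queue_get;
            /* ... hardware/core state ... */
    };

    struct host_before {
            struct core_before sci;         /* call sites pass &ihost->sci */
            /* ... Linux-facing state ... */
    };

    /* after: one object, locals named ihost, no conversion helpers needed */
    struct host_after {
            unsigned int completion_queue_get;
            /* ... hardware/core state and Linux-facing state together ... */
    };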
Reported-by: Christoph Hellwig <hch@lst.de> Signed-off-by: Dan Williams <dan.j.williams@intel.com> --- drivers/scsi/isci/host.c | 1000 ++++++++++++------------- drivers/scsi/isci/host.h | 210 ++---- drivers/scsi/isci/init.c | 10 +- drivers/scsi/isci/phy.c | 40 +- drivers/scsi/isci/port.c | 89 +-- drivers/scsi/isci/port.h | 4 +- drivers/scsi/isci/port_config.c | 82 +- drivers/scsi/isci/probe_roms.h | 2 +- drivers/scsi/isci/remote_device.c | 76 +- drivers/scsi/isci/remote_device.h | 6 +- drivers/scsi/isci/remote_node_context.c | 14 +- drivers/scsi/isci/request.c | 200 +++-- drivers/scsi/isci/request.h | 4 +- drivers/scsi/isci/task.c | 44 +- drivers/scsi/isci/unsolicited_frame_control.c | 6 +- drivers/scsi/isci/unsolicited_frame_control.h | 4 +- 16 files changed, 806 insertions(+), 985 deletions(-) (limited to 'drivers/scsi/isci/request.h') diff --git a/drivers/scsi/isci/host.c b/drivers/scsi/isci/host.c index 45d7f71c609..bb298f8f609 100644 --- a/drivers/scsi/isci/host.c +++ b/drivers/scsi/isci/host.c @@ -181,35 +181,35 @@ void sci_change_state(struct sci_base_state_machine *sm, u32 next_state) } static bool scic_sds_controller_completion_queue_has_entries( - struct scic_sds_controller *scic) + struct isci_host *ihost) { - u32 get_value = scic->completion_queue_get; + u32 get_value = ihost->completion_queue_get; u32 get_index = get_value & SMU_COMPLETION_QUEUE_GET_POINTER_MASK; if (NORMALIZE_GET_POINTER_CYCLE_BIT(get_value) == - COMPLETION_QUEUE_CYCLE_BIT(scic->completion_queue[get_index])) + COMPLETION_QUEUE_CYCLE_BIT(ihost->completion_queue[get_index])) return true; return false; } -static bool scic_sds_controller_isr(struct scic_sds_controller *scic) +static bool scic_sds_controller_isr(struct isci_host *ihost) { - if (scic_sds_controller_completion_queue_has_entries(scic)) { + if (scic_sds_controller_completion_queue_has_entries(ihost)) { return true; } else { /* * we have a spurious interrupt it could be that we have already * emptied the completion queue from a previous interrupt */ - writel(SMU_ISR_COMPLETION, &scic->smu_registers->interrupt_status); + writel(SMU_ISR_COMPLETION, &ihost->smu_registers->interrupt_status); /* * There is a race in the hardware that could cause us not to be notified * of an interrupt completion if we do not take this step. We will mask * then unmask the interrupts so if there is another interrupt pending * the clearing of the interrupt source we get the next interrupt message. */ - writel(0xFF000000, &scic->smu_registers->interrupt_mask); - writel(0, &scic->smu_registers->interrupt_mask); + writel(0xFF000000, &ihost->smu_registers->interrupt_mask); + writel(0, &ihost->smu_registers->interrupt_mask); } return false; @@ -219,18 +219,18 @@ irqreturn_t isci_msix_isr(int vec, void *data) { struct isci_host *ihost = data; - if (scic_sds_controller_isr(&ihost->sci)) + if (scic_sds_controller_isr(ihost)) tasklet_schedule(&ihost->completion_tasklet); return IRQ_HANDLED; } -static bool scic_sds_controller_error_isr(struct scic_sds_controller *scic) +static bool scic_sds_controller_error_isr(struct isci_host *ihost) { u32 interrupt_status; interrupt_status = - readl(&scic->smu_registers->interrupt_status); + readl(&ihost->smu_registers->interrupt_status); interrupt_status &= (SMU_ISR_QUEUE_ERROR | SMU_ISR_QUEUE_SUSPEND); if (interrupt_status != 0) { @@ -246,28 +246,27 @@ static bool scic_sds_controller_error_isr(struct scic_sds_controller *scic) * then unmask the error interrupts so if there was another interrupt * pending we will be notified. 
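The mask-then-unmask re-arm described in that comment can be sketched on its own; a volatile pointer stands in for the writel() register accessors here, and the function name is invented.

    static void rearm_error_interrupts(volatile unsigned int *interrupt_mask)
    {
            *interrupt_mask = 0xff; /* mask while handling finishes */
            *interrupt_mask = 0;    /* unmask: a source that is still pending
                                     * raises a fresh interrupt message */
    }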
* Could we write the value of (SMU_ISR_QUEUE_ERROR | SMU_ISR_QUEUE_SUSPEND)? */ - writel(0xff, &scic->smu_registers->interrupt_mask); - writel(0, &scic->smu_registers->interrupt_mask); + writel(0xff, &ihost->smu_registers->interrupt_mask); + writel(0, &ihost->smu_registers->interrupt_mask); return false; } -static void scic_sds_controller_task_completion(struct scic_sds_controller *scic, +static void scic_sds_controller_task_completion(struct isci_host *ihost, u32 completion_entry) { u32 index = SCU_GET_COMPLETION_INDEX(completion_entry); - struct isci_host *ihost = scic_to_ihost(scic); struct isci_request *ireq = ihost->reqs[index]; /* Make sure that we really want to process this IO request */ if (test_bit(IREQ_ACTIVE, &ireq->flags) && ireq->io_tag != SCI_CONTROLLER_INVALID_IO_TAG && - ISCI_TAG_SEQ(ireq->io_tag) == scic->io_request_sequence[index]) + ISCI_TAG_SEQ(ireq->io_tag) == ihost->io_request_sequence[index]) /* Yep this is a valid io request pass it along to the io request handler */ scic_sds_io_request_tc_completion(ireq, completion_entry); } -static void scic_sds_controller_sdma_completion(struct scic_sds_controller *scic, +static void scic_sds_controller_sdma_completion(struct isci_host *ihost, u32 completion_entry) { u32 index; @@ -279,8 +278,8 @@ static void scic_sds_controller_sdma_completion(struct scic_sds_controller *scic switch (scu_get_command_request_type(completion_entry)) { case SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC: case SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_TC: - ireq = scic_to_ihost(scic)->reqs[index]; - dev_warn(scic_to_dev(scic), "%s: %x for io request %p\n", + ireq = ihost->reqs[index]; + dev_warn(&ihost->pdev->dev, "%s: %x for io request %p\n", __func__, completion_entry, ireq); /* @todo For a post TC operation we need to fail the IO * request @@ -289,27 +288,26 @@ static void scic_sds_controller_sdma_completion(struct scic_sds_controller *scic case SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_RNC: case SCU_CONTEXT_COMMAND_REQUEST_TYPE_OTHER_RNC: case SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_RNC: - idev = scic->device_table[index]; - dev_warn(scic_to_dev(scic), "%s: %x for device %p\n", + idev = ihost->device_table[index]; + dev_warn(&ihost->pdev->dev, "%s: %x for device %p\n", __func__, completion_entry, idev); /* @todo For a port RNC operation we need to fail the * device */ break; default: - dev_warn(scic_to_dev(scic), "%s: unknown completion type %x\n", + dev_warn(&ihost->pdev->dev, "%s: unknown completion type %x\n", __func__, completion_entry); break; } } -static void scic_sds_controller_unsolicited_frame(struct scic_sds_controller *scic, +static void scic_sds_controller_unsolicited_frame(struct isci_host *ihost, u32 completion_entry) { u32 index; u32 frame_index; - struct isci_host *ihost = scic_to_ihost(scic); struct scu_unsolicited_frame_header *frame_header; struct isci_phy *iphy; struct isci_remote_device *idev; @@ -318,15 +316,15 @@ static void scic_sds_controller_unsolicited_frame(struct scic_sds_controller *sc frame_index = SCU_GET_FRAME_INDEX(completion_entry); - frame_header = scic->uf_control.buffers.array[frame_index].header; - scic->uf_control.buffers.array[frame_index].state = UNSOLICITED_FRAME_IN_USE; + frame_header = ihost->uf_control.buffers.array[frame_index].header; + ihost->uf_control.buffers.array[frame_index].state = UNSOLICITED_FRAME_IN_USE; if (SCU_GET_FRAME_ERROR(completion_entry)) { /* * / @todo If the IAF frame or SIGNATURE FIS frame has an error will * / this cause a problem? 
We expect the phy initialization will * / fail if there is an error in the frame. */ - scic_sds_controller_release_frame(scic, frame_index); + scic_sds_controller_release_frame(ihost, frame_index); return; } @@ -347,15 +345,15 @@ static void scic_sds_controller_unsolicited_frame(struct scic_sds_controller *sc iphy = &ihost->phys[index]; result = scic_sds_phy_frame_handler(iphy, frame_index); } else { - if (index < scic->remote_node_entries) - idev = scic->device_table[index]; + if (index < ihost->remote_node_entries) + idev = ihost->device_table[index]; else idev = NULL; if (idev != NULL) result = scic_sds_remote_device_frame_handler(idev, frame_index); else - scic_sds_controller_release_frame(scic, frame_index); + scic_sds_controller_release_frame(ihost, frame_index); } } @@ -366,10 +364,9 @@ static void scic_sds_controller_unsolicited_frame(struct scic_sds_controller *sc } } -static void scic_sds_controller_event_completion(struct scic_sds_controller *scic, +static void scic_sds_controller_event_completion(struct isci_host *ihost, u32 completion_entry) { - struct isci_host *ihost = scic_to_ihost(scic); struct isci_remote_device *idev; struct isci_request *ireq; struct isci_phy *iphy; @@ -380,11 +377,11 @@ static void scic_sds_controller_event_completion(struct scic_sds_controller *sci switch (scu_get_event_type(completion_entry)) { case SCU_EVENT_TYPE_SMU_COMMAND_ERROR: /* / @todo The driver did something wrong and we need to fix the condtion. */ - dev_err(scic_to_dev(scic), + dev_err(&ihost->pdev->dev, "%s: SCIC Controller 0x%p received SMU command error " "0x%x\n", __func__, - scic, + ihost, completion_entry); break; @@ -394,11 +391,11 @@ static void scic_sds_controller_event_completion(struct scic_sds_controller *sci /* * / @todo This is a hardware failure and its likely that we want to * / reset the controller. 
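 * / For now the event is only reported via dev_err() below and the
 * / handler simply breaks out of the switch; no reset is initiated here.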
*/ - dev_err(scic_to_dev(scic), + dev_err(&ihost->pdev->dev, "%s: SCIC Controller 0x%p received fatal controller " "event 0x%x\n", __func__, - scic, + ihost, completion_entry); break; @@ -415,27 +412,27 @@ static void scic_sds_controller_event_completion(struct scic_sds_controller *sci if (ireq != NULL) scic_sds_io_request_event_handler(ireq, completion_entry); else - dev_warn(scic_to_dev(scic), + dev_warn(&ihost->pdev->dev, "%s: SCIC Controller 0x%p received " "event 0x%x for io request object " "that doesnt exist.\n", __func__, - scic, + ihost, completion_entry); break; case SCU_EVENT_SPECIFIC_IT_NEXUS_TIMEOUT: - idev = scic->device_table[index]; + idev = ihost->device_table[index]; if (idev != NULL) scic_sds_remote_device_event_handler(idev, completion_entry); else - dev_warn(scic_to_dev(scic), + dev_warn(&ihost->pdev->dev, "%s: SCIC Controller 0x%p received " "event 0x%x for remote device object " "that doesnt exist.\n", __func__, - scic, + ihost, completion_entry); break; @@ -459,25 +456,25 @@ static void scic_sds_controller_event_completion(struct scic_sds_controller *sci case SCU_EVENT_TYPE_RNC_SUSPEND_TX: case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX: case SCU_EVENT_TYPE_RNC_OPS_MISC: - if (index < scic->remote_node_entries) { - idev = scic->device_table[index]; + if (index < ihost->remote_node_entries) { + idev = ihost->device_table[index]; if (idev != NULL) scic_sds_remote_device_event_handler(idev, completion_entry); } else - dev_err(scic_to_dev(scic), + dev_err(&ihost->pdev->dev, "%s: SCIC Controller 0x%p received event 0x%x " "for remote device object 0x%0x that doesnt " "exist.\n", __func__, - scic, + ihost, completion_entry, index); break; default: - dev_warn(scic_to_dev(scic), + dev_warn(&ihost->pdev->dev, "%s: SCIC Controller received unknown event code %x\n", __func__, completion_entry); @@ -485,7 +482,7 @@ static void scic_sds_controller_event_completion(struct scic_sds_controller *sci } } -static void scic_sds_controller_process_completions(struct scic_sds_controller *scic) +static void scic_sds_controller_process_completions(struct isci_host *ihost) { u32 completion_count = 0; u32 completion_entry; @@ -494,47 +491,47 @@ static void scic_sds_controller_process_completions(struct scic_sds_controller * u32 event_get; u32 event_cycle; - dev_dbg(scic_to_dev(scic), + dev_dbg(&ihost->pdev->dev, "%s: completion queue begining get:0x%08x\n", __func__, - scic->completion_queue_get); + ihost->completion_queue_get); /* Get the component parts of the completion queue */ - get_index = NORMALIZE_GET_POINTER(scic->completion_queue_get); - get_cycle = SMU_CQGR_CYCLE_BIT & scic->completion_queue_get; + get_index = NORMALIZE_GET_POINTER(ihost->completion_queue_get); + get_cycle = SMU_CQGR_CYCLE_BIT & ihost->completion_queue_get; - event_get = NORMALIZE_EVENT_POINTER(scic->completion_queue_get); - event_cycle = SMU_CQGR_EVENT_CYCLE_BIT & scic->completion_queue_get; + event_get = NORMALIZE_EVENT_POINTER(ihost->completion_queue_get); + event_cycle = SMU_CQGR_EVENT_CYCLE_BIT & ihost->completion_queue_get; while ( NORMALIZE_GET_POINTER_CYCLE_BIT(get_cycle) - == COMPLETION_QUEUE_CYCLE_BIT(scic->completion_queue[get_index]) + == COMPLETION_QUEUE_CYCLE_BIT(ihost->completion_queue[get_index]) ) { completion_count++; - completion_entry = scic->completion_queue[get_index]; + completion_entry = ihost->completion_queue[get_index]; /* increment the get pointer and check for rollover to toggle the cycle bit */ get_cycle ^= ((get_index+1) & SCU_MAX_COMPLETION_QUEUE_ENTRIES) << 
(SMU_COMPLETION_QUEUE_GET_CYCLE_BIT_SHIFT - SCU_MAX_COMPLETION_QUEUE_SHIFT); get_index = (get_index+1) & (SCU_MAX_COMPLETION_QUEUE_ENTRIES-1); - dev_dbg(scic_to_dev(scic), + dev_dbg(&ihost->pdev->dev, "%s: completion queue entry:0x%08x\n", __func__, completion_entry); switch (SCU_GET_COMPLETION_TYPE(completion_entry)) { case SCU_COMPLETION_TYPE_TASK: - scic_sds_controller_task_completion(scic, completion_entry); + scic_sds_controller_task_completion(ihost, completion_entry); break; case SCU_COMPLETION_TYPE_SDMA: - scic_sds_controller_sdma_completion(scic, completion_entry); + scic_sds_controller_sdma_completion(ihost, completion_entry); break; case SCU_COMPLETION_TYPE_UFI: - scic_sds_controller_unsolicited_frame(scic, completion_entry); + scic_sds_controller_unsolicited_frame(ihost, completion_entry); break; case SCU_COMPLETION_TYPE_EVENT: @@ -543,11 +540,11 @@ static void scic_sds_controller_process_completions(struct scic_sds_controller * (SMU_COMPLETION_QUEUE_GET_EVENT_CYCLE_BIT_SHIFT - SCU_MAX_EVENTS_SHIFT); event_get = (event_get+1) & (SCU_MAX_EVENTS-1); - scic_sds_controller_event_completion(scic, completion_entry); + scic_sds_controller_event_completion(ihost, completion_entry); break; } default: - dev_warn(scic_to_dev(scic), + dev_warn(&ihost->pdev->dev, "%s: SCIC Controller received unknown " "completion type %x\n", __func__, @@ -558,7 +555,7 @@ static void scic_sds_controller_process_completions(struct scic_sds_controller * /* Update the get register if we completed one or more entries */ if (completion_count > 0) { - scic->completion_queue_get = + ihost->completion_queue_get = SMU_CQGR_GEN_BIT(ENABLE) | SMU_CQGR_GEN_BIT(EVENT_ENABLE) | event_cycle | @@ -566,35 +563,35 @@ static void scic_sds_controller_process_completions(struct scic_sds_controller * get_cycle | SMU_CQGR_GEN_VAL(POINTER, get_index); - writel(scic->completion_queue_get, - &scic->smu_registers->completion_queue_get); + writel(ihost->completion_queue_get, + &ihost->smu_registers->completion_queue_get); } - dev_dbg(scic_to_dev(scic), + dev_dbg(&ihost->pdev->dev, "%s: completion queue ending get:0x%08x\n", __func__, - scic->completion_queue_get); + ihost->completion_queue_get); } -static void scic_sds_controller_error_handler(struct scic_sds_controller *scic) +static void scic_sds_controller_error_handler(struct isci_host *ihost) { u32 interrupt_status; interrupt_status = - readl(&scic->smu_registers->interrupt_status); + readl(&ihost->smu_registers->interrupt_status); if ((interrupt_status & SMU_ISR_QUEUE_SUSPEND) && - scic_sds_controller_completion_queue_has_entries(scic)) { + scic_sds_controller_completion_queue_has_entries(ihost)) { - scic_sds_controller_process_completions(scic); - writel(SMU_ISR_QUEUE_SUSPEND, &scic->smu_registers->interrupt_status); + scic_sds_controller_process_completions(ihost); + writel(SMU_ISR_QUEUE_SUSPEND, &ihost->smu_registers->interrupt_status); } else { - dev_err(scic_to_dev(scic), "%s: status: %#x\n", __func__, + dev_err(&ihost->pdev->dev, "%s: status: %#x\n", __func__, interrupt_status); - sci_change_state(&scic->sm, SCIC_FAILED); + sci_change_state(&ihost->sm, SCIC_FAILED); return; } @@ -602,22 +599,21 @@ static void scic_sds_controller_error_handler(struct scic_sds_controller *scic) /* If we dont process any completions I am not sure that we want to do this. * We are in the middle of a hardware fault and should probably be reset. 
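 * (Writing 0 to interrupt_mask below unmasks every interrupt source
 * again, the same operation scic_controller_enable_interrupts() performs.)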
*/ - writel(0, &scic->smu_registers->interrupt_mask); + writel(0, &ihost->smu_registers->interrupt_mask); } irqreturn_t isci_intx_isr(int vec, void *data) { irqreturn_t ret = IRQ_NONE; struct isci_host *ihost = data; - struct scic_sds_controller *scic = &ihost->sci; - if (scic_sds_controller_isr(scic)) { - writel(SMU_ISR_COMPLETION, &scic->smu_registers->interrupt_status); + if (scic_sds_controller_isr(ihost)) { + writel(SMU_ISR_COMPLETION, &ihost->smu_registers->interrupt_status); tasklet_schedule(&ihost->completion_tasklet); ret = IRQ_HANDLED; - } else if (scic_sds_controller_error_isr(scic)) { + } else if (scic_sds_controller_error_isr(ihost)) { spin_lock(&ihost->scic_lock); - scic_sds_controller_error_handler(scic); + scic_sds_controller_error_handler(ihost); spin_unlock(&ihost->scic_lock); ret = IRQ_HANDLED; } @@ -629,8 +625,8 @@ irqreturn_t isci_error_isr(int vec, void *data) { struct isci_host *ihost = data; - if (scic_sds_controller_error_isr(&ihost->sci)) - scic_sds_controller_error_handler(&ihost->sci); + if (scic_sds_controller_error_isr(ihost)) + scic_sds_controller_error_handler(ihost); return IRQ_HANDLED; } @@ -685,11 +681,10 @@ int isci_host_scan_finished(struct Scsi_Host *shost, unsigned long time) * This method returns the number of milliseconds for the suggested start * operation timeout. */ -static u32 scic_controller_get_suggested_start_timeout( - struct scic_sds_controller *sc) +static u32 scic_controller_get_suggested_start_timeout(struct isci_host *ihost) { /* Validate the user supplied parameters. */ - if (sc == NULL) + if (!ihost) return 0; /* @@ -711,35 +706,32 @@ static u32 scic_controller_get_suggested_start_timeout( + ((SCI_MAX_PHYS - 1) * SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL); } -static void scic_controller_enable_interrupts( - struct scic_sds_controller *scic) +static void scic_controller_enable_interrupts(struct isci_host *ihost) { - BUG_ON(scic->smu_registers == NULL); - writel(0, &scic->smu_registers->interrupt_mask); + BUG_ON(ihost->smu_registers == NULL); + writel(0, &ihost->smu_registers->interrupt_mask); } -void scic_controller_disable_interrupts( - struct scic_sds_controller *scic) +void scic_controller_disable_interrupts(struct isci_host *ihost) { - BUG_ON(scic->smu_registers == NULL); - writel(0xffffffff, &scic->smu_registers->interrupt_mask); + BUG_ON(ihost->smu_registers == NULL); + writel(0xffffffff, &ihost->smu_registers->interrupt_mask); } -static void scic_sds_controller_enable_port_task_scheduler( - struct scic_sds_controller *scic) +static void scic_sds_controller_enable_port_task_scheduler(struct isci_host *ihost) { u32 port_task_scheduler_value; port_task_scheduler_value = - readl(&scic->scu_registers->peg0.ptsg.control); + readl(&ihost->scu_registers->peg0.ptsg.control); port_task_scheduler_value |= (SCU_PTSGCR_GEN_BIT(ETM_ENABLE) | SCU_PTSGCR_GEN_BIT(PTSG_ENABLE)); writel(port_task_scheduler_value, - &scic->scu_registers->peg0.ptsg.control); + &ihost->scu_registers->peg0.ptsg.control); } -static void scic_sds_controller_assign_task_entries(struct scic_sds_controller *scic) +static void scic_sds_controller_assign_task_entries(struct isci_host *ihost) { u32 task_assignment; @@ -749,32 +741,32 @@ static void scic_sds_controller_assign_task_entries(struct scic_sds_controller * */ task_assignment = - readl(&scic->smu_registers->task_context_assignment[0]); + readl(&ihost->smu_registers->task_context_assignment[0]); task_assignment |= (SMU_TCA_GEN_VAL(STARTING, 0)) | - (SMU_TCA_GEN_VAL(ENDING, scic->task_context_entries - 1)) | + 
(SMU_TCA_GEN_VAL(ENDING, ihost->task_context_entries - 1)) | (SMU_TCA_GEN_BIT(RANGE_CHECK_ENABLE)); writel(task_assignment, - &scic->smu_registers->task_context_assignment[0]); + &ihost->smu_registers->task_context_assignment[0]); } -static void scic_sds_controller_initialize_completion_queue(struct scic_sds_controller *scic) +static void scic_sds_controller_initialize_completion_queue(struct isci_host *ihost) { u32 index; u32 completion_queue_control_value; u32 completion_queue_get_value; u32 completion_queue_put_value; - scic->completion_queue_get = 0; + ihost->completion_queue_get = 0; completion_queue_control_value = (SMU_CQC_QUEUE_LIMIT_SET(SCU_MAX_COMPLETION_QUEUE_ENTRIES - 1) | SMU_CQC_EVENT_LIMIT_SET(SCU_MAX_EVENTS - 1)); writel(completion_queue_control_value, - &scic->smu_registers->completion_queue_control); + &ihost->smu_registers->completion_queue_control); /* Set the completion queue get pointer and enable the queue */ @@ -786,7 +778,7 @@ static void scic_sds_controller_initialize_completion_queue(struct scic_sds_cont ); writel(completion_queue_get_value, - &scic->smu_registers->completion_queue_get); + &ihost->smu_registers->completion_queue_get); /* Set the completion queue put pointer */ completion_queue_put_value = ( @@ -795,7 +787,7 @@ static void scic_sds_controller_initialize_completion_queue(struct scic_sds_cont ); writel(completion_queue_put_value, - &scic->smu_registers->completion_queue_put); + &ihost->smu_registers->completion_queue_put); /* Initialize the cycle bit of the completion queue entries */ for (index = 0; index < SCU_MAX_COMPLETION_QUEUE_ENTRIES; index++) { @@ -803,11 +795,11 @@ static void scic_sds_controller_initialize_completion_queue(struct scic_sds_cont * If get.cycle_bit != completion_queue.cycle_bit * its not a valid completion queue entry * so at system start all entries are invalid */ - scic->completion_queue[index] = 0x80000000; + ihost->completion_queue[index] = 0x80000000; } } -static void scic_sds_controller_initialize_unsolicited_frame_queue(struct scic_sds_controller *scic) +static void scic_sds_controller_initialize_unsolicited_frame_queue(struct isci_host *ihost) { u32 frame_queue_control_value; u32 frame_queue_get_value; @@ -818,7 +810,7 @@ static void scic_sds_controller_initialize_unsolicited_frame_queue(struct scic_s SCU_UFQC_GEN_VAL(QUEUE_SIZE, SCU_MAX_UNSOLICITED_FRAMES); writel(frame_queue_control_value, - &scic->scu_registers->sdma.unsolicited_frame_queue_control); + &ihost->scu_registers->sdma.unsolicited_frame_queue_control); /* Setup the get pointer for the unsolicited frame queue */ frame_queue_get_value = ( @@ -827,11 +819,11 @@ static void scic_sds_controller_initialize_unsolicited_frame_queue(struct scic_s ); writel(frame_queue_get_value, - &scic->scu_registers->sdma.unsolicited_frame_get_pointer); + &ihost->scu_registers->sdma.unsolicited_frame_get_pointer); /* Setup the put pointer for the unsolicited frame queue */ frame_queue_put_value = SCU_UFQPP_GEN_VAL(POINTER, 0); writel(frame_queue_put_value, - &scic->scu_registers->sdma.unsolicited_frame_put_pointer); + &ihost->scu_registers->sdma.unsolicited_frame_put_pointer); } /** @@ -846,17 +838,16 @@ static void scic_sds_controller_initialize_unsolicited_frame_queue(struct scic_s * none. 
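 * The ready transition is only taken while the controller is still in
 * the SCIC_STARTING state; the status value is passed straight through
 * to isci_host_start_complete().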
*/ static void scic_sds_controller_transition_to_ready( - struct scic_sds_controller *scic, + struct isci_host *ihost, enum sci_status status) { - struct isci_host *ihost = scic_to_ihost(scic); - if (scic->sm.current_state_id == SCIC_STARTING) { + if (ihost->sm.current_state_id == SCIC_STARTING) { /* * We move into the ready state, because some of the phys/ports * may be up and operational. */ - sci_change_state(&scic->sm, SCIC_READY); + sci_change_state(&ihost->sm, SCIC_READY); isci_host_start_complete(ihost, status); } @@ -892,19 +883,18 @@ static bool is_phy_starting(struct isci_phy *iphy) * controller to the READY state and inform the user * (scic_cb_controller_start_complete()). */ -static enum sci_status scic_sds_controller_start_next_phy(struct scic_sds_controller *scic) +static enum sci_status scic_sds_controller_start_next_phy(struct isci_host *ihost) { - struct isci_host *ihost = scic_to_ihost(scic); - struct scic_sds_oem_params *oem = &scic->oem_parameters.sds1; + struct scic_sds_oem_params *oem = &ihost->oem_parameters.sds1; struct isci_phy *iphy; enum sci_status status; status = SCI_SUCCESS; - if (scic->phy_startup_timer_pending) + if (ihost->phy_startup_timer_pending) return status; - if (scic->next_phy_to_start >= SCI_MAX_PHYS) { + if (ihost->next_phy_to_start >= SCI_MAX_PHYS) { bool is_controller_start_complete = true; u32 state; u8 index; @@ -934,16 +924,16 @@ static enum sci_status scic_sds_controller_start_next_phy(struct scic_sds_contro * The controller has successfully finished the start process. * Inform the SCI Core user and transition to the READY state. */ if (is_controller_start_complete == true) { - scic_sds_controller_transition_to_ready(scic, SCI_SUCCESS); - sci_del_timer(&scic->phy_timer); - scic->phy_startup_timer_pending = false; + scic_sds_controller_transition_to_ready(ihost, SCI_SUCCESS); + sci_del_timer(&ihost->phy_timer); + ihost->phy_startup_timer_pending = false; } } else { - iphy = &ihost->phys[scic->next_phy_to_start]; + iphy = &ihost->phys[ihost->next_phy_to_start]; if (oem->controller.mode_type == SCIC_PORT_MANUAL_CONFIGURATION_MODE) { if (phy_get_non_dummy_port(iphy) == NULL) { - scic->next_phy_to_start++; + ihost->next_phy_to_start++; /* Caution recursion ahead be forwarned * @@ -954,27 +944,27 @@ static enum sci_status scic_sds_controller_start_next_phy(struct scic_sds_contro * incorrectly for the PORT or it was never * assigned to a PORT */ - return scic_sds_controller_start_next_phy(scic); + return scic_sds_controller_start_next_phy(ihost); } } status = scic_sds_phy_start(iphy); if (status == SCI_SUCCESS) { - sci_mod_timer(&scic->phy_timer, + sci_mod_timer(&ihost->phy_timer, SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT); - scic->phy_startup_timer_pending = true; + ihost->phy_startup_timer_pending = true; } else { - dev_warn(scic_to_dev(scic), + dev_warn(&ihost->pdev->dev, "%s: Controller stop operation failed " "to stop phy %d because of status " "%d.\n", __func__, - ihost->phys[scic->next_phy_to_start].phy_index, + ihost->phys[ihost->next_phy_to_start].phy_index, status); } - scic->next_phy_to_start++; + ihost->next_phy_to_start++; } return status; @@ -983,8 +973,7 @@ static enum sci_status scic_sds_controller_start_next_phy(struct scic_sds_contro static void phy_startup_timeout(unsigned long data) { struct sci_timer *tmr = (struct sci_timer *)data; - struct scic_sds_controller *scic = container_of(tmr, typeof(*scic), phy_timer); - struct isci_host *ihost = scic_to_ihost(scic); + struct isci_host *ihost = container_of(tmr, typeof(*ihost), 
phy_timer); unsigned long flags; enum sci_status status; @@ -993,10 +982,10 @@ static void phy_startup_timeout(unsigned long data) if (tmr->cancel) goto done; - scic->phy_startup_timer_pending = false; + ihost->phy_startup_timer_pending = false; do { - status = scic_sds_controller_start_next_phy(scic); + status = scic_sds_controller_start_next_phy(ihost); } while (status != SCI_SUCCESS); done: @@ -1008,15 +997,14 @@ static u16 isci_tci_active(struct isci_host *ihost) return CIRC_CNT(ihost->tci_head, ihost->tci_tail, SCI_MAX_IO_REQUESTS); } -static enum sci_status scic_controller_start(struct scic_sds_controller *scic, +static enum sci_status scic_controller_start(struct isci_host *ihost, u32 timeout) { - struct isci_host *ihost = scic_to_ihost(scic); enum sci_status result; u16 index; - if (scic->sm.current_state_id != SCIC_INITIALIZED) { - dev_warn(scic_to_dev(scic), + if (ihost->sm.current_state_id != SCIC_INITIALIZED) { + dev_warn(&ihost->pdev->dev, "SCIC Controller start operation requested in " "invalid state\n"); return SCI_FAILURE_INVALID_STATE; @@ -1026,34 +1014,34 @@ static enum sci_status scic_controller_start(struct scic_sds_controller *scic, BUILD_BUG_ON(SCI_MAX_IO_REQUESTS > 1 << sizeof(ihost->tci_pool[0]) * 8); ihost->tci_head = 0; ihost->tci_tail = 0; - for (index = 0; index < scic->task_context_entries; index++) + for (index = 0; index < ihost->task_context_entries; index++) isci_tci_free(ihost, index); /* Build the RNi free pool */ scic_sds_remote_node_table_initialize( - &scic->available_remote_nodes, - scic->remote_node_entries); + &ihost->available_remote_nodes, + ihost->remote_node_entries); /* * Before anything else lets make sure we will not be * interrupted by the hardware. */ - scic_controller_disable_interrupts(scic); + scic_controller_disable_interrupts(ihost); /* Enable the port task scheduler */ - scic_sds_controller_enable_port_task_scheduler(scic); + scic_sds_controller_enable_port_task_scheduler(ihost); - /* Assign all the task entries to scic physical function */ - scic_sds_controller_assign_task_entries(scic); + /* Assign all the task entries to ihost physical function */ + scic_sds_controller_assign_task_entries(ihost); /* Now initialize the completion queue */ - scic_sds_controller_initialize_completion_queue(scic); + scic_sds_controller_initialize_completion_queue(ihost); /* Initialize the unsolicited frame queue for use */ - scic_sds_controller_initialize_unsolicited_frame_queue(scic); + scic_sds_controller_initialize_unsolicited_frame_queue(ihost); /* Start all of the ports on this controller */ - for (index = 0; index < scic->logical_port_entries; index++) { + for (index = 0; index < ihost->logical_port_entries; index++) { struct isci_port *iport = &ihost->ports[index]; result = scic_sds_port_start(iport); @@ -1061,11 +1049,11 @@ static enum sci_status scic_controller_start(struct scic_sds_controller *scic, return result; } - scic_sds_controller_start_next_phy(scic); + scic_sds_controller_start_next_phy(ihost); - sci_mod_timer(&scic->timer, timeout); + sci_mod_timer(&ihost->timer, timeout); - sci_change_state(&scic->sm, SCIC_STARTING); + sci_change_state(&ihost->sm, SCIC_STARTING); return SCI_SUCCESS; } @@ -1073,35 +1061,35 @@ static enum sci_status scic_controller_start(struct scic_sds_controller *scic, void isci_host_scan_start(struct Scsi_Host *shost) { struct isci_host *ihost = SHOST_TO_SAS_HA(shost)->lldd_ha; - unsigned long tmo = scic_controller_get_suggested_start_timeout(&ihost->sci); + unsigned long tmo = 
scic_controller_get_suggested_start_timeout(ihost); set_bit(IHOST_START_PENDING, &ihost->flags); spin_lock_irq(&ihost->scic_lock); - scic_controller_start(&ihost->sci, tmo); - scic_controller_enable_interrupts(&ihost->sci); + scic_controller_start(ihost, tmo); + scic_controller_enable_interrupts(ihost); spin_unlock_irq(&ihost->scic_lock); } static void isci_host_stop_complete(struct isci_host *ihost, enum sci_status completion_status) { isci_host_change_state(ihost, isci_stopped); - scic_controller_disable_interrupts(&ihost->sci); + scic_controller_disable_interrupts(ihost); clear_bit(IHOST_STOP_PENDING, &ihost->flags); wake_up(&ihost->eventq); } -static void scic_sds_controller_completion_handler(struct scic_sds_controller *scic) +static void scic_sds_controller_completion_handler(struct isci_host *ihost) { /* Empty out the completion queue */ - if (scic_sds_controller_completion_queue_has_entries(scic)) - scic_sds_controller_process_completions(scic); + if (scic_sds_controller_completion_queue_has_entries(ihost)) + scic_sds_controller_process_completions(ihost); /* Clear the interrupt and enable all interrupts again */ - writel(SMU_ISR_COMPLETION, &scic->smu_registers->interrupt_status); + writel(SMU_ISR_COMPLETION, &ihost->smu_registers->interrupt_status); /* Could we write the value of SMU_ISR_COMPLETION? */ - writel(0xFF000000, &scic->smu_registers->interrupt_mask); - writel(0, &scic->smu_registers->interrupt_mask); + writel(0xFF000000, &ihost->smu_registers->interrupt_mask); + writel(0, &ihost->smu_registers->interrupt_mask); } /** @@ -1114,7 +1102,7 @@ static void scic_sds_controller_completion_handler(struct scic_sds_controller *s */ static void isci_host_completion_routine(unsigned long data) { - struct isci_host *isci_host = (struct isci_host *)data; + struct isci_host *ihost = (struct isci_host *)data; struct list_head completed_request_list; struct list_head errored_request_list; struct list_head *current_position; @@ -1126,20 +1114,20 @@ static void isci_host_completion_routine(unsigned long data) INIT_LIST_HEAD(&completed_request_list); INIT_LIST_HEAD(&errored_request_list); - spin_lock_irq(&isci_host->scic_lock); + spin_lock_irq(&ihost->scic_lock); - scic_sds_controller_completion_handler(&isci_host->sci); + scic_sds_controller_completion_handler(ihost); /* Take the lists of completed I/Os from the host. */ - list_splice_init(&isci_host->requests_to_complete, + list_splice_init(&ihost->requests_to_complete, &completed_request_list); /* Take the list of errored I/Os from the host. */ - list_splice_init(&isci_host->requests_to_errorback, + list_splice_init(&ihost->requests_to_errorback, &errored_request_list); - spin_unlock_irq(&isci_host->scic_lock); + spin_unlock_irq(&ihost->scic_lock); /* Process any completions in the lists. 
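 * Both lists were spliced off the host under scic_lock above, so they
 * can be walked here without holding the lock; scic_lock is re-taken
 * only to free each request's tag (and, for errored requests, to unlink
 * them from the remote device's pending list).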
*/ list_for_each_safe(current_position, next_position, @@ -1150,7 +1138,7 @@ static void isci_host_completion_routine(unsigned long data) task = isci_request_access_task(request); /* Normal notification (task_done) */ - dev_dbg(&isci_host->pdev->dev, + dev_dbg(&ihost->pdev->dev, "%s: Normal - request/task = %p/%p\n", __func__, request, @@ -1169,9 +1157,9 @@ static void isci_host_completion_routine(unsigned long data) } } - spin_lock_irq(&isci_host->scic_lock); - isci_free_tag(isci_host, request->io_tag); - spin_unlock_irq(&isci_host->scic_lock); + spin_lock_irq(&ihost->scic_lock); + isci_free_tag(ihost, request->io_tag); + spin_unlock_irq(&ihost->scic_lock); } list_for_each_entry_safe(request, next_request, &errored_request_list, completed_node) { @@ -1179,7 +1167,7 @@ static void isci_host_completion_routine(unsigned long data) task = isci_request_access_task(request); /* Use sas_task_abort */ - dev_warn(&isci_host->pdev->dev, + dev_warn(&ihost->pdev->dev, "%s: Error - request/task = %p/%p\n", __func__, request, @@ -1202,13 +1190,13 @@ static void isci_host_completion_routine(unsigned long data) * it. */ - spin_lock_irq(&isci_host->scic_lock); + spin_lock_irq(&ihost->scic_lock); /* Remove the request from the remote device's list * of pending requests. */ list_del_init(&request->dev_node); - isci_free_tag(isci_host, request->io_tag); - spin_unlock_irq(&isci_host->scic_lock); + isci_free_tag(ihost, request->io_tag); + spin_unlock_irq(&ihost->scic_lock); } } @@ -1232,18 +1220,18 @@ static void isci_host_completion_routine(unsigned long data) * controller is already in the STOPPED state. SCI_FAILURE_INVALID_STATE if the * controller is not either in the STARTED or STOPPED states. */ -static enum sci_status scic_controller_stop(struct scic_sds_controller *scic, +static enum sci_status scic_controller_stop(struct isci_host *ihost, u32 timeout) { - if (scic->sm.current_state_id != SCIC_READY) { - dev_warn(scic_to_dev(scic), + if (ihost->sm.current_state_id != SCIC_READY) { + dev_warn(&ihost->pdev->dev, "SCIC Controller stop operation requested in " "invalid state\n"); return SCI_FAILURE_INVALID_STATE; } - sci_mod_timer(&scic->timer, timeout); - sci_change_state(&scic->sm, SCIC_STOPPING); + sci_mod_timer(&ihost->timer, timeout); + sci_change_state(&ihost->sm, SCIC_STOPPING); return SCI_SUCCESS; } @@ -1259,9 +1247,9 @@ static enum sci_status scic_controller_stop(struct scic_sds_controller *scic, * SCI_SUCCESS if the reset operation successfully started. SCI_FATAL_ERROR if * the controller reset operation is unable to complete. */ -static enum sci_status scic_controller_reset(struct scic_sds_controller *scic) +static enum sci_status scic_controller_reset(struct isci_host *ihost) { - switch (scic->sm.current_state_id) { + switch (ihost->sm.current_state_id) { case SCIC_RESET: case SCIC_READY: case SCIC_STOPPED: @@ -1270,10 +1258,10 @@ static enum sci_status scic_controller_reset(struct scic_sds_controller *scic) * The reset operation is not a graceful cleanup, just * perform the state transition. 
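 * Entering SCIC_RESETTING resets the SCU hardware and then drops the
 * state machine back to SCIC_RESET (see
 * scic_sds_controller_resetting_state_enter() below).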
*/ - sci_change_state(&scic->sm, SCIC_RESETTING); + sci_change_state(&ihost->sm, SCIC_RESETTING); return SCI_SUCCESS; default: - dev_warn(scic_to_dev(scic), + dev_warn(&ihost->pdev->dev, "SCIC Controller reset operation requested in " "invalid state\n"); return SCI_FAILURE_INVALID_STATE; @@ -1298,14 +1286,14 @@ void isci_host_deinit(struct isci_host *ihost) set_bit(IHOST_STOP_PENDING, &ihost->flags); spin_lock_irq(&ihost->scic_lock); - scic_controller_stop(&ihost->sci, SCIC_CONTROLLER_STOP_TIMEOUT); + scic_controller_stop(ihost, SCIC_CONTROLLER_STOP_TIMEOUT); spin_unlock_irq(&ihost->scic_lock); wait_for_stop(ihost); - scic_controller_reset(&ihost->sci); + scic_controller_reset(ihost); /* Cancel any/all outstanding port timers */ - for (i = 0; i < ihost->sci.logical_port_entries; i++) { + for (i = 0; i < ihost->logical_port_entries; i++) { struct isci_port *iport = &ihost->ports[i]; del_timer_sync(&iport->timer.timer); } @@ -1316,13 +1304,13 @@ void isci_host_deinit(struct isci_host *ihost) del_timer_sync(&iphy->sata_timer.timer); } - del_timer_sync(&ihost->sci.port_agent.timer.timer); + del_timer_sync(&ihost->port_agent.timer.timer); - del_timer_sync(&ihost->sci.power_control.timer.timer); + del_timer_sync(&ihost->power_control.timer.timer); - del_timer_sync(&ihost->sci.timer.timer); + del_timer_sync(&ihost->timer.timer); - del_timer_sync(&ihost->sci.phy_timer.timer); + del_timer_sync(&ihost->phy_timer.timer); } static void __iomem *scu_base(struct isci_host *isci_host) @@ -1369,16 +1357,16 @@ static void isci_user_parameters_get( static void scic_sds_controller_initial_state_enter(struct sci_base_state_machine *sm) { - struct scic_sds_controller *scic = container_of(sm, typeof(*scic), sm); + struct isci_host *ihost = container_of(sm, typeof(*ihost), sm); - sci_change_state(&scic->sm, SCIC_RESET); + sci_change_state(&ihost->sm, SCIC_RESET); } static inline void scic_sds_controller_starting_state_exit(struct sci_base_state_machine *sm) { - struct scic_sds_controller *scic = container_of(sm, typeof(*scic), sm); + struct isci_host *ihost = container_of(sm, typeof(*ihost), sm); - sci_del_timer(&scic->timer); + sci_del_timer(&ihost->timer); } #define INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_LOWER_BOUND_NS 853 @@ -1405,10 +1393,10 @@ static inline void scic_sds_controller_starting_state_exit(struct sci_base_state * SCI_SUCCESS The user successfully updated the interrutp coalescence. * SCI_FAILURE_INVALID_PARAMETER_VALUE The user input value is out of range. 
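 * The READY state entry below programs a default of 0x10 completions
 * with a timeout argument of 250, and the READY state exit disables
 * coalescing again by passing 0 for both values.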
*/ -static enum sci_status scic_controller_set_interrupt_coalescence( - struct scic_sds_controller *scic_controller, - u32 coalesce_number, - u32 coalesce_timeout) +static enum sci_status +scic_controller_set_interrupt_coalescence(struct isci_host *ihost, + u32 coalesce_number, + u32 coalesce_timeout) { u8 timeout_encode = 0; u32 min = 0; @@ -1491,11 +1479,11 @@ static enum sci_status scic_controller_set_interrupt_coalescence( writel(SMU_ICC_GEN_VAL(NUMBER, coalesce_number) | SMU_ICC_GEN_VAL(TIMER, timeout_encode), - &scic_controller->smu_registers->interrupt_coalesce_control); + &ihost->smu_registers->interrupt_coalesce_control); - scic_controller->interrupt_coalesce_number = (u16)coalesce_number; - scic_controller->interrupt_coalesce_timeout = coalesce_timeout / 100; + ihost->interrupt_coalesce_number = (u16)coalesce_number; + ihost->interrupt_coalesce_timeout = coalesce_timeout / 100; return SCI_SUCCESS; } @@ -1503,26 +1491,25 @@ static enum sci_status scic_controller_set_interrupt_coalescence( static void scic_sds_controller_ready_state_enter(struct sci_base_state_machine *sm) { - struct scic_sds_controller *scic = container_of(sm, typeof(*scic), sm); + struct isci_host *ihost = container_of(sm, typeof(*ihost), sm); /* set the default interrupt coalescence number and timeout value. */ - scic_controller_set_interrupt_coalescence(scic, 0x10, 250); + scic_controller_set_interrupt_coalescence(ihost, 0x10, 250); } static void scic_sds_controller_ready_state_exit(struct sci_base_state_machine *sm) { - struct scic_sds_controller *scic = container_of(sm, typeof(*scic), sm); + struct isci_host *ihost = container_of(sm, typeof(*ihost), sm); /* disable interrupt coalescence. */ - scic_controller_set_interrupt_coalescence(scic, 0, 0); + scic_controller_set_interrupt_coalescence(ihost, 0, 0); } -static enum sci_status scic_sds_controller_stop_phys(struct scic_sds_controller *scic) +static enum sci_status scic_sds_controller_stop_phys(struct isci_host *ihost) { u32 index; enum sci_status status; enum sci_status phy_status; - struct isci_host *ihost = scic_to_ihost(scic); status = SCI_SUCCESS; @@ -1533,7 +1520,7 @@ static enum sci_status scic_sds_controller_stop_phys(struct scic_sds_controller phy_status != SCI_FAILURE_INVALID_STATE) { status = SCI_FAILURE; - dev_warn(scic_to_dev(scic), + dev_warn(&ihost->pdev->dev, "%s: Controller stop operation failed to stop " "phy %d because of status %d.\n", __func__, @@ -1544,14 +1531,13 @@ static enum sci_status scic_sds_controller_stop_phys(struct scic_sds_controller return status; } -static enum sci_status scic_sds_controller_stop_ports(struct scic_sds_controller *scic) +static enum sci_status scic_sds_controller_stop_ports(struct isci_host *ihost) { u32 index; enum sci_status port_status; enum sci_status status = SCI_SUCCESS; - struct isci_host *ihost = scic_to_ihost(scic); - for (index = 0; index < scic->logical_port_entries; index++) { + for (index = 0; index < ihost->logical_port_entries; index++) { struct isci_port *iport = &ihost->ports[index]; port_status = scic_sds_port_stop(iport); @@ -1560,7 +1546,7 @@ static enum sci_status scic_sds_controller_stop_ports(struct scic_sds_controller (port_status != SCI_FAILURE_INVALID_STATE)) { status = SCI_FAILURE; - dev_warn(scic_to_dev(scic), + dev_warn(&ihost->pdev->dev, "%s: Controller stop operation failed to " "stop port %d because of status %d.\n", __func__, @@ -1572,7 +1558,7 @@ static enum sci_status scic_sds_controller_stop_ports(struct scic_sds_controller return status; } -static enum sci_status 
scic_sds_controller_stop_devices(struct scic_sds_controller *scic) +static enum sci_status scic_sds_controller_stop_devices(struct isci_host *ihost) { u32 index; enum sci_status status; @@ -1580,19 +1566,19 @@ static enum sci_status scic_sds_controller_stop_devices(struct scic_sds_controll status = SCI_SUCCESS; - for (index = 0; index < scic->remote_node_entries; index++) { - if (scic->device_table[index] != NULL) { + for (index = 0; index < ihost->remote_node_entries; index++) { + if (ihost->device_table[index] != NULL) { /* / @todo What timeout value do we want to provide to this request? */ - device_status = scic_remote_device_stop(scic->device_table[index], 0); + device_status = scic_remote_device_stop(ihost->device_table[index], 0); if ((device_status != SCI_SUCCESS) && (device_status != SCI_FAILURE_INVALID_STATE)) { - dev_warn(scic_to_dev(scic), + dev_warn(&ihost->pdev->dev, "%s: Controller stop operation failed " "to stop device 0x%p because of " "status %d.\n", __func__, - scic->device_table[index], device_status); + ihost->device_table[index], device_status); } } } @@ -1602,19 +1588,19 @@ static enum sci_status scic_sds_controller_stop_devices(struct scic_sds_controll static void scic_sds_controller_stopping_state_enter(struct sci_base_state_machine *sm) { - struct scic_sds_controller *scic = container_of(sm, typeof(*scic), sm); + struct isci_host *ihost = container_of(sm, typeof(*ihost), sm); /* Stop all of the components for this controller */ - scic_sds_controller_stop_phys(scic); - scic_sds_controller_stop_ports(scic); - scic_sds_controller_stop_devices(scic); + scic_sds_controller_stop_phys(ihost); + scic_sds_controller_stop_ports(ihost); + scic_sds_controller_stop_devices(ihost); } static void scic_sds_controller_stopping_state_exit(struct sci_base_state_machine *sm) { - struct scic_sds_controller *scic = container_of(sm, typeof(*scic), sm); + struct isci_host *ihost = container_of(sm, typeof(*ihost), sm); - sci_del_timer(&scic->timer); + sci_del_timer(&ihost->timer); } @@ -1623,30 +1609,30 @@ static void scic_sds_controller_stopping_state_exit(struct sci_base_state_machin * * This method will reset the controller hardware. */ -static void scic_sds_controller_reset_hardware(struct scic_sds_controller *scic) +static void scic_sds_controller_reset_hardware(struct isci_host *ihost) { /* Disable interrupts so we dont take any spurious interrupts */ - scic_controller_disable_interrupts(scic); + scic_controller_disable_interrupts(ihost); /* Reset the SCU */ - writel(0xFFFFFFFF, &scic->smu_registers->soft_reset_control); + writel(0xFFFFFFFF, &ihost->smu_registers->soft_reset_control); /* Delay for 1ms to before clearing the CQP and UFQPR. 
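 * (udelay() busy-waits, so this stalls the CPU for the full millisecond
 * before the CQGR and UFQGP writes below.)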
*/ udelay(1000); /* The write to the CQGR clears the CQP */ - writel(0x00000000, &scic->smu_registers->completion_queue_get); + writel(0x00000000, &ihost->smu_registers->completion_queue_get); /* The write to the UFQGP clears the UFQPR */ - writel(0, &scic->scu_registers->sdma.unsolicited_frame_get_pointer); + writel(0, &ihost->scu_registers->sdma.unsolicited_frame_get_pointer); } static void scic_sds_controller_resetting_state_enter(struct sci_base_state_machine *sm) { - struct scic_sds_controller *scic = container_of(sm, typeof(*scic), sm); + struct isci_host *ihost = container_of(sm, typeof(*ihost), sm); - scic_sds_controller_reset_hardware(scic); - sci_change_state(&scic->sm, SCIC_RESET); + scic_sds_controller_reset_hardware(ihost); + sci_change_state(&ihost->sm, SCIC_RESET); } static const struct sci_base_state scic_sds_controller_state_table[] = { @@ -1674,58 +1660,56 @@ static const struct sci_base_state scic_sds_controller_state_table[] = { [SCIC_FAILED] = {} }; -static void scic_sds_controller_set_default_config_parameters(struct scic_sds_controller *scic) +static void scic_sds_controller_set_default_config_parameters(struct isci_host *ihost) { /* these defaults are overridden by the platform / firmware */ - struct isci_host *ihost = scic_to_ihost(scic); u16 index; /* Default to APC mode. */ - scic->oem_parameters.sds1.controller.mode_type = SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE; + ihost->oem_parameters.sds1.controller.mode_type = SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE; /* Default to APC mode. */ - scic->oem_parameters.sds1.controller.max_concurrent_dev_spin_up = 1; + ihost->oem_parameters.sds1.controller.max_concurrent_dev_spin_up = 1; /* Default to no SSC operation. */ - scic->oem_parameters.sds1.controller.do_enable_ssc = false; + ihost->oem_parameters.sds1.controller.do_enable_ssc = false; /* Initialize all of the port parameter information to narrow ports. */ for (index = 0; index < SCI_MAX_PORTS; index++) { - scic->oem_parameters.sds1.ports[index].phy_mask = 0; + ihost->oem_parameters.sds1.ports[index].phy_mask = 0; } /* Initialize all of the phy parameter information. */ for (index = 0; index < SCI_MAX_PHYS; index++) { /* Default to 6G (i.e. Gen 3) for now. */ - scic->user_parameters.sds1.phys[index].max_speed_generation = 3; + ihost->user_parameters.sds1.phys[index].max_speed_generation = 3; /* the frequencies cannot be 0 */ - scic->user_parameters.sds1.phys[index].align_insertion_frequency = 0x7f; - scic->user_parameters.sds1.phys[index].in_connection_align_insertion_frequency = 0xff; - scic->user_parameters.sds1.phys[index].notify_enable_spin_up_insertion_frequency = 0x33; + ihost->user_parameters.sds1.phys[index].align_insertion_frequency = 0x7f; + ihost->user_parameters.sds1.phys[index].in_connection_align_insertion_frequency = 0xff; + ihost->user_parameters.sds1.phys[index].notify_enable_spin_up_insertion_frequency = 0x33; /* * Previous Vitesse based expanders had a arbitration issue that * is worked around by having the upper 32-bits of SAS address * with a value greater then the Vitesse company identifier. * Hence, usage of 0x5FCFFFFF. 
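 * The resulting default address (high = 0x5FCFFFFF, low = ihost->id + 1)
 * is only a placeholder; like the rest of these defaults it is normally
 * overridden by the platform / firmware OEM parameters.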
*/ - scic->oem_parameters.sds1.phys[index].sas_address.low = 0x1 + ihost->id; - scic->oem_parameters.sds1.phys[index].sas_address.high = 0x5FCFFFFF; + ihost->oem_parameters.sds1.phys[index].sas_address.low = 0x1 + ihost->id; + ihost->oem_parameters.sds1.phys[index].sas_address.high = 0x5FCFFFFF; } - scic->user_parameters.sds1.stp_inactivity_timeout = 5; - scic->user_parameters.sds1.ssp_inactivity_timeout = 5; - scic->user_parameters.sds1.stp_max_occupancy_timeout = 5; - scic->user_parameters.sds1.ssp_max_occupancy_timeout = 20; - scic->user_parameters.sds1.no_outbound_task_timeout = 20; + ihost->user_parameters.sds1.stp_inactivity_timeout = 5; + ihost->user_parameters.sds1.ssp_inactivity_timeout = 5; + ihost->user_parameters.sds1.stp_max_occupancy_timeout = 5; + ihost->user_parameters.sds1.ssp_max_occupancy_timeout = 20; + ihost->user_parameters.sds1.no_outbound_task_timeout = 20; } static void controller_timeout(unsigned long data) { struct sci_timer *tmr = (struct sci_timer *)data; - struct scic_sds_controller *scic = container_of(tmr, typeof(*scic), timer); - struct isci_host *ihost = scic_to_ihost(scic); - struct sci_base_state_machine *sm = &scic->sm; + struct isci_host *ihost = container_of(tmr, typeof(*ihost), timer); + struct sci_base_state_machine *sm = &ihost->sm; unsigned long flags; spin_lock_irqsave(&ihost->scic_lock, flags); @@ -1734,12 +1718,12 @@ static void controller_timeout(unsigned long data) goto done; if (sm->current_state_id == SCIC_STARTING) - scic_sds_controller_transition_to_ready(scic, SCI_FAILURE_TIMEOUT); + scic_sds_controller_transition_to_ready(ihost, SCI_FAILURE_TIMEOUT); else if (sm->current_state_id == SCIC_STOPPING) { sci_change_state(sm, SCIC_FAILED); isci_host_stop_complete(ihost, SCI_FAILURE_TIMEOUT); } else /* / @todo Now what do we want to do in this case? */ - dev_err(scic_to_dev(scic), + dev_err(&ihost->pdev->dev, "%s: Controller timer fired when controller was not " "in a state being timed.\n", __func__); @@ -1764,24 +1748,23 @@ done: * SCI_FAILURE_UNSUPPORTED_INIT_DATA_VERSION This value is returned if the * controller does not support the supplied initialization data version. 
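 * On success the routine finishes by calling scic_controller_reset(), so
 * a freshly constructed controller is put through a hardware reset before
 * it is initialized.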
*/ -static enum sci_status scic_controller_construct(struct scic_sds_controller *scic, +static enum sci_status scic_controller_construct(struct isci_host *ihost, void __iomem *scu_base, void __iomem *smu_base) { - struct isci_host *ihost = scic_to_ihost(scic); u8 i; - sci_init_sm(&scic->sm, scic_sds_controller_state_table, SCIC_INITIAL); + sci_init_sm(&ihost->sm, scic_sds_controller_state_table, SCIC_INITIAL); - scic->scu_registers = scu_base; - scic->smu_registers = smu_base; + ihost->scu_registers = scu_base; + ihost->smu_registers = smu_base; - scic_sds_port_configuration_agent_construct(&scic->port_agent); + scic_sds_port_configuration_agent_construct(&ihost->port_agent); /* Construct the ports for this controller */ for (i = 0; i < SCI_MAX_PORTS; i++) - scic_sds_port_construct(&ihost->ports[i], i, scic); - scic_sds_port_construct(&ihost->ports[i], SCIC_SDS_DUMMY_PORT, scic); + scic_sds_port_construct(&ihost->ports[i], i, ihost); + scic_sds_port_construct(&ihost->ports[i], SCIC_SDS_DUMMY_PORT, ihost); /* Construct the phys for this controller */ for (i = 0; i < SCI_MAX_PHYS; i++) { @@ -1790,14 +1773,14 @@ static enum sci_status scic_controller_construct(struct scic_sds_controller *sci &ihost->ports[SCI_MAX_PORTS], i); } - scic->invalid_phy_mask = 0; + ihost->invalid_phy_mask = 0; - sci_init_timer(&scic->timer, controller_timeout); + sci_init_timer(&ihost->timer, controller_timeout); /* Initialize the User and OEM parameters to default values. */ - scic_sds_controller_set_default_config_parameters(scic); + scic_sds_controller_set_default_config_parameters(ihost); - return scic_controller_reset(scic); + return scic_controller_reset(ihost); } int scic_oem_parameters_validate(struct scic_sds_oem_params *oem) @@ -1834,10 +1817,10 @@ int scic_oem_parameters_validate(struct scic_sds_oem_params *oem) return 0; } -static enum sci_status scic_oem_parameters_set(struct scic_sds_controller *scic, +static enum sci_status scic_oem_parameters_set(struct isci_host *ihost, union scic_oem_parameters *scic_parms) { - u32 state = scic->sm.current_state_id; + u32 state = ihost->sm.current_state_id; if (state == SCIC_RESET || state == SCIC_INITIALIZING || @@ -1845,7 +1828,7 @@ static enum sci_status scic_oem_parameters_set(struct scic_sds_controller *scic, if (scic_oem_parameters_validate(&scic_parms->sds1)) return SCI_FAILURE_INVALID_PARAMETER_VALUE; - scic->oem_parameters.sds1 = scic_parms->sds1; + ihost->oem_parameters.sds1 = scic_parms->sds1; return SCI_SUCCESS; } @@ -1854,17 +1837,16 @@ static enum sci_status scic_oem_parameters_set(struct scic_sds_controller *scic, } void scic_oem_parameters_get( - struct scic_sds_controller *scic, + struct isci_host *ihost, union scic_oem_parameters *scic_parms) { - memcpy(scic_parms, (&scic->oem_parameters), sizeof(*scic_parms)); + memcpy(scic_parms, (&ihost->oem_parameters), sizeof(*scic_parms)); } static void power_control_timeout(unsigned long data) { struct sci_timer *tmr = (struct sci_timer *)data; - struct scic_sds_controller *scic = container_of(tmr, typeof(*scic), power_control.timer); - struct isci_host *ihost = scic_to_ihost(scic); + struct isci_host *ihost = container_of(tmr, typeof(*ihost), power_control.timer); struct isci_phy *iphy; unsigned long flags; u8 i; @@ -1874,29 +1856,29 @@ static void power_control_timeout(unsigned long data) if (tmr->cancel) goto done; - scic->power_control.phys_granted_power = 0; + ihost->power_control.phys_granted_power = 0; - if (scic->power_control.phys_waiting == 0) { - scic->power_control.timer_started = false; + if 
(ihost->power_control.phys_waiting == 0) { + ihost->power_control.timer_started = false; goto done; } for (i = 0; i < SCI_MAX_PHYS; i++) { - if (scic->power_control.phys_waiting == 0) + if (ihost->power_control.phys_waiting == 0) break; - iphy = scic->power_control.requesters[i]; + iphy = ihost->power_control.requesters[i]; if (iphy == NULL) continue; - if (scic->power_control.phys_granted_power >= - scic->oem_parameters.sds1.controller.max_concurrent_dev_spin_up) + if (ihost->power_control.phys_granted_power >= + ihost->oem_parameters.sds1.controller.max_concurrent_dev_spin_up) break; - scic->power_control.requesters[i] = NULL; - scic->power_control.phys_waiting--; - scic->power_control.phys_granted_power++; + ihost->power_control.requesters[i] = NULL; + ihost->power_control.phys_waiting--; + ihost->power_control.phys_granted_power++; scic_sds_phy_consume_power_handler(iphy); } @@ -1905,7 +1887,7 @@ static void power_control_timeout(unsigned long data) * timer in case another phy becomes ready. */ sci_mod_timer(tmr, SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL); - scic->power_control.timer_started = true; + ihost->power_control.timer_started = true; done: spin_unlock_irqrestore(&ihost->scic_lock, flags); @@ -1918,31 +1900,31 @@ done: * */ void scic_sds_controller_power_control_queue_insert( - struct scic_sds_controller *scic, + struct isci_host *ihost, struct isci_phy *iphy) { BUG_ON(iphy == NULL); - if (scic->power_control.phys_granted_power < - scic->oem_parameters.sds1.controller.max_concurrent_dev_spin_up) { - scic->power_control.phys_granted_power++; + if (ihost->power_control.phys_granted_power < + ihost->oem_parameters.sds1.controller.max_concurrent_dev_spin_up) { + ihost->power_control.phys_granted_power++; scic_sds_phy_consume_power_handler(iphy); /* * stop and start the power_control timer. When the timer fires, the * no_of_phys_granted_power will be set to 0 */ - if (scic->power_control.timer_started) - sci_del_timer(&scic->power_control.timer); + if (ihost->power_control.timer_started) + sci_del_timer(&ihost->power_control.timer); - sci_mod_timer(&scic->power_control.timer, + sci_mod_timer(&ihost->power_control.timer, SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL); - scic->power_control.timer_started = true; + ihost->power_control.timer_started = true; } else { /* Add the phy in the waiting list */ - scic->power_control.requesters[iphy->phy_index] = iphy; - scic->power_control.phys_waiting++; + ihost->power_control.requesters[iphy->phy_index] = iphy; + ihost->power_control.phys_waiting++; } } @@ -1953,16 +1935,16 @@ void scic_sds_controller_power_control_queue_insert( * */ void scic_sds_controller_power_control_queue_remove( - struct scic_sds_controller *scic, + struct isci_host *ihost, struct isci_phy *iphy) { BUG_ON(iphy == NULL); - if (scic->power_control.requesters[iphy->phy_index] != NULL) { - scic->power_control.phys_waiting--; + if (ihost->power_control.requesters[iphy->phy_index] != NULL) { + ihost->power_control.phys_waiting--; } - scic->power_control.requesters[iphy->phy_index] = NULL; + ihost->power_control.requesters[iphy->phy_index] = NULL; } #define AFE_REGISTER_WRITE_DELAY 10 @@ -1970,50 +1952,50 @@ void scic_sds_controller_power_control_queue_remove( /* Initialize the AFE for this phy index. 
We need to read the AFE setup from * the OEM parameters */ -static void scic_sds_controller_afe_initialization(struct scic_sds_controller *scic) +static void scic_sds_controller_afe_initialization(struct isci_host *ihost) { - const struct scic_sds_oem_params *oem = &scic->oem_parameters.sds1; + const struct scic_sds_oem_params *oem = &ihost->oem_parameters.sds1; u32 afe_status; u32 phy_id; /* Clear DFX Status registers */ - writel(0x0081000f, &scic->scu_registers->afe.afe_dfx_master_control0); + writel(0x0081000f, &ihost->scu_registers->afe.afe_dfx_master_control0); udelay(AFE_REGISTER_WRITE_DELAY); if (is_b0()) { /* PM Rx Equalization Save, PM SPhy Rx Acknowledgement * Timer, PM Stagger Timer */ - writel(0x0007BFFF, &scic->scu_registers->afe.afe_pmsn_master_control2); + writel(0x0007BFFF, &ihost->scu_registers->afe.afe_pmsn_master_control2); udelay(AFE_REGISTER_WRITE_DELAY); } /* Configure bias currents to normal */ if (is_a0()) - writel(0x00005500, &scic->scu_registers->afe.afe_bias_control); + writel(0x00005500, &ihost->scu_registers->afe.afe_bias_control); else if (is_a2()) - writel(0x00005A00, &scic->scu_registers->afe.afe_bias_control); + writel(0x00005A00, &ihost->scu_registers->afe.afe_bias_control); else if (is_b0() || is_c0()) - writel(0x00005F00, &scic->scu_registers->afe.afe_bias_control); + writel(0x00005F00, &ihost->scu_registers->afe.afe_bias_control); udelay(AFE_REGISTER_WRITE_DELAY); /* Enable PLL */ if (is_b0() || is_c0()) - writel(0x80040A08, &scic->scu_registers->afe.afe_pll_control0); + writel(0x80040A08, &ihost->scu_registers->afe.afe_pll_control0); else - writel(0x80040908, &scic->scu_registers->afe.afe_pll_control0); + writel(0x80040908, &ihost->scu_registers->afe.afe_pll_control0); udelay(AFE_REGISTER_WRITE_DELAY); /* Wait for the PLL to lock */ do { - afe_status = readl(&scic->scu_registers->afe.afe_common_block_status); + afe_status = readl(&ihost->scu_registers->afe.afe_common_block_status); udelay(AFE_REGISTER_WRITE_DELAY); } while ((afe_status & 0x00001000) == 0); if (is_a0() || is_a2()) { /* Shorten SAS SNW lock time (RxLock timer value from 76 us to 50 us) */ - writel(0x7bcc96ad, &scic->scu_registers->afe.afe_pmsn_master_control0); + writel(0x7bcc96ad, &ihost->scu_registers->afe.afe_pmsn_master_control0); udelay(AFE_REGISTER_WRITE_DELAY); } @@ -2022,26 +2004,26 @@ static void scic_sds_controller_afe_initialization(struct scic_sds_controller *s if (is_b0()) { /* Configure transmitter SSC parameters */ - writel(0x00030000, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_ssc_control); + writel(0x00030000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_ssc_control); udelay(AFE_REGISTER_WRITE_DELAY); } else if (is_c0()) { /* Configure transmitter SSC parameters */ - writel(0x0003000, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_ssc_control); + writel(0x0003000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_ssc_control); udelay(AFE_REGISTER_WRITE_DELAY); /* * All defaults, except the Receive Word Alignament/Comma Detect * Enable....(0xe800) */ - writel(0x00004500, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control0); + writel(0x00004500, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control0); udelay(AFE_REGISTER_WRITE_DELAY); } else { /* * All defaults, except the Receive Word Alignament/Comma Detect * Enable....(0xe800) */ - writel(0x00004512, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control0); + writel(0x00004512, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control0); 
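/* AFE register writes in this function are each followed by
 * udelay(AFE_REGISTER_WRITE_DELAY) (10 us, per the define above) before
 * the next AFE register is touched.  A helper expressing that pattern
 * could look like the sketch below -- purely illustrative, the afe_write()
 * name is hypothetical and not part of this patch:
 *
 *	static void afe_write(u32 val, void __iomem *reg)
 *	{
 *		writel(val, reg);
 *		udelay(AFE_REGISTER_WRITE_DELAY);
 *	}
 */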
udelay(AFE_REGISTER_WRITE_DELAY); - writel(0x0050100F, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control1); + writel(0x0050100F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control1); udelay(AFE_REGISTER_WRITE_DELAY); } @@ -2049,106 +2031,105 @@ static void scic_sds_controller_afe_initialization(struct scic_sds_controller *s * Power up TX and RX out from power down (PWRDNTX and PWRDNRX) * & increase TX int & ext bias 20%....(0xe85c) */ if (is_a0()) - writel(0x000003D4, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control); + writel(0x000003D4, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control); else if (is_a2()) - writel(0x000003F0, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control); + writel(0x000003F0, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control); else if (is_b0()) { /* Power down TX and RX (PWRDNTX and PWRDNRX) */ - writel(0x000003D7, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control); + writel(0x000003D7, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control); udelay(AFE_REGISTER_WRITE_DELAY); /* * Power up TX and RX out from power down (PWRDNTX and PWRDNRX) * & increase TX int & ext bias 20%....(0xe85c) */ - writel(0x000003D4, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control); + writel(0x000003D4, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control); } else { - writel(0x000001E7, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control); + writel(0x000001E7, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control); udelay(AFE_REGISTER_WRITE_DELAY); /* * Power up TX and RX out from power down (PWRDNTX and PWRDNRX) * & increase TX int & ext bias 20%....(0xe85c) */ - writel(0x000001E4, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control); + writel(0x000001E4, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control); } udelay(AFE_REGISTER_WRITE_DELAY); if (is_a0() || is_a2()) { /* Enable TX equalization (0xe824) */ - writel(0x00040000, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_control); + writel(0x00040000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_control); udelay(AFE_REGISTER_WRITE_DELAY); } /* * RDPI=0x0(RX Power On), RXOOBDETPDNC=0x0, TPD=0x0(TX Power On), * RDD=0x0(RX Detect Enabled) ....(0xe800) */ - writel(0x00004100, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control0); + writel(0x00004100, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control0); udelay(AFE_REGISTER_WRITE_DELAY); /* Leave DFE/FFE on */ if (is_a0()) - writel(0x3F09983F, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0); + writel(0x3F09983F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0); else if (is_a2()) - writel(0x3F11103F, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0); + writel(0x3F11103F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0); else if (is_b0()) { - writel(0x3F11103F, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0); + writel(0x3F11103F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0); udelay(AFE_REGISTER_WRITE_DELAY); /* Enable TX equalization (0xe824) */ - writel(0x00040000, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_control); + writel(0x00040000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_control); } else { - writel(0x0140DF0F, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control1); + 
writel(0x0140DF0F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control1); udelay(AFE_REGISTER_WRITE_DELAY); - writel(0x3F6F103F, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0); + writel(0x3F6F103F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0); udelay(AFE_REGISTER_WRITE_DELAY); /* Enable TX equalization (0xe824) */ - writel(0x00040000, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_control); + writel(0x00040000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_control); } udelay(AFE_REGISTER_WRITE_DELAY); writel(oem_phy->afe_tx_amp_control0, - &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control0); + &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control0); udelay(AFE_REGISTER_WRITE_DELAY); writel(oem_phy->afe_tx_amp_control1, - &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control1); + &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control1); udelay(AFE_REGISTER_WRITE_DELAY); writel(oem_phy->afe_tx_amp_control2, - &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control2); + &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control2); udelay(AFE_REGISTER_WRITE_DELAY); writel(oem_phy->afe_tx_amp_control3, - &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control3); + &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control3); udelay(AFE_REGISTER_WRITE_DELAY); } /* Transfer control to the PEs */ - writel(0x00010f00, &scic->scu_registers->afe.afe_dfx_master_control0); + writel(0x00010f00, &ihost->scu_registers->afe.afe_dfx_master_control0); udelay(AFE_REGISTER_WRITE_DELAY); } -static void scic_sds_controller_initialize_power_control(struct scic_sds_controller *scic) +static void scic_sds_controller_initialize_power_control(struct isci_host *ihost) { - sci_init_timer(&scic->power_control.timer, power_control_timeout); + sci_init_timer(&ihost->power_control.timer, power_control_timeout); - memset(scic->power_control.requesters, 0, - sizeof(scic->power_control.requesters)); + memset(ihost->power_control.requesters, 0, + sizeof(ihost->power_control.requesters)); - scic->power_control.phys_waiting = 0; - scic->power_control.phys_granted_power = 0; + ihost->power_control.phys_waiting = 0; + ihost->power_control.phys_granted_power = 0; } -static enum sci_status scic_controller_initialize(struct scic_sds_controller *scic) +static enum sci_status scic_controller_initialize(struct isci_host *ihost) { - struct sci_base_state_machine *sm = &scic->sm; - struct isci_host *ihost = scic_to_ihost(scic); + struct sci_base_state_machine *sm = &ihost->sm; enum sci_status result = SCI_FAILURE; unsigned long i, state, val; - if (scic->sm.current_state_id != SCIC_RESET) { - dev_warn(scic_to_dev(scic), + if (ihost->sm.current_state_id != SCIC_RESET) { + dev_warn(&ihost->pdev->dev, "SCIC Controller initialize operation requested " "in invalid state\n"); return SCI_FAILURE_INVALID_STATE; @@ -2156,23 +2137,23 @@ static enum sci_status scic_controller_initialize(struct scic_sds_controller *sc sci_change_state(sm, SCIC_INITIALIZING); - sci_init_timer(&scic->phy_timer, phy_startup_timeout); + sci_init_timer(&ihost->phy_timer, phy_startup_timeout); - scic->next_phy_to_start = 0; - scic->phy_startup_timer_pending = false; + ihost->next_phy_to_start = 0; + ihost->phy_startup_timer_pending = false; - scic_sds_controller_initialize_power_control(scic); + scic_sds_controller_initialize_power_control(ihost); /* * There is nothing to do here for B0 since we do not have to 
* program the AFE registers. * / @todo The AFE settings are supposed to be correct for the B0 but * / presently they seem to be wrong. */ - scic_sds_controller_afe_initialization(scic); + scic_sds_controller_afe_initialization(ihost); /* Take the hardware out of reset */ - writel(0, &scic->smu_registers->soft_reset_control); + writel(0, &ihost->smu_registers->soft_reset_control); /* * / @todo Provide meaningfull error code for hardware failure @@ -2182,7 +2163,7 @@ static enum sci_status scic_controller_initialize(struct scic_sds_controller *sc /* Loop until the hardware reports success */ udelay(SCU_CONTEXT_RAM_INIT_STALL_TIME); - status = readl(&scic->smu_registers->control_status); + status = readl(&ihost->smu_registers->control_status); if ((status & SCU_RAM_INIT_COMPLETED) == SCU_RAM_INIT_COMPLETED) break; @@ -2193,32 +2174,32 @@ static enum sci_status scic_controller_initialize(struct scic_sds_controller *sc /* * Determine what are the actaul device capacities that the * hardware will support */ - val = readl(&scic->smu_registers->device_context_capacity); + val = readl(&ihost->smu_registers->device_context_capacity); /* Record the smaller of the two capacity values */ - scic->logical_port_entries = min(smu_max_ports(val), SCI_MAX_PORTS); - scic->task_context_entries = min(smu_max_task_contexts(val), SCI_MAX_IO_REQUESTS); - scic->remote_node_entries = min(smu_max_rncs(val), SCI_MAX_REMOTE_DEVICES); + ihost->logical_port_entries = min(smu_max_ports(val), SCI_MAX_PORTS); + ihost->task_context_entries = min(smu_max_task_contexts(val), SCI_MAX_IO_REQUESTS); + ihost->remote_node_entries = min(smu_max_rncs(val), SCI_MAX_REMOTE_DEVICES); /* * Make all PEs that are unassigned match up with the * logical ports */ - for (i = 0; i < scic->logical_port_entries; i++) { + for (i = 0; i < ihost->logical_port_entries; i++) { struct scu_port_task_scheduler_group_registers __iomem - *ptsg = &scic->scu_registers->peg0.ptsg; + *ptsg = &ihost->scu_registers->peg0.ptsg; writel(i, &ptsg->protocol_engine[i]); } /* Initialize hardware PCI Relaxed ordering in DMA engines */ - val = readl(&scic->scu_registers->sdma.pdma_configuration); + val = readl(&ihost->scu_registers->sdma.pdma_configuration); val |= SCU_PDMACR_GEN_BIT(PCI_RELAXED_ORDERING_ENABLE); - writel(val, &scic->scu_registers->sdma.pdma_configuration); + writel(val, &ihost->scu_registers->sdma.pdma_configuration); - val = readl(&scic->scu_registers->sdma.cdma_configuration); + val = readl(&ihost->scu_registers->sdma.cdma_configuration); val |= SCU_CDMACR_GEN_BIT(PCI_RELAXED_ORDERING_ENABLE); - writel(val, &scic->scu_registers->sdma.cdma_configuration); + writel(val, &ihost->scu_registers->sdma.cdma_configuration); /* * Initialize the PHYs before the PORTs because the PHY registers @@ -2226,23 +2207,23 @@ static enum sci_status scic_controller_initialize(struct scic_sds_controller *sc */ for (i = 0; i < SCI_MAX_PHYS; i++) { result = scic_sds_phy_initialize(&ihost->phys[i], - &scic->scu_registers->peg0.pe[i].tl, - &scic->scu_registers->peg0.pe[i].ll); + &ihost->scu_registers->peg0.pe[i].tl, + &ihost->scu_registers->peg0.pe[i].ll); if (result != SCI_SUCCESS) goto out; } - for (i = 0; i < scic->logical_port_entries; i++) { + for (i = 0; i < ihost->logical_port_entries; i++) { result = scic_sds_port_initialize(&ihost->ports[i], - &scic->scu_registers->peg0.ptsg.port[i], - &scic->scu_registers->peg0.ptsg.protocol_engine, - &scic->scu_registers->peg0.viit[i]); + &ihost->scu_registers->peg0.ptsg.port[i], + &ihost->scu_registers->peg0.ptsg.protocol_engine, + 
&ihost->scu_registers->peg0.viit[i]); if (result != SCI_SUCCESS) goto out; } - result = scic_sds_port_configuration_agent_initialize(scic, &scic->port_agent); + result = scic_sds_port_configuration_agent_initialize(ihost, &ihost->port_agent); out: /* Advance the controller state machine */ @@ -2256,10 +2237,10 @@ static enum sci_status scic_controller_initialize(struct scic_sds_controller *sc } static enum sci_status scic_user_parameters_set( - struct scic_sds_controller *scic, + struct isci_host *ihost, union scic_user_parameters *scic_parms) { - u32 state = scic->sm.current_state_id; + u32 state = ihost->sm.current_state_id; if (state == SCIC_RESET || state == SCIC_INITIALIZING || @@ -2301,7 +2282,7 @@ static enum sci_status scic_user_parameters_set( (scic_parms->sds1.no_outbound_task_timeout == 0)) return SCI_FAILURE_INVALID_PARAMETER_VALUE; - memcpy(&scic->user_parameters, scic_parms, sizeof(*scic_parms)); + memcpy(&ihost->user_parameters, scic_parms, sizeof(*scic_parms)); return SCI_SUCCESS; } @@ -2309,40 +2290,40 @@ static enum sci_status scic_user_parameters_set( return SCI_FAILURE_INVALID_STATE; } -static int scic_controller_mem_init(struct scic_sds_controller *scic) +static int scic_controller_mem_init(struct isci_host *ihost) { - struct device *dev = scic_to_dev(scic); + struct device *dev = &ihost->pdev->dev; dma_addr_t dma; size_t size; int err; size = SCU_MAX_COMPLETION_QUEUE_ENTRIES * sizeof(u32); - scic->completion_queue = dmam_alloc_coherent(dev, size, &dma, GFP_KERNEL); - if (!scic->completion_queue) + ihost->completion_queue = dmam_alloc_coherent(dev, size, &dma, GFP_KERNEL); + if (!ihost->completion_queue) return -ENOMEM; - writel(lower_32_bits(dma), &scic->smu_registers->completion_queue_lower); - writel(upper_32_bits(dma), &scic->smu_registers->completion_queue_upper); + writel(lower_32_bits(dma), &ihost->smu_registers->completion_queue_lower); + writel(upper_32_bits(dma), &ihost->smu_registers->completion_queue_upper); - size = scic->remote_node_entries * sizeof(union scu_remote_node_context); - scic->remote_node_context_table = dmam_alloc_coherent(dev, size, &dma, + size = ihost->remote_node_entries * sizeof(union scu_remote_node_context); + ihost->remote_node_context_table = dmam_alloc_coherent(dev, size, &dma, GFP_KERNEL); - if (!scic->remote_node_context_table) + if (!ihost->remote_node_context_table) return -ENOMEM; - writel(lower_32_bits(dma), &scic->smu_registers->remote_node_context_lower); - writel(upper_32_bits(dma), &scic->smu_registers->remote_node_context_upper); + writel(lower_32_bits(dma), &ihost->smu_registers->remote_node_context_lower); + writel(upper_32_bits(dma), &ihost->smu_registers->remote_node_context_upper); - size = scic->task_context_entries * sizeof(struct scu_task_context), - scic->task_context_table = dmam_alloc_coherent(dev, size, &dma, GFP_KERNEL); - if (!scic->task_context_table) + size = ihost->task_context_entries * sizeof(struct scu_task_context), + ihost->task_context_table = dmam_alloc_coherent(dev, size, &dma, GFP_KERNEL); + if (!ihost->task_context_table) return -ENOMEM; - scic->task_context_dma = dma; - writel(lower_32_bits(dma), &scic->smu_registers->host_task_table_lower); - writel(upper_32_bits(dma), &scic->smu_registers->host_task_table_upper); + ihost->task_context_dma = dma; + writel(lower_32_bits(dma), &ihost->smu_registers->host_task_table_lower); + writel(upper_32_bits(dma), &ihost->smu_registers->host_task_table_upper); - err = scic_sds_unsolicited_frame_control_construct(scic); + err = 
scic_sds_unsolicited_frame_control_construct(ihost); if (err) return err; @@ -2350,112 +2331,112 @@ static int scic_controller_mem_init(struct scic_sds_controller *scic) * Inform the silicon as to the location of the UF headers and * address table. */ - writel(lower_32_bits(scic->uf_control.headers.physical_address), - &scic->scu_registers->sdma.uf_header_base_address_lower); - writel(upper_32_bits(scic->uf_control.headers.physical_address), - &scic->scu_registers->sdma.uf_header_base_address_upper); + writel(lower_32_bits(ihost->uf_control.headers.physical_address), + &ihost->scu_registers->sdma.uf_header_base_address_lower); + writel(upper_32_bits(ihost->uf_control.headers.physical_address), + &ihost->scu_registers->sdma.uf_header_base_address_upper); - writel(lower_32_bits(scic->uf_control.address_table.physical_address), - &scic->scu_registers->sdma.uf_address_table_lower); - writel(upper_32_bits(scic->uf_control.address_table.physical_address), - &scic->scu_registers->sdma.uf_address_table_upper); + writel(lower_32_bits(ihost->uf_control.address_table.physical_address), + &ihost->scu_registers->sdma.uf_address_table_lower); + writel(upper_32_bits(ihost->uf_control.address_table.physical_address), + &ihost->scu_registers->sdma.uf_address_table_upper); return 0; } -int isci_host_init(struct isci_host *isci_host) +int isci_host_init(struct isci_host *ihost) { int err = 0, i; enum sci_status status; union scic_oem_parameters oem; union scic_user_parameters scic_user_params; - struct isci_pci_info *pci_info = to_pci_info(isci_host->pdev); + struct isci_pci_info *pci_info = to_pci_info(ihost->pdev); - spin_lock_init(&isci_host->state_lock); - spin_lock_init(&isci_host->scic_lock); - init_waitqueue_head(&isci_host->eventq); + spin_lock_init(&ihost->state_lock); + spin_lock_init(&ihost->scic_lock); + init_waitqueue_head(&ihost->eventq); - isci_host_change_state(isci_host, isci_starting); + isci_host_change_state(ihost, isci_starting); - status = scic_controller_construct(&isci_host->sci, scu_base(isci_host), - smu_base(isci_host)); + status = scic_controller_construct(ihost, scu_base(ihost), + smu_base(ihost)); if (status != SCI_SUCCESS) { - dev_err(&isci_host->pdev->dev, + dev_err(&ihost->pdev->dev, "%s: scic_controller_construct failed - status = %x\n", __func__, status); return -ENODEV; } - isci_host->sas_ha.dev = &isci_host->pdev->dev; - isci_host->sas_ha.lldd_ha = isci_host; + ihost->sas_ha.dev = &ihost->pdev->dev; + ihost->sas_ha.lldd_ha = ihost; /* * grab initial values stored in the controller object for OEM and USER * parameters */ - isci_user_parameters_get(isci_host, &scic_user_params); - status = scic_user_parameters_set(&isci_host->sci, + isci_user_parameters_get(ihost, &scic_user_params); + status = scic_user_parameters_set(ihost, &scic_user_params); if (status != SCI_SUCCESS) { - dev_warn(&isci_host->pdev->dev, + dev_warn(&ihost->pdev->dev, "%s: scic_user_parameters_set failed\n", __func__); return -ENODEV; } - scic_oem_parameters_get(&isci_host->sci, &oem); + scic_oem_parameters_get(ihost, &oem); /* grab any OEM parameters specified in orom */ if (pci_info->orom) { status = isci_parse_oem_parameters(&oem, pci_info->orom, - isci_host->id); + ihost->id); if (status != SCI_SUCCESS) { - dev_warn(&isci_host->pdev->dev, + dev_warn(&ihost->pdev->dev, "parsing firmware oem parameters failed\n"); return -EINVAL; } } - status = scic_oem_parameters_set(&isci_host->sci, &oem); + status = scic_oem_parameters_set(ihost, &oem); if (status != SCI_SUCCESS) { - dev_warn(&isci_host->pdev->dev, + 
dev_warn(&ihost->pdev->dev, "%s: scic_oem_parameters_set failed\n", __func__); return -ENODEV; } - tasklet_init(&isci_host->completion_tasklet, - isci_host_completion_routine, (unsigned long)isci_host); + tasklet_init(&ihost->completion_tasklet, + isci_host_completion_routine, (unsigned long)ihost); - INIT_LIST_HEAD(&isci_host->requests_to_complete); - INIT_LIST_HEAD(&isci_host->requests_to_errorback); + INIT_LIST_HEAD(&ihost->requests_to_complete); + INIT_LIST_HEAD(&ihost->requests_to_errorback); - spin_lock_irq(&isci_host->scic_lock); - status = scic_controller_initialize(&isci_host->sci); - spin_unlock_irq(&isci_host->scic_lock); + spin_lock_irq(&ihost->scic_lock); + status = scic_controller_initialize(ihost); + spin_unlock_irq(&ihost->scic_lock); if (status != SCI_SUCCESS) { - dev_warn(&isci_host->pdev->dev, + dev_warn(&ihost->pdev->dev, "%s: scic_controller_initialize failed -" " status = 0x%x\n", __func__, status); return -ENODEV; } - err = scic_controller_mem_init(&isci_host->sci); + err = scic_controller_mem_init(ihost); if (err) return err; for (i = 0; i < SCI_MAX_PORTS; i++) - isci_port_init(&isci_host->ports[i], isci_host, i); + isci_port_init(&ihost->ports[i], ihost, i); for (i = 0; i < SCI_MAX_PHYS; i++) - isci_phy_init(&isci_host->phys[i], isci_host, i); + isci_phy_init(&ihost->phys[i], ihost, i); for (i = 0; i < SCI_MAX_REMOTE_DEVICES; i++) { - struct isci_remote_device *idev = &isci_host->devices[i]; + struct isci_remote_device *idev = &ihost->devices[i]; INIT_LIST_HEAD(&idev->reqs_in_process); INIT_LIST_HEAD(&idev->node); @@ -2465,63 +2446,62 @@ int isci_host_init(struct isci_host *isci_host) struct isci_request *ireq; dma_addr_t dma; - ireq = dmam_alloc_coherent(&isci_host->pdev->dev, + ireq = dmam_alloc_coherent(&ihost->pdev->dev, sizeof(struct isci_request), &dma, GFP_KERNEL); if (!ireq) return -ENOMEM; - ireq->tc = &isci_host->sci.task_context_table[i]; - ireq->owning_controller = &isci_host->sci; + ireq->tc = &ihost->task_context_table[i]; + ireq->owning_controller = ihost; spin_lock_init(&ireq->state_lock); ireq->request_daddr = dma; - ireq->isci_host = isci_host; - - isci_host->reqs[i] = ireq; + ireq->isci_host = ihost; + ihost->reqs[i] = ireq; } return 0; } -void scic_sds_controller_link_up(struct scic_sds_controller *scic, +void scic_sds_controller_link_up(struct isci_host *ihost, struct isci_port *iport, struct isci_phy *iphy) { - switch (scic->sm.current_state_id) { + switch (ihost->sm.current_state_id) { case SCIC_STARTING: - sci_del_timer(&scic->phy_timer); - scic->phy_startup_timer_pending = false; - scic->port_agent.link_up_handler(scic, &scic->port_agent, + sci_del_timer(&ihost->phy_timer); + ihost->phy_startup_timer_pending = false; + ihost->port_agent.link_up_handler(ihost, &ihost->port_agent, iport, iphy); - scic_sds_controller_start_next_phy(scic); + scic_sds_controller_start_next_phy(ihost); break; case SCIC_READY: - scic->port_agent.link_up_handler(scic, &scic->port_agent, + ihost->port_agent.link_up_handler(ihost, &ihost->port_agent, iport, iphy); break; default: - dev_dbg(scic_to_dev(scic), + dev_dbg(&ihost->pdev->dev, "%s: SCIC Controller linkup event from phy %d in " "unexpected state %d\n", __func__, iphy->phy_index, - scic->sm.current_state_id); + ihost->sm.current_state_id); } } -void scic_sds_controller_link_down(struct scic_sds_controller *scic, +void scic_sds_controller_link_down(struct isci_host *ihost, struct isci_port *iport, struct isci_phy *iphy) { - switch (scic->sm.current_state_id) { + switch (ihost->sm.current_state_id) { case 
SCIC_STARTING: case SCIC_READY: - scic->port_agent.link_down_handler(scic, &scic->port_agent, + ihost->port_agent.link_down_handler(ihost, &ihost->port_agent, iport, iphy); break; default: - dev_dbg(scic_to_dev(scic), + dev_dbg(&ihost->pdev->dev, "%s: SCIC Controller linkdown event from phy %d in " "unexpected state %d\n", __func__, iphy->phy_index, - scic->sm.current_state_id); + ihost->sm.current_state_id); } } @@ -2530,14 +2510,13 @@ void scic_sds_controller_link_down(struct scic_sds_controller *scic, * controller are still in the stopping state. * */ -static bool scic_sds_controller_has_remote_devices_stopping( - struct scic_sds_controller *controller) +static bool scic_sds_controller_has_remote_devices_stopping(struct isci_host *ihost) { u32 index; - for (index = 0; index < controller->remote_node_entries; index++) { - if ((controller->device_table[index] != NULL) && - (controller->device_table[index]->sm.current_state_id == SCI_DEV_STOPPING)) + for (index = 0; index < ihost->remote_node_entries; index++) { + if ((ihost->device_table[index] != NULL) && + (ihost->device_table[index]->sm.current_state_id == SCI_DEV_STOPPING)) return true; } @@ -2548,20 +2527,20 @@ static bool scic_sds_controller_has_remote_devices_stopping( * This method is called by the remote device to inform the controller * object that the remote device has stopped. */ -void scic_sds_controller_remote_device_stopped(struct scic_sds_controller *scic, +void scic_sds_controller_remote_device_stopped(struct isci_host *ihost, struct isci_remote_device *idev) { - if (scic->sm.current_state_id != SCIC_STOPPING) { - dev_dbg(scic_to_dev(scic), + if (ihost->sm.current_state_id != SCIC_STOPPING) { + dev_dbg(&ihost->pdev->dev, "SCIC Controller 0x%p remote device stopped event " "from device 0x%p in unexpected state %d\n", - scic, idev, - scic->sm.current_state_id); + ihost, idev, + ihost->sm.current_state_id); return; } - if (!scic_sds_controller_has_remote_devices_stopping(scic)) { - sci_change_state(&scic->sm, SCIC_STOPPED); + if (!scic_sds_controller_has_remote_devices_stopping(ihost)) { + sci_change_state(&ihost->sm, SCIC_STOPPED); } } @@ -2573,32 +2552,32 @@ void scic_sds_controller_remote_device_stopped(struct scic_sds_controller *scic, * */ void scic_sds_controller_post_request( - struct scic_sds_controller *scic, + struct isci_host *ihost, u32 request) { - dev_dbg(scic_to_dev(scic), + dev_dbg(&ihost->pdev->dev, "%s: SCIC Controller 0x%p post request 0x%08x\n", __func__, - scic, + ihost, request); - writel(request, &scic->smu_registers->post_context_port); + writel(request, &ihost->smu_registers->post_context_port); } -struct isci_request *scic_request_by_tag(struct scic_sds_controller *scic, u16 io_tag) +struct isci_request *scic_request_by_tag(struct isci_host *ihost, u16 io_tag) { u16 task_index; u16 task_sequence; task_index = ISCI_TAG_TCI(io_tag); - if (task_index < scic->task_context_entries) { - struct isci_request *ireq = scic_to_ihost(scic)->reqs[task_index]; + if (task_index < ihost->task_context_entries) { + struct isci_request *ireq = ihost->reqs[task_index]; if (test_bit(IREQ_ACTIVE, &ireq->flags)) { task_sequence = ISCI_TAG_SEQ(io_tag); - if (task_sequence == scic->io_request_sequence[task_index]) + if (task_sequence == ihost->io_request_sequence[task_index]) return ireq; } } @@ -2621,7 +2600,7 @@ struct isci_request *scic_request_by_tag(struct scic_sds_controller *scic, u16 i * node index available. 
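/*
 * Standalone sketch (not part of the patch) of the tag scheme used by
 * scic_request_by_tag()/isci_alloc_tag()/isci_free_tag() above: a tag packs a
 * per-slot sequence number next to the task context index, so a stale tag from
 * an already-freed request no longer matches the slot's current generation.
 * The demo_* names and the 8-bit split are illustrative only; the driver's
 * real layout lives in the ISCI_TAG/ISCI_TAG_TCI/ISCI_TAG_SEQ macros.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_MAX_REQUESTS 4
#define DEMO_SEQ_MASK     0xfu

static uint16_t demo_tag(uint8_t seq, uint16_t idx) { return (uint16_t)((seq << 8) | idx); }
static uint16_t demo_tag_idx(uint16_t tag)          { return tag & 0xff; }
static uint8_t  demo_tag_seq(uint16_t tag)          { return (tag >> 8) & DEMO_SEQ_MASK; }

static uint8_t demo_sequence[DEMO_MAX_REQUESTS];    /* per-slot generation counter */
static bool    demo_active[DEMO_MAX_REQUESTS];

static uint16_t demo_alloc(uint16_t idx)
{
	demo_active[idx] = true;
	return demo_tag(demo_sequence[idx], idx);
}

static bool demo_free(uint16_t tag)
{
	uint16_t idx = demo_tag_idx(tag);

	if (demo_tag_seq(tag) != demo_sequence[idx])
		return false;                       /* stale tag */
	demo_sequence[idx] = (demo_sequence[idx] + 1) & DEMO_SEQ_MASK;
	demo_active[idx] = false;
	return true;
}

/* lookup succeeds only while the slot is active and the generation matches */
static bool demo_lookup(uint16_t tag)
{
	uint16_t idx = demo_tag_idx(tag);

	return demo_active[idx] && demo_tag_seq(tag) == demo_sequence[idx];
}

int main(void)
{
	uint16_t tag = demo_alloc(2);

	printf("lookup new tag:   %d\n", demo_lookup(tag));  /* 1 */
	demo_free(tag);
	printf("lookup stale tag: %d\n", demo_lookup(tag));  /* 0 */
	return 0;
}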
*/ enum sci_status scic_sds_controller_allocate_remote_node_context( - struct scic_sds_controller *scic, + struct isci_host *ihost, struct isci_remote_device *idev, u16 *node_id) { @@ -2629,11 +2608,11 @@ enum sci_status scic_sds_controller_allocate_remote_node_context( u32 remote_node_count = scic_sds_remote_device_node_count(idev); node_index = scic_sds_remote_node_table_allocate_remote_node( - &scic->available_remote_nodes, remote_node_count + &ihost->available_remote_nodes, remote_node_count ); if (node_index != SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) { - scic->device_table[node_index] = idev; + ihost->device_table[node_index] = idev; *node_id = node_index; @@ -2653,17 +2632,17 @@ enum sci_status scic_sds_controller_allocate_remote_node_context( * */ void scic_sds_controller_free_remote_node_context( - struct scic_sds_controller *scic, + struct isci_host *ihost, struct isci_remote_device *idev, u16 node_id) { u32 remote_node_count = scic_sds_remote_device_node_count(idev); - if (scic->device_table[node_id] == idev) { - scic->device_table[node_id] = NULL; + if (ihost->device_table[node_id] == idev) { + ihost->device_table[node_id] = NULL; scic_sds_remote_node_table_release_remote_node_index( - &scic->available_remote_nodes, remote_node_count, node_id + &ihost->available_remote_nodes, remote_node_count, node_id ); } } @@ -2677,14 +2656,14 @@ void scic_sds_controller_free_remote_node_context( * union scu_remote_node_context* */ union scu_remote_node_context *scic_sds_controller_get_remote_node_context_buffer( - struct scic_sds_controller *scic, + struct isci_host *ihost, u16 node_id ) { if ( - (node_id < scic->remote_node_entries) - && (scic->device_table[node_id] != NULL) + (node_id < ihost->remote_node_entries) + && (ihost->device_table[node_id] != NULL) ) { - return &scic->remote_node_context_table[node_id]; + return &ihost->remote_node_context_table[node_id]; } return NULL; @@ -2722,13 +2701,13 @@ void scic_sds_controller_copy_sata_response( * */ void scic_sds_controller_release_frame( - struct scic_sds_controller *scic, + struct isci_host *ihost, u32 frame_index) { if (scic_sds_unsolicited_frame_control_release_frame( - &scic->uf_control, frame_index) == true) - writel(scic->uf_control.get, - &scic->scu_registers->sdma.unsolicited_frame_get_pointer); + &ihost->uf_control, frame_index) == true) + writel(ihost->uf_control.get, + &ihost->scu_registers->sdma.unsolicited_frame_get_pointer); } void isci_tci_free(struct isci_host *ihost, u16 tci) @@ -2757,7 +2736,7 @@ u16 isci_alloc_tag(struct isci_host *ihost) { if (isci_tci_space(ihost)) { u16 tci = isci_tci_alloc(ihost); - u8 seq = ihost->sci.io_request_sequence[tci]; + u8 seq = ihost->io_request_sequence[tci]; return ISCI_TAG(seq, tci); } @@ -2767,7 +2746,6 @@ u16 isci_alloc_tag(struct isci_host *ihost) enum sci_status isci_free_tag(struct isci_host *ihost, u16 io_tag) { - struct scic_sds_controller *scic = &ihost->sci; u16 tci = ISCI_TAG_TCI(io_tag); u16 seq = ISCI_TAG_SEQ(io_tag); @@ -2775,8 +2753,8 @@ enum sci_status isci_free_tag(struct isci_host *ihost, u16 io_tag) if (isci_tci_active(ihost) == 0) return SCI_FAILURE_INVALID_IO_TAG; - if (seq == scic->io_request_sequence[tci]) { - scic->io_request_sequence[tci] = (seq+1) & (SCI_MAX_SEQ-1); + if (seq == ihost->io_request_sequence[tci]) { + ihost->io_request_sequence[tci] = (seq+1) & (SCI_MAX_SEQ-1); isci_tci_free(ihost, tci); @@ -2797,23 +2775,23 @@ enum sci_status isci_free_tag(struct isci_host *ihost, u16 io_tag) * @io_tag: This parameter specifies a previously allocated IO tag 
that the * user desires to be utilized for this request. */ -enum sci_status scic_controller_start_io(struct scic_sds_controller *scic, +enum sci_status scic_controller_start_io(struct isci_host *ihost, struct isci_remote_device *idev, struct isci_request *ireq) { enum sci_status status; - if (scic->sm.current_state_id != SCIC_READY) { - dev_warn(scic_to_dev(scic), "invalid state to start I/O"); + if (ihost->sm.current_state_id != SCIC_READY) { + dev_warn(&ihost->pdev->dev, "invalid state to start I/O"); return SCI_FAILURE_INVALID_STATE; } - status = scic_sds_remote_device_start_io(scic, idev, ireq); + status = scic_sds_remote_device_start_io(ihost, idev, ireq); if (status != SCI_SUCCESS) return status; set_bit(IREQ_ACTIVE, &ireq->flags); - scic_sds_controller_post_request(scic, scic_sds_request_get_post_context(ireq)); + scic_sds_controller_post_request(ihost, scic_sds_request_get_post_context(ireq)); return SCI_SUCCESS; } @@ -2834,14 +2812,14 @@ enum sci_status scic_controller_start_io(struct scic_sds_controller *scic, * for the request. Determine the failure situations and return values. */ enum sci_status scic_controller_terminate_request( - struct scic_sds_controller *scic, + struct isci_host *ihost, struct isci_remote_device *idev, struct isci_request *ireq) { enum sci_status status; - if (scic->sm.current_state_id != SCIC_READY) { - dev_warn(scic_to_dev(scic), + if (ihost->sm.current_state_id != SCIC_READY) { + dev_warn(&ihost->pdev->dev, "invalid state to terminate request\n"); return SCI_FAILURE_INVALID_STATE; } @@ -2854,7 +2832,7 @@ enum sci_status scic_controller_terminate_request( * Utilize the original post context command and or in the POST_TC_ABORT * request sub-type. */ - scic_sds_controller_post_request(scic, + scic_sds_controller_post_request(ihost, scic_sds_request_get_post_context(ireq) | SCU_CONTEXT_COMMAND_REQUEST_POST_TC_ABORT); return SCI_SUCCESS; @@ -2872,19 +2850,19 @@ enum sci_status scic_controller_terminate_request( * @io_request: the handle to the io request object to complete. 
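/*
 * Minimal userspace model (not the driver's code) of the start/terminate
 * pattern shown above: an operation is only legal in the READY state, and work
 * reaches the hardware by posting a command word to a doorbell register; a
 * terminate reuses the same word with an abort sub-type OR'd in, as the
 * SCU_CONTEXT_COMMAND_REQUEST_POST_TC_ABORT path does.  The DEMO_* values and
 * demo_doorbell variable are illustrative stand-ins, not the real encoding.
 */
#include <stdint.h>
#include <stdio.h>

enum demo_state { DEMO_RESET, DEMO_READY, DEMO_STOPPING };

#define DEMO_POST_TC        0x0000u   /* "post task context" request type */
#define DEMO_POST_TC_ABORT  0x4000u   /* same request, abort sub-type     */

static enum demo_state demo_ctrl_state = DEMO_READY;
static uint32_t demo_doorbell;        /* stands in for the post-context register */

static void demo_post(uint32_t cmd)
{
	demo_doorbell = cmd;          /* the driver would writel() this */
	printf("posted 0x%08x\n", demo_doorbell);
}

static int demo_start_io(uint16_t tci)
{
	if (demo_ctrl_state != DEMO_READY)
		return -1;            /* SCI_FAILURE_INVALID_STATE in the driver */
	demo_post(DEMO_POST_TC | tci);
	return 0;
}

static int demo_terminate_io(uint16_t tci)
{
	if (demo_ctrl_state != DEMO_READY)
		return -1;
	/* reuse the original post word, OR in the abort sub-type */
	demo_post(DEMO_POST_TC | DEMO_POST_TC_ABORT | tci);
	return 0;
}

int main(void)
{
	demo_start_io(7);
	demo_terminate_io(7);
	return 0;
}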
*/ enum sci_status scic_controller_complete_io( - struct scic_sds_controller *scic, + struct isci_host *ihost, struct isci_remote_device *idev, struct isci_request *ireq) { enum sci_status status; u16 index; - switch (scic->sm.current_state_id) { + switch (ihost->sm.current_state_id) { case SCIC_STOPPING: /* XXX: Implement this function */ return SCI_FAILURE; case SCIC_READY: - status = scic_sds_remote_device_complete_io(scic, idev, ireq); + status = scic_sds_remote_device_complete_io(ihost, idev, ireq); if (status != SCI_SUCCESS) return status; @@ -2892,7 +2870,7 @@ enum sci_status scic_controller_complete_io( clear_bit(IREQ_ACTIVE, &ireq->flags); return SCI_SUCCESS; default: - dev_warn(scic_to_dev(scic), "invalid state to complete I/O"); + dev_warn(&ihost->pdev->dev, "invalid state to complete I/O"); return SCI_FAILURE_INVALID_STATE; } @@ -2900,15 +2878,15 @@ enum sci_status scic_controller_complete_io( enum sci_status scic_controller_continue_io(struct isci_request *ireq) { - struct scic_sds_controller *scic = ireq->owning_controller; + struct isci_host *ihost = ireq->owning_controller; - if (scic->sm.current_state_id != SCIC_READY) { - dev_warn(scic_to_dev(scic), "invalid state to continue I/O"); + if (ihost->sm.current_state_id != SCIC_READY) { + dev_warn(&ihost->pdev->dev, "invalid state to continue I/O"); return SCI_FAILURE_INVALID_STATE; } set_bit(IREQ_ACTIVE, &ireq->flags); - scic_sds_controller_post_request(scic, scic_sds_request_get_post_context(ireq)); + scic_sds_controller_post_request(ihost, scic_sds_request_get_post_context(ireq)); return SCI_SUCCESS; } @@ -2922,21 +2900,21 @@ enum sci_status scic_controller_continue_io(struct isci_request *ireq) * @task_request: the handle to the task request object to start. */ enum sci_task_status scic_controller_start_task( - struct scic_sds_controller *scic, + struct isci_host *ihost, struct isci_remote_device *idev, struct isci_request *ireq) { enum sci_status status; - if (scic->sm.current_state_id != SCIC_READY) { - dev_warn(scic_to_dev(scic), + if (ihost->sm.current_state_id != SCIC_READY) { + dev_warn(&ihost->pdev->dev, "%s: SCIC Controller starting task from invalid " "state\n", __func__); return SCI_TASK_FAILURE_INVALID_STATE; } - status = scic_sds_remote_device_start_task(scic, idev, ireq); + status = scic_sds_remote_device_start_task(ihost, idev, ireq); switch (status) { case SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS: set_bit(IREQ_ACTIVE, &ireq->flags); @@ -2950,7 +2928,7 @@ enum sci_task_status scic_controller_start_task( case SCI_SUCCESS: set_bit(IREQ_ACTIVE, &ireq->flags); - scic_sds_controller_post_request(scic, + scic_sds_controller_post_request(ihost, scic_sds_request_get_post_context(ireq)); break; default: diff --git a/drivers/scsi/isci/host.h b/drivers/scsi/isci/host.h index ca2e3b0ee0d..013f672a8fd 100644 --- a/drivers/scsi/isci/host.h +++ b/drivers/scsi/isci/host.h @@ -106,7 +106,7 @@ struct scic_power_control { }; struct scic_sds_port_configuration_agent; -typedef void (*port_config_fn)(struct scic_sds_controller *, +typedef void (*port_config_fn)(struct isci_host *, struct scic_sds_port_configuration_agent *, struct isci_port *, struct isci_phy *); @@ -124,171 +124,66 @@ struct scic_sds_port_configuration_agent { }; /** - * struct scic_sds_controller - - * - * This structure represents the SCU controller object. 
+ * isci_host - primary host/controller object + * @timer: timeout start/stop operations + * @device_table: rni (hw remote node index) to remote device lookup table + * @available_remote_nodes: rni allocator + * @power_control: manage device spin up + * @io_request_sequence: generation number for tci's (task contexts) + * @task_context_table: hw task context table + * @remote_node_context_table: hw remote node context table + * @completion_queue: hw-producer driver-consumer communication ring + * @completion_queue_get: tracks the driver 'head' of the ring to notify hw + * @logical_port_entries: min({driver|silicon}-supported-port-count) + * @remote_node_entries: min({driver|silicon}-supported-node-count) + * @task_context_entries: min({driver|silicon}-supported-task-count) + * @phy_timer: phy startup timer + * @invalid_phy_mask: if an invalid_link_up notification is reported a bit for + * the phy index is set so further notifications are not + * made. Once the phy reports link up and is made part of a + * port then this bit is cleared. + */ -struct scic_sds_controller { - /** - * This field contains the information for the base controller state - * machine. - */ +struct isci_host { struct sci_base_state_machine sm; - - /** - * Timer for controller start/stop operations. - */ + /* XXX can we time this externally */ struct sci_timer timer; - - /** - * This field contains the user parameters to be utilized for this - * core controller object. - */ + /* XXX drop reference module params directly */ union scic_user_parameters user_parameters; - - /** - * This field contains the OEM parameters to be utilized for this - * core controller object. - */ + /* XXX no need to be a union */ union scic_oem_parameters oem_parameters; - - /** - * This field contains the port configuration agent for this controller. - */ struct scic_sds_port_configuration_agent port_agent; - - /** - * This field is the array of device objects that are currently constructed - * for this controller object. This table is used as a fast lookup of device - * objects that need to handle device completion notifications from the - * hardware. The table is RNi based. - */ struct isci_remote_device *device_table[SCI_MAX_REMOTE_DEVICES]; - - /** - * This field is the free RNi data structure - */ struct scic_remote_node_table available_remote_nodes; - - /** - * This filed is the struct scic_power_control data used to controll when direct - * attached devices can consume power. - */ struct scic_power_control power_control; - - /* sequence number per tci */ u8 io_request_sequence[SCI_MAX_IO_REQUESTS]; - - /** - * This field is a pointer to the memory allocated by the driver for the task - * context table. This data is shared between the hardware and software. - */ struct scu_task_context *task_context_table; dma_addr_t task_context_dma; - - /** - * This field is a pointer to the memory allocated by the driver for the - * remote node context table. This table is shared between the hardware and - * software. - */ union scu_remote_node_context *remote_node_context_table; - - /** - * This field is a pointer to the completion queue. This memory is - * written to by the hardware and read by the software. - */ u32 *completion_queue; - - /** - * This field is the software copy of the completion queue get pointer. The - * controller object writes this value to the hardware after processing the - * completion entries. 
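/*
 * Generic sketch of the completion_queue/completion_queue_get idea described
 * in the kerneldoc above: the hardware produces entries into a ring, the
 * driver consumes them, and the driver's read position (the "get" index) is
 * written back to a device register so the hardware knows how far it may
 * overwrite.  The phase-flag scheme, sizes, and names here are illustrative;
 * the actual SCU completion entry format is not reproduced.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RING_ENTRIES 8

struct ring_entry {
	uint32_t payload;
	bool     cycle;           /* flipped by the producer on every pass */
};

static struct ring_entry ring[RING_ENTRIES];
static unsigned int get_index;    /* driver's copy of its read position    */
static bool expected_cycle = true;
static uint32_t get_register;     /* stands in for the hw get-pointer reg  */

/* producer side (the hardware, in the real driver) */
static void produce(unsigned int *put, uint32_t payload)
{
	ring[*put % RING_ENTRIES].payload = payload;
	ring[*put % RING_ENTRIES].cycle = ((*put / RING_ENTRIES) % 2) == 0;
	(*put)++;
}

/* consumer side: drain fresh entries, then publish how far we got */
static void consume(void)
{
	while (ring[get_index % RING_ENTRIES].cycle == expected_cycle) {
		printf("completion 0x%x\n",
		       (unsigned int)ring[get_index % RING_ENTRIES].payload);
		get_index++;
		if ((get_index % RING_ENTRIES) == 0)
			expected_cycle = !expected_cycle;
	}
	get_register = get_index % RING_ENTRIES;   /* writel() in the driver */
	printf("publish get index %u\n", (unsigned int)get_register);
}

int main(void)
{
	unsigned int put = 0;

	produce(&put, 0xa1);
	produce(&put, 0xa2);
	consume();
	return 0;
}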
- */ u32 completion_queue_get; - - /** - * This field is the minimum of the number of hardware supported port entries - * and the software requested port entries. - */ u32 logical_port_entries; - - /** - * This field is the minimum number of devices supported by the hardware and - * the number of devices requested by the software. - */ u32 remote_node_entries; - - /** - * This field is the minimum number of IO requests supported by the hardware - * and the number of IO requests requested by the software. - */ u32 task_context_entries; - - /** - * This object contains all of the unsolicited frame specific - * data utilized by the core controller. - */ struct scic_sds_unsolicited_frame_control uf_control; - /* Phy Startup Data */ - /** - * Timer for controller phy request startup. On controller start the - * controller will start each PHY individually in order of phy index. - */ + /* phy startup */ struct sci_timer phy_timer; - - /** - * This field is set when the phy_timer is running and is cleared when - * the phy_timer is stopped. - */ + /* XXX kill */ bool phy_startup_timer_pending; - - /** - * This field is the index of the next phy start. It is initialized to 0 and - * increments for each phy index that is started. - */ u32 next_phy_to_start; - - /** - * This field controlls the invalid link up notifications to the SCI_USER. If - * an invalid_link_up notification is reported a bit for the PHY index is set - * so further notifications are not made. Once the PHY object reports link up - * and is made part of a port then this bit for the PHY index is cleared. - */ u8 invalid_phy_mask; - /* - * This field saves the current interrupt coalescing number of the controller. - */ + /* TODO attempt dynamic interrupt coalescing scheme */ u16 interrupt_coalesce_number; - - /* - * This field saves the current interrupt coalescing timeout value in microseconds. - */ u32 interrupt_coalesce_timeout; - - /** - * This field is a pointer to the memory mapped register space for the - * struct smu_registers. - */ struct smu_registers __iomem *smu_registers; - - /** - * This field is a pointer to the memory mapped register space for the - * struct scu_registers. 
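/*
 * Before/after sketch of the flattening this hunk performs: the old layout
 * embedded the core-controller object inside the host and recovered the
 * containing host via a container_of() helper (scic_to_ihost, deleted further
 * below); the new layout is a single struct with direct member access.  The
 * demo_* types and fields are illustrative, not the driver's definitions.
 */
#include <stddef.h>

struct demo_core {
	int logical_port_entries;
};

/* before: core object embedded in the host */
struct demo_host_old {
	struct demo_core sci;
	int id;
};

#define demo_container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static int old_ports(struct demo_core *core)
{
	struct demo_host_old *host =
		demo_container_of(core, struct demo_host_old, sci);

	return host->id + core->logical_port_entries;
}

/* after: one flat structure, every function takes the host directly */
struct demo_host_new {
	int logical_port_entries;
	int id;
};

static int new_ports(struct demo_host_new *host)
{
	return host->id + host->logical_port_entries;
}

int main(void)
{
	struct demo_host_old h_old = { .sci = { .logical_port_entries = 4 }, .id = 1 };
	struct demo_host_new h_new = { .logical_port_entries = 4, .id = 1 };

	return old_ports(&h_old.sci) - new_ports(&h_new);   /* 0: same result */
}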
- */ struct scu_registers __iomem *scu_registers; -}; - -struct isci_host { - struct scic_sds_controller sci; u16 tci_head; u16 tci_tail; u16 tci_pool[SCI_MAX_IO_REQUESTS]; - union scic_oem_parameters oem_parameters; - int id; /* unique within a given pci device */ struct isci_phy phys[SCI_MAX_PHYS]; struct isci_port ports[SCI_MAX_PORTS + 1]; /* includes dummy port */ @@ -464,14 +359,6 @@ static inline struct isci_host *dev_to_ihost(struct domain_device *dev) return dev->port->ha->lldd_ha; } -static inline struct isci_host *scic_to_ihost(struct scic_sds_controller *scic) -{ - /* XXX delete after merging scic_sds_contoller and isci_host */ - struct isci_host *ihost = container_of(scic, typeof(*ihost), sci); - - return ihost; -} - /** * scic_sds_controller_get_protocol_engine_group() - * @@ -518,11 +405,6 @@ static inline int scic_sds_remote_device_node_count(struct isci_remote_device *i #define scic_sds_controller_clear_invalid_phy(controller, phy) \ ((controller)->invalid_phy_mask &= ~(1 << (phy)->phy_index)) -static inline struct device *scic_to_dev(struct scic_sds_controller *scic) -{ - return &scic_to_ihost(scic)->pdev->dev; -} - static inline struct device *sciphy_to_dev(struct isci_phy *iphy) { @@ -578,54 +460,54 @@ static inline bool is_c0(void) return isci_si_rev > ISCI_SI_REVB0; } -void scic_sds_controller_post_request(struct scic_sds_controller *scic, +void scic_sds_controller_post_request(struct isci_host *ihost, u32 request); -void scic_sds_controller_release_frame(struct scic_sds_controller *scic, +void scic_sds_controller_release_frame(struct isci_host *ihost, u32 frame_index); void scic_sds_controller_copy_sata_response(void *response_buffer, void *frame_header, void *frame_buffer); -enum sci_status scic_sds_controller_allocate_remote_node_context(struct scic_sds_controller *scic, +enum sci_status scic_sds_controller_allocate_remote_node_context(struct isci_host *ihost, struct isci_remote_device *idev, u16 *node_id); void scic_sds_controller_free_remote_node_context( - struct scic_sds_controller *scic, + struct isci_host *ihost, struct isci_remote_device *idev, u16 node_id); union scu_remote_node_context *scic_sds_controller_get_remote_node_context_buffer( - struct scic_sds_controller *scic, + struct isci_host *ihost, u16 node_id); -struct isci_request *scic_request_by_tag(struct scic_sds_controller *scic, +struct isci_request *scic_request_by_tag(struct isci_host *ihost, u16 io_tag); void scic_sds_controller_power_control_queue_insert( - struct scic_sds_controller *scic, + struct isci_host *ihost, struct isci_phy *iphy); void scic_sds_controller_power_control_queue_remove( - struct scic_sds_controller *scic, + struct isci_host *ihost, struct isci_phy *iphy); void scic_sds_controller_link_up( - struct scic_sds_controller *scic, + struct isci_host *ihost, struct isci_port *iport, struct isci_phy *iphy); void scic_sds_controller_link_down( - struct scic_sds_controller *scic, + struct isci_host *ihost, struct isci_port *iport, struct isci_phy *iphy); void scic_sds_controller_remote_device_stopped( - struct scic_sds_controller *scic, + struct isci_host *ihost, struct isci_remote_device *idev); void scic_sds_controller_copy_task_context( - struct scic_sds_controller *scic, + struct isci_host *ihost, struct isci_request *ireq); -void scic_sds_controller_register_setup(struct scic_sds_controller *scic); +void scic_sds_controller_register_setup(struct isci_host *ihost); enum sci_status scic_controller_continue_io(struct isci_request *ireq); int isci_host_scan_finished(struct 
Scsi_Host *, unsigned long); @@ -655,25 +537,25 @@ void isci_host_remote_device_start_complete( enum sci_status); void scic_controller_disable_interrupts( - struct scic_sds_controller *scic); + struct isci_host *ihost); enum sci_status scic_controller_start_io( - struct scic_sds_controller *scic, + struct isci_host *ihost, struct isci_remote_device *idev, struct isci_request *ireq); enum sci_task_status scic_controller_start_task( - struct scic_sds_controller *scic, + struct isci_host *ihost, struct isci_remote_device *idev, struct isci_request *ireq); enum sci_status scic_controller_terminate_request( - struct scic_sds_controller *scic, + struct isci_host *ihost, struct isci_remote_device *idev, struct isci_request *ireq); enum sci_status scic_controller_complete_io( - struct scic_sds_controller *scic, + struct isci_host *ihost, struct isci_remote_device *idev, struct isci_request *ireq); @@ -681,6 +563,6 @@ void scic_sds_port_configuration_agent_construct( struct scic_sds_port_configuration_agent *port_agent); enum sci_status scic_sds_port_configuration_agent_initialize( - struct scic_sds_controller *controller, + struct isci_host *ihost, struct scic_sds_port_configuration_agent *port_agent); #endif diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c index bbfb6e56320..68ca1a4f30a 100644 --- a/drivers/scsi/isci/init.c +++ b/drivers/scsi/isci/init.c @@ -548,13 +548,13 @@ static int __devinit isci_pci_probe(struct pci_dev *pdev, const struct pci_devic static void __devexit isci_pci_remove(struct pci_dev *pdev) { - struct isci_host *isci_host; + struct isci_host *ihost; int i; - for_each_isci_host(i, isci_host, pdev) { - isci_unregister(isci_host); - isci_host_deinit(isci_host); - scic_controller_disable_interrupts(&isci_host->sci); + for_each_isci_host(i, ihost, pdev) { + isci_unregister(ihost); + isci_host_deinit(ihost); + scic_controller_disable_interrupts(ihost); } } diff --git a/drivers/scsi/isci/phy.c b/drivers/scsi/isci/phy.c index fd0e9734e5d..ca96b5ad0d5 100644 --- a/drivers/scsi/isci/phy.c +++ b/drivers/scsi/isci/phy.c @@ -112,13 +112,13 @@ static enum sci_status scic_sds_phy_link_layer_initialization(struct isci_phy *iphy, struct scu_link_layer_registers __iomem *link_layer_registers) { - struct scic_sds_controller *scic = + struct isci_host *ihost = iphy->owning_port->owning_controller; int phy_idx = iphy->phy_index; struct sci_phy_user_params *phy_user = - &scic->user_parameters.sds1.phys[phy_idx]; + &ihost->user_parameters.sds1.phys[phy_idx]; struct sci_phy_oem_params *phy_oem = - &scic->oem_parameters.sds1.phys[phy_idx]; + &ihost->oem_parameters.sds1.phys[phy_idx]; u32 phy_configuration; struct scic_phy_cap phy_cap; u32 parity_check = 0; @@ -169,7 +169,7 @@ scic_sds_phy_link_layer_initialization(struct isci_phy *iphy, phy_cap.gen3_no_ssc = 1; phy_cap.gen2_no_ssc = 1; phy_cap.gen1_no_ssc = 1; - if (scic->oem_parameters.sds1.controller.do_enable_ssc == true) { + if (ihost->oem_parameters.sds1.controller.do_enable_ssc == true) { phy_cap.gen3_ssc = 1; phy_cap.gen2_ssc = 1; phy_cap.gen1_ssc = 1; @@ -216,7 +216,7 @@ scic_sds_phy_link_layer_initialization(struct isci_phy *iphy, &iphy->link_layer_registers->afe_lookup_table_control); llctl = SCU_SAS_LLCTL_GEN_VAL(NO_OUTBOUND_TASK_TIMEOUT, - (u8)scic->user_parameters.sds1.no_outbound_task_timeout); + (u8)ihost->user_parameters.sds1.no_outbound_task_timeout); switch(phy_user->max_speed_generation) { case SCIC_SDS_PARM_GEN3_SPEED: @@ -255,7 +255,7 @@ static void phy_sata_timeout(unsigned long data) { struct sci_timer *tmr = 
(struct sci_timer *)data; struct isci_phy *iphy = container_of(tmr, typeof(*iphy), sata_timer); - struct isci_host *ihost = scic_to_ihost(iphy->owning_port->owning_controller); + struct isci_host *ihost = iphy->owning_port->owning_controller; unsigned long flags; spin_lock_irqsave(&ihost->scic_lock, flags); @@ -890,7 +890,7 @@ enum sci_status scic_sds_phy_frame_handler(struct isci_phy *iphy, u32 frame_index) { enum scic_sds_phy_states state = iphy->sm.current_state_id; - struct scic_sds_controller *scic = iphy->owning_port->owning_controller; + struct isci_host *ihost = iphy->owning_port->owning_controller; enum sci_status result; unsigned long flags; @@ -899,7 +899,7 @@ enum sci_status scic_sds_phy_frame_handler(struct isci_phy *iphy, u32 *frame_words; struct sas_identify_frame iaf; - result = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control, + result = scic_sds_unsolicited_frame_control_get_header(&ihost->uf_control, frame_index, (void **)&frame_words); @@ -933,7 +933,7 @@ enum sci_status scic_sds_phy_frame_handler(struct isci_phy *iphy, "unexpected frame id %x\n", __func__, frame_index); - scic_sds_controller_release_frame(scic, frame_index); + scic_sds_controller_release_frame(ihost, frame_index); return result; } case SCI_PHY_SUB_AWAIT_SIG_FIS_UF: { @@ -950,7 +950,7 @@ enum sci_status scic_sds_phy_frame_handler(struct isci_phy *iphy, if ((frame_header->fis_type == FIS_REGD2H) && !(frame_header->status & ATA_BUSY)) { - scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control, + scic_sds_unsolicited_frame_control_get_buffer(&ihost->uf_control, frame_index, (void **)&fis_frame_data); @@ -971,7 +971,7 @@ enum sci_status scic_sds_phy_frame_handler(struct isci_phy *iphy, __func__, frame_index); /* Regardless of the result we are done with this frame with it */ - scic_sds_controller_release_frame(scic, frame_index); + scic_sds_controller_release_frame(ihost, frame_index); return result; } @@ -994,33 +994,33 @@ static void scic_sds_phy_starting_initial_substate_enter(struct sci_base_state_m static void scic_sds_phy_starting_await_sas_power_substate_enter(struct sci_base_state_machine *sm) { struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); - struct scic_sds_controller *scic = iphy->owning_port->owning_controller; + struct isci_host *ihost = iphy->owning_port->owning_controller; - scic_sds_controller_power_control_queue_insert(scic, iphy); + scic_sds_controller_power_control_queue_insert(ihost, iphy); } static void scic_sds_phy_starting_await_sas_power_substate_exit(struct sci_base_state_machine *sm) { struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); - struct scic_sds_controller *scic = iphy->owning_port->owning_controller; + struct isci_host *ihost = iphy->owning_port->owning_controller; - scic_sds_controller_power_control_queue_remove(scic, iphy); + scic_sds_controller_power_control_queue_remove(ihost, iphy); } static void scic_sds_phy_starting_await_sata_power_substate_enter(struct sci_base_state_machine *sm) { struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); - struct scic_sds_controller *scic = iphy->owning_port->owning_controller; + struct isci_host *ihost = iphy->owning_port->owning_controller; - scic_sds_controller_power_control_queue_insert(scic, iphy); + scic_sds_controller_power_control_queue_insert(ihost, iphy); } static void scic_sds_phy_starting_await_sata_power_substate_exit(struct sci_base_state_machine *sm) { struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); - struct scic_sds_controller *scic = 
iphy->owning_port->owning_controller; + struct isci_host *ihost = iphy->owning_port->owning_controller; - scic_sds_controller_power_control_queue_remove(scic, iphy); + scic_sds_controller_power_control_queue_remove(ihost, iphy); } static void scic_sds_phy_starting_await_sata_phy_substate_enter(struct sci_base_state_machine *sm) @@ -1313,7 +1313,7 @@ void isci_phy_init(struct isci_phy *iphy, struct isci_host *ihost, int index) u64 sci_sas_addr; __be64 sas_addr; - scic_oem_parameters_get(&ihost->sci, &oem); + scic_oem_parameters_get(ihost, &oem); sci_sas_addr = oem.sds1.phys[index].sas_address.high; sci_sas_addr <<= 32; sci_sas_addr |= oem.sds1.phys[index].sas_address.low; diff --git a/drivers/scsi/isci/port.c b/drivers/scsi/isci/port.c index df37b1bf7d1..c434d5a0eff 100644 --- a/drivers/scsi/isci/port.c +++ b/drivers/scsi/isci/port.c @@ -365,11 +365,11 @@ static void isci_port_not_ready(struct isci_host *isci_host, struct isci_port *i "%s: isci_port = %p\n", __func__, isci_port); } -static void isci_port_stop_complete(struct scic_sds_controller *scic, +static void isci_port_stop_complete(struct isci_host *ihost, struct isci_port *iport, enum sci_status completion_status) { - dev_dbg(&scic_to_ihost(scic)->pdev->dev, "Port stop complete\n"); + dev_dbg(&ihost->pdev->dev, "Port stop complete\n"); } /** @@ -541,8 +541,7 @@ static enum sci_status scic_sds_port_clear_phy(struct isci_port *iport, /* Make sure that this phy is part of this port */ if (iport->phy_table[iphy->phy_index] == iphy && phy_get_non_dummy_port(iphy) == iport) { - struct scic_sds_controller *scic = iport->owning_controller; - struct isci_host *ihost = scic_to_ihost(scic); + struct isci_host *ihost = iport->owning_controller; /* Yep it is assigned to this port so remove it */ scic_sds_phy_set_port(iphy, &ihost->ports[SCI_MAX_PORTS]); @@ -654,10 +653,10 @@ static void scic_sds_port_construct_dummy_rnc(struct isci_port *iport, u16 rni) */ static void scic_sds_port_construct_dummy_task(struct isci_port *iport, u16 tag) { - struct scic_sds_controller *scic = iport->owning_controller; + struct isci_host *ihost = iport->owning_controller; struct scu_task_context *task_context; - task_context = &scic->task_context_table[ISCI_TAG_TCI(tag)]; + task_context = &ihost->task_context_table[ISCI_TAG_TCI(tag)]; memset(task_context, 0, sizeof(struct scu_task_context)); task_context->initiator_request = 1; @@ -674,13 +673,13 @@ static void scic_sds_port_construct_dummy_task(struct isci_port *iport, u16 tag) static void scic_sds_port_destroy_dummy_resources(struct isci_port *iport) { - struct scic_sds_controller *scic = iport->owning_controller; + struct isci_host *ihost = iport->owning_controller; if (iport->reserved_tag != SCI_CONTROLLER_INVALID_IO_TAG) - isci_free_tag(scic_to_ihost(scic), iport->reserved_tag); + isci_free_tag(ihost, iport->reserved_tag); if (iport->reserved_rni != SCU_DUMMY_INDEX) - scic_sds_remote_node_table_release_remote_node_index(&scic->available_remote_nodes, + scic_sds_remote_node_table_release_remote_node_index(&ihost->available_remote_nodes, 1, iport->reserved_rni); iport->reserved_rni = SCU_DUMMY_INDEX; @@ -749,15 +748,14 @@ static void scic_sds_port_activate_phy(struct isci_port *iport, struct isci_phy *iphy, bool do_notify_user) { - struct scic_sds_controller *scic = iport->owning_controller; - struct isci_host *ihost = scic_to_ihost(scic); + struct isci_host *ihost = iport->owning_controller; if (iphy->protocol != SCIC_SDS_PHY_PROTOCOL_SATA) scic_sds_phy_resume(iphy); iport->active_phy_mask |= 1 << 
iphy->phy_index; - scic_sds_controller_clear_invalid_phy(scic, iphy); + scic_sds_controller_clear_invalid_phy(ihost, iphy); if (do_notify_user == true) isci_port_link_up(ihost, iport, iphy); @@ -767,8 +765,7 @@ void scic_sds_port_deactivate_phy(struct isci_port *iport, struct isci_phy *iphy, bool do_notify_user) { - struct scic_sds_controller *scic = scic_sds_port_get_controller(iport); - struct isci_host *ihost = scic_to_ihost(scic); + struct isci_host *ihost = scic_sds_port_get_controller(iport); iport->active_phy_mask &= ~(1 << iphy->phy_index); @@ -793,16 +790,16 @@ void scic_sds_port_deactivate_phy(struct isci_port *iport, static void scic_sds_port_invalid_link_up(struct isci_port *iport, struct isci_phy *iphy) { - struct scic_sds_controller *scic = iport->owning_controller; + struct isci_host *ihost = iport->owning_controller; /* * Check to see if we have alreay reported this link as bad and if * not go ahead and tell the SCI_USER that we have discovered an * invalid link. */ - if ((scic->invalid_phy_mask & (1 << iphy->phy_index)) == 0) { - scic_sds_controller_set_invalid_phy(scic, iphy); - dev_warn(&scic_to_ihost(scic)->pdev->dev, "Invalid link up!\n"); + if ((ihost->invalid_phy_mask & (1 << iphy->phy_index)) == 0) { + scic_sds_controller_set_invalid_phy(ihost, iphy); + dev_warn(&ihost->pdev->dev, "Invalid link up!\n"); } } @@ -931,7 +928,7 @@ static void port_timeout(unsigned long data) { struct sci_timer *tmr = (struct sci_timer *)data; struct isci_port *iport = container_of(tmr, typeof(*iport), timer); - struct isci_host *ihost = scic_to_ihost(iport->owning_controller); + struct isci_host *ihost = iport->owning_controller; unsigned long flags; u32 current_state; @@ -1041,19 +1038,19 @@ static void scic_sds_port_suspend_port_task_scheduler(struct isci_port *iport) */ static void scic_sds_port_post_dummy_request(struct isci_port *iport) { - struct scic_sds_controller *scic = iport->owning_controller; + struct isci_host *ihost = iport->owning_controller; u16 tag = iport->reserved_tag; struct scu_task_context *tc; u32 command; - tc = &scic->task_context_table[ISCI_TAG_TCI(tag)]; + tc = &ihost->task_context_table[ISCI_TAG_TCI(tag)]; tc->abort = 0; command = SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC | iport->physical_port_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT | ISCI_TAG_TCI(tag); - scic_sds_controller_post_request(scic, command); + scic_sds_controller_post_request(ihost, command); } /** @@ -1065,19 +1062,19 @@ static void scic_sds_port_post_dummy_request(struct isci_port *iport) */ static void scic_sds_port_abort_dummy_request(struct isci_port *iport) { - struct scic_sds_controller *scic = iport->owning_controller; + struct isci_host *ihost = iport->owning_controller; u16 tag = iport->reserved_tag; struct scu_task_context *tc; u32 command; - tc = &scic->task_context_table[ISCI_TAG_TCI(tag)]; + tc = &ihost->task_context_table[ISCI_TAG_TCI(tag)]; tc->abort = 1; command = SCU_CONTEXT_COMMAND_REQUEST_POST_TC_ABORT | iport->physical_port_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT | ISCI_TAG_TCI(tag); - scic_sds_controller_post_request(scic, command); + scic_sds_controller_post_request(ihost, command); } /** @@ -1115,8 +1112,7 @@ static void scic_sds_port_ready_substate_operational_enter(struct sci_base_state { u32 index; struct isci_port *iport = container_of(sm, typeof(*iport), sm); - struct scic_sds_controller *scic = iport->owning_controller; - struct isci_host *ihost = scic_to_ihost(scic); + struct isci_host *ihost = iport->owning_controller; isci_port_ready(ihost, iport); 
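/*
 * Kernel-style sketch of the logging pattern the patch converges on: with the
 * core object folded into isci_host, the scic_to_dev() helper goes away and
 * dev_warn()/dev_dbg() take &ihost->pdev->dev directly, since the pci_dev
 * embeds the struct device the dev_*() printers want.  The demo_host type and
 * function name below are illustrative, not part of the driver.
 */
#include <linux/device.h>
#include <linux/pci.h>

struct demo_host {
	struct pci_dev *pdev;
};

static void demo_report_bad_state(struct demo_host *ihost, int state)
{
	dev_warn(&ihost->pdev->dev,
		 "unexpected controller state %d\n", state);
}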
@@ -1141,13 +1137,13 @@ static void scic_sds_port_ready_substate_operational_enter(struct sci_base_state static void scic_sds_port_invalidate_dummy_remote_node(struct isci_port *iport) { - struct scic_sds_controller *scic = iport->owning_controller; + struct isci_host *ihost = iport->owning_controller; u8 phys_index = iport->physical_port_index; union scu_remote_node_context *rnc; u16 rni = iport->reserved_rni; u32 command; - rnc = &scic->remote_node_context_table[rni]; + rnc = &ihost->remote_node_context_table[rni]; rnc->ssp.is_valid = false; @@ -1155,13 +1151,13 @@ static void scic_sds_port_invalidate_dummy_remote_node(struct isci_port *iport) * controller and give it ample time to act before posting the rnc * invalidate */ - readl(&scic->smu_registers->interrupt_status); /* flush */ + readl(&ihost->smu_registers->interrupt_status); /* flush */ udelay(10); command = SCU_CONTEXT_COMMAND_POST_RNC_INVALIDATE | phys_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT | rni; - scic_sds_controller_post_request(scic, command); + scic_sds_controller_post_request(ihost, command); } /** @@ -1175,8 +1171,7 @@ static void scic_sds_port_invalidate_dummy_remote_node(struct isci_port *iport) static void scic_sds_port_ready_substate_operational_exit(struct sci_base_state_machine *sm) { struct isci_port *iport = container_of(sm, typeof(*iport), sm); - struct scic_sds_controller *scic = iport->owning_controller; - struct isci_host *ihost = scic_to_ihost(scic); + struct isci_host *ihost = iport->owning_controller; /* * Kill the dummy task for this port if it has not yet posted @@ -1194,8 +1189,7 @@ static void scic_sds_port_ready_substate_operational_exit(struct sci_base_state_ static void scic_sds_port_ready_substate_configuring_enter(struct sci_base_state_machine *sm) { struct isci_port *iport = container_of(sm, typeof(*iport), sm); - struct scic_sds_controller *scic = iport->owning_controller; - struct isci_host *ihost = scic_to_ihost(scic); + struct isci_host *ihost = iport->owning_controller; if (iport->active_phy_mask == 0) { isci_port_not_ready(ihost, iport); @@ -1218,7 +1212,7 @@ static void scic_sds_port_ready_substate_configuring_exit(struct sci_base_state_ enum sci_status scic_sds_port_start(struct isci_port *iport) { - struct scic_sds_controller *scic = iport->owning_controller; + struct isci_host *ihost = iport->owning_controller; enum sci_status status = SCI_SUCCESS; enum scic_sds_port_states state; u32 phy_mask; @@ -1241,7 +1235,7 @@ enum sci_status scic_sds_port_start(struct isci_port *iport) if (iport->reserved_rni == SCU_DUMMY_INDEX) { u16 rni = scic_sds_remote_node_table_allocate_remote_node( - &scic->available_remote_nodes, 1); + &ihost->available_remote_nodes, 1); if (rni != SCU_DUMMY_INDEX) scic_sds_port_construct_dummy_rnc(iport, rni); @@ -1251,7 +1245,6 @@ enum sci_status scic_sds_port_start(struct isci_port *iport) } if (iport->reserved_tag == SCI_CONTROLLER_INVALID_IO_TAG) { - struct isci_host *ihost = scic_to_ihost(scic); u16 tag; tag = isci_alloc_tag(ihost); @@ -1634,30 +1627,30 @@ scic_sds_port_disable_port_task_scheduler(struct isci_port *iport) static void scic_sds_port_post_dummy_remote_node(struct isci_port *iport) { - struct scic_sds_controller *scic = iport->owning_controller; + struct isci_host *ihost = iport->owning_controller; u8 phys_index = iport->physical_port_index; union scu_remote_node_context *rnc; u16 rni = iport->reserved_rni; u32 command; - rnc = &scic->remote_node_context_table[rni]; + rnc = &ihost->remote_node_context_table[rni]; rnc->ssp.is_valid = true; 
command = SCU_CONTEXT_COMMAND_POST_RNC_32 | phys_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT | rni; - scic_sds_controller_post_request(scic, command); + scic_sds_controller_post_request(ihost, command); /* ensure hardware has seen the post rnc command and give it * ample time to act before sending the suspend */ - readl(&scic->smu_registers->interrupt_status); /* flush */ + readl(&ihost->smu_registers->interrupt_status); /* flush */ udelay(10); command = SCU_CONTEXT_COMMAND_POST_RNC_SUSPEND_TX_RX | phys_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT | rni; - scic_sds_controller_post_request(scic, command); + scic_sds_controller_post_request(ihost, command); } static void scic_sds_port_stopped_state_enter(struct sci_base_state_machine *sm) @@ -1684,8 +1677,7 @@ static void scic_sds_port_stopped_state_exit(struct sci_base_state_machine *sm) static void scic_sds_port_ready_state_enter(struct sci_base_state_machine *sm) { struct isci_port *iport = container_of(sm, typeof(*iport), sm); - struct scic_sds_controller *scic = iport->owning_controller; - struct isci_host *ihost = scic_to_ihost(scic); + struct isci_host *ihost = iport->owning_controller; u32 prev_state; prev_state = iport->sm.previous_state_id; @@ -1758,7 +1750,7 @@ static const struct sci_base_state scic_sds_port_state_table[] = { }; void scic_sds_port_construct(struct isci_port *iport, u8 index, - struct scic_sds_controller *scic) + struct isci_host *ihost) { sci_init_sm(&iport->sm, scic_sds_port_state_table, SCI_PORT_STOPPED); @@ -1767,7 +1759,7 @@ void scic_sds_port_construct(struct isci_port *iport, u8 index, iport->active_phy_mask = 0; iport->ready_exit = false; - iport->owning_controller = scic; + iport->owning_controller = ihost; iport->started_request_count = 0; iport->assigned_device_count = 0; @@ -1810,8 +1802,7 @@ void scic_sds_port_broadcast_change_received( struct isci_port *iport, struct isci_phy *iphy) { - struct scic_sds_controller *scic = iport->owning_controller; - struct isci_host *ihost = scic_to_ihost(scic); + struct isci_host *ihost = iport->owning_controller; /* notify the user. */ isci_port_bc_change_received(ihost, iport, iphy); diff --git a/drivers/scsi/isci/port.h b/drivers/scsi/isci/port.h index b9bc89bf651..9a9be7b47b4 100644 --- a/drivers/scsi/isci/port.h +++ b/drivers/scsi/isci/port.h @@ -115,7 +115,7 @@ struct isci_port { u32 assigned_device_count; u32 not_ready_reason; struct isci_phy *phy_table[SCI_MAX_PHYS]; - struct scic_sds_controller *owning_controller; + struct isci_host *owning_controller; struct sci_timer timer; struct scu_port_task_scheduler_registers __iomem *port_task_scheduler_registers; /* XXX rework: only one register, no need to replicate per-port */ @@ -243,7 +243,7 @@ static inline void scic_sds_port_decrement_request_count(struct isci_port *iport void scic_sds_port_construct( struct isci_port *iport, u8 port_index, - struct scic_sds_controller *scic); + struct isci_host *ihost); enum sci_status scic_sds_port_initialize( struct isci_port *iport, diff --git a/drivers/scsi/isci/port_config.c b/drivers/scsi/isci/port_config.c index bb62d2a2521..a0a135d54e9 100644 --- a/drivers/scsi/isci/port_config.c +++ b/drivers/scsi/isci/port_config.c @@ -113,7 +113,7 @@ static s32 sci_sas_address_compare( * NULL if there is no matching port for the phy. 
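/*
 * Sketch of the posted-write flush idiom used by the dummy-RNC code above:
 * after writing a command, a throwaway readl() of a register on the same
 * device forces the posted MMIO write out of any buffers, and the udelay()
 * gives the hardware time to act before the follow-up command is posted.
 * The function and register arguments here are generic placeholders.
 */
#include <linux/types.h>
#include <linux/io.h>
#include <linux/delay.h>

static void demo_post_pair(u32 first, u32 second,
			   void __iomem *post_reg, void __iomem *flush_reg)
{
	writel(first, post_reg);
	readl(flush_reg);	/* non-posted read flushes the posted write */
	udelay(10);		/* give the device time to process the command */
	writel(second, post_reg);
}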
*/ static struct isci_port *scic_sds_port_configuration_agent_find_port( - struct scic_sds_controller *scic, + struct isci_host *ihost, struct isci_phy *iphy) { u8 i; @@ -130,8 +130,7 @@ static struct isci_port *scic_sds_port_configuration_agent_find_port( scic_sds_phy_get_sas_address(iphy, &phy_sas_address); scic_sds_phy_get_attached_sas_address(iphy, &phy_attached_device_address); - for (i = 0; i < scic->logical_port_entries; i++) { - struct isci_host *ihost = scic_to_ihost(scic); + for (i = 0; i < ihost->logical_port_entries; i++) { struct isci_port *iport = &ihost->ports[i]; scic_sds_port_get_sas_address(iport, &port_sas_address); @@ -158,10 +157,9 @@ static struct isci_port *scic_sds_port_configuration_agent_find_port( * the port configuration is not valid for this port configuration agent. */ static enum sci_status scic_sds_port_configuration_agent_validate_ports( - struct scic_sds_controller *controller, + struct isci_host *ihost, struct scic_sds_port_configuration_agent *port_agent) { - struct isci_host *ihost = scic_to_ihost(controller); struct sci_sas_address first_address; struct sci_sas_address second_address; @@ -239,17 +237,11 @@ static enum sci_status scic_sds_port_configuration_agent_validate_ports( * Manual port configuration agent routines * ****************************************************************************** */ -/** - * - * - * This routine will verify that all of the phys in the same port are using the - * same SAS address. - */ -static enum sci_status scic_sds_mpc_agent_validate_phy_configuration( - struct scic_sds_controller *controller, - struct scic_sds_port_configuration_agent *port_agent) +/* verify all of the phys in the same port are using the same SAS address */ +static enum sci_status +scic_sds_mpc_agent_validate_phy_configuration(struct isci_host *ihost, + struct scic_sds_port_configuration_agent *port_agent) { - struct isci_host *ihost = scic_to_ihost(controller); u32 phy_mask; u32 assigned_phy_mask; struct sci_sas_address sas_address; @@ -262,7 +254,7 @@ static enum sci_status scic_sds_mpc_agent_validate_phy_configuration( sas_address.low = 0; for (port_index = 0; port_index < SCI_MAX_PORTS; port_index++) { - phy_mask = controller->oem_parameters.sds1.ports[port_index].phy_mask; + phy_mask = ihost->oem_parameters.sds1.ports[port_index].phy_mask; if (!phy_mask) continue; @@ -324,7 +316,7 @@ static enum sci_status scic_sds_mpc_agent_validate_phy_configuration( phy_index++; } - return scic_sds_port_configuration_agent_validate_ports(controller, port_agent); + return scic_sds_port_configuration_agent_validate_ports(ihost, port_agent); } static void mpc_agent_timeout(unsigned long data) @@ -332,14 +324,12 @@ static void mpc_agent_timeout(unsigned long data) u8 index; struct sci_timer *tmr = (struct sci_timer *)data; struct scic_sds_port_configuration_agent *port_agent; - struct scic_sds_controller *scic; struct isci_host *ihost; unsigned long flags; u16 configure_phy_mask; port_agent = container_of(tmr, typeof(*port_agent), timer); - scic = container_of(port_agent, typeof(*scic), port_agent); - ihost = scic_to_ihost(scic); + ihost = container_of(port_agent, typeof(*ihost), port_agent); spin_lock_irqsave(&ihost->scic_lock, flags); @@ -355,7 +345,7 @@ static void mpc_agent_timeout(unsigned long data) struct isci_phy *iphy = &ihost->phys[index]; if (configure_phy_mask & (1 << index)) { - port_agent->link_up_handler(scic, port_agent, + port_agent->link_up_handler(ihost, port_agent, phy_get_non_dummy_port(iphy), iphy); } @@ -365,7 +355,7 @@ done: 
spin_unlock_irqrestore(&ihost->scic_lock, flags); } -static void scic_sds_mpc_agent_link_up(struct scic_sds_controller *controller, +static void scic_sds_mpc_agent_link_up(struct isci_host *ihost, struct scic_sds_port_configuration_agent *port_agent, struct isci_port *iport, struct isci_phy *iphy) @@ -401,7 +391,7 @@ static void scic_sds_mpc_agent_link_up(struct scic_sds_controller *controller, * link down notification from a phy that has no assocoated port? */ static void scic_sds_mpc_agent_link_down( - struct scic_sds_controller *scic, + struct isci_host *ihost, struct scic_sds_port_configuration_agent *port_agent, struct isci_port *iport, struct isci_phy *iphy) @@ -438,26 +428,17 @@ static void scic_sds_mpc_agent_link_down( } } -/* - * ****************************************************************************** - * Automatic port configuration agent routines - * ****************************************************************************** */ - -/** - * - * - * This routine will verify that the phys are assigned a valid SAS address for - * automatic port configuration mode. +/* verify phys are assigned a valid SAS address for automatic port + * configuration mode. */ -static enum sci_status scic_sds_apc_agent_validate_phy_configuration( - struct scic_sds_controller *controller, - struct scic_sds_port_configuration_agent *port_agent) +static enum sci_status +scic_sds_apc_agent_validate_phy_configuration(struct isci_host *ihost, + struct scic_sds_port_configuration_agent *port_agent) { u8 phy_index; u8 port_index; struct sci_sas_address sas_address; struct sci_sas_address phy_assigned_address; - struct isci_host *ihost = scic_to_ihost(controller); phy_index = 0; @@ -484,10 +465,10 @@ static enum sci_status scic_sds_apc_agent_validate_phy_configuration( } } - return scic_sds_port_configuration_agent_validate_ports(controller, port_agent); + return scic_sds_port_configuration_agent_validate_ports(ihost, port_agent); } -static void scic_sds_apc_agent_configure_ports(struct scic_sds_controller *controller, +static void scic_sds_apc_agent_configure_ports(struct isci_host *ihost, struct scic_sds_port_configuration_agent *port_agent, struct isci_phy *iphy, bool start_timer) @@ -496,9 +477,8 @@ static void scic_sds_apc_agent_configure_ports(struct scic_sds_controller *contr enum sci_status status; struct isci_port *iport; enum SCIC_SDS_APC_ACTIVITY apc_activity = SCIC_SDS_APC_SKIP_PHY; - struct isci_host *ihost = scic_to_ihost(controller); - iport = scic_sds_port_configuration_agent_find_port(controller, iphy); + iport = scic_sds_port_configuration_agent_find_port(ihost, iphy); if (iport) { if (scic_sds_port_is_valid_phy_assignment(iport, iphy->phy_index)) @@ -619,7 +599,7 @@ static void scic_sds_apc_agent_configure_ports(struct scic_sds_controller *contr * notifications. Is it possible to get a link down notification from a phy * that has no assocoated port? 
*/ -static void scic_sds_apc_agent_link_up(struct scic_sds_controller *scic, +static void scic_sds_apc_agent_link_up(struct isci_host *ihost, struct scic_sds_port_configuration_agent *port_agent, struct isci_port *iport, struct isci_phy *iphy) @@ -629,7 +609,7 @@ static void scic_sds_apc_agent_link_up(struct scic_sds_controller *scic, if (!iport) { /* the phy is not the part of this port */ port_agent->phy_ready_mask |= 1 << phy_index; - scic_sds_apc_agent_configure_ports(scic, port_agent, iphy, true); + scic_sds_apc_agent_configure_ports(ihost, port_agent, iphy, true); } else { /* the phy is already the part of the port */ u32 port_state = iport->sm.current_state_id; @@ -658,7 +638,7 @@ static void scic_sds_apc_agent_link_up(struct scic_sds_controller *scic, * port? */ static void scic_sds_apc_agent_link_down( - struct scic_sds_controller *controller, + struct isci_host *ihost, struct scic_sds_port_configuration_agent *port_agent, struct isci_port *iport, struct isci_phy *iphy) @@ -683,14 +663,12 @@ static void apc_agent_timeout(unsigned long data) u32 index; struct sci_timer *tmr = (struct sci_timer *)data; struct scic_sds_port_configuration_agent *port_agent; - struct scic_sds_controller *scic; struct isci_host *ihost; unsigned long flags; u16 configure_phy_mask; port_agent = container_of(tmr, typeof(*port_agent), timer); - scic = container_of(port_agent, typeof(*scic), port_agent); - ihost = scic_to_ihost(scic); + ihost = container_of(port_agent, typeof(*ihost), port_agent); spin_lock_irqsave(&ihost->scic_lock, flags); @@ -708,7 +686,7 @@ static void apc_agent_timeout(unsigned long data) if ((configure_phy_mask & (1 << index)) == 0) continue; - scic_sds_apc_agent_configure_ports(scic, port_agent, + scic_sds_apc_agent_configure_ports(ihost, port_agent, &ihost->phys[index], false); } @@ -748,17 +726,17 @@ void scic_sds_port_configuration_agent_construct( } enum sci_status scic_sds_port_configuration_agent_initialize( - struct scic_sds_controller *scic, + struct isci_host *ihost, struct scic_sds_port_configuration_agent *port_agent) { enum sci_status status; enum scic_port_configuration_mode mode; - mode = scic->oem_parameters.sds1.controller.mode_type; + mode = ihost->oem_parameters.sds1.controller.mode_type; if (mode == SCIC_PORT_MANUAL_CONFIGURATION_MODE) { status = scic_sds_mpc_agent_validate_phy_configuration( - scic, port_agent); + ihost, port_agent); port_agent->link_up_handler = scic_sds_mpc_agent_link_up; port_agent->link_down_handler = scic_sds_mpc_agent_link_down; @@ -766,7 +744,7 @@ enum sci_status scic_sds_port_configuration_agent_initialize( sci_init_timer(&port_agent->timer, mpc_agent_timeout); } else { status = scic_sds_apc_agent_validate_phy_configuration( - scic, port_agent); + ihost, port_agent); port_agent->link_up_handler = scic_sds_apc_agent_link_up; port_agent->link_down_handler = scic_sds_apc_agent_link_down; diff --git a/drivers/scsi/isci/probe_roms.h b/drivers/scsi/isci/probe_roms.h index 95c8d91aab8..e40cb5f6eba 100644 --- a/drivers/scsi/isci/probe_roms.h +++ b/drivers/scsi/isci/probe_roms.h @@ -165,7 +165,7 @@ struct scic_sds_oem_params; int scic_oem_parameters_validate(struct scic_sds_oem_params *oem); union scic_oem_parameters; -void scic_oem_parameters_get(struct scic_sds_controller *scic, +void scic_oem_parameters_get(struct isci_host *ihost, union scic_oem_parameters *oem); struct isci_orom; diff --git a/drivers/scsi/isci/remote_device.c b/drivers/scsi/isci/remote_device.c index 3b0234049a3..9043b458c99 100644 --- a/drivers/scsi/isci/remote_device.c +++ 
b/drivers/scsi/isci/remote_device.c @@ -62,7 +62,7 @@ #include "task.h" /** - * isci_remote_device_not_ready() - This function is called by the scic when + * isci_remote_device_not_ready() - This function is called by the ihost when * the remote device is not ready. We mark the isci device as ready (not * "ready_for_io") and signal the waiting proccess. * @isci_host: This parameter specifies the isci host object. @@ -92,7 +92,7 @@ static void isci_remote_device_not_ready(struct isci_host *ihost, "%s: isci_device = %p request = %p\n", __func__, idev, ireq); - scic_controller_terminate_request(&ihost->sci, + scic_controller_terminate_request(ihost, idev, ireq); } @@ -104,7 +104,7 @@ static void isci_remote_device_not_ready(struct isci_host *ihost, } /** - * isci_remote_device_ready() - This function is called by the scic when the + * isci_remote_device_ready() - This function is called by the ihost when the * remote device is ready. We mark the isci device as ready and signal the * waiting proccess. * @ihost: our valid isci_host @@ -135,8 +135,7 @@ static void rnc_destruct_done(void *_dev) static enum sci_status scic_sds_remote_device_terminate_requests(struct isci_remote_device *idev) { - struct scic_sds_controller *scic = idev->owning_port->owning_controller; - struct isci_host *ihost = scic_to_ihost(scic); + struct isci_host *ihost = idev->owning_port->owning_controller; enum sci_status status = SCI_SUCCESS; u32 i; @@ -148,7 +147,7 @@ static enum sci_status scic_sds_remote_device_terminate_requests(struct isci_rem ireq->target_device != idev) continue; - s = scic_controller_terminate_request(scic, idev, ireq); + s = scic_controller_terminate_request(ihost, idev, ireq); if (s != SCI_SUCCESS) status = s; } @@ -276,7 +275,7 @@ enum sci_status scic_sds_remote_device_frame_handler(struct isci_remote_device * { struct sci_base_state_machine *sm = &idev->sm; enum scic_sds_remote_device_states state = sm->current_state_id; - struct scic_sds_controller *scic = idev->owning_port->owning_controller; + struct isci_host *ihost = idev->owning_port->owning_controller; enum sci_status status; switch (state) { @@ -290,7 +289,7 @@ enum sci_status scic_sds_remote_device_frame_handler(struct isci_remote_device * dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n", __func__, state); /* Return the frame back to the controller */ - scic_sds_controller_release_frame(scic, frame_index); + scic_sds_controller_release_frame(ihost, frame_index); return SCI_FAILURE_INVALID_STATE; case SCI_DEV_READY: case SCI_STP_DEV_NCQ_ERROR: @@ -303,7 +302,7 @@ enum sci_status scic_sds_remote_device_frame_handler(struct isci_remote_device * void *frame_header; ssize_t word_cnt; - status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control, + status = scic_sds_unsolicited_frame_control_get_header(&ihost->uf_control, frame_index, &frame_header); if (status != SCI_SUCCESS) @@ -312,7 +311,7 @@ enum sci_status scic_sds_remote_device_frame_handler(struct isci_remote_device * word_cnt = sizeof(hdr) / sizeof(u32); sci_swab32_cpy(&hdr, frame_header, word_cnt); - ireq = scic_request_by_tag(scic, be16_to_cpu(hdr.tag)); + ireq = scic_request_by_tag(ihost, be16_to_cpu(hdr.tag)); if (ireq && ireq->target_device == idev) { /* The IO request is now in charge of releasing the frame */ status = scic_sds_io_request_frame_handler(ireq, frame_index); @@ -320,14 +319,14 @@ enum sci_status scic_sds_remote_device_frame_handler(struct isci_remote_device * /* We could not map this tag to a valid IO * request Just toss the frame and 
continue */ - scic_sds_controller_release_frame(scic, frame_index); + scic_sds_controller_release_frame(ihost, frame_index); } break; } case SCI_STP_DEV_NCQ: { struct dev_to_host_fis *hdr; - status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control, + status = scic_sds_unsolicited_frame_control_get_header(&ihost->uf_control, frame_index, (void **)&hdr); if (status != SCI_SUCCESS) @@ -350,7 +349,7 @@ enum sci_status scic_sds_remote_device_frame_handler(struct isci_remote_device * } else status = SCI_FAILURE; - scic_sds_controller_release_frame(scic, frame_index); + scic_sds_controller_release_frame(ihost, frame_index); break; } case SCI_STP_DEV_CMD: @@ -461,7 +460,7 @@ static void scic_sds_remote_device_start_request(struct isci_remote_device *idev } } -enum sci_status scic_sds_remote_device_start_io(struct scic_sds_controller *scic, +enum sci_status scic_sds_remote_device_start_io(struct isci_host *ihost, struct isci_remote_device *idev, struct isci_request *ireq) { @@ -597,7 +596,7 @@ static enum sci_status common_complete_io(struct isci_port *iport, return status; } -enum sci_status scic_sds_remote_device_complete_io(struct scic_sds_controller *scic, +enum sci_status scic_sds_remote_device_complete_io(struct isci_host *ihost, struct isci_remote_device *idev, struct isci_request *ireq) { @@ -678,7 +677,7 @@ static void scic_sds_remote_device_continue_request(void *dev) scic_controller_continue_io(idev->working_request); } -enum sci_status scic_sds_remote_device_start_task(struct scic_sds_controller *scic, +enum sci_status scic_sds_remote_device_start_task(struct isci_host *ihost, struct isci_remote_device *idev, struct isci_request *ireq) { @@ -802,13 +801,13 @@ static void remote_device_resume_done(void *_dev) static void scic_sds_stp_remote_device_ready_idle_substate_resume_complete_handler(void *_dev) { struct isci_remote_device *idev = _dev; - struct scic_sds_controller *scic = idev->owning_port->owning_controller; + struct isci_host *ihost = idev->owning_port->owning_controller; /* For NCQ operation we do not issue a isci_remote_device_not_ready(). * As a result, avoid sending the ready notification. 
*/ if (idev->sm.previous_state_id != SCI_STP_DEV_NCQ) - isci_remote_device_ready(scic_to_ihost(scic), idev); + isci_remote_device_ready(ihost, idev); } static void scic_sds_remote_device_initial_state_enter(struct sci_base_state_machine *sm) @@ -836,7 +835,7 @@ static enum sci_status scic_remote_device_destruct(struct isci_remote_device *id { struct sci_base_state_machine *sm = &idev->sm; enum scic_sds_remote_device_states state = sm->current_state_id; - struct scic_sds_controller *scic; + struct isci_host *ihost; if (state != SCI_DEV_STOPPED) { dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n", @@ -844,8 +843,8 @@ static enum sci_status scic_remote_device_destruct(struct isci_remote_device *id return SCI_FAILURE_INVALID_STATE; } - scic = idev->owning_port->owning_controller; - scic_sds_controller_free_remote_node_context(scic, idev, + ihost = idev->owning_port->owning_controller; + scic_sds_controller_free_remote_node_context(ihost, idev, idev->rnc.remote_node_index); idev->rnc.remote_node_index = SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX; sci_change_state(sm, SCI_DEV_FINAL); @@ -878,7 +877,7 @@ static void isci_remote_device_deconstruct(struct isci_host *ihost, struct isci_ static void scic_sds_remote_device_stopped_state_enter(struct sci_base_state_machine *sm) { struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); - struct scic_sds_controller *scic = idev->owning_port->owning_controller; + struct isci_host *ihost = idev->owning_port->owning_controller; u32 prev_state; /* If we are entering from the stopping state let the SCI User know that @@ -886,16 +885,15 @@ static void scic_sds_remote_device_stopped_state_enter(struct sci_base_state_mac */ prev_state = idev->sm.previous_state_id; if (prev_state == SCI_DEV_STOPPING) - isci_remote_device_deconstruct(scic_to_ihost(scic), idev); + isci_remote_device_deconstruct(ihost, idev); - scic_sds_controller_remote_device_stopped(scic, idev); + scic_sds_controller_remote_device_stopped(ihost, idev); } static void scic_sds_remote_device_starting_state_enter(struct sci_base_state_machine *sm) { struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); - struct scic_sds_controller *scic = scic_sds_remote_device_get_controller(idev); - struct isci_host *ihost = scic_to_ihost(scic); + struct isci_host *ihost = scic_sds_remote_device_get_controller(idev); isci_remote_device_not_ready(ihost, idev, SCIC_REMOTE_DEVICE_NOT_READY_START_REQUESTED); @@ -904,7 +902,7 @@ static void scic_sds_remote_device_starting_state_enter(struct sci_base_state_ma static void scic_sds_remote_device_ready_state_enter(struct sci_base_state_machine *sm) { struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); - struct scic_sds_controller *scic = idev->owning_port->owning_controller; + struct isci_host *ihost = idev->owning_port->owning_controller; struct domain_device *dev = idev->domain_dev; if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_SATA)) { @@ -912,7 +910,7 @@ static void scic_sds_remote_device_ready_state_enter(struct sci_base_state_machi } else if (dev_is_expander(dev)) { sci_change_state(&idev->sm, SCI_SMP_DEV_IDLE); } else - isci_remote_device_ready(scic_to_ihost(scic), idev); + isci_remote_device_ready(ihost, idev); } static void scic_sds_remote_device_ready_state_exit(struct sci_base_state_machine *sm) @@ -921,9 +919,9 @@ static void scic_sds_remote_device_ready_state_exit(struct sci_base_state_machin struct domain_device *dev = idev->domain_dev; if (dev->dev_type == SAS_END_DEV) { - struct 
scic_sds_controller *scic = idev->owning_port->owning_controller; + struct isci_host *ihost = idev->owning_port->owning_controller; - isci_remote_device_not_ready(scic_to_ihost(scic), idev, + isci_remote_device_not_ready(ihost, idev, SCIC_REMOTE_DEVICE_NOT_READY_STOP_REQUESTED); } } @@ -963,40 +961,40 @@ static void scic_sds_stp_remote_device_ready_idle_substate_enter(struct sci_base static void scic_sds_stp_remote_device_ready_cmd_substate_enter(struct sci_base_state_machine *sm) { struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); - struct scic_sds_controller *scic = scic_sds_remote_device_get_controller(idev); + struct isci_host *ihost = scic_sds_remote_device_get_controller(idev); BUG_ON(idev->working_request == NULL); - isci_remote_device_not_ready(scic_to_ihost(scic), idev, + isci_remote_device_not_ready(ihost, idev, SCIC_REMOTE_DEVICE_NOT_READY_SATA_REQUEST_STARTED); } static void scic_sds_stp_remote_device_ready_ncq_error_substate_enter(struct sci_base_state_machine *sm) { struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); - struct scic_sds_controller *scic = scic_sds_remote_device_get_controller(idev); + struct isci_host *ihost = scic_sds_remote_device_get_controller(idev); if (idev->not_ready_reason == SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED) - isci_remote_device_not_ready(scic_to_ihost(scic), idev, + isci_remote_device_not_ready(ihost, idev, idev->not_ready_reason); } static void scic_sds_smp_remote_device_ready_idle_substate_enter(struct sci_base_state_machine *sm) { struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); - struct scic_sds_controller *scic = scic_sds_remote_device_get_controller(idev); + struct isci_host *ihost = scic_sds_remote_device_get_controller(idev); - isci_remote_device_ready(scic_to_ihost(scic), idev); + isci_remote_device_ready(ihost, idev); } static void scic_sds_smp_remote_device_ready_cmd_substate_enter(struct sci_base_state_machine *sm) { struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); - struct scic_sds_controller *scic = scic_sds_remote_device_get_controller(idev); + struct isci_host *ihost = scic_sds_remote_device_get_controller(idev); BUG_ON(idev->working_request == NULL); - isci_remote_device_not_ready(scic_to_ihost(scic), idev, + isci_remote_device_not_ready(ihost, idev, SCIC_REMOTE_DEVICE_NOT_READY_SMP_REQUEST_STARTED); } @@ -1303,7 +1301,7 @@ void isci_remote_device_release(struct kref *kref) * @isci_host: This parameter specifies the isci host object. * @isci_device: This parameter specifies the remote device. * - * The status of the scic request to stop. + * The status of the ihost request to stop. 
*/ enum sci_status isci_remote_device_stop(struct isci_host *ihost, struct isci_remote_device *idev) { diff --git a/drivers/scsi/isci/remote_device.h b/drivers/scsi/isci/remote_device.h index 45798582fc1..bc4da20a13f 100644 --- a/drivers/scsi/isci/remote_device.h +++ b/drivers/scsi/isci/remote_device.h @@ -402,17 +402,17 @@ enum sci_status scic_sds_remote_device_event_handler( u32 event_code); enum sci_status scic_sds_remote_device_start_io( - struct scic_sds_controller *controller, + struct isci_host *ihost, struct isci_remote_device *idev, struct isci_request *ireq); enum sci_status scic_sds_remote_device_start_task( - struct scic_sds_controller *controller, + struct isci_host *ihost, struct isci_remote_device *idev, struct isci_request *ireq); enum sci_status scic_sds_remote_device_complete_io( - struct scic_sds_controller *controller, + struct isci_host *ihost, struct isci_remote_device *idev, struct isci_request *ireq); diff --git a/drivers/scsi/isci/remote_node_context.c b/drivers/scsi/isci/remote_node_context.c index e485744e126..8a5203b6eb0 100644 --- a/drivers/scsi/isci/remote_node_context.c +++ b/drivers/scsi/isci/remote_node_context.c @@ -107,11 +107,11 @@ static void scic_sds_remote_node_context_construct_buffer( struct domain_device *dev = idev->domain_dev; int rni = sci_rnc->remote_node_index; union scu_remote_node_context *rnc; - struct scic_sds_controller *scic; + struct isci_host *ihost; __le64 sas_addr; - scic = scic_sds_remote_device_get_controller(idev); - rnc = scic_sds_controller_get_remote_node_context_buffer(scic, rni); + ihost = scic_sds_remote_device_get_controller(idev); + rnc = scic_sds_controller_get_remote_node_context_buffer(ihost, rni); memset(rnc, 0, sizeof(union scu_remote_node_context) * scic_sds_remote_device_node_count(idev)); @@ -135,14 +135,14 @@ static void scic_sds_remote_node_context_construct_buffer( if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) { rnc->ssp.connection_occupancy_timeout = - scic->user_parameters.sds1.stp_max_occupancy_timeout; + ihost->user_parameters.sds1.stp_max_occupancy_timeout; rnc->ssp.connection_inactivity_timeout = - scic->user_parameters.sds1.stp_inactivity_timeout; + ihost->user_parameters.sds1.stp_inactivity_timeout; } else { rnc->ssp.connection_occupancy_timeout = - scic->user_parameters.sds1.ssp_max_occupancy_timeout; + ihost->user_parameters.sds1.ssp_max_occupancy_timeout; rnc->ssp.connection_inactivity_timeout = - scic->user_parameters.sds1.ssp_inactivity_timeout; + ihost->user_parameters.sds1.ssp_inactivity_timeout; } rnc->ssp.initial_arbitration_wait_time = 0; diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c index 90ead662828..36e674896bc 100644 --- a/drivers/scsi/isci/request.c +++ b/drivers/scsi/isci/request.c @@ -74,19 +74,19 @@ static struct scu_sgl_element_pair *to_sgl_element_pair(struct isci_request *ire return &ireq->sg_table[idx - 2]; } -static dma_addr_t to_sgl_element_pair_dma(struct scic_sds_controller *scic, +static dma_addr_t to_sgl_element_pair_dma(struct isci_host *ihost, struct isci_request *ireq, u32 idx) { u32 offset; if (idx == 0) { offset = (void *) &ireq->tc->sgl_pair_ab - - (void *) &scic->task_context_table[0]; - return scic->task_context_dma + offset; + (void *) &ihost->task_context_table[0]; + return ihost->task_context_dma + offset; } else if (idx == 1) { offset = (void *) &ireq->tc->sgl_pair_cd - - (void *) &scic->task_context_table[0]; - return scic->task_context_dma + offset; + (void *) &ihost->task_context_table[0]; + return 
ihost->task_context_dma + offset; } return scic_io_request_get_dma_addr(ireq, &ireq->sg_table[idx - 2]); @@ -102,8 +102,7 @@ static void init_sgl_element(struct scu_sgl_element *e, struct scatterlist *sg) static void scic_sds_request_build_sgl(struct isci_request *ireq) { - struct isci_host *isci_host = ireq->isci_host; - struct scic_sds_controller *scic = &isci_host->sci; + struct isci_host *ihost = ireq->isci_host; struct sas_task *task = isci_request_access_task(ireq); struct scatterlist *sg = NULL; dma_addr_t dma_addr; @@ -125,7 +124,7 @@ static void scic_sds_request_build_sgl(struct isci_request *ireq) memset(&scu_sg->B, 0, sizeof(scu_sg->B)); if (prev_sg) { - dma_addr = to_sgl_element_pair_dma(scic, + dma_addr = to_sgl_element_pair_dma(ihost, ireq, sg_idx); @@ -141,7 +140,7 @@ static void scic_sds_request_build_sgl(struct isci_request *ireq) } else { /* handle when no sg */ scu_sg = to_sgl_element_pair(ireq, sg_idx); - dma_addr = dma_map_single(&isci_host->pdev->dev, + dma_addr = dma_map_single(&ihost->pdev->dev, task->scatter, task->total_xfer_len, task->data_dir); @@ -508,7 +507,7 @@ scic_io_request_construct_sata(struct isci_request *ireq, scu_stp_raw_request_construct_task_context(ireq); return SCI_SUCCESS; } else { - dev_err(scic_to_dev(ireq->owning_controller), + dev_err(&ireq->owning_controller->pdev->dev, "%s: Request 0x%p received un-handled SAT " "management protocol 0x%x.\n", __func__, ireq, tmf->tmf_code); @@ -518,7 +517,7 @@ scic_io_request_construct_sata(struct isci_request *ireq, } if (!sas_protocol_ata(task->task_proto)) { - dev_err(scic_to_dev(ireq->owning_controller), + dev_err(&ireq->owning_controller->pdev->dev, "%s: Non-ATA protocol in SATA path: 0x%x\n", __func__, task->task_proto); @@ -616,7 +615,7 @@ enum sci_status scic_task_request_construct_sata(struct isci_request *ireq) tmf->tmf_code == isci_tmf_sata_srst_low) { scu_stp_raw_request_construct_task_context(ireq); } else { - dev_err(scic_to_dev(ireq->owning_controller), + dev_err(&ireq->owning_controller->pdev->dev, "%s: Request 0x%p received un-handled SAT " "Protocol 0x%x.\n", __func__, ireq, tmf->tmf_code); @@ -639,11 +638,11 @@ enum sci_status scic_task_request_construct_sata(struct isci_request *ireq) #define SCU_TASK_CONTEXT_SRAM 0x200000 static u32 sci_req_tx_bytes(struct isci_request *ireq) { - struct scic_sds_controller *scic = ireq->owning_controller; + struct isci_host *ihost = ireq->owning_controller; u32 ret_val = 0; - if (readl(&scic->smu_registers->address_modifier) == 0) { - void __iomem *scu_reg_base = scic->scu_registers; + if (readl(&ihost->smu_registers->address_modifier) == 0) { + void __iomem *scu_reg_base = ihost->scu_registers; /* get the bytes of data from the Address == BAR1 + 20002Ch + (256*TCi) where * BAR1 is the scu_registers @@ -663,11 +662,11 @@ enum sci_status scic_sds_request_start(struct isci_request *ireq) { enum sci_base_request_states state; struct scu_task_context *tc = ireq->tc; - struct scic_sds_controller *scic = ireq->owning_controller; + struct isci_host *ihost = ireq->owning_controller; state = ireq->sm.current_state_id; if (state != SCI_REQ_CONSTRUCTED) { - dev_warn(scic_to_dev(scic), + dev_warn(&ihost->pdev->dev, "%s: SCIC IO Request requested to start while in wrong " "state %d\n", __func__, state); return SCI_FAILURE_INVALID_STATE; @@ -749,7 +748,7 @@ scic_sds_io_request_terminate(struct isci_request *ireq) return SCI_SUCCESS; case SCI_REQ_COMPLETED: default: - dev_warn(scic_to_dev(ireq->owning_controller), + dev_warn(&ireq->owning_controller->pdev->dev, "%s: 
SCIC IO Request requested to abort while in wrong " "state %d\n", __func__, @@ -763,7 +762,7 @@ scic_sds_io_request_terminate(struct isci_request *ireq) enum sci_status scic_sds_request_complete(struct isci_request *ireq) { enum sci_base_request_states state; - struct scic_sds_controller *scic = ireq->owning_controller; + struct isci_host *ihost = ireq->owning_controller; state = ireq->sm.current_state_id; if (WARN_ONCE(state != SCI_REQ_COMPLETED, @@ -771,7 +770,7 @@ enum sci_status scic_sds_request_complete(struct isci_request *ireq) return SCI_FAILURE_INVALID_STATE; if (ireq->saved_rx_frame_index != SCU_INVALID_FRAME_INDEX) - scic_sds_controller_release_frame(scic, + scic_sds_controller_release_frame(ihost, ireq->saved_rx_frame_index); /* XXX can we just stop the machine and remove the 'final' state? */ @@ -783,12 +782,12 @@ enum sci_status scic_sds_io_request_event_handler(struct isci_request *ireq, u32 event_code) { enum sci_base_request_states state; - struct scic_sds_controller *scic = ireq->owning_controller; + struct isci_host *ihost = ireq->owning_controller; state = ireq->sm.current_state_id; if (state != SCI_REQ_STP_PIO_DATA_IN) { - dev_warn(scic_to_dev(scic), "%s: (%x) in wrong state %d\n", + dev_warn(&ihost->pdev->dev, "%s: (%x) in wrong state %d\n", __func__, event_code, state); return SCI_FAILURE_INVALID_STATE; @@ -802,7 +801,7 @@ enum sci_status scic_sds_io_request_event_handler(struct isci_request *ireq, sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME); return SCI_SUCCESS; default: - dev_err(scic_to_dev(scic), + dev_err(&ihost->pdev->dev, "%s: pio request unexpected event %#x\n", __func__, event_code); @@ -1024,7 +1023,7 @@ static enum sci_status ssp_task_request_await_tc_event(struct isci_request *ireq * There is a potential for receiving multiple task responses if * we decide to send the task IU again. */ - dev_warn(scic_to_dev(ireq->owning_controller), + dev_warn(&ireq->owning_controller->pdev->dev, "%s: TaskRequest:0x%p CompletionCode:%x - " "ACK/NAK timeout\n", __func__, ireq, completion_code); @@ -1073,7 +1072,7 @@ smp_request_await_response_tc_event(struct isci_request *ireq, * response within 2 ms. This causes our hardware break * the connection and set TC completion with one of * these SMP_XXX_XX_ERR status. For these type of error, - * we ask scic user to retry the request. + * we ask ihost user to retry the request. 
*/ scic_sds_request_set_status(ireq, SCU_TASK_DONE_SMP_RESP_TO_ERR, SCI_FAILURE_RETRY_REQUIRED); @@ -1451,18 +1450,18 @@ static void scic_sds_stp_request_udma_complete_request( static enum sci_status scic_sds_stp_request_udma_general_frame_handler(struct isci_request *ireq, u32 frame_index) { - struct scic_sds_controller *scic = ireq->owning_controller; + struct isci_host *ihost = ireq->owning_controller; struct dev_to_host_fis *frame_header; enum sci_status status; u32 *frame_buffer; - status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control, + status = scic_sds_unsolicited_frame_control_get_header(&ihost->uf_control, frame_index, (void **)&frame_header); if ((status == SCI_SUCCESS) && (frame_header->fis_type == FIS_REGD2H)) { - scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control, + scic_sds_unsolicited_frame_control_get_buffer(&ihost->uf_control, frame_index, (void **)&frame_buffer); @@ -1471,7 +1470,7 @@ static enum sci_status scic_sds_stp_request_udma_general_frame_handler(struct is frame_buffer); } - scic_sds_controller_release_frame(scic, frame_index); + scic_sds_controller_release_frame(ihost, frame_index); return status; } @@ -1480,7 +1479,7 @@ enum sci_status scic_sds_io_request_frame_handler(struct isci_request *ireq, u32 frame_index) { - struct scic_sds_controller *scic = ireq->owning_controller; + struct isci_host *ihost = ireq->owning_controller; struct isci_stp_request *stp_req = &ireq->stp.req; enum sci_base_request_states state; enum sci_status status; @@ -1492,7 +1491,7 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, struct ssp_frame_hdr ssp_hdr; void *frame_header; - scic_sds_unsolicited_frame_control_get_header(&scic->uf_control, + scic_sds_unsolicited_frame_control_get_header(&ihost->uf_control, frame_index, &frame_header); @@ -1503,7 +1502,7 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, struct ssp_response_iu *resp_iu; ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32); - scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control, + scic_sds_unsolicited_frame_control_get_buffer(&ihost->uf_control, frame_index, (void **)&resp_iu); @@ -1522,7 +1521,7 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, SCI_SUCCESS); } else { /* not a response frame, why did it get forwarded? 
*/ - dev_err(scic_to_dev(scic), + dev_err(&ihost->pdev->dev, "%s: SCIC IO Request 0x%p received unexpected " "frame %d type 0x%02x\n", __func__, ireq, frame_index, ssp_hdr.frame_type); @@ -1532,7 +1531,7 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, * In any case we are done with this frame buffer return it to * the controller */ - scic_sds_controller_release_frame(scic, frame_index); + scic_sds_controller_release_frame(ihost, frame_index); return SCI_SUCCESS; } @@ -1540,14 +1539,14 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, case SCI_REQ_TASK_WAIT_TC_RESP: scic_sds_io_request_copy_response(ireq); sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); - scic_sds_controller_release_frame(scic,frame_index); + scic_sds_controller_release_frame(ihost,frame_index); return SCI_SUCCESS; case SCI_REQ_SMP_WAIT_RESP: { struct smp_resp *rsp_hdr = &ireq->smp.rsp; void *frame_header; - scic_sds_unsolicited_frame_control_get_header(&scic->uf_control, + scic_sds_unsolicited_frame_control_get_header(&ihost->uf_control, frame_index, &frame_header); @@ -1558,7 +1557,7 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, if (rsp_hdr->frame_type == SMP_RESPONSE) { void *smp_resp; - scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control, + scic_sds_unsolicited_frame_control_get_buffer(&ihost->uf_control, frame_index, &smp_resp); @@ -1577,7 +1576,7 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, * This was not a response frame why did it get * forwarded? */ - dev_err(scic_to_dev(scic), + dev_err(&ihost->pdev->dev, "%s: SCIC SMP Request 0x%p received unexpected " "frame %d type 0x%02x\n", __func__, @@ -1592,7 +1591,7 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); } - scic_sds_controller_release_frame(scic, frame_index); + scic_sds_controller_release_frame(ihost, frame_index); return SCI_SUCCESS; } @@ -1619,12 +1618,12 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, struct dev_to_host_fis *frame_header; u32 *frame_buffer; - status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control, + status = scic_sds_unsolicited_frame_control_get_header(&ihost->uf_control, frame_index, (void **)&frame_header); if (status != SCI_SUCCESS) { - dev_err(scic_to_dev(scic), + dev_err(&ihost->pdev->dev, "%s: SCIC IO Request 0x%p could not get frame " "header for frame index %d, status %x\n", __func__, @@ -1637,7 +1636,7 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, switch (frame_header->fis_type) { case FIS_REGD2H: - scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control, + scic_sds_unsolicited_frame_control_get_buffer(&ihost->uf_control, frame_index, (void **)&frame_buffer); @@ -1651,7 +1650,7 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, break; default: - dev_warn(scic_to_dev(scic), + dev_warn(&ihost->pdev->dev, "%s: IO Request:0x%p Frame Id:%d protocol " "violation occurred\n", __func__, stp_req, frame_index); @@ -1664,7 +1663,7 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); /* Frame has been decoded return it to the controller */ - scic_sds_controller_release_frame(scic, frame_index); + scic_sds_controller_release_frame(ihost, frame_index); return status; } @@ -1674,12 +1673,12 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, struct dev_to_host_fis *frame_header; u32 *frame_buffer; - status = 
scic_sds_unsolicited_frame_control_get_header(&scic->uf_control, + status = scic_sds_unsolicited_frame_control_get_header(&ihost->uf_control, frame_index, (void **)&frame_header); if (status != SCI_SUCCESS) { - dev_err(scic_to_dev(scic), + dev_err(&ihost->pdev->dev, "%s: SCIC IO Request 0x%p could not get frame " "header for frame index %d, status %x\n", __func__, stp_req, frame_index, status); @@ -1689,7 +1688,7 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, switch (frame_header->fis_type) { case FIS_PIO_SETUP: /* Get from the frame buffer the PIO Setup Data */ - scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control, + scic_sds_unsolicited_frame_control_get_buffer(&ihost->uf_control, frame_index, (void **)&frame_buffer); @@ -1736,7 +1735,7 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, * FIS when it is still busy? Do nothing since * we are still in the right state. */ - dev_dbg(scic_to_dev(scic), + dev_dbg(&ihost->pdev->dev, "%s: SCIC PIO Request 0x%p received " "D2H Register FIS with BSY status " "0x%x\n", @@ -1746,7 +1745,7 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, break; } - scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control, + scic_sds_unsolicited_frame_control_get_buffer(&ihost->uf_control, frame_index, (void **)&frame_buffer); @@ -1767,7 +1766,7 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, } /* Frame is decoded return it to the controller */ - scic_sds_controller_release_frame(scic, frame_index); + scic_sds_controller_release_frame(ihost, frame_index); return status; } @@ -1776,12 +1775,12 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, struct dev_to_host_fis *frame_header; struct sata_fis_data *frame_buffer; - status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control, + status = scic_sds_unsolicited_frame_control_get_header(&ihost->uf_control, frame_index, (void **)&frame_header); if (status != SCI_SUCCESS) { - dev_err(scic_to_dev(scic), + dev_err(&ihost->pdev->dev, "%s: SCIC IO Request 0x%p could not get frame " "header for frame index %d, status %x\n", __func__, @@ -1792,7 +1791,7 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, } if (frame_header->fis_type != FIS_DATA) { - dev_err(scic_to_dev(scic), + dev_err(&ihost->pdev->dev, "%s: SCIC PIO Request 0x%p received frame %d " "with fis type 0x%02x when expecting a data " "fis.\n", @@ -1808,7 +1807,7 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); /* Frame is decoded return it to the controller */ - scic_sds_controller_release_frame(scic, frame_index); + scic_sds_controller_release_frame(ihost, frame_index); return status; } @@ -1816,7 +1815,7 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, ireq->saved_rx_frame_index = frame_index; stp_req->pio_len = 0; } else { - scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control, + scic_sds_unsolicited_frame_control_get_buffer(&ihost->uf_control, frame_index, (void **)&frame_buffer); @@ -1824,7 +1823,7 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, (u8 *)frame_buffer); /* Frame is decoded return it to the controller */ - scic_sds_controller_release_frame(scic, frame_index); + scic_sds_controller_release_frame(ihost, frame_index); } /* Check for the end of the transfer, are there more @@ -1849,11 +1848,11 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, struct dev_to_host_fis *frame_header; u32 *frame_buffer; - status = 
scic_sds_unsolicited_frame_control_get_header(&scic->uf_control, + status = scic_sds_unsolicited_frame_control_get_header(&ihost->uf_control, frame_index, (void **)&frame_header); if (status != SCI_SUCCESS) { - dev_err(scic_to_dev(scic), + dev_err(&ihost->pdev->dev, "%s: SCIC IO Request 0x%p could not get frame " "header for frame index %d, status %x\n", __func__, @@ -1865,7 +1864,7 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, switch (frame_header->fis_type) { case FIS_REGD2H: - scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control, + scic_sds_unsolicited_frame_control_get_buffer(&ihost->uf_control, frame_index, (void **)&frame_buffer); @@ -1880,7 +1879,7 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, break; default: - dev_warn(scic_to_dev(scic), + dev_warn(&ihost->pdev->dev, "%s: IO Request:0x%p Frame Id:%d protocol " "violation occurred\n", __func__, @@ -1896,7 +1895,7 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); /* Frame has been decoded return it to the controller */ - scic_sds_controller_release_frame(scic, frame_index); + scic_sds_controller_release_frame(ihost, frame_index); return status; } @@ -1905,18 +1904,18 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, * TODO: Is it even possible to get an unsolicited frame in the * aborting state? */ - scic_sds_controller_release_frame(scic, frame_index); + scic_sds_controller_release_frame(ihost, frame_index); return SCI_SUCCESS; default: - dev_warn(scic_to_dev(scic), + dev_warn(&ihost->pdev->dev, "%s: SCIC IO Request given unexpected frame %x while " "in state %d\n", __func__, frame_index, state); - scic_sds_controller_release_frame(scic, frame_index); + scic_sds_controller_release_frame(ihost, frame_index); return SCI_FAILURE_INVALID_STATE; } } @@ -2042,7 +2041,7 @@ scic_sds_io_request_tc_completion(struct isci_request *ireq, u32 completion_code) { enum sci_base_request_states state; - struct scic_sds_controller *scic = ireq->owning_controller; + struct isci_host *ihost = ireq->owning_controller; state = ireq->sm.current_state_id; @@ -2089,7 +2088,7 @@ scic_sds_io_request_tc_completion(struct isci_request *ireq, completion_code); default: - dev_warn(scic_to_dev(scic), + dev_warn(&ihost->pdev->dev, "%s: SCIC IO Request given task completion " "notification %x while in wrong state %d\n", __func__, @@ -2480,7 +2479,7 @@ static void isci_task_save_for_upper_layer_completion( } } -static void isci_request_io_request_complete(struct isci_host *isci_host, +static void isci_request_io_request_complete(struct isci_host *ihost, struct isci_request *request, enum sci_io_status completion_status) { @@ -2495,7 +2494,7 @@ static void isci_request_io_request_complete(struct isci_host *isci_host, enum isci_completion_selection complete_to_host = isci_perform_normal_io_completion; - dev_dbg(&isci_host->pdev->dev, + dev_dbg(&ihost->pdev->dev, "%s: request = %p, task = %p,\n" "task->data_dir = %d completion_status = 0x%x\n", __func__, @@ -2616,7 +2615,7 @@ static void isci_request_io_request_complete(struct isci_host *isci_host, switch (completion_status) { case SCI_IO_FAILURE_RESPONSE_VALID: - dev_dbg(&isci_host->pdev->dev, + dev_dbg(&ihost->pdev->dev, "%s: SCI_IO_FAILURE_RESPONSE_VALID (%p/%p)\n", __func__, request, @@ -2631,17 +2630,17 @@ static void isci_request_io_request_complete(struct isci_host *isci_host, /* crack the iu response buffer. 
*/ resp_iu = &request->ssp.rsp; isci_request_process_response_iu(task, resp_iu, - &isci_host->pdev->dev); + &ihost->pdev->dev); } else if (SAS_PROTOCOL_SMP == task->task_proto) { - dev_err(&isci_host->pdev->dev, + dev_err(&ihost->pdev->dev, "%s: SCI_IO_FAILURE_RESPONSE_VALID: " "SAS_PROTOCOL_SMP protocol\n", __func__); } else - dev_err(&isci_host->pdev->dev, + dev_err(&ihost->pdev->dev, "%s: unknown protocol\n", __func__); /* use the task status set in the task struct by the @@ -2662,7 +2661,7 @@ static void isci_request_io_request_complete(struct isci_host *isci_host, if (task->task_proto == SAS_PROTOCOL_SMP) { void *rsp = &request->smp.rsp; - dev_dbg(&isci_host->pdev->dev, + dev_dbg(&ihost->pdev->dev, "%s: SMP protocol completion\n", __func__); @@ -2687,20 +2686,20 @@ static void isci_request_io_request_complete(struct isci_host *isci_host, if (task->task_status.residual != 0) status = SAS_DATA_UNDERRUN; - dev_dbg(&isci_host->pdev->dev, + dev_dbg(&ihost->pdev->dev, "%s: SCI_IO_SUCCESS_IO_DONE_EARLY %d\n", __func__, status); } else - dev_dbg(&isci_host->pdev->dev, + dev_dbg(&ihost->pdev->dev, "%s: SCI_IO_SUCCESS\n", __func__); break; case SCI_IO_FAILURE_TERMINATED: - dev_dbg(&isci_host->pdev->dev, + dev_dbg(&ihost->pdev->dev, "%s: SCI_IO_FAILURE_TERMINATED (%p/%p)\n", __func__, request, @@ -2768,7 +2767,7 @@ static void isci_request_io_request_complete(struct isci_host *isci_host, default: /* Catch any otherwise unhandled error codes here. */ - dev_warn(&isci_host->pdev->dev, + dev_warn(&ihost->pdev->dev, "%s: invalid completion code: 0x%x - " "isci_request = %p\n", __func__, completion_status, request); @@ -2802,11 +2801,11 @@ static void isci_request_io_request_complete(struct isci_host *isci_host, break; if (task->num_scatter == 0) /* 0 indicates a single dma address */ - dma_unmap_single(&isci_host->pdev->dev, + dma_unmap_single(&ihost->pdev->dev, request->zero_scatter_daddr, task->total_xfer_len, task->data_dir); else /* unmap the sgl dma addresses */ - dma_unmap_sg(&isci_host->pdev->dev, task->scatter, + dma_unmap_sg(&ihost->pdev->dev, task->scatter, request->num_sg_entries, task->data_dir); break; case SAS_PROTOCOL_SMP: { @@ -2814,7 +2813,7 @@ static void isci_request_io_request_complete(struct isci_host *isci_host, struct smp_req *smp_req; void *kaddr; - dma_unmap_sg(&isci_host->pdev->dev, sg, 1, DMA_TO_DEVICE); + dma_unmap_sg(&ihost->pdev->dev, sg, 1, DMA_TO_DEVICE); /* need to swab it back in case the command buffer is re-used */ kaddr = kmap_atomic(sg_page(sg), KM_IRQ0); @@ -2828,14 +2827,12 @@ static void isci_request_io_request_complete(struct isci_host *isci_host, } /* Put the completed request on the correct list */ - isci_task_save_for_upper_layer_completion(isci_host, request, response, + isci_task_save_for_upper_layer_completion(ihost, request, response, status, complete_to_host ); /* complete the io request to the core. 
*/ - scic_controller_complete_io(&isci_host->sci, - request->target_device, - request); + scic_controller_complete_io(ihost, request->target_device, request); isci_put_device(idev); /* set terminated handle so it cannot be completed or @@ -2885,8 +2882,7 @@ static void scic_sds_request_started_state_enter(struct sci_base_state_machine * static void scic_sds_request_completed_state_enter(struct sci_base_state_machine *sm) { struct isci_request *ireq = container_of(sm, typeof(*ireq), sm); - struct scic_sds_controller *scic = ireq->owning_controller; - struct isci_host *ihost = scic_to_ihost(scic); + struct isci_host *ihost = ireq->owning_controller; /* Tell the SCI_USER that the IO request is complete */ if (!test_bit(IREQ_TMF, &ireq->flags)) @@ -2985,7 +2981,7 @@ static const struct sci_base_state scic_sds_request_state_table[] = { }; static void -scic_sds_general_request_construct(struct scic_sds_controller *scic, +scic_sds_general_request_construct(struct isci_host *ihost, struct isci_remote_device *idev, struct isci_request *ireq) { @@ -3001,7 +2997,7 @@ scic_sds_general_request_construct(struct scic_sds_controller *scic, } static enum sci_status -scic_io_request_construct(struct scic_sds_controller *scic, +scic_io_request_construct(struct isci_host *ihost, struct isci_remote_device *idev, struct isci_request *ireq) { @@ -3009,7 +3005,7 @@ scic_io_request_construct(struct scic_sds_controller *scic, enum sci_status status = SCI_SUCCESS; /* Build the common part of the request */ - scic_sds_general_request_construct(scic, idev, ireq); + scic_sds_general_request_construct(ihost, idev, ireq); if (idev->rnc.remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) return SCI_FAILURE_INVALID_REMOTE_DEVICE; @@ -3028,7 +3024,7 @@ scic_io_request_construct(struct scic_sds_controller *scic, return status; } -enum sci_status scic_task_request_construct(struct scic_sds_controller *scic, +enum sci_status scic_task_request_construct(struct isci_host *ihost, struct isci_remote_device *idev, u16 io_tag, struct isci_request *ireq) { @@ -3036,7 +3032,7 @@ enum sci_status scic_task_request_construct(struct scic_sds_controller *scic, enum sci_status status = SCI_SUCCESS; /* Build the common part of the request */ - scic_sds_general_request_construct(scic, idev, ireq); + scic_sds_general_request_construct(ihost, idev, ireq); if (dev->dev_type == SAS_END_DEV || dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) { @@ -3156,7 +3152,7 @@ scic_io_request_construct_smp(struct device *dev, task_context->initiator_request = 1; task_context->connection_rate = idev->connection_rate; task_context->protocol_engine_index = - scic_sds_controller_get_protocol_engine_group(scic); + scic_sds_controller_get_protocol_engine_group(ihost); task_context->logical_port_index = scic_sds_port_get_index(iport); task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SMP; task_context->abort = 0; @@ -3199,7 +3195,7 @@ scic_io_request_construct_smp(struct device *dev, task_context->task_phase = 0; ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC | - (scic_sds_controller_get_protocol_engine_group(scic) << + (scic_sds_controller_get_protocol_engine_group(ihost) << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) | (scic_sds_port_get_index(iport) << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) | @@ -3245,7 +3241,7 @@ static enum sci_status isci_smp_request_build(struct isci_request *ireq) /** * isci_io_request_build() - This function builds the io request object. 
- * @isci_host: This parameter specifies the ISCI host object + * @ihost: This parameter specifies the ISCI host object * @request: This parameter points to the isci_request object allocated in the * request construct function. * @sci_device: This parameter is the handle for the sci core's remote device @@ -3253,14 +3249,14 @@ static enum sci_status isci_smp_request_build(struct isci_request *ireq) * * SCI_SUCCESS on successfull completion, or specific failure code. */ -static enum sci_status isci_io_request_build(struct isci_host *isci_host, +static enum sci_status isci_io_request_build(struct isci_host *ihost, struct isci_request *request, struct isci_remote_device *idev) { enum sci_status status = SCI_SUCCESS; struct sas_task *task = isci_request_access_task(request); - dev_dbg(&isci_host->pdev->dev, + dev_dbg(&ihost->pdev->dev, "%s: idev = 0x%p; request = %p, " "num_scatter = %d\n", __func__, @@ -3277,7 +3273,7 @@ static enum sci_status isci_io_request_build(struct isci_host *isci_host, !(SAS_PROTOCOL_SMP & task->task_proto)) { request->num_sg_entries = dma_map_sg( - &isci_host->pdev->dev, + &ihost->pdev->dev, task->scatter, task->num_scatter, task->data_dir @@ -3287,10 +3283,10 @@ static enum sci_status isci_io_request_build(struct isci_host *isci_host, return SCI_FAILURE_INSUFFICIENT_RESOURCES; } - status = scic_io_request_construct(&isci_host->sci, idev, request); + status = scic_io_request_construct(ihost, idev, request); if (status != SCI_SUCCESS) { - dev_warn(&isci_host->pdev->dev, + dev_warn(&ihost->pdev->dev, "%s: failed request construct\n", __func__); return SCI_FAILURE; @@ -3309,7 +3305,7 @@ static enum sci_status isci_io_request_build(struct isci_host *isci_host, status = isci_request_stp_request_construct(request); break; default: - dev_warn(&isci_host->pdev->dev, + dev_warn(&ihost->pdev->dev, "%s: unknown protocol\n", __func__); return SCI_FAILURE; } @@ -3392,7 +3388,7 @@ int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *ide * request was built that way (ie. * ireq->is_task_management_request is false). */ - status = scic_controller_start_task(&ihost->sci, + status = scic_controller_start_task(ihost, idev, ireq); } else { @@ -3400,7 +3396,7 @@ int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *ide } } else { /* send the request, let the core assign the IO TAG. 
*/ - status = scic_controller_start_io(&ihost->sci, idev, + status = scic_controller_start_io(ihost, idev, ireq); } diff --git a/drivers/scsi/isci/request.h b/drivers/scsi/isci/request.h index ca64ea207ac..0cafcead7a0 100644 --- a/drivers/scsi/isci/request.h +++ b/drivers/scsi/isci/request.h @@ -145,7 +145,7 @@ struct isci_request { */ struct completion *io_request_completion; struct sci_base_state_machine sm; - struct scic_sds_controller *owning_controller; + struct isci_host *owning_controller; struct isci_remote_device *target_device; u16 io_tag; enum sci_request_protocol protocol; @@ -500,7 +500,7 @@ int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *ide void isci_terminate_pending_requests(struct isci_host *ihost, struct isci_remote_device *idev); enum sci_status -scic_task_request_construct(struct scic_sds_controller *scic, +scic_task_request_construct(struct isci_host *ihost, struct isci_remote_device *idev, u16 io_tag, struct isci_request *ireq); diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c index 89b01eef44b..3a1fc55a755 100644 --- a/drivers/scsi/isci/task.c +++ b/drivers/scsi/isci/task.c @@ -257,7 +257,7 @@ static struct isci_request *isci_task_request_build(struct isci_host *ihost, return NULL; /* let the core do it's construct. */ - status = scic_task_request_construct(&ihost->sci, idev, tag, + status = scic_task_request_construct(ihost, idev, tag, ireq); if (status != SCI_SUCCESS) { @@ -332,7 +332,7 @@ int isci_task_execute_tmf(struct isci_host *ihost, spin_lock_irqsave(&ihost->scic_lock, flags); /* start the TMF io. */ - status = scic_controller_start_task(&ihost->sci, idev, ireq); + status = scic_controller_start_task(ihost, idev, ireq); if (status != SCI_TASK_SUCCESS) { dev_warn(&ihost->pdev->dev, @@ -364,7 +364,7 @@ int isci_task_execute_tmf(struct isci_host *ihost, if (tmf->cb_state_func != NULL) tmf->cb_state_func(isci_tmf_timed_out, tmf, tmf->cb_data); - scic_controller_terminate_request(&ihost->sci, + scic_controller_terminate_request(ihost, idev, ireq); @@ -514,15 +514,14 @@ static void isci_request_cleanup_completed_loiterer( * request, and wait for it to complete. This function must only be called * from a thread that can wait. Note that the request is terminated and * completed (back to the host, if started there). - * @isci_host: This SCU. + * @ihost: This SCU. * @idev: The target. * @isci_request: The I/O request to be terminated. 
* */ -static void isci_terminate_request_core( - struct isci_host *isci_host, - struct isci_remote_device *idev, - struct isci_request *isci_request) +static void isci_terminate_request_core(struct isci_host *ihost, + struct isci_remote_device *idev, + struct isci_request *isci_request) { enum sci_status status = SCI_SUCCESS; bool was_terminated = false; @@ -533,11 +532,11 @@ static void isci_terminate_request_core( struct completion *io_request_completion; struct sas_task *task; - dev_dbg(&isci_host->pdev->dev, + dev_dbg(&ihost->pdev->dev, "%s: device = %p; request = %p\n", __func__, idev, isci_request); - spin_lock_irqsave(&isci_host->scic_lock, flags); + spin_lock_irqsave(&ihost->scic_lock, flags); io_request_completion = isci_request->io_request_completion; @@ -557,12 +556,11 @@ static void isci_terminate_request_core( if (!test_bit(IREQ_TERMINATED, &isci_request->flags)) { was_terminated = true; needs_cleanup_handling = true; - status = scic_controller_terminate_request( - &isci_host->sci, - idev, - isci_request); + status = scic_controller_terminate_request(ihost, + idev, + isci_request); } - spin_unlock_irqrestore(&isci_host->scic_lock, flags); + spin_unlock_irqrestore(&ihost->scic_lock, flags); /* * The only time the request to terminate will @@ -570,7 +568,7 @@ static void isci_terminate_request_core( * being aborted. */ if (status != SCI_SUCCESS) { - dev_err(&isci_host->pdev->dev, + dev_err(&ihost->pdev->dev, "%s: scic_controller_terminate_request" " returned = 0x%x\n", __func__, status); @@ -579,7 +577,7 @@ static void isci_terminate_request_core( } else { if (was_terminated) { - dev_dbg(&isci_host->pdev->dev, + dev_dbg(&ihost->pdev->dev, "%s: before completion wait (%p/%p)\n", __func__, isci_request, io_request_completion); @@ -593,7 +591,7 @@ static void isci_terminate_request_core( if (!termination_completed) { /* The request to terminate has timed out. */ - spin_lock_irqsave(&isci_host->scic_lock, + spin_lock_irqsave(&ihost->scic_lock, flags); /* Check for state changes. */ @@ -623,12 +621,12 @@ static void isci_terminate_request_core( } else termination_completed = 1; - spin_unlock_irqrestore(&isci_host->scic_lock, + spin_unlock_irqrestore(&ihost->scic_lock, flags); if (!termination_completed) { - dev_err(&isci_host->pdev->dev, + dev_err(&ihost->pdev->dev, "%s: *** Timeout waiting for " "termination(%p/%p)\n", __func__, io_request_completion, @@ -642,7 +640,7 @@ static void isci_terminate_request_core( } } if (termination_completed) - dev_dbg(&isci_host->pdev->dev, + dev_dbg(&ihost->pdev->dev, "%s: after completion wait (%p/%p)\n", __func__, isci_request, io_request_completion); } @@ -678,7 +676,7 @@ static void isci_terminate_request_core( } if (needs_cleanup_handling) isci_request_cleanup_completed_loiterer( - isci_host, idev, isci_request, task); + ihost, idev, isci_request, task); } } @@ -1253,7 +1251,7 @@ isci_task_request_complete(struct isci_host *ihost, /* PRINT_TMF( ((struct isci_tmf *)request->task)); */ tmf_complete = tmf->complete; - scic_controller_complete_io(&ihost->sci, ireq->target_device, ireq); + scic_controller_complete_io(ihost, ireq->target_device, ireq); /* set the 'terminated' flag handle to make sure it cannot be terminated * or completed again. 
*/ diff --git a/drivers/scsi/isci/unsolicited_frame_control.c b/drivers/scsi/isci/unsolicited_frame_control.c index 680582d8cde..a0e6f89fc6a 100644 --- a/drivers/scsi/isci/unsolicited_frame_control.c +++ b/drivers/scsi/isci/unsolicited_frame_control.c @@ -57,9 +57,9 @@ #include "unsolicited_frame_control.h" #include "registers.h" -int scic_sds_unsolicited_frame_control_construct(struct scic_sds_controller *scic) +int scic_sds_unsolicited_frame_control_construct(struct isci_host *ihost) { - struct scic_sds_unsolicited_frame_control *uf_control = &scic->uf_control; + struct scic_sds_unsolicited_frame_control *uf_control = &ihost->uf_control; struct scic_sds_unsolicited_frame *uf; u32 buf_len, header_len, i; dma_addr_t dma; @@ -79,7 +79,7 @@ int scic_sds_unsolicited_frame_control_construct(struct scic_sds_controller *sci * memory descriptor entry. The headers and address table will be * placed after the buffers. */ - virt = dmam_alloc_coherent(scic_to_dev(scic), size, &dma, GFP_KERNEL); + virt = dmam_alloc_coherent(&ihost->pdev->dev, size, &dma, GFP_KERNEL); if (!virt) return -ENOMEM; diff --git a/drivers/scsi/isci/unsolicited_frame_control.h b/drivers/scsi/isci/unsolicited_frame_control.h index 2954904f025..c0285a3db56 100644 --- a/drivers/scsi/isci/unsolicited_frame_control.h +++ b/drivers/scsi/isci/unsolicited_frame_control.h @@ -214,9 +214,9 @@ struct scic_sds_unsolicited_frame_control { }; -struct scic_sds_controller; +struct isci_host; -int scic_sds_unsolicited_frame_control_construct(struct scic_sds_controller *scic); +int scic_sds_unsolicited_frame_control_construct(struct isci_host *ihost); enum sci_status scic_sds_unsolicited_frame_control_get_header( struct scic_sds_unsolicited_frame_control *uf_control, -- cgit v1.2.3-70-g09d2 From 89a7301f21fb00e753089671eb9e4132aab8ea08 Mon Sep 17 00:00:00 2001 From: Dan Williams <dan.j.williams@intel.com> Date: Thu, 30 Jun 2011 19:14:33 -0700 Subject: isci: retire scic_sds_ and scic_ prefixes The distinction between scic_sds_ scic_ and sci_ are no longer relevant so just unify the prefixes on sci_. The distinction between isci_ and sci_ is historically significant, and useful for comparing the old 'core' to the current Linux driver. 'sci_' represents the former core as well as the routines that are closer to the hardware and protocol than their 'isci_' brethren. sci == sas controller interface. Also unwind the 'sds1' out of the parameter structs. 
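The conversion pattern running through these hunks is mechanical: code that
used to carry a struct scic_sds_controller pointer and translate it with
scic_to_ihost() now either stores the struct isci_host pointer directly or
recovers it via container_of() on an embedded member, as in the
mpc_agent_timeout()/apc_agent_timeout() changes above. A minimal userspace
sketch of that container_of() idiom (the types below are simplified
stand-ins for illustration, not the real driver structures):

    #include <stddef.h>
    #include <stdio.h>

    /* same offset arithmetic as the kernel's container_of() */
    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct port_config_agent {
            int timer_armed;
    };

    struct example_host {
            int id;
            struct port_config_agent port_agent;  /* embedded member */
    };

    static void agent_timeout(struct port_config_agent *agent)
    {
            /* recover the enclosing host from the embedded member */
            struct example_host *host =
                    container_of(agent, struct example_host, port_agent);

            printf("timeout for host %d\n", host->id);
    }

    int main(void)
    {
            struct example_host host = { .id = 7 };

            agent_timeout(&host.port_agent);
            return 0;
    }

Because the agent is embedded in the host object the member offset is a
compile-time constant, so no back-pointer field or scic_to_ihost()-style
lookup helper is needed.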
Reported-by: Christoph Hellwig <hch@lst.de> Signed-off-by: Dan Williams <dan.j.williams@intel.com> --- drivers/scsi/isci/host.c | 742 ++++++++++---------------- drivers/scsi/isci/host.h | 93 ++-- drivers/scsi/isci/init.c | 4 +- drivers/scsi/isci/isci.h | 6 +- drivers/scsi/isci/phy.c | 385 ++++++------- drivers/scsi/isci/phy.h | 70 +-- drivers/scsi/isci/port.c | 551 +++++++------------ drivers/scsi/isci/port.h | 68 ++- drivers/scsi/isci/port_config.c | 132 ++--- drivers/scsi/isci/probe_roms.c | 16 +- drivers/scsi/isci/probe_roms.h | 38 +- drivers/scsi/isci/remote_device.c | 300 +++++------ drivers/scsi/isci/remote_device.h | 90 ++-- drivers/scsi/isci/remote_node_context.c | 198 ++++--- drivers/scsi/isci/remote_node_context.h | 30 +- drivers/scsi/isci/remote_node_table.c | 114 ++-- drivers/scsi/isci/remote_node_table.h | 16 +- drivers/scsi/isci/request.c | 360 ++++++------- drivers/scsi/isci/request.h | 52 +- drivers/scsi/isci/sata.c | 4 +- drivers/scsi/isci/task.c | 24 +- drivers/scsi/isci/unsolicited_frame_control.c | 57 +- drivers/scsi/isci/unsolicited_frame_control.h | 42 +- 23 files changed, 1449 insertions(+), 1943 deletions(-) (limited to 'drivers/scsi/isci/request.h') diff --git a/drivers/scsi/isci/host.c b/drivers/scsi/isci/host.c index bb298f8f609..f31f64e4b71 100644 --- a/drivers/scsi/isci/host.c +++ b/drivers/scsi/isci/host.c @@ -180,8 +180,7 @@ void sci_change_state(struct sci_base_state_machine *sm, u32 next_state) handler(sm); } -static bool scic_sds_controller_completion_queue_has_entries( - struct isci_host *ihost) +static bool sci_controller_completion_queue_has_entries(struct isci_host *ihost) { u32 get_value = ihost->completion_queue_get; u32 get_index = get_value & SMU_COMPLETION_QUEUE_GET_POINTER_MASK; @@ -193,9 +192,9 @@ static bool scic_sds_controller_completion_queue_has_entries( return false; } -static bool scic_sds_controller_isr(struct isci_host *ihost) +static bool sci_controller_isr(struct isci_host *ihost) { - if (scic_sds_controller_completion_queue_has_entries(ihost)) { + if (sci_controller_completion_queue_has_entries(ihost)) { return true; } else { /* @@ -219,13 +218,13 @@ irqreturn_t isci_msix_isr(int vec, void *data) { struct isci_host *ihost = data; - if (scic_sds_controller_isr(ihost)) + if (sci_controller_isr(ihost)) tasklet_schedule(&ihost->completion_tasklet); return IRQ_HANDLED; } -static bool scic_sds_controller_error_isr(struct isci_host *ihost) +static bool sci_controller_error_isr(struct isci_host *ihost) { u32 interrupt_status; @@ -252,35 +251,35 @@ static bool scic_sds_controller_error_isr(struct isci_host *ihost) return false; } -static void scic_sds_controller_task_completion(struct isci_host *ihost, - u32 completion_entry) +static void sci_controller_task_completion(struct isci_host *ihost, u32 ent) { - u32 index = SCU_GET_COMPLETION_INDEX(completion_entry); + u32 index = SCU_GET_COMPLETION_INDEX(ent); struct isci_request *ireq = ihost->reqs[index]; /* Make sure that we really want to process this IO request */ if (test_bit(IREQ_ACTIVE, &ireq->flags) && ireq->io_tag != SCI_CONTROLLER_INVALID_IO_TAG && ISCI_TAG_SEQ(ireq->io_tag) == ihost->io_request_sequence[index]) - /* Yep this is a valid io request pass it along to the io request handler */ - scic_sds_io_request_tc_completion(ireq, completion_entry); + /* Yep this is a valid io request pass it along to the + * io request handler + */ + sci_io_request_tc_completion(ireq, ent); } -static void scic_sds_controller_sdma_completion(struct isci_host *ihost, - u32 completion_entry) +static void 
sci_controller_sdma_completion(struct isci_host *ihost, u32 ent) { u32 index; struct isci_request *ireq; struct isci_remote_device *idev; - index = SCU_GET_COMPLETION_INDEX(completion_entry); + index = SCU_GET_COMPLETION_INDEX(ent); - switch (scu_get_command_request_type(completion_entry)) { + switch (scu_get_command_request_type(ent)) { case SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC: case SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_TC: ireq = ihost->reqs[index]; dev_warn(&ihost->pdev->dev, "%s: %x for io request %p\n", - __func__, completion_entry, ireq); + __func__, ent, ireq); /* @todo For a post TC operation we need to fail the IO * request */ @@ -290,20 +289,19 @@ static void scic_sds_controller_sdma_completion(struct isci_host *ihost, case SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_RNC: idev = ihost->device_table[index]; dev_warn(&ihost->pdev->dev, "%s: %x for device %p\n", - __func__, completion_entry, idev); + __func__, ent, idev); /* @todo For a port RNC operation we need to fail the * device */ break; default: dev_warn(&ihost->pdev->dev, "%s: unknown completion type %x\n", - __func__, completion_entry); + __func__, ent); break; } } -static void scic_sds_controller_unsolicited_frame(struct isci_host *ihost, - u32 completion_entry) +static void sci_controller_unsolicited_frame(struct isci_host *ihost, u32 ent) { u32 index; u32 frame_index; @@ -314,36 +312,36 @@ static void scic_sds_controller_unsolicited_frame(struct isci_host *ihost, enum sci_status result = SCI_FAILURE; - frame_index = SCU_GET_FRAME_INDEX(completion_entry); + frame_index = SCU_GET_FRAME_INDEX(ent); frame_header = ihost->uf_control.buffers.array[frame_index].header; ihost->uf_control.buffers.array[frame_index].state = UNSOLICITED_FRAME_IN_USE; - if (SCU_GET_FRAME_ERROR(completion_entry)) { + if (SCU_GET_FRAME_ERROR(ent)) { /* * / @todo If the IAF frame or SIGNATURE FIS frame has an error will * / this cause a problem? We expect the phy initialization will * / fail if there is an error in the frame. */ - scic_sds_controller_release_frame(ihost, frame_index); + sci_controller_release_frame(ihost, frame_index); return; } if (frame_header->is_address_frame) { - index = SCU_GET_PROTOCOL_ENGINE_INDEX(completion_entry); + index = SCU_GET_PROTOCOL_ENGINE_INDEX(ent); iphy = &ihost->phys[index]; - result = scic_sds_phy_frame_handler(iphy, frame_index); + result = sci_phy_frame_handler(iphy, frame_index); } else { - index = SCU_GET_COMPLETION_INDEX(completion_entry); + index = SCU_GET_COMPLETION_INDEX(ent); if (index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) { /* * This is a signature fis or a frame from a direct attached SATA * device that has not yet been created. In either case forwared * the frame to the PE and let it take care of the frame data. 
*/ - index = SCU_GET_PROTOCOL_ENGINE_INDEX(completion_entry); + index = SCU_GET_PROTOCOL_ENGINE_INDEX(ent); iphy = &ihost->phys[index]; - result = scic_sds_phy_frame_handler(iphy, frame_index); + result = sci_phy_frame_handler(iphy, frame_index); } else { if (index < ihost->remote_node_entries) idev = ihost->device_table[index]; @@ -351,9 +349,9 @@ static void scic_sds_controller_unsolicited_frame(struct isci_host *ihost, idev = NULL; if (idev != NULL) - result = scic_sds_remote_device_frame_handler(idev, frame_index); + result = sci_remote_device_frame_handler(idev, frame_index); else - scic_sds_controller_release_frame(ihost, frame_index); + sci_controller_release_frame(ihost, frame_index); } } @@ -364,17 +362,16 @@ static void scic_sds_controller_unsolicited_frame(struct isci_host *ihost, } } -static void scic_sds_controller_event_completion(struct isci_host *ihost, - u32 completion_entry) +static void sci_controller_event_completion(struct isci_host *ihost, u32 ent) { struct isci_remote_device *idev; struct isci_request *ireq; struct isci_phy *iphy; u32 index; - index = SCU_GET_COMPLETION_INDEX(completion_entry); + index = SCU_GET_COMPLETION_INDEX(ent); - switch (scu_get_event_type(completion_entry)) { + switch (scu_get_event_type(ent)) { case SCU_EVENT_TYPE_SMU_COMMAND_ERROR: /* / @todo The driver did something wrong and we need to fix the condtion. */ dev_err(&ihost->pdev->dev, @@ -382,7 +379,7 @@ static void scic_sds_controller_event_completion(struct isci_host *ihost, "0x%x\n", __func__, ihost, - completion_entry); + ent); break; case SCU_EVENT_TYPE_SMU_PCQ_ERROR: @@ -396,21 +393,21 @@ static void scic_sds_controller_event_completion(struct isci_host *ihost, "event 0x%x\n", __func__, ihost, - completion_entry); + ent); break; case SCU_EVENT_TYPE_TRANSPORT_ERROR: ireq = ihost->reqs[index]; - scic_sds_io_request_event_handler(ireq, completion_entry); + sci_io_request_event_handler(ireq, ent); break; case SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT: - switch (scu_get_event_specifier(completion_entry)) { + switch (scu_get_event_specifier(ent)) { case SCU_EVENT_SPECIFIC_SMP_RESPONSE_NO_PE: case SCU_EVENT_SPECIFIC_TASK_TIMEOUT: ireq = ihost->reqs[index]; if (ireq != NULL) - scic_sds_io_request_event_handler(ireq, completion_entry); + sci_io_request_event_handler(ireq, ent); else dev_warn(&ihost->pdev->dev, "%s: SCIC Controller 0x%p received " @@ -418,14 +415,14 @@ static void scic_sds_controller_event_completion(struct isci_host *ihost, "that doesnt exist.\n", __func__, ihost, - completion_entry); + ent); break; case SCU_EVENT_SPECIFIC_IT_NEXUS_TIMEOUT: idev = ihost->device_table[index]; if (idev != NULL) - scic_sds_remote_device_event_handler(idev, completion_entry); + sci_remote_device_event_handler(idev, ent); else dev_warn(&ihost->pdev->dev, "%s: SCIC Controller 0x%p received " @@ -433,7 +430,7 @@ static void scic_sds_controller_event_completion(struct isci_host *ihost, "that doesnt exist.\n", __func__, ihost, - completion_entry); + ent); break; } @@ -448,9 +445,9 @@ static void scic_sds_controller_event_completion(struct isci_host *ihost, * direct error counter event to the phy object since that is where * we get the event notification. This is a type 4 event. 
*/ case SCU_EVENT_TYPE_OSSP_EVENT: - index = SCU_GET_PROTOCOL_ENGINE_INDEX(completion_entry); + index = SCU_GET_PROTOCOL_ENGINE_INDEX(ent); iphy = &ihost->phys[index]; - scic_sds_phy_event_handler(iphy, completion_entry); + sci_phy_event_handler(iphy, ent); break; case SCU_EVENT_TYPE_RNC_SUSPEND_TX: @@ -460,7 +457,7 @@ static void scic_sds_controller_event_completion(struct isci_host *ihost, idev = ihost->device_table[index]; if (idev != NULL) - scic_sds_remote_device_event_handler(idev, completion_entry); + sci_remote_device_event_handler(idev, ent); } else dev_err(&ihost->pdev->dev, "%s: SCIC Controller 0x%p received event 0x%x " @@ -468,7 +465,7 @@ static void scic_sds_controller_event_completion(struct isci_host *ihost, "exist.\n", __func__, ihost, - completion_entry, + ent, index); break; @@ -477,15 +474,15 @@ static void scic_sds_controller_event_completion(struct isci_host *ihost, dev_warn(&ihost->pdev->dev, "%s: SCIC Controller received unknown event code %x\n", __func__, - completion_entry); + ent); break; } } -static void scic_sds_controller_process_completions(struct isci_host *ihost) +static void sci_controller_process_completions(struct isci_host *ihost) { u32 completion_count = 0; - u32 completion_entry; + u32 ent; u32 get_index; u32 get_cycle; u32 event_get; @@ -509,7 +506,7 @@ static void scic_sds_controller_process_completions(struct isci_host *ihost) ) { completion_count++; - completion_entry = ihost->completion_queue[get_index]; + ent = ihost->completion_queue[get_index]; /* increment the get pointer and check for rollover to toggle the cycle bit */ get_cycle ^= ((get_index+1) & SCU_MAX_COMPLETION_QUEUE_ENTRIES) << @@ -519,19 +516,19 @@ static void scic_sds_controller_process_completions(struct isci_host *ihost) dev_dbg(&ihost->pdev->dev, "%s: completion queue entry:0x%08x\n", __func__, - completion_entry); + ent); - switch (SCU_GET_COMPLETION_TYPE(completion_entry)) { + switch (SCU_GET_COMPLETION_TYPE(ent)) { case SCU_COMPLETION_TYPE_TASK: - scic_sds_controller_task_completion(ihost, completion_entry); + sci_controller_task_completion(ihost, ent); break; case SCU_COMPLETION_TYPE_SDMA: - scic_sds_controller_sdma_completion(ihost, completion_entry); + sci_controller_sdma_completion(ihost, ent); break; case SCU_COMPLETION_TYPE_UFI: - scic_sds_controller_unsolicited_frame(ihost, completion_entry); + sci_controller_unsolicited_frame(ihost, ent); break; case SCU_COMPLETION_TYPE_EVENT: @@ -540,7 +537,7 @@ static void scic_sds_controller_process_completions(struct isci_host *ihost) (SMU_COMPLETION_QUEUE_GET_EVENT_CYCLE_BIT_SHIFT - SCU_MAX_EVENTS_SHIFT); event_get = (event_get+1) & (SCU_MAX_EVENTS-1); - scic_sds_controller_event_completion(ihost, completion_entry); + sci_controller_event_completion(ihost, ent); break; } default: @@ -548,7 +545,7 @@ static void scic_sds_controller_process_completions(struct isci_host *ihost) "%s: SCIC Controller received unknown " "completion type %x\n", __func__, - completion_entry); + ent); break; } } @@ -575,7 +572,7 @@ static void scic_sds_controller_process_completions(struct isci_host *ihost) } -static void scic_sds_controller_error_handler(struct isci_host *ihost) +static void sci_controller_error_handler(struct isci_host *ihost) { u32 interrupt_status; @@ -583,9 +580,9 @@ static void scic_sds_controller_error_handler(struct isci_host *ihost) readl(&ihost->smu_registers->interrupt_status); if ((interrupt_status & SMU_ISR_QUEUE_SUSPEND) && - scic_sds_controller_completion_queue_has_entries(ihost)) { + 
sci_controller_completion_queue_has_entries(ihost)) { - scic_sds_controller_process_completions(ihost); + sci_controller_process_completions(ihost); writel(SMU_ISR_QUEUE_SUSPEND, &ihost->smu_registers->interrupt_status); } else { dev_err(&ihost->pdev->dev, "%s: status: %#x\n", __func__, @@ -607,13 +604,13 @@ irqreturn_t isci_intx_isr(int vec, void *data) irqreturn_t ret = IRQ_NONE; struct isci_host *ihost = data; - if (scic_sds_controller_isr(ihost)) { + if (sci_controller_isr(ihost)) { writel(SMU_ISR_COMPLETION, &ihost->smu_registers->interrupt_status); tasklet_schedule(&ihost->completion_tasklet); ret = IRQ_HANDLED; - } else if (scic_sds_controller_error_isr(ihost)) { + } else if (sci_controller_error_isr(ihost)) { spin_lock(&ihost->scic_lock); - scic_sds_controller_error_handler(ihost); + sci_controller_error_handler(ihost); spin_unlock(&ihost->scic_lock); ret = IRQ_HANDLED; } @@ -625,8 +622,8 @@ irqreturn_t isci_error_isr(int vec, void *data) { struct isci_host *ihost = data; - if (scic_sds_controller_error_isr(ihost)) - scic_sds_controller_error_handler(ihost); + if (sci_controller_error_isr(ihost)) + sci_controller_error_handler(ihost); return IRQ_HANDLED; } @@ -670,8 +667,8 @@ int isci_host_scan_finished(struct Scsi_Host *shost, unsigned long time) } /** - * scic_controller_get_suggested_start_timeout() - This method returns the - * suggested scic_controller_start() timeout amount. The user is free to + * sci_controller_get_suggested_start_timeout() - This method returns the + * suggested sci_controller_start() timeout amount. The user is free to * use any timeout value, but this method provides the suggested minimum * start timeout value. The returned value is based upon empirical * information determined as a result of interoperability testing. @@ -681,7 +678,7 @@ int isci_host_scan_finished(struct Scsi_Host *shost, unsigned long time) * This method returns the number of milliseconds for the suggested start * operation timeout. */ -static u32 scic_controller_get_suggested_start_timeout(struct isci_host *ihost) +static u32 sci_controller_get_suggested_start_timeout(struct isci_host *ihost) { /* Validate the user supplied parameters. 
*/ if (!ihost) @@ -706,19 +703,19 @@ static u32 scic_controller_get_suggested_start_timeout(struct isci_host *ihost) + ((SCI_MAX_PHYS - 1) * SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL); } -static void scic_controller_enable_interrupts(struct isci_host *ihost) +static void sci_controller_enable_interrupts(struct isci_host *ihost) { BUG_ON(ihost->smu_registers == NULL); writel(0, &ihost->smu_registers->interrupt_mask); } -void scic_controller_disable_interrupts(struct isci_host *ihost) +void sci_controller_disable_interrupts(struct isci_host *ihost) { BUG_ON(ihost->smu_registers == NULL); writel(0xffffffff, &ihost->smu_registers->interrupt_mask); } -static void scic_sds_controller_enable_port_task_scheduler(struct isci_host *ihost) +static void sci_controller_enable_port_task_scheduler(struct isci_host *ihost) { u32 port_task_scheduler_value; @@ -731,7 +728,7 @@ static void scic_sds_controller_enable_port_task_scheduler(struct isci_host *iho &ihost->scu_registers->peg0.ptsg.control); } -static void scic_sds_controller_assign_task_entries(struct isci_host *ihost) +static void sci_controller_assign_task_entries(struct isci_host *ihost) { u32 task_assignment; @@ -752,7 +749,7 @@ static void scic_sds_controller_assign_task_entries(struct isci_host *ihost) } -static void scic_sds_controller_initialize_completion_queue(struct isci_host *ihost) +static void sci_controller_initialize_completion_queue(struct isci_host *ihost) { u32 index; u32 completion_queue_control_value; @@ -799,7 +796,7 @@ static void scic_sds_controller_initialize_completion_queue(struct isci_host *ih } } -static void scic_sds_controller_initialize_unsolicited_frame_queue(struct isci_host *ihost) +static void sci_controller_initialize_unsolicited_frame_queue(struct isci_host *ihost) { u32 frame_queue_control_value; u32 frame_queue_get_value; @@ -826,22 +823,8 @@ static void scic_sds_controller_initialize_unsolicited_frame_queue(struct isci_h &ihost->scu_registers->sdma.unsolicited_frame_put_pointer); } -/** - * This method will attempt to transition into the ready state for the - * controller and indicate that the controller start operation has completed - * if all criteria are met. - * @scic: This parameter indicates the controller object for which - * to transition to ready. - * @status: This parameter indicates the status value to be pass into the call - * to scic_cb_controller_start_complete(). - * - * none. - */ -static void scic_sds_controller_transition_to_ready( - struct isci_host *ihost, - enum sci_status status) +static void sci_controller_transition_to_ready(struct isci_host *ihost, enum sci_status status) { - if (ihost->sm.current_state_id == SCIC_STARTING) { /* * We move into the ready state, because some of the phys/ports @@ -855,7 +838,7 @@ static void scic_sds_controller_transition_to_ready( static bool is_phy_starting(struct isci_phy *iphy) { - enum scic_sds_phy_states state; + enum sci_phy_states state; state = iphy->sm.current_state_id; switch (state) { @@ -876,16 +859,16 @@ static bool is_phy_starting(struct isci_phy *iphy) } /** - * scic_sds_controller_start_next_phy - start phy + * sci_controller_start_next_phy - start phy * @scic: controller * * If all the phys have been started, then attempt to transition the * controller to the READY state and inform the user - * (scic_cb_controller_start_complete()). + * (sci_cb_controller_start_complete()). 
*/ -static enum sci_status scic_sds_controller_start_next_phy(struct isci_host *ihost) +static enum sci_status sci_controller_start_next_phy(struct isci_host *ihost) { - struct scic_sds_oem_params *oem = &ihost->oem_parameters.sds1; + struct sci_oem_params *oem = &ihost->oem_parameters; struct isci_phy *iphy; enum sci_status status; @@ -924,7 +907,7 @@ static enum sci_status scic_sds_controller_start_next_phy(struct isci_host *ihos * The controller has successfully finished the start process. * Inform the SCI Core user and transition to the READY state. */ if (is_controller_start_complete == true) { - scic_sds_controller_transition_to_ready(ihost, SCI_SUCCESS); + sci_controller_transition_to_ready(ihost, SCI_SUCCESS); sci_del_timer(&ihost->phy_timer); ihost->phy_startup_timer_pending = false; } @@ -944,11 +927,11 @@ static enum sci_status scic_sds_controller_start_next_phy(struct isci_host *ihos * incorrectly for the PORT or it was never * assigned to a PORT */ - return scic_sds_controller_start_next_phy(ihost); + return sci_controller_start_next_phy(ihost); } } - status = scic_sds_phy_start(iphy); + status = sci_phy_start(iphy); if (status == SCI_SUCCESS) { sci_mod_timer(&ihost->phy_timer, @@ -985,7 +968,7 @@ static void phy_startup_timeout(unsigned long data) ihost->phy_startup_timer_pending = false; do { - status = scic_sds_controller_start_next_phy(ihost); + status = sci_controller_start_next_phy(ihost); } while (status != SCI_SUCCESS); done: @@ -997,7 +980,7 @@ static u16 isci_tci_active(struct isci_host *ihost) return CIRC_CNT(ihost->tci_head, ihost->tci_tail, SCI_MAX_IO_REQUESTS); } -static enum sci_status scic_controller_start(struct isci_host *ihost, +static enum sci_status sci_controller_start(struct isci_host *ihost, u32 timeout) { enum sci_status result; @@ -1018,38 +1001,37 @@ static enum sci_status scic_controller_start(struct isci_host *ihost, isci_tci_free(ihost, index); /* Build the RNi free pool */ - scic_sds_remote_node_table_initialize( - &ihost->available_remote_nodes, - ihost->remote_node_entries); + sci_remote_node_table_initialize(&ihost->available_remote_nodes, + ihost->remote_node_entries); /* * Before anything else lets make sure we will not be * interrupted by the hardware. 
*/ - scic_controller_disable_interrupts(ihost); + sci_controller_disable_interrupts(ihost); /* Enable the port task scheduler */ - scic_sds_controller_enable_port_task_scheduler(ihost); + sci_controller_enable_port_task_scheduler(ihost); /* Assign all the task entries to ihost physical function */ - scic_sds_controller_assign_task_entries(ihost); + sci_controller_assign_task_entries(ihost); /* Now initialize the completion queue */ - scic_sds_controller_initialize_completion_queue(ihost); + sci_controller_initialize_completion_queue(ihost); /* Initialize the unsolicited frame queue for use */ - scic_sds_controller_initialize_unsolicited_frame_queue(ihost); + sci_controller_initialize_unsolicited_frame_queue(ihost); /* Start all of the ports on this controller */ for (index = 0; index < ihost->logical_port_entries; index++) { struct isci_port *iport = &ihost->ports[index]; - result = scic_sds_port_start(iport); + result = sci_port_start(iport); if (result) return result; } - scic_sds_controller_start_next_phy(ihost); + sci_controller_start_next_phy(ihost); sci_mod_timer(&ihost->timer, timeout); @@ -1061,29 +1043,29 @@ static enum sci_status scic_controller_start(struct isci_host *ihost, void isci_host_scan_start(struct Scsi_Host *shost) { struct isci_host *ihost = SHOST_TO_SAS_HA(shost)->lldd_ha; - unsigned long tmo = scic_controller_get_suggested_start_timeout(ihost); + unsigned long tmo = sci_controller_get_suggested_start_timeout(ihost); set_bit(IHOST_START_PENDING, &ihost->flags); spin_lock_irq(&ihost->scic_lock); - scic_controller_start(ihost, tmo); - scic_controller_enable_interrupts(ihost); + sci_controller_start(ihost, tmo); + sci_controller_enable_interrupts(ihost); spin_unlock_irq(&ihost->scic_lock); } static void isci_host_stop_complete(struct isci_host *ihost, enum sci_status completion_status) { isci_host_change_state(ihost, isci_stopped); - scic_controller_disable_interrupts(ihost); + sci_controller_disable_interrupts(ihost); clear_bit(IHOST_STOP_PENDING, &ihost->flags); wake_up(&ihost->eventq); } -static void scic_sds_controller_completion_handler(struct isci_host *ihost) +static void sci_controller_completion_handler(struct isci_host *ihost) { /* Empty out the completion queue */ - if (scic_sds_controller_completion_queue_has_entries(ihost)) - scic_sds_controller_process_completions(ihost); + if (sci_controller_completion_queue_has_entries(ihost)) + sci_controller_process_completions(ihost); /* Clear the interrupt and enable all interrupts again */ writel(SMU_ISR_COMPLETION, &ihost->smu_registers->interrupt_status); @@ -1116,7 +1098,7 @@ static void isci_host_completion_routine(unsigned long data) spin_lock_irq(&ihost->scic_lock); - scic_sds_controller_completion_handler(ihost); + sci_controller_completion_handler(ihost); /* Take the lists of completed I/Os from the host. */ @@ -1203,7 +1185,7 @@ static void isci_host_completion_routine(unsigned long data) } /** - * scic_controller_stop() - This method will stop an individual controller + * sci_controller_stop() - This method will stop an individual controller * object.This method will invoke the associated user callback upon * completion. The completion callback is called when the following * conditions are met: -# the method return status is SCI_SUCCESS. -# the @@ -1220,8 +1202,7 @@ static void isci_host_completion_routine(unsigned long data) * controller is already in the STOPPED state. SCI_FAILURE_INVALID_STATE if the * controller is not either in the STARTED or STOPPED states. 
*/ -static enum sci_status scic_controller_stop(struct isci_host *ihost, - u32 timeout) +static enum sci_status sci_controller_stop(struct isci_host *ihost, u32 timeout) { if (ihost->sm.current_state_id != SCIC_READY) { dev_warn(&ihost->pdev->dev, @@ -1236,7 +1217,7 @@ static enum sci_status scic_controller_stop(struct isci_host *ihost, } /** - * scic_controller_reset() - This method will reset the supplied core + * sci_controller_reset() - This method will reset the supplied core * controller regardless of the state of said controller. This operation is * considered destructive. In other words, all current operations are wiped * out. No IO completions for outstanding devices occur. Outstanding IO @@ -1247,7 +1228,7 @@ static enum sci_status scic_controller_stop(struct isci_host *ihost, * SCI_SUCCESS if the reset operation successfully started. SCI_FATAL_ERROR if * the controller reset operation is unable to complete. */ -static enum sci_status scic_controller_reset(struct isci_host *ihost) +static enum sci_status sci_controller_reset(struct isci_host *ihost) { switch (ihost->sm.current_state_id) { case SCIC_RESET: @@ -1286,11 +1267,11 @@ void isci_host_deinit(struct isci_host *ihost) set_bit(IHOST_STOP_PENDING, &ihost->flags); spin_lock_irq(&ihost->scic_lock); - scic_controller_stop(ihost, SCIC_CONTROLLER_STOP_TIMEOUT); + sci_controller_stop(ihost, SCIC_CONTROLLER_STOP_TIMEOUT); spin_unlock_irq(&ihost->scic_lock); wait_for_stop(ihost); - scic_controller_reset(ihost); + sci_controller_reset(ihost); /* Cancel any/all outstanding port timers */ for (i = 0; i < ihost->logical_port_entries; i++) { @@ -1329,11 +1310,8 @@ static void __iomem *smu_base(struct isci_host *isci_host) return pcim_iomap_table(pdev)[SCI_SMU_BAR * 2] + SCI_SMU_BAR_SIZE * id; } -static void isci_user_parameters_get( - struct isci_host *isci_host, - union scic_user_parameters *scic_user_params) +static void isci_user_parameters_get(struct sci_user_parameters *u) { - struct scic_sds_user_parameters *u = &scic_user_params->sds1; int i; for (i = 0; i < SCI_MAX_PHYS; i++) { @@ -1355,14 +1333,14 @@ static void isci_user_parameters_get( u->max_number_concurrent_device_spin_up = max_concurr_spinup; } -static void scic_sds_controller_initial_state_enter(struct sci_base_state_machine *sm) +static void sci_controller_initial_state_enter(struct sci_base_state_machine *sm) { struct isci_host *ihost = container_of(sm, typeof(*ihost), sm); sci_change_state(&ihost->sm, SCIC_RESET); } -static inline void scic_sds_controller_starting_state_exit(struct sci_base_state_machine *sm) +static inline void sci_controller_starting_state_exit(struct sci_base_state_machine *sm) { struct isci_host *ihost = container_of(sm, typeof(*ihost), sm); @@ -1377,7 +1355,7 @@ static inline void scic_sds_controller_starting_state_exit(struct sci_base_state #define INTERRUPT_COALESCE_TIMEOUT_ENCODE_MAX 28 /** - * scic_controller_set_interrupt_coalescence() - This method allows the user to + * sci_controller_set_interrupt_coalescence() - This method allows the user to * configure the interrupt coalescence. * @controller: This parameter represents the handle to the controller object * for which its interrupt coalesce register is overridden. @@ -1394,9 +1372,9 @@ static inline void scic_sds_controller_starting_state_exit(struct sci_base_state * SCI_FAILURE_INVALID_PARAMETER_VALUE The user input value is out of range. 
*/ static enum sci_status -scic_controller_set_interrupt_coalescence(struct isci_host *ihost, - u32 coalesce_number, - u32 coalesce_timeout) +sci_controller_set_interrupt_coalescence(struct isci_host *ihost, + u32 coalesce_number, + u32 coalesce_timeout) { u8 timeout_encode = 0; u32 min = 0; @@ -1489,23 +1467,23 @@ scic_controller_set_interrupt_coalescence(struct isci_host *ihost, } -static void scic_sds_controller_ready_state_enter(struct sci_base_state_machine *sm) +static void sci_controller_ready_state_enter(struct sci_base_state_machine *sm) { struct isci_host *ihost = container_of(sm, typeof(*ihost), sm); /* set the default interrupt coalescence number and timeout value. */ - scic_controller_set_interrupt_coalescence(ihost, 0x10, 250); + sci_controller_set_interrupt_coalescence(ihost, 0x10, 250); } -static void scic_sds_controller_ready_state_exit(struct sci_base_state_machine *sm) +static void sci_controller_ready_state_exit(struct sci_base_state_machine *sm) { struct isci_host *ihost = container_of(sm, typeof(*ihost), sm); /* disable interrupt coalescence. */ - scic_controller_set_interrupt_coalescence(ihost, 0, 0); + sci_controller_set_interrupt_coalescence(ihost, 0, 0); } -static enum sci_status scic_sds_controller_stop_phys(struct isci_host *ihost) +static enum sci_status sci_controller_stop_phys(struct isci_host *ihost) { u32 index; enum sci_status status; @@ -1514,7 +1492,7 @@ static enum sci_status scic_sds_controller_stop_phys(struct isci_host *ihost) status = SCI_SUCCESS; for (index = 0; index < SCI_MAX_PHYS; index++) { - phy_status = scic_sds_phy_stop(&ihost->phys[index]); + phy_status = sci_phy_stop(&ihost->phys[index]); if (phy_status != SCI_SUCCESS && phy_status != SCI_FAILURE_INVALID_STATE) { @@ -1531,7 +1509,7 @@ static enum sci_status scic_sds_controller_stop_phys(struct isci_host *ihost) return status; } -static enum sci_status scic_sds_controller_stop_ports(struct isci_host *ihost) +static enum sci_status sci_controller_stop_ports(struct isci_host *ihost) { u32 index; enum sci_status port_status; @@ -1540,7 +1518,7 @@ static enum sci_status scic_sds_controller_stop_ports(struct isci_host *ihost) for (index = 0; index < ihost->logical_port_entries; index++) { struct isci_port *iport = &ihost->ports[index]; - port_status = scic_sds_port_stop(iport); + port_status = sci_port_stop(iport); if ((port_status != SCI_SUCCESS) && (port_status != SCI_FAILURE_INVALID_STATE)) { @@ -1558,7 +1536,7 @@ static enum sci_status scic_sds_controller_stop_ports(struct isci_host *ihost) return status; } -static enum sci_status scic_sds_controller_stop_devices(struct isci_host *ihost) +static enum sci_status sci_controller_stop_devices(struct isci_host *ihost) { u32 index; enum sci_status status; @@ -1569,7 +1547,7 @@ static enum sci_status scic_sds_controller_stop_devices(struct isci_host *ihost) for (index = 0; index < ihost->remote_node_entries; index++) { if (ihost->device_table[index] != NULL) { /* / @todo What timeout value do we want to provide to this request? 
*/ - device_status = scic_remote_device_stop(ihost->device_table[index], 0); + device_status = sci_remote_device_stop(ihost->device_table[index], 0); if ((device_status != SCI_SUCCESS) && (device_status != SCI_FAILURE_INVALID_STATE)) { @@ -1586,33 +1564,27 @@ static enum sci_status scic_sds_controller_stop_devices(struct isci_host *ihost) return status; } -static void scic_sds_controller_stopping_state_enter(struct sci_base_state_machine *sm) +static void sci_controller_stopping_state_enter(struct sci_base_state_machine *sm) { struct isci_host *ihost = container_of(sm, typeof(*ihost), sm); /* Stop all of the components for this controller */ - scic_sds_controller_stop_phys(ihost); - scic_sds_controller_stop_ports(ihost); - scic_sds_controller_stop_devices(ihost); + sci_controller_stop_phys(ihost); + sci_controller_stop_ports(ihost); + sci_controller_stop_devices(ihost); } -static void scic_sds_controller_stopping_state_exit(struct sci_base_state_machine *sm) +static void sci_controller_stopping_state_exit(struct sci_base_state_machine *sm) { struct isci_host *ihost = container_of(sm, typeof(*ihost), sm); sci_del_timer(&ihost->timer); } - -/** - * scic_sds_controller_reset_hardware() - - * - * This method will reset the controller hardware. - */ -static void scic_sds_controller_reset_hardware(struct isci_host *ihost) +static void sci_controller_reset_hardware(struct isci_host *ihost) { /* Disable interrupts so we dont take any spurious interrupts */ - scic_controller_disable_interrupts(ihost); + sci_controller_disable_interrupts(ihost); /* Reset the SCU */ writel(0xFFFFFFFF, &ihost->smu_registers->soft_reset_control); @@ -1627,82 +1599,82 @@ static void scic_sds_controller_reset_hardware(struct isci_host *ihost) writel(0, &ihost->scu_registers->sdma.unsolicited_frame_get_pointer); } -static void scic_sds_controller_resetting_state_enter(struct sci_base_state_machine *sm) +static void sci_controller_resetting_state_enter(struct sci_base_state_machine *sm) { struct isci_host *ihost = container_of(sm, typeof(*ihost), sm); - scic_sds_controller_reset_hardware(ihost); + sci_controller_reset_hardware(ihost); sci_change_state(&ihost->sm, SCIC_RESET); } -static const struct sci_base_state scic_sds_controller_state_table[] = { +static const struct sci_base_state sci_controller_state_table[] = { [SCIC_INITIAL] = { - .enter_state = scic_sds_controller_initial_state_enter, + .enter_state = sci_controller_initial_state_enter, }, [SCIC_RESET] = {}, [SCIC_INITIALIZING] = {}, [SCIC_INITIALIZED] = {}, [SCIC_STARTING] = { - .exit_state = scic_sds_controller_starting_state_exit, + .exit_state = sci_controller_starting_state_exit, }, [SCIC_READY] = { - .enter_state = scic_sds_controller_ready_state_enter, - .exit_state = scic_sds_controller_ready_state_exit, + .enter_state = sci_controller_ready_state_enter, + .exit_state = sci_controller_ready_state_exit, }, [SCIC_RESETTING] = { - .enter_state = scic_sds_controller_resetting_state_enter, + .enter_state = sci_controller_resetting_state_enter, }, [SCIC_STOPPING] = { - .enter_state = scic_sds_controller_stopping_state_enter, - .exit_state = scic_sds_controller_stopping_state_exit, + .enter_state = sci_controller_stopping_state_enter, + .exit_state = sci_controller_stopping_state_exit, }, [SCIC_STOPPED] = {}, [SCIC_FAILED] = {} }; -static void scic_sds_controller_set_default_config_parameters(struct isci_host *ihost) +static void sci_controller_set_default_config_parameters(struct isci_host *ihost) { /* these defaults are overridden by the platform / firmware */ 
u16 index; /* Default to APC mode. */ - ihost->oem_parameters.sds1.controller.mode_type = SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE; + ihost->oem_parameters.controller.mode_type = SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE; /* Default to APC mode. */ - ihost->oem_parameters.sds1.controller.max_concurrent_dev_spin_up = 1; + ihost->oem_parameters.controller.max_concurrent_dev_spin_up = 1; /* Default to no SSC operation. */ - ihost->oem_parameters.sds1.controller.do_enable_ssc = false; + ihost->oem_parameters.controller.do_enable_ssc = false; /* Initialize all of the port parameter information to narrow ports. */ for (index = 0; index < SCI_MAX_PORTS; index++) { - ihost->oem_parameters.sds1.ports[index].phy_mask = 0; + ihost->oem_parameters.ports[index].phy_mask = 0; } /* Initialize all of the phy parameter information. */ for (index = 0; index < SCI_MAX_PHYS; index++) { /* Default to 6G (i.e. Gen 3) for now. */ - ihost->user_parameters.sds1.phys[index].max_speed_generation = 3; + ihost->user_parameters.phys[index].max_speed_generation = 3; /* the frequencies cannot be 0 */ - ihost->user_parameters.sds1.phys[index].align_insertion_frequency = 0x7f; - ihost->user_parameters.sds1.phys[index].in_connection_align_insertion_frequency = 0xff; - ihost->user_parameters.sds1.phys[index].notify_enable_spin_up_insertion_frequency = 0x33; + ihost->user_parameters.phys[index].align_insertion_frequency = 0x7f; + ihost->user_parameters.phys[index].in_connection_align_insertion_frequency = 0xff; + ihost->user_parameters.phys[index].notify_enable_spin_up_insertion_frequency = 0x33; /* * Previous Vitesse based expanders had a arbitration issue that * is worked around by having the upper 32-bits of SAS address * with a value greater then the Vitesse company identifier. * Hence, usage of 0x5FCFFFFF. */ - ihost->oem_parameters.sds1.phys[index].sas_address.low = 0x1 + ihost->id; - ihost->oem_parameters.sds1.phys[index].sas_address.high = 0x5FCFFFFF; + ihost->oem_parameters.phys[index].sas_address.low = 0x1 + ihost->id; + ihost->oem_parameters.phys[index].sas_address.high = 0x5FCFFFFF; } - ihost->user_parameters.sds1.stp_inactivity_timeout = 5; - ihost->user_parameters.sds1.ssp_inactivity_timeout = 5; - ihost->user_parameters.sds1.stp_max_occupancy_timeout = 5; - ihost->user_parameters.sds1.ssp_max_occupancy_timeout = 20; - ihost->user_parameters.sds1.no_outbound_task_timeout = 20; + ihost->user_parameters.stp_inactivity_timeout = 5; + ihost->user_parameters.ssp_inactivity_timeout = 5; + ihost->user_parameters.stp_max_occupancy_timeout = 5; + ihost->user_parameters.ssp_max_occupancy_timeout = 20; + ihost->user_parameters.no_outbound_task_timeout = 20; } static void controller_timeout(unsigned long data) @@ -1718,7 +1690,7 @@ static void controller_timeout(unsigned long data) goto done; if (sm->current_state_id == SCIC_STARTING) - scic_sds_controller_transition_to_ready(ihost, SCI_FAILURE_TIMEOUT); + sci_controller_transition_to_ready(ihost, SCI_FAILURE_TIMEOUT); else if (sm->current_state_id == SCIC_STOPPING) { sci_change_state(sm, SCIC_FAILED); isci_host_stop_complete(ihost, SCI_FAILURE_TIMEOUT); @@ -1732,45 +1704,29 @@ done: spin_unlock_irqrestore(&ihost->scic_lock, flags); } -/** - * scic_controller_construct() - This method will attempt to construct a - * controller object utilizing the supplied parameter information. - * @c: This parameter specifies the controller to be constructed. 
- * @scu_base: mapped base address of the scu registers - * @smu_base: mapped base address of the smu registers - * - * Indicate if the controller was successfully constructed or if it failed in - * some way. SCI_SUCCESS This value is returned if the controller was - * successfully constructed. SCI_WARNING_TIMER_CONFLICT This value is returned - * if the interrupt coalescence timer may cause SAS compliance issues for SMP - * Target mode response processing. SCI_FAILURE_UNSUPPORTED_CONTROLLER_TYPE - * This value is returned if the controller does not support the supplied type. - * SCI_FAILURE_UNSUPPORTED_INIT_DATA_VERSION This value is returned if the - * controller does not support the supplied initialization data version. - */ -static enum sci_status scic_controller_construct(struct isci_host *ihost, - void __iomem *scu_base, - void __iomem *smu_base) +static enum sci_status sci_controller_construct(struct isci_host *ihost, + void __iomem *scu_base, + void __iomem *smu_base) { u8 i; - sci_init_sm(&ihost->sm, scic_sds_controller_state_table, SCIC_INITIAL); + sci_init_sm(&ihost->sm, sci_controller_state_table, SCIC_INITIAL); ihost->scu_registers = scu_base; ihost->smu_registers = smu_base; - scic_sds_port_configuration_agent_construct(&ihost->port_agent); + sci_port_configuration_agent_construct(&ihost->port_agent); /* Construct the ports for this controller */ for (i = 0; i < SCI_MAX_PORTS; i++) - scic_sds_port_construct(&ihost->ports[i], i, ihost); - scic_sds_port_construct(&ihost->ports[i], SCIC_SDS_DUMMY_PORT, ihost); + sci_port_construct(&ihost->ports[i], i, ihost); + sci_port_construct(&ihost->ports[i], SCIC_SDS_DUMMY_PORT, ihost); /* Construct the phys for this controller */ for (i = 0; i < SCI_MAX_PHYS; i++) { /* Add all the PHYs to the dummy port */ - scic_sds_phy_construct(&ihost->phys[i], - &ihost->ports[SCI_MAX_PORTS], i); + sci_phy_construct(&ihost->phys[i], + &ihost->ports[SCI_MAX_PORTS], i); } ihost->invalid_phy_mask = 0; @@ -1778,12 +1734,12 @@ static enum sci_status scic_controller_construct(struct isci_host *ihost, sci_init_timer(&ihost->timer, controller_timeout); /* Initialize the User and OEM parameters to default values. 
*/ - scic_sds_controller_set_default_config_parameters(ihost); + sci_controller_set_default_config_parameters(ihost); - return scic_controller_reset(ihost); + return sci_controller_reset(ihost); } -int scic_oem_parameters_validate(struct scic_sds_oem_params *oem) +int sci_oem_parameters_validate(struct sci_oem_params *oem) { int i; @@ -1817,8 +1773,7 @@ int scic_oem_parameters_validate(struct scic_sds_oem_params *oem) return 0; } -static enum sci_status scic_oem_parameters_set(struct isci_host *ihost, - union scic_oem_parameters *scic_parms) +static enum sci_status sci_oem_parameters_set(struct isci_host *ihost) { u32 state = ihost->sm.current_state_id; @@ -1826,9 +1781,8 @@ static enum sci_status scic_oem_parameters_set(struct isci_host *ihost, state == SCIC_INITIALIZING || state == SCIC_INITIALIZED) { - if (scic_oem_parameters_validate(&scic_parms->sds1)) + if (sci_oem_parameters_validate(&ihost->oem_parameters)) return SCI_FAILURE_INVALID_PARAMETER_VALUE; - ihost->oem_parameters.sds1 = scic_parms->sds1; return SCI_SUCCESS; } @@ -1836,13 +1790,6 @@ static enum sci_status scic_oem_parameters_set(struct isci_host *ihost, return SCI_FAILURE_INVALID_STATE; } -void scic_oem_parameters_get( - struct isci_host *ihost, - union scic_oem_parameters *scic_parms) -{ - memcpy(scic_parms, (&ihost->oem_parameters), sizeof(*scic_parms)); -} - static void power_control_timeout(unsigned long data) { struct sci_timer *tmr = (struct sci_timer *)data; @@ -1873,13 +1820,13 @@ static void power_control_timeout(unsigned long data) continue; if (ihost->power_control.phys_granted_power >= - ihost->oem_parameters.sds1.controller.max_concurrent_dev_spin_up) + ihost->oem_parameters.controller.max_concurrent_dev_spin_up) break; ihost->power_control.requesters[i] = NULL; ihost->power_control.phys_waiting--; ihost->power_control.phys_granted_power++; - scic_sds_phy_consume_power_handler(iphy); + sci_phy_consume_power_handler(iphy); } /* @@ -1893,22 +1840,15 @@ done: spin_unlock_irqrestore(&ihost->scic_lock, flags); } -/** - * This method inserts the phy in the stagger spinup control queue. - * @scic: - * - * - */ -void scic_sds_controller_power_control_queue_insert( - struct isci_host *ihost, - struct isci_phy *iphy) +void sci_controller_power_control_queue_insert(struct isci_host *ihost, + struct isci_phy *iphy) { BUG_ON(iphy == NULL); if (ihost->power_control.phys_granted_power < - ihost->oem_parameters.sds1.controller.max_concurrent_dev_spin_up) { + ihost->oem_parameters.controller.max_concurrent_dev_spin_up) { ihost->power_control.phys_granted_power++; - scic_sds_phy_consume_power_handler(iphy); + sci_phy_consume_power_handler(iphy); /* * stop and start the power_control timer. When the timer fires, the @@ -1928,21 +1868,13 @@ void scic_sds_controller_power_control_queue_insert( } } -/** - * This method removes the phy from the stagger spinup control queue. - * @scic: - * - * - */ -void scic_sds_controller_power_control_queue_remove( - struct isci_host *ihost, - struct isci_phy *iphy) +void sci_controller_power_control_queue_remove(struct isci_host *ihost, + struct isci_phy *iphy) { BUG_ON(iphy == NULL); - if (ihost->power_control.requesters[iphy->phy_index] != NULL) { + if (ihost->power_control.requesters[iphy->phy_index]) ihost->power_control.phys_waiting--; - } ihost->power_control.requesters[iphy->phy_index] = NULL; } @@ -1952,9 +1884,9 @@ void scic_sds_controller_power_control_queue_remove( /* Initialize the AFE for this phy index. 
We need to read the AFE setup from * the OEM parameters */ -static void scic_sds_controller_afe_initialization(struct isci_host *ihost) +static void sci_controller_afe_initialization(struct isci_host *ihost) { - const struct scic_sds_oem_params *oem = &ihost->oem_parameters.sds1; + const struct sci_oem_params *oem = &ihost->oem_parameters; u32 afe_status; u32 phy_id; @@ -2111,7 +2043,7 @@ static void scic_sds_controller_afe_initialization(struct isci_host *ihost) udelay(AFE_REGISTER_WRITE_DELAY); } -static void scic_sds_controller_initialize_power_control(struct isci_host *ihost) +static void sci_controller_initialize_power_control(struct isci_host *ihost) { sci_init_timer(&ihost->power_control.timer, power_control_timeout); @@ -2122,7 +2054,7 @@ static void scic_sds_controller_initialize_power_control(struct isci_host *ihost ihost->power_control.phys_granted_power = 0; } -static enum sci_status scic_controller_initialize(struct isci_host *ihost) +static enum sci_status sci_controller_initialize(struct isci_host *ihost) { struct sci_base_state_machine *sm = &ihost->sm; enum sci_status result = SCI_FAILURE; @@ -2142,14 +2074,14 @@ static enum sci_status scic_controller_initialize(struct isci_host *ihost) ihost->next_phy_to_start = 0; ihost->phy_startup_timer_pending = false; - scic_sds_controller_initialize_power_control(ihost); + sci_controller_initialize_power_control(ihost); /* * There is nothing to do here for B0 since we do not have to * program the AFE registers. * / @todo The AFE settings are supposed to be correct for the B0 but * / presently they seem to be wrong. */ - scic_sds_controller_afe_initialization(ihost); + sci_controller_afe_initialization(ihost); /* Take the hardware out of reset */ @@ -2206,24 +2138,22 @@ static enum sci_status scic_controller_initialize(struct isci_host *ihost) * are accessed during the port initialization. 
*/ for (i = 0; i < SCI_MAX_PHYS; i++) { - result = scic_sds_phy_initialize(&ihost->phys[i], - &ihost->scu_registers->peg0.pe[i].tl, - &ihost->scu_registers->peg0.pe[i].ll); + result = sci_phy_initialize(&ihost->phys[i], + &ihost->scu_registers->peg0.pe[i].tl, + &ihost->scu_registers->peg0.pe[i].ll); if (result != SCI_SUCCESS) goto out; } for (i = 0; i < ihost->logical_port_entries; i++) { - result = scic_sds_port_initialize(&ihost->ports[i], - &ihost->scu_registers->peg0.ptsg.port[i], - &ihost->scu_registers->peg0.ptsg.protocol_engine, - &ihost->scu_registers->peg0.viit[i]); + struct isci_port *iport = &ihost->ports[i]; - if (result != SCI_SUCCESS) - goto out; + iport->port_task_scheduler_registers = &ihost->scu_registers->peg0.ptsg.port[i]; + iport->port_pe_configuration_register = &ihost->scu_registers->peg0.ptsg.protocol_engine[0]; + iport->viit_registers = &ihost->scu_registers->peg0.viit[i]; } - result = scic_sds_port_configuration_agent_initialize(ihost, &ihost->port_agent); + result = sci_port_configuration_agent_initialize(ihost, &ihost->port_agent); out: /* Advance the controller state machine */ @@ -2236,9 +2166,8 @@ static enum sci_status scic_controller_initialize(struct isci_host *ihost) return result; } -static enum sci_status scic_user_parameters_set( - struct isci_host *ihost, - union scic_user_parameters *scic_parms) +static enum sci_status sci_user_parameters_set(struct isci_host *ihost, + struct sci_user_parameters *sci_parms) { u32 state = ihost->sm.current_state_id; @@ -2254,7 +2183,7 @@ static enum sci_status scic_user_parameters_set( for (index = 0; index < SCI_MAX_PHYS; index++) { struct sci_phy_user_params *user_phy; - user_phy = &scic_parms->sds1.phys[index]; + user_phy = &sci_parms->phys[index]; if (!((user_phy->max_speed_generation <= SCIC_SDS_PARM_MAX_SPEED) && @@ -2275,14 +2204,14 @@ static enum sci_status scic_user_parameters_set( return SCI_FAILURE_INVALID_PARAMETER_VALUE; } - if ((scic_parms->sds1.stp_inactivity_timeout == 0) || - (scic_parms->sds1.ssp_inactivity_timeout == 0) || - (scic_parms->sds1.stp_max_occupancy_timeout == 0) || - (scic_parms->sds1.ssp_max_occupancy_timeout == 0) || - (scic_parms->sds1.no_outbound_task_timeout == 0)) + if ((sci_parms->stp_inactivity_timeout == 0) || + (sci_parms->ssp_inactivity_timeout == 0) || + (sci_parms->stp_max_occupancy_timeout == 0) || + (sci_parms->ssp_max_occupancy_timeout == 0) || + (sci_parms->no_outbound_task_timeout == 0)) return SCI_FAILURE_INVALID_PARAMETER_VALUE; - memcpy(&ihost->user_parameters, scic_parms, sizeof(*scic_parms)); + memcpy(&ihost->user_parameters, sci_parms, sizeof(*sci_parms)); return SCI_SUCCESS; } @@ -2290,7 +2219,7 @@ static enum sci_status scic_user_parameters_set( return SCI_FAILURE_INVALID_STATE; } -static int scic_controller_mem_init(struct isci_host *ihost) +static int sci_controller_mem_init(struct isci_host *ihost) { struct device *dev = &ihost->pdev->dev; dma_addr_t dma; @@ -2307,7 +2236,7 @@ static int scic_controller_mem_init(struct isci_host *ihost) size = ihost->remote_node_entries * sizeof(union scu_remote_node_context); ihost->remote_node_context_table = dmam_alloc_coherent(dev, size, &dma, - GFP_KERNEL); + GFP_KERNEL); if (!ihost->remote_node_context_table) return -ENOMEM; @@ -2323,7 +2252,7 @@ static int scic_controller_mem_init(struct isci_host *ihost) writel(lower_32_bits(dma), &ihost->smu_registers->host_task_table_lower); writel(upper_32_bits(dma), &ihost->smu_registers->host_task_table_upper); - err = scic_sds_unsolicited_frame_control_construct(ihost); + err = 
sci_unsolicited_frame_control_construct(ihost); if (err) return err; @@ -2348,8 +2277,7 @@ int isci_host_init(struct isci_host *ihost) { int err = 0, i; enum sci_status status; - union scic_oem_parameters oem; - union scic_user_parameters scic_user_params; + struct sci_user_parameters sci_user_params; struct isci_pci_info *pci_info = to_pci_info(ihost->pdev); spin_lock_init(&ihost->state_lock); @@ -2358,12 +2286,12 @@ int isci_host_init(struct isci_host *ihost) isci_host_change_state(ihost, isci_starting); - status = scic_controller_construct(ihost, scu_base(ihost), - smu_base(ihost)); + status = sci_controller_construct(ihost, scu_base(ihost), + smu_base(ihost)); if (status != SCI_SUCCESS) { dev_err(&ihost->pdev->dev, - "%s: scic_controller_construct failed - status = %x\n", + "%s: sci_controller_construct failed - status = %x\n", __func__, status); return -ENODEV; @@ -2376,21 +2304,18 @@ int isci_host_init(struct isci_host *ihost) * grab initial values stored in the controller object for OEM and USER * parameters */ - isci_user_parameters_get(ihost, &scic_user_params); - status = scic_user_parameters_set(ihost, - &scic_user_params); + isci_user_parameters_get(&sci_user_params); + status = sci_user_parameters_set(ihost, &sci_user_params); if (status != SCI_SUCCESS) { dev_warn(&ihost->pdev->dev, - "%s: scic_user_parameters_set failed\n", + "%s: sci_user_parameters_set failed\n", __func__); return -ENODEV; } - scic_oem_parameters_get(ihost, &oem); - /* grab any OEM parameters specified in orom */ if (pci_info->orom) { - status = isci_parse_oem_parameters(&oem, + status = isci_parse_oem_parameters(&ihost->oem_parameters, pci_info->orom, ihost->id); if (status != SCI_SUCCESS) { @@ -2400,10 +2325,10 @@ int isci_host_init(struct isci_host *ihost) } } - status = scic_oem_parameters_set(ihost, &oem); + status = sci_oem_parameters_set(ihost); if (status != SCI_SUCCESS) { dev_warn(&ihost->pdev->dev, - "%s: scic_oem_parameters_set failed\n", + "%s: sci_oem_parameters_set failed\n", __func__); return -ENODEV; } @@ -2415,17 +2340,17 @@ int isci_host_init(struct isci_host *ihost) INIT_LIST_HEAD(&ihost->requests_to_errorback); spin_lock_irq(&ihost->scic_lock); - status = scic_controller_initialize(ihost); + status = sci_controller_initialize(ihost); spin_unlock_irq(&ihost->scic_lock); if (status != SCI_SUCCESS) { dev_warn(&ihost->pdev->dev, - "%s: scic_controller_initialize failed -" + "%s: sci_controller_initialize failed -" " status = 0x%x\n", __func__, status); return -ENODEV; } - err = scic_controller_mem_init(ihost); + err = sci_controller_mem_init(ihost); if (err) return err; @@ -2463,20 +2388,20 @@ int isci_host_init(struct isci_host *ihost) return 0; } -void scic_sds_controller_link_up(struct isci_host *ihost, - struct isci_port *iport, struct isci_phy *iphy) +void sci_controller_link_up(struct isci_host *ihost, struct isci_port *iport, + struct isci_phy *iphy) { switch (ihost->sm.current_state_id) { case SCIC_STARTING: sci_del_timer(&ihost->phy_timer); ihost->phy_startup_timer_pending = false; ihost->port_agent.link_up_handler(ihost, &ihost->port_agent, - iport, iphy); - scic_sds_controller_start_next_phy(ihost); + iport, iphy); + sci_controller_start_next_phy(ihost); break; case SCIC_READY: ihost->port_agent.link_up_handler(ihost, &ihost->port_agent, - iport, iphy); + iport, iphy); break; default: dev_dbg(&ihost->pdev->dev, @@ -2486,8 +2411,8 @@ void scic_sds_controller_link_up(struct isci_host *ihost, } } -void scic_sds_controller_link_down(struct isci_host *ihost, - struct isci_port *iport, 
struct isci_phy *iphy) +void sci_controller_link_down(struct isci_host *ihost, struct isci_port *iport, + struct isci_phy *iphy) { switch (ihost->sm.current_state_id) { case SCIC_STARTING: @@ -2505,12 +2430,7 @@ void scic_sds_controller_link_down(struct isci_host *ihost, } } -/** - * This is a helper method to determine if any remote devices on this - * controller are still in the stopping state. - * - */ -static bool scic_sds_controller_has_remote_devices_stopping(struct isci_host *ihost) +static bool sci_controller_has_remote_devices_stopping(struct isci_host *ihost) { u32 index; @@ -2523,12 +2443,8 @@ static bool scic_sds_controller_has_remote_devices_stopping(struct isci_host *ih return false; } -/** - * This method is called by the remote device to inform the controller - * object that the remote device has stopped. - */ -void scic_sds_controller_remote_device_stopped(struct isci_host *ihost, - struct isci_remote_device *idev) +void sci_controller_remote_device_stopped(struct isci_host *ihost, + struct isci_remote_device *idev) { if (ihost->sm.current_state_id != SCIC_STOPPING) { dev_dbg(&ihost->pdev->dev, @@ -2539,32 +2455,19 @@ void scic_sds_controller_remote_device_stopped(struct isci_host *ihost, return; } - if (!scic_sds_controller_has_remote_devices_stopping(ihost)) { + if (!sci_controller_has_remote_devices_stopping(ihost)) sci_change_state(&ihost->sm, SCIC_STOPPED); - } } -/** - * This method will write to the SCU PCP register the request value. The method - * is used to suspend/resume ports, devices, and phys. - * @scic: - * - * - */ -void scic_sds_controller_post_request( - struct isci_host *ihost, - u32 request) +void sci_controller_post_request(struct isci_host *ihost, u32 request) { - dev_dbg(&ihost->pdev->dev, - "%s: SCIC Controller 0x%p post request 0x%08x\n", - __func__, - ihost, - request); + dev_dbg(&ihost->pdev->dev, "%s[%d]: %#x\n", + __func__, ihost->id, request); writel(request, &ihost->smu_registers->post_context_port); } -struct isci_request *scic_request_by_tag(struct isci_host *ihost, u16 io_tag) +struct isci_request *sci_request_by_tag(struct isci_host *ihost, u16 io_tag) { u16 task_index; u16 task_sequence; @@ -2599,15 +2502,14 @@ struct isci_request *scic_request_by_tag(struct isci_host *ihost, u16 io_tag) * enum sci_status SCI_FAILURE_OUT_OF_RESOURCES if there are no available remote * node index available. */ -enum sci_status scic_sds_controller_allocate_remote_node_context( - struct isci_host *ihost, - struct isci_remote_device *idev, - u16 *node_id) +enum sci_status sci_controller_allocate_remote_node_context(struct isci_host *ihost, + struct isci_remote_device *idev, + u16 *node_id) { u16 node_index; - u32 remote_node_count = scic_sds_remote_device_node_count(idev); + u32 remote_node_count = sci_remote_device_node_count(idev); - node_index = scic_sds_remote_node_table_allocate_remote_node( + node_index = sci_remote_node_table_allocate_remote_node( &ihost->available_remote_nodes, remote_node_count ); @@ -2622,68 +2524,26 @@ enum sci_status scic_sds_controller_allocate_remote_node_context( return SCI_FAILURE_INSUFFICIENT_RESOURCES; } -/** - * This method frees the remote node index back to the available pool. Once - * this is done the remote node context buffer is no longer valid and can - * not be used. 
- * @scic: - * @sci_dev: - * @node_id: - * - */ -void scic_sds_controller_free_remote_node_context( - struct isci_host *ihost, - struct isci_remote_device *idev, - u16 node_id) +void sci_controller_free_remote_node_context(struct isci_host *ihost, + struct isci_remote_device *idev, + u16 node_id) { - u32 remote_node_count = scic_sds_remote_device_node_count(idev); + u32 remote_node_count = sci_remote_device_node_count(idev); if (ihost->device_table[node_id] == idev) { ihost->device_table[node_id] = NULL; - scic_sds_remote_node_table_release_remote_node_index( + sci_remote_node_table_release_remote_node_index( &ihost->available_remote_nodes, remote_node_count, node_id ); } } -/** - * This method returns the union scu_remote_node_context for the specified remote - * node id. - * @scic: - * @node_id: - * - * union scu_remote_node_context* - */ -union scu_remote_node_context *scic_sds_controller_get_remote_node_context_buffer( - struct isci_host *ihost, - u16 node_id - ) { - if ( - (node_id < ihost->remote_node_entries) - && (ihost->device_table[node_id] != NULL) - ) { - return &ihost->remote_node_context_table[node_id]; - } - - return NULL; -} - -/** - * - * @resposne_buffer: This is the buffer into which the D2H register FIS will be - * constructed. - * @frame_header: This is the frame header returned by the hardware. - * @frame_buffer: This is the frame buffer returned by the hardware. - * - * This method will combind the frame header and frame buffer to create a SATA - * D2H register FIS none - */ -void scic_sds_controller_copy_sata_response( - void *response_buffer, - void *frame_header, - void *frame_buffer) +void sci_controller_copy_sata_response(void *response_buffer, + void *frame_header, + void *frame_buffer) { + /* XXX type safety? */ memcpy(response_buffer, frame_header, sizeof(u32)); memcpy(response_buffer + sizeof(u32), @@ -2691,21 +2551,9 @@ void scic_sds_controller_copy_sata_response( sizeof(struct dev_to_host_fis) - sizeof(u32)); } -/** - * This method releases the frame once this is done the frame is available for - * re-use by the hardware. The data contained in the frame header and frame - * buffer is no longer valid. The UF queue get pointer is only updated if UF - * control indicates this is appropriate. - * @scic: - * @frame_index: - * - */ -void scic_sds_controller_release_frame( - struct isci_host *ihost, - u32 frame_index) +void sci_controller_release_frame(struct isci_host *ihost, u32 frame_index) { - if (scic_sds_unsolicited_frame_control_release_frame( - &ihost->uf_control, frame_index) == true) + if (sci_unsolicited_frame_control_release_frame(&ihost->uf_control, frame_index)) writel(ihost->uf_control.get, &ihost->scu_registers->sdma.unsolicited_frame_get_pointer); } @@ -2763,21 +2611,9 @@ enum sci_status isci_free_tag(struct isci_host *ihost, u16 io_tag) return SCI_FAILURE_INVALID_IO_TAG; } -/** - * scic_controller_start_io() - This method is called by the SCI user to - * send/start an IO request. If the method invocation is successful, then - * the IO request has been queued to the hardware for processing. - * @controller: the handle to the controller object for which to start an IO - * request. - * @remote_device: the handle to the remote device object for which to start an - * IO request. - * @io_request: the handle to the io request object to start. - * @io_tag: This parameter specifies a previously allocated IO tag that the - * user desires to be utilized for this request. 
- */ -enum sci_status scic_controller_start_io(struct isci_host *ihost, - struct isci_remote_device *idev, - struct isci_request *ireq) +enum sci_status sci_controller_start_io(struct isci_host *ihost, + struct isci_remote_device *idev, + struct isci_request *ireq) { enum sci_status status; @@ -2786,36 +2622,23 @@ enum sci_status scic_controller_start_io(struct isci_host *ihost, return SCI_FAILURE_INVALID_STATE; } - status = scic_sds_remote_device_start_io(ihost, idev, ireq); + status = sci_remote_device_start_io(ihost, idev, ireq); if (status != SCI_SUCCESS) return status; set_bit(IREQ_ACTIVE, &ireq->flags); - scic_sds_controller_post_request(ihost, scic_sds_request_get_post_context(ireq)); + sci_controller_post_request(ihost, sci_request_get_post_context(ireq)); return SCI_SUCCESS; } -/** - * scic_controller_terminate_request() - This method is called by the SCI Core - * user to terminate an ongoing (i.e. started) core IO request. This does - * not abort the IO request at the target, but rather removes the IO request - * from the host controller. - * @controller: the handle to the controller object for which to terminate a - * request. - * @remote_device: the handle to the remote device object for which to - * terminate a request. - * @request: the handle to the io or task management request object to - * terminate. - * - * Indicate if the controller successfully began the terminate process for the - * IO request. SCI_SUCCESS if the terminate process was successfully started - * for the request. Determine the failure situations and return values. - */ -enum sci_status scic_controller_terminate_request( - struct isci_host *ihost, - struct isci_remote_device *idev, - struct isci_request *ireq) +enum sci_status sci_controller_terminate_request(struct isci_host *ihost, + struct isci_remote_device *idev, + struct isci_request *ireq) { + /* terminate an ongoing (i.e. started) core IO request. This does not + * abort the IO request at the target, but rather removes the IO + * request from the host controller. + */ enum sci_status status; if (ihost->sm.current_state_id != SCIC_READY) { @@ -2824,7 +2647,7 @@ enum sci_status scic_controller_terminate_request( return SCI_FAILURE_INVALID_STATE; } - status = scic_sds_io_request_terminate(ireq); + status = sci_io_request_terminate(ireq); if (status != SCI_SUCCESS) return status; @@ -2832,27 +2655,25 @@ enum sci_status scic_controller_terminate_request( * Utilize the original post context command and or in the POST_TC_ABORT * request sub-type. */ - scic_sds_controller_post_request(ihost, - scic_sds_request_get_post_context(ireq) | - SCU_CONTEXT_COMMAND_REQUEST_POST_TC_ABORT); + sci_controller_post_request(ihost, + ireq->post_context | SCU_CONTEXT_COMMAND_REQUEST_POST_TC_ABORT); return SCI_SUCCESS; } /** - * scic_controller_complete_io() - This method will perform core specific + * sci_controller_complete_io() - This method will perform core specific * completion operations for an IO request. After this method is invoked, * the user should consider the IO request as invalid until it is properly * reused (i.e. re-constructed). - * @controller: The handle to the controller object for which to complete the + * @ihost: The handle to the controller object for which to complete the * IO request. - * @remote_device: The handle to the remote device object for which to complete + * @idev: The handle to the remote device object for which to complete * the IO request. - * @io_request: the handle to the io request object to complete. 
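
The sci_controller_copy_sata_response() hunk a little above rebuilds a SATA D2H register FIS from two pieces of hardware memory: the first dword comes from the unsolicited frame header, the remaining dwords from the frame buffer. A standalone sketch of that stitch-up, using a plain 20-byte array in place of struct dev_to_host_fis; the hunk elides the source offset of the second copy, so the matching-offset assumption here is the editor's, not the driver's.

  #include <stdint.h>
  #include <string.h>

  #define D2H_FIS_SIZE 20u    /* a D2H register FIS is five dwords */

  /* dword 0 (FIS type, flags, status, error) is taken from the frame
   * header; the rest is copied from the same offsets in the frame buffer */
  static void copy_sata_response(uint8_t *response,
                                 const uint8_t *frame_header,
                                 const uint8_t *frame_buffer)
  {
          memcpy(response, frame_header, sizeof(uint32_t));
          memcpy(response + sizeof(uint32_t),
                 frame_buffer + sizeof(uint32_t),
                 D2H_FIS_SIZE - sizeof(uint32_t));
  }

  int main(void)
  {
          uint8_t hdr[D2H_FIS_SIZE] = { 0x34 }; /* 0x34 = D2H register FIS type */
          uint8_t buf[D2H_FIS_SIZE] = { 0 };
          uint8_t fis[D2H_FIS_SIZE];

          copy_sata_response(fis, hdr, buf);
          return fis[0] == 0x34 ? 0 : 1;
  }
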
+ * @ireq: the handle to the io request object to complete. */ -enum sci_status scic_controller_complete_io( - struct isci_host *ihost, - struct isci_remote_device *idev, - struct isci_request *ireq) +enum sci_status sci_controller_complete_io(struct isci_host *ihost, + struct isci_remote_device *idev, + struct isci_request *ireq) { enum sci_status status; u16 index; @@ -2862,7 +2683,7 @@ enum sci_status scic_controller_complete_io( /* XXX: Implement this function */ return SCI_FAILURE; case SCIC_READY: - status = scic_sds_remote_device_complete_io(ihost, idev, ireq); + status = sci_remote_device_complete_io(ihost, idev, ireq); if (status != SCI_SUCCESS) return status; @@ -2876,7 +2697,7 @@ enum sci_status scic_controller_complete_io( } -enum sci_status scic_controller_continue_io(struct isci_request *ireq) +enum sci_status sci_controller_continue_io(struct isci_request *ireq) { struct isci_host *ihost = ireq->owning_controller; @@ -2886,12 +2707,12 @@ enum sci_status scic_controller_continue_io(struct isci_request *ireq) } set_bit(IREQ_ACTIVE, &ireq->flags); - scic_sds_controller_post_request(ihost, scic_sds_request_get_post_context(ireq)); + sci_controller_post_request(ihost, sci_request_get_post_context(ireq)); return SCI_SUCCESS; } /** - * scic_controller_start_task() - This method is called by the SCIC user to + * sci_controller_start_task() - This method is called by the SCIC user to * send/start a framework task management request. * @controller: the handle to the controller object for which to start the task * management request. @@ -2899,10 +2720,9 @@ enum sci_status scic_controller_continue_io(struct isci_request *ireq) * the task management request. * @task_request: the handle to the task request object to start. */ -enum sci_task_status scic_controller_start_task( - struct isci_host *ihost, - struct isci_remote_device *idev, - struct isci_request *ireq) +enum sci_task_status sci_controller_start_task(struct isci_host *ihost, + struct isci_remote_device *idev, + struct isci_request *ireq) { enum sci_status status; @@ -2914,7 +2734,7 @@ enum sci_task_status scic_controller_start_task( return SCI_TASK_FAILURE_INVALID_STATE; } - status = scic_sds_remote_device_start_task(ihost, idev, ireq); + status = sci_remote_device_start_task(ihost, idev, ireq); switch (status) { case SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS: set_bit(IREQ_ACTIVE, &ireq->flags); @@ -2928,8 +2748,8 @@ enum sci_task_status scic_controller_start_task( case SCI_SUCCESS: set_bit(IREQ_ACTIVE, &ireq->flags); - scic_sds_controller_post_request(ihost, - scic_sds_request_get_post_context(ireq)); + sci_controller_post_request(ihost, + sci_request_get_post_context(ireq)); break; default: break; diff --git a/drivers/scsi/isci/host.h b/drivers/scsi/isci/host.h index 013f672a8fd..d87f21de180 100644 --- a/drivers/scsi/isci/host.h +++ b/drivers/scsi/isci/host.h @@ -69,12 +69,12 @@ struct scu_task_context; /** - * struct scic_power_control - + * struct sci_power_control - * * This structure defines the fields for managing power control for direct * attached disk devices. */ -struct scic_power_control { +struct sci_power_control { /** * This field is set when the power control timer is running and cleared when * it is not. @@ -99,18 +99,18 @@ struct scic_power_control { /** * This field is an array of phys that we are waiting on. 
The phys are direct - * mapped into requesters via struct scic_sds_phy.phy_index + * mapped into requesters via struct sci_phy.phy_index */ struct isci_phy *requesters[SCI_MAX_PHYS]; }; -struct scic_sds_port_configuration_agent; +struct sci_port_configuration_agent; typedef void (*port_config_fn)(struct isci_host *, - struct scic_sds_port_configuration_agent *, + struct sci_port_configuration_agent *, struct isci_port *, struct isci_phy *); -struct scic_sds_port_configuration_agent { +struct sci_port_configuration_agent { u16 phy_configured_mask; u16 phy_ready_mask; struct { @@ -149,13 +149,13 @@ struct isci_host { /* XXX can we time this externally */ struct sci_timer timer; /* XXX drop reference module params directly */ - union scic_user_parameters user_parameters; + struct sci_user_parameters user_parameters; /* XXX no need to be a union */ - union scic_oem_parameters oem_parameters; - struct scic_sds_port_configuration_agent port_agent; + struct sci_oem_params oem_parameters; + struct sci_port_configuration_agent port_agent; struct isci_remote_device *device_table[SCI_MAX_REMOTE_DEVICES]; - struct scic_remote_node_table available_remote_nodes; - struct scic_power_control power_control; + struct sci_remote_node_table available_remote_nodes; + struct sci_power_control power_control; u8 io_request_sequence[SCI_MAX_IO_REQUESTS]; struct scu_task_context *task_context_table; dma_addr_t task_context_dma; @@ -165,7 +165,7 @@ struct isci_host { u32 logical_port_entries; u32 remote_node_entries; u32 task_context_entries; - struct scic_sds_unsolicited_frame_control uf_control; + struct sci_unsolicited_frame_control uf_control; /* phy startup */ struct sci_timer phy_timer; @@ -206,10 +206,10 @@ struct isci_host { }; /** - * enum scic_sds_controller_states - This enumeration depicts all the states + * enum sci_controller_states - This enumeration depicts all the states * for the common controller state machine. */ -enum scic_sds_controller_states { +enum sci_controller_states { /** * Simply the initial state for the base controller state machine. */ @@ -360,14 +360,14 @@ static inline struct isci_host *dev_to_ihost(struct domain_device *dev) } /** - * scic_sds_controller_get_protocol_engine_group() - + * sci_controller_get_protocol_engine_group() - * * This macro returns the protocol engine group for this controller object. * Presently we only support protocol engine group 0 so just return that */ -#define scic_sds_controller_get_protocol_engine_group(controller) 0 +#define sci_controller_get_protocol_engine_group(controller) 0 -/* see scic_controller_io_tag_allocate|free for how seq and tci are built */ +/* see sci_controller_io_tag_allocate|free for how seq and tci are built */ #define ISCI_TAG(seq, tci) (((u16) (seq)) << 12 | tci) /* these are returned by the hardware, so sanitize them */ @@ -375,7 +375,7 @@ static inline struct isci_host *dev_to_ihost(struct domain_device *dev) #define ISCI_TAG_TCI(tag) ((tag) & (SCI_MAX_IO_REQUESTS-1)) /* expander attached sata devices require 3 rnc slots */ -static inline int scic_sds_remote_device_node_count(struct isci_remote_device *idev) +static inline int sci_remote_device_node_count(struct isci_remote_device *idev) { struct domain_device *dev = idev->domain_dev; @@ -386,23 +386,23 @@ static inline int scic_sds_remote_device_node_count(struct isci_remote_device *i } /** - * scic_sds_controller_set_invalid_phy() - + * sci_controller_set_invalid_phy() - * * This macro will set the bit in the invalid phy mask for this controller * object. 
This is used to control messages reported for invalid link up * notifications. */ -#define scic_sds_controller_set_invalid_phy(controller, phy) \ +#define sci_controller_set_invalid_phy(controller, phy) \ ((controller)->invalid_phy_mask |= (1 << (phy)->phy_index)) /** - * scic_sds_controller_clear_invalid_phy() - + * sci_controller_clear_invalid_phy() - * * This macro will clear the bit in the invalid phy mask for this controller * object. This is used to control messages reported for invalid link up * notifications. */ -#define scic_sds_controller_clear_invalid_phy(controller, phy) \ +#define sci_controller_clear_invalid_phy(controller, phy) \ ((controller)->invalid_phy_mask &= ~(1 << (phy)->phy_index)) static inline struct device *sciphy_to_dev(struct isci_phy *iphy) @@ -460,56 +460,53 @@ static inline bool is_c0(void) return isci_si_rev > ISCI_SI_REVB0; } -void scic_sds_controller_post_request(struct isci_host *ihost, +void sci_controller_post_request(struct isci_host *ihost, u32 request); -void scic_sds_controller_release_frame(struct isci_host *ihost, +void sci_controller_release_frame(struct isci_host *ihost, u32 frame_index); -void scic_sds_controller_copy_sata_response(void *response_buffer, +void sci_controller_copy_sata_response(void *response_buffer, void *frame_header, void *frame_buffer); -enum sci_status scic_sds_controller_allocate_remote_node_context(struct isci_host *ihost, +enum sci_status sci_controller_allocate_remote_node_context(struct isci_host *ihost, struct isci_remote_device *idev, u16 *node_id); -void scic_sds_controller_free_remote_node_context( +void sci_controller_free_remote_node_context( struct isci_host *ihost, struct isci_remote_device *idev, u16 node_id); -union scu_remote_node_context *scic_sds_controller_get_remote_node_context_buffer( - struct isci_host *ihost, - u16 node_id); -struct isci_request *scic_request_by_tag(struct isci_host *ihost, +struct isci_request *sci_request_by_tag(struct isci_host *ihost, u16 io_tag); -void scic_sds_controller_power_control_queue_insert( +void sci_controller_power_control_queue_insert( struct isci_host *ihost, struct isci_phy *iphy); -void scic_sds_controller_power_control_queue_remove( +void sci_controller_power_control_queue_remove( struct isci_host *ihost, struct isci_phy *iphy); -void scic_sds_controller_link_up( +void sci_controller_link_up( struct isci_host *ihost, struct isci_port *iport, struct isci_phy *iphy); -void scic_sds_controller_link_down( +void sci_controller_link_down( struct isci_host *ihost, struct isci_port *iport, struct isci_phy *iphy); -void scic_sds_controller_remote_device_stopped( +void sci_controller_remote_device_stopped( struct isci_host *ihost, struct isci_remote_device *idev); -void scic_sds_controller_copy_task_context( +void sci_controller_copy_task_context( struct isci_host *ihost, struct isci_request *ireq); -void scic_sds_controller_register_setup(struct isci_host *ihost); +void sci_controller_register_setup(struct isci_host *ihost); -enum sci_status scic_controller_continue_io(struct isci_request *ireq); +enum sci_status sci_controller_continue_io(struct isci_request *ireq); int isci_host_scan_finished(struct Scsi_Host *, unsigned long); void isci_host_scan_start(struct Scsi_Host *); u16 isci_alloc_tag(struct isci_host *ihost); @@ -536,33 +533,33 @@ void isci_host_remote_device_start_complete( struct isci_remote_device *, enum sci_status); -void scic_controller_disable_interrupts( +void sci_controller_disable_interrupts( struct isci_host *ihost); -enum sci_status 
scic_controller_start_io( +enum sci_status sci_controller_start_io( struct isci_host *ihost, struct isci_remote_device *idev, struct isci_request *ireq); -enum sci_task_status scic_controller_start_task( +enum sci_task_status sci_controller_start_task( struct isci_host *ihost, struct isci_remote_device *idev, struct isci_request *ireq); -enum sci_status scic_controller_terminate_request( +enum sci_status sci_controller_terminate_request( struct isci_host *ihost, struct isci_remote_device *idev, struct isci_request *ireq); -enum sci_status scic_controller_complete_io( +enum sci_status sci_controller_complete_io( struct isci_host *ihost, struct isci_remote_device *idev, struct isci_request *ireq); -void scic_sds_port_configuration_agent_construct( - struct scic_sds_port_configuration_agent *port_agent); +void sci_port_configuration_agent_construct( + struct sci_port_configuration_agent *port_agent); -enum sci_status scic_sds_port_configuration_agent_initialize( +enum sci_status sci_port_configuration_agent_initialize( struct isci_host *ihost, - struct scic_sds_port_configuration_agent *port_agent); + struct sci_port_configuration_agent *port_agent); #endif diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c index 68ca1a4f30a..8d9a8bfff4d 100644 --- a/drivers/scsi/isci/init.c +++ b/drivers/scsi/isci/init.c @@ -484,7 +484,7 @@ static int __devinit isci_pci_probe(struct pci_dev *pdev, const struct pci_devic orom = isci_request_oprom(pdev); for (i = 0; orom && i < ARRAY_SIZE(orom->ctrl); i++) { - if (scic_oem_parameters_validate(&orom->ctrl[i])) { + if (sci_oem_parameters_validate(&orom->ctrl[i])) { dev_warn(&pdev->dev, "[%d]: invalid oem parameters detected, falling back to firmware\n", i); devm_kfree(&pdev->dev, orom); @@ -554,7 +554,7 @@ static void __devexit isci_pci_remove(struct pci_dev *pdev) for_each_isci_host(i, ihost, pdev) { isci_unregister(ihost); isci_host_deinit(ihost); - scic_controller_disable_interrupts(ihost); + sci_controller_disable_interrupts(ihost); } } diff --git a/drivers/scsi/isci/isci.h b/drivers/scsi/isci/isci.h index 207328369ed..3afccfcb94e 100644 --- a/drivers/scsi/isci/isci.h +++ b/drivers/scsi/isci/isci.h @@ -304,7 +304,7 @@ enum sci_status { * This member indicates that the operation failed, the failure is * controller implementation specific, and the response data associated * with the request is not valid. You can query for the controller - * specific error information via scic_controller_get_request_status() + * specific error information via sci_controller_get_request_status() */ SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR, @@ -395,7 +395,7 @@ enum sci_status { /** * This value indicates that an unsupported PCI device ID has been * specified. This indicates that attempts to invoke - * scic_library_allocate_controller() will fail. + * sci_library_allocate_controller() will fail. */ SCI_FAILURE_UNSUPPORTED_PCI_DEVICE_ID @@ -493,7 +493,7 @@ irqreturn_t isci_error_isr(int vec, void *data); /* * Each timer is associated with a cancellation flag that is set when * del_timer() is called and checked in the timer callback function. This - * is needed since del_timer_sync() cannot be called with scic_lock held. + * is needed since del_timer_sync() cannot be called with sci_lock held. * For deinit however, del_timer_sync() is used without holding the lock. 
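
The ISCI_TAG()/ISCI_TAG_TCI() helpers renamed in host.h above pack a 4-bit sequence number and a task context index (TCI) into one 16-bit I/O tag, and mask the TCI on the way back out because the hardware echoes tags to the driver. A small self-contained illustration; MAX_IO_REQUESTS and the SEQ extraction are assumed stand-ins for the driver's SCI_MAX_IO_REQUESTS and ISCI_TAG_SEQ(), which this hunk does not show in full.

  #include <assert.h>
  #include <stdint.h>

  #define MAX_IO_REQUESTS 256u    /* assumed value; must be a power of two */

  #define TAG(seq, tci)   ((uint16_t)(((seq) << 12) | (tci)))
  #define TAG_TCI(tag)    ((tag) & (MAX_IO_REQUESTS - 1))
  #define TAG_SEQ(tag)    (((tag) >> 12) & 0xf)

  int main(void)
  {
          uint16_t tag = TAG(0x3, 0x42);

          assert(TAG_SEQ(tag) == 0x3);
          assert(TAG_TCI(tag) == 0x42);
          return 0;
  }
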
*/ struct sci_timer { diff --git a/drivers/scsi/isci/phy.c b/drivers/scsi/isci/phy.c index ca96b5ad0d5..0df9f713f48 100644 --- a/drivers/scsi/isci/phy.c +++ b/drivers/scsi/isci/phy.c @@ -67,25 +67,13 @@ enum sas_linkrate sci_phy_linkrate(struct isci_phy *iphy) return iphy->max_negotiated_speed; } -/* - * ***************************************************************************** - * * SCIC SDS PHY Internal Methods - * ***************************************************************************** */ - -/** - * This method will initialize the phy transport layer registers - * @sci_phy: - * @transport_layer_registers - * - * enum sci_status - */ -static enum sci_status scic_sds_phy_transport_layer_initialization( - struct isci_phy *iphy, - struct scu_transport_layer_registers __iomem *transport_layer_registers) +static enum sci_status +sci_phy_transport_layer_initialization(struct isci_phy *iphy, + struct scu_transport_layer_registers __iomem *reg) { u32 tl_control; - iphy->transport_layer_registers = transport_layer_registers; + iphy->transport_layer_registers = reg; writel(SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX, &iphy->transport_layer_registers->stp_rni); @@ -101,32 +89,23 @@ static enum sci_status scic_sds_phy_transport_layer_initialization( return SCI_SUCCESS; } -/** - * This method will initialize the phy link layer registers - * @sci_phy: - * @link_layer_registers: - * - * enum sci_status - */ static enum sci_status -scic_sds_phy_link_layer_initialization(struct isci_phy *iphy, - struct scu_link_layer_registers __iomem *link_layer_registers) +sci_phy_link_layer_initialization(struct isci_phy *iphy, + struct scu_link_layer_registers __iomem *reg) { - struct isci_host *ihost = - iphy->owning_port->owning_controller; + struct isci_host *ihost = iphy->owning_port->owning_controller; int phy_idx = iphy->phy_index; - struct sci_phy_user_params *phy_user = - &ihost->user_parameters.sds1.phys[phy_idx]; + struct sci_phy_user_params *phy_user = &ihost->user_parameters.phys[phy_idx]; struct sci_phy_oem_params *phy_oem = - &ihost->oem_parameters.sds1.phys[phy_idx]; + &ihost->oem_parameters.phys[phy_idx]; u32 phy_configuration; - struct scic_phy_cap phy_cap; + struct sci_phy_cap phy_cap; u32 parity_check = 0; u32 parity_count = 0; u32 llctl, link_rate; u32 clksm_value = 0; - iphy->link_layer_registers = link_layer_registers; + iphy->link_layer_registers = reg; /* Set our IDENTIFY frame data */ #define SCI_END_DEVICE 0x01 @@ -169,7 +148,7 @@ scic_sds_phy_link_layer_initialization(struct isci_phy *iphy, phy_cap.gen3_no_ssc = 1; phy_cap.gen2_no_ssc = 1; phy_cap.gen1_no_ssc = 1; - if (ihost->oem_parameters.sds1.controller.do_enable_ssc == true) { + if (ihost->oem_parameters.controller.do_enable_ssc == true) { phy_cap.gen3_ssc = 1; phy_cap.gen2_ssc = 1; phy_cap.gen1_ssc = 1; @@ -216,7 +195,7 @@ scic_sds_phy_link_layer_initialization(struct isci_phy *iphy, &iphy->link_layer_registers->afe_lookup_table_control); llctl = SCU_SAS_LLCTL_GEN_VAL(NO_OUTBOUND_TASK_TIMEOUT, - (u8)ihost->user_parameters.sds1.no_outbound_task_timeout); + (u8)ihost->user_parameters.no_outbound_task_timeout); switch(phy_user->max_speed_generation) { case SCIC_SDS_PARM_GEN3_SPEED: @@ -289,7 +268,7 @@ done: struct isci_port *phy_get_non_dummy_port( struct isci_phy *iphy) { - if (scic_sds_port_get_index(iphy->owning_port) == SCIC_SDS_DUMMY_PORT) + if (sci_port_get_index(iphy->owning_port) == SCIC_SDS_DUMMY_PORT) return NULL; return iphy->owning_port; @@ -302,7 +281,7 @@ struct isci_port *phy_get_non_dummy_port( * * */ -void 
scic_sds_phy_set_port( +void sci_phy_set_port( struct isci_phy *iphy, struct isci_port *iport) { @@ -310,33 +289,23 @@ void scic_sds_phy_set_port( if (iphy->bcn_received_while_port_unassigned) { iphy->bcn_received_while_port_unassigned = false; - scic_sds_port_broadcast_change_received(iphy->owning_port, iphy); + sci_port_broadcast_change_received(iphy->owning_port, iphy); } } -/** - * This method will initialize the constructed phy - * @sci_phy: - * @link_layer_registers: - * - * enum sci_status - */ -enum sci_status scic_sds_phy_initialize( - struct isci_phy *iphy, - struct scu_transport_layer_registers __iomem *transport_layer_registers, - struct scu_link_layer_registers __iomem *link_layer_registers) +enum sci_status sci_phy_initialize(struct isci_phy *iphy, + struct scu_transport_layer_registers __iomem *tl, + struct scu_link_layer_registers __iomem *ll) { /* Perfrom the initialization of the TL hardware */ - scic_sds_phy_transport_layer_initialization( - iphy, - transport_layer_registers); + sci_phy_transport_layer_initialization(iphy, tl); /* Perofrm the initialization of the PE hardware */ - scic_sds_phy_link_layer_initialization(iphy, link_layer_registers); + sci_phy_link_layer_initialization(iphy, ll); - /* - * There is nothing that needs to be done in this state just - * transition to the stopped state. */ + /* There is nothing that needs to be done in this state just + * transition to the stopped state + */ sci_change_state(&iphy->sm, SCI_PHY_STOPPED); return SCI_SUCCESS; @@ -351,9 +320,7 @@ enum sci_status scic_sds_phy_initialize( * This will either be the RNi for the device or an invalid RNi if there * is no current device assigned to the phy. */ -void scic_sds_phy_setup_transport( - struct isci_phy *iphy, - u32 device_id) +void sci_phy_setup_transport(struct isci_phy *iphy, u32 device_id) { u32 tl_control; @@ -368,15 +335,7 @@ void scic_sds_phy_setup_transport( writel(tl_control, &iphy->transport_layer_registers->control); } -/** - * - * @sci_phy: The phy object to be suspended. - * - * This function will perform the register reads/writes to suspend the SCU - * hardware protocol engine. 
none - */ -static void scic_sds_phy_suspend( - struct isci_phy *iphy) +static void sci_phy_suspend(struct isci_phy *iphy) { u32 scu_sas_pcfg_value; @@ -386,12 +345,10 @@ static void scic_sds_phy_suspend( writel(scu_sas_pcfg_value, &iphy->link_layer_registers->phy_configuration); - scic_sds_phy_setup_transport( - iphy, - SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX); + sci_phy_setup_transport(iphy, SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX); } -void scic_sds_phy_resume(struct isci_phy *iphy) +void sci_phy_resume(struct isci_phy *iphy) { u32 scu_sas_pcfg_value; @@ -402,34 +359,28 @@ void scic_sds_phy_resume(struct isci_phy *iphy) &iphy->link_layer_registers->phy_configuration); } -void scic_sds_phy_get_sas_address(struct isci_phy *iphy, - struct sci_sas_address *sas_address) +void sci_phy_get_sas_address(struct isci_phy *iphy, struct sci_sas_address *sas) { - sas_address->high = readl(&iphy->link_layer_registers->source_sas_address_high); - sas_address->low = readl(&iphy->link_layer_registers->source_sas_address_low); + sas->high = readl(&iphy->link_layer_registers->source_sas_address_high); + sas->low = readl(&iphy->link_layer_registers->source_sas_address_low); } -void scic_sds_phy_get_attached_sas_address(struct isci_phy *iphy, - struct sci_sas_address *sas_address) +void sci_phy_get_attached_sas_address(struct isci_phy *iphy, struct sci_sas_address *sas) { struct sas_identify_frame *iaf; iaf = &iphy->frame_rcvd.iaf; - memcpy(sas_address, iaf->sas_addr, SAS_ADDR_SIZE); + memcpy(sas, iaf->sas_addr, SAS_ADDR_SIZE); } -void scic_sds_phy_get_protocols(struct isci_phy *iphy, - struct scic_phy_proto *protocols) +void sci_phy_get_protocols(struct isci_phy *iphy, struct sci_phy_proto *proto) { - protocols->all = - (u16)(readl(&iphy-> - link_layer_registers->transmit_identification) & - 0x0000FFFF); + proto->all = readl(&iphy->link_layer_registers->transmit_identification); } -enum sci_status scic_sds_phy_start(struct isci_phy *iphy) +enum sci_status sci_phy_start(struct isci_phy *iphy) { - enum scic_sds_phy_states state = iphy->sm.current_state_id; + enum sci_phy_states state = iphy->sm.current_state_id; if (state != SCI_PHY_STOPPED) { dev_dbg(sciphy_to_dev(iphy), @@ -441,9 +392,9 @@ enum sci_status scic_sds_phy_start(struct isci_phy *iphy) return SCI_SUCCESS; } -enum sci_status scic_sds_phy_stop(struct isci_phy *iphy) +enum sci_status sci_phy_stop(struct isci_phy *iphy) { - enum scic_sds_phy_states state = iphy->sm.current_state_id; + enum sci_phy_states state = iphy->sm.current_state_id; switch (state) { case SCI_PHY_SUB_INITIAL: @@ -467,9 +418,9 @@ enum sci_status scic_sds_phy_stop(struct isci_phy *iphy) return SCI_SUCCESS; } -enum sci_status scic_sds_phy_reset(struct isci_phy *iphy) +enum sci_status sci_phy_reset(struct isci_phy *iphy) { - enum scic_sds_phy_states state = iphy->sm.current_state_id; + enum sci_phy_states state = iphy->sm.current_state_id; if (state != SCI_PHY_READY) { dev_dbg(sciphy_to_dev(iphy), @@ -481,9 +432,9 @@ enum sci_status scic_sds_phy_reset(struct isci_phy *iphy) return SCI_SUCCESS; } -enum sci_status scic_sds_phy_consume_power_handler(struct isci_phy *iphy) +enum sci_status sci_phy_consume_power_handler(struct isci_phy *iphy) { - enum scic_sds_phy_states state = iphy->sm.current_state_id; + enum sci_phy_states state = iphy->sm.current_state_id; switch (state) { case SCI_PHY_SUB_AWAIT_SAS_POWER: { @@ -528,55 +479,37 @@ enum sci_status scic_sds_phy_consume_power_handler(struct isci_phy *iphy) } } -/* - * 
***************************************************************************** - * * SCIC SDS PHY HELPER FUNCTIONS - * ***************************************************************************** */ - - -/** - * - * @sci_phy: The phy object that received SAS PHY DETECTED. - * - * This method continues the link training for the phy as if it were a SAS PHY - * instead of a SATA PHY. This is done because the completion queue had a SAS - * PHY DETECTED event when the state machine was expecting a SATA PHY event. - * none - */ -static void scic_sds_phy_start_sas_link_training( - struct isci_phy *iphy) +static void sci_phy_start_sas_link_training(struct isci_phy *iphy) { + /* continue the link training for the phy as if it were a SAS PHY + * instead of a SATA PHY. This is done because the completion queue had a SAS + * PHY DETECTED event when the state machine was expecting a SATA PHY event. + */ u32 phy_control; - phy_control = - readl(&iphy->link_layer_registers->phy_configuration); + phy_control = readl(&iphy->link_layer_registers->phy_configuration); phy_control |= SCU_SAS_PCFG_GEN_BIT(SATA_SPINUP_HOLD); writel(phy_control, - &iphy->link_layer_registers->phy_configuration); + &iphy->link_layer_registers->phy_configuration); sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_SAS_SPEED_EN); iphy->protocol = SCIC_SDS_PHY_PROTOCOL_SAS; } -/** - * - * @sci_phy: The phy object that received a SATA SPINUP HOLD event - * - * This method continues the link training for the phy as if it were a SATA PHY - * instead of a SAS PHY. This is done because the completion queue had a SATA - * SPINUP HOLD event when the state machine was expecting a SAS PHY event. none - */ -static void scic_sds_phy_start_sata_link_training( - struct isci_phy *iphy) +static void sci_phy_start_sata_link_training(struct isci_phy *iphy) { + /* This method continues the link training for the phy as if it were a SATA PHY + * instead of a SAS PHY. This is done because the completion queue had a SATA + * SPINUP HOLD event when the state machine was expecting a SAS PHY event. none + */ sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_SATA_POWER); iphy->protocol = SCIC_SDS_PHY_PROTOCOL_SATA; } /** - * scic_sds_phy_complete_link_training - perform processing common to + * sci_phy_complete_link_training - perform processing common to * all protocols upon completion of link training. * @sci_phy: This parameter specifies the phy object for which link training * has completed. @@ -586,30 +519,28 @@ static void scic_sds_phy_start_sata_link_training( * sub-state machine. 
* */ -static void scic_sds_phy_complete_link_training( - struct isci_phy *iphy, - enum sas_linkrate max_link_rate, - u32 next_state) +static void sci_phy_complete_link_training(struct isci_phy *iphy, + enum sas_linkrate max_link_rate, + u32 next_state) { iphy->max_negotiated_speed = max_link_rate; sci_change_state(&iphy->sm, next_state); } -enum sci_status scic_sds_phy_event_handler(struct isci_phy *iphy, - u32 event_code) +enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code) { - enum scic_sds_phy_states state = iphy->sm.current_state_id; + enum sci_phy_states state = iphy->sm.current_state_id; switch (state) { case SCI_PHY_SUB_AWAIT_OSSP_EN: switch (scu_get_event_code(event_code)) { case SCU_EVENT_SAS_PHY_DETECTED: - scic_sds_phy_start_sas_link_training(iphy); + sci_phy_start_sas_link_training(iphy); iphy->is_in_link_training = true; break; case SCU_EVENT_SATA_SPINUP_HOLD: - scic_sds_phy_start_sata_link_training(iphy); + sci_phy_start_sata_link_training(iphy); iphy->is_in_link_training = true; break; default: @@ -630,30 +561,24 @@ enum sci_status scic_sds_phy_event_handler(struct isci_phy *iphy, break; case SCU_EVENT_SAS_15: case SCU_EVENT_SAS_15_SSC: - scic_sds_phy_complete_link_training( - iphy, - SAS_LINK_RATE_1_5_GBPS, - SCI_PHY_SUB_AWAIT_IAF_UF); + sci_phy_complete_link_training(iphy, SAS_LINK_RATE_1_5_GBPS, + SCI_PHY_SUB_AWAIT_IAF_UF); break; case SCU_EVENT_SAS_30: case SCU_EVENT_SAS_30_SSC: - scic_sds_phy_complete_link_training( - iphy, - SAS_LINK_RATE_3_0_GBPS, - SCI_PHY_SUB_AWAIT_IAF_UF); + sci_phy_complete_link_training(iphy, SAS_LINK_RATE_3_0_GBPS, + SCI_PHY_SUB_AWAIT_IAF_UF); break; case SCU_EVENT_SAS_60: case SCU_EVENT_SAS_60_SSC: - scic_sds_phy_complete_link_training( - iphy, - SAS_LINK_RATE_6_0_GBPS, - SCI_PHY_SUB_AWAIT_IAF_UF); + sci_phy_complete_link_training(iphy, SAS_LINK_RATE_6_0_GBPS, + SCI_PHY_SUB_AWAIT_IAF_UF); break; case SCU_EVENT_SATA_SPINUP_HOLD: /* * We were doing SAS PHY link training and received a SATA PHY event * continue OOB/SN as if this were a SATA PHY */ - scic_sds_phy_start_sata_link_training(iphy); + sci_phy_start_sata_link_training(iphy); break; case SCU_EVENT_LINK_FAILURE: /* Link failure change state back to the starting state */ @@ -673,14 +598,14 @@ enum sci_status scic_sds_phy_event_handler(struct isci_phy *iphy, switch (scu_get_event_code(event_code)) { case SCU_EVENT_SAS_PHY_DETECTED: /* Backup the state machine */ - scic_sds_phy_start_sas_link_training(iphy); + sci_phy_start_sas_link_training(iphy); break; case SCU_EVENT_SATA_SPINUP_HOLD: /* We were doing SAS PHY link training and received a * SATA PHY event continue OOB/SN as if this were a * SATA PHY */ - scic_sds_phy_start_sata_link_training(iphy); + sci_phy_start_sata_link_training(iphy); break; case SCU_EVENT_RECEIVED_IDENTIFY_TIMEOUT: case SCU_EVENT_LINK_FAILURE: @@ -727,7 +652,7 @@ enum sci_status scic_sds_phy_event_handler(struct isci_phy *iphy, /* There has been a change in the phy type before OOB/SN for the * SATA finished start down the SAS link traning path. */ - scic_sds_phy_start_sas_link_training(iphy); + sci_phy_start_sas_link_training(iphy); break; default: @@ -760,7 +685,7 @@ enum sci_status scic_sds_phy_event_handler(struct isci_phy *iphy, /* There has been a change in the phy type before OOB/SN for the * SATA finished start down the SAS link traning path. 
*/ - scic_sds_phy_start_sas_link_training(iphy); + sci_phy_start_sas_link_training(iphy); break; default: dev_warn(sciphy_to_dev(iphy), @@ -781,24 +706,18 @@ enum sci_status scic_sds_phy_event_handler(struct isci_phy *iphy, break; case SCU_EVENT_SATA_15: case SCU_EVENT_SATA_15_SSC: - scic_sds_phy_complete_link_training( - iphy, - SAS_LINK_RATE_1_5_GBPS, - SCI_PHY_SUB_AWAIT_SIG_FIS_UF); + sci_phy_complete_link_training(iphy, SAS_LINK_RATE_1_5_GBPS, + SCI_PHY_SUB_AWAIT_SIG_FIS_UF); break; case SCU_EVENT_SATA_30: case SCU_EVENT_SATA_30_SSC: - scic_sds_phy_complete_link_training( - iphy, - SAS_LINK_RATE_3_0_GBPS, - SCI_PHY_SUB_AWAIT_SIG_FIS_UF); + sci_phy_complete_link_training(iphy, SAS_LINK_RATE_3_0_GBPS, + SCI_PHY_SUB_AWAIT_SIG_FIS_UF); break; case SCU_EVENT_SATA_60: case SCU_EVENT_SATA_60_SSC: - scic_sds_phy_complete_link_training( - iphy, - SAS_LINK_RATE_6_0_GBPS, - SCI_PHY_SUB_AWAIT_SIG_FIS_UF); + sci_phy_complete_link_training(iphy, SAS_LINK_RATE_6_0_GBPS, + SCI_PHY_SUB_AWAIT_SIG_FIS_UF); break; case SCU_EVENT_LINK_FAILURE: /* Link failure change state back to the starting state */ @@ -808,7 +727,7 @@ enum sci_status scic_sds_phy_event_handler(struct isci_phy *iphy, /* * There has been a change in the phy type before OOB/SN for the * SATA finished start down the SAS link traning path. */ - scic_sds_phy_start_sas_link_training(iphy); + sci_phy_start_sas_link_training(iphy); break; default: dev_warn(sciphy_to_dev(iphy), @@ -851,7 +770,7 @@ enum sci_status scic_sds_phy_event_handler(struct isci_phy *iphy, case SCU_EVENT_BROADCAST_CHANGE: /* Broadcast change received. Notify the port. */ if (phy_get_non_dummy_port(iphy) != NULL) - scic_sds_port_broadcast_change_received(iphy->owning_port, iphy); + sci_port_broadcast_change_received(iphy->owning_port, iphy); else iphy->bcn_received_while_port_unassigned = true; break; @@ -886,10 +805,9 @@ enum sci_status scic_sds_phy_event_handler(struct isci_phy *iphy, } } -enum sci_status scic_sds_phy_frame_handler(struct isci_phy *iphy, - u32 frame_index) +enum sci_status sci_phy_frame_handler(struct isci_phy *iphy, u32 frame_index) { - enum scic_sds_phy_states state = iphy->sm.current_state_id; + enum sci_phy_states state = iphy->sm.current_state_id; struct isci_host *ihost = iphy->owning_port->owning_controller; enum sci_status result; unsigned long flags; @@ -899,9 +817,9 @@ enum sci_status scic_sds_phy_frame_handler(struct isci_phy *iphy, u32 *frame_words; struct sas_identify_frame iaf; - result = scic_sds_unsolicited_frame_control_get_header(&ihost->uf_control, - frame_index, - (void **)&frame_words); + result = sci_unsolicited_frame_control_get_header(&ihost->uf_control, + frame_index, + (void **)&frame_words); if (result != SCI_SUCCESS) return result; @@ -933,15 +851,15 @@ enum sci_status scic_sds_phy_frame_handler(struct isci_phy *iphy, "unexpected frame id %x\n", __func__, frame_index); - scic_sds_controller_release_frame(ihost, frame_index); + sci_controller_release_frame(ihost, frame_index); return result; } case SCI_PHY_SUB_AWAIT_SIG_FIS_UF: { struct dev_to_host_fis *frame_header; u32 *fis_frame_data; - result = scic_sds_unsolicited_frame_control_get_header( - &(scic_sds_phy_get_controller(iphy)->uf_control), + result = sci_unsolicited_frame_control_get_header( + &(sci_phy_get_controller(iphy)->uf_control), frame_index, (void **)&frame_header); @@ -950,14 +868,14 @@ enum sci_status scic_sds_phy_frame_handler(struct isci_phy *iphy, if ((frame_header->fis_type == FIS_REGD2H) && !(frame_header->status & ATA_BUSY)) { - 
scic_sds_unsolicited_frame_control_get_buffer(&ihost->uf_control, - frame_index, - (void **)&fis_frame_data); + sci_unsolicited_frame_control_get_buffer(&ihost->uf_control, + frame_index, + (void **)&fis_frame_data); spin_lock_irqsave(&iphy->sas_phy.frame_rcvd_lock, flags); - scic_sds_controller_copy_sata_response(&iphy->frame_rcvd.fis, - frame_header, - fis_frame_data); + sci_controller_copy_sata_response(&iphy->frame_rcvd.fis, + frame_header, + fis_frame_data); spin_unlock_irqrestore(&iphy->sas_phy.frame_rcvd_lock, flags); /* got IAF we can now go to the await spinup semaphore state */ @@ -971,7 +889,7 @@ enum sci_status scic_sds_phy_frame_handler(struct isci_phy *iphy, __func__, frame_index); /* Regardless of the result we are done with this frame with it */ - scic_sds_controller_release_frame(ihost, frame_index); + sci_controller_release_frame(ihost, frame_index); return result; } @@ -983,7 +901,7 @@ enum sci_status scic_sds_phy_frame_handler(struct isci_phy *iphy, } -static void scic_sds_phy_starting_initial_substate_enter(struct sci_base_state_machine *sm) +static void sci_phy_starting_initial_substate_enter(struct sci_base_state_machine *sm) { struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); @@ -991,71 +909,71 @@ static void scic_sds_phy_starting_initial_substate_enter(struct sci_base_state_m sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_OSSP_EN); } -static void scic_sds_phy_starting_await_sas_power_substate_enter(struct sci_base_state_machine *sm) +static void sci_phy_starting_await_sas_power_substate_enter(struct sci_base_state_machine *sm) { struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); struct isci_host *ihost = iphy->owning_port->owning_controller; - scic_sds_controller_power_control_queue_insert(ihost, iphy); + sci_controller_power_control_queue_insert(ihost, iphy); } -static void scic_sds_phy_starting_await_sas_power_substate_exit(struct sci_base_state_machine *sm) +static void sci_phy_starting_await_sas_power_substate_exit(struct sci_base_state_machine *sm) { struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); struct isci_host *ihost = iphy->owning_port->owning_controller; - scic_sds_controller_power_control_queue_remove(ihost, iphy); + sci_controller_power_control_queue_remove(ihost, iphy); } -static void scic_sds_phy_starting_await_sata_power_substate_enter(struct sci_base_state_machine *sm) +static void sci_phy_starting_await_sata_power_substate_enter(struct sci_base_state_machine *sm) { struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); struct isci_host *ihost = iphy->owning_port->owning_controller; - scic_sds_controller_power_control_queue_insert(ihost, iphy); + sci_controller_power_control_queue_insert(ihost, iphy); } -static void scic_sds_phy_starting_await_sata_power_substate_exit(struct sci_base_state_machine *sm) +static void sci_phy_starting_await_sata_power_substate_exit(struct sci_base_state_machine *sm) { struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); struct isci_host *ihost = iphy->owning_port->owning_controller; - scic_sds_controller_power_control_queue_remove(ihost, iphy); + sci_controller_power_control_queue_remove(ihost, iphy); } -static void scic_sds_phy_starting_await_sata_phy_substate_enter(struct sci_base_state_machine *sm) +static void sci_phy_starting_await_sata_phy_substate_enter(struct sci_base_state_machine *sm) { struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); sci_mod_timer(&iphy->sata_timer, SCIC_SDS_SATA_LINK_TRAINING_TIMEOUT); } -static void 
scic_sds_phy_starting_await_sata_phy_substate_exit(struct sci_base_state_machine *sm) +static void sci_phy_starting_await_sata_phy_substate_exit(struct sci_base_state_machine *sm) { struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); sci_del_timer(&iphy->sata_timer); } -static void scic_sds_phy_starting_await_sata_speed_substate_enter(struct sci_base_state_machine *sm) +static void sci_phy_starting_await_sata_speed_substate_enter(struct sci_base_state_machine *sm) { struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); sci_mod_timer(&iphy->sata_timer, SCIC_SDS_SATA_LINK_TRAINING_TIMEOUT); } -static void scic_sds_phy_starting_await_sata_speed_substate_exit(struct sci_base_state_machine *sm) +static void sci_phy_starting_await_sata_speed_substate_exit(struct sci_base_state_machine *sm) { struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); sci_del_timer(&iphy->sata_timer); } -static void scic_sds_phy_starting_await_sig_fis_uf_substate_enter(struct sci_base_state_machine *sm) +static void sci_phy_starting_await_sig_fis_uf_substate_enter(struct sci_base_state_machine *sm) { struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); - if (scic_sds_port_link_detected(iphy->owning_port, iphy)) { + if (sci_port_link_detected(iphy->owning_port, iphy)) { /* * Clear the PE suspend condition so we can actually @@ -1063,7 +981,7 @@ static void scic_sds_phy_starting_await_sig_fis_uf_substate_enter(struct sci_bas * The hardware will not respond to the XRDY until the PE * suspend condition is cleared. */ - scic_sds_phy_resume(iphy); + sci_phy_resume(iphy); sci_mod_timer(&iphy->sata_timer, SCIC_SDS_SIGNATURE_FIS_TIMEOUT); @@ -1071,14 +989,14 @@ static void scic_sds_phy_starting_await_sig_fis_uf_substate_enter(struct sci_bas iphy->is_in_link_training = false; } -static void scic_sds_phy_starting_await_sig_fis_uf_substate_exit(struct sci_base_state_machine *sm) +static void sci_phy_starting_await_sig_fis_uf_substate_exit(struct sci_base_state_machine *sm) { struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); sci_del_timer(&iphy->sata_timer); } -static void scic_sds_phy_starting_final_substate_enter(struct sci_base_state_machine *sm) +static void sci_phy_starting_final_substate_enter(struct sci_base_state_machine *sm) { struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); @@ -1169,7 +1087,7 @@ static void scu_link_layer_tx_hard_reset( &iphy->link_layer_registers->phy_configuration); } -static void scic_sds_phy_stopped_state_enter(struct sci_base_state_machine *sm) +static void sci_phy_stopped_state_enter(struct sci_base_state_machine *sm) { struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); @@ -1182,12 +1100,12 @@ static void scic_sds_phy_stopped_state_enter(struct sci_base_state_machine *sm) scu_link_layer_stop_protocol_engine(iphy); if (iphy->sm.previous_state_id != SCI_PHY_INITIAL) - scic_sds_controller_link_down(scic_sds_phy_get_controller(iphy), + sci_controller_link_down(sci_phy_get_controller(iphy), phy_get_non_dummy_port(iphy), iphy); } -static void scic_sds_phy_starting_state_enter(struct sci_base_state_machine *sm) +static void sci_phy_starting_state_enter(struct sci_base_state_machine *sm) { struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); @@ -1199,31 +1117,31 @@ static void scic_sds_phy_starting_state_enter(struct sci_base_state_machine *sm) iphy->bcn_received_while_port_unassigned = false; if (iphy->sm.previous_state_id == SCI_PHY_READY) - scic_sds_controller_link_down(scic_sds_phy_get_controller(iphy), + 
sci_controller_link_down(sci_phy_get_controller(iphy), phy_get_non_dummy_port(iphy), iphy); sci_change_state(&iphy->sm, SCI_PHY_SUB_INITIAL); } -static void scic_sds_phy_ready_state_enter(struct sci_base_state_machine *sm) +static void sci_phy_ready_state_enter(struct sci_base_state_machine *sm) { struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); - scic_sds_controller_link_up(scic_sds_phy_get_controller(iphy), + sci_controller_link_up(sci_phy_get_controller(iphy), phy_get_non_dummy_port(iphy), iphy); } -static void scic_sds_phy_ready_state_exit(struct sci_base_state_machine *sm) +static void sci_phy_ready_state_exit(struct sci_base_state_machine *sm) { struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); - scic_sds_phy_suspend(iphy); + sci_phy_suspend(iphy); } -static void scic_sds_phy_resetting_state_enter(struct sci_base_state_machine *sm) +static void sci_phy_resetting_state_enter(struct sci_base_state_machine *sm) { struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); @@ -1231,7 +1149,7 @@ static void scic_sds_phy_resetting_state_enter(struct sci_base_state_machine *sm * the resetting state we don't notify the user regarding link up and * link down notifications */ - scic_sds_port_deactivate_phy(iphy->owning_port, iphy, false); + sci_port_deactivate_phy(iphy->owning_port, iphy, false); if (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SAS) { scu_link_layer_tx_hard_reset(iphy); @@ -1243,57 +1161,57 @@ static void scic_sds_phy_resetting_state_enter(struct sci_base_state_machine *sm } } -static const struct sci_base_state scic_sds_phy_state_table[] = { +static const struct sci_base_state sci_phy_state_table[] = { [SCI_PHY_INITIAL] = { }, [SCI_PHY_STOPPED] = { - .enter_state = scic_sds_phy_stopped_state_enter, + .enter_state = sci_phy_stopped_state_enter, }, [SCI_PHY_STARTING] = { - .enter_state = scic_sds_phy_starting_state_enter, + .enter_state = sci_phy_starting_state_enter, }, [SCI_PHY_SUB_INITIAL] = { - .enter_state = scic_sds_phy_starting_initial_substate_enter, + .enter_state = sci_phy_starting_initial_substate_enter, }, [SCI_PHY_SUB_AWAIT_OSSP_EN] = { }, [SCI_PHY_SUB_AWAIT_SAS_SPEED_EN] = { }, [SCI_PHY_SUB_AWAIT_IAF_UF] = { }, [SCI_PHY_SUB_AWAIT_SAS_POWER] = { - .enter_state = scic_sds_phy_starting_await_sas_power_substate_enter, - .exit_state = scic_sds_phy_starting_await_sas_power_substate_exit, + .enter_state = sci_phy_starting_await_sas_power_substate_enter, + .exit_state = sci_phy_starting_await_sas_power_substate_exit, }, [SCI_PHY_SUB_AWAIT_SATA_POWER] = { - .enter_state = scic_sds_phy_starting_await_sata_power_substate_enter, - .exit_state = scic_sds_phy_starting_await_sata_power_substate_exit + .enter_state = sci_phy_starting_await_sata_power_substate_enter, + .exit_state = sci_phy_starting_await_sata_power_substate_exit }, [SCI_PHY_SUB_AWAIT_SATA_PHY_EN] = { - .enter_state = scic_sds_phy_starting_await_sata_phy_substate_enter, - .exit_state = scic_sds_phy_starting_await_sata_phy_substate_exit + .enter_state = sci_phy_starting_await_sata_phy_substate_enter, + .exit_state = sci_phy_starting_await_sata_phy_substate_exit }, [SCI_PHY_SUB_AWAIT_SATA_SPEED_EN] = { - .enter_state = scic_sds_phy_starting_await_sata_speed_substate_enter, - .exit_state = scic_sds_phy_starting_await_sata_speed_substate_exit + .enter_state = sci_phy_starting_await_sata_speed_substate_enter, + .exit_state = sci_phy_starting_await_sata_speed_substate_exit }, [SCI_PHY_SUB_AWAIT_SIG_FIS_UF] = { - .enter_state = scic_sds_phy_starting_await_sig_fis_uf_substate_enter, - .exit_state = 
scic_sds_phy_starting_await_sig_fis_uf_substate_exit + .enter_state = sci_phy_starting_await_sig_fis_uf_substate_enter, + .exit_state = sci_phy_starting_await_sig_fis_uf_substate_exit }, [SCI_PHY_SUB_FINAL] = { - .enter_state = scic_sds_phy_starting_final_substate_enter, + .enter_state = sci_phy_starting_final_substate_enter, }, [SCI_PHY_READY] = { - .enter_state = scic_sds_phy_ready_state_enter, - .exit_state = scic_sds_phy_ready_state_exit, + .enter_state = sci_phy_ready_state_enter, + .exit_state = sci_phy_ready_state_exit, }, [SCI_PHY_RESETTING] = { - .enter_state = scic_sds_phy_resetting_state_enter, + .enter_state = sci_phy_resetting_state_enter, }, [SCI_PHY_FINAL] = { }, }; -void scic_sds_phy_construct(struct isci_phy *iphy, +void sci_phy_construct(struct isci_phy *iphy, struct isci_port *iport, u8 phy_index) { - sci_init_sm(&iphy->sm, scic_sds_phy_state_table, SCI_PHY_INITIAL); + sci_init_sm(&iphy->sm, sci_phy_state_table, SCI_PHY_INITIAL); /* Copy the rest of the input data to our locals */ iphy->owning_port = iport; @@ -1309,14 +1227,13 @@ void scic_sds_phy_construct(struct isci_phy *iphy, void isci_phy_init(struct isci_phy *iphy, struct isci_host *ihost, int index) { - union scic_oem_parameters oem; + struct sci_oem_params *oem = &ihost->oem_parameters; u64 sci_sas_addr; __be64 sas_addr; - scic_oem_parameters_get(ihost, &oem); - sci_sas_addr = oem.sds1.phys[index].sas_address.high; + sci_sas_addr = oem->phys[index].sas_address.high; sci_sas_addr <<= 32; - sci_sas_addr |= oem.sds1.phys[index].sas_address.low; + sci_sas_addr |= oem->phys[index].sas_address.low; sas_addr = cpu_to_be64(sci_sas_addr); memcpy(iphy->sas_addr, &sas_addr, sizeof(sas_addr)); @@ -1365,14 +1282,14 @@ int isci_phy_control(struct asd_sas_phy *sas_phy, switch (func) { case PHY_FUNC_DISABLE: spin_lock_irqsave(&ihost->scic_lock, flags); - scic_sds_phy_stop(iphy); + sci_phy_stop(iphy); spin_unlock_irqrestore(&ihost->scic_lock, flags); break; case PHY_FUNC_LINK_RESET: spin_lock_irqsave(&ihost->scic_lock, flags); - scic_sds_phy_stop(iphy); - scic_sds_phy_start(iphy); + sci_phy_stop(iphy); + sci_phy_start(iphy); spin_unlock_irqrestore(&ihost->scic_lock, flags); break; diff --git a/drivers/scsi/isci/phy.h b/drivers/scsi/isci/phy.h index 19aa444517b..5d2c1b4906a 100644 --- a/drivers/scsi/isci/phy.h +++ b/drivers/scsi/isci/phy.h @@ -76,7 +76,7 @@ */ #define SCIC_SDS_SATA_LINK_TRAINING_TIMEOUT 250 -enum scic_sds_phy_protocol { +enum sci_phy_protocol { SCIC_SDS_PHY_PROTOCOL_UNKNOWN, SCIC_SDS_PHY_PROTOCOL_SAS, SCIC_SDS_PHY_PROTOCOL_SATA, @@ -95,7 +95,7 @@ struct isci_phy { struct sci_base_state_machine sm; struct isci_port *owning_port; enum sas_linkrate max_negotiated_speed; - enum scic_sds_phy_protocol protocol; + enum sci_phy_protocol protocol; u8 phy_index; bool bcn_received_while_port_unassigned; bool is_in_link_training; @@ -118,7 +118,7 @@ static inline struct isci_phy *to_iphy(struct asd_sas_phy *sas_phy) return iphy; } -struct scic_phy_cap { +struct sci_phy_cap { union { struct { /* @@ -147,7 +147,7 @@ struct scic_phy_cap { } __packed; /* this data structure reflects the link layer transmit identification reg */ -struct scic_phy_proto { +struct sci_phy_proto { union { struct { u16 _r_a:1; @@ -167,12 +167,12 @@ struct scic_phy_proto { /** - * struct scic_phy_properties - This structure defines the properties common to + * struct sci_phy_properties - This structure defines the properties common to * all phys that can be retrieved. 
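
isci_phy_init() in the phy.c hunk above now reads the per-phy SAS address straight from the embedded struct sci_oem_params: the high and low 32-bit halves are joined into a 64-bit value and stored big-endian, so the most significant byte lands first in the phy's sas_addr byte array. A standalone sketch of that assembly with illustrative values; htobe64() is the glibc stand-in for the kernel's cpu_to_be64().

  #include <endian.h>   /* htobe64(); glibc-specific stand-in for cpu_to_be64() */
  #include <stdint.h>
  #include <stdio.h>
  #include <string.h>

  int main(void)
  {
          uint32_t high = 0x5001517e, low = 0x85e3cabd;  /* illustrative OEM values */
          uint64_t addr = ((uint64_t)high << 32) | low;
          uint64_t be = htobe64(addr);
          uint8_t sas_addr[8];

          memcpy(sas_addr, &be, sizeof(be));  /* byte 0 now holds the MSB */
          printf("%016llx\n", (unsigned long long)addr);
          return 0;
  }
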
* * */ -struct scic_phy_properties { +struct sci_phy_properties { /** * This field specifies the port that currently contains the * supplied phy. This field may be set to NULL @@ -194,12 +194,12 @@ struct scic_phy_properties { }; /** - * struct scic_sas_phy_properties - This structure defines the properties, + * struct sci_sas_phy_properties - This structure defines the properties, * specific to a SAS phy, that can be retrieved. * * */ -struct scic_sas_phy_properties { +struct sci_sas_phy_properties { /** * This field delineates the Identify Address Frame received * from the remote end point. @@ -210,17 +210,17 @@ struct scic_sas_phy_properties { * This field delineates the Phy capabilities structure received * from the remote end point. */ - struct scic_phy_cap rcvd_cap; + struct sci_phy_cap rcvd_cap; }; /** - * struct scic_sata_phy_properties - This structure defines the properties, + * struct sci_sata_phy_properties - This structure defines the properties, * specific to a SATA phy, that can be retrieved. * * */ -struct scic_sata_phy_properties { +struct sci_sata_phy_properties { /** * This field delineates the signature FIS received from the * attached target. @@ -236,12 +236,12 @@ struct scic_sata_phy_properties { }; /** - * enum scic_phy_counter_id - This enumeration depicts the various pieces of + * enum sci_phy_counter_id - This enumeration depicts the various pieces of * optional information that can be retrieved for a specific phy. * * */ -enum scic_phy_counter_id { +enum sci_phy_counter_id { /** * This PHY information field tracks the number of frames received. */ @@ -344,7 +344,7 @@ enum scic_phy_counter_id { SCIC_PHY_COUNTER_SN_DWORD_SYNC_ERROR }; -enum scic_sds_phy_states { +enum sci_phy_states { /** * Simply the initial state for the base domain state machine. 
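
The renamed sci_phy_state_table[] in phy.c above and the enum of phy states here follow one pattern: a table of optional enter/exit callbacks indexed by state, installed with sci_init_sm() and walked on every sci_change_state(). A minimal sketch of that dispatch with a reduced state set; the types stand in for struct sci_base_state_machine, and the exit-then-enter ordering is assumed rather than taken from this hunk.

  #include <stdio.h>

  enum phy_state { PHY_STOPPED, PHY_STARTING, PHY_READY, PHY_STATE_MAX };

  struct state_machine;
  typedef void (*state_handler)(struct state_machine *sm);

  struct base_state {
          state_handler enter_state;  /* optional */
          state_handler exit_state;   /* optional */
  };

  struct state_machine {
          const struct base_state *table;
          enum phy_state current;
  };

  static void ready_enter(struct state_machine *sm)
  {
          printf("phy ready: report link up\n");
  }

  static void ready_exit(struct state_machine *sm)
  {
          printf("leaving ready: suspend the protocol engine\n");
  }

  static const struct base_state phy_state_table[PHY_STATE_MAX] = {
          [PHY_STOPPED]  = { },
          [PHY_STARTING] = { },
          [PHY_READY]    = { .enter_state = ready_enter, .exit_state = ready_exit },
  };

  /* run the old state's exit handler, then the new state's enter handler */
  static void change_state(struct state_machine *sm, enum phy_state next)
  {
          if (sm->table[sm->current].exit_state)
                  sm->table[sm->current].exit_state(sm);
          sm->current = next;
          if (sm->table[next].enter_state)
                  sm->table[next].enter_state(sm);
  }

  int main(void)
  {
          struct state_machine sm = { .table = phy_state_table, .current = PHY_STOPPED };

          change_state(&sm, PHY_READY);
          change_state(&sm, PHY_STOPPED);
          return 0;
  }
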
*/ @@ -441,77 +441,77 @@ enum scic_sds_phy_states { }; /** - * scic_sds_phy_get_index() - + * sci_phy_get_index() - * * This macro returns the phy index for the specified phy */ -#define scic_sds_phy_get_index(phy) \ +#define sci_phy_get_index(phy) \ ((phy)->phy_index) /** - * scic_sds_phy_get_controller() - This macro returns the controller for this + * sci_phy_get_controller() - This macro returns the controller for this * phy * * */ -#define scic_sds_phy_get_controller(phy) \ - (scic_sds_port_get_controller((phy)->owning_port)) +#define sci_phy_get_controller(phy) \ + (sci_port_get_controller((phy)->owning_port)) -void scic_sds_phy_construct( +void sci_phy_construct( struct isci_phy *iphy, struct isci_port *iport, u8 phy_index); struct isci_port *phy_get_non_dummy_port(struct isci_phy *iphy); -void scic_sds_phy_set_port( +void sci_phy_set_port( struct isci_phy *iphy, struct isci_port *iport); -enum sci_status scic_sds_phy_initialize( +enum sci_status sci_phy_initialize( struct isci_phy *iphy, struct scu_transport_layer_registers __iomem *transport_layer_registers, struct scu_link_layer_registers __iomem *link_layer_registers); -enum sci_status scic_sds_phy_start( +enum sci_status sci_phy_start( struct isci_phy *iphy); -enum sci_status scic_sds_phy_stop( +enum sci_status sci_phy_stop( struct isci_phy *iphy); -enum sci_status scic_sds_phy_reset( +enum sci_status sci_phy_reset( struct isci_phy *iphy); -void scic_sds_phy_resume( +void sci_phy_resume( struct isci_phy *iphy); -void scic_sds_phy_setup_transport( +void sci_phy_setup_transport( struct isci_phy *iphy, u32 device_id); -enum sci_status scic_sds_phy_event_handler( +enum sci_status sci_phy_event_handler( struct isci_phy *iphy, u32 event_code); -enum sci_status scic_sds_phy_frame_handler( +enum sci_status sci_phy_frame_handler( struct isci_phy *iphy, u32 frame_index); -enum sci_status scic_sds_phy_consume_power_handler( +enum sci_status sci_phy_consume_power_handler( struct isci_phy *iphy); -void scic_sds_phy_get_sas_address( +void sci_phy_get_sas_address( struct isci_phy *iphy, struct sci_sas_address *sas_address); -void scic_sds_phy_get_attached_sas_address( +void sci_phy_get_attached_sas_address( struct isci_phy *iphy, struct sci_sas_address *sas_address); -struct scic_phy_proto; -void scic_sds_phy_get_protocols( +struct sci_phy_proto; +void sci_phy_get_protocols( struct isci_phy *iphy, - struct scic_phy_proto *protocols); + struct sci_phy_proto *protocols); enum sas_linkrate sci_phy_linkrate(struct isci_phy *iphy); struct isci_host; diff --git a/drivers/scsi/isci/port.c b/drivers/scsi/isci/port.c index c434d5a0eff..1822ed68409 100644 --- a/drivers/scsi/isci/port.c +++ b/drivers/scsi/isci/port.c @@ -74,57 +74,35 @@ static void isci_port_change_state(struct isci_port *iport, enum isci_status sta spin_unlock_irqrestore(&iport->state_lock, flags); } -/* - * This function will indicate which protocols are supported by this port. - * @sci_port: a handle corresponding to the SAS port for which to return the - * supported protocols. - * @protocols: This parameter specifies a pointer to a data structure - * which the core will copy the protocol values for the port from the - * transmit_identification register. 
- */ -static void -scic_sds_port_get_protocols(struct isci_port *iport, - struct scic_phy_proto *protocols) +static void sci_port_get_protocols(struct isci_port *iport, struct sci_phy_proto *proto) { u8 index; - protocols->all = 0; - + proto->all = 0; for (index = 0; index < SCI_MAX_PHYS; index++) { - if (iport->phy_table[index] != NULL) { - scic_sds_phy_get_protocols(iport->phy_table[index], - protocols); - } + struct isci_phy *iphy = iport->phy_table[index]; + + if (!iphy) + continue; + sci_phy_get_protocols(iphy, proto); } } -/** - * This method requests a list (mask) of the phys contained in the supplied SAS - * port. - * @sci_port: a handle corresponding to the SAS port for which to return the - * phy mask. - * - * Return a bit mask indicating which phys are a part of this port. Each bit - * corresponds to a phy identifier (e.g. bit 0 = phy id 0). - */ -static u32 scic_sds_port_get_phys(struct isci_port *iport) +static u32 sci_port_get_phys(struct isci_port *iport) { u32 index; u32 mask; mask = 0; - - for (index = 0; index < SCI_MAX_PHYS; index++) { - if (iport->phy_table[index] != NULL) { + for (index = 0; index < SCI_MAX_PHYS; index++) + if (iport->phy_table[index]) mask |= (1 << index); - } - } return mask; } /** - * scic_port_get_properties() - This method simply returns the properties + * sci_port_get_properties() - This method simply returns the properties * regarding the port, such as: physical index, protocols, sas address, etc. * @port: this parameter specifies the port for which to retrieve the physical * index. @@ -136,22 +114,22 @@ static u32 scic_sds_port_get_phys(struct isci_port *iport) * value is returned if the specified port is not valid. When this value is * returned, no data is copied to the properties output parameter. */ -static enum sci_status scic_port_get_properties(struct isci_port *iport, - struct scic_port_properties *prop) +static enum sci_status sci_port_get_properties(struct isci_port *iport, + struct sci_port_properties *prop) { if (!iport || iport->logical_port_index == SCIC_SDS_DUMMY_PORT) return SCI_FAILURE_INVALID_PORT; - prop->index = iport->logical_port_index; - prop->phy_mask = scic_sds_port_get_phys(iport); - scic_sds_port_get_sas_address(iport, &prop->local.sas_address); - scic_sds_port_get_protocols(iport, &prop->local.protocols); - scic_sds_port_get_attached_sas_address(iport, &prop->remote.sas_address); + prop->index = iport->logical_port_index; + prop->phy_mask = sci_port_get_phys(iport); + sci_port_get_sas_address(iport, &prop->local.sas_address); + sci_port_get_protocols(iport, &prop->local.protocols); + sci_port_get_attached_sas_address(iport, &prop->remote.sas_address); return SCI_SUCCESS; } -static void scic_port_bcn_enable(struct isci_port *iport) +static void sci_port_bcn_enable(struct isci_port *iport) { struct isci_phy *iphy; u32 val; @@ -167,7 +145,7 @@ static void scic_port_bcn_enable(struct isci_port *iport) } } -/* called under scic_lock to stabilize phy:port associations */ +/* called under sci_lock to stabilize phy:port associations */ void isci_port_bcn_enable(struct isci_host *ihost, struct isci_port *iport) { int i; @@ -209,7 +187,7 @@ static void isci_port_bc_change_received(struct isci_host *ihost, ihost->sas_ha.notify_port_event(&iphy->sas_phy, PORTE_BROADCAST_RCVD); } - scic_port_bcn_enable(iport); + sci_port_bcn_enable(iport); } static void isci_port_link_up(struct isci_host *isci_host, @@ -217,7 +195,7 @@ static void isci_port_link_up(struct isci_host *isci_host, struct isci_phy *iphy) { unsigned long flags; - struct 
scic_port_properties properties; + struct sci_port_properties properties; unsigned long success = true; BUG_ON(iphy->isci_port != NULL); @@ -232,7 +210,7 @@ static void isci_port_link_up(struct isci_host *isci_host, isci_port_change_state(iphy->isci_port, isci_starting); - scic_port_get_properties(iport, &properties); + sci_port_get_properties(iport, &properties); if (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SATA) { u64 attached_sas_address; @@ -245,7 +223,7 @@ static void isci_port_link_up(struct isci_host *isci_host, * automagically assign a SAS address to the end device * for the purpose of creating a port. This SAS address * will not be the same as assigned to the PHY and needs - * to be obtained from struct scic_port_properties properties. + * to be obtained from struct sci_port_properties properties. */ attached_sas_address = properties.remote.sas_address.high; attached_sas_address <<= 32; @@ -399,50 +377,40 @@ static void isci_port_hard_reset_complete(struct isci_port *isci_port, * doesn't preclude all configurations. It merely ensures that a phy is part * of the allowable set of phy identifiers for that port. For example, one * could assign phy 3 to port 0 and no other phys. Please refer to - * scic_sds_port_is_phy_mask_valid() for information regarding whether the + * sci_port_is_phy_mask_valid() for information regarding whether the * phy_mask for a port can be supported. bool true if this is a valid phy * assignment for the port false if this is not a valid phy assignment for the * port */ -bool scic_sds_port_is_valid_phy_assignment(struct isci_port *iport, - u32 phy_index) +bool sci_port_is_valid_phy_assignment(struct isci_port *iport, u32 phy_index) { + struct isci_host *ihost = iport->owning_controller; + struct sci_user_parameters *user = &ihost->user_parameters; + /* Initialize to invalid value. */ u32 existing_phy_index = SCI_MAX_PHYS; u32 index; - if ((iport->physical_port_index == 1) && (phy_index != 1)) { + if ((iport->physical_port_index == 1) && (phy_index != 1)) return false; - } - if (iport->physical_port_index == 3 && phy_index != 3) { + if (iport->physical_port_index == 3 && phy_index != 3) return false; - } - if ( - (iport->physical_port_index == 2) - && ((phy_index == 0) || (phy_index == 1)) - ) { + if (iport->physical_port_index == 2 && + (phy_index == 0 || phy_index == 1)) return false; - } - for (index = 0; index < SCI_MAX_PHYS; index++) { - if ((iport->phy_table[index] != NULL) - && (index != phy_index)) { + for (index = 0; index < SCI_MAX_PHYS; index++) + if (iport->phy_table[index] && index != phy_index) existing_phy_index = index; - } - } - /* - * Ensure that all of the phys in the port are capable of - * operating at the same maximum link rate. */ - if ( - (existing_phy_index < SCI_MAX_PHYS) - && (iport->owning_controller->user_parameters.sds1.phys[ - phy_index].max_speed_generation != - iport->owning_controller->user_parameters.sds1.phys[ - existing_phy_index].max_speed_generation) - ) + /* Ensure that all of the phys in the port are capable of + * operating at the same maximum link rate. + */ + if (existing_phy_index < SCI_MAX_PHYS && + user->phys[phy_index].max_speed_generation != + user->phys[existing_phy_index].max_speed_generation) return false; return true; @@ -460,7 +428,7 @@ bool scic_sds_port_is_valid_phy_assignment(struct isci_port *iport, * phy mask can be supported. 
true if this is a valid phy assignment for the * port false if this is not a valid phy assignment for the port */ -static bool scic_sds_port_is_phy_mask_valid( +static bool sci_port_is_phy_mask_valid( struct isci_port *iport, u32 phy_mask) { @@ -493,10 +461,10 @@ static bool scic_sds_port_is_phy_mask_valid( * the port. Currently, the lowest order phy that is connected is returned. * This method returns a pointer to a SCIS_SDS_PHY object. NULL This value is * returned if there are no currently active (i.e. connected to a remote end - * point) phys contained in the port. All other values specify a struct scic_sds_phy + * point) phys contained in the port. All other values specify a struct sci_phy * object that is active in the port. */ -static struct isci_phy *scic_sds_port_get_a_connected_phy(struct isci_port *iport) +static struct isci_phy *sci_port_get_a_connected_phy(struct isci_port *iport) { u32 index; struct isci_phy *iphy; @@ -506,14 +474,14 @@ static struct isci_phy *scic_sds_port_get_a_connected_phy(struct isci_port *ipor * connected to the remote end-point. */ iphy = iport->phy_table[index]; - if (iphy && scic_sds_port_active_phy(iport, iphy)) + if (iphy && sci_port_active_phy(iport, iphy)) return iphy; } return NULL; } -static enum sci_status scic_sds_port_set_phy(struct isci_port *iport, struct isci_phy *iphy) +static enum sci_status sci_port_set_phy(struct isci_port *iport, struct isci_phy *iphy) { /* Check to see if we can add this phy to a port * that means that the phy is not part of a port and that the port does @@ -521,13 +489,13 @@ static enum sci_status scic_sds_port_set_phy(struct isci_port *iport, struct isc */ if (!iport->phy_table[iphy->phy_index] && !phy_get_non_dummy_port(iphy) && - scic_sds_port_is_valid_phy_assignment(iport, iphy->phy_index)) { + sci_port_is_valid_phy_assignment(iport, iphy->phy_index)) { /* Phy is being added in the stopped state so we are in MPC mode * make logical port index = physical port index */ iport->logical_port_index = iport->physical_port_index; iport->phy_table[iphy->phy_index] = iphy; - scic_sds_phy_set_port(iphy, iport); + sci_phy_set_port(iphy, iport); return SCI_SUCCESS; } @@ -535,8 +503,7 @@ static enum sci_status scic_sds_port_set_phy(struct isci_port *iport, struct isc return SCI_FAILURE; } -static enum sci_status scic_sds_port_clear_phy(struct isci_port *iport, - struct isci_phy *iphy) +static enum sci_status sci_port_clear_phy(struct isci_port *iport, struct isci_phy *iphy) { /* Make sure that this phy is part of this port */ if (iport->phy_table[iphy->phy_index] == iphy && @@ -544,7 +511,7 @@ static enum sci_status scic_sds_port_clear_phy(struct isci_port *iport, struct isci_host *ihost = iport->owning_controller; /* Yep it is assigned to this port so remove it */ - scic_sds_phy_set_port(iphy, &ihost->ports[SCI_MAX_PORTS]); + sci_phy_set_port(iphy, &ihost->ports[SCI_MAX_PORTS]); iport->phy_table[iphy->phy_index] = NULL; return SCI_SUCCESS; } @@ -552,45 +519,18 @@ static enum sci_status scic_sds_port_clear_phy(struct isci_port *iport, return SCI_FAILURE; } - -/** - * This method requests the SAS address for the supplied SAS port from the SCI - * implementation. - * @sci_port: a handle corresponding to the SAS port for which to return the - * SAS address. - * @sas_address: This parameter specifies a pointer to a SAS address structure - * into which the core will copy the SAS address for the port. 
- * - */ -void scic_sds_port_get_sas_address( - struct isci_port *iport, - struct sci_sas_address *sas_address) +void sci_port_get_sas_address(struct isci_port *iport, struct sci_sas_address *sas) { u32 index; - sas_address->high = 0; - sas_address->low = 0; - - for (index = 0; index < SCI_MAX_PHYS; index++) { - if (iport->phy_table[index] != NULL) { - scic_sds_phy_get_sas_address(iport->phy_table[index], sas_address); - } - } + sas->high = 0; + sas->low = 0; + for (index = 0; index < SCI_MAX_PHYS; index++) + if (iport->phy_table[index]) + sci_phy_get_sas_address(iport->phy_table[index], sas); } -/* - * This function requests the SAS address for the device directly attached to - * this SAS port. - * @sci_port: a handle corresponding to the SAS port for which to return the - * SAS address. - * @sas_address: This parameter specifies a pointer to a SAS address structure - * into which the core will copy the SAS address for the device directly - * attached to the port. - * - */ -void scic_sds_port_get_attached_sas_address( - struct isci_port *iport, - struct sci_sas_address *sas_address) +void sci_port_get_attached_sas_address(struct isci_port *iport, struct sci_sas_address *sas) { struct isci_phy *iphy; @@ -598,23 +538,22 @@ void scic_sds_port_get_attached_sas_address( * Ensure that the phy is both part of the port and currently * connected to the remote end-point. */ - iphy = scic_sds_port_get_a_connected_phy(iport); + iphy = sci_port_get_a_connected_phy(iport); if (iphy) { if (iphy->protocol != SCIC_SDS_PHY_PROTOCOL_SATA) { - scic_sds_phy_get_attached_sas_address(iphy, - sas_address); + sci_phy_get_attached_sas_address(iphy, sas); } else { - scic_sds_phy_get_sas_address(iphy, sas_address); - sas_address->low += iphy->phy_index; + sci_phy_get_sas_address(iphy, sas); + sas->low += iphy->phy_index; } } else { - sas_address->high = 0; - sas_address->low = 0; + sas->high = 0; + sas->low = 0; } } /** - * scic_sds_port_construct_dummy_rnc() - create dummy rnc for si workaround + * sci_port_construct_dummy_rnc() - create dummy rnc for si workaround * * @sci_port: logical port on which we need to create the remote node context * @rni: remote node index for this remote node context. @@ -623,7 +562,7 @@ void scic_sds_port_get_attached_sas_address( * This structure will be posted to the hardware to work around a scheduler * error in the hardware. */ -static void scic_sds_port_construct_dummy_rnc(struct isci_port *iport, u16 rni) +static void sci_port_construct_dummy_rnc(struct isci_port *iport, u16 rni) { union scu_remote_node_context *rnc; @@ -651,7 +590,7 @@ static void scic_sds_port_construct_dummy_rnc(struct isci_port *iport, u16 rni) * structure will be posted to the hardwre to work around a scheduler error * in the hardware. 
*/ -static void scic_sds_port_construct_dummy_task(struct isci_port *iport, u16 tag) +static void sci_port_construct_dummy_task(struct isci_port *iport, u16 tag) { struct isci_host *ihost = iport->owning_controller; struct scu_task_context *task_context; @@ -671,7 +610,7 @@ static void scic_sds_port_construct_dummy_task(struct isci_port *iport, u16 tag) task_context->task_phase = 0x01; } -static void scic_sds_port_destroy_dummy_resources(struct isci_port *iport) +static void sci_port_destroy_dummy_resources(struct isci_port *iport) { struct isci_host *ihost = iport->owning_controller; @@ -679,93 +618,43 @@ static void scic_sds_port_destroy_dummy_resources(struct isci_port *iport) isci_free_tag(ihost, iport->reserved_tag); if (iport->reserved_rni != SCU_DUMMY_INDEX) - scic_sds_remote_node_table_release_remote_node_index(&ihost->available_remote_nodes, + sci_remote_node_table_release_remote_node_index(&ihost->available_remote_nodes, 1, iport->reserved_rni); iport->reserved_rni = SCU_DUMMY_INDEX; iport->reserved_tag = SCI_CONTROLLER_INVALID_IO_TAG; } -/** - * This method performs initialization of the supplied port. Initialization - * includes: - state machine initialization - member variable initialization - * - configuring the phy_mask - * @sci_port: - * @transport_layer_registers: - * @port_task_scheduler_registers: - * @port_configuration_regsiter: - * - * enum sci_status SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION This value is returned - * if the phy being added to the port - */ -enum sci_status scic_sds_port_initialize( - struct isci_port *iport, - void __iomem *port_task_scheduler_registers, - void __iomem *port_configuration_regsiter, - void __iomem *viit_registers) -{ - iport->port_task_scheduler_registers = port_task_scheduler_registers; - iport->port_pe_configuration_register = port_configuration_regsiter; - iport->viit_registers = viit_registers; - - return SCI_SUCCESS; -} - - -/** - * This method assigns the direct attached device ID for this port. - * - * @param[in] iport The port for which the direct attached device id is to - * be assigned. - * @param[in] device_id The direct attached device ID to assign to the port. - * This will be the RNi for the device - */ -void scic_sds_port_setup_transports( - struct isci_port *iport, - u32 device_id) +void sci_port_setup_transports(struct isci_port *iport, u32 device_id) { u8 index; for (index = 0; index < SCI_MAX_PHYS; index++) { if (iport->active_phy_mask & (1 << index)) - scic_sds_phy_setup_transport(iport->phy_table[index], device_id); + sci_phy_setup_transport(iport->phy_table[index], device_id); } } -/** - * - * @sci_port: This is the port on which the phy should be enabled. - * @sci_phy: This is the specific phy which to enable. - * @do_notify_user: This parameter specifies whether to inform the user (via - * scic_cb_port_link_up()) as to the fact that a new phy as become ready. - * - * This function will activate the phy in the port. - * Activation includes: - adding - * the phy to the port - enabling the Protocol Engine in the silicon. - - * notifying the user that the link is up. 
none - */ -static void scic_sds_port_activate_phy(struct isci_port *iport, - struct isci_phy *iphy, - bool do_notify_user) +static void sci_port_activate_phy(struct isci_port *iport, struct isci_phy *iphy, + bool do_notify_user) { struct isci_host *ihost = iport->owning_controller; if (iphy->protocol != SCIC_SDS_PHY_PROTOCOL_SATA) - scic_sds_phy_resume(iphy); + sci_phy_resume(iphy); iport->active_phy_mask |= 1 << iphy->phy_index; - scic_sds_controller_clear_invalid_phy(ihost, iphy); + sci_controller_clear_invalid_phy(ihost, iphy); if (do_notify_user == true) isci_port_link_up(ihost, iport, iphy); } -void scic_sds_port_deactivate_phy(struct isci_port *iport, - struct isci_phy *iphy, - bool do_notify_user) +void sci_port_deactivate_phy(struct isci_port *iport, struct isci_phy *iphy, + bool do_notify_user) { - struct isci_host *ihost = scic_sds_port_get_controller(iport); + struct isci_host *ihost = sci_port_get_controller(iport); iport->active_phy_mask &= ~(1 << iphy->phy_index); @@ -779,16 +668,7 @@ void scic_sds_port_deactivate_phy(struct isci_port *iport, isci_port_link_down(ihost, iphy, iport); } -/** - * - * @sci_port: This is the port on which the phy should be disabled. - * @sci_phy: This is the specific phy which to disabled. - * - * This function will disable the phy and report that the phy is not valid for - * this port object. None - */ -static void scic_sds_port_invalid_link_up(struct isci_port *iport, - struct isci_phy *iphy) +static void sci_port_invalid_link_up(struct isci_port *iport, struct isci_phy *iphy) { struct isci_host *ihost = iport->owning_controller; @@ -798,12 +678,12 @@ static void scic_sds_port_invalid_link_up(struct isci_port *iport, * invalid link. */ if ((ihost->invalid_phy_mask & (1 << iphy->phy_index)) == 0) { - scic_sds_controller_set_invalid_phy(ihost, iphy); + sci_controller_set_invalid_phy(ihost, iphy); dev_warn(&ihost->pdev->dev, "Invalid link up!\n"); } } -static bool is_port_ready_state(enum scic_sds_port_states state) +static bool is_port_ready_state(enum sci_port_states state) { switch (state) { case SCI_PORT_READY: @@ -818,10 +698,10 @@ static bool is_port_ready_state(enum scic_sds_port_states state) /* flag dummy rnc hanling when exiting a ready state */ static void port_state_machine_change(struct isci_port *iport, - enum scic_sds_port_states state) + enum sci_port_states state) { struct sci_base_state_machine *sm = &iport->sm; - enum scic_sds_port_states old_state = sm->current_state_id; + enum sci_port_states old_state = sm->current_state_id; if (is_port_ready_state(old_state) && !is_port_ready_state(state)) iport->ready_exit = true; @@ -831,11 +711,11 @@ static void port_state_machine_change(struct isci_port *iport, } /** - * scic_sds_port_general_link_up_handler - phy can be assigned to port? - * @sci_port: scic_sds_port object for which has a phy that has gone link up. + * sci_port_general_link_up_handler - phy can be assigned to port? + * @sci_port: sci_port object for which has a phy that has gone link up. * @sci_phy: This is the struct isci_phy object that has gone link up. * @do_notify_user: This parameter specifies whether to inform the user (via - * scic_cb_port_link_up()) as to the fact that a new phy as become ready. + * sci_port_link_up()) as to the fact that a new phy as become ready. * * Determine if this phy can be assigned to this * port . 
If the phy is not a valid PHY for @@ -843,15 +723,15 @@ static void port_state_machine_change(struct isci_port *iport, * part of a port if it's attached SAS ADDRESS is the same as all other PHYs in * the same port. none */ -static void scic_sds_port_general_link_up_handler(struct isci_port *iport, +static void sci_port_general_link_up_handler(struct isci_port *iport, struct isci_phy *iphy, bool do_notify_user) { struct sci_sas_address port_sas_address; struct sci_sas_address phy_sas_address; - scic_sds_port_get_attached_sas_address(iport, &port_sas_address); - scic_sds_phy_get_attached_sas_address(iphy, &phy_sas_address); + sci_port_get_attached_sas_address(iport, &port_sas_address); + sci_phy_get_attached_sas_address(iphy, &phy_sas_address); /* If the SAS address of the new phy matches the SAS address of * other phys in the port OR this is the first phy in the port, @@ -863,11 +743,11 @@ static void scic_sds_port_general_link_up_handler(struct isci_port *iport, iport->active_phy_mask == 0) { struct sci_base_state_machine *sm = &iport->sm; - scic_sds_port_activate_phy(iport, iphy, do_notify_user); + sci_port_activate_phy(iport, iphy, do_notify_user); if (sm->current_state_id == SCI_PORT_RESETTING) port_state_machine_change(iport, SCI_PORT_READY); } else - scic_sds_port_invalid_link_up(iport, iphy); + sci_port_invalid_link_up(iport, iphy); } @@ -881,7 +761,7 @@ static void scic_sds_port_general_link_up_handler(struct isci_port *iport, * bool true Is returned if this is a wide ported port. false Is returned if * this is a narrow port. */ -static bool scic_sds_port_is_wide(struct isci_port *iport) +static bool sci_port_is_wide(struct isci_port *iport) { u32 index; u32 phy_count = 0; @@ -909,14 +789,14 @@ static bool scic_sds_port_is_wide(struct isci_port *iport) * wide ports and direct attached phys. Since there are no wide ported SATA * devices this could become an invalid port configuration. */ -bool scic_sds_port_link_detected( +bool sci_port_link_detected( struct isci_port *iport, struct isci_phy *iphy) { if ((iport->logical_port_index != SCIC_SDS_DUMMY_PORT) && (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SATA) && - scic_sds_port_is_wide(iport)) { - scic_sds_port_invalid_link_up(iport, iphy); + sci_port_is_wide(iport)) { + sci_port_invalid_link_up(iport, iphy); return false; } @@ -977,11 +857,11 @@ done: * * */ -static void scic_sds_port_update_viit_entry(struct isci_port *iport) +static void sci_port_update_viit_entry(struct isci_port *iport) { struct sci_sas_address sas_address; - scic_sds_port_get_sas_address(iport, &sas_address); + sci_port_get_sas_address(iport, &sas_address); writel(sas_address.high, &iport->viit_registers->initiator_sas_address_hi); @@ -999,7 +879,7 @@ static void scic_sds_port_update_viit_entry(struct isci_port *iport) &iport->viit_registers->status); } -enum sas_linkrate scic_sds_port_get_max_allowed_speed(struct isci_port *iport) +enum sas_linkrate sci_port_get_max_allowed_speed(struct isci_port *iport) { u16 index; struct isci_phy *iphy; @@ -1010,7 +890,7 @@ enum sas_linkrate scic_sds_port_get_max_allowed_speed(struct isci_port *iport) * lowest maximum link rate. 
*/ for (index = 0; index < SCI_MAX_PHYS; index++) { iphy = iport->phy_table[index]; - if (iphy && scic_sds_port_active_phy(iport, iphy) && + if (iphy && sci_port_active_phy(iport, iphy) && iphy->max_negotiated_speed < max_allowed_speed) max_allowed_speed = iphy->max_negotiated_speed; } @@ -1018,7 +898,7 @@ enum sas_linkrate scic_sds_port_get_max_allowed_speed(struct isci_port *iport) return max_allowed_speed; } -static void scic_sds_port_suspend_port_task_scheduler(struct isci_port *iport) +static void sci_port_suspend_port_task_scheduler(struct isci_port *iport) { u32 pts_control_value; @@ -1028,7 +908,7 @@ static void scic_sds_port_suspend_port_task_scheduler(struct isci_port *iport) } /** - * scic_sds_port_post_dummy_request() - post dummy/workaround request + * sci_port_post_dummy_request() - post dummy/workaround request * @sci_port: port to post task * * Prevent the hardware scheduler from posting new requests to the front @@ -1036,7 +916,7 @@ static void scic_sds_port_suspend_port_task_scheduler(struct isci_port *iport) * ongoing requests. * */ -static void scic_sds_port_post_dummy_request(struct isci_port *iport) +static void sci_port_post_dummy_request(struct isci_port *iport) { struct isci_host *ihost = iport->owning_controller; u16 tag = iport->reserved_tag; @@ -1050,7 +930,7 @@ static void scic_sds_port_post_dummy_request(struct isci_port *iport) iport->physical_port_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT | ISCI_TAG_TCI(tag); - scic_sds_controller_post_request(ihost, command); + sci_controller_post_request(ihost, command); } /** @@ -1060,7 +940,7 @@ static void scic_sds_port_post_dummy_request(struct isci_port *iport) * @sci_port: The port on which the task must be aborted. * */ -static void scic_sds_port_abort_dummy_request(struct isci_port *iport) +static void sci_port_abort_dummy_request(struct isci_port *iport) { struct isci_host *ihost = iport->owning_controller; u16 tag = iport->reserved_tag; @@ -1074,7 +954,7 @@ static void scic_sds_port_abort_dummy_request(struct isci_port *iport) iport->physical_port_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT | ISCI_TAG_TCI(tag); - scic_sds_controller_post_request(ihost, command); + sci_controller_post_request(ihost, command); } /** @@ -1084,7 +964,7 @@ static void scic_sds_port_abort_dummy_request(struct isci_port *iport) * This method will resume the port task scheduler for this port object. 
none */ static void -scic_sds_port_resume_port_task_scheduler(struct isci_port *iport) +sci_port_resume_port_task_scheduler(struct isci_port *iport) { u32 pts_control_value; @@ -1093,11 +973,11 @@ scic_sds_port_resume_port_task_scheduler(struct isci_port *iport) writel(pts_control_value, &iport->port_task_scheduler_registers->control); } -static void scic_sds_port_ready_substate_waiting_enter(struct sci_base_state_machine *sm) +static void sci_port_ready_substate_waiting_enter(struct sci_base_state_machine *sm) { struct isci_port *iport = container_of(sm, typeof(*iport), sm); - scic_sds_port_suspend_port_task_scheduler(iport); + sci_port_suspend_port_task_scheduler(iport); iport->not_ready_reason = SCIC_PORT_NOT_READY_NO_ACTIVE_PHYS; @@ -1108,7 +988,7 @@ static void scic_sds_port_ready_substate_waiting_enter(struct sci_base_state_mac } } -static void scic_sds_port_ready_substate_operational_enter(struct sci_base_state_machine *sm) +static void sci_port_ready_substate_operational_enter(struct sci_base_state_machine *sm) { u32 index; struct isci_port *iport = container_of(sm, typeof(*iport), sm); @@ -1124,18 +1004,18 @@ static void scic_sds_port_ready_substate_operational_enter(struct sci_base_state } } - scic_sds_port_update_viit_entry(iport); + sci_port_update_viit_entry(iport); - scic_sds_port_resume_port_task_scheduler(iport); + sci_port_resume_port_task_scheduler(iport); /* * Post the dummy task for the port so the hardware can schedule * io correctly */ - scic_sds_port_post_dummy_request(iport); + sci_port_post_dummy_request(iport); } -static void scic_sds_port_invalidate_dummy_remote_node(struct isci_port *iport) +static void sci_port_invalidate_dummy_remote_node(struct isci_port *iport) { struct isci_host *ihost = iport->owning_controller; u8 phys_index = iport->physical_port_index; @@ -1157,7 +1037,7 @@ static void scic_sds_port_invalidate_dummy_remote_node(struct isci_port *iport) command = SCU_CONTEXT_COMMAND_POST_RNC_INVALIDATE | phys_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT | rni; - scic_sds_controller_post_request(ihost, command); + sci_controller_post_request(ihost, command); } /** @@ -1168,7 +1048,7 @@ static void scic_sds_port_invalidate_dummy_remote_node(struct isci_port *iport) * exiting the SCI_PORT_SUB_OPERATIONAL. This function reports * the port not ready and suspends the port task scheduler. none */ -static void scic_sds_port_ready_substate_operational_exit(struct sci_base_state_machine *sm) +static void sci_port_ready_substate_operational_exit(struct sci_base_state_machine *sm) { struct isci_port *iport = container_of(sm, typeof(*iport), sm); struct isci_host *ihost = iport->owning_controller; @@ -1178,15 +1058,15 @@ static void scic_sds_port_ready_substate_operational_exit(struct sci_base_state_ * the hardware will treat this as a NOP and just return abort * complete. 
*/ - scic_sds_port_abort_dummy_request(iport); + sci_port_abort_dummy_request(iport); isci_port_not_ready(ihost, iport); if (iport->ready_exit) - scic_sds_port_invalidate_dummy_remote_node(iport); + sci_port_invalidate_dummy_remote_node(iport); } -static void scic_sds_port_ready_substate_configuring_enter(struct sci_base_state_machine *sm) +static void sci_port_ready_substate_configuring_enter(struct sci_base_state_machine *sm) { struct isci_port *iport = container_of(sm, typeof(*iport), sm); struct isci_host *ihost = iport->owning_controller; @@ -1201,20 +1081,20 @@ static void scic_sds_port_ready_substate_configuring_enter(struct sci_base_state SCI_PORT_SUB_OPERATIONAL); } -static void scic_sds_port_ready_substate_configuring_exit(struct sci_base_state_machine *sm) +static void sci_port_ready_substate_configuring_exit(struct sci_base_state_machine *sm) { struct isci_port *iport = container_of(sm, typeof(*iport), sm); - scic_sds_port_suspend_port_task_scheduler(iport); + sci_port_suspend_port_task_scheduler(iport); if (iport->ready_exit) - scic_sds_port_invalidate_dummy_remote_node(iport); + sci_port_invalidate_dummy_remote_node(iport); } -enum sci_status scic_sds_port_start(struct isci_port *iport) +enum sci_status sci_port_start(struct isci_port *iport) { struct isci_host *ihost = iport->owning_controller; enum sci_status status = SCI_SUCCESS; - enum scic_sds_port_states state; + enum sci_port_states state; u32 phy_mask; state = iport->sm.current_state_id; @@ -1234,11 +1114,11 @@ enum sci_status scic_sds_port_start(struct isci_port *iport) } if (iport->reserved_rni == SCU_DUMMY_INDEX) { - u16 rni = scic_sds_remote_node_table_allocate_remote_node( + u16 rni = sci_remote_node_table_allocate_remote_node( &ihost->available_remote_nodes, 1); if (rni != SCU_DUMMY_INDEX) - scic_sds_port_construct_dummy_rnc(iport, rni); + sci_port_construct_dummy_rnc(iport, rni); else status = SCI_FAILURE_INSUFFICIENT_RESOURCES; iport->reserved_rni = rni; @@ -1251,19 +1131,19 @@ enum sci_status scic_sds_port_start(struct isci_port *iport) if (tag == SCI_CONTROLLER_INVALID_IO_TAG) status = SCI_FAILURE_INSUFFICIENT_RESOURCES; else - scic_sds_port_construct_dummy_task(iport, tag); + sci_port_construct_dummy_task(iport, tag); iport->reserved_tag = tag; } if (status == SCI_SUCCESS) { - phy_mask = scic_sds_port_get_phys(iport); + phy_mask = sci_port_get_phys(iport); /* * There are one or more phys assigned to this port. Make sure * the port's phy mask is in fact legal and supported by the * silicon. 
*/ - if (scic_sds_port_is_phy_mask_valid(iport, phy_mask) == true) { + if (sci_port_is_phy_mask_valid(iport, phy_mask) == true) { port_state_machine_change(iport, SCI_PORT_READY); @@ -1273,14 +1153,14 @@ enum sci_status scic_sds_port_start(struct isci_port *iport) } if (status != SCI_SUCCESS) - scic_sds_port_destroy_dummy_resources(iport); + sci_port_destroy_dummy_resources(iport); return status; } -enum sci_status scic_sds_port_stop(struct isci_port *iport) +enum sci_status sci_port_stop(struct isci_port *iport) { - enum scic_sds_port_states state; + enum sci_port_states state; state = iport->sm.current_state_id; switch (state) { @@ -1300,11 +1180,11 @@ enum sci_status scic_sds_port_stop(struct isci_port *iport) } } -static enum sci_status scic_port_hard_reset(struct isci_port *iport, u32 timeout) +static enum sci_status sci_port_hard_reset(struct isci_port *iport, u32 timeout) { enum sci_status status = SCI_FAILURE_INVALID_PHY; struct isci_phy *iphy = NULL; - enum scic_sds_port_states state; + enum sci_port_states state; u32 phy_index; state = iport->sm.current_state_id; @@ -1317,7 +1197,7 @@ static enum sci_status scic_port_hard_reset(struct isci_port *iport, u32 timeout /* Select a phy on which we can send the hard reset request. */ for (phy_index = 0; phy_index < SCI_MAX_PHYS && !iphy; phy_index++) { iphy = iport->phy_table[phy_index]; - if (iphy && !scic_sds_port_active_phy(iport, iphy)) { + if (iphy && !sci_port_active_phy(iport, iphy)) { /* * We found a phy but it is not ready select * different phy @@ -1329,7 +1209,7 @@ static enum sci_status scic_port_hard_reset(struct isci_port *iport, u32 timeout /* If we have a phy then go ahead and start the reset procedure */ if (!iphy) return status; - status = scic_sds_phy_reset(iphy); + status = sci_phy_reset(iphy); if (status != SCI_SUCCESS) return status; @@ -1342,7 +1222,7 @@ static enum sci_status scic_port_hard_reset(struct isci_port *iport, u32 timeout } /** - * scic_sds_port_add_phy() - + * sci_port_add_phy() - * @sci_port: This parameter specifies the port in which the phy will be added. * @sci_phy: This parameter is the phy which is to be added to the port. * @@ -1350,11 +1230,11 @@ static enum sci_status scic_port_hard_reset(struct isci_port *iport, u32 timeout * enum sci_status. SCI_SUCCESS the phy has been added to the port. Any other * status is a failure to add the phy to the port. 
*/ -enum sci_status scic_sds_port_add_phy(struct isci_port *iport, +enum sci_status sci_port_add_phy(struct isci_port *iport, struct isci_phy *iphy) { enum sci_status status; - enum scic_sds_port_states state; + enum sci_port_states state; state = iport->sm.current_state_id; switch (state) { @@ -1362,7 +1242,7 @@ enum sci_status scic_sds_port_add_phy(struct isci_port *iport, struct sci_sas_address port_sas_address; /* Read the port assigned SAS Address if there is one */ - scic_sds_port_get_sas_address(iport, &port_sas_address); + sci_port_get_sas_address(iport, &port_sas_address); if (port_sas_address.high != 0 && port_sas_address.low != 0) { struct sci_sas_address phy_sas_address; @@ -1370,32 +1250,32 @@ enum sci_status scic_sds_port_add_phy(struct isci_port *iport, /* Make sure that the PHY SAS Address matches the SAS Address * for this port */ - scic_sds_phy_get_sas_address(iphy, &phy_sas_address); + sci_phy_get_sas_address(iphy, &phy_sas_address); if (port_sas_address.high != phy_sas_address.high || port_sas_address.low != phy_sas_address.low) return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION; } - return scic_sds_port_set_phy(iport, iphy); + return sci_port_set_phy(iport, iphy); } case SCI_PORT_SUB_WAITING: case SCI_PORT_SUB_OPERATIONAL: - status = scic_sds_port_set_phy(iport, iphy); + status = sci_port_set_phy(iport, iphy); if (status != SCI_SUCCESS) return status; - scic_sds_port_general_link_up_handler(iport, iphy, true); + sci_port_general_link_up_handler(iport, iphy, true); iport->not_ready_reason = SCIC_PORT_NOT_READY_RECONFIGURING; port_state_machine_change(iport, SCI_PORT_SUB_CONFIGURING); return status; case SCI_PORT_SUB_CONFIGURING: - status = scic_sds_port_set_phy(iport, iphy); + status = sci_port_set_phy(iport, iphy); if (status != SCI_SUCCESS) return status; - scic_sds_port_general_link_up_handler(iport, iphy, true); + sci_port_general_link_up_handler(iport, iphy, true); /* Re-enter the configuring state since this may be the last phy in * the port. @@ -1411,7 +1291,7 @@ enum sci_status scic_sds_port_add_phy(struct isci_port *iport, } /** - * scic_sds_port_remove_phy() - + * sci_port_remove_phy() - * @sci_port: This parameter specifies the port in which the phy will be added. * @sci_phy: This parameter is the phy which is to be added to the port. * @@ -1419,33 +1299,33 @@ enum sci_status scic_sds_port_add_phy(struct isci_port *iport, * an enum sci_status. SCI_SUCCESS the phy has been removed from the port. Any * other status is a failure to add the phy to the port. 
*/ -enum sci_status scic_sds_port_remove_phy(struct isci_port *iport, +enum sci_status sci_port_remove_phy(struct isci_port *iport, struct isci_phy *iphy) { enum sci_status status; - enum scic_sds_port_states state; + enum sci_port_states state; state = iport->sm.current_state_id; switch (state) { case SCI_PORT_STOPPED: - return scic_sds_port_clear_phy(iport, iphy); + return sci_port_clear_phy(iport, iphy); case SCI_PORT_SUB_OPERATIONAL: - status = scic_sds_port_clear_phy(iport, iphy); + status = sci_port_clear_phy(iport, iphy); if (status != SCI_SUCCESS) return status; - scic_sds_port_deactivate_phy(iport, iphy, true); + sci_port_deactivate_phy(iport, iphy, true); iport->not_ready_reason = SCIC_PORT_NOT_READY_RECONFIGURING; port_state_machine_change(iport, SCI_PORT_SUB_CONFIGURING); return SCI_SUCCESS; case SCI_PORT_SUB_CONFIGURING: - status = scic_sds_port_clear_phy(iport, iphy); + status = sci_port_clear_phy(iport, iphy); if (status != SCI_SUCCESS) return status; - scic_sds_port_deactivate_phy(iport, iphy, true); + sci_port_deactivate_phy(iport, iphy, true); /* Re-enter the configuring state since this may be the last phy in * the port @@ -1460,10 +1340,10 @@ enum sci_status scic_sds_port_remove_phy(struct isci_port *iport, } } -enum sci_status scic_sds_port_link_up(struct isci_port *iport, +enum sci_status sci_port_link_up(struct isci_port *iport, struct isci_phy *iphy) { - enum scic_sds_port_states state; + enum sci_port_states state; state = iport->sm.current_state_id; switch (state) { @@ -1471,13 +1351,13 @@ enum sci_status scic_sds_port_link_up(struct isci_port *iport, /* Since this is the first phy going link up for the port we * can just enable it and continue */ - scic_sds_port_activate_phy(iport, iphy, true); + sci_port_activate_phy(iport, iphy, true); port_state_machine_change(iport, SCI_PORT_SUB_OPERATIONAL); return SCI_SUCCESS; case SCI_PORT_SUB_OPERATIONAL: - scic_sds_port_general_link_up_handler(iport, iphy, true); + sci_port_general_link_up_handler(iport, iphy, true); return SCI_SUCCESS; case SCI_PORT_RESETTING: /* TODO We should make sure that the phy that has gone @@ -1494,7 +1374,7 @@ enum sci_status scic_sds_port_link_up(struct isci_port *iport, /* In the resetting state we don't notify the user regarding * link up and link down notifications. */ - scic_sds_port_general_link_up_handler(iport, iphy, false); + sci_port_general_link_up_handler(iport, iphy, false); return SCI_SUCCESS; default: dev_warn(sciport_to_dev(iport), @@ -1503,15 +1383,15 @@ enum sci_status scic_sds_port_link_up(struct isci_port *iport, } } -enum sci_status scic_sds_port_link_down(struct isci_port *iport, +enum sci_status sci_port_link_down(struct isci_port *iport, struct isci_phy *iphy) { - enum scic_sds_port_states state; + enum sci_port_states state; state = iport->sm.current_state_id; switch (state) { case SCI_PORT_SUB_OPERATIONAL: - scic_sds_port_deactivate_phy(iport, iphy, true); + sci_port_deactivate_phy(iport, iphy, true); /* If there are no active phys left in the port, then * transition the port to the WAITING state until such time @@ -1524,7 +1404,7 @@ enum sci_status scic_sds_port_link_down(struct isci_port *iport, case SCI_PORT_RESETTING: /* In the resetting state we don't notify the user regarding * link up and link down notifications. 
*/ - scic_sds_port_deactivate_phy(iport, iphy, false); + sci_port_deactivate_phy(iport, iphy, false); return SCI_SUCCESS; default: dev_warn(sciport_to_dev(iport), @@ -1533,11 +1413,11 @@ enum sci_status scic_sds_port_link_down(struct isci_port *iport, } } -enum sci_status scic_sds_port_start_io(struct isci_port *iport, - struct isci_remote_device *idev, - struct isci_request *ireq) +enum sci_status sci_port_start_io(struct isci_port *iport, + struct isci_remote_device *idev, + struct isci_request *ireq) { - enum scic_sds_port_states state; + enum sci_port_states state; state = iport->sm.current_state_id; switch (state) { @@ -1553,11 +1433,11 @@ enum sci_status scic_sds_port_start_io(struct isci_port *iport, } } -enum sci_status scic_sds_port_complete_io(struct isci_port *iport, - struct isci_remote_device *idev, - struct isci_request *ireq) +enum sci_status sci_port_complete_io(struct isci_port *iport, + struct isci_remote_device *idev, + struct isci_request *ireq) { - enum scic_sds_port_states state; + enum sci_port_states state; state = iport->sm.current_state_id; switch (state) { @@ -1566,7 +1446,7 @@ enum sci_status scic_sds_port_complete_io(struct isci_port *iport, "%s: in wrong state: %d\n", __func__, state); return SCI_FAILURE_INVALID_STATE; case SCI_PORT_STOPPING: - scic_sds_port_decrement_request_count(iport); + sci_port_decrement_request_count(iport); if (iport->started_request_count == 0) port_state_machine_change(iport, @@ -1577,10 +1457,10 @@ enum sci_status scic_sds_port_complete_io(struct isci_port *iport, case SCI_PORT_FAILED: case SCI_PORT_SUB_WAITING: case SCI_PORT_SUB_OPERATIONAL: - scic_sds_port_decrement_request_count(iport); + sci_port_decrement_request_count(iport); break; case SCI_PORT_SUB_CONFIGURING: - scic_sds_port_decrement_request_count(iport); + sci_port_decrement_request_count(iport); if (iport->started_request_count == 0) { port_state_machine_change(iport, SCI_PORT_SUB_OPERATIONAL); @@ -1590,32 +1470,17 @@ enum sci_status scic_sds_port_complete_io(struct isci_port *iport, return SCI_SUCCESS; } -/** - * - * @sci_port: This is the port object which to suspend. - * - * This method will enable the SCU Port Task Scheduler for this port object but - * will leave the port task scheduler in a suspended state. none - */ -static void -scic_sds_port_enable_port_task_scheduler(struct isci_port *iport) +static void sci_port_enable_port_task_scheduler(struct isci_port *iport) { u32 pts_control_value; + /* enable the port task scheduler in a suspended state */ pts_control_value = readl(&iport->port_task_scheduler_registers->control); pts_control_value |= SCU_PTSxCR_GEN_BIT(ENABLE) | SCU_PTSxCR_GEN_BIT(SUSPEND); writel(pts_control_value, &iport->port_task_scheduler_registers->control); } -/** - * - * @sci_port: This is the port object which to resume. - * - * This method will disable the SCU port task scheduler for this port object. 
- * none - */ -static void -scic_sds_port_disable_port_task_scheduler(struct isci_port *iport) +static void sci_port_disable_port_task_scheduler(struct isci_port *iport) { u32 pts_control_value; @@ -1625,7 +1490,7 @@ scic_sds_port_disable_port_task_scheduler(struct isci_port *iport) writel(pts_control_value, &iport->port_task_scheduler_registers->control); } -static void scic_sds_port_post_dummy_remote_node(struct isci_port *iport) +static void sci_port_post_dummy_remote_node(struct isci_port *iport) { struct isci_host *ihost = iport->owning_controller; u8 phys_index = iport->physical_port_index; @@ -1639,7 +1504,7 @@ static void scic_sds_port_post_dummy_remote_node(struct isci_port *iport) command = SCU_CONTEXT_COMMAND_POST_RNC_32 | phys_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT | rni; - scic_sds_controller_post_request(ihost, command); + sci_controller_post_request(ihost, command); /* ensure hardware has seen the post rnc command and give it * ample time to act before sending the suspend @@ -1650,10 +1515,10 @@ static void scic_sds_port_post_dummy_remote_node(struct isci_port *iport) command = SCU_CONTEXT_COMMAND_POST_RNC_SUSPEND_TX_RX | phys_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT | rni; - scic_sds_controller_post_request(ihost, command); + sci_controller_post_request(ihost, command); } -static void scic_sds_port_stopped_state_enter(struct sci_base_state_machine *sm) +static void sci_port_stopped_state_enter(struct sci_base_state_machine *sm) { struct isci_port *iport = container_of(sm, typeof(*iport), sm); @@ -1662,19 +1527,19 @@ static void scic_sds_port_stopped_state_enter(struct sci_base_state_machine *sm) * If we enter this state becasuse of a request to stop * the port then we want to disable the hardwares port * task scheduler. */ - scic_sds_port_disable_port_task_scheduler(iport); + sci_port_disable_port_task_scheduler(iport); } } -static void scic_sds_port_stopped_state_exit(struct sci_base_state_machine *sm) +static void sci_port_stopped_state_exit(struct sci_base_state_machine *sm) { struct isci_port *iport = container_of(sm, typeof(*iport), sm); /* Enable and suspend the port task scheduler */ - scic_sds_port_enable_port_task_scheduler(iport); + sci_port_enable_port_task_scheduler(iport); } -static void scic_sds_port_ready_state_enter(struct sci_base_state_machine *sm) +static void sci_port_ready_state_enter(struct sci_base_state_machine *sm) { struct isci_port *iport = container_of(sm, typeof(*iport), sm); struct isci_host *ihost = iport->owning_controller; @@ -1687,30 +1552,30 @@ static void scic_sds_port_ready_state_enter(struct sci_base_state_machine *sm) isci_port_not_ready(ihost, iport); /* Post and suspend the dummy remote node context for this port. 
*/ - scic_sds_port_post_dummy_remote_node(iport); + sci_port_post_dummy_remote_node(iport); /* Start the ready substate machine */ port_state_machine_change(iport, SCI_PORT_SUB_WAITING); } -static void scic_sds_port_resetting_state_exit(struct sci_base_state_machine *sm) +static void sci_port_resetting_state_exit(struct sci_base_state_machine *sm) { struct isci_port *iport = container_of(sm, typeof(*iport), sm); sci_del_timer(&iport->timer); } -static void scic_sds_port_stopping_state_exit(struct sci_base_state_machine *sm) +static void sci_port_stopping_state_exit(struct sci_base_state_machine *sm) { struct isci_port *iport = container_of(sm, typeof(*iport), sm); sci_del_timer(&iport->timer); - scic_sds_port_destroy_dummy_resources(iport); + sci_port_destroy_dummy_resources(iport); } -static void scic_sds_port_failed_state_enter(struct sci_base_state_machine *sm) +static void sci_port_failed_state_enter(struct sci_base_state_machine *sm) { struct isci_port *iport = container_of(sm, typeof(*iport), sm); @@ -1719,40 +1584,40 @@ static void scic_sds_port_failed_state_enter(struct sci_base_state_machine *sm) /* --------------------------------------------------------------------------- */ -static const struct sci_base_state scic_sds_port_state_table[] = { +static const struct sci_base_state sci_port_state_table[] = { [SCI_PORT_STOPPED] = { - .enter_state = scic_sds_port_stopped_state_enter, - .exit_state = scic_sds_port_stopped_state_exit + .enter_state = sci_port_stopped_state_enter, + .exit_state = sci_port_stopped_state_exit }, [SCI_PORT_STOPPING] = { - .exit_state = scic_sds_port_stopping_state_exit + .exit_state = sci_port_stopping_state_exit }, [SCI_PORT_READY] = { - .enter_state = scic_sds_port_ready_state_enter, + .enter_state = sci_port_ready_state_enter, }, [SCI_PORT_SUB_WAITING] = { - .enter_state = scic_sds_port_ready_substate_waiting_enter, + .enter_state = sci_port_ready_substate_waiting_enter, }, [SCI_PORT_SUB_OPERATIONAL] = { - .enter_state = scic_sds_port_ready_substate_operational_enter, - .exit_state = scic_sds_port_ready_substate_operational_exit + .enter_state = sci_port_ready_substate_operational_enter, + .exit_state = sci_port_ready_substate_operational_exit }, [SCI_PORT_SUB_CONFIGURING] = { - .enter_state = scic_sds_port_ready_substate_configuring_enter, - .exit_state = scic_sds_port_ready_substate_configuring_exit + .enter_state = sci_port_ready_substate_configuring_enter, + .exit_state = sci_port_ready_substate_configuring_exit }, [SCI_PORT_RESETTING] = { - .exit_state = scic_sds_port_resetting_state_exit + .exit_state = sci_port_resetting_state_exit }, [SCI_PORT_FAILED] = { - .enter_state = scic_sds_port_failed_state_enter, + .enter_state = sci_port_failed_state_enter, } }; -void scic_sds_port_construct(struct isci_port *iport, u8 index, +void sci_port_construct(struct isci_port *iport, u8 index, struct isci_host *ihost) { - sci_init_sm(&iport->sm, scic_sds_port_state_table, SCI_PORT_STOPPED); + sci_init_sm(&iport->sm, sci_port_state_table, SCI_PORT_STOPPED); iport->logical_port_index = SCIC_SDS_DUMMY_PORT; iport->physical_port_index = index; @@ -1798,9 +1663,7 @@ enum isci_status isci_port_get_state( return isci_port->status; } -void scic_sds_port_broadcast_change_received( - struct isci_port *iport, - struct isci_phy *iphy) +void sci_port_broadcast_change_received(struct isci_port *iport, struct isci_phy *iphy) { struct isci_host *ihost = iport->owning_controller; @@ -1823,7 +1686,7 @@ int isci_port_perform_hard_reset(struct isci_host *ihost, struct isci_port 
*ipor spin_lock_irqsave(&ihost->scic_lock, flags); #define ISCI_PORT_RESET_TIMEOUT SCIC_SDS_SIGNATURE_FIS_TIMEOUT - status = scic_port_hard_reset(iport, ISCI_PORT_RESET_TIMEOUT); + status = sci_port_hard_reset(iport, ISCI_PORT_RESET_TIMEOUT); spin_unlock_irqrestore(&ihost->scic_lock, flags); @@ -1840,7 +1703,7 @@ int isci_port_perform_hard_reset(struct isci_host *ihost, struct isci_port *ipor ret = TMF_RESP_FUNC_FAILED; dev_err(&ihost->pdev->dev, - "%s: iport = %p; scic_port_hard_reset call" + "%s: iport = %p; sci_port_hard_reset call" " failed 0x%x\n", __func__, iport, status); @@ -1863,8 +1726,8 @@ int isci_port_perform_hard_reset(struct isci_host *ihost, struct isci_port *ipor if (!iphy) continue; - scic_sds_phy_stop(iphy); - scic_sds_phy_start(iphy); + sci_phy_stop(iphy); + sci_phy_start(iphy); } spin_unlock_irqrestore(&ihost->scic_lock, flags); } diff --git a/drivers/scsi/isci/port.h b/drivers/scsi/isci/port.h index 9a9be7b47b4..4c4ab8126d9 100644 --- a/drivers/scsi/isci/port.h +++ b/drivers/scsi/isci/port.h @@ -123,7 +123,7 @@ struct isci_port { struct scu_viit_entry __iomem *viit_registers; }; -enum scic_port_not_ready_reason_code { +enum sci_port_not_ready_reason_code { SCIC_PORT_NOT_READY_NO_ACTIVE_PHYS, SCIC_PORT_NOT_READY_HARD_RESET_REQUESTED, SCIC_PORT_NOT_READY_INVALID_PORT_CONFIGURATION, @@ -132,25 +132,25 @@ enum scic_port_not_ready_reason_code { SCIC_PORT_NOT_READY_REASON_CODE_MAX }; -struct scic_port_end_point_properties { +struct sci_port_end_point_properties { struct sci_sas_address sas_address; - struct scic_phy_proto protocols; + struct sci_phy_proto protocols; }; -struct scic_port_properties { +struct sci_port_properties { u32 index; - struct scic_port_end_point_properties local; - struct scic_port_end_point_properties remote; + struct sci_port_end_point_properties local; + struct sci_port_end_point_properties remote; u32 phy_mask; }; /** - * enum scic_sds_port_states - This enumeration depicts all the states for the + * enum sci_port_states - This enumeration depicts all the states for the * common port state machine. * * */ -enum scic_sds_port_states { +enum sci_port_states { /** * This state indicates that the port has successfully been stopped. * In this state no new IO operations are permitted. 
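
The structures above are everything a consumer of the port API needs in order to inspect a port's make-up. As an illustration only (not part of this patch), a hypothetical debug helper along the following lines could live in port.c next to sci_port_get_properties(); it assumes nothing beyond the renamed struct sci_port_properties / struct sci_sas_address layouts shown in this diff, SCI_MAX_PHYS from the driver headers, and the kernel's pr_debug():

/* Illustrative sketch, not part of this patch.  Assumes placement in
 * port.c (sci_port_get_properties() is static there) and the driver's
 * own headers plus <linux/printk.h>.
 */
static void sci_port_dbg_dump(struct isci_port *iport)
{
	struct sci_port_properties prop;
	u32 phy_id;

	/* A dummy or otherwise invalid port reports SCI_FAILURE_INVALID_PORT. */
	if (sci_port_get_properties(iport, &prop) != SCI_SUCCESS)
		return;

	pr_debug("port %u: local sas %08x%08x attached sas %08x%08x\n",
		 prop.index,
		 prop.local.sas_address.high, prop.local.sas_address.low,
		 prop.remote.sas_address.high, prop.remote.sas_address.low);

	/* phy_mask carries one bit per member phy (bit 0 == phy id 0). */
	for (phy_id = 0; phy_id < SCI_MAX_PHYS; phy_id++)
		if (prop.phy_mask & (1 << phy_id))
			pr_debug("  phy %u is part of this port\n", phy_id);
}

The bit-per-phy convention read from prop.phy_mask here is the same one produced by sci_port_get_phys() and checked by sci_port_is_phy_mask_valid() earlier in this patch.
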
@@ -211,23 +211,23 @@ enum scic_sds_port_states { }; /** - * scic_sds_port_get_controller() - + * sci_port_get_controller() - * * Helper macro to get the owning controller of this port */ -#define scic_sds_port_get_controller(this_port) \ +#define sci_port_get_controller(this_port) \ ((this_port)->owning_controller) /** - * scic_sds_port_get_index() - + * sci_port_get_index() - * * This macro returns the physical port index for this port object */ -#define scic_sds_port_get_index(this_port) \ +#define sci_port_get_index(this_port) \ ((this_port)->physical_port_index) -static inline void scic_sds_port_decrement_request_count(struct isci_port *iport) +static inline void sci_port_decrement_request_count(struct isci_port *iport) { if (WARN_ONCE(iport->started_request_count == 0, "%s: tried to decrement started_request_count past 0!?", @@ -237,79 +237,73 @@ static inline void scic_sds_port_decrement_request_count(struct isci_port *iport iport->started_request_count--; } -#define scic_sds_port_active_phy(port, phy) \ +#define sci_port_active_phy(port, phy) \ (((port)->active_phy_mask & (1 << (phy)->phy_index)) != 0) -void scic_sds_port_construct( +void sci_port_construct( struct isci_port *iport, u8 port_index, struct isci_host *ihost); -enum sci_status scic_sds_port_initialize( - struct isci_port *iport, - void __iomem *port_task_scheduler_registers, - void __iomem *port_configuration_regsiter, - void __iomem *viit_registers); - -enum sci_status scic_sds_port_start(struct isci_port *iport); -enum sci_status scic_sds_port_stop(struct isci_port *iport); +enum sci_status sci_port_start(struct isci_port *iport); +enum sci_status sci_port_stop(struct isci_port *iport); -enum sci_status scic_sds_port_add_phy( +enum sci_status sci_port_add_phy( struct isci_port *iport, struct isci_phy *iphy); -enum sci_status scic_sds_port_remove_phy( +enum sci_status sci_port_remove_phy( struct isci_port *iport, struct isci_phy *iphy); -void scic_sds_port_setup_transports( +void sci_port_setup_transports( struct isci_port *iport, u32 device_id); void isci_port_bcn_enable(struct isci_host *, struct isci_port *); -void scic_sds_port_deactivate_phy( +void sci_port_deactivate_phy( struct isci_port *iport, struct isci_phy *iphy, bool do_notify_user); -bool scic_sds_port_link_detected( +bool sci_port_link_detected( struct isci_port *iport, struct isci_phy *iphy); -enum sci_status scic_sds_port_link_up(struct isci_port *iport, +enum sci_status sci_port_link_up(struct isci_port *iport, struct isci_phy *iphy); -enum sci_status scic_sds_port_link_down(struct isci_port *iport, +enum sci_status sci_port_link_down(struct isci_port *iport, struct isci_phy *iphy); struct isci_request; struct isci_remote_device; -enum sci_status scic_sds_port_start_io( +enum sci_status sci_port_start_io( struct isci_port *iport, struct isci_remote_device *idev, struct isci_request *ireq); -enum sci_status scic_sds_port_complete_io( +enum sci_status sci_port_complete_io( struct isci_port *iport, struct isci_remote_device *idev, struct isci_request *ireq); -enum sas_linkrate scic_sds_port_get_max_allowed_speed( +enum sas_linkrate sci_port_get_max_allowed_speed( struct isci_port *iport); -void scic_sds_port_broadcast_change_received( +void sci_port_broadcast_change_received( struct isci_port *iport, struct isci_phy *iphy); -bool scic_sds_port_is_valid_phy_assignment( +bool sci_port_is_valid_phy_assignment( struct isci_port *iport, u32 phy_index); -void scic_sds_port_get_sas_address( +void sci_port_get_sas_address( struct isci_port *iport, struct 
sci_sas_address *sas_address); -void scic_sds_port_get_attached_sas_address( +void sci_port_get_attached_sas_address( struct isci_port *iport, struct sci_sas_address *sas_address); diff --git a/drivers/scsi/isci/port_config.c b/drivers/scsi/isci/port_config.c index a0a135d54e9..c8b16db6bbd 100644 --- a/drivers/scsi/isci/port_config.c +++ b/drivers/scsi/isci/port_config.c @@ -112,7 +112,7 @@ static s32 sci_sas_address_compare( * port. port address if the port can be found to match the phy. * NULL if there is no matching port for the phy. */ -static struct isci_port *scic_sds_port_configuration_agent_find_port( +static struct isci_port *sci_port_configuration_agent_find_port( struct isci_host *ihost, struct isci_phy *iphy) { @@ -127,14 +127,14 @@ static struct isci_port *scic_sds_port_configuration_agent_find_port( * more phys match the sent and received SAS address as this phy in which * case it should participate in the same port. */ - scic_sds_phy_get_sas_address(iphy, &phy_sas_address); - scic_sds_phy_get_attached_sas_address(iphy, &phy_attached_device_address); + sci_phy_get_sas_address(iphy, &phy_sas_address); + sci_phy_get_attached_sas_address(iphy, &phy_attached_device_address); for (i = 0; i < ihost->logical_port_entries; i++) { struct isci_port *iport = &ihost->ports[i]; - scic_sds_port_get_sas_address(iport, &port_sas_address); - scic_sds_port_get_attached_sas_address(iport, &port_attached_device_address); + sci_port_get_sas_address(iport, &port_sas_address); + sci_port_get_attached_sas_address(iport, &port_attached_device_address); if (sci_sas_address_compare(port_sas_address, phy_sas_address) == 0 && sci_sas_address_compare(port_attached_device_address, phy_attached_device_address) == 0) @@ -156,9 +156,9 @@ static struct isci_port *scic_sds_port_configuration_agent_find_port( * this port configuration agent. SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION * the port configuration is not valid for this port configuration agent. */ -static enum sci_status scic_sds_port_configuration_agent_validate_ports( +static enum sci_status sci_port_configuration_agent_validate_ports( struct isci_host *ihost, - struct scic_sds_port_configuration_agent *port_agent) + struct sci_port_configuration_agent *port_agent) { struct sci_sas_address first_address; struct sci_sas_address second_address; @@ -194,8 +194,8 @@ static enum sci_status scic_sds_port_configuration_agent_validate_ports( * PE0 and PE3 can never have the same SAS Address unless they * are part of the same x4 wide port and we have already checked * for this condition. */ - scic_sds_phy_get_sas_address(&ihost->phys[0], &first_address); - scic_sds_phy_get_sas_address(&ihost->phys[3], &second_address); + sci_phy_get_sas_address(&ihost->phys[0], &first_address); + sci_phy_get_sas_address(&ihost->phys[3], &second_address); if (sci_sas_address_compare(first_address, second_address) == 0) { return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION; @@ -207,8 +207,8 @@ static enum sci_status scic_sds_port_configuration_agent_validate_ports( * part of the same port. 
*/ if (port_agent->phy_valid_port_range[0].min_index == 0 && port_agent->phy_valid_port_range[1].min_index == 1) { - scic_sds_phy_get_sas_address(&ihost->phys[0], &first_address); - scic_sds_phy_get_sas_address(&ihost->phys[2], &second_address); + sci_phy_get_sas_address(&ihost->phys[0], &first_address); + sci_phy_get_sas_address(&ihost->phys[2], &second_address); if (sci_sas_address_compare(first_address, second_address) == 0) { return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION; @@ -221,8 +221,8 @@ static enum sci_status scic_sds_port_configuration_agent_validate_ports( * part of the same port. */ if (port_agent->phy_valid_port_range[2].min_index == 2 && port_agent->phy_valid_port_range[3].min_index == 3) { - scic_sds_phy_get_sas_address(&ihost->phys[1], &first_address); - scic_sds_phy_get_sas_address(&ihost->phys[3], &second_address); + sci_phy_get_sas_address(&ihost->phys[1], &first_address); + sci_phy_get_sas_address(&ihost->phys[3], &second_address); if (sci_sas_address_compare(first_address, second_address) == 0) { return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION; @@ -239,8 +239,8 @@ static enum sci_status scic_sds_port_configuration_agent_validate_ports( /* verify all of the phys in the same port are using the same SAS address */ static enum sci_status -scic_sds_mpc_agent_validate_phy_configuration(struct isci_host *ihost, - struct scic_sds_port_configuration_agent *port_agent) +sci_mpc_agent_validate_phy_configuration(struct isci_host *ihost, + struct sci_port_configuration_agent *port_agent) { u32 phy_mask; u32 assigned_phy_mask; @@ -254,7 +254,7 @@ scic_sds_mpc_agent_validate_phy_configuration(struct isci_host *ihost, sas_address.low = 0; for (port_index = 0; port_index < SCI_MAX_PORTS; port_index++) { - phy_mask = ihost->oem_parameters.sds1.ports[port_index].phy_mask; + phy_mask = ihost->oem_parameters.ports[port_index].phy_mask; if (!phy_mask) continue; @@ -269,7 +269,7 @@ scic_sds_mpc_agent_validate_phy_configuration(struct isci_host *ihost, for (phy_index = 0; phy_index < SCI_MAX_PHYS; phy_index++) { if ((phy_mask & (1 << phy_index)) == 0) continue; - scic_sds_phy_get_sas_address(&ihost->phys[phy_index], + sci_phy_get_sas_address(&ihost->phys[phy_index], &sas_address); /* @@ -294,7 +294,7 @@ scic_sds_mpc_agent_validate_phy_configuration(struct isci_host *ihost, while (phy_index < SCI_MAX_PHYS) { if ((phy_mask & (1 << phy_index)) == 0) continue; - scic_sds_phy_get_sas_address(&ihost->phys[phy_index], + sci_phy_get_sas_address(&ihost->phys[phy_index], &phy_assigned_address); if (sci_sas_address_compare(sas_address, phy_assigned_address) != 0) { @@ -307,7 +307,7 @@ scic_sds_mpc_agent_validate_phy_configuration(struct isci_host *ihost, port_agent->phy_valid_port_range[phy_index].min_index = port_index; port_agent->phy_valid_port_range[phy_index].max_index = phy_index; - scic_sds_port_add_phy(&ihost->ports[port_index], + sci_port_add_phy(&ihost->ports[port_index], &ihost->phys[phy_index]); assigned_phy_mask |= (1 << phy_index); @@ -316,14 +316,14 @@ scic_sds_mpc_agent_validate_phy_configuration(struct isci_host *ihost, phy_index++; } - return scic_sds_port_configuration_agent_validate_ports(ihost, port_agent); + return sci_port_configuration_agent_validate_ports(ihost, port_agent); } static void mpc_agent_timeout(unsigned long data) { u8 index; struct sci_timer *tmr = (struct sci_timer *)data; - struct scic_sds_port_configuration_agent *port_agent; + struct sci_port_configuration_agent *port_agent; struct isci_host *ihost; unsigned long flags; u16 configure_phy_mask; @@ -355,8 
+355,8 @@ done: spin_unlock_irqrestore(&ihost->scic_lock, flags); } -static void scic_sds_mpc_agent_link_up(struct isci_host *ihost, - struct scic_sds_port_configuration_agent *port_agent, +static void sci_mpc_agent_link_up(struct isci_host *ihost, + struct sci_port_configuration_agent *port_agent, struct isci_port *iport, struct isci_phy *iphy) { @@ -367,10 +367,10 @@ static void scic_sds_mpc_agent_link_up(struct isci_host *ihost, if (!iport) return; - port_agent->phy_ready_mask |= (1 << scic_sds_phy_get_index(iphy)); - scic_sds_port_link_up(iport, iphy); - if ((iport->active_phy_mask & (1 << scic_sds_phy_get_index(iphy)))) - port_agent->phy_configured_mask |= (1 << scic_sds_phy_get_index(iphy)); + port_agent->phy_ready_mask |= (1 << sci_phy_get_index(iphy)); + sci_port_link_up(iport, iphy); + if ((iport->active_phy_mask & (1 << sci_phy_get_index(iphy)))) + port_agent->phy_configured_mask |= (1 << sci_phy_get_index(iphy)); } /** @@ -390,9 +390,9 @@ static void scic_sds_mpc_agent_link_up(struct isci_host *ihost, * not associated with a port there is no action taken. Is it possible to get a * link down notification from a phy that has no assocoated port? */ -static void scic_sds_mpc_agent_link_down( +static void sci_mpc_agent_link_down( struct isci_host *ihost, - struct scic_sds_port_configuration_agent *port_agent, + struct sci_port_configuration_agent *port_agent, struct isci_port *iport, struct isci_phy *iphy) { @@ -405,9 +405,9 @@ static void scic_sds_mpc_agent_link_down( * state. */ port_agent->phy_ready_mask &= - ~(1 << scic_sds_phy_get_index(iphy)); + ~(1 << sci_phy_get_index(iphy)); port_agent->phy_configured_mask &= - ~(1 << scic_sds_phy_get_index(iphy)); + ~(1 << sci_phy_get_index(iphy)); /* * Check to see if there are more phys waiting to be @@ -424,7 +424,7 @@ static void scic_sds_mpc_agent_link_down( SCIC_SDS_MPC_RECONFIGURATION_TIMEOUT); } - scic_sds_port_link_down(iport, iphy); + sci_port_link_down(iport, iphy); } } @@ -432,8 +432,8 @@ static void scic_sds_mpc_agent_link_down( * configuration mode. */ static enum sci_status -scic_sds_apc_agent_validate_phy_configuration(struct isci_host *ihost, - struct scic_sds_port_configuration_agent *port_agent) +sci_apc_agent_validate_phy_configuration(struct isci_host *ihost, + struct sci_port_configuration_agent *port_agent) { u8 phy_index; u8 port_index; @@ -446,11 +446,11 @@ scic_sds_apc_agent_validate_phy_configuration(struct isci_host *ihost, port_index = phy_index; /* Get the assigned SAS Address for the first PHY on the controller. 
*/ - scic_sds_phy_get_sas_address(&ihost->phys[phy_index], + sci_phy_get_sas_address(&ihost->phys[phy_index], &sas_address); while (++phy_index < SCI_MAX_PHYS) { - scic_sds_phy_get_sas_address(&ihost->phys[phy_index], + sci_phy_get_sas_address(&ihost->phys[phy_index], &phy_assigned_address); /* Verify each of the SAS address are all the same for every PHY */ @@ -465,11 +465,11 @@ scic_sds_apc_agent_validate_phy_configuration(struct isci_host *ihost, } } - return scic_sds_port_configuration_agent_validate_ports(ihost, port_agent); + return sci_port_configuration_agent_validate_ports(ihost, port_agent); } -static void scic_sds_apc_agent_configure_ports(struct isci_host *ihost, - struct scic_sds_port_configuration_agent *port_agent, +static void sci_apc_agent_configure_ports(struct isci_host *ihost, + struct sci_port_configuration_agent *port_agent, struct isci_phy *iphy, bool start_timer) { @@ -478,10 +478,10 @@ static void scic_sds_apc_agent_configure_ports(struct isci_host *ihost, struct isci_port *iport; enum SCIC_SDS_APC_ACTIVITY apc_activity = SCIC_SDS_APC_SKIP_PHY; - iport = scic_sds_port_configuration_agent_find_port(ihost, iphy); + iport = sci_port_configuration_agent_find_port(ihost, iphy); if (iport) { - if (scic_sds_port_is_valid_phy_assignment(iport, iphy->phy_index)) + if (sci_port_is_valid_phy_assignment(iport, iphy->phy_index)) apc_activity = SCIC_SDS_APC_ADD_PHY; else apc_activity = SCIC_SDS_APC_SKIP_PHY; @@ -499,7 +499,7 @@ static void scic_sds_apc_agent_configure_ports(struct isci_host *ihost, iport = &ihost->ports[port_index]; /* First we must make sure that this PHY can be added to this Port. */ - if (scic_sds_port_is_valid_phy_assignment(iport, iphy->phy_index)) { + if (sci_port_is_valid_phy_assignment(iport, iphy->phy_index)) { /* * Port contains a PHY with a greater PHY ID than the current * PHY that has gone link up. This phy can not be part of any @@ -559,7 +559,7 @@ static void scic_sds_apc_agent_configure_ports(struct isci_host *ihost, switch (apc_activity) { case SCIC_SDS_APC_ADD_PHY: - status = scic_sds_port_add_phy(iport, iphy); + status = sci_port_add_phy(iport, iphy); if (status == SCI_SUCCESS) { port_agent->phy_configured_mask |= (1 << iphy->phy_index); @@ -588,7 +588,7 @@ static void scic_sds_apc_agent_configure_ports(struct isci_host *ihost, } /** - * scic_sds_apc_agent_link_up - handle apc link up events + * sci_apc_agent_link_up - handle apc link up events * @scic: This is the controller object that receives the link up * notification. * @sci_port: This is the port object associated with the phy. If the is no @@ -599,8 +599,8 @@ static void scic_sds_apc_agent_configure_ports(struct isci_host *ihost, * notifications. Is it possible to get a link down notification from a phy * that has no assocoated port? 
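Illustrative aside: in automatic configuration mode the validation above reduces to checking that every phy reports the same SAS address as phy 0. A minimal standalone sketch of that check follows; the toy_* names and types are simplified stand-ins, not the driver's own API.

#include <stdbool.h>
#include <stddef.h>

/* simplified stand-in for the driver's SAS address type */
struct toy_sas_address {
        unsigned int high;
        unsigned int low;
};

/*
 * Returns true when every phy reports the same SAS address, mirroring the
 * comparison loop in sci_apc_agent_validate_phy_configuration().
 */
static bool toy_all_phys_share_address(const struct toy_sas_address *addr,
                                       size_t nr_phys)
{
        size_t i;

        for (i = 1; i < nr_phys; i++)
                if (addr[i].high != addr[0].high || addr[i].low != addr[0].low)
                        return false;
        return true;
}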
*/ -static void scic_sds_apc_agent_link_up(struct isci_host *ihost, - struct scic_sds_port_configuration_agent *port_agent, +static void sci_apc_agent_link_up(struct isci_host *ihost, + struct sci_port_configuration_agent *port_agent, struct isci_port *iport, struct isci_phy *iphy) { @@ -609,7 +609,7 @@ static void scic_sds_apc_agent_link_up(struct isci_host *ihost, if (!iport) { /* the phy is not the part of this port */ port_agent->phy_ready_mask |= 1 << phy_index; - scic_sds_apc_agent_configure_ports(ihost, port_agent, iphy, true); + sci_apc_agent_configure_ports(ihost, port_agent, iphy, true); } else { /* the phy is already the part of the port */ u32 port_state = iport->sm.current_state_id; @@ -620,7 +620,7 @@ static void scic_sds_apc_agent_link_up(struct isci_host *ihost, */ BUG_ON(port_state != SCI_PORT_RESETTING); port_agent->phy_ready_mask |= 1 << phy_index; - scic_sds_port_link_up(iport, iphy); + sci_port_link_up(iport, iphy); } } @@ -637,20 +637,20 @@ static void scic_sds_apc_agent_link_up(struct isci_host *ihost, * possible to get a link down notification from a phy that has no assocoated * port? */ -static void scic_sds_apc_agent_link_down( +static void sci_apc_agent_link_down( struct isci_host *ihost, - struct scic_sds_port_configuration_agent *port_agent, + struct sci_port_configuration_agent *port_agent, struct isci_port *iport, struct isci_phy *iphy) { - port_agent->phy_ready_mask &= ~(1 << scic_sds_phy_get_index(iphy)); + port_agent->phy_ready_mask &= ~(1 << sci_phy_get_index(iphy)); if (!iport) return; if (port_agent->phy_configured_mask & (1 << iphy->phy_index)) { enum sci_status status; - status = scic_sds_port_remove_phy(iport, iphy); + status = sci_port_remove_phy(iport, iphy); if (status == SCI_SUCCESS) port_agent->phy_configured_mask &= ~(1 << iphy->phy_index); @@ -662,7 +662,7 @@ static void apc_agent_timeout(unsigned long data) { u32 index; struct sci_timer *tmr = (struct sci_timer *)data; - struct scic_sds_port_configuration_agent *port_agent; + struct sci_port_configuration_agent *port_agent; struct isci_host *ihost; unsigned long flags; u16 configure_phy_mask; @@ -686,7 +686,7 @@ static void apc_agent_timeout(unsigned long data) if ((configure_phy_mask & (1 << index)) == 0) continue; - scic_sds_apc_agent_configure_ports(ihost, port_agent, + sci_apc_agent_configure_ports(ihost, port_agent, &ihost->phys[index], false); } @@ -706,8 +706,8 @@ done: * call is universal for both manual port configuration and automatic port * configuration modes. 
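Illustrative aside: the link-up and link-down handlers above are essentially bitmask bookkeeping keyed by phy index: set the ready bit on link up, clear both the ready and configured bits on link down. A small sketch of that idiom with a toy agent structure (the toy_* names are illustrative stand-ins, not the driver's types):

#include <stdint.h>

struct toy_agent {
        uint8_t phy_ready_mask;      /* bit N set: phy N has link */
        uint8_t phy_configured_mask; /* bit N set: phy N bound to a port */
};

static void toy_link_up(struct toy_agent *a, unsigned int phy_index,
                        int phy_is_in_port)
{
        a->phy_ready_mask |= 1u << phy_index;
        if (phy_is_in_port)
                a->phy_configured_mask |= 1u << phy_index;
}

static void toy_link_down(struct toy_agent *a, unsigned int phy_index)
{
        a->phy_ready_mask &= ~(1u << phy_index);
        a->phy_configured_mask &= ~(1u << phy_index);
}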
*/ -void scic_sds_port_configuration_agent_construct( - struct scic_sds_port_configuration_agent *port_agent) +void sci_port_configuration_agent_construct( + struct sci_port_configuration_agent *port_agent) { u32 index; @@ -725,29 +725,29 @@ void scic_sds_port_configuration_agent_construct( } } -enum sci_status scic_sds_port_configuration_agent_initialize( +enum sci_status sci_port_configuration_agent_initialize( struct isci_host *ihost, - struct scic_sds_port_configuration_agent *port_agent) + struct sci_port_configuration_agent *port_agent) { enum sci_status status; - enum scic_port_configuration_mode mode; + enum sci_port_configuration_mode mode; - mode = ihost->oem_parameters.sds1.controller.mode_type; + mode = ihost->oem_parameters.controller.mode_type; if (mode == SCIC_PORT_MANUAL_CONFIGURATION_MODE) { - status = scic_sds_mpc_agent_validate_phy_configuration( + status = sci_mpc_agent_validate_phy_configuration( ihost, port_agent); - port_agent->link_up_handler = scic_sds_mpc_agent_link_up; - port_agent->link_down_handler = scic_sds_mpc_agent_link_down; + port_agent->link_up_handler = sci_mpc_agent_link_up; + port_agent->link_down_handler = sci_mpc_agent_link_down; sci_init_timer(&port_agent->timer, mpc_agent_timeout); } else { - status = scic_sds_apc_agent_validate_phy_configuration( + status = sci_apc_agent_validate_phy_configuration( ihost, port_agent); - port_agent->link_up_handler = scic_sds_apc_agent_link_up; - port_agent->link_down_handler = scic_sds_apc_agent_link_down; + port_agent->link_up_handler = sci_apc_agent_link_up; + port_agent->link_down_handler = sci_apc_agent_link_down; sci_init_timer(&port_agent->timer, apc_agent_timeout); } diff --git a/drivers/scsi/isci/probe_roms.c b/drivers/scsi/isci/probe_roms.c index 99b13c19187..c7732fb2888 100644 --- a/drivers/scsi/isci/probe_roms.c +++ b/drivers/scsi/isci/probe_roms.c @@ -111,25 +111,15 @@ struct isci_orom *isci_request_oprom(struct pci_dev *pdev) return rom; } -/** - * isci_parse_oem_parameters() - This method will take OEM parameters - * from the module init parameters and copy them to oem_params. This will - * only copy values that are not set to the module parameter default values - * @oem_parameters: This parameter specifies the controller default OEM - * parameters. 
It is expected that this has been initialized to the default - * parameters for the controller - * - * - */ -enum sci_status isci_parse_oem_parameters(union scic_oem_parameters *oem_params, +enum sci_status isci_parse_oem_parameters(struct sci_oem_params *oem, struct isci_orom *orom, int scu_index) { /* check for valid inputs */ if (scu_index < 0 || scu_index >= SCI_MAX_CONTROLLERS || - scu_index > orom->hdr.num_elements || !oem_params) + scu_index > orom->hdr.num_elements || !oem) return -EINVAL; - oem_params->sds1 = orom->ctrl[scu_index]; + *oem = orom->ctrl[scu_index]; return 0; } diff --git a/drivers/scsi/isci/probe_roms.h b/drivers/scsi/isci/probe_roms.h index e40cb5f6eba..dc007e692f4 100644 --- a/drivers/scsi/isci/probe_roms.h +++ b/drivers/scsi/isci/probe_roms.h @@ -74,7 +74,7 @@ #define SCIC_SDS_PARM_MAX_SPEED SCIC_SDS_PARM_GEN3_SPEED /* parameters that can be set by module parameters */ -struct scic_sds_user_parameters { +struct sci_user_parameters { struct sci_phy_user_params { /** * This field specifies the NOTIFY (ENABLE SPIN UP) primitive @@ -147,30 +147,16 @@ struct scic_sds_user_parameters { }; -/* XXX kill this union */ -union scic_user_parameters { - /** - * This field specifies the user parameters specific to the - * Storage Controller Unit (SCU) Driver Standard (SDS) version - * 1. - */ - struct scic_sds_user_parameters sds1; -}; - #define SCIC_SDS_PARM_PHY_MASK_MIN 0x0 #define SCIC_SDS_PARM_PHY_MASK_MAX 0xF #define MAX_CONCURRENT_DEVICE_SPIN_UP_COUNT 4 -struct scic_sds_oem_params; -int scic_oem_parameters_validate(struct scic_sds_oem_params *oem); - -union scic_oem_parameters; -void scic_oem_parameters_get(struct isci_host *ihost, - union scic_oem_parameters *oem); +struct sci_oem_params; +int sci_oem_parameters_validate(struct sci_oem_params *oem); struct isci_orom; struct isci_orom *isci_request_oprom(struct pci_dev *pdev); -enum sci_status isci_parse_oem_parameters(union scic_oem_parameters *oem, +enum sci_status isci_parse_oem_parameters(struct sci_oem_params *oem, struct isci_orom *orom, int scu_index); struct isci_orom *isci_request_firmware(struct pci_dev *pdev, const struct firmware *fw); struct isci_orom *isci_get_efi_var(struct pci_dev *pdev); @@ -214,7 +200,7 @@ struct isci_oem_hdr { * A PORT_PHY mask that assigns just a single PHY to a port and no other PHYs * being assigned is sufficient to declare manual PORT configuration. */ -enum scic_port_configuration_mode { +enum sci_port_configuration_mode { SCIC_PORT_MANUAL_CONFIGURATION_MODE = 0, SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE = 1 }; @@ -230,7 +216,7 @@ struct sci_bios_oem_param_block_hdr { uint8_t reserved[8]; } __attribute__ ((packed)); -struct scic_sds_oem_params { +struct sci_oem_params { struct { uint8_t mode_type; uint8_t max_concurrent_dev_spin_up; @@ -255,19 +241,9 @@ struct scic_sds_oem_params { } phys[SCI_MAX_PHYS]; } __attribute__ ((packed)); -/* XXX kill this union */ -union scic_oem_parameters { - /** - * This field specifies the OEM parameters specific to the - * Storage Controller Unit (SCU) Driver Standard (SDS) version - * 1. 
- */ - struct scic_sds_oem_params sds1; -}; - struct isci_orom { struct sci_bios_oem_param_block_hdr hdr; - struct scic_sds_oem_params ctrl[SCI_MAX_CONTROLLERS]; + struct sci_oem_params ctrl[SCI_MAX_CONTROLLERS]; } __attribute__ ((packed)); #endif diff --git a/drivers/scsi/isci/remote_device.c b/drivers/scsi/isci/remote_device.c index 9043b458c99..8c752abb433 100644 --- a/drivers/scsi/isci/remote_device.c +++ b/drivers/scsi/isci/remote_device.c @@ -68,7 +68,7 @@ * @isci_host: This parameter specifies the isci host object. * @isci_device: This parameter specifies the remote device * - * scic_lock is held on entrance to this function. + * sci_lock is held on entrance to this function. */ static void isci_remote_device_not_ready(struct isci_host *ihost, struct isci_remote_device *idev, u32 reason) @@ -92,7 +92,7 @@ static void isci_remote_device_not_ready(struct isci_host *ihost, "%s: isci_device = %p request = %p\n", __func__, idev, ireq); - scic_controller_terminate_request(ihost, + sci_controller_terminate_request(ihost, idev, ireq); } @@ -133,7 +133,7 @@ static void rnc_destruct_done(void *_dev) sci_change_state(&idev->sm, SCI_DEV_STOPPED); } -static enum sci_status scic_sds_remote_device_terminate_requests(struct isci_remote_device *idev) +static enum sci_status sci_remote_device_terminate_requests(struct isci_remote_device *idev) { struct isci_host *ihost = idev->owning_port->owning_controller; enum sci_status status = SCI_SUCCESS; @@ -147,7 +147,7 @@ static enum sci_status scic_sds_remote_device_terminate_requests(struct isci_rem ireq->target_device != idev) continue; - s = scic_controller_terminate_request(ihost, idev, ireq); + s = sci_controller_terminate_request(ihost, idev, ireq); if (s != SCI_SUCCESS) status = s; } @@ -155,11 +155,11 @@ static enum sci_status scic_sds_remote_device_terminate_requests(struct isci_rem return status; } -enum sci_status scic_remote_device_stop(struct isci_remote_device *idev, +enum sci_status sci_remote_device_stop(struct isci_remote_device *idev, u32 timeout) { struct sci_base_state_machine *sm = &idev->sm; - enum scic_sds_remote_device_states state = sm->current_state_id; + enum sci_remote_device_states state = sm->current_state_id; switch (state) { case SCI_DEV_INITIAL: @@ -174,7 +174,7 @@ enum sci_status scic_remote_device_stop(struct isci_remote_device *idev, case SCI_DEV_STARTING: /* device not started so there had better be no requests */ BUG_ON(idev->started_request_count != 0); - scic_sds_remote_node_context_destruct(&idev->rnc, + sci_remote_node_context_destruct(&idev->rnc, rnc_destruct_done, idev); /* Transition to the stopping state and wait for the * remote node to complete being posted and invalidated. @@ -191,28 +191,28 @@ enum sci_status scic_remote_device_stop(struct isci_remote_device *idev, case SCI_SMP_DEV_CMD: sci_change_state(sm, SCI_DEV_STOPPING); if (idev->started_request_count == 0) { - scic_sds_remote_node_context_destruct(&idev->rnc, + sci_remote_node_context_destruct(&idev->rnc, rnc_destruct_done, idev); return SCI_SUCCESS; } else - return scic_sds_remote_device_terminate_requests(idev); + return sci_remote_device_terminate_requests(idev); break; case SCI_DEV_STOPPING: /* All requests should have been terminated, but if there is an * attempt to stop a device already in the stopping state, then * try again to terminate. 
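Illustrative aside: sci_remote_device_stop() above shows the driver's state-machine gating, where the same entry point is legal or illegal purely according to the device state machine's current state. A compact sketch of that shape with toy states (illustrative names, not the driver's enums):

#include <stdio.h>

/* toy stand-ins for enum sci_remote_device_states / enum sci_status */
enum toy_dev_state { TOY_STOPPED, TOY_READY, TOY_STOPPING, TOY_RESETTING };
enum toy_status { TOY_SUCCESS, TOY_FAILURE_INVALID_STATE };

static enum toy_status toy_device_stop(enum toy_dev_state state)
{
        switch (state) {
        case TOY_STOPPED:
                return TOY_SUCCESS;             /* already stopped */
        case TOY_READY:
        case TOY_STOPPING:
        case TOY_RESETTING:
                /* the real driver terminates outstanding requests here */
                return TOY_SUCCESS;
        default:
                fprintf(stderr, "stop refused in state %d\n", state);
                return TOY_FAILURE_INVALID_STATE;
        }
}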
*/ - return scic_sds_remote_device_terminate_requests(idev); + return sci_remote_device_terminate_requests(idev); case SCI_DEV_RESETTING: sci_change_state(sm, SCI_DEV_STOPPING); return SCI_SUCCESS; } } -enum sci_status scic_remote_device_reset(struct isci_remote_device *idev) +enum sci_status sci_remote_device_reset(struct isci_remote_device *idev) { struct sci_base_state_machine *sm = &idev->sm; - enum scic_sds_remote_device_states state = sm->current_state_id; + enum sci_remote_device_states state = sm->current_state_id; switch (state) { case SCI_DEV_INITIAL: @@ -239,10 +239,10 @@ enum sci_status scic_remote_device_reset(struct isci_remote_device *idev) } } -enum sci_status scic_remote_device_reset_complete(struct isci_remote_device *idev) +enum sci_status sci_remote_device_reset_complete(struct isci_remote_device *idev) { struct sci_base_state_machine *sm = &idev->sm; - enum scic_sds_remote_device_states state = sm->current_state_id; + enum sci_remote_device_states state = sm->current_state_id; if (state != SCI_DEV_RESETTING) { dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n", @@ -254,11 +254,11 @@ enum sci_status scic_remote_device_reset_complete(struct isci_remote_device *ide return SCI_SUCCESS; } -enum sci_status scic_sds_remote_device_suspend(struct isci_remote_device *idev, +enum sci_status sci_remote_device_suspend(struct isci_remote_device *idev, u32 suspend_type) { struct sci_base_state_machine *sm = &idev->sm; - enum scic_sds_remote_device_states state = sm->current_state_id; + enum sci_remote_device_states state = sm->current_state_id; if (state != SCI_STP_DEV_CMD) { dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n", @@ -266,15 +266,15 @@ enum sci_status scic_sds_remote_device_suspend(struct isci_remote_device *idev, return SCI_FAILURE_INVALID_STATE; } - return scic_sds_remote_node_context_suspend(&idev->rnc, + return sci_remote_node_context_suspend(&idev->rnc, suspend_type, NULL, NULL); } -enum sci_status scic_sds_remote_device_frame_handler(struct isci_remote_device *idev, +enum sci_status sci_remote_device_frame_handler(struct isci_remote_device *idev, u32 frame_index) { struct sci_base_state_machine *sm = &idev->sm; - enum scic_sds_remote_device_states state = sm->current_state_id; + enum sci_remote_device_states state = sm->current_state_id; struct isci_host *ihost = idev->owning_port->owning_controller; enum sci_status status; @@ -289,7 +289,7 @@ enum sci_status scic_sds_remote_device_frame_handler(struct isci_remote_device * dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n", __func__, state); /* Return the frame back to the controller */ - scic_sds_controller_release_frame(ihost, frame_index); + sci_controller_release_frame(ihost, frame_index); return SCI_FAILURE_INVALID_STATE; case SCI_DEV_READY: case SCI_STP_DEV_NCQ_ERROR: @@ -302,7 +302,7 @@ enum sci_status scic_sds_remote_device_frame_handler(struct isci_remote_device * void *frame_header; ssize_t word_cnt; - status = scic_sds_unsolicited_frame_control_get_header(&ihost->uf_control, + status = sci_unsolicited_frame_control_get_header(&ihost->uf_control, frame_index, &frame_header); if (status != SCI_SUCCESS) @@ -311,22 +311,22 @@ enum sci_status scic_sds_remote_device_frame_handler(struct isci_remote_device * word_cnt = sizeof(hdr) / sizeof(u32); sci_swab32_cpy(&hdr, frame_header, word_cnt); - ireq = scic_request_by_tag(ihost, be16_to_cpu(hdr.tag)); + ireq = sci_request_by_tag(ihost, be16_to_cpu(hdr.tag)); if (ireq && ireq->target_device == idev) { /* The IO request is now in charge of 
releasing the frame */ - status = scic_sds_io_request_frame_handler(ireq, frame_index); + status = sci_io_request_frame_handler(ireq, frame_index); } else { /* We could not map this tag to a valid IO * request Just toss the frame and continue */ - scic_sds_controller_release_frame(ihost, frame_index); + sci_controller_release_frame(ihost, frame_index); } break; } case SCI_STP_DEV_NCQ: { struct dev_to_host_fis *hdr; - status = scic_sds_unsolicited_frame_control_get_header(&ihost->uf_control, + status = sci_unsolicited_frame_control_get_header(&ihost->uf_control, frame_index, (void **)&hdr); if (status != SCI_SUCCESS) @@ -349,7 +349,7 @@ enum sci_status scic_sds_remote_device_frame_handler(struct isci_remote_device * } else status = SCI_FAILURE; - scic_sds_controller_release_frame(ihost, frame_index); + sci_controller_release_frame(ihost, frame_index); break; } case SCI_STP_DEV_CMD: @@ -358,7 +358,7 @@ enum sci_status scic_sds_remote_device_frame_handler(struct isci_remote_device * * in this state. All unsolicited frames are forwarded to the io request * object. */ - status = scic_sds_io_request_frame_handler(idev->working_request, frame_index); + status = sci_io_request_frame_handler(idev->working_request, frame_index); break; } @@ -369,7 +369,7 @@ static bool is_remote_device_ready(struct isci_remote_device *idev) { struct sci_base_state_machine *sm = &idev->sm; - enum scic_sds_remote_device_states state = sm->current_state_id; + enum sci_remote_device_states state = sm->current_state_id; switch (state) { case SCI_DEV_READY: @@ -386,25 +386,25 @@ static bool is_remote_device_ready(struct isci_remote_device *idev) } } -enum sci_status scic_sds_remote_device_event_handler(struct isci_remote_device *idev, +enum sci_status sci_remote_device_event_handler(struct isci_remote_device *idev, u32 event_code) { struct sci_base_state_machine *sm = &idev->sm; - enum scic_sds_remote_device_states state = sm->current_state_id; + enum sci_remote_device_states state = sm->current_state_id; enum sci_status status; switch (scu_get_event_type(event_code)) { case SCU_EVENT_TYPE_RNC_OPS_MISC: case SCU_EVENT_TYPE_RNC_SUSPEND_TX: case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX: - status = scic_sds_remote_node_context_event_handler(&idev->rnc, event_code); + status = sci_remote_node_context_event_handler(&idev->rnc, event_code); break; case SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT: if (scu_get_event_code(event_code) == SCU_EVENT_IT_NEXUS_TIMEOUT) { status = SCI_SUCCESS; /* Suspend the associated RNC */ - scic_sds_remote_node_context_suspend(&idev->rnc, + sci_remote_node_context_suspend(&idev->rnc, SCI_SOFTWARE_SUSPENSION, NULL, NULL); @@ -439,13 +439,13 @@ enum sci_status scic_sds_remote_device_event_handler(struct isci_remote_device * */ if (scu_get_event_type(event_code) == SCU_EVENT_TYPE_RNC_SUSPEND_TX || scu_get_event_type(event_code) == SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX) - status = scic_sds_remote_node_context_resume(&idev->rnc, NULL, NULL); + status = sci_remote_node_context_resume(&idev->rnc, NULL, NULL); } return status; } -static void scic_sds_remote_device_start_request(struct isci_remote_device *idev, +static void sci_remote_device_start_request(struct isci_remote_device *idev, struct isci_request *ireq, enum sci_status status) { @@ -453,19 +453,19 @@ static void scic_sds_remote_device_start_request(struct isci_remote_device *idev /* cleanup requests that failed after starting on the port */ if (status != SCI_SUCCESS) - scic_sds_port_complete_io(iport, idev, ireq); + sci_port_complete_io(iport, idev, ireq); else { 
kref_get(&idev->kref); - scic_sds_remote_device_increment_request_count(idev); + sci_remote_device_increment_request_count(idev); } } -enum sci_status scic_sds_remote_device_start_io(struct isci_host *ihost, +enum sci_status sci_remote_device_start_io(struct isci_host *ihost, struct isci_remote_device *idev, struct isci_request *ireq) { struct sci_base_state_machine *sm = &idev->sm; - enum scic_sds_remote_device_states state = sm->current_state_id; + enum sci_remote_device_states state = sm->current_state_id; struct isci_port *iport = idev->owning_port; enum sci_status status; @@ -488,15 +488,15 @@ enum sci_status scic_sds_remote_device_start_io(struct isci_host *ihost, * successful it will start the request for the port object then * increment its own request count. */ - status = scic_sds_port_start_io(iport, idev, ireq); + status = sci_port_start_io(iport, idev, ireq); if (status != SCI_SUCCESS) return status; - status = scic_sds_remote_node_context_start_io(&idev->rnc, ireq); + status = sci_remote_node_context_start_io(&idev->rnc, ireq); if (status != SCI_SUCCESS) break; - status = scic_sds_request_start(ireq); + status = sci_request_start(ireq); break; case SCI_STP_DEV_IDLE: { /* handle the start io operation for a sata device that is in @@ -507,18 +507,18 @@ enum sci_status scic_sds_remote_device_start_io(struct isci_host *ihost, * If this is a softreset we may want to have a different * substate. */ - enum scic_sds_remote_device_states new_state; + enum sci_remote_device_states new_state; struct sas_task *task = isci_request_access_task(ireq); - status = scic_sds_port_start_io(iport, idev, ireq); + status = sci_port_start_io(iport, idev, ireq); if (status != SCI_SUCCESS) return status; - status = scic_sds_remote_node_context_start_io(&idev->rnc, ireq); + status = sci_remote_node_context_start_io(&idev->rnc, ireq); if (status != SCI_SUCCESS) break; - status = scic_sds_request_start(ireq); + status = sci_request_start(ireq); if (status != SCI_SUCCESS) break; @@ -535,15 +535,15 @@ enum sci_status scic_sds_remote_device_start_io(struct isci_host *ihost, struct sas_task *task = isci_request_access_task(ireq); if (task->ata_task.use_ncq) { - status = scic_sds_port_start_io(iport, idev, ireq); + status = sci_port_start_io(iport, idev, ireq); if (status != SCI_SUCCESS) return status; - status = scic_sds_remote_node_context_start_io(&idev->rnc, ireq); + status = sci_remote_node_context_start_io(&idev->rnc, ireq); if (status != SCI_SUCCESS) break; - status = scic_sds_request_start(ireq); + status = sci_request_start(ireq); } else return SCI_FAILURE_INVALID_STATE; break; @@ -551,15 +551,15 @@ enum sci_status scic_sds_remote_device_start_io(struct isci_host *ihost, case SCI_STP_DEV_AWAIT_RESET: return SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED; case SCI_SMP_DEV_IDLE: - status = scic_sds_port_start_io(iport, idev, ireq); + status = sci_port_start_io(iport, idev, ireq); if (status != SCI_SUCCESS) return status; - status = scic_sds_remote_node_context_start_io(&idev->rnc, ireq); + status = sci_remote_node_context_start_io(&idev->rnc, ireq); if (status != SCI_SUCCESS) break; - status = scic_sds_request_start(ireq); + status = sci_request_start(ireq); if (status != SCI_SUCCESS) break; @@ -574,7 +574,7 @@ enum sci_status scic_sds_remote_device_start_io(struct isci_host *ihost, return SCI_FAILURE_INVALID_STATE; } - scic_sds_remote_device_start_request(idev, ireq, status); + sci_remote_device_start_request(idev, ireq, status); return status; } @@ -584,24 +584,24 @@ static enum sci_status 
common_complete_io(struct isci_port *iport, { enum sci_status status; - status = scic_sds_request_complete(ireq); + status = sci_request_complete(ireq); if (status != SCI_SUCCESS) return status; - status = scic_sds_port_complete_io(iport, idev, ireq); + status = sci_port_complete_io(iport, idev, ireq); if (status != SCI_SUCCESS) return status; - scic_sds_remote_device_decrement_request_count(idev); + sci_remote_device_decrement_request_count(idev); return status; } -enum sci_status scic_sds_remote_device_complete_io(struct isci_host *ihost, +enum sci_status sci_remote_device_complete_io(struct isci_host *ihost, struct isci_remote_device *idev, struct isci_request *ireq) { struct sci_base_state_machine *sm = &idev->sm; - enum scic_sds_remote_device_states state = sm->current_state_id; + enum sci_remote_device_states state = sm->current_state_id; struct isci_port *iport = idev->owning_port; enum sci_status status; @@ -636,7 +636,7 @@ enum sci_status scic_sds_remote_device_complete_io(struct isci_host *ihost, * status of "DEVICE_RESET_REQUIRED", instead of "INVALID STATE". */ sci_change_state(sm, SCI_STP_DEV_AWAIT_RESET); - } else if (scic_sds_remote_device_get_request_count(idev) == 0) + } else if (sci_remote_device_get_request_count(idev) == 0) sci_change_state(sm, SCI_STP_DEV_IDLE); break; case SCI_SMP_DEV_CMD: @@ -650,8 +650,8 @@ enum sci_status scic_sds_remote_device_complete_io(struct isci_host *ihost, if (status != SCI_SUCCESS) break; - if (scic_sds_remote_device_get_request_count(idev) == 0) - scic_sds_remote_node_context_destruct(&idev->rnc, + if (sci_remote_device_get_request_count(idev) == 0) + sci_remote_node_context_destruct(&idev->rnc, rnc_destruct_done, idev); break; @@ -668,21 +668,21 @@ enum sci_status scic_sds_remote_device_complete_io(struct isci_host *ihost, return status; } -static void scic_sds_remote_device_continue_request(void *dev) +static void sci_remote_device_continue_request(void *dev) { struct isci_remote_device *idev = dev; /* we need to check if this request is still valid to continue. */ if (idev->working_request) - scic_controller_continue_io(idev->working_request); + sci_controller_continue_io(idev->working_request); } -enum sci_status scic_sds_remote_device_start_task(struct isci_host *ihost, +enum sci_status sci_remote_device_start_task(struct isci_host *ihost, struct isci_remote_device *idev, struct isci_request *ireq) { struct sci_base_state_machine *sm = &idev->sm; - enum scic_sds_remote_device_states state = sm->current_state_id; + enum sci_remote_device_states state = sm->current_state_id; struct isci_port *iport = idev->owning_port; enum sci_status status; @@ -705,15 +705,15 @@ enum sci_status scic_sds_remote_device_start_task(struct isci_host *ihost, case SCI_STP_DEV_NCQ: case SCI_STP_DEV_NCQ_ERROR: case SCI_STP_DEV_AWAIT_RESET: - status = scic_sds_port_start_io(iport, idev, ireq); + status = sci_port_start_io(iport, idev, ireq); if (status != SCI_SUCCESS) return status; - status = scic_sds_remote_node_context_start_task(&idev->rnc, ireq); + status = sci_remote_node_context_start_task(&idev->rnc, ireq); if (status != SCI_SUCCESS) goto out; - status = scic_sds_request_start(ireq); + status = sci_request_start(ireq); if (status != SCI_SUCCESS) goto out; @@ -731,32 +731,32 @@ enum sci_status scic_sds_remote_device_start_task(struct isci_host *ihost, * the correct action when the remote node context is suspended * and later resumed. 
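Illustrative aside: common_complete_io() above captures the completion ordering the device code relies on: finish the request, notify the owning port, and only then drop the device's started-request count, returning early if any step fails. A toy sketch of that ordering (the stub helpers stand in for sci_request_complete() and sci_port_complete_io()):

enum toy_io_status { TOY_IO_OK, TOY_IO_ERR };

struct toy_device {
        unsigned int started_request_count;
};

/* stand-ins for sci_request_complete() and sci_port_complete_io() */
static enum toy_io_status toy_request_complete(void)  { return TOY_IO_OK; }
static enum toy_io_status toy_port_complete_io(void)  { return TOY_IO_OK; }

static enum toy_io_status toy_common_complete_io(struct toy_device *dev)
{
        enum toy_io_status status;

        status = toy_request_complete();
        if (status != TOY_IO_OK)
                return status;

        status = toy_port_complete_io();
        if (status != TOY_IO_OK)
                return status;

        /* the count drops only after both layers accepted the completion */
        if (dev->started_request_count > 0)
                dev->started_request_count--;
        return TOY_IO_OK;
}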
*/ - scic_sds_remote_node_context_suspend(&idev->rnc, + sci_remote_node_context_suspend(&idev->rnc, SCI_SOFTWARE_SUSPENSION, NULL, NULL); - scic_sds_remote_node_context_resume(&idev->rnc, - scic_sds_remote_device_continue_request, + sci_remote_node_context_resume(&idev->rnc, + sci_remote_device_continue_request, idev); out: - scic_sds_remote_device_start_request(idev, ireq, status); + sci_remote_device_start_request(idev, ireq, status); /* We need to let the controller start request handler know that * it can't post TC yet. We will provide a callback function to * post TC when RNC gets resumed. */ return SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS; case SCI_DEV_READY: - status = scic_sds_port_start_io(iport, idev, ireq); + status = sci_port_start_io(iport, idev, ireq); if (status != SCI_SUCCESS) return status; - status = scic_sds_remote_node_context_start_task(&idev->rnc, ireq); + status = sci_remote_node_context_start_task(&idev->rnc, ireq); if (status != SCI_SUCCESS) break; - status = scic_sds_request_start(ireq); + status = sci_request_start(ireq); break; } - scic_sds_remote_device_start_request(idev, ireq, status); + sci_remote_device_start_request(idev, ireq, status); return status; } @@ -769,16 +769,16 @@ enum sci_status scic_sds_remote_device_start_task(struct isci_host *ihost, * This method takes the request and bulids an appropriate SCU context for the * request and then requests the controller to post the request. none */ -void scic_sds_remote_device_post_request( +void sci_remote_device_post_request( struct isci_remote_device *idev, u32 request) { u32 context; - context = scic_sds_remote_device_build_command_context(idev, request); + context = sci_remote_device_build_command_context(idev, request); - scic_sds_controller_post_request( - scic_sds_remote_device_get_controller(idev), + sci_controller_post_request( + sci_remote_device_get_controller(idev), context ); } @@ -798,7 +798,7 @@ static void remote_device_resume_done(void *_dev) sci_change_state(&idev->sm, SCI_DEV_READY); } -static void scic_sds_stp_remote_device_ready_idle_substate_resume_complete_handler(void *_dev) +static void sci_stp_remote_device_ready_idle_substate_resume_complete_handler(void *_dev) { struct isci_remote_device *idev = _dev; struct isci_host *ihost = idev->owning_port->owning_controller; @@ -810,7 +810,7 @@ static void scic_sds_stp_remote_device_ready_idle_substate_resume_complete_handl isci_remote_device_ready(ihost, idev); } -static void scic_sds_remote_device_initial_state_enter(struct sci_base_state_machine *sm) +static void sci_remote_device_initial_state_enter(struct sci_base_state_machine *sm) { struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); @@ -819,7 +819,7 @@ static void scic_sds_remote_device_initial_state_enter(struct sci_base_state_mac } /** - * scic_remote_device_destruct() - free remote node context and destruct + * sci_remote_device_destruct() - free remote node context and destruct * @remote_device: This parameter specifies the remote device to be destructed. * * Remote device objects are a limited resource. As such, they must be @@ -831,10 +831,10 @@ static void scic_sds_remote_device_initial_state_enter(struct sci_base_state_mac * device isn't valid (e.g. it's already been destoryed, the handle isn't * valid, etc.). 
*/ -static enum sci_status scic_remote_device_destruct(struct isci_remote_device *idev) +static enum sci_status sci_remote_device_destruct(struct isci_remote_device *idev) { struct sci_base_state_machine *sm = &idev->sm; - enum scic_sds_remote_device_states state = sm->current_state_id; + enum sci_remote_device_states state = sm->current_state_id; struct isci_host *ihost; if (state != SCI_DEV_STOPPED) { @@ -844,7 +844,7 @@ static enum sci_status scic_remote_device_destruct(struct isci_remote_device *id } ihost = idev->owning_port->owning_controller; - scic_sds_controller_free_remote_node_context(ihost, idev, + sci_controller_free_remote_node_context(ihost, idev, idev->rnc.remote_node_index); idev->rnc.remote_node_index = SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX; sci_change_state(sm, SCI_DEV_FINAL); @@ -869,12 +869,12 @@ static void isci_remote_device_deconstruct(struct isci_host *ihost, struct isci_ * io requests in process */ BUG_ON(!list_empty(&idev->reqs_in_process)); - scic_remote_device_destruct(idev); + sci_remote_device_destruct(idev); list_del_init(&idev->node); isci_put_device(idev); } -static void scic_sds_remote_device_stopped_state_enter(struct sci_base_state_machine *sm) +static void sci_remote_device_stopped_state_enter(struct sci_base_state_machine *sm) { struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); struct isci_host *ihost = idev->owning_port->owning_controller; @@ -887,19 +887,19 @@ static void scic_sds_remote_device_stopped_state_enter(struct sci_base_state_mac if (prev_state == SCI_DEV_STOPPING) isci_remote_device_deconstruct(ihost, idev); - scic_sds_controller_remote_device_stopped(ihost, idev); + sci_controller_remote_device_stopped(ihost, idev); } -static void scic_sds_remote_device_starting_state_enter(struct sci_base_state_machine *sm) +static void sci_remote_device_starting_state_enter(struct sci_base_state_machine *sm) { struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); - struct isci_host *ihost = scic_sds_remote_device_get_controller(idev); + struct isci_host *ihost = sci_remote_device_get_controller(idev); isci_remote_device_not_ready(ihost, idev, SCIC_REMOTE_DEVICE_NOT_READY_START_REQUESTED); } -static void scic_sds_remote_device_ready_state_enter(struct sci_base_state_machine *sm) +static void sci_remote_device_ready_state_enter(struct sci_base_state_machine *sm) { struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); struct isci_host *ihost = idev->owning_port->owning_controller; @@ -913,7 +913,7 @@ static void scic_sds_remote_device_ready_state_enter(struct sci_base_state_machi isci_remote_device_ready(ihost, idev); } -static void scic_sds_remote_device_ready_state_exit(struct sci_base_state_machine *sm) +static void sci_remote_device_ready_state_exit(struct sci_base_state_machine *sm) { struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); struct domain_device *dev = idev->domain_dev; @@ -926,42 +926,42 @@ static void scic_sds_remote_device_ready_state_exit(struct sci_base_state_machin } } -static void scic_sds_remote_device_resetting_state_enter(struct sci_base_state_machine *sm) +static void sci_remote_device_resetting_state_enter(struct sci_base_state_machine *sm) { struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); - scic_sds_remote_node_context_suspend( + sci_remote_node_context_suspend( &idev->rnc, SCI_SOFTWARE_SUSPENSION, NULL, NULL); } -static void scic_sds_remote_device_resetting_state_exit(struct sci_base_state_machine *sm) +static void 
sci_remote_device_resetting_state_exit(struct sci_base_state_machine *sm) { struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); - scic_sds_remote_node_context_resume(&idev->rnc, NULL, NULL); + sci_remote_node_context_resume(&idev->rnc, NULL, NULL); } -static void scic_sds_stp_remote_device_ready_idle_substate_enter(struct sci_base_state_machine *sm) +static void sci_stp_remote_device_ready_idle_substate_enter(struct sci_base_state_machine *sm) { struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); idev->working_request = NULL; - if (scic_sds_remote_node_context_is_ready(&idev->rnc)) { + if (sci_remote_node_context_is_ready(&idev->rnc)) { /* * Since the RNC is ready, it's alright to finish completion * processing (e.g. signal the remote device is ready). */ - scic_sds_stp_remote_device_ready_idle_substate_resume_complete_handler(idev); + sci_stp_remote_device_ready_idle_substate_resume_complete_handler(idev); } else { - scic_sds_remote_node_context_resume(&idev->rnc, - scic_sds_stp_remote_device_ready_idle_substate_resume_complete_handler, + sci_remote_node_context_resume(&idev->rnc, + sci_stp_remote_device_ready_idle_substate_resume_complete_handler, idev); } } -static void scic_sds_stp_remote_device_ready_cmd_substate_enter(struct sci_base_state_machine *sm) +static void sci_stp_remote_device_ready_cmd_substate_enter(struct sci_base_state_machine *sm) { struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); - struct isci_host *ihost = scic_sds_remote_device_get_controller(idev); + struct isci_host *ihost = sci_remote_device_get_controller(idev); BUG_ON(idev->working_request == NULL); @@ -969,28 +969,28 @@ static void scic_sds_stp_remote_device_ready_cmd_substate_enter(struct sci_base_ SCIC_REMOTE_DEVICE_NOT_READY_SATA_REQUEST_STARTED); } -static void scic_sds_stp_remote_device_ready_ncq_error_substate_enter(struct sci_base_state_machine *sm) +static void sci_stp_remote_device_ready_ncq_error_substate_enter(struct sci_base_state_machine *sm) { struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); - struct isci_host *ihost = scic_sds_remote_device_get_controller(idev); + struct isci_host *ihost = sci_remote_device_get_controller(idev); if (idev->not_ready_reason == SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED) isci_remote_device_not_ready(ihost, idev, idev->not_ready_reason); } -static void scic_sds_smp_remote_device_ready_idle_substate_enter(struct sci_base_state_machine *sm) +static void sci_smp_remote_device_ready_idle_substate_enter(struct sci_base_state_machine *sm) { struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); - struct isci_host *ihost = scic_sds_remote_device_get_controller(idev); + struct isci_host *ihost = sci_remote_device_get_controller(idev); isci_remote_device_ready(ihost, idev); } -static void scic_sds_smp_remote_device_ready_cmd_substate_enter(struct sci_base_state_machine *sm) +static void sci_smp_remote_device_ready_cmd_substate_enter(struct sci_base_state_machine *sm) { struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); - struct isci_host *ihost = scic_sds_remote_device_get_controller(idev); + struct isci_host *ihost = sci_remote_device_get_controller(idev); BUG_ON(idev->working_request == NULL); @@ -998,83 +998,83 @@ static void scic_sds_smp_remote_device_ready_cmd_substate_enter(struct sci_base_ SCIC_REMOTE_DEVICE_NOT_READY_SMP_REQUEST_STARTED); } -static void scic_sds_smp_remote_device_ready_cmd_substate_exit(struct sci_base_state_machine *sm) 
+static void sci_smp_remote_device_ready_cmd_substate_exit(struct sci_base_state_machine *sm) { struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); idev->working_request = NULL; } -static const struct sci_base_state scic_sds_remote_device_state_table[] = { +static const struct sci_base_state sci_remote_device_state_table[] = { [SCI_DEV_INITIAL] = { - .enter_state = scic_sds_remote_device_initial_state_enter, + .enter_state = sci_remote_device_initial_state_enter, }, [SCI_DEV_STOPPED] = { - .enter_state = scic_sds_remote_device_stopped_state_enter, + .enter_state = sci_remote_device_stopped_state_enter, }, [SCI_DEV_STARTING] = { - .enter_state = scic_sds_remote_device_starting_state_enter, + .enter_state = sci_remote_device_starting_state_enter, }, [SCI_DEV_READY] = { - .enter_state = scic_sds_remote_device_ready_state_enter, - .exit_state = scic_sds_remote_device_ready_state_exit + .enter_state = sci_remote_device_ready_state_enter, + .exit_state = sci_remote_device_ready_state_exit }, [SCI_STP_DEV_IDLE] = { - .enter_state = scic_sds_stp_remote_device_ready_idle_substate_enter, + .enter_state = sci_stp_remote_device_ready_idle_substate_enter, }, [SCI_STP_DEV_CMD] = { - .enter_state = scic_sds_stp_remote_device_ready_cmd_substate_enter, + .enter_state = sci_stp_remote_device_ready_cmd_substate_enter, }, [SCI_STP_DEV_NCQ] = { }, [SCI_STP_DEV_NCQ_ERROR] = { - .enter_state = scic_sds_stp_remote_device_ready_ncq_error_substate_enter, + .enter_state = sci_stp_remote_device_ready_ncq_error_substate_enter, }, [SCI_STP_DEV_AWAIT_RESET] = { }, [SCI_SMP_DEV_IDLE] = { - .enter_state = scic_sds_smp_remote_device_ready_idle_substate_enter, + .enter_state = sci_smp_remote_device_ready_idle_substate_enter, }, [SCI_SMP_DEV_CMD] = { - .enter_state = scic_sds_smp_remote_device_ready_cmd_substate_enter, - .exit_state = scic_sds_smp_remote_device_ready_cmd_substate_exit, + .enter_state = sci_smp_remote_device_ready_cmd_substate_enter, + .exit_state = sci_smp_remote_device_ready_cmd_substate_exit, }, [SCI_DEV_STOPPING] = { }, [SCI_DEV_FAILED] = { }, [SCI_DEV_RESETTING] = { - .enter_state = scic_sds_remote_device_resetting_state_enter, - .exit_state = scic_sds_remote_device_resetting_state_exit + .enter_state = sci_remote_device_resetting_state_enter, + .exit_state = sci_remote_device_resetting_state_exit }, [SCI_DEV_FINAL] = { }, }; /** - * scic_remote_device_construct() - common construction + * sci_remote_device_construct() - common construction * @sci_port: SAS/SATA port through which this device is accessed. * @sci_dev: remote device to construct * * This routine just performs benign initialization and does not * allocate the remote_node_context which is left to - * scic_remote_device_[de]a_construct(). scic_remote_device_destruct() + * sci_remote_device_[de]a_construct(). sci_remote_device_destruct() * frees the remote_node_context(s) for the device. */ -static void scic_remote_device_construct(struct isci_port *iport, +static void sci_remote_device_construct(struct isci_port *iport, struct isci_remote_device *idev) { idev->owning_port = iport; idev->started_request_count = 0; - sci_init_sm(&idev->sm, scic_sds_remote_device_state_table, SCI_DEV_INITIAL); + sci_init_sm(&idev->sm, sci_remote_device_state_table, SCI_DEV_INITIAL); - scic_sds_remote_node_context_construct(&idev->rnc, + sci_remote_node_context_construct(&idev->rnc, SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX); } /** - * scic_remote_device_da_construct() - construct direct attached device. 
+ * sci_remote_device_da_construct() - construct direct attached device. * * The information (e.g. IAF, Signature FIS, etc.) necessary to build * the device is known to the SCI Core since it is contained in the - * scic_phy object. Remote node context(s) is/are a global resource - * allocated by this routine, freed by scic_remote_device_destruct(). + * sci_phy object. Remote node context(s) is/are a global resource + * allocated by this routine, freed by sci_remote_device_destruct(). * * Returns: * SCI_FAILURE_DEVICE_EXISTS - device has already been constructed. @@ -1082,20 +1082,20 @@ static void scic_remote_device_construct(struct isci_port *iport, * sata-only controller instance. * SCI_FAILURE_INSUFFICIENT_RESOURCES - remote node contexts exhausted. */ -static enum sci_status scic_remote_device_da_construct(struct isci_port *iport, +static enum sci_status sci_remote_device_da_construct(struct isci_port *iport, struct isci_remote_device *idev) { enum sci_status status; struct domain_device *dev = idev->domain_dev; - scic_remote_device_construct(iport, idev); + sci_remote_device_construct(iport, idev); /* * This information is request to determine how many remote node context * entries will be needed to store the remote node. */ idev->is_direct_attached = true; - status = scic_sds_controller_allocate_remote_node_context(iport->owning_controller, + status = sci_controller_allocate_remote_node_context(iport->owning_controller, idev, &idev->rnc.remote_node_index); @@ -1108,7 +1108,7 @@ static enum sci_status scic_remote_device_da_construct(struct isci_port *iport, else return SCI_FAILURE_UNSUPPORTED_PROTOCOL; - idev->connection_rate = scic_sds_port_get_max_allowed_speed(iport); + idev->connection_rate = sci_port_get_max_allowed_speed(iport); /* / @todo Should I assign the port width by reading all of the phys on the port? */ idev->device_port_width = 1; @@ -1117,10 +1117,10 @@ static enum sci_status scic_remote_device_da_construct(struct isci_port *iport, } /** - * scic_remote_device_ea_construct() - construct expander attached device + * sci_remote_device_ea_construct() - construct expander attached device * * Remote node context(s) is/are a global resource allocated by this - * routine, freed by scic_remote_device_destruct(). + * routine, freed by sci_remote_device_destruct(). * * Returns: * SCI_FAILURE_DEVICE_EXISTS - device has already been constructed. @@ -1128,15 +1128,15 @@ static enum sci_status scic_remote_device_da_construct(struct isci_port *iport, * sata-only controller instance. * SCI_FAILURE_INSUFFICIENT_RESOURCES - remote node contexts exhausted. */ -static enum sci_status scic_remote_device_ea_construct(struct isci_port *iport, +static enum sci_status sci_remote_device_ea_construct(struct isci_port *iport, struct isci_remote_device *idev) { struct domain_device *dev = idev->domain_dev; enum sci_status status; - scic_remote_device_construct(iport, idev); + sci_remote_device_construct(iport, idev); - status = scic_sds_controller_allocate_remote_node_context(iport->owning_controller, + status = sci_controller_allocate_remote_node_context(iport->owning_controller, idev, &idev->rnc.remote_node_index); if (status != SCI_SUCCESS) @@ -1155,7 +1155,7 @@ static enum sci_status scic_remote_device_ea_construct(struct isci_port *iport, * connection the logical link rate is that same as the * physical. Furthermore, the SAS-2 and SAS-1.1 fields overlay * one another, so this code works for both situations. 
*/ - idev->connection_rate = min_t(u16, scic_sds_port_get_max_allowed_speed(iport), + idev->connection_rate = min_t(u16, sci_port_get_max_allowed_speed(iport), dev->linkrate); /* / @todo Should I assign the port width by reading all of the phys on the port? */ @@ -1165,7 +1165,7 @@ static enum sci_status scic_remote_device_ea_construct(struct isci_port *iport, } /** - * scic_remote_device_start() - This method will start the supplied remote + * sci_remote_device_start() - This method will start the supplied remote * device. This method enables normal IO requests to flow through to the * remote device. * @remote_device: This parameter specifies the device to be started. @@ -1177,11 +1177,11 @@ static enum sci_status scic_remote_device_ea_construct(struct isci_port *iport, * SCI_FAILURE_INVALID_PHY This value is returned if the user attempts to start * the device when there have been no phys added to it. */ -static enum sci_status scic_remote_device_start(struct isci_remote_device *idev, +static enum sci_status sci_remote_device_start(struct isci_remote_device *idev, u32 timeout) { struct sci_base_state_machine *sm = &idev->sm; - enum scic_sds_remote_device_states state = sm->current_state_id; + enum sci_remote_device_states state = sm->current_state_id; enum sci_status status; if (state != SCI_DEV_STOPPED) { @@ -1190,7 +1190,7 @@ static enum sci_status scic_remote_device_start(struct isci_remote_device *idev, return SCI_FAILURE_INVALID_STATE; } - status = scic_sds_remote_node_context_resume(&idev->rnc, + status = sci_remote_node_context_resume(&idev->rnc, remote_device_resume_done, idev); if (status != SCI_SUCCESS) @@ -1209,9 +1209,9 @@ static enum sci_status isci_remote_device_construct(struct isci_port *iport, enum sci_status status; if (dev->parent && dev_is_expander(dev->parent)) - status = scic_remote_device_ea_construct(iport, idev); + status = sci_remote_device_ea_construct(iport, idev); else - status = scic_remote_device_da_construct(iport, idev); + status = sci_remote_device_da_construct(iport, idev); if (status != SCI_SUCCESS) { dev_dbg(&ihost->pdev->dev, "%s: construct failed: %d\n", @@ -1221,7 +1221,7 @@ static enum sci_status isci_remote_device_construct(struct isci_port *iport, } /* start the device. */ - status = scic_remote_device_start(idev, ISCI_REMOTE_DEVICE_START_TIMEOUT); + status = sci_remote_device_start(idev, ISCI_REMOTE_DEVICE_START_TIMEOUT); if (status != SCI_SUCCESS) dev_warn(&ihost->pdev->dev, "remote device start failed: %d\n", @@ -1322,7 +1322,7 @@ enum sci_status isci_remote_device_stop(struct isci_host *ihost, struct isci_rem set_bit(IDEV_STOP_PENDING, &idev->flags); spin_lock_irqsave(&ihost->scic_lock, flags); - status = scic_remote_device_stop(idev, 50); + status = sci_remote_device_stop(idev, 50); spin_unlock_irqrestore(&ihost->scic_lock, flags); /* Wait for the stop complete callback. 
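Illustrative aside: isci_remote_device_construct() above picks the expander-attached or direct-attached constructor based only on whether the parent domain device is an expander, and the expander path clamps the logical connection rate to the slower of the port's maximum speed and the device's negotiated link rate. A simplified sketch of both decisions (toy types and rate values, not the SAS enums):

enum toy_rate { TOY_RATE_1_5G = 1, TOY_RATE_3G = 2, TOY_RATE_6G = 3 };

struct toy_domain_dev {
        struct toy_domain_dev *parent;  /* NULL for direct attached */
        int is_expander;
        enum toy_rate linkrate;
};

static enum toy_rate toy_min_rate(enum toy_rate a, enum toy_rate b)
{
        return a < b ? a : b;
}

static enum toy_rate toy_connection_rate(const struct toy_domain_dev *dev,
                                         enum toy_rate port_max)
{
        if (dev->parent && dev->parent->is_expander)
                return toy_min_rate(port_max, dev->linkrate); /* expander attached */
        return port_max;                                      /* direct attached */
}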
*/ diff --git a/drivers/scsi/isci/remote_device.h b/drivers/scsi/isci/remote_device.h index bc4da20a13f..fa9a0e6cc30 100644 --- a/drivers/scsi/isci/remote_device.h +++ b/drivers/scsi/isci/remote_device.h @@ -61,7 +61,7 @@ #include "remote_node_context.h" #include "port.h" -enum scic_remote_device_not_ready_reason_code { +enum sci_remote_device_not_ready_reason_code { SCIC_REMOTE_DEVICE_NOT_READY_START_REQUESTED, SCIC_REMOTE_DEVICE_NOT_READY_STOP_REQUESTED, SCIC_REMOTE_DEVICE_NOT_READY_SATA_REQUEST_STARTED, @@ -97,7 +97,7 @@ struct isci_remote_device { enum sas_linkrate connection_rate; bool is_direct_attached; struct isci_port *owning_port; - struct scic_sds_remote_node_context rnc; + struct sci_remote_node_context rnc; /* XXX unify with device reference counting and delete */ u32 started_request_count; struct isci_request *working_request; @@ -106,7 +106,7 @@ struct isci_remote_device { #define ISCI_REMOTE_DEVICE_START_TIMEOUT 5000 -/* device reference routines must be called under scic_lock */ +/* device reference routines must be called under sci_lock */ static inline struct isci_remote_device *isci_lookup_device(struct domain_device *dev) { struct isci_remote_device *idev = dev->lldd_dev; @@ -137,7 +137,7 @@ bool isci_device_is_reset_pending(struct isci_host *ihost, void isci_device_clear_reset_pending(struct isci_host *ihost, struct isci_remote_device *idev); /** - * scic_remote_device_stop() - This method will stop both transmission and + * sci_remote_device_stop() - This method will stop both transmission and * reception of link activity for the supplied remote device. This method * disables normal IO requests from flowing through to the remote device. * @remote_device: This parameter specifies the device to be stopped. @@ -148,12 +148,12 @@ void isci_device_clear_reset_pending(struct isci_host *ihost, * This value is returned if the transmission and reception for the device was * successfully stopped. */ -enum sci_status scic_remote_device_stop( +enum sci_status sci_remote_device_stop( struct isci_remote_device *idev, u32 timeout); /** - * scic_remote_device_reset() - This method will reset the device making it + * sci_remote_device_reset() - This method will reset the device making it * ready for operation. This method must be called anytime the device is * reset either through a SMP phy control or a port hard reset request. * @remote_device: This parameter specifies the device to be reset. @@ -164,11 +164,11 @@ enum sci_status scic_remote_device_stop( * was accepted. SCI_SUCCESS This value is returned if the device reset is * started. */ -enum sci_status scic_remote_device_reset( +enum sci_status sci_remote_device_reset( struct isci_remote_device *idev); /** - * scic_remote_device_reset_complete() - This method informs the device object + * sci_remote_device_reset_complete() - This method informs the device object * that the reset operation is complete and the device can resume operation * again. * @remote_device: This parameter specifies the device which is to be informed @@ -177,18 +177,16 @@ enum sci_status scic_remote_device_reset( * An indication that the device is resuming operation. SCI_SUCCESS the device * is resuming operation. 
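Illustrative aside: isci_lookup_device(), declared above, reads the domain device's lldd_dev back-pointer and is one of the "device reference routines" that must run under scic_lock. The usual shape of such a lookup is to take a reference while the lock still protects the pointer and let the caller drop it later; a toy sketch of that shape (plain counter instead of a kref, illustrative only):

struct toy_dev {
        unsigned int refcount;
};

/* caller must hold whatever lock protects 'slot' (scic_lock in the driver) */
static struct toy_dev *toy_lookup_get(struct toy_dev *slot)
{
        if (!slot)
                return NULL;
        slot->refcount++;       /* reference taken before the lock is dropped */
        return slot;
}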
*/ -enum sci_status scic_remote_device_reset_complete( +enum sci_status sci_remote_device_reset_complete( struct isci_remote_device *idev); -#define scic_remote_device_is_atapi(device_handle) false - /** - * enum scic_sds_remote_device_states - This enumeration depicts all the states + * enum sci_remote_device_states - This enumeration depicts all the states * for the common remote device state machine. * * */ -enum scic_sds_remote_device_states { +enum sci_remote_device_states { /** * Simply the initial state for the base remote device state machine. */ @@ -293,7 +291,7 @@ enum scic_sds_remote_device_states { SCI_DEV_FINAL, }; -static inline struct isci_remote_device *rnc_to_dev(struct scic_sds_remote_node_context *rnc) +static inline struct isci_remote_device *rnc_to_dev(struct sci_remote_node_context *rnc) { struct isci_remote_device *idev; @@ -308,122 +306,120 @@ static inline bool dev_is_expander(struct domain_device *dev) } /** - * scic_sds_remote_device_increment_request_count() - + * sci_remote_device_increment_request_count() - * * This macro incrments the request count for this device */ -#define scic_sds_remote_device_increment_request_count(idev) \ +#define sci_remote_device_increment_request_count(idev) \ ((idev)->started_request_count++) /** - * scic_sds_remote_device_decrement_request_count() - + * sci_remote_device_decrement_request_count() - * * This macro decrements the request count for this device. This count will * never decrment past 0. */ -#define scic_sds_remote_device_decrement_request_count(idev) \ +#define sci_remote_device_decrement_request_count(idev) \ ((idev)->started_request_count > 0 ? \ (idev)->started_request_count-- : 0) /** - * scic_sds_remote_device_get_request_count() - + * sci_remote_device_get_request_count() - * * This is a helper macro to return the current device request count. 
*/ -#define scic_sds_remote_device_get_request_count(idev) \ +#define sci_remote_device_get_request_count(idev) \ ((idev)->started_request_count) /** - * scic_sds_remote_device_get_controller() - + * sci_remote_device_get_controller() - * * This macro returns the controller object that contains this device object */ -#define scic_sds_remote_device_get_controller(idev) \ - scic_sds_port_get_controller(scic_sds_remote_device_get_port(idev)) +#define sci_remote_device_get_controller(idev) \ + sci_port_get_controller(sci_remote_device_get_port(idev)) /** - * scic_sds_remote_device_get_port() - + * sci_remote_device_get_port() - * * This macro returns the owning port of this device */ -#define scic_sds_remote_device_get_port(idev) \ +#define sci_remote_device_get_port(idev) \ ((idev)->owning_port) /** - * scic_sds_remote_device_get_controller_peg() - + * sci_remote_device_get_controller_peg() - * * This macro returns the controllers protocol engine group */ -#define scic_sds_remote_device_get_controller_peg(idev) \ +#define sci_remote_device_get_controller_peg(idev) \ (\ - scic_sds_controller_get_protocol_engine_group(\ - scic_sds_port_get_controller(\ - scic_sds_remote_device_get_port(idev) \ + sci_controller_get_protocol_engine_group(\ + sci_port_get_controller(\ + sci_remote_device_get_port(idev) \ ) \ ) \ ) /** - * scic_sds_remote_device_get_index() - + * sci_remote_device_get_index() - * * This macro returns the remote node index for this device object */ -#define scic_sds_remote_device_get_index(idev) \ +#define sci_remote_device_get_index(idev) \ ((idev)->rnc.remote_node_index) /** - * scic_sds_remote_device_build_command_context() - + * sci_remote_device_build_command_context() - * * This macro builds a remote device context for the SCU post request operation */ -#define scic_sds_remote_device_build_command_context(device, command) \ +#define sci_remote_device_build_command_context(device, command) \ ((command) \ - | (scic_sds_remote_device_get_controller_peg((device)) << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) \ + | (sci_remote_device_get_controller_peg((device)) << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) \ | ((device)->owning_port->physical_port_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) \ - | (scic_sds_remote_device_get_index((device))) \ + | (sci_remote_device_get_index((device))) \ ) /** - * scic_sds_remote_device_set_working_request() - + * sci_remote_device_set_working_request() - * * This macro makes the working request assingment for the remote device * object. To clear the working request use this macro with a NULL request * object. 
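Illustrative aside: sci_remote_device_build_command_context() above simply ORs the command code together with the protocol engine group, the logical port index, and the remote node index, each shifted into its own bit field. A standalone sketch of that packing; the TOY_*_SHIFT offsets below are invented for illustration and are not the real SCU register layout:

#include <stdint.h>

#define TOY_PEG_SHIFT    28     /* illustrative offset only */
#define TOY_PORT_SHIFT   24     /* illustrative offset only */

static uint32_t toy_build_command_context(uint32_t command, uint32_t peg,
                                          uint32_t port_index, uint32_t rnc_index)
{
        /* command | engine group | logical port | remote node index */
        return command |
               (peg << TOY_PEG_SHIFT) |
               (port_index << TOY_PORT_SHIFT) |
               rnc_index;
}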
*/ -#define scic_sds_remote_device_set_working_request(device, request) \ +#define sci_remote_device_set_working_request(device, request) \ ((device)->working_request = (request)) -enum sci_status scic_sds_remote_device_frame_handler( +enum sci_status sci_remote_device_frame_handler( struct isci_remote_device *idev, u32 frame_index); -enum sci_status scic_sds_remote_device_event_handler( +enum sci_status sci_remote_device_event_handler( struct isci_remote_device *idev, u32 event_code); -enum sci_status scic_sds_remote_device_start_io( +enum sci_status sci_remote_device_start_io( struct isci_host *ihost, struct isci_remote_device *idev, struct isci_request *ireq); -enum sci_status scic_sds_remote_device_start_task( +enum sci_status sci_remote_device_start_task( struct isci_host *ihost, struct isci_remote_device *idev, struct isci_request *ireq); -enum sci_status scic_sds_remote_device_complete_io( +enum sci_status sci_remote_device_complete_io( struct isci_host *ihost, struct isci_remote_device *idev, struct isci_request *ireq); -enum sci_status scic_sds_remote_device_suspend( +enum sci_status sci_remote_device_suspend( struct isci_remote_device *idev, u32 suspend_type); -void scic_sds_remote_device_post_request( +void sci_remote_device_post_request( struct isci_remote_device *idev, u32 request); -#define scic_sds_remote_device_is_atapi(idev) false - #endif /* !defined(_ISCI_REMOTE_DEVICE_H_) */ diff --git a/drivers/scsi/isci/remote_node_context.c b/drivers/scsi/isci/remote_node_context.c index 8a5203b6eb0..c2dfd5a7218 100644 --- a/drivers/scsi/isci/remote_node_context.c +++ b/drivers/scsi/isci/remote_node_context.c @@ -81,8 +81,8 @@ * otherwise it will return false bool true if the remote node context is in * the ready state. false if the remote node context is not in the ready state. */ -bool scic_sds_remote_node_context_is_ready( - struct scic_sds_remote_node_context *sci_rnc) +bool sci_remote_node_context_is_ready( + struct sci_remote_node_context *sci_rnc) { u32 current_state = sci_rnc->sm.current_state_id; @@ -93,15 +93,16 @@ bool scic_sds_remote_node_context_is_ready( return false; } -/** - * - * @sci_dev: The remote device to use to construct the RNC buffer. - * @rnc: The buffer into which the remote device data will be copied. - * - * This method will construct the RNC buffer for this remote device object. 
none - */ -static void scic_sds_remote_node_context_construct_buffer( - struct scic_sds_remote_node_context *sci_rnc) +static union scu_remote_node_context *sci_rnc_by_id(struct isci_host *ihost, u16 id) +{ + if (id < ihost->remote_node_entries && + ihost->device_table[id]) + return &ihost->remote_node_context_table[id]; + + return NULL; +} + +static void sci_remote_node_context_construct_buffer(struct sci_remote_node_context *sci_rnc) { struct isci_remote_device *idev = rnc_to_dev(sci_rnc); struct domain_device *dev = idev->domain_dev; @@ -110,11 +111,11 @@ static void scic_sds_remote_node_context_construct_buffer( struct isci_host *ihost; __le64 sas_addr; - ihost = scic_sds_remote_device_get_controller(idev); - rnc = scic_sds_controller_get_remote_node_context_buffer(ihost, rni); + ihost = sci_remote_device_get_controller(idev); + rnc = sci_rnc_by_id(ihost, rni); memset(rnc, 0, sizeof(union scu_remote_node_context) - * scic_sds_remote_device_node_count(idev)); + * sci_remote_device_node_count(idev)); rnc->ssp.remote_node_index = rni; rnc->ssp.remote_node_port_width = idev->device_port_width; @@ -135,14 +136,14 @@ static void scic_sds_remote_node_context_construct_buffer( if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) { rnc->ssp.connection_occupancy_timeout = - ihost->user_parameters.sds1.stp_max_occupancy_timeout; + ihost->user_parameters.stp_max_occupancy_timeout; rnc->ssp.connection_inactivity_timeout = - ihost->user_parameters.sds1.stp_inactivity_timeout; + ihost->user_parameters.stp_inactivity_timeout; } else { rnc->ssp.connection_occupancy_timeout = - ihost->user_parameters.sds1.ssp_max_occupancy_timeout; + ihost->user_parameters.ssp_max_occupancy_timeout; rnc->ssp.connection_inactivity_timeout = - ihost->user_parameters.sds1.ssp_inactivity_timeout; + ihost->user_parameters.ssp_inactivity_timeout; } rnc->ssp.initial_arbitration_wait_time = 0; @@ -164,8 +165,8 @@ static void scic_sds_remote_node_context_construct_buffer( * to its ready state. If the remote node context is already setup to * transition to its final state then this function does nothing. none */ -static void scic_sds_remote_node_context_setup_to_resume( - struct scic_sds_remote_node_context *sci_rnc, +static void sci_remote_node_context_setup_to_resume( + struct sci_remote_node_context *sci_rnc, scics_sds_remote_node_context_callback callback, void *callback_parameter) { @@ -176,8 +177,8 @@ static void scic_sds_remote_node_context_setup_to_resume( } } -static void scic_sds_remote_node_context_setup_to_destory( - struct scic_sds_remote_node_context *sci_rnc, +static void sci_remote_node_context_setup_to_destory( + struct sci_remote_node_context *sci_rnc, scics_sds_remote_node_context_callback callback, void *callback_parameter) { @@ -192,8 +193,8 @@ static void scic_sds_remote_node_context_setup_to_destory( * This method just calls the user callback function and then resets the * callback. 
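/*
 * Minimal sketch of the lookup pattern introduced by the new sci_rnc_by_id()
 * helper above: only return a remote node context slot when the index is in
 * range and a device is actually registered at that index.  The structure and
 * field names below are simplified stand-ins for the isci_host layout.
 */
#include <stdio.h>

struct rnc { unsigned int remote_node_index; };	/* stand-in for union scu_remote_node_context */

struct host {
	unsigned int remote_node_entries;
	void **device_table;			/* NULL entry == no device at that index */
	struct rnc *rnc_table;
};

static struct rnc *rnc_by_id(struct host *h, unsigned int id)
{
	if (id < h->remote_node_entries && h->device_table[id])
		return &h->rnc_table[id];

	return NULL;				/* out of range or unused index */
}

int main(void)
{
	struct rnc table[2] = { { 0 }, { 1 } };
	void *devs[2] = { table, NULL };	/* only index 0 has a device */
	struct host h = { 2, devs, table };

	printf("id 0 -> %p, id 1 -> %p\n",
	       (void *)rnc_by_id(&h, 0), (void *)rnc_by_id(&h, 1));
	return 0;
}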
*/ -static void scic_sds_remote_node_context_notify_user( - struct scic_sds_remote_node_context *rnc) +static void sci_remote_node_context_notify_user( + struct sci_remote_node_context *rnc) { if (rnc->user_callback != NULL) { (*rnc->user_callback)(rnc->user_cookie); @@ -203,99 +204,80 @@ static void scic_sds_remote_node_context_notify_user( } } -static void scic_sds_remote_node_context_continue_state_transitions(struct scic_sds_remote_node_context *rnc) +static void sci_remote_node_context_continue_state_transitions(struct sci_remote_node_context *rnc) { if (rnc->destination_state == SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_READY) - scic_sds_remote_node_context_resume(rnc, rnc->user_callback, + sci_remote_node_context_resume(rnc, rnc->user_callback, rnc->user_cookie); } -/** - * - * @sci_rnc: The remote node context object that is to be validated. - * - * This method will mark the rnc buffer as being valid and post the request to - * the hardware. none - */ -static void scic_sds_remote_node_context_validate_context_buffer( - struct scic_sds_remote_node_context *sci_rnc) +static void sci_remote_node_context_validate_context_buffer(struct sci_remote_node_context *sci_rnc) { + union scu_remote_node_context *rnc_buffer; struct isci_remote_device *idev = rnc_to_dev(sci_rnc); struct domain_device *dev = idev->domain_dev; - union scu_remote_node_context *rnc_buffer; + struct isci_host *ihost = idev->owning_port->owning_controller; - rnc_buffer = scic_sds_controller_get_remote_node_context_buffer( - scic_sds_remote_device_get_controller(idev), - sci_rnc->remote_node_index - ); + rnc_buffer = sci_rnc_by_id(ihost, sci_rnc->remote_node_index); rnc_buffer->ssp.is_valid = true; if (!idev->is_direct_attached && (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP))) { - scic_sds_remote_device_post_request(idev, - SCU_CONTEXT_COMMAND_POST_RNC_96); + sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_96); } else { - scic_sds_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_32); + sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_32); - if (idev->is_direct_attached) { - scic_sds_port_setup_transports(idev->owning_port, - sci_rnc->remote_node_index); - } + if (idev->is_direct_attached) + sci_port_setup_transports(idev->owning_port, + sci_rnc->remote_node_index); } } -/** - * - * @sci_rnc: The remote node context object that is to be invalidated. - * - * This method will update the RNC buffer and post the invalidate request. 
none - */ -static void scic_sds_remote_node_context_invalidate_context_buffer( - struct scic_sds_remote_node_context *sci_rnc) +static void sci_remote_node_context_invalidate_context_buffer(struct sci_remote_node_context *sci_rnc) { union scu_remote_node_context *rnc_buffer; + struct isci_remote_device *idev = rnc_to_dev(sci_rnc); + struct isci_host *ihost = idev->owning_port->owning_controller; - rnc_buffer = scic_sds_controller_get_remote_node_context_buffer( - scic_sds_remote_device_get_controller(rnc_to_dev(sci_rnc)), - sci_rnc->remote_node_index); + rnc_buffer = sci_rnc_by_id(ihost, sci_rnc->remote_node_index); rnc_buffer->ssp.is_valid = false; - scic_sds_remote_device_post_request(rnc_to_dev(sci_rnc), - SCU_CONTEXT_COMMAND_POST_RNC_INVALIDATE); + sci_remote_device_post_request(rnc_to_dev(sci_rnc), + SCU_CONTEXT_COMMAND_POST_RNC_INVALIDATE); } -static void scic_sds_remote_node_context_initial_state_enter(struct sci_base_state_machine *sm) +static void sci_remote_node_context_initial_state_enter(struct sci_base_state_machine *sm) { - struct scic_sds_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm); + struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm); /* Check to see if we have gotten back to the initial state because * someone requested to destroy the remote node context object. */ if (sm->previous_state_id == SCI_RNC_INVALIDATING) { rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_UNSPECIFIED; - scic_sds_remote_node_context_notify_user(rnc); + sci_remote_node_context_notify_user(rnc); } } -static void scic_sds_remote_node_context_posting_state_enter(struct sci_base_state_machine *sm) +static void sci_remote_node_context_posting_state_enter(struct sci_base_state_machine *sm) { - struct scic_sds_remote_node_context *sci_rnc = container_of(sm, typeof(*sci_rnc), sm); + struct sci_remote_node_context *sci_rnc = container_of(sm, typeof(*sci_rnc), sm); - scic_sds_remote_node_context_validate_context_buffer(sci_rnc); + sci_remote_node_context_validate_context_buffer(sci_rnc); } -static void scic_sds_remote_node_context_invalidating_state_enter(struct sci_base_state_machine *sm) +static void sci_remote_node_context_invalidating_state_enter(struct sci_base_state_machine *sm) { - struct scic_sds_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm); + struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm); - scic_sds_remote_node_context_invalidate_context_buffer(rnc); + sci_remote_node_context_invalidate_context_buffer(rnc); } -static void scic_sds_remote_node_context_resuming_state_enter(struct sci_base_state_machine *sm) +static void sci_remote_node_context_resuming_state_enter(struct sci_base_state_machine *sm) { - struct scic_sds_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm); + struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm); struct isci_remote_device *idev; struct domain_device *dev; @@ -310,73 +292,73 @@ static void scic_sds_remote_node_context_resuming_state_enter(struct sci_base_st */ if ((dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) && idev->is_direct_attached) - scic_sds_port_setup_transports(idev->owning_port, + sci_port_setup_transports(idev->owning_port, rnc->remote_node_index); - scic_sds_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_RESUME); + sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_RESUME); } -static void scic_sds_remote_node_context_ready_state_enter(struct sci_base_state_machine *sm) +static void 
sci_remote_node_context_ready_state_enter(struct sci_base_state_machine *sm) { - struct scic_sds_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm); + struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm); rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_UNSPECIFIED; if (rnc->user_callback) - scic_sds_remote_node_context_notify_user(rnc); + sci_remote_node_context_notify_user(rnc); } -static void scic_sds_remote_node_context_tx_suspended_state_enter(struct sci_base_state_machine *sm) +static void sci_remote_node_context_tx_suspended_state_enter(struct sci_base_state_machine *sm) { - struct scic_sds_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm); + struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm); - scic_sds_remote_node_context_continue_state_transitions(rnc); + sci_remote_node_context_continue_state_transitions(rnc); } -static void scic_sds_remote_node_context_tx_rx_suspended_state_enter(struct sci_base_state_machine *sm) +static void sci_remote_node_context_tx_rx_suspended_state_enter(struct sci_base_state_machine *sm) { - struct scic_sds_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm); + struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm); - scic_sds_remote_node_context_continue_state_transitions(rnc); + sci_remote_node_context_continue_state_transitions(rnc); } -static const struct sci_base_state scic_sds_remote_node_context_state_table[] = { +static const struct sci_base_state sci_remote_node_context_state_table[] = { [SCI_RNC_INITIAL] = { - .enter_state = scic_sds_remote_node_context_initial_state_enter, + .enter_state = sci_remote_node_context_initial_state_enter, }, [SCI_RNC_POSTING] = { - .enter_state = scic_sds_remote_node_context_posting_state_enter, + .enter_state = sci_remote_node_context_posting_state_enter, }, [SCI_RNC_INVALIDATING] = { - .enter_state = scic_sds_remote_node_context_invalidating_state_enter, + .enter_state = sci_remote_node_context_invalidating_state_enter, }, [SCI_RNC_RESUMING] = { - .enter_state = scic_sds_remote_node_context_resuming_state_enter, + .enter_state = sci_remote_node_context_resuming_state_enter, }, [SCI_RNC_READY] = { - .enter_state = scic_sds_remote_node_context_ready_state_enter, + .enter_state = sci_remote_node_context_ready_state_enter, }, [SCI_RNC_TX_SUSPENDED] = { - .enter_state = scic_sds_remote_node_context_tx_suspended_state_enter, + .enter_state = sci_remote_node_context_tx_suspended_state_enter, }, [SCI_RNC_TX_RX_SUSPENDED] = { - .enter_state = scic_sds_remote_node_context_tx_rx_suspended_state_enter, + .enter_state = sci_remote_node_context_tx_rx_suspended_state_enter, }, [SCI_RNC_AWAIT_SUSPENSION] = { }, }; -void scic_sds_remote_node_context_construct(struct scic_sds_remote_node_context *rnc, +void sci_remote_node_context_construct(struct sci_remote_node_context *rnc, u16 remote_node_index) { - memset(rnc, 0, sizeof(struct scic_sds_remote_node_context)); + memset(rnc, 0, sizeof(struct sci_remote_node_context)); rnc->remote_node_index = remote_node_index; rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_UNSPECIFIED; - sci_init_sm(&rnc->sm, scic_sds_remote_node_context_state_table, SCI_RNC_INITIAL); + sci_init_sm(&rnc->sm, sci_remote_node_context_state_table, SCI_RNC_INITIAL); } -enum sci_status scic_sds_remote_node_context_event_handler(struct scic_sds_remote_node_context *sci_rnc, +enum sci_status sci_remote_node_context_event_handler(struct sci_remote_node_context *sci_rnc, u32 event_code) { enum 
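/*
 * Small sketch of the table-driven state machine convention the RNC code
 * above relies on: an array of per-state enter handlers plus a change-state
 * helper that runs the new state's handler.  This mirrors the spirit of
 * sci_base_state / sci_change_state() only; the state names, types, and
 * handlers here are illustrative stand-ins.
 */
#include <stdio.h>

enum rnc_state { ST_INITIAL, ST_POSTING, ST_READY, ST_MAX };

struct sm;
struct state_entry {
	void (*enter)(struct sm *sm);	/* optional enter handler per state */
};

struct sm {
	enum rnc_state current, previous;
	const struct state_entry *table;
};

static void posting_enter(struct sm *sm) { (void)sm; printf("validate RNC buffer\n"); }
static void ready_enter(struct sm *sm)   { (void)sm; printf("notify user callback\n"); }

static const struct state_entry rnc_states[ST_MAX] = {
	[ST_POSTING] = { .enter = posting_enter },
	[ST_READY]   = { .enter = ready_enter },
};

static void change_state(struct sm *sm, enum rnc_state next)
{
	sm->previous = sm->current;
	sm->current = next;
	if (sm->table[next].enter)
		sm->table[next].enter(sm);
}

int main(void)
{
	struct sm sm = { .current = ST_INITIAL, .table = rnc_states };

	change_state(&sm, ST_POSTING);
	change_state(&sm, ST_READY);
	return 0;
}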
scis_sds_remote_node_context_states state; @@ -476,7 +458,7 @@ enum sci_status scic_sds_remote_node_context_event_handler(struct scic_sds_remot } -enum sci_status scic_sds_remote_node_context_destruct(struct scic_sds_remote_node_context *sci_rnc, +enum sci_status sci_remote_node_context_destruct(struct sci_remote_node_context *sci_rnc, scics_sds_remote_node_context_callback cb_fn, void *cb_p) { @@ -485,7 +467,7 @@ enum sci_status scic_sds_remote_node_context_destruct(struct scic_sds_remote_nod state = sci_rnc->sm.current_state_id; switch (state) { case SCI_RNC_INVALIDATING: - scic_sds_remote_node_context_setup_to_destory(sci_rnc, cb_fn, cb_p); + sci_remote_node_context_setup_to_destory(sci_rnc, cb_fn, cb_p); return SCI_SUCCESS; case SCI_RNC_POSTING: case SCI_RNC_RESUMING: @@ -493,7 +475,7 @@ enum sci_status scic_sds_remote_node_context_destruct(struct scic_sds_remote_nod case SCI_RNC_TX_SUSPENDED: case SCI_RNC_TX_RX_SUSPENDED: case SCI_RNC_AWAIT_SUSPENSION: - scic_sds_remote_node_context_setup_to_destory(sci_rnc, cb_fn, cb_p); + sci_remote_node_context_setup_to_destory(sci_rnc, cb_fn, cb_p); sci_change_state(&sci_rnc->sm, SCI_RNC_INVALIDATING); return SCI_SUCCESS; case SCI_RNC_INITIAL: @@ -511,7 +493,7 @@ enum sci_status scic_sds_remote_node_context_destruct(struct scic_sds_remote_nod } } -enum sci_status scic_sds_remote_node_context_suspend(struct scic_sds_remote_node_context *sci_rnc, +enum sci_status sci_remote_node_context_suspend(struct sci_remote_node_context *sci_rnc, u32 suspend_type, scics_sds_remote_node_context_callback cb_fn, void *cb_p) @@ -530,7 +512,7 @@ enum sci_status scic_sds_remote_node_context_suspend(struct scic_sds_remote_node sci_rnc->suspension_code = suspend_type; if (suspend_type == SCI_SOFTWARE_SUSPENSION) { - scic_sds_remote_device_post_request(rnc_to_dev(sci_rnc), + sci_remote_device_post_request(rnc_to_dev(sci_rnc), SCU_CONTEXT_COMMAND_POST_RNC_SUSPEND_TX); } @@ -538,7 +520,7 @@ enum sci_status scic_sds_remote_node_context_suspend(struct scic_sds_remote_node return SCI_SUCCESS; } -enum sci_status scic_sds_remote_node_context_resume(struct scic_sds_remote_node_context *sci_rnc, +enum sci_status sci_remote_node_context_resume(struct sci_remote_node_context *sci_rnc, scics_sds_remote_node_context_callback cb_fn, void *cb_p) { @@ -550,8 +532,8 @@ enum sci_status scic_sds_remote_node_context_resume(struct scic_sds_remote_node_ if (sci_rnc->remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) return SCI_FAILURE_INVALID_STATE; - scic_sds_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p); - scic_sds_remote_node_context_construct_buffer(sci_rnc); + sci_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p); + sci_remote_node_context_construct_buffer(sci_rnc); sci_change_state(&sci_rnc->sm, SCI_RNC_POSTING); return SCI_SUCCESS; case SCI_RNC_POSTING: @@ -567,7 +549,7 @@ enum sci_status scic_sds_remote_node_context_resume(struct scic_sds_remote_node_ struct isci_remote_device *idev = rnc_to_dev(sci_rnc); struct domain_device *dev = idev->domain_dev; - scic_sds_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p); + sci_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p); /* TODO: consider adding a resume action of NONE, INVALIDATE, WRITE_TLCR */ if (dev->dev_type == SAS_END_DEV || dev_is_expander(dev)) @@ -584,11 +566,11 @@ enum sci_status scic_sds_remote_node_context_resume(struct scic_sds_remote_node_ return SCI_SUCCESS; } case SCI_RNC_TX_RX_SUSPENDED: - scic_sds_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p); + 
sci_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p); sci_change_state(&sci_rnc->sm, SCI_RNC_RESUMING); return SCI_FAILURE_INVALID_STATE; case SCI_RNC_AWAIT_SUSPENSION: - scic_sds_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p); + sci_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p); return SCI_SUCCESS; default: dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), @@ -597,7 +579,7 @@ enum sci_status scic_sds_remote_node_context_resume(struct scic_sds_remote_node_ } } -enum sci_status scic_sds_remote_node_context_start_io(struct scic_sds_remote_node_context *sci_rnc, +enum sci_status sci_remote_node_context_start_io(struct sci_remote_node_context *sci_rnc, struct isci_request *ireq) { enum scis_sds_remote_node_context_states state; @@ -622,7 +604,7 @@ enum sci_status scic_sds_remote_node_context_start_io(struct scic_sds_remote_nod return SCI_FAILURE_INVALID_STATE; } -enum sci_status scic_sds_remote_node_context_start_task(struct scic_sds_remote_node_context *sci_rnc, +enum sci_status sci_remote_node_context_start_task(struct sci_remote_node_context *sci_rnc, struct isci_request *ireq) { enum scis_sds_remote_node_context_states state; @@ -635,7 +617,7 @@ enum sci_status scic_sds_remote_node_context_start_task(struct scic_sds_remote_n return SCI_SUCCESS; case SCI_RNC_TX_SUSPENDED: case SCI_RNC_TX_RX_SUSPENDED: - scic_sds_remote_node_context_resume(sci_rnc, NULL, NULL); + sci_remote_node_context_resume(sci_rnc, NULL, NULL); return SCI_SUCCESS; default: dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), diff --git a/drivers/scsi/isci/remote_node_context.h b/drivers/scsi/isci/remote_node_context.h index 7a24c7a12de..b475c5c2664 100644 --- a/drivers/scsi/isci/remote_node_context.h +++ b/drivers/scsi/isci/remote_node_context.h @@ -80,7 +80,7 @@ struct isci_request; struct isci_remote_device; -struct scic_sds_remote_node_context; +struct sci_remote_node_context; typedef void (*scics_sds_remote_node_context_callback)(void *); @@ -147,19 +147,19 @@ enum scis_sds_remote_node_context_states { * This enumeration is used to define the end destination state for the remote * node context. */ -enum scic_sds_remote_node_context_destination_state { +enum sci_remote_node_context_destination_state { SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_UNSPECIFIED, SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_READY, SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_FINAL }; /** - * struct scic_sds_remote_node_context - This structure contains the data + * struct sci_remote_node_context - This structure contains the data * associated with the remote node context object. The remote node context * (RNC) object models the the remote device information necessary to manage * the silicon RNC. */ -struct scic_sds_remote_node_context { +struct sci_remote_node_context { /** * This field indicates the remote node index (RNI) associated with * this RNC. @@ -177,7 +177,7 @@ struct scic_sds_remote_node_context { * state. This can cause an automatic resume on receiving a suspension * notification. 
*/ - enum scic_sds_remote_node_context_destination_state destination_state; + enum sci_remote_node_context_destination_state destination_state; /** * This field contains the callback function that the user requested to be @@ -197,31 +197,31 @@ struct scic_sds_remote_node_context { struct sci_base_state_machine sm; }; -void scic_sds_remote_node_context_construct(struct scic_sds_remote_node_context *rnc, +void sci_remote_node_context_construct(struct sci_remote_node_context *rnc, u16 remote_node_index); -bool scic_sds_remote_node_context_is_ready( - struct scic_sds_remote_node_context *sci_rnc); +bool sci_remote_node_context_is_ready( + struct sci_remote_node_context *sci_rnc); -#define scic_sds_remote_node_context_get_remote_node_index(rcn) \ +#define sci_remote_node_context_get_remote_node_index(rcn) \ ((rnc)->remote_node_index) -enum sci_status scic_sds_remote_node_context_event_handler(struct scic_sds_remote_node_context *sci_rnc, +enum sci_status sci_remote_node_context_event_handler(struct sci_remote_node_context *sci_rnc, u32 event_code); -enum sci_status scic_sds_remote_node_context_destruct(struct scic_sds_remote_node_context *sci_rnc, +enum sci_status sci_remote_node_context_destruct(struct sci_remote_node_context *sci_rnc, scics_sds_remote_node_context_callback callback, void *callback_parameter); -enum sci_status scic_sds_remote_node_context_suspend(struct scic_sds_remote_node_context *sci_rnc, +enum sci_status sci_remote_node_context_suspend(struct sci_remote_node_context *sci_rnc, u32 suspend_type, scics_sds_remote_node_context_callback cb_fn, void *cb_p); -enum sci_status scic_sds_remote_node_context_resume(struct scic_sds_remote_node_context *sci_rnc, +enum sci_status sci_remote_node_context_resume(struct sci_remote_node_context *sci_rnc, scics_sds_remote_node_context_callback cb_fn, void *cb_p); -enum sci_status scic_sds_remote_node_context_start_task(struct scic_sds_remote_node_context *sci_rnc, +enum sci_status sci_remote_node_context_start_task(struct sci_remote_node_context *sci_rnc, struct isci_request *ireq); -enum sci_status scic_sds_remote_node_context_start_io(struct scic_sds_remote_node_context *sci_rnc, +enum sci_status sci_remote_node_context_start_io(struct sci_remote_node_context *sci_rnc, struct isci_request *ireq); #endif /* _SCIC_SDS_REMOTE_NODE_CONTEXT_H_ */ diff --git a/drivers/scsi/isci/remote_node_table.c b/drivers/scsi/isci/remote_node_table.c index 6b9465a5a21..301b3141945 100644 --- a/drivers/scsi/isci/remote_node_table.c +++ b/drivers/scsi/isci/remote_node_table.c @@ -74,8 +74,8 @@ * just bit position. u32 This is the absolute bit position for an available * group. */ -static u32 scic_sds_remote_node_table_get_group_index( - struct scic_remote_node_table *remote_node_table, +static u32 sci_remote_node_table_get_group_index( + struct sci_remote_node_table *remote_node_table, u32 group_table_index) { u32 dword_index; @@ -108,8 +108,8 @@ static u32 scic_sds_remote_node_table_get_group_index( * This method will clear the group index entry in the specified group index * table. none */ -static void scic_sds_remote_node_table_clear_group_index( - struct scic_remote_node_table *remote_node_table, +static void sci_remote_node_table_clear_group_index( + struct sci_remote_node_table *remote_node_table, u32 group_table_index, u32 group_index) { @@ -138,8 +138,8 @@ static void scic_sds_remote_node_table_clear_group_index( * This method will set the group index bit entry in the specified gropu index * table. 
none */ -static void scic_sds_remote_node_table_set_group_index( - struct scic_remote_node_table *remote_node_table, +static void sci_remote_node_table_set_group_index( + struct sci_remote_node_table *remote_node_table, u32 group_table_index, u32 group_index) { @@ -167,8 +167,8 @@ static void scic_sds_remote_node_table_set_group_index( * This method will set the remote to available in the remote node allocation * table. none */ -static void scic_sds_remote_node_table_set_node_index( - struct scic_remote_node_table *remote_node_table, +static void sci_remote_node_table_set_node_index( + struct sci_remote_node_table *remote_node_table, u32 remote_node_index) { u32 dword_location; @@ -200,8 +200,8 @@ static void scic_sds_remote_node_table_set_node_index( * This method clears the remote node index from the table of available remote * nodes. none */ -static void scic_sds_remote_node_table_clear_node_index( - struct scic_remote_node_table *remote_node_table, +static void sci_remote_node_table_clear_node_index( + struct sci_remote_node_table *remote_node_table, u32 remote_node_index) { u32 dword_location; @@ -231,8 +231,8 @@ static void scic_sds_remote_node_table_clear_node_index( * * This method clears the entire table slot at the specified slot index. none */ -static void scic_sds_remote_node_table_clear_group( - struct scic_remote_node_table *remote_node_table, +static void sci_remote_node_table_clear_group( + struct sci_remote_node_table *remote_node_table, u32 group_index) { u32 dword_location; @@ -258,8 +258,8 @@ static void scic_sds_remote_node_table_clear_group( * * THis method sets an entire remote node group in the remote node table. */ -static void scic_sds_remote_node_table_set_group( - struct scic_remote_node_table *remote_node_table, +static void sci_remote_node_table_set_group( + struct sci_remote_node_table *remote_node_table, u32 group_index) { u32 dword_location; @@ -288,8 +288,8 @@ static void scic_sds_remote_node_table_set_group( * This method will return the group value for the specified group index. The * bit values at the specified remote node group index. */ -static u8 scic_sds_remote_node_table_get_group_value( - struct scic_remote_node_table *remote_node_table, +static u8 sci_remote_node_table_get_group_value( + struct sci_remote_node_table *remote_node_table, u32 group_index) { u32 dword_location; @@ -313,8 +313,8 @@ static u8 scic_sds_remote_node_table_get_group_value( * * This method will initialize the remote node table for use. 
none */ -void scic_sds_remote_node_table_initialize( - struct scic_remote_node_table *remote_node_table, +void sci_remote_node_table_initialize( + struct sci_remote_node_table *remote_node_table, u32 remote_node_entries) { u32 index; @@ -342,7 +342,7 @@ void scic_sds_remote_node_table_initialize( /* Initialize each full DWORD to a FULL SET of remote nodes */ for (index = 0; index < remote_node_entries; index++) { - scic_sds_remote_node_table_set_node_index(remote_node_table, index); + sci_remote_node_table_set_node_index(remote_node_table, index); } remote_node_table->group_array_size = (u16) @@ -353,14 +353,14 @@ void scic_sds_remote_node_table_initialize( /* * These are all guaranteed to be full slot values so fill them in the * available sets of 3 remote nodes */ - scic_sds_remote_node_table_set_group_index(remote_node_table, 2, index); + sci_remote_node_table_set_group_index(remote_node_table, 2, index); } /* Now fill in any remainders that we may find */ if ((remote_node_entries % SCU_STP_REMOTE_NODE_COUNT) == 2) { - scic_sds_remote_node_table_set_group_index(remote_node_table, 1, index); + sci_remote_node_table_set_group_index(remote_node_table, 1, index); } else if ((remote_node_entries % SCU_STP_REMOTE_NODE_COUNT) == 1) { - scic_sds_remote_node_table_set_group_index(remote_node_table, 0, index); + sci_remote_node_table_set_group_index(remote_node_table, 0, index); } } @@ -379,8 +379,8 @@ void scic_sds_remote_node_table_initialize( * updated. The RNi value or an invalid remote node context if an RNi can not * be found. */ -static u16 scic_sds_remote_node_table_allocate_single_remote_node( - struct scic_remote_node_table *remote_node_table, +static u16 sci_remote_node_table_allocate_single_remote_node( + struct sci_remote_node_table *remote_node_table, u32 group_table_index) { u8 index; @@ -388,12 +388,12 @@ static u16 scic_sds_remote_node_table_allocate_single_remote_node( u32 group_index; u16 remote_node_index = SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX; - group_index = scic_sds_remote_node_table_get_group_index( + group_index = sci_remote_node_table_get_group_index( remote_node_table, group_table_index); /* We could not find an available slot in the table selector 0 */ if (group_index != SCIC_SDS_REMOTE_NODE_TABLE_INVALID_INDEX) { - group_value = scic_sds_remote_node_table_get_group_value( + group_value = sci_remote_node_table_get_group_value( remote_node_table, group_index); for (index = 0; index < SCU_STP_REMOTE_NODE_COUNT; index++) { @@ -402,16 +402,16 @@ static u16 scic_sds_remote_node_table_allocate_single_remote_node( remote_node_index = (u16)(group_index * SCU_STP_REMOTE_NODE_COUNT + index); - scic_sds_remote_node_table_clear_group_index( + sci_remote_node_table_clear_group_index( remote_node_table, group_table_index, group_index ); - scic_sds_remote_node_table_clear_node_index( + sci_remote_node_table_clear_node_index( remote_node_table, remote_node_index ); if (group_table_index > 0) { - scic_sds_remote_node_table_set_group_index( + sci_remote_node_table_set_group_index( remote_node_table, group_table_index - 1, group_index ); } @@ -436,24 +436,24 @@ static u16 scic_sds_remote_node_table_allocate_single_remote_node( * The remote node index that represents three consecutive remote node entries * or an invalid remote node context if none can be found. 
*/ -static u16 scic_sds_remote_node_table_allocate_triple_remote_node( - struct scic_remote_node_table *remote_node_table, +static u16 sci_remote_node_table_allocate_triple_remote_node( + struct sci_remote_node_table *remote_node_table, u32 group_table_index) { u32 group_index; u16 remote_node_index = SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX; - group_index = scic_sds_remote_node_table_get_group_index( + group_index = sci_remote_node_table_get_group_index( remote_node_table, group_table_index); if (group_index != SCIC_SDS_REMOTE_NODE_TABLE_INVALID_INDEX) { remote_node_index = (u16)group_index * SCU_STP_REMOTE_NODE_COUNT; - scic_sds_remote_node_table_clear_group_index( + sci_remote_node_table_clear_group_index( remote_node_table, group_table_index, group_index ); - scic_sds_remote_node_table_clear_group( + sci_remote_node_table_clear_group( remote_node_table, group_index ); } @@ -473,31 +473,31 @@ static u16 scic_sds_remote_node_table_allocate_triple_remote_node( * SCU_SSP_REMOTE_NODE_COUNT(1) or SCU_STP_REMOTE_NODE_COUNT(3). u16 This is * the remote node index that is returned or an invalid remote node context. */ -u16 scic_sds_remote_node_table_allocate_remote_node( - struct scic_remote_node_table *remote_node_table, +u16 sci_remote_node_table_allocate_remote_node( + struct sci_remote_node_table *remote_node_table, u32 remote_node_count) { u16 remote_node_index = SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX; if (remote_node_count == SCU_SSP_REMOTE_NODE_COUNT) { remote_node_index = - scic_sds_remote_node_table_allocate_single_remote_node( + sci_remote_node_table_allocate_single_remote_node( remote_node_table, 0); if (remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) { remote_node_index = - scic_sds_remote_node_table_allocate_single_remote_node( + sci_remote_node_table_allocate_single_remote_node( remote_node_table, 1); } if (remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) { remote_node_index = - scic_sds_remote_node_table_allocate_single_remote_node( + sci_remote_node_table_allocate_single_remote_node( remote_node_table, 2); } } else if (remote_node_count == SCU_STP_REMOTE_NODE_COUNT) { remote_node_index = - scic_sds_remote_node_table_allocate_triple_remote_node( + sci_remote_node_table_allocate_triple_remote_node( remote_node_table, 2); } @@ -511,8 +511,8 @@ u16 scic_sds_remote_node_table_allocate_remote_node( * This method will free a single remote node index back to the remote node * table. This routine will update the remote node groups */ -static void scic_sds_remote_node_table_release_single_remote_node( - struct scic_remote_node_table *remote_node_table, +static void sci_remote_node_table_release_single_remote_node( + struct sci_remote_node_table *remote_node_table, u16 remote_node_index) { u32 group_index; @@ -520,7 +520,7 @@ static void scic_sds_remote_node_table_release_single_remote_node( group_index = remote_node_index / SCU_STP_REMOTE_NODE_COUNT; - group_value = scic_sds_remote_node_table_get_group_value(remote_node_table, group_index); + group_value = sci_remote_node_table_get_group_value(remote_node_table, group_index); /* * Assert that we are not trying to add an entry to a slot that is already @@ -531,22 +531,22 @@ static void scic_sds_remote_node_table_release_single_remote_node( /* * There are no entries in this slot so it must be added to the single * slot table. 
*/ - scic_sds_remote_node_table_set_group_index(remote_node_table, 0, group_index); + sci_remote_node_table_set_group_index(remote_node_table, 0, group_index); } else if ((group_value & (group_value - 1)) == 0) { /* * There is only one entry in this slot so it must be moved from the * single slot table to the dual slot table */ - scic_sds_remote_node_table_clear_group_index(remote_node_table, 0, group_index); - scic_sds_remote_node_table_set_group_index(remote_node_table, 1, group_index); + sci_remote_node_table_clear_group_index(remote_node_table, 0, group_index); + sci_remote_node_table_set_group_index(remote_node_table, 1, group_index); } else { /* * There are two entries in the slot so it must be moved from the dual * slot table to the tripple slot table. */ - scic_sds_remote_node_table_clear_group_index(remote_node_table, 1, group_index); - scic_sds_remote_node_table_set_group_index(remote_node_table, 2, group_index); + sci_remote_node_table_clear_group_index(remote_node_table, 1, group_index); + sci_remote_node_table_set_group_index(remote_node_table, 2, group_index); } - scic_sds_remote_node_table_set_node_index(remote_node_table, remote_node_index); + sci_remote_node_table_set_node_index(remote_node_table, remote_node_index); } /** @@ -557,19 +557,19 @@ static void scic_sds_remote_node_table_release_single_remote_node( * This method will release a group of three consecutive remote nodes back to * the free remote nodes. */ -static void scic_sds_remote_node_table_release_triple_remote_node( - struct scic_remote_node_table *remote_node_table, +static void sci_remote_node_table_release_triple_remote_node( + struct sci_remote_node_table *remote_node_table, u16 remote_node_index) { u32 group_index; group_index = remote_node_index / SCU_STP_REMOTE_NODE_COUNT; - scic_sds_remote_node_table_set_group_index( + sci_remote_node_table_set_group_index( remote_node_table, 2, group_index ); - scic_sds_remote_node_table_set_group(remote_node_table, group_index); + sci_remote_node_table_set_group(remote_node_table, group_index); } /** @@ -582,16 +582,16 @@ static void scic_sds_remote_node_table_release_triple_remote_node( * This method will release the remote node index back into the remote node * table free pool. 
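/*
 * Toy model of the remote node table policy implemented above: remote node
 * indexes are managed in groups of three (SCU_STP_REMOTE_NODE_COUNT).  An SSP
 * device takes a single slot, preferring the most-used group so that fully
 * free groups stay available for STP/SATA devices, which need all three
 * consecutive slots.  This is only a sketch of the idea; the driver tracks
 * availability with dword bitmaps and three group-index tables.
 */
#include <stdio.h>

#define NODES_PER_GROUP	3
#define NUM_GROUPS	4
#define INVALID_RNI	0xffffu

static unsigned int group_free[NUM_GROUPS] = { 3, 3, 3, 3 };
static unsigned int slot_used[NUM_GROUPS][NODES_PER_GROUP];

static unsigned int alloc_single(void)
{
	/* try groups with 1 free slot first, then 2, then fully free groups */
	for (int want = 1; want <= NODES_PER_GROUP; want++)
		for (int g = 0; g < NUM_GROUPS; g++)
			if (group_free[g] == (unsigned int)want)
				for (int s = 0; s < NODES_PER_GROUP; s++)
					if (!slot_used[g][s]) {
						slot_used[g][s] = 1;
						group_free[g]--;
						return g * NODES_PER_GROUP + s;
					}
	return INVALID_RNI;
}

static unsigned int alloc_triple(void)
{
	for (int g = 0; g < NUM_GROUPS; g++)
		if (group_free[g] == NODES_PER_GROUP) {
			group_free[g] = 0;
			for (int s = 0; s < NODES_PER_GROUP; s++)
				slot_used[g][s] = 1;
			return g * NODES_PER_GROUP;	/* first RNI of the group */
		}
	return INVALID_RNI;
}

int main(void)
{
	unsigned int ssp_rni = alloc_single();	/* one slot for an SSP device */
	unsigned int stp_rni = alloc_triple();	/* three consecutive slots for STP */

	printf("ssp rni=%u stp rni=%u\n", ssp_rni, stp_rni);
	return 0;
}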
*/ -void scic_sds_remote_node_table_release_remote_node_index( - struct scic_remote_node_table *remote_node_table, +void sci_remote_node_table_release_remote_node_index( + struct sci_remote_node_table *remote_node_table, u32 remote_node_count, u16 remote_node_index) { if (remote_node_count == SCU_SSP_REMOTE_NODE_COUNT) { - scic_sds_remote_node_table_release_single_remote_node( + sci_remote_node_table_release_single_remote_node( remote_node_table, remote_node_index); } else if (remote_node_count == SCU_STP_REMOTE_NODE_COUNT) { - scic_sds_remote_node_table_release_triple_remote_node( + sci_remote_node_table_release_triple_remote_node( remote_node_table, remote_node_index); } } diff --git a/drivers/scsi/isci/remote_node_table.h b/drivers/scsi/isci/remote_node_table.h index 5737d9a30cc..721ab982d2a 100644 --- a/drivers/scsi/isci/remote_node_table.h +++ b/drivers/scsi/isci/remote_node_table.h @@ -130,11 +130,11 @@ #define SCU_SATA_REMOTE_NODE_COUNT 1 /** - * struct scic_remote_node_table - + * struct sci_remote_node_table - * * */ -struct scic_remote_node_table { +struct sci_remote_node_table { /** * This field contains the array size in dwords */ @@ -172,16 +172,16 @@ struct scic_remote_node_table { /* --------------------------------------------------------------------------- */ -void scic_sds_remote_node_table_initialize( - struct scic_remote_node_table *remote_node_table, +void sci_remote_node_table_initialize( + struct sci_remote_node_table *remote_node_table, u32 remote_node_entries); -u16 scic_sds_remote_node_table_allocate_remote_node( - struct scic_remote_node_table *remote_node_table, +u16 sci_remote_node_table_allocate_remote_node( + struct sci_remote_node_table *remote_node_table, u32 remote_node_count); -void scic_sds_remote_node_table_release_remote_node_index( - struct scic_remote_node_table *remote_node_table, +void sci_remote_node_table_release_remote_node_index( + struct sci_remote_node_table *remote_node_table, u32 remote_node_count, u16 remote_node_index); diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c index 36e674896bc..bcb3c08c19a 100644 --- a/drivers/scsi/isci/request.c +++ b/drivers/scsi/isci/request.c @@ -89,7 +89,7 @@ static dma_addr_t to_sgl_element_pair_dma(struct isci_host *ihost, return ihost->task_context_dma + offset; } - return scic_io_request_get_dma_addr(ireq, &ireq->sg_table[idx - 2]); + return sci_io_request_get_dma_addr(ireq, &ireq->sg_table[idx - 2]); } static void init_sgl_element(struct scu_sgl_element *e, struct scatterlist *sg) @@ -100,7 +100,7 @@ static void init_sgl_element(struct scu_sgl_element *e, struct scatterlist *sg) e->address_modifier = 0; } -static void scic_sds_request_build_sgl(struct isci_request *ireq) +static void sci_request_build_sgl(struct isci_request *ireq) { struct isci_host *ihost = ireq->isci_host; struct sas_task *task = isci_request_access_task(ireq); @@ -158,7 +158,7 @@ static void scic_sds_request_build_sgl(struct isci_request *ireq) } } -static void scic_sds_io_request_build_ssp_command_iu(struct isci_request *ireq) +static void sci_io_request_build_ssp_command_iu(struct isci_request *ireq) { struct ssp_cmd_iu *cmd_iu; struct sas_task *task = isci_request_access_task(ireq); @@ -178,7 +178,7 @@ static void scic_sds_io_request_build_ssp_command_iu(struct isci_request *ireq) sizeof(task->ssp_task.cdb) / sizeof(u32)); } -static void scic_sds_task_request_build_ssp_task_iu(struct isci_request *ireq) +static void sci_task_request_build_ssp_task_iu(struct isci_request *ireq) { struct ssp_task_iu *task_iu; 
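/*
 * Toy model of what sci_request_build_sgl() above does: walk the request's
 * DMA segments and pack them two at a time into hardware SGL element pairs
 * (an "A" and a "B" element per pair).  The real code works on struct
 * scatterlist and struct scu_sgl_element_pair and also chains pairs through
 * a next-pair address; the types here are simplified stand-ins.
 */
#include <stdint.h>
#include <stdio.h>

struct seg { uint64_t addr; uint32_t len; };		/* stand-in for a mapped sg entry */
struct sgl_elem { uint32_t addr_hi, addr_lo, len; };
struct sgl_pair { struct sgl_elem a, b; };

static void build_sgl(const struct seg *segs, int nseg, struct sgl_pair *pairs)
{
	for (int i = 0; i < nseg; i++) {
		struct sgl_elem *e = (i & 1) ? &pairs[i / 2].b : &pairs[i / 2].a;

		e->addr_hi = (uint32_t)(segs[i].addr >> 32);
		e->addr_lo = (uint32_t)segs[i].addr;
		e->len = segs[i].len;
	}
}

int main(void)
{
	struct seg segs[3] = {
		{ 0x100000000ull, 4096 },
		{ 0x200000000ull, 8192 },
		{ 0x300000000ull, 512 },
	};
	struct sgl_pair pairs[2] = { { { 0 } } };

	build_sgl(segs, 3, pairs);
	printf("pair0: a.len=%u b.len=%u  pair1: a.len=%u\n",
	       pairs[0].a.len, pairs[0].b.len, pairs[1].a.len);
	return 0;
}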
struct sas_task *task = isci_request_access_task(ireq); @@ -211,8 +211,8 @@ static void scu_ssp_reqeust_construct_task_context( struct isci_remote_device *idev; struct isci_port *iport; - idev = scic_sds_request_get_device(ireq); - iport = scic_sds_request_get_port(ireq); + idev = sci_request_get_device(ireq); + iport = sci_request_get_port(ireq); /* Fill in the TC with the its required data */ task_context->abort = 0; @@ -220,13 +220,13 @@ static void scu_ssp_reqeust_construct_task_context( task_context->initiator_request = 1; task_context->connection_rate = idev->connection_rate; task_context->protocol_engine_index = - scic_sds_controller_get_protocol_engine_group(controller); - task_context->logical_port_index = scic_sds_port_get_index(iport); + sci_controller_get_protocol_engine_group(controller); + task_context->logical_port_index = sci_port_get_index(iport); task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SSP; task_context->valid = SCU_TASK_CONTEXT_VALID; task_context->context_type = SCU_TASK_CONTEXT_TYPE; - task_context->remote_node_index = scic_sds_remote_device_get_index(idev); + task_context->remote_node_index = sci_remote_device_get_index(idev); task_context->command_code = 0; task_context->link_layer_control = 0; @@ -242,9 +242,9 @@ static void scu_ssp_reqeust_construct_task_context( task_context->task_phase = 0x01; ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC | - (scic_sds_controller_get_protocol_engine_group(controller) << + (sci_controller_get_protocol_engine_group(controller) << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) | - (scic_sds_port_get_index(iport) << + (sci_port_get_index(iport) << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) | ISCI_TAG_TCI(ireq->io_tag)); @@ -252,7 +252,7 @@ static void scu_ssp_reqeust_construct_task_context( * Copy the physical address for the command buffer to the * SCU Task Context */ - dma_addr = scic_io_request_get_dma_addr(ireq, &ireq->ssp.cmd); + dma_addr = sci_io_request_get_dma_addr(ireq, &ireq->ssp.cmd); task_context->command_iu_upper = upper_32_bits(dma_addr); task_context->command_iu_lower = lower_32_bits(dma_addr); @@ -261,7 +261,7 @@ static void scu_ssp_reqeust_construct_task_context( * Copy the physical address for the response buffer to the * SCU Task Context */ - dma_addr = scic_io_request_get_dma_addr(ireq, &ireq->ssp.rsp); + dma_addr = sci_io_request_get_dma_addr(ireq, &ireq->ssp.rsp); task_context->response_iu_upper = upper_32_bits(dma_addr); task_context->response_iu_lower = lower_32_bits(dma_addr); @@ -298,7 +298,7 @@ static void scu_ssp_io_request_construct_task_context(struct isci_request *ireq, task_context->transfer_length_bytes = len; if (task_context->transfer_length_bytes > 0) - scic_sds_request_build_sgl(ireq); + sci_request_build_sgl(ireq); } /** @@ -349,8 +349,8 @@ static void scu_sata_reqeust_construct_task_context( struct isci_remote_device *idev; struct isci_port *iport; - idev = scic_sds_request_get_device(ireq); - iport = scic_sds_request_get_port(ireq); + idev = sci_request_get_device(ireq); + iport = sci_request_get_port(ireq); /* Fill in the TC with the its required data */ task_context->abort = 0; @@ -358,14 +358,14 @@ static void scu_sata_reqeust_construct_task_context( task_context->initiator_request = 1; task_context->connection_rate = idev->connection_rate; task_context->protocol_engine_index = - scic_sds_controller_get_protocol_engine_group(controller); + sci_controller_get_protocol_engine_group(controller); task_context->logical_port_index = - scic_sds_port_get_index(iport); 
+ sci_port_get_index(iport); task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_STP; task_context->valid = SCU_TASK_CONTEXT_VALID; task_context->context_type = SCU_TASK_CONTEXT_TYPE; - task_context->remote_node_index = scic_sds_remote_device_get_index(idev); + task_context->remote_node_index = sci_remote_device_get_index(idev); task_context->command_code = 0; task_context->link_layer_control = 0; @@ -385,9 +385,9 @@ static void scu_sata_reqeust_construct_task_context( task_context->type.words[0] = *(u32 *)&ireq->stp.cmd; ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC | - (scic_sds_controller_get_protocol_engine_group(controller) << + (sci_controller_get_protocol_engine_group(controller) << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) | - (scic_sds_port_get_index(iport) << + (sci_port_get_index(iport) << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) | ISCI_TAG_TCI(ireq->io_tag)); /* @@ -395,7 +395,7 @@ static void scu_sata_reqeust_construct_task_context( * Context. We must offset the command buffer by 4 bytes because the * first 4 bytes are transfered in the body of the TC. */ - dma_addr = scic_io_request_get_dma_addr(ireq, + dma_addr = sci_io_request_get_dma_addr(ireq, ((char *) &ireq->stp.cmd) + sizeof(u32)); @@ -420,7 +420,7 @@ static void scu_stp_raw_request_construct_task_context(struct isci_request *ireq task_context->transfer_length_bytes = sizeof(struct host_to_dev_fis) - sizeof(u32); } -static enum sci_status scic_sds_stp_pio_request_construct(struct isci_request *ireq, +static enum sci_status sci_stp_pio_request_construct(struct isci_request *ireq, bool copy_rx_frame) { struct isci_stp_request *stp_req = &ireq->stp.req; @@ -432,7 +432,7 @@ static enum sci_status scic_sds_stp_pio_request_construct(struct isci_request *i stp_req->sgl.set = SCU_SGL_ELEMENT_PAIR_A; if (copy_rx_frame) { - scic_sds_request_build_sgl(ireq); + sci_request_build_sgl(ireq); stp_req->sgl.index = 0; } else { /* The user does not want the data copied to the SGL buffer location */ @@ -454,7 +454,7 @@ static enum sci_status scic_sds_stp_pio_request_construct(struct isci_request *i * requests that are optimized by the silicon (i.e. UDMA, NCQ). This method * returns an indication as to whether the construction was successful. 
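/*
 * Sketch of the protocol selection that sci_io_request_construct_sata() makes
 * a little further down: NCQ-capable commands take the FPDMA-optimized path,
 * plain DMA transfers take the DMA-optimized path, and everything else falls
 * back to the PIO construction.  The enum and struct below are illustrative
 * stand-ins, not the libsas/isci types.
 */
#include <stdbool.h>
#include <stdio.h>

enum sata_path { PATH_FPDMA, PATH_DMA, PATH_PIO };

struct ata_task_flags { bool use_ncq; bool dma_xfer; };

static enum sata_path pick_sata_path(const struct ata_task_flags *t)
{
	if (t->use_ncq)
		return PATH_FPDMA;	/* queued, silicon-optimized */
	if (t->dma_xfer)
		return PATH_DMA;	/* non-queued DMA, still optimized */
	return PATH_PIO;		/* driver drives the data phase itself */
}

int main(void)
{
	struct ata_task_flags t = { .use_ncq = false, .dma_xfer = true };

	printf("path=%d\n", pick_sata_path(&t));
	return 0;
}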
*/ -static void scic_sds_stp_optimized_request_construct(struct isci_request *ireq, +static void sci_stp_optimized_request_construct(struct isci_request *ireq, u8 optimized_task_type, u32 len, enum dma_data_direction dir) @@ -465,7 +465,7 @@ static void scic_sds_stp_optimized_request_construct(struct isci_request *ireq, scu_sata_reqeust_construct_task_context(ireq, task_context); /* Copy over the SGL elements */ - scic_sds_request_build_sgl(ireq); + sci_request_build_sgl(ireq); /* Copy over the number of bytes to be transfered */ task_context->transfer_length_bytes = len; @@ -490,7 +490,7 @@ static void scic_sds_stp_optimized_request_construct(struct isci_request *ireq, static enum sci_status -scic_io_request_construct_sata(struct isci_request *ireq, +sci_io_request_construct_sata(struct isci_request *ireq, u32 len, enum dma_data_direction dir, bool copy) @@ -533,7 +533,7 @@ scic_io_request_construct_sata(struct isci_request *ireq, /* NCQ */ if (task->ata_task.use_ncq) { - scic_sds_stp_optimized_request_construct(ireq, + sci_stp_optimized_request_construct(ireq, SCU_TASK_TYPE_FPDMAQ_READ, len, dir); return SCI_SUCCESS; @@ -541,17 +541,17 @@ scic_io_request_construct_sata(struct isci_request *ireq, /* DMA */ if (task->ata_task.dma_xfer) { - scic_sds_stp_optimized_request_construct(ireq, + sci_stp_optimized_request_construct(ireq, SCU_TASK_TYPE_DMA_IN, len, dir); return SCI_SUCCESS; } else /* PIO */ - return scic_sds_stp_pio_request_construct(ireq, copy); + return sci_stp_pio_request_construct(ireq, copy); return status; } -static enum sci_status scic_io_request_construct_basic_ssp(struct isci_request *ireq) +static enum sci_status sci_io_request_construct_basic_ssp(struct isci_request *ireq) { struct sas_task *task = isci_request_access_task(ireq); @@ -561,28 +561,28 @@ static enum sci_status scic_io_request_construct_basic_ssp(struct isci_request * task->data_dir, task->total_xfer_len); - scic_sds_io_request_build_ssp_command_iu(ireq); + sci_io_request_build_ssp_command_iu(ireq); sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED); return SCI_SUCCESS; } -enum sci_status scic_task_request_construct_ssp( +enum sci_status sci_task_request_construct_ssp( struct isci_request *ireq) { /* Construct the SSP Task SCU Task Context */ scu_ssp_task_request_construct_task_context(ireq); /* Fill in the SSP Task IU */ - scic_sds_task_request_build_ssp_task_iu(ireq); + sci_task_request_build_ssp_task_iu(ireq); sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED); return SCI_SUCCESS; } -static enum sci_status scic_io_request_construct_basic_sata(struct isci_request *ireq) +static enum sci_status sci_io_request_construct_basic_sata(struct isci_request *ireq) { enum sci_status status; bool copy = false; @@ -592,7 +592,7 @@ static enum sci_status scic_io_request_construct_basic_sata(struct isci_request copy = (task->data_dir == DMA_NONE) ? 
false : true; - status = scic_io_request_construct_sata(ireq, + status = sci_io_request_construct_sata(ireq, task->total_xfer_len, task->data_dir, copy); @@ -603,7 +603,7 @@ static enum sci_status scic_io_request_construct_basic_sata(struct isci_request return status; } -enum sci_status scic_task_request_construct_sata(struct isci_request *ireq) +enum sci_status sci_task_request_construct_sata(struct isci_request *ireq) { enum sci_status status = SCI_SUCCESS; @@ -648,7 +648,7 @@ static u32 sci_req_tx_bytes(struct isci_request *ireq) * BAR1 is the scu_registers * 0x20002C = 0x200000 + 0x2c * = start of task context SRAM + offset of (type.ssp.data_offset) - * TCi is the io_tag of struct scic_sds_request + * TCi is the io_tag of struct sci_request */ ret_val = readl(scu_reg_base + (SCU_TASK_CONTEXT_SRAM + offsetof(struct scu_task_context, type.ssp.data_offset)) + @@ -658,7 +658,7 @@ static u32 sci_req_tx_bytes(struct isci_request *ireq) return ret_val; } -enum sci_status scic_sds_request_start(struct isci_request *ireq) +enum sci_status sci_request_start(struct isci_request *ireq) { enum sci_base_request_states state; struct scu_task_context *tc = ireq->tc; @@ -708,7 +708,7 @@ enum sci_status scic_sds_request_start(struct isci_request *ireq) } enum sci_status -scic_sds_io_request_terminate(struct isci_request *ireq) +sci_io_request_terminate(struct isci_request *ireq) { enum sci_base_request_states state; @@ -716,7 +716,7 @@ scic_sds_io_request_terminate(struct isci_request *ireq) switch (state) { case SCI_REQ_CONSTRUCTED: - scic_sds_request_set_status(ireq, + sci_request_set_status(ireq, SCU_TASK_DONE_TASK_ABORT, SCI_FAILURE_IO_TERMINATED); @@ -759,7 +759,7 @@ scic_sds_io_request_terminate(struct isci_request *ireq) return SCI_FAILURE_INVALID_STATE; } -enum sci_status scic_sds_request_complete(struct isci_request *ireq) +enum sci_status sci_request_complete(struct isci_request *ireq) { enum sci_base_request_states state; struct isci_host *ihost = ireq->owning_controller; @@ -770,7 +770,7 @@ enum sci_status scic_sds_request_complete(struct isci_request *ireq) return SCI_FAILURE_INVALID_STATE; if (ireq->saved_rx_frame_index != SCU_INVALID_FRAME_INDEX) - scic_sds_controller_release_frame(ihost, + sci_controller_release_frame(ihost, ireq->saved_rx_frame_index); /* XXX can we just stop the machine and remove the 'final' state? */ @@ -778,7 +778,7 @@ enum sci_status scic_sds_request_complete(struct isci_request *ireq) return SCI_SUCCESS; } -enum sci_status scic_sds_io_request_event_handler(struct isci_request *ireq, +enum sci_status sci_io_request_event_handler(struct isci_request *ireq, u32 event_code) { enum sci_base_request_states state; @@ -818,7 +818,7 @@ enum sci_status scic_sds_io_request_event_handler(struct isci_request *ireq, * @sci_req: This parameter specifies the request object for which to copy * the response data. 
*/ -static void scic_sds_io_request_copy_response(struct isci_request *ireq) +static void sci_io_request_copy_response(struct isci_request *ireq) { void *resp_buf; u32 len; @@ -848,7 +848,7 @@ request_started_state_tc_event(struct isci_request *ireq, */ switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): - scic_sds_request_set_status(ireq, + sci_request_set_status(ireq, SCU_TASK_DONE_GOOD, SCI_SUCCESS); break; @@ -868,11 +868,11 @@ request_started_state_tc_event(struct isci_request *ireq, word_cnt); if (resp->status == 0) { - scic_sds_request_set_status(ireq, + sci_request_set_status(ireq, SCU_TASK_DONE_GOOD, SCI_SUCCESS_IO_DONE_EARLY); } else { - scic_sds_request_set_status(ireq, + sci_request_set_status(ireq, SCU_TASK_DONE_CHECK_RESPONSE, SCI_FAILURE_IO_RESPONSE_VALID); } @@ -885,7 +885,7 @@ request_started_state_tc_event(struct isci_request *ireq, &ireq->ssp.rsp, word_cnt); - scic_sds_request_set_status(ireq, + sci_request_set_status(ireq, SCU_TASK_DONE_CHECK_RESPONSE, SCI_FAILURE_IO_RESPONSE_VALID); break; @@ -900,11 +900,11 @@ request_started_state_tc_event(struct isci_request *ireq, datapres = resp_iu->datapres; if (datapres == 1 || datapres == 2) { - scic_sds_request_set_status(ireq, + sci_request_set_status(ireq, SCU_TASK_DONE_CHECK_RESPONSE, SCI_FAILURE_IO_RESPONSE_VALID); } else - scic_sds_request_set_status(ireq, + sci_request_set_status(ireq, SCU_TASK_DONE_GOOD, SCI_SUCCESS); break; @@ -921,12 +921,12 @@ request_started_state_tc_event(struct isci_request *ireq, case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR): case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDB_ERR): if (ireq->protocol == SCIC_STP_PROTOCOL) { - scic_sds_request_set_status(ireq, + sci_request_set_status(ireq, SCU_GET_COMPLETION_TL_STATUS(completion_code) >> SCU_COMPLETION_TL_STATUS_SHIFT, SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED); } else { - scic_sds_request_set_status(ireq, + sci_request_set_status(ireq, SCU_GET_COMPLETION_TL_STATUS(completion_code) >> SCU_COMPLETION_TL_STATUS_SHIFT, SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); @@ -944,7 +944,7 @@ request_started_state_tc_event(struct isci_request *ireq, case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY): case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED): case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED): - scic_sds_request_set_status(ireq, + sci_request_set_status(ireq, SCU_GET_COMPLETION_TL_STATUS(completion_code) >> SCU_COMPLETION_TL_STATUS_SHIFT, SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED); @@ -967,7 +967,7 @@ request_started_state_tc_event(struct isci_request *ireq, case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_IIT_ENTRY_NV): case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RNCNV_OUTBOUND): default: - scic_sds_request_set_status( + sci_request_set_status( ireq, SCU_GET_COMPLETION_TL_STATUS(completion_code) >> SCU_COMPLETION_TL_STATUS_SHIFT, @@ -991,7 +991,7 @@ request_aborting_state_tc_event(struct isci_request *ireq, switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { case (SCU_TASK_DONE_GOOD << SCU_COMPLETION_TL_STATUS_SHIFT): case (SCU_TASK_DONE_TASK_ABORT << SCU_COMPLETION_TL_STATUS_SHIFT): - scic_sds_request_set_status(ireq, SCU_TASK_DONE_TASK_ABORT, + sci_request_set_status(ireq, SCU_TASK_DONE_TASK_ABORT, SCI_FAILURE_IO_TERMINATED); sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); @@ -1012,7 +1012,7 @@ static enum sci_status ssp_task_request_await_tc_event(struct isci_request *ireq { switch 
(SCU_GET_COMPLETION_TL_STATUS(completion_code)) { case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): - scic_sds_request_set_status(ireq, SCU_TASK_DONE_GOOD, + sci_request_set_status(ireq, SCU_TASK_DONE_GOOD, SCI_SUCCESS); sci_change_state(&ireq->sm, SCI_REQ_TASK_WAIT_TC_RESP); @@ -1036,7 +1036,7 @@ static enum sci_status ssp_task_request_await_tc_event(struct isci_request *ireq * If a NAK was received, then it is up to the user to retry * the request. */ - scic_sds_request_set_status(ireq, + sci_request_set_status(ireq, SCU_NORMALIZE_COMPLETION_STATUS(completion_code), SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); @@ -1057,7 +1057,7 @@ smp_request_await_response_tc_event(struct isci_request *ireq, * unexpected. but if the TC has success status, we * complete the IO anyway. */ - scic_sds_request_set_status(ireq, SCU_TASK_DONE_GOOD, + sci_request_set_status(ireq, SCU_TASK_DONE_GOOD, SCI_SUCCESS); sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); @@ -1074,7 +1074,7 @@ smp_request_await_response_tc_event(struct isci_request *ireq, * these SMP_XXX_XX_ERR status. For these type of error, * we ask ihost user to retry the request. */ - scic_sds_request_set_status(ireq, SCU_TASK_DONE_SMP_RESP_TO_ERR, + sci_request_set_status(ireq, SCU_TASK_DONE_SMP_RESP_TO_ERR, SCI_FAILURE_RETRY_REQUIRED); sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); @@ -1084,7 +1084,7 @@ smp_request_await_response_tc_event(struct isci_request *ireq, /* All other completion status cause the IO to be complete. If a NAK * was received, then it is up to the user to retry the request */ - scic_sds_request_set_status(ireq, + sci_request_set_status(ireq, SCU_NORMALIZE_COMPLETION_STATUS(completion_code), SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); @@ -1101,7 +1101,7 @@ smp_request_await_tc_event(struct isci_request *ireq, { switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): - scic_sds_request_set_status(ireq, SCU_TASK_DONE_GOOD, + sci_request_set_status(ireq, SCU_TASK_DONE_GOOD, SCI_SUCCESS); sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); @@ -1111,7 +1111,7 @@ smp_request_await_tc_event(struct isci_request *ireq, * complete. If a NAK was received, then it is up to * the user to retry the request. */ - scic_sds_request_set_status(ireq, + sci_request_set_status(ireq, SCU_NORMALIZE_COMPLETION_STATUS(completion_code), SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); @@ -1122,7 +1122,7 @@ smp_request_await_tc_event(struct isci_request *ireq, return SCI_SUCCESS; } -void scic_stp_io_request_set_ncq_tag(struct isci_request *ireq, +void sci_stp_io_request_set_ncq_tag(struct isci_request *ireq, u16 ncq_tag) { /** @@ -1171,7 +1171,7 @@ stp_request_non_data_await_h2d_tc_event(struct isci_request *ireq, { switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): - scic_sds_request_set_status(ireq, SCU_TASK_DONE_GOOD, + sci_request_set_status(ireq, SCU_TASK_DONE_GOOD, SCI_SUCCESS); sci_change_state(&ireq->sm, SCI_REQ_STP_NON_DATA_WAIT_D2H); @@ -1182,7 +1182,7 @@ stp_request_non_data_await_h2d_tc_event(struct isci_request *ireq, * complete. If a NAK was received, then it is up to * the user to retry the request. */ - scic_sds_request_set_status(ireq, + sci_request_set_status(ireq, SCU_NORMALIZE_COMPLETION_STATUS(completion_code), SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); @@ -1198,7 +1198,7 @@ stp_request_non_data_await_h2d_tc_event(struct isci_request *ireq, /* transmit DATA_FIS from (current sgl + offset) for input * parameter length. 
current sgl and offset is alreay stored in the IO request */ -static enum sci_status scic_sds_stp_request_pio_data_out_trasmit_data_frame( +static enum sci_status sci_stp_request_pio_data_out_trasmit_data_frame( struct isci_request *ireq, u32 length) { @@ -1223,10 +1223,10 @@ static enum sci_status scic_sds_stp_request_pio_data_out_trasmit_data_frame( task_context->type.stp.fis_type = FIS_DATA; /* send the new TC out. */ - return scic_controller_continue_io(ireq); + return sci_controller_continue_io(ireq); } -static enum sci_status scic_sds_stp_request_pio_data_out_transmit_data(struct isci_request *ireq) +static enum sci_status sci_stp_request_pio_data_out_transmit_data(struct isci_request *ireq) { struct isci_stp_request *stp_req = &ireq->stp.req; struct scu_sgl_element_pair *sgl_pair; @@ -1252,7 +1252,7 @@ static enum sci_status scic_sds_stp_request_pio_data_out_transmit_data(struct is return SCI_SUCCESS; if (stp_req->pio_len >= len) { - status = scic_sds_stp_request_pio_data_out_trasmit_data_frame(ireq, len); + status = sci_stp_request_pio_data_out_trasmit_data_frame(ireq, len); if (status != SCI_SUCCESS) return status; stp_req->pio_len -= len; @@ -1261,7 +1261,7 @@ static enum sci_status scic_sds_stp_request_pio_data_out_transmit_data(struct is sgl = pio_sgl_next(stp_req); offset = 0; } else if (stp_req->pio_len < len) { - scic_sds_stp_request_pio_data_out_trasmit_data_frame(ireq, stp_req->pio_len); + sci_stp_request_pio_data_out_trasmit_data_frame(ireq, stp_req->pio_len); /* Sgl offset will be adjusted and saved for future */ offset += stp_req->pio_len; @@ -1284,7 +1284,7 @@ static enum sci_status scic_sds_stp_request_pio_data_out_transmit_data(struct is * specified data region. enum sci_status */ static enum sci_status -scic_sds_stp_request_pio_data_in_copy_data_buffer(struct isci_stp_request *stp_req, +sci_stp_request_pio_data_in_copy_data_buffer(struct isci_stp_request *stp_req, u8 *data_buf, u32 len) { struct isci_request *ireq; @@ -1328,7 +1328,7 @@ scic_sds_stp_request_pio_data_in_copy_data_buffer(struct isci_stp_request *stp_r * * Copy the data buffer to the io request data region. enum sci_status */ -static enum sci_status scic_sds_stp_request_pio_data_in_copy_data( +static enum sci_status sci_stp_request_pio_data_in_copy_data( struct isci_stp_request *stp_req, u8 *data_buffer) { @@ -1338,14 +1338,14 @@ static enum sci_status scic_sds_stp_request_pio_data_in_copy_data( * If there is less than 1K remaining in the transfer request * copy just the data for the transfer */ if (stp_req->pio_len < SCU_MAX_FRAME_BUFFER_SIZE) { - status = scic_sds_stp_request_pio_data_in_copy_data_buffer( + status = sci_stp_request_pio_data_in_copy_data_buffer( stp_req, data_buffer, stp_req->pio_len); if (status == SCI_SUCCESS) stp_req->pio_len = 0; } else { /* We are transfering the whole frame so copy */ - status = scic_sds_stp_request_pio_data_in_copy_data_buffer( + status = sci_stp_request_pio_data_in_copy_data_buffer( stp_req, data_buffer, SCU_MAX_FRAME_BUFFER_SIZE); if (status == SCI_SUCCESS) @@ -1363,7 +1363,7 @@ stp_request_pio_await_h2d_completion_tc_event(struct isci_request *ireq, switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): - scic_sds_request_set_status(ireq, + sci_request_set_status(ireq, SCU_TASK_DONE_GOOD, SCI_SUCCESS); @@ -1375,7 +1375,7 @@ stp_request_pio_await_h2d_completion_tc_event(struct isci_request *ireq, * complete. If a NAK was received, then it is up to * the user to retry the request. 
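/*
 * Toy model of the PIO data-out loop in
 * sci_stp_request_pio_data_out_transmit_data() above: send at most one SGL
 * element's worth of data per DATA FIS, advancing an (element, offset)
 * cursor until the remaining PIO byte count is exhausted.  Simplified sketch
 * only; the real code resubmits the task context and waits for a completion
 * between frames.
 */
#include <stdio.h>

struct sgl { unsigned int len; };

static void pio_data_out(const struct sgl *sgls, int nsgl, unsigned int pio_len)
{
	int idx = 0;
	unsigned int off = 0;

	while (pio_len && idx < nsgl) {
		unsigned int avail = sgls[idx].len - off;
		unsigned int chunk = pio_len < avail ? pio_len : avail;

		printf("DATA FIS: sgl %d offset %u len %u\n", idx, off, chunk);
		pio_len -= chunk;
		off += chunk;
		if (off == sgls[idx].len) {	/* element consumed, move to the next */
			idx++;
			off = 0;
		}
	}
}

int main(void)
{
	struct sgl sgls[2] = { { 4096 }, { 1024 } };

	pio_data_out(sgls, 2, 4608);	/* spans both SGL elements */
	return 0;
}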
*/ - scic_sds_request_set_status(ireq, + sci_request_set_status(ireq, SCU_NORMALIZE_COMPLETION_STATUS(completion_code), SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); @@ -1398,7 +1398,7 @@ pio_data_out_tx_done_tc_event(struct isci_request *ireq, case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): /* Transmit data */ if (stp_req->pio_len != 0) { - status = scic_sds_stp_request_pio_data_out_transmit_data(ireq); + status = sci_stp_request_pio_data_out_transmit_data(ireq); if (status == SCI_SUCCESS) { if (stp_req->pio_len == 0) all_frames_transferred = true; @@ -1426,7 +1426,7 @@ pio_data_out_tx_done_tc_event(struct isci_request *ireq, * If a NAK was received, then it is up to the user to retry * the request. */ - scic_sds_request_set_status( + sci_request_set_status( ireq, SCU_NORMALIZE_COMPLETION_STATUS(completion_code), SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); @@ -1438,16 +1438,16 @@ pio_data_out_tx_done_tc_event(struct isci_request *ireq, return status; } -static void scic_sds_stp_request_udma_complete_request( +static void sci_stp_request_udma_complete_request( struct isci_request *ireq, u32 scu_status, enum sci_status sci_status) { - scic_sds_request_set_status(ireq, scu_status, sci_status); + sci_request_set_status(ireq, scu_status, sci_status); sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); } -static enum sci_status scic_sds_stp_request_udma_general_frame_handler(struct isci_request *ireq, +static enum sci_status sci_stp_request_udma_general_frame_handler(struct isci_request *ireq, u32 frame_index) { struct isci_host *ihost = ireq->owning_controller; @@ -1455,28 +1455,28 @@ static enum sci_status scic_sds_stp_request_udma_general_frame_handler(struct is enum sci_status status; u32 *frame_buffer; - status = scic_sds_unsolicited_frame_control_get_header(&ihost->uf_control, + status = sci_unsolicited_frame_control_get_header(&ihost->uf_control, frame_index, (void **)&frame_header); if ((status == SCI_SUCCESS) && (frame_header->fis_type == FIS_REGD2H)) { - scic_sds_unsolicited_frame_control_get_buffer(&ihost->uf_control, + sci_unsolicited_frame_control_get_buffer(&ihost->uf_control, frame_index, (void **)&frame_buffer); - scic_sds_controller_copy_sata_response(&ireq->stp.rsp, + sci_controller_copy_sata_response(&ireq->stp.rsp, frame_header, frame_buffer); } - scic_sds_controller_release_frame(ihost, frame_index); + sci_controller_release_frame(ihost, frame_index); return status; } enum sci_status -scic_sds_io_request_frame_handler(struct isci_request *ireq, +sci_io_request_frame_handler(struct isci_request *ireq, u32 frame_index) { struct isci_host *ihost = ireq->owning_controller; @@ -1491,7 +1491,7 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, struct ssp_frame_hdr ssp_hdr; void *frame_header; - scic_sds_unsolicited_frame_control_get_header(&ihost->uf_control, + sci_unsolicited_frame_control_get_header(&ihost->uf_control, frame_index, &frame_header); @@ -1502,7 +1502,7 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, struct ssp_response_iu *resp_iu; ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32); - scic_sds_unsolicited_frame_control_get_buffer(&ihost->uf_control, + sci_unsolicited_frame_control_get_buffer(&ihost->uf_control, frame_index, (void **)&resp_iu); @@ -1512,11 +1512,11 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, if (resp_iu->datapres == 0x01 || resp_iu->datapres == 0x02) { - scic_sds_request_set_status(ireq, + sci_request_set_status(ireq, SCU_TASK_DONE_CHECK_RESPONSE, SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); } else - 
scic_sds_request_set_status(ireq, + sci_request_set_status(ireq, SCU_TASK_DONE_GOOD, SCI_SUCCESS); } else { @@ -1531,22 +1531,22 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, * In any case we are done with this frame buffer return it to * the controller */ - scic_sds_controller_release_frame(ihost, frame_index); + sci_controller_release_frame(ihost, frame_index); return SCI_SUCCESS; } case SCI_REQ_TASK_WAIT_TC_RESP: - scic_sds_io_request_copy_response(ireq); + sci_io_request_copy_response(ireq); sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); - scic_sds_controller_release_frame(ihost,frame_index); + sci_controller_release_frame(ihost, frame_index); return SCI_SUCCESS; case SCI_REQ_SMP_WAIT_RESP: { struct smp_resp *rsp_hdr = &ireq->smp.rsp; void *frame_header; - scic_sds_unsolicited_frame_control_get_header(&ihost->uf_control, + sci_unsolicited_frame_control_get_header(&ihost->uf_control, frame_index, &frame_header); @@ -1557,7 +1557,7 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, if (rsp_hdr->frame_type == SMP_RESPONSE) { void *smp_resp; - scic_sds_unsolicited_frame_control_get_buffer(&ihost->uf_control, + sci_unsolicited_frame_control_get_buffer(&ihost->uf_control, frame_index, &smp_resp); @@ -1567,7 +1567,7 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, sci_swab32_cpy(((u8 *) rsp_hdr) + SMP_RESP_HDR_SZ, smp_resp, word_cnt); - scic_sds_request_set_status(ireq, SCU_TASK_DONE_GOOD, + sci_request_set_status(ireq, SCU_TASK_DONE_GOOD, SCI_SUCCESS); sci_change_state(&ireq->sm, SCI_REQ_SMP_WAIT_TC_COMP); @@ -1584,31 +1584,31 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, frame_index, rsp_hdr->frame_type); - scic_sds_request_set_status(ireq, + sci_request_set_status(ireq, SCU_TASK_DONE_SMP_FRM_TYPE_ERR, SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); } - scic_sds_controller_release_frame(ihost, frame_index); + sci_controller_release_frame(ihost, frame_index); return SCI_SUCCESS; } case SCI_REQ_STP_UDMA_WAIT_TC_COMP: - return scic_sds_stp_request_udma_general_frame_handler(ireq, + return sci_stp_request_udma_general_frame_handler(ireq, frame_index); case SCI_REQ_STP_UDMA_WAIT_D2H: /* Use the general frame handler to copy the resposne data */ - status = scic_sds_stp_request_udma_general_frame_handler(ireq, + status = sci_stp_request_udma_general_frame_handler(ireq, frame_index); if (status != SCI_SUCCESS) return status; - scic_sds_stp_request_udma_complete_request(ireq, + sci_stp_request_udma_complete_request(ireq, SCU_TASK_DONE_CHECK_RESPONSE, SCI_FAILURE_IO_RESPONSE_VALID); @@ -1618,7 +1618,7 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, struct dev_to_host_fis *frame_header; u32 *frame_buffer; - status = scic_sds_unsolicited_frame_control_get_header(&ihost->uf_control, + status = sci_unsolicited_frame_control_get_header(&ihost->uf_control, frame_index, (void **)&frame_header); @@ -1636,16 +1636,16 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, switch (frame_header->fis_type) { case FIS_REGD2H: - scic_sds_unsolicited_frame_control_get_buffer(&ihost->uf_control, + sci_unsolicited_frame_control_get_buffer(&ihost->uf_control, frame_index, (void **)&frame_buffer); - scic_sds_controller_copy_sata_response(&ireq->stp.rsp, + sci_controller_copy_sata_response(&ireq->stp.rsp, frame_header, frame_buffer); /* The command has completed with error */ - scic_sds_request_set_status(ireq, SCU_TASK_DONE_CHECK_RESPONSE, + sci_request_set_status(ireq, 
SCU_TASK_DONE_CHECK_RESPONSE, SCI_FAILURE_IO_RESPONSE_VALID); break; @@ -1655,7 +1655,7 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, "violation occurred\n", __func__, stp_req, frame_index); - scic_sds_request_set_status(ireq, SCU_TASK_DONE_UNEXP_FIS, + sci_request_set_status(ireq, SCU_TASK_DONE_UNEXP_FIS, SCI_FAILURE_PROTOCOL_VIOLATION); break; } @@ -1663,7 +1663,7 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); /* Frame has been decoded return it to the controller */ - scic_sds_controller_release_frame(ihost, frame_index); + sci_controller_release_frame(ihost, frame_index); return status; } @@ -1673,7 +1673,7 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, struct dev_to_host_fis *frame_header; u32 *frame_buffer; - status = scic_sds_unsolicited_frame_control_get_header(&ihost->uf_control, + status = sci_unsolicited_frame_control_get_header(&ihost->uf_control, frame_index, (void **)&frame_header); @@ -1688,7 +1688,7 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, switch (frame_header->fis_type) { case FIS_PIO_SETUP: /* Get from the frame buffer the PIO Setup Data */ - scic_sds_unsolicited_frame_control_get_buffer(&ihost->uf_control, + sci_unsolicited_frame_control_get_buffer(&ihost->uf_control, frame_index, (void **)&frame_buffer); @@ -1704,7 +1704,7 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, /* status: 4th byte in the 3rd dword */ stp_req->status = (frame_buffer[2] >> 24) & 0xff; - scic_sds_controller_copy_sata_response(&ireq->stp.rsp, + sci_controller_copy_sata_response(&ireq->stp.rsp, frame_header, frame_buffer); @@ -1717,7 +1717,7 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_DATA_IN); } else if (task->data_dir == DMA_TO_DEVICE) { /* Transmit data */ - status = scic_sds_stp_request_pio_data_out_transmit_data(ireq); + status = sci_stp_request_pio_data_out_transmit_data(ireq); if (status != SCI_SUCCESS) break; sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_DATA_OUT); @@ -1745,15 +1745,15 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, break; } - scic_sds_unsolicited_frame_control_get_buffer(&ihost->uf_control, + sci_unsolicited_frame_control_get_buffer(&ihost->uf_control, frame_index, (void **)&frame_buffer); - scic_sds_controller_copy_sata_response(&ireq->stp.req, + sci_controller_copy_sata_response(&ireq->stp.req, frame_header, frame_buffer); - scic_sds_request_set_status(ireq, + sci_request_set_status(ireq, SCU_TASK_DONE_CHECK_RESPONSE, SCI_FAILURE_IO_RESPONSE_VALID); @@ -1766,7 +1766,7 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, } /* Frame is decoded return it to the controller */ - scic_sds_controller_release_frame(ihost, frame_index); + sci_controller_release_frame(ihost, frame_index); return status; } @@ -1775,7 +1775,7 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, struct dev_to_host_fis *frame_header; struct sata_fis_data *frame_buffer; - status = scic_sds_unsolicited_frame_control_get_header(&ihost->uf_control, + status = sci_unsolicited_frame_control_get_header(&ihost->uf_control, frame_index, (void **)&frame_header); @@ -1800,14 +1800,14 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, frame_index, frame_header->fis_type); - scic_sds_request_set_status(ireq, + sci_request_set_status(ireq, SCU_TASK_DONE_GOOD, SCI_FAILURE_IO_REQUIRES_SCSI_ABORT); sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); /* Frame is decoded return 
it to the controller */ - scic_sds_controller_release_frame(ihost, frame_index); + sci_controller_release_frame(ihost, frame_index); return status; } @@ -1815,15 +1815,15 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, ireq->saved_rx_frame_index = frame_index; stp_req->pio_len = 0; } else { - scic_sds_unsolicited_frame_control_get_buffer(&ihost->uf_control, + sci_unsolicited_frame_control_get_buffer(&ihost->uf_control, frame_index, (void **)&frame_buffer); - status = scic_sds_stp_request_pio_data_in_copy_data(stp_req, + status = sci_stp_request_pio_data_in_copy_data(stp_req, (u8 *)frame_buffer); /* Frame is decoded return it to the controller */ - scic_sds_controller_release_frame(ihost, frame_index); + sci_controller_release_frame(ihost, frame_index); } /* Check for the end of the transfer, are there more @@ -1833,7 +1833,7 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, return status; if ((stp_req->status & ATA_BUSY) == 0) { - scic_sds_request_set_status(ireq, + sci_request_set_status(ireq, SCU_TASK_DONE_CHECK_RESPONSE, SCI_FAILURE_IO_RESPONSE_VALID); @@ -1848,7 +1848,7 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, struct dev_to_host_fis *frame_header; u32 *frame_buffer; - status = scic_sds_unsolicited_frame_control_get_header(&ihost->uf_control, + status = sci_unsolicited_frame_control_get_header(&ihost->uf_control, frame_index, (void **)&frame_header); if (status != SCI_SUCCESS) { @@ -1864,16 +1864,16 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, switch (frame_header->fis_type) { case FIS_REGD2H: - scic_sds_unsolicited_frame_control_get_buffer(&ihost->uf_control, + sci_unsolicited_frame_control_get_buffer(&ihost->uf_control, frame_index, (void **)&frame_buffer); - scic_sds_controller_copy_sata_response(&ireq->stp.rsp, + sci_controller_copy_sata_response(&ireq->stp.rsp, frame_header, frame_buffer); /* The command has completed with error */ - scic_sds_request_set_status(ireq, + sci_request_set_status(ireq, SCU_TASK_DONE_CHECK_RESPONSE, SCI_FAILURE_IO_RESPONSE_VALID); break; @@ -1886,7 +1886,7 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, stp_req, frame_index); - scic_sds_request_set_status(ireq, + sci_request_set_status(ireq, SCU_TASK_DONE_UNEXP_FIS, SCI_FAILURE_PROTOCOL_VIOLATION); break; @@ -1895,7 +1895,7 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); /* Frame has been decoded return it to the controller */ - scic_sds_controller_release_frame(ihost, frame_index); + sci_controller_release_frame(ihost, frame_index); return status; } @@ -1904,7 +1904,7 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, * TODO: Is it even possible to get an unsolicited frame in the * aborting state? 
*/ - scic_sds_controller_release_frame(ihost, frame_index); + sci_controller_release_frame(ihost, frame_index); return SCI_SUCCESS; default: @@ -1915,7 +1915,7 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, frame_index, state); - scic_sds_controller_release_frame(ihost, frame_index); + sci_controller_release_frame(ihost, frame_index); return SCI_FAILURE_INVALID_STATE; } } @@ -1927,7 +1927,7 @@ static enum sci_status stp_request_udma_await_tc_event(struct isci_request *ireq switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): - scic_sds_stp_request_udma_complete_request(ireq, + sci_stp_request_udma_complete_request(ireq, SCU_TASK_DONE_GOOD, SCI_SUCCESS); break; @@ -1938,10 +1938,10 @@ static enum sci_status stp_request_udma_await_tc_event(struct isci_request *ireq * completion. */ if (ireq->stp.rsp.fis_type == FIS_REGD2H) { - scic_sds_remote_device_suspend(ireq->target_device, + sci_remote_device_suspend(ireq->target_device, SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code))); - scic_sds_stp_request_udma_complete_request(ireq, + sci_stp_request_udma_complete_request(ireq, SCU_TASK_DONE_CHECK_RESPONSE, SCI_FAILURE_IO_RESPONSE_VALID); } else { @@ -1965,12 +1965,12 @@ static enum sci_status stp_request_udma_await_tc_event(struct isci_request *ireq case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_R_ERR): case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CMD_LL_R_ERR): case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CRC_ERR): - scic_sds_remote_device_suspend(ireq->target_device, + sci_remote_device_suspend(ireq->target_device, SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code))); /* Fall through to the default case */ default: /* All other completion status cause the IO to be complete. */ - scic_sds_stp_request_udma_complete_request(ireq, + sci_stp_request_udma_complete_request(ireq, SCU_NORMALIZE_COMPLETION_STATUS(completion_code), SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); break; @@ -1985,7 +1985,7 @@ stp_request_soft_reset_await_h2d_asserted_tc_event(struct isci_request *ireq, { switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): - scic_sds_request_set_status(ireq, SCU_TASK_DONE_GOOD, + sci_request_set_status(ireq, SCU_TASK_DONE_GOOD, SCI_SUCCESS); sci_change_state(&ireq->sm, SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG); @@ -1997,7 +1997,7 @@ stp_request_soft_reset_await_h2d_asserted_tc_event(struct isci_request *ireq, * If a NAK was received, then it is up to the user to retry * the request. */ - scic_sds_request_set_status(ireq, + sci_request_set_status(ireq, SCU_NORMALIZE_COMPLETION_STATUS(completion_code), SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); @@ -2014,7 +2014,7 @@ stp_request_soft_reset_await_h2d_diagnostic_tc_event(struct isci_request *ireq, { switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): - scic_sds_request_set_status(ireq, SCU_TASK_DONE_GOOD, + sci_request_set_status(ireq, SCU_TASK_DONE_GOOD, SCI_SUCCESS); sci_change_state(&ireq->sm, SCI_REQ_STP_SOFT_RESET_WAIT_D2H); @@ -2025,7 +2025,7 @@ stp_request_soft_reset_await_h2d_diagnostic_tc_event(struct isci_request *ireq, * a NAK was received, then it is up to the user to retry the * request. 
*/ - scic_sds_request_set_status(ireq, + sci_request_set_status(ireq, SCU_NORMALIZE_COMPLETION_STATUS(completion_code), SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); @@ -2037,7 +2037,7 @@ stp_request_soft_reset_await_h2d_diagnostic_tc_event(struct isci_request *ireq, } enum sci_status -scic_sds_io_request_tc_completion(struct isci_request *ireq, +sci_io_request_tc_completion(struct isci_request *ireq, u32 completion_code) { enum sci_base_request_states state; @@ -2832,7 +2832,7 @@ static void isci_request_io_request_complete(struct isci_host *ihost, ); /* complete the io request to the core. */ - scic_controller_complete_io(ihost, request->target_device, request); + sci_controller_complete_io(ihost, request->target_device, request); isci_put_device(idev); /* set terminated handle so it cannot be completed or @@ -2842,7 +2842,7 @@ static void isci_request_io_request_complete(struct isci_host *ihost, set_bit(IREQ_TERMINATED, &request->flags); } -static void scic_sds_request_started_state_enter(struct sci_base_state_machine *sm) +static void sci_request_started_state_enter(struct sci_base_state_machine *sm) { struct isci_request *ireq = container_of(sm, typeof(*ireq), sm); struct domain_device *dev = ireq->target_device->domain_dev; @@ -2879,7 +2879,7 @@ static void scic_sds_request_started_state_enter(struct sci_base_state_machine * } } -static void scic_sds_request_completed_state_enter(struct sci_base_state_machine *sm) +static void sci_request_completed_state_enter(struct sci_base_state_machine *sm) { struct isci_request *ireq = container_of(sm, typeof(*ireq), sm); struct isci_host *ihost = ireq->owning_controller; @@ -2892,7 +2892,7 @@ static void scic_sds_request_completed_state_enter(struct sci_base_state_machine isci_task_request_complete(ihost, ireq, ireq->sci_status); } -static void scic_sds_request_aborting_state_enter(struct sci_base_state_machine *sm) +static void sci_request_aborting_state_enter(struct sci_base_state_machine *sm) { struct isci_request *ireq = container_of(sm, typeof(*ireq), sm); @@ -2900,31 +2900,31 @@ static void scic_sds_request_aborting_state_enter(struct sci_base_state_machine ireq->tc->abort = 1; } -static void scic_sds_stp_request_started_non_data_await_h2d_completion_enter(struct sci_base_state_machine *sm) +static void sci_stp_request_started_non_data_await_h2d_completion_enter(struct sci_base_state_machine *sm) { struct isci_request *ireq = container_of(sm, typeof(*ireq), sm); - scic_sds_remote_device_set_working_request(ireq->target_device, + sci_remote_device_set_working_request(ireq->target_device, ireq); } -static void scic_sds_stp_request_started_pio_await_h2d_completion_enter(struct sci_base_state_machine *sm) +static void sci_stp_request_started_pio_await_h2d_completion_enter(struct sci_base_state_machine *sm) { struct isci_request *ireq = container_of(sm, typeof(*ireq), sm); - scic_sds_remote_device_set_working_request(ireq->target_device, + sci_remote_device_set_working_request(ireq->target_device, ireq); } -static void scic_sds_stp_request_started_soft_reset_await_h2d_asserted_completion_enter(struct sci_base_state_machine *sm) +static void sci_stp_request_started_soft_reset_await_h2d_asserted_completion_enter(struct sci_base_state_machine *sm) { struct isci_request *ireq = container_of(sm, typeof(*ireq), sm); - scic_sds_remote_device_set_working_request(ireq->target_device, + sci_remote_device_set_working_request(ireq->target_device, ireq); } -static void scic_sds_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter(struct 
sci_base_state_machine *sm) +static void sci_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter(struct sci_base_state_machine *sm) { struct isci_request *ireq = container_of(sm, typeof(*ireq), sm); struct scu_task_context *tc = ireq->tc; @@ -2938,22 +2938,22 @@ static void scic_sds_stp_request_started_soft_reset_await_h2d_diagnostic_complet /* Clear the TC control bit */ tc->control_frame = 0; - status = scic_controller_continue_io(ireq); + status = sci_controller_continue_io(ireq); WARN_ONCE(status != SCI_SUCCESS, "isci: continue io failure\n"); } -static const struct sci_base_state scic_sds_request_state_table[] = { +static const struct sci_base_state sci_request_state_table[] = { [SCI_REQ_INIT] = { }, [SCI_REQ_CONSTRUCTED] = { }, [SCI_REQ_STARTED] = { - .enter_state = scic_sds_request_started_state_enter, + .enter_state = sci_request_started_state_enter, }, [SCI_REQ_STP_NON_DATA_WAIT_H2D] = { - .enter_state = scic_sds_stp_request_started_non_data_await_h2d_completion_enter, + .enter_state = sci_stp_request_started_non_data_await_h2d_completion_enter, }, [SCI_REQ_STP_NON_DATA_WAIT_D2H] = { }, [SCI_REQ_STP_PIO_WAIT_H2D] = { - .enter_state = scic_sds_stp_request_started_pio_await_h2d_completion_enter, + .enter_state = sci_stp_request_started_pio_await_h2d_completion_enter, }, [SCI_REQ_STP_PIO_WAIT_FRAME] = { }, [SCI_REQ_STP_PIO_DATA_IN] = { }, @@ -2961,10 +2961,10 @@ static const struct sci_base_state scic_sds_request_state_table[] = { [SCI_REQ_STP_UDMA_WAIT_TC_COMP] = { }, [SCI_REQ_STP_UDMA_WAIT_D2H] = { }, [SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED] = { - .enter_state = scic_sds_stp_request_started_soft_reset_await_h2d_asserted_completion_enter, + .enter_state = sci_stp_request_started_soft_reset_await_h2d_asserted_completion_enter, }, [SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG] = { - .enter_state = scic_sds_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter, + .enter_state = sci_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter, }, [SCI_REQ_STP_SOFT_RESET_WAIT_D2H] = { }, [SCI_REQ_TASK_WAIT_TC_COMP] = { }, @@ -2972,20 +2972,20 @@ static const struct sci_base_state scic_sds_request_state_table[] = { [SCI_REQ_SMP_WAIT_RESP] = { }, [SCI_REQ_SMP_WAIT_TC_COMP] = { }, [SCI_REQ_COMPLETED] = { - .enter_state = scic_sds_request_completed_state_enter, + .enter_state = sci_request_completed_state_enter, }, [SCI_REQ_ABORTING] = { - .enter_state = scic_sds_request_aborting_state_enter, + .enter_state = sci_request_aborting_state_enter, }, [SCI_REQ_FINAL] = { }, }; static void -scic_sds_general_request_construct(struct isci_host *ihost, +sci_general_request_construct(struct isci_host *ihost, struct isci_remote_device *idev, struct isci_request *ireq) { - sci_init_sm(&ireq->sm, scic_sds_request_state_table, SCI_REQ_INIT); + sci_init_sm(&ireq->sm, sci_request_state_table, SCI_REQ_INIT); ireq->target_device = idev; ireq->protocol = SCIC_NO_PROTOCOL; @@ -2997,7 +2997,7 @@ scic_sds_general_request_construct(struct isci_host *ihost, } static enum sci_status -scic_io_request_construct(struct isci_host *ihost, +sci_io_request_construct(struct isci_host *ihost, struct isci_remote_device *idev, struct isci_request *ireq) { @@ -3005,7 +3005,7 @@ scic_io_request_construct(struct isci_host *ihost, enum sci_status status = SCI_SUCCESS; /* Build the common part of the request */ - scic_sds_general_request_construct(ihost, idev, ireq); + sci_general_request_construct(ihost, idev, ireq); if (idev->rnc.remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) 
return SCI_FAILURE_INVALID_REMOTE_DEVICE; @@ -3024,7 +3024,7 @@ scic_io_request_construct(struct isci_host *ihost, return status; } -enum sci_status scic_task_request_construct(struct isci_host *ihost, +enum sci_status sci_task_request_construct(struct isci_host *ihost, struct isci_remote_device *idev, u16 io_tag, struct isci_request *ireq) { @@ -3032,7 +3032,7 @@ enum sci_status scic_task_request_construct(struct isci_host *ihost, enum sci_status status = SCI_SUCCESS; /* Build the common part of the request */ - scic_sds_general_request_construct(ihost, idev, ireq); + sci_general_request_construct(ihost, idev, ireq); if (dev->dev_type == SAS_END_DEV || dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) { @@ -3053,7 +3053,7 @@ static enum sci_status isci_request_ssp_request_construct( "%s: request = %p\n", __func__, request); - status = scic_io_request_construct_basic_ssp(request); + status = sci_io_request_construct_basic_ssp(request); return status; } @@ -3074,7 +3074,7 @@ static enum sci_status isci_request_stp_request_construct( */ register_fis = isci_sata_task_to_fis_copy(task); - status = scic_io_request_construct_basic_sata(request); + status = sci_io_request_construct_basic_sata(request); /* Set the ncq tag in the fis, from the queue * command in the task. @@ -3091,7 +3091,7 @@ static enum sci_status isci_request_stp_request_construct( } static enum sci_status -scic_io_request_construct_smp(struct device *dev, +sci_io_request_construct_smp(struct device *dev, struct isci_request *ireq, struct sas_task *task) { @@ -3141,8 +3141,8 @@ scic_io_request_construct_smp(struct device *dev, task_context = ireq->tc; - idev = scic_sds_request_get_device(ireq); - iport = scic_sds_request_get_port(ireq); + idev = sci_request_get_device(ireq); + iport = sci_request_get_port(ireq); /* * Fill in the TC with the its required data @@ -3152,8 +3152,8 @@ scic_io_request_construct_smp(struct device *dev, task_context->initiator_request = 1; task_context->connection_rate = idev->connection_rate; task_context->protocol_engine_index = - scic_sds_controller_get_protocol_engine_group(ihost); - task_context->logical_port_index = scic_sds_port_get_index(iport); + sci_controller_get_protocol_engine_group(ihost); + task_context->logical_port_index = sci_port_get_index(iport); task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SMP; task_context->abort = 0; task_context->valid = SCU_TASK_CONTEXT_VALID; @@ -3195,9 +3195,9 @@ scic_io_request_construct_smp(struct device *dev, task_context->task_phase = 0; ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC | - (scic_sds_controller_get_protocol_engine_group(ihost) << + (sci_controller_get_protocol_engine_group(ihost) << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) | - (scic_sds_port_get_index(iport) << + (sci_port_get_index(iport) << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) | ISCI_TAG_TCI(ireq->io_tag)); /* @@ -3229,7 +3229,7 @@ static enum sci_status isci_smp_request_build(struct isci_request *ireq) struct device *dev = &ireq->isci_host->pdev->dev; enum sci_status status = SCI_FAILURE; - status = scic_io_request_construct_smp(dev, ireq, task); + status = sci_io_request_construct_smp(dev, ireq, task); if (status != SCI_SUCCESS) dev_warn(&ireq->isci_host->pdev->dev, "%s: failed with status = %d\n", @@ -3283,7 +3283,7 @@ static enum sci_status isci_io_request_build(struct isci_host *ihost, return SCI_FAILURE_INSUFFICIENT_RESOURCES; } - status = scic_io_request_construct(ihost, idev, request); + status = sci_io_request_construct(ihost, 
idev, request); if (status != SCI_SUCCESS) { dev_warn(&ihost->pdev->dev, @@ -3388,7 +3388,7 @@ int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *ide * request was built that way (ie. * ireq->is_task_management_request is false). */ - status = scic_controller_start_task(ihost, + status = sci_controller_start_task(ihost, idev, ireq); } else { @@ -3396,7 +3396,7 @@ int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *ide } } else { /* send the request, let the core assign the IO TAG. */ - status = scic_controller_start_io(ihost, idev, + status = sci_controller_start_io(ihost, idev, ireq); } diff --git a/drivers/scsi/isci/request.h b/drivers/scsi/isci/request.h index 0cafcead7a0..08fcf98e70f 100644 --- a/drivers/scsi/isci/request.h +++ b/drivers/scsi/isci/request.h @@ -301,75 +301,75 @@ enum sci_base_request_states { }; /** - * scic_sds_request_get_controller() - + * sci_request_get_controller() - * * This macro will return the controller for this io request object */ -#define scic_sds_request_get_controller(ireq) \ +#define sci_request_get_controller(ireq) \ ((ireq)->owning_controller) /** - * scic_sds_request_get_device() - + * sci_request_get_device() - * * This macro will return the device for this io request object */ -#define scic_sds_request_get_device(ireq) \ +#define sci_request_get_device(ireq) \ ((ireq)->target_device) /** - * scic_sds_request_get_port() - + * sci_request_get_port() - * * This macro will return the port for this io request object */ -#define scic_sds_request_get_port(ireq) \ - scic_sds_remote_device_get_port(scic_sds_request_get_device(ireq)) +#define sci_request_get_port(ireq) \ + sci_remote_device_get_port(sci_request_get_device(ireq)) /** - * scic_sds_request_get_post_context() - + * sci_request_get_post_context() - * * This macro returns the constructed post context result for the io request. */ -#define scic_sds_request_get_post_context(ireq) \ +#define sci_request_get_post_context(ireq) \ ((ireq)->post_context) /** - * scic_sds_request_get_task_context() - + * sci_request_get_task_context() - * * This is a helper macro to return the os handle for this request object. */ -#define scic_sds_request_get_task_context(request) \ +#define sci_request_get_task_context(request) \ ((request)->task_context_buffer) /** - * scic_sds_request_set_status() - + * sci_request_set_status() - * * This macro will set the scu hardware status and sci request completion * status for an io request. 
*/ -#define scic_sds_request_set_status(request, scu_status_code, sci_status_code) \ +#define sci_request_set_status(request, scu_status_code, sci_status_code) \ { \ (request)->scu_status = (scu_status_code); \ (request)->sci_status = (sci_status_code); \ } -enum sci_status scic_sds_request_start(struct isci_request *ireq); -enum sci_status scic_sds_io_request_terminate(struct isci_request *ireq); +enum sci_status sci_request_start(struct isci_request *ireq); +enum sci_status sci_io_request_terminate(struct isci_request *ireq); enum sci_status -scic_sds_io_request_event_handler(struct isci_request *ireq, +sci_io_request_event_handler(struct isci_request *ireq, u32 event_code); enum sci_status -scic_sds_io_request_frame_handler(struct isci_request *ireq, +sci_io_request_frame_handler(struct isci_request *ireq, u32 frame_index); enum sci_status -scic_sds_task_request_terminate(struct isci_request *ireq); +sci_task_request_terminate(struct isci_request *ireq); extern enum sci_status -scic_sds_request_complete(struct isci_request *ireq); +sci_request_complete(struct isci_request *ireq); extern enum sci_status -scic_sds_io_request_tc_completion(struct isci_request *ireq, u32 code); +sci_io_request_tc_completion(struct isci_request *ireq, u32 code); /* XXX open code in caller */ static inline dma_addr_t -scic_io_request_get_dma_addr(struct isci_request *ireq, void *virt_addr) +sci_io_request_get_dma_addr(struct isci_request *ireq, void *virt_addr) { char *requested_addr = (char *)virt_addr; @@ -500,17 +500,17 @@ int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *ide void isci_terminate_pending_requests(struct isci_host *ihost, struct isci_remote_device *idev); enum sci_status -scic_task_request_construct(struct isci_host *ihost, +sci_task_request_construct(struct isci_host *ihost, struct isci_remote_device *idev, u16 io_tag, struct isci_request *ireq); enum sci_status -scic_task_request_construct_ssp(struct isci_request *ireq); +sci_task_request_construct_ssp(struct isci_request *ireq); enum sci_status -scic_task_request_construct_sata(struct isci_request *ireq); +sci_task_request_construct_sata(struct isci_request *ireq); void -scic_stp_io_request_set_ncq_tag(struct isci_request *ireq, u16 ncq_tag); -void scic_sds_smp_request_copy_response(struct isci_request *ireq); +sci_stp_io_request_set_ncq_tag(struct isci_request *ireq, u16 ncq_tag); +void sci_smp_request_copy_response(struct isci_request *ireq); static inline int isci_task_is_ncq_recovery(struct sas_task *task) { diff --git a/drivers/scsi/isci/sata.c b/drivers/scsi/isci/sata.c index 87d8cc1a6e3..47b96c21548 100644 --- a/drivers/scsi/isci/sata.c +++ b/drivers/scsi/isci/sata.c @@ -116,7 +116,7 @@ void isci_sata_set_ncq_tag( struct isci_request *request = task->lldd_task; register_fis->sector_count = qc->tag << 3; - scic_stp_io_request_set_ncq_tag(request, qc->tag); + sci_stp_io_request_set_ncq_tag(request, qc->tag); } /** @@ -187,7 +187,7 @@ enum sci_status isci_sata_management_task_request_build(struct isci_request *ire /* core builds the protocol specific request * based on the h2d fis. */ - status = scic_task_request_construct_sata(ireq); + status = sci_task_request_construct_sata(ireq); return status; } diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c index 3a1fc55a755..d040aa2f372 100644 --- a/drivers/scsi/isci/task.c +++ b/drivers/scsi/isci/task.c @@ -257,12 +257,12 @@ static struct isci_request *isci_task_request_build(struct isci_host *ihost, return NULL; /* let the core do it's construct. 
*/ - status = scic_task_request_construct(ihost, idev, tag, + status = sci_task_request_construct(ihost, idev, tag, ireq); if (status != SCI_SUCCESS) { dev_warn(&ihost->pdev->dev, - "%s: scic_task_request_construct failed - " + "%s: sci_task_request_construct failed - " "status = 0x%x\n", __func__, status); @@ -272,7 +272,7 @@ static struct isci_request *isci_task_request_build(struct isci_host *ihost, /* XXX convert to get this from task->tproto like other drivers */ if (dev->dev_type == SAS_END_DEV) { isci_tmf->proto = SAS_PROTOCOL_SSP; - status = scic_task_request_construct_ssp(ireq); + status = sci_task_request_construct_ssp(ireq); if (status != SCI_SUCCESS) return NULL; } @@ -332,7 +332,7 @@ int isci_task_execute_tmf(struct isci_host *ihost, spin_lock_irqsave(&ihost->scic_lock, flags); /* start the TMF io. */ - status = scic_controller_start_task(ihost, idev, ireq); + status = sci_controller_start_task(ihost, idev, ireq); if (status != SCI_TASK_SUCCESS) { dev_warn(&ihost->pdev->dev, @@ -364,7 +364,7 @@ int isci_task_execute_tmf(struct isci_host *ihost, if (tmf->cb_state_func != NULL) tmf->cb_state_func(isci_tmf_timed_out, tmf, tmf->cb_data); - scic_controller_terminate_request(ihost, + sci_controller_terminate_request(ihost, idev, ireq); @@ -556,7 +556,7 @@ static void isci_terminate_request_core(struct isci_host *ihost, if (!test_bit(IREQ_TERMINATED, &isci_request->flags)) { was_terminated = true; needs_cleanup_handling = true; - status = scic_controller_terminate_request(ihost, + status = sci_controller_terminate_request(ihost, idev, isci_request); } @@ -569,7 +569,7 @@ static void isci_terminate_request_core(struct isci_host *ihost, */ if (status != SCI_SUCCESS) { dev_err(&ihost->pdev->dev, - "%s: scic_controller_terminate_request" + "%s: sci_controller_terminate_request" " returned = 0x%x\n", __func__, status); @@ -1251,7 +1251,7 @@ isci_task_request_complete(struct isci_host *ihost, /* PRINT_TMF( ((struct isci_tmf *)request->task)); */ tmf_complete = tmf->complete; - scic_controller_complete_io(ihost, ireq->target_device, ireq); + sci_controller_complete_io(ihost, ireq->target_device, ireq); /* set the 'terminated' flag handle to make sure it cannot be terminated * or completed again. */ @@ -1514,12 +1514,12 @@ static int isci_reset_device(struct isci_host *ihost, dev_dbg(&ihost->pdev->dev, "%s: idev %p\n", __func__, idev); spin_lock_irqsave(&ihost->scic_lock, flags); - status = scic_remote_device_reset(idev); + status = sci_remote_device_reset(idev); if (status != SCI_SUCCESS) { spin_unlock_irqrestore(&ihost->scic_lock, flags); dev_warn(&ihost->pdev->dev, - "%s: scic_remote_device_reset(%p) returned %d!\n", + "%s: sci_remote_device_reset(%p) returned %d!\n", __func__, idev, status); return TMF_RESP_FUNC_FAILED; @@ -1540,7 +1540,7 @@ static int isci_reset_device(struct isci_host *ihost, /* Since all pending TCs have been cleaned, resume the RNC. */ spin_lock_irqsave(&ihost->scic_lock, flags); - status = scic_remote_device_reset_complete(idev); + status = sci_remote_device_reset_complete(idev); spin_unlock_irqrestore(&ihost->scic_lock, flags); /* If this is a device on an expander, bring the phy back up. 
*/ @@ -1560,7 +1560,7 @@ static int isci_reset_device(struct isci_host *ihost, if (status != SCI_SUCCESS) { dev_warn(&ihost->pdev->dev, - "%s: scic_remote_device_reset_complete(%p) " + "%s: sci_remote_device_reset_complete(%p) " "returned %d!\n", __func__, idev, status); } diff --git a/drivers/scsi/isci/unsolicited_frame_control.c b/drivers/scsi/isci/unsolicited_frame_control.c index a0e6f89fc6a..e9e1e2abacb 100644 --- a/drivers/scsi/isci/unsolicited_frame_control.c +++ b/drivers/scsi/isci/unsolicited_frame_control.c @@ -57,10 +57,10 @@ #include "unsolicited_frame_control.h" #include "registers.h" -int scic_sds_unsolicited_frame_control_construct(struct isci_host *ihost) +int sci_unsolicited_frame_control_construct(struct isci_host *ihost) { - struct scic_sds_unsolicited_frame_control *uf_control = &ihost->uf_control; - struct scic_sds_unsolicited_frame *uf; + struct sci_unsolicited_frame_control *uf_control = &ihost->uf_control; + struct sci_unsolicited_frame *uf; u32 buf_len, header_len, i; dma_addr_t dma; size_t size; @@ -139,23 +139,14 @@ int scic_sds_unsolicited_frame_control_construct(struct isci_host *ihost) return 0; } -/** - * This method returns the frame header for the specified frame index. - * @uf_control: - * @frame_index: - * @frame_header: - * - * enum sci_status - */ -enum sci_status scic_sds_unsolicited_frame_control_get_header( - struct scic_sds_unsolicited_frame_control *uf_control, - u32 frame_index, - void **frame_header) +enum sci_status sci_unsolicited_frame_control_get_header(struct sci_unsolicited_frame_control *uf_control, + u32 frame_index, + void **frame_header) { if (frame_index < SCU_MAX_UNSOLICITED_FRAMES) { - /* - * Skip the first word in the frame since this is a controll word used - * by the hardware. */ + /* Skip the first word in the frame since this is a controll word used + * by the hardware. + */ *frame_header = &uf_control->buffers.array[frame_index].header->data; return SCI_SUCCESS; @@ -164,18 +155,9 @@ enum sci_status scic_sds_unsolicited_frame_control_get_header( return SCI_FAILURE_INVALID_PARAMETER_VALUE; } -/** - * This method returns the frame buffer for the specified frame index. - * @uf_control: - * @frame_index: - * @frame_buffer: - * - * enum sci_status - */ -enum sci_status scic_sds_unsolicited_frame_control_get_buffer( - struct scic_sds_unsolicited_frame_control *uf_control, - u32 frame_index, - void **frame_buffer) +enum sci_status sci_unsolicited_frame_control_get_buffer(struct sci_unsolicited_frame_control *uf_control, + u32 frame_index, + void **frame_buffer) { if (frame_index < SCU_MAX_UNSOLICITED_FRAMES) { *frame_buffer = uf_control->buffers.array[frame_index].buffer; @@ -186,19 +168,8 @@ enum sci_status scic_sds_unsolicited_frame_control_get_buffer( return SCI_FAILURE_INVALID_PARAMETER_VALUE; } -/** - * This method releases the frame once this is done the frame is available for - * re-use by the hardware. The data contained in the frame header and frame - * buffer is no longer valid. - * @uf_control: This parameter specifies the UF control object - * @frame_index: This parameter specifies the frame index to attempt to release. - * - * This method returns an indication to the caller as to whether the - * unsolicited frame get pointer should be updated. 
- */ -bool scic_sds_unsolicited_frame_control_release_frame( - struct scic_sds_unsolicited_frame_control *uf_control, - u32 frame_index) +bool sci_unsolicited_frame_control_release_frame(struct sci_unsolicited_frame_control *uf_control, + u32 frame_index) { u32 frame_get; u32 frame_cycle; diff --git a/drivers/scsi/isci/unsolicited_frame_control.h b/drivers/scsi/isci/unsolicited_frame_control.h index c0285a3db56..b849a84af34 100644 --- a/drivers/scsi/isci/unsolicited_frame_control.h +++ b/drivers/scsi/isci/unsolicited_frame_control.h @@ -92,12 +92,12 @@ enum unsolicited_frame_state { }; /** - * struct scic_sds_unsolicited_frame - + * struct sci_unsolicited_frame - * * This is the unsolicited frame data structure it acts as the container for * the current frame state, frame header and frame buffer. */ -struct scic_sds_unsolicited_frame { +struct sci_unsolicited_frame { /** * This field contains the current frame state */ @@ -116,11 +116,11 @@ struct scic_sds_unsolicited_frame { }; /** - * struct scic_sds_uf_header_array - + * struct sci_uf_header_array - * * This structure contains all of the unsolicited frame header information. */ -struct scic_sds_uf_header_array { +struct sci_uf_header_array { /** * This field is represents a virtual pointer to the start * address of the UF address table. The table contains @@ -137,19 +137,19 @@ struct scic_sds_uf_header_array { }; /** - * struct scic_sds_uf_buffer_array - + * struct sci_uf_buffer_array - * * This structure contains all of the unsolicited frame buffer (actual payload) * information. */ -struct scic_sds_uf_buffer_array { +struct sci_uf_buffer_array { /** * This field is the unsolicited frame data its used to manage * the data for the unsolicited frame requests. It also represents * the virtual address location that corresponds to the * physical_address field. */ - struct scic_sds_unsolicited_frame array[SCU_MAX_UNSOLICITED_FRAMES]; + struct sci_unsolicited_frame array[SCU_MAX_UNSOLICITED_FRAMES]; /** * This field specifies the physical address location for the UF @@ -159,13 +159,13 @@ struct scic_sds_uf_buffer_array { }; /** - * struct scic_sds_uf_address_table_array - + * struct sci_uf_address_table_array - * * This object maintains all of the unsolicited frame address table specific * data. The address table is a collection of 64-bit pointers that point to * 1KB buffers into which the silicon will DMA unsolicited frames. */ -struct scic_sds_uf_address_table_array { +struct sci_uf_address_table_array { /** * This field represents a virtual pointer that refers to the * starting address of the UF address table. @@ -182,11 +182,11 @@ struct scic_sds_uf_address_table_array { }; /** - * struct scic_sds_unsolicited_frame_control - + * struct sci_unsolicited_frame_control - * * This object contains all of the data necessary to handle unsolicited frames. */ -struct scic_sds_unsolicited_frame_control { +struct sci_unsolicited_frame_control { /** * This field is the software copy of the unsolicited frame queue * get pointer. The controller object writes this value to the @@ -198,38 +198,38 @@ struct scic_sds_unsolicited_frame_control { * This field contains all of the unsolicited frame header * specific fields. */ - struct scic_sds_uf_header_array headers; + struct sci_uf_header_array headers; /** * This field contains all of the unsolicited frame buffer * specific fields. */ - struct scic_sds_uf_buffer_array buffers; + struct sci_uf_buffer_array buffers; /** * This field contains all of the unsolicited frame address table * specific fields. 
 	 */
-	struct scic_sds_uf_address_table_array address_table;
+	struct sci_uf_address_table_array address_table;
 };
 
 struct isci_host;
 
-int scic_sds_unsolicited_frame_control_construct(struct isci_host *ihost);
+int sci_unsolicited_frame_control_construct(struct isci_host *ihost);
 
-enum sci_status scic_sds_unsolicited_frame_control_get_header(
-	struct scic_sds_unsolicited_frame_control *uf_control,
+enum sci_status sci_unsolicited_frame_control_get_header(
+	struct sci_unsolicited_frame_control *uf_control,
 	u32 frame_index,
 	void **frame_header);
 
-enum sci_status scic_sds_unsolicited_frame_control_get_buffer(
-	struct scic_sds_unsolicited_frame_control *uf_control,
+enum sci_status sci_unsolicited_frame_control_get_buffer(
-	struct scic_sds_unsolicited_frame_control *uf_control,
+	struct sci_unsolicited_frame_control *uf_control,
 	u32 frame_index,
 	void **frame_buffer);
 
-bool scic_sds_unsolicited_frame_control_release_frame(
-	struct scic_sds_unsolicited_frame_control *uf_control,
+bool sci_unsolicited_frame_control_release_frame(
+	struct sci_unsolicited_frame_control *uf_control,
 	u32 frame_index);
 
 #endif /* _SCIC_SDS_UNSOLICITED_FRAME_CONTROL_H_ */
-- 
cgit v1.2.3-70-g09d2


From 34a991587a5cc9f78960c2c9beea217866458c41 Mon Sep 17 00:00:00 2001
From: Dan Williams <dan.j.williams@intel.com>
Date: Fri, 1 Jul 2011 02:25:15 -0700
Subject: isci: kill 'get/set' macros

Most of these simple dereference macros are longer than their open
coded equivalent.  Deleting enum sci_controller_mode is thrown in
for good measure.

Reported-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
---
 drivers/scsi/isci/host.c                |   8 +-
 drivers/scsi/isci/host.h                |  20 +-
 drivers/scsi/isci/isci.h                |   5 -
 drivers/scsi/isci/phy.c                 |  33 ++--
 drivers/scsi/isci/phy.h                 |  17 --
 drivers/scsi/isci/port.c                |   4 +-
 drivers/scsi/isci/port.h                |  17 --
 drivers/scsi/isci/port_config.c         |  14 +-
 drivers/scsi/isci/remote_device.c       |  43 ++---
 drivers/scsi/isci/remote_device.h       |  97 ++--------
 drivers/scsi/isci/remote_node_context.c |   2 +-
 drivers/scsi/isci/remote_node_context.h |   3 -
 drivers/scsi/isci/request.c             | 328 +++++++++++++-------------------
 drivers/scsi/isci/request.h             |  73 -------
 drivers/scsi/isci/task.c                |   2 +-
 15 files changed, 188 insertions(+), 478 deletions(-)

(limited to 'drivers/scsi/isci/request.h')

diff --git a/drivers/scsi/isci/host.c b/drivers/scsi/isci/host.c
index f31f64e4b71..88e73133353 100644
--- a/drivers/scsi/isci/host.c
+++ b/drivers/scsi/isci/host.c
@@ -2627,7 +2627,7 @@ enum sci_status sci_controller_start_io(struct isci_host *ihost,
 		return status;
 
 	set_bit(IREQ_ACTIVE, &ireq->flags);
-	sci_controller_post_request(ihost, sci_request_get_post_context(ireq));
+	sci_controller_post_request(ihost, ireq->post_context);
 	return SCI_SUCCESS;
 }
 
@@ -2707,7 +2707,7 @@ enum sci_status sci_controller_continue_io(struct isci_request *ireq)
 	}
 
 	set_bit(IREQ_ACTIVE, &ireq->flags);
-	sci_controller_post_request(ihost, sci_request_get_post_context(ireq));
+	sci_controller_post_request(ihost, ireq->post_context);
 	return SCI_SUCCESS;
 }
 
@@ -2747,9 +2747,7 @@ enum sci_task_status sci_controller_start_task(struct isci_host *ihost,
 		return SCI_SUCCESS;
 	case SCI_SUCCESS:
 		set_bit(IREQ_ACTIVE, &ireq->flags);
-
-		sci_controller_post_request(ihost,
-					    sci_request_get_post_context(ireq));
+		sci_controller_post_request(ihost, ireq->post_context);
 		break;
 	default:
 		break;
diff --git a/drivers/scsi/isci/host.h b/drivers/scsi/isci/host.h
index d87f21de180..a72d2be2445 100644
--- a/drivers/scsi/isci/host.h
+++ b/drivers/scsi/isci/host.h
@@ -172,6 +172,7 @@ struct isci_host {
 	/* XXX kill */
bool phy_startup_timer_pending; u32 next_phy_to_start; + /* XXX convert to unsigned long and use bitops */ u8 invalid_phy_mask; /* TODO attempt dynamic interrupt coalescing scheme */ @@ -359,13 +360,8 @@ static inline struct isci_host *dev_to_ihost(struct domain_device *dev) return dev->port->ha->lldd_ha; } -/** - * sci_controller_get_protocol_engine_group() - - * - * This macro returns the protocol engine group for this controller object. - * Presently we only support protocol engine group 0 so just return that - */ -#define sci_controller_get_protocol_engine_group(controller) 0 +/* we always use protocol engine group zero */ +#define ISCI_PEG 0 /* see sci_controller_io_tag_allocate|free for how seq and tci are built */ #define ISCI_TAG(seq, tci) (((u16) (seq)) << 12 | tci) @@ -385,16 +381,6 @@ static inline int sci_remote_device_node_count(struct isci_remote_device *idev) return SCU_SSP_REMOTE_NODE_COUNT; } -/** - * sci_controller_set_invalid_phy() - - * - * This macro will set the bit in the invalid phy mask for this controller - * object. This is used to control messages reported for invalid link up - * notifications. - */ -#define sci_controller_set_invalid_phy(controller, phy) \ - ((controller)->invalid_phy_mask |= (1 << (phy)->phy_index)) - /** * sci_controller_clear_invalid_phy() - * diff --git a/drivers/scsi/isci/isci.h b/drivers/scsi/isci/isci.h index 3afccfcb94e..d1de63312e7 100644 --- a/drivers/scsi/isci/isci.h +++ b/drivers/scsi/isci/isci.h @@ -73,11 +73,6 @@ #define SCI_CONTROLLER_INVALID_IO_TAG 0xFFFF -enum sci_controller_mode { - SCI_MODE_SPEED, - SCI_MODE_SIZE /* deprecated */ -}; - #define SCI_MAX_PHYS (4UL) #define SCI_MAX_PORTS SCI_MAX_PHYS #define SCI_MAX_SMP_PHYS (384) /* not silicon constrained */ diff --git a/drivers/scsi/isci/phy.c b/drivers/scsi/isci/phy.c index 0df9f713f48..e56080af78f 100644 --- a/drivers/scsi/isci/phy.c +++ b/drivers/scsi/isci/phy.c @@ -265,10 +265,11 @@ done: * port (i.e. it's contained in the dummy port). !NULL All other * values indicate a handle/pointer to the port containing the phy. 
*/ -struct isci_port *phy_get_non_dummy_port( - struct isci_phy *iphy) +struct isci_port *phy_get_non_dummy_port(struct isci_phy *iphy) { - if (sci_port_get_index(iphy->owning_port) == SCIC_SDS_DUMMY_PORT) + struct isci_port *iport = iphy->owning_port; + + if (iport->physical_port_index == SCIC_SDS_DUMMY_PORT) return NULL; return iphy->owning_port; @@ -858,10 +859,9 @@ enum sci_status sci_phy_frame_handler(struct isci_phy *iphy, u32 frame_index) struct dev_to_host_fis *frame_header; u32 *fis_frame_data; - result = sci_unsolicited_frame_control_get_header( - &(sci_phy_get_controller(iphy)->uf_control), - frame_index, - (void **)&frame_header); + result = sci_unsolicited_frame_control_get_header(&ihost->uf_control, + frame_index, + (void **)&frame_header); if (result != SCI_SUCCESS) return result; @@ -1090,6 +1090,8 @@ static void scu_link_layer_tx_hard_reset( static void sci_phy_stopped_state_enter(struct sci_base_state_machine *sm) { struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); + struct isci_port *iport = iphy->owning_port; + struct isci_host *ihost = iport->owning_controller; /* * @todo We need to get to the controller to place this PE in a @@ -1100,14 +1102,14 @@ static void sci_phy_stopped_state_enter(struct sci_base_state_machine *sm) scu_link_layer_stop_protocol_engine(iphy); if (iphy->sm.previous_state_id != SCI_PHY_INITIAL) - sci_controller_link_down(sci_phy_get_controller(iphy), - phy_get_non_dummy_port(iphy), - iphy); + sci_controller_link_down(ihost, phy_get_non_dummy_port(iphy), iphy); } static void sci_phy_starting_state_enter(struct sci_base_state_machine *sm) { struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); + struct isci_port *iport = iphy->owning_port; + struct isci_host *ihost = iport->owning_controller; scu_link_layer_stop_protocol_engine(iphy); scu_link_layer_start_oob(iphy); @@ -1117,9 +1119,7 @@ static void sci_phy_starting_state_enter(struct sci_base_state_machine *sm) iphy->bcn_received_while_port_unassigned = false; if (iphy->sm.previous_state_id == SCI_PHY_READY) - sci_controller_link_down(sci_phy_get_controller(iphy), - phy_get_non_dummy_port(iphy), - iphy); + sci_controller_link_down(ihost, phy_get_non_dummy_port(iphy), iphy); sci_change_state(&iphy->sm, SCI_PHY_SUB_INITIAL); } @@ -1127,11 +1127,10 @@ static void sci_phy_starting_state_enter(struct sci_base_state_machine *sm) static void sci_phy_ready_state_enter(struct sci_base_state_machine *sm) { struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); + struct isci_port *iport = iphy->owning_port; + struct isci_host *ihost = iport->owning_controller; - sci_controller_link_up(sci_phy_get_controller(iphy), - phy_get_non_dummy_port(iphy), - iphy); - + sci_controller_link_up(ihost, phy_get_non_dummy_port(iphy), iphy); } static void sci_phy_ready_state_exit(struct sci_base_state_machine *sm) diff --git a/drivers/scsi/isci/phy.h b/drivers/scsi/isci/phy.h index 5d2c1b4906a..67699c8e321 100644 --- a/drivers/scsi/isci/phy.h +++ b/drivers/scsi/isci/phy.h @@ -440,23 +440,6 @@ enum sci_phy_states { SCI_PHY_FINAL, }; -/** - * sci_phy_get_index() - - * - * This macro returns the phy index for the specified phy - */ -#define sci_phy_get_index(phy) \ - ((phy)->phy_index) - -/** - * sci_phy_get_controller() - This macro returns the controller for this - * phy - * - * - */ -#define sci_phy_get_controller(phy) \ - (sci_port_get_controller((phy)->owning_port)) - void sci_phy_construct( struct isci_phy *iphy, struct isci_port *iport, diff --git a/drivers/scsi/isci/port.c b/drivers/scsi/isci/port.c 
index 1822ed68409..8f6f9b77e41 100644 --- a/drivers/scsi/isci/port.c +++ b/drivers/scsi/isci/port.c @@ -654,7 +654,7 @@ static void sci_port_activate_phy(struct isci_port *iport, struct isci_phy *iphy void sci_port_deactivate_phy(struct isci_port *iport, struct isci_phy *iphy, bool do_notify_user) { - struct isci_host *ihost = sci_port_get_controller(iport); + struct isci_host *ihost = iport->owning_controller; iport->active_phy_mask &= ~(1 << iphy->phy_index); @@ -678,7 +678,7 @@ static void sci_port_invalid_link_up(struct isci_port *iport, struct isci_phy *i * invalid link. */ if ((ihost->invalid_phy_mask & (1 << iphy->phy_index)) == 0) { - sci_controller_set_invalid_phy(ihost, iphy); + ihost->invalid_phy_mask |= 1 << iphy->phy_index; dev_warn(&ihost->pdev->dev, "Invalid link up!\n"); } } diff --git a/drivers/scsi/isci/port.h b/drivers/scsi/isci/port.h index 4c4ab8126d9..b50ecd4e8f9 100644 --- a/drivers/scsi/isci/port.h +++ b/drivers/scsi/isci/port.h @@ -210,23 +210,6 @@ enum sci_port_states { }; -/** - * sci_port_get_controller() - - * - * Helper macro to get the owning controller of this port - */ -#define sci_port_get_controller(this_port) \ - ((this_port)->owning_controller) - -/** - * sci_port_get_index() - - * - * This macro returns the physical port index for this port object - */ -#define sci_port_get_index(this_port) \ - ((this_port)->physical_port_index) - - static inline void sci_port_decrement_request_count(struct isci_port *iport) { if (WARN_ONCE(iport->started_request_count == 0, diff --git a/drivers/scsi/isci/port_config.c b/drivers/scsi/isci/port_config.c index c8b16db6bbd..486b113c634 100644 --- a/drivers/scsi/isci/port_config.c +++ b/drivers/scsi/isci/port_config.c @@ -367,10 +367,10 @@ static void sci_mpc_agent_link_up(struct isci_host *ihost, if (!iport) return; - port_agent->phy_ready_mask |= (1 << sci_phy_get_index(iphy)); + port_agent->phy_ready_mask |= (1 << iphy->phy_index); sci_port_link_up(iport, iphy); - if ((iport->active_phy_mask & (1 << sci_phy_get_index(iphy)))) - port_agent->phy_configured_mask |= (1 << sci_phy_get_index(iphy)); + if ((iport->active_phy_mask & (1 << iphy->phy_index))) + port_agent->phy_configured_mask |= (1 << iphy->phy_index); } /** @@ -404,10 +404,8 @@ static void sci_mpc_agent_link_down( * rebuilding the port with the phys that remain in the ready * state. */ - port_agent->phy_ready_mask &= - ~(1 << sci_phy_get_index(iphy)); - port_agent->phy_configured_mask &= - ~(1 << sci_phy_get_index(iphy)); + port_agent->phy_ready_mask &= ~(1 << iphy->phy_index); + port_agent->phy_configured_mask &= ~(1 << iphy->phy_index); /* * Check to see if there are more phys waiting to be @@ -643,7 +641,7 @@ static void sci_apc_agent_link_down( struct isci_port *iport, struct isci_phy *iphy) { - port_agent->phy_ready_mask &= ~(1 << sci_phy_get_index(iphy)); + port_agent->phy_ready_mask &= ~(1 << iphy->phy_index); if (!iport) return; diff --git a/drivers/scsi/isci/remote_device.c b/drivers/scsi/isci/remote_device.c index 8c752abb433..85e54f54207 100644 --- a/drivers/scsi/isci/remote_device.c +++ b/drivers/scsi/isci/remote_device.c @@ -456,7 +456,7 @@ static void sci_remote_device_start_request(struct isci_remote_device *idev, sci_port_complete_io(iport, idev, ireq); else { kref_get(&idev->kref); - sci_remote_device_increment_request_count(idev); + idev->started_request_count++; } } @@ -636,7 +636,7 @@ enum sci_status sci_remote_device_complete_io(struct isci_host *ihost, * status of "DEVICE_RESET_REQUIRED", instead of "INVALID STATE". 
*/ sci_change_state(sm, SCI_STP_DEV_AWAIT_RESET); - } else if (sci_remote_device_get_request_count(idev) == 0) + } else if (idev->started_request_count == 0) sci_change_state(sm, SCI_STP_DEV_IDLE); break; case SCI_SMP_DEV_CMD: @@ -650,10 +650,10 @@ enum sci_status sci_remote_device_complete_io(struct isci_host *ihost, if (status != SCI_SUCCESS) break; - if (sci_remote_device_get_request_count(idev) == 0) + if (idev->started_request_count == 0) sci_remote_node_context_destruct(&idev->rnc, - rnc_destruct_done, - idev); + rnc_destruct_done, + idev); break; } @@ -761,26 +761,17 @@ enum sci_status sci_remote_device_start_task(struct isci_host *ihost, return status; } -/** - * - * @sci_dev: - * @request: - * - * This method takes the request and bulids an appropriate SCU context for the - * request and then requests the controller to post the request. none - */ -void sci_remote_device_post_request( - struct isci_remote_device *idev, - u32 request) +void sci_remote_device_post_request(struct isci_remote_device *idev, u32 request) { + struct isci_port *iport = idev->owning_port; u32 context; - context = sci_remote_device_build_command_context(idev, request); + context = request | + (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) | + (iport->physical_port_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) | + idev->rnc.remote_node_index; - sci_controller_post_request( - sci_remote_device_get_controller(idev), - context - ); + sci_controller_post_request(iport->owning_controller, context); } /* called once the remote node context has transisitioned to a @@ -893,7 +884,7 @@ static void sci_remote_device_stopped_state_enter(struct sci_base_state_machine static void sci_remote_device_starting_state_enter(struct sci_base_state_machine *sm) { struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); - struct isci_host *ihost = sci_remote_device_get_controller(idev); + struct isci_host *ihost = idev->owning_port->owning_controller; isci_remote_device_not_ready(ihost, idev, SCIC_REMOTE_DEVICE_NOT_READY_START_REQUESTED); @@ -961,7 +952,7 @@ static void sci_stp_remote_device_ready_idle_substate_enter(struct sci_base_stat static void sci_stp_remote_device_ready_cmd_substate_enter(struct sci_base_state_machine *sm) { struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); - struct isci_host *ihost = sci_remote_device_get_controller(idev); + struct isci_host *ihost = idev->owning_port->owning_controller; BUG_ON(idev->working_request == NULL); @@ -972,7 +963,7 @@ static void sci_stp_remote_device_ready_cmd_substate_enter(struct sci_base_state static void sci_stp_remote_device_ready_ncq_error_substate_enter(struct sci_base_state_machine *sm) { struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); - struct isci_host *ihost = sci_remote_device_get_controller(idev); + struct isci_host *ihost = idev->owning_port->owning_controller; if (idev->not_ready_reason == SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED) isci_remote_device_not_ready(ihost, idev, @@ -982,7 +973,7 @@ static void sci_stp_remote_device_ready_ncq_error_substate_enter(struct sci_base static void sci_smp_remote_device_ready_idle_substate_enter(struct sci_base_state_machine *sm) { struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); - struct isci_host *ihost = sci_remote_device_get_controller(idev); + struct isci_host *ihost = idev->owning_port->owning_controller; isci_remote_device_ready(ihost, idev); } @@ -990,7 +981,7 @@ static void 
sci_smp_remote_device_ready_idle_substate_enter(struct sci_base_stat static void sci_smp_remote_device_ready_cmd_substate_enter(struct sci_base_state_machine *sm) { struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); - struct isci_host *ihost = sci_remote_device_get_controller(idev); + struct isci_host *ihost = idev->owning_port->owning_controller; BUG_ON(idev->working_request == NULL); diff --git a/drivers/scsi/isci/remote_device.h b/drivers/scsi/isci/remote_device.h index fa9a0e6cc30..57ccfc3d6ad 100644 --- a/drivers/scsi/isci/remote_device.h +++ b/drivers/scsi/isci/remote_device.h @@ -305,91 +305,18 @@ static inline bool dev_is_expander(struct domain_device *dev) return dev->dev_type == EDGE_DEV || dev->dev_type == FANOUT_DEV; } -/** - * sci_remote_device_increment_request_count() - - * - * This macro incrments the request count for this device - */ -#define sci_remote_device_increment_request_count(idev) \ - ((idev)->started_request_count++) - -/** - * sci_remote_device_decrement_request_count() - - * - * This macro decrements the request count for this device. This count will - * never decrment past 0. - */ -#define sci_remote_device_decrement_request_count(idev) \ - ((idev)->started_request_count > 0 ? \ - (idev)->started_request_count-- : 0) - -/** - * sci_remote_device_get_request_count() - - * - * This is a helper macro to return the current device request count. - */ -#define sci_remote_device_get_request_count(idev) \ - ((idev)->started_request_count) - -/** - * sci_remote_device_get_controller() - - * - * This macro returns the controller object that contains this device object - */ -#define sci_remote_device_get_controller(idev) \ - sci_port_get_controller(sci_remote_device_get_port(idev)) - -/** - * sci_remote_device_get_port() - - * - * This macro returns the owning port of this device - */ -#define sci_remote_device_get_port(idev) \ - ((idev)->owning_port) - -/** - * sci_remote_device_get_controller_peg() - - * - * This macro returns the controllers protocol engine group - */ -#define sci_remote_device_get_controller_peg(idev) \ - (\ - sci_controller_get_protocol_engine_group(\ - sci_port_get_controller(\ - sci_remote_device_get_port(idev) \ - ) \ - ) \ - ) - -/** - * sci_remote_device_get_index() - - * - * This macro returns the remote node index for this device object - */ -#define sci_remote_device_get_index(idev) \ - ((idev)->rnc.remote_node_index) - -/** - * sci_remote_device_build_command_context() - - * - * This macro builds a remote device context for the SCU post request operation - */ -#define sci_remote_device_build_command_context(device, command) \ - ((command) \ - | (sci_remote_device_get_controller_peg((device)) << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) \ - | ((device)->owning_port->physical_port_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) \ - | (sci_remote_device_get_index((device))) \ - ) - -/** - * sci_remote_device_set_working_request() - - * - * This macro makes the working request assingment for the remote device - * object. To clear the working request use this macro with a NULL request - * object. 
- */ -#define sci_remote_device_set_working_request(device, request) \ - ((device)->working_request = (request)) +static inline void sci_remote_device_decrement_request_count(struct isci_remote_device *idev) +{ + /* XXX delete this voodoo when converting to the top-level device + * reference count + */ + if (WARN_ONCE(idev->started_request_count == 0, + "%s: tried to decrement started_request_count past 0!?", + __func__)) + /* pass */; + else + idev->started_request_count--; +} enum sci_status sci_remote_device_frame_handler( struct isci_remote_device *idev, diff --git a/drivers/scsi/isci/remote_node_context.c b/drivers/scsi/isci/remote_node_context.c index c2dfd5a7218..748e8339d1e 100644 --- a/drivers/scsi/isci/remote_node_context.c +++ b/drivers/scsi/isci/remote_node_context.c @@ -111,7 +111,7 @@ static void sci_remote_node_context_construct_buffer(struct sci_remote_node_cont struct isci_host *ihost; __le64 sas_addr; - ihost = sci_remote_device_get_controller(idev); + ihost = idev->owning_port->owning_controller; rnc = sci_rnc_by_id(ihost, rni); memset(rnc, 0, sizeof(union scu_remote_node_context) diff --git a/drivers/scsi/isci/remote_node_context.h b/drivers/scsi/isci/remote_node_context.h index b475c5c2664..41580ad1252 100644 --- a/drivers/scsi/isci/remote_node_context.h +++ b/drivers/scsi/isci/remote_node_context.h @@ -204,9 +204,6 @@ void sci_remote_node_context_construct(struct sci_remote_node_context *rnc, bool sci_remote_node_context_is_ready( struct sci_remote_node_context *sci_rnc); -#define sci_remote_node_context_get_remote_node_index(rcn) \ - ((rnc)->remote_node_index) - enum sci_status sci_remote_node_context_event_handler(struct sci_remote_node_context *sci_rnc, u32 event_code); enum sci_status sci_remote_node_context_destruct(struct sci_remote_node_context *sci_rnc, diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c index bcb3c08c19a..7c500bb6a8e 100644 --- a/drivers/scsi/isci/request.c +++ b/drivers/scsi/isci/request.c @@ -211,22 +211,21 @@ static void scu_ssp_reqeust_construct_task_context( struct isci_remote_device *idev; struct isci_port *iport; - idev = sci_request_get_device(ireq); - iport = sci_request_get_port(ireq); + idev = ireq->target_device; + iport = idev->owning_port; /* Fill in the TC with the its required data */ task_context->abort = 0; task_context->priority = 0; task_context->initiator_request = 1; task_context->connection_rate = idev->connection_rate; - task_context->protocol_engine_index = - sci_controller_get_protocol_engine_group(controller); - task_context->logical_port_index = sci_port_get_index(iport); + task_context->protocol_engine_index = ISCI_PEG; + task_context->logical_port_index = iport->physical_port_index; task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SSP; task_context->valid = SCU_TASK_CONTEXT_VALID; task_context->context_type = SCU_TASK_CONTEXT_TYPE; - task_context->remote_node_index = sci_remote_device_get_index(idev); + task_context->remote_node_index = idev->rnc.remote_node_index; task_context->command_code = 0; task_context->link_layer_control = 0; @@ -242,9 +241,8 @@ static void scu_ssp_reqeust_construct_task_context( task_context->task_phase = 0x01; ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC | - (sci_controller_get_protocol_engine_group(controller) << - SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) | - (sci_port_get_index(iport) << + (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) | + (iport->physical_port_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) | 
ISCI_TAG_TCI(ireq->io_tag)); @@ -349,23 +347,21 @@ static void scu_sata_reqeust_construct_task_context( struct isci_remote_device *idev; struct isci_port *iport; - idev = sci_request_get_device(ireq); - iport = sci_request_get_port(ireq); + idev = ireq->target_device; + iport = idev->owning_port; /* Fill in the TC with the its required data */ task_context->abort = 0; task_context->priority = SCU_TASK_PRIORITY_NORMAL; task_context->initiator_request = 1; task_context->connection_rate = idev->connection_rate; - task_context->protocol_engine_index = - sci_controller_get_protocol_engine_group(controller); - task_context->logical_port_index = - sci_port_get_index(iport); + task_context->protocol_engine_index = ISCI_PEG; + task_context->logical_port_index = iport->physical_port_index; task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_STP; task_context->valid = SCU_TASK_CONTEXT_VALID; task_context->context_type = SCU_TASK_CONTEXT_TYPE; - task_context->remote_node_index = sci_remote_device_get_index(idev); + task_context->remote_node_index = idev->rnc.remote_node_index; task_context->command_code = 0; task_context->link_layer_control = 0; @@ -385,11 +381,10 @@ static void scu_sata_reqeust_construct_task_context( task_context->type.words[0] = *(u32 *)&ireq->stp.cmd; ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC | - (sci_controller_get_protocol_engine_group(controller) << - SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) | - (sci_port_get_index(iport) << - SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) | - ISCI_TAG_TCI(ireq->io_tag)); + (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) | + (iport->physical_port_index << + SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) | + ISCI_TAG_TCI(ireq->io_tag)); /* * Copy the physical address for the command buffer to the SCU Task * Context. 
We must offset the command buffer by 4 bytes because the @@ -716,10 +711,8 @@ sci_io_request_terminate(struct isci_request *ireq) switch (state) { case SCI_REQ_CONSTRUCTED: - sci_request_set_status(ireq, - SCU_TASK_DONE_TASK_ABORT, - SCI_FAILURE_IO_TERMINATED); - + ireq->scu_status = SCU_TASK_DONE_TASK_ABORT; + ireq->sci_status = SCI_FAILURE_IO_TERMINATED; sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); return SCI_SUCCESS; case SCI_REQ_STARTED: @@ -848,9 +841,8 @@ request_started_state_tc_event(struct isci_request *ireq, */ switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): - sci_request_set_status(ireq, - SCU_TASK_DONE_GOOD, - SCI_SUCCESS); + ireq->scu_status = SCU_TASK_DONE_GOOD; + ireq->sci_status = SCI_SUCCESS; break; case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_EARLY_RESP): { /* There are times when the SCU hardware will return an early @@ -868,13 +860,11 @@ request_started_state_tc_event(struct isci_request *ireq, word_cnt); if (resp->status == 0) { - sci_request_set_status(ireq, - SCU_TASK_DONE_GOOD, - SCI_SUCCESS_IO_DONE_EARLY); + ireq->scu_status = SCU_TASK_DONE_GOOD; + ireq->sci_status = SCI_SUCCESS_IO_DONE_EARLY; } else { - sci_request_set_status(ireq, - SCU_TASK_DONE_CHECK_RESPONSE, - SCI_FAILURE_IO_RESPONSE_VALID); + ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; + ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID; } break; } @@ -885,9 +875,8 @@ request_started_state_tc_event(struct isci_request *ireq, &ireq->ssp.rsp, word_cnt); - sci_request_set_status(ireq, - SCU_TASK_DONE_CHECK_RESPONSE, - SCI_FAILURE_IO_RESPONSE_VALID); + ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; + ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID; break; } @@ -900,13 +889,12 @@ request_started_state_tc_event(struct isci_request *ireq, datapres = resp_iu->datapres; if (datapres == 1 || datapres == 2) { - sci_request_set_status(ireq, - SCU_TASK_DONE_CHECK_RESPONSE, - SCI_FAILURE_IO_RESPONSE_VALID); - } else - sci_request_set_status(ireq, - SCU_TASK_DONE_GOOD, - SCI_SUCCESS); + ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; + ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID; + } else { + ireq->scu_status = SCU_TASK_DONE_GOOD; + ireq->sci_status = SCI_SUCCESS; + } break; /* only stp device gets suspended. 
*/ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO): @@ -921,15 +909,13 @@ request_started_state_tc_event(struct isci_request *ireq, case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR): case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDB_ERR): if (ireq->protocol == SCIC_STP_PROTOCOL) { - sci_request_set_status(ireq, - SCU_GET_COMPLETION_TL_STATUS(completion_code) >> - SCU_COMPLETION_TL_STATUS_SHIFT, - SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED); + ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >> + SCU_COMPLETION_TL_STATUS_SHIFT; + ireq->sci_status = SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED; } else { - sci_request_set_status(ireq, - SCU_GET_COMPLETION_TL_STATUS(completion_code) >> - SCU_COMPLETION_TL_STATUS_SHIFT, - SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); + ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >> + SCU_COMPLETION_TL_STATUS_SHIFT; + ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; } break; @@ -944,10 +930,9 @@ request_started_state_tc_event(struct isci_request *ireq, case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY): case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED): case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED): - sci_request_set_status(ireq, - SCU_GET_COMPLETION_TL_STATUS(completion_code) >> - SCU_COMPLETION_TL_STATUS_SHIFT, - SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED); + ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >> + SCU_COMPLETION_TL_STATUS_SHIFT; + ireq->sci_status = SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED; break; /* neither ssp nor stp gets suspended. */ @@ -967,11 +952,9 @@ request_started_state_tc_event(struct isci_request *ireq, case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_IIT_ENTRY_NV): case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RNCNV_OUTBOUND): default: - sci_request_set_status( - ireq, - SCU_GET_COMPLETION_TL_STATUS(completion_code) >> - SCU_COMPLETION_TL_STATUS_SHIFT, - SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); + ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >> + SCU_COMPLETION_TL_STATUS_SHIFT; + ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; break; } @@ -991,9 +974,8 @@ request_aborting_state_tc_event(struct isci_request *ireq, switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { case (SCU_TASK_DONE_GOOD << SCU_COMPLETION_TL_STATUS_SHIFT): case (SCU_TASK_DONE_TASK_ABORT << SCU_COMPLETION_TL_STATUS_SHIFT): - sci_request_set_status(ireq, SCU_TASK_DONE_TASK_ABORT, - SCI_FAILURE_IO_TERMINATED); - + ireq->scu_status = SCU_TASK_DONE_TASK_ABORT; + ireq->sci_status = SCI_FAILURE_IO_TERMINATED; sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); break; @@ -1012,9 +994,8 @@ static enum sci_status ssp_task_request_await_tc_event(struct isci_request *ireq { switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): - sci_request_set_status(ireq, SCU_TASK_DONE_GOOD, - SCI_SUCCESS); - + ireq->scu_status = SCU_TASK_DONE_GOOD; + ireq->sci_status = SCI_SUCCESS; sci_change_state(&ireq->sm, SCI_REQ_TASK_WAIT_TC_RESP); break; case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO): @@ -1036,10 +1017,8 @@ static enum sci_status ssp_task_request_await_tc_event(struct isci_request *ireq * If a NAK was received, then it is up to the user to retry * the request. 
*/ - sci_request_set_status(ireq, - SCU_NORMALIZE_COMPLETION_STATUS(completion_code), - SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); - + ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code); + ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); break; } @@ -1057,12 +1036,10 @@ smp_request_await_response_tc_event(struct isci_request *ireq, * unexpected. but if the TC has success status, we * complete the IO anyway. */ - sci_request_set_status(ireq, SCU_TASK_DONE_GOOD, - SCI_SUCCESS); - + ireq->scu_status = SCU_TASK_DONE_GOOD; + ireq->sci_status = SCI_SUCCESS; sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); break; - case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_RESP_TO_ERR): case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_UFI_ERR): case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_FRM_TYPE_ERR): @@ -1074,20 +1051,16 @@ smp_request_await_response_tc_event(struct isci_request *ireq, * these SMP_XXX_XX_ERR status. For these type of error, * we ask ihost user to retry the request. */ - sci_request_set_status(ireq, SCU_TASK_DONE_SMP_RESP_TO_ERR, - SCI_FAILURE_RETRY_REQUIRED); - + ireq->scu_status = SCU_TASK_DONE_SMP_RESP_TO_ERR; + ireq->sci_status = SCI_FAILURE_RETRY_REQUIRED; sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); break; - default: /* All other completion status cause the IO to be complete. If a NAK * was received, then it is up to the user to retry the request */ - sci_request_set_status(ireq, - SCU_NORMALIZE_COMPLETION_STATUS(completion_code), - SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); - + ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code); + ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); break; } @@ -1101,9 +1074,8 @@ smp_request_await_tc_event(struct isci_request *ireq, { switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): - sci_request_set_status(ireq, SCU_TASK_DONE_GOOD, - SCI_SUCCESS); - + ireq->scu_status = SCU_TASK_DONE_GOOD; + ireq->sci_status = SCI_SUCCESS; sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); break; default: @@ -1111,10 +1083,8 @@ smp_request_await_tc_event(struct isci_request *ireq, * complete. If a NAK was received, then it is up to * the user to retry the request. */ - sci_request_set_status(ireq, - SCU_NORMALIZE_COMPLETION_STATUS(completion_code), - SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); - + ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code); + ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); break; } @@ -1171,9 +1141,8 @@ stp_request_non_data_await_h2d_tc_event(struct isci_request *ireq, { switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): - sci_request_set_status(ireq, SCU_TASK_DONE_GOOD, - SCI_SUCCESS); - + ireq->scu_status = SCU_TASK_DONE_GOOD; + ireq->sci_status = SCI_SUCCESS; sci_change_state(&ireq->sm, SCI_REQ_STP_NON_DATA_WAIT_D2H); break; @@ -1182,10 +1151,8 @@ stp_request_non_data_await_h2d_tc_event(struct isci_request *ireq, * complete. If a NAK was received, then it is up to * the user to retry the request. 
*/ - sci_request_set_status(ireq, - SCU_NORMALIZE_COMPLETION_STATUS(completion_code), - SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); - + ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code); + ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); break; } @@ -1363,10 +1330,8 @@ stp_request_pio_await_h2d_completion_tc_event(struct isci_request *ireq, switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): - sci_request_set_status(ireq, - SCU_TASK_DONE_GOOD, - SCI_SUCCESS); - + ireq->scu_status = SCU_TASK_DONE_GOOD; + ireq->sci_status = SCI_SUCCESS; sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME); break; @@ -1375,10 +1340,8 @@ stp_request_pio_await_h2d_completion_tc_event(struct isci_request *ireq, * complete. If a NAK was received, then it is up to * the user to retry the request. */ - sci_request_set_status(ireq, - SCU_NORMALIZE_COMPLETION_STATUS(completion_code), - SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); - + ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code); + ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); break; } @@ -1426,11 +1389,8 @@ pio_data_out_tx_done_tc_event(struct isci_request *ireq, * If a NAK was received, then it is up to the user to retry * the request. */ - sci_request_set_status( - ireq, - SCU_NORMALIZE_COMPLETION_STATUS(completion_code), - SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); - + ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code); + ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); break; } @@ -1438,15 +1398,6 @@ pio_data_out_tx_done_tc_event(struct isci_request *ireq, return status; } -static void sci_stp_request_udma_complete_request( - struct isci_request *ireq, - u32 scu_status, - enum sci_status sci_status) -{ - sci_request_set_status(ireq, scu_status, sci_status); - sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); -} - static enum sci_status sci_stp_request_udma_general_frame_handler(struct isci_request *ireq, u32 frame_index) { @@ -1512,13 +1463,12 @@ sci_io_request_frame_handler(struct isci_request *ireq, if (resp_iu->datapres == 0x01 || resp_iu->datapres == 0x02) { - sci_request_set_status(ireq, - SCU_TASK_DONE_CHECK_RESPONSE, - SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); - } else - sci_request_set_status(ireq, - SCU_TASK_DONE_GOOD, - SCI_SUCCESS); + ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; + ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; + } else { + ireq->scu_status = SCU_TASK_DONE_GOOD; + ireq->sci_status = SCI_SUCCESS; + } } else { /* not a response frame, why did it get forwarded? 
*/ dev_err(&ihost->pdev->dev, @@ -1567,9 +1517,8 @@ sci_io_request_frame_handler(struct isci_request *ireq, sci_swab32_cpy(((u8 *) rsp_hdr) + SMP_RESP_HDR_SZ, smp_resp, word_cnt); - sci_request_set_status(ireq, SCU_TASK_DONE_GOOD, - SCI_SUCCESS); - + ireq->scu_status = SCU_TASK_DONE_GOOD; + ireq->sci_status = SCI_SUCCESS; sci_change_state(&ireq->sm, SCI_REQ_SMP_WAIT_TC_COMP); } else { /* @@ -1584,10 +1533,8 @@ sci_io_request_frame_handler(struct isci_request *ireq, frame_index, rsp_hdr->frame_type); - sci_request_set_status(ireq, - SCU_TASK_DONE_SMP_FRM_TYPE_ERR, - SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); - + ireq->scu_status = SCU_TASK_DONE_SMP_FRM_TYPE_ERR; + ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); } @@ -1602,16 +1549,14 @@ sci_io_request_frame_handler(struct isci_request *ireq, case SCI_REQ_STP_UDMA_WAIT_D2H: /* Use the general frame handler to copy the resposne data */ - status = sci_stp_request_udma_general_frame_handler(ireq, - frame_index); + status = sci_stp_request_udma_general_frame_handler(ireq, frame_index); if (status != SCI_SUCCESS) return status; - sci_stp_request_udma_complete_request(ireq, - SCU_TASK_DONE_CHECK_RESPONSE, - SCI_FAILURE_IO_RESPONSE_VALID); - + ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; + ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID; + sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); return SCI_SUCCESS; case SCI_REQ_STP_NON_DATA_WAIT_D2H: { @@ -1645,8 +1590,8 @@ sci_io_request_frame_handler(struct isci_request *ireq, frame_buffer); /* The command has completed with error */ - sci_request_set_status(ireq, SCU_TASK_DONE_CHECK_RESPONSE, - SCI_FAILURE_IO_RESPONSE_VALID); + ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; + ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID; break; default: @@ -1655,8 +1600,8 @@ sci_io_request_frame_handler(struct isci_request *ireq, "violation occurred\n", __func__, stp_req, frame_index); - sci_request_set_status(ireq, SCU_TASK_DONE_UNEXP_FIS, - SCI_FAILURE_PROTOCOL_VIOLATION); + ireq->scu_status = SCU_TASK_DONE_UNEXP_FIS; + ireq->sci_status = SCI_FAILURE_PROTOCOL_VIOLATION; break; } @@ -1753,10 +1698,8 @@ sci_io_request_frame_handler(struct isci_request *ireq, frame_header, frame_buffer); - sci_request_set_status(ireq, - SCU_TASK_DONE_CHECK_RESPONSE, - SCI_FAILURE_IO_RESPONSE_VALID); - + ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; + ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID; sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); break; @@ -1800,10 +1743,8 @@ sci_io_request_frame_handler(struct isci_request *ireq, frame_index, frame_header->fis_type); - sci_request_set_status(ireq, - SCU_TASK_DONE_GOOD, - SCI_FAILURE_IO_REQUIRES_SCSI_ABORT); - + ireq->scu_status = SCU_TASK_DONE_GOOD; + ireq->sci_status = SCI_FAILURE_IO_REQUIRES_SCSI_ABORT; sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); /* Frame is decoded return it to the controller */ @@ -1833,10 +1774,8 @@ sci_io_request_frame_handler(struct isci_request *ireq, return status; if ((stp_req->status & ATA_BUSY) == 0) { - sci_request_set_status(ireq, - SCU_TASK_DONE_CHECK_RESPONSE, - SCI_FAILURE_IO_RESPONSE_VALID); - + ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; + ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID; sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); } else { sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME); @@ -1873,9 +1812,8 @@ sci_io_request_frame_handler(struct isci_request *ireq, frame_buffer); /* The command has completed with error */ - sci_request_set_status(ireq, - 
SCU_TASK_DONE_CHECK_RESPONSE, - SCI_FAILURE_IO_RESPONSE_VALID); + ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; + ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID; break; default: @@ -1886,9 +1824,8 @@ sci_io_request_frame_handler(struct isci_request *ireq, stp_req, frame_index); - sci_request_set_status(ireq, - SCU_TASK_DONE_UNEXP_FIS, - SCI_FAILURE_PROTOCOL_VIOLATION); + ireq->scu_status = SCU_TASK_DONE_UNEXP_FIS; + ireq->sci_status = SCI_FAILURE_PROTOCOL_VIOLATION; break; } @@ -1927,9 +1864,9 @@ static enum sci_status stp_request_udma_await_tc_event(struct isci_request *ireq switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): - sci_stp_request_udma_complete_request(ireq, - SCU_TASK_DONE_GOOD, - SCI_SUCCESS); + ireq->scu_status = SCU_TASK_DONE_GOOD; + ireq->sci_status = SCI_SUCCESS; + sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); break; case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_FIS): case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR): @@ -1941,9 +1878,9 @@ static enum sci_status stp_request_udma_await_tc_event(struct isci_request *ireq sci_remote_device_suspend(ireq->target_device, SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code))); - sci_stp_request_udma_complete_request(ireq, - SCU_TASK_DONE_CHECK_RESPONSE, - SCI_FAILURE_IO_RESPONSE_VALID); + ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; + ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID; + sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); } else { /* If we have an error completion status for the * TC then we can expect a D2H register FIS from @@ -1970,9 +1907,9 @@ static enum sci_status stp_request_udma_await_tc_event(struct isci_request *ireq /* Fall through to the default case */ default: /* All other completion status cause the IO to be complete. */ - sci_stp_request_udma_complete_request(ireq, - SCU_NORMALIZE_COMPLETION_STATUS(completion_code), - SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); + ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code); + ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; + sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); break; } @@ -1985,9 +1922,8 @@ stp_request_soft_reset_await_h2d_asserted_tc_event(struct isci_request *ireq, { switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): - sci_request_set_status(ireq, SCU_TASK_DONE_GOOD, - SCI_SUCCESS); - + ireq->scu_status = SCU_TASK_DONE_GOOD; + ireq->sci_status = SCI_SUCCESS; sci_change_state(&ireq->sm, SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG); break; @@ -1997,10 +1933,8 @@ stp_request_soft_reset_await_h2d_asserted_tc_event(struct isci_request *ireq, * If a NAK was received, then it is up to the user to retry * the request. 
*/ - sci_request_set_status(ireq, - SCU_NORMALIZE_COMPLETION_STATUS(completion_code), - SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); - + ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code); + ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); break; } @@ -2014,9 +1948,8 @@ stp_request_soft_reset_await_h2d_diagnostic_tc_event(struct isci_request *ireq, { switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): - sci_request_set_status(ireq, SCU_TASK_DONE_GOOD, - SCI_SUCCESS); - + ireq->scu_status = SCU_TASK_DONE_GOOD; + ireq->sci_status = SCI_SUCCESS; sci_change_state(&ireq->sm, SCI_REQ_STP_SOFT_RESET_WAIT_D2H); break; @@ -2025,10 +1958,8 @@ stp_request_soft_reset_await_h2d_diagnostic_tc_event(struct isci_request *ireq, * a NAK was received, then it is up to the user to retry the * request. */ - sci_request_set_status(ireq, - SCU_NORMALIZE_COMPLETION_STATUS(completion_code), - SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); - + ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code); + ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); break; } @@ -2504,7 +2435,7 @@ static void isci_request_io_request_complete(struct isci_host *ihost, completion_status); spin_lock(&request->state_lock); - request_status = isci_request_get_state(request); + request_status = request->status; /* Decode the request status. Note that if the request has been * aborted by a task management function, we don't care @@ -2904,24 +2835,21 @@ static void sci_stp_request_started_non_data_await_h2d_completion_enter(struct s { struct isci_request *ireq = container_of(sm, typeof(*ireq), sm); - sci_remote_device_set_working_request(ireq->target_device, - ireq); + ireq->target_device->working_request = ireq; } static void sci_stp_request_started_pio_await_h2d_completion_enter(struct sci_base_state_machine *sm) { struct isci_request *ireq = container_of(sm, typeof(*ireq), sm); - sci_remote_device_set_working_request(ireq->target_device, - ireq); + ireq->target_device->working_request = ireq; } static void sci_stp_request_started_soft_reset_await_h2d_asserted_completion_enter(struct sci_base_state_machine *sm) { struct isci_request *ireq = container_of(sm, typeof(*ireq), sm); - sci_remote_device_set_working_request(ireq->target_device, - ireq); + ireq->target_device->working_request = ireq; } static void sci_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter(struct sci_base_state_machine *sm) @@ -3141,8 +3069,8 @@ sci_io_request_construct_smp(struct device *dev, task_context = ireq->tc; - idev = sci_request_get_device(ireq); - iport = sci_request_get_port(ireq); + idev = ireq->target_device; + iport = idev->owning_port; /* * Fill in the TC with the its required data @@ -3151,9 +3079,8 @@ sci_io_request_construct_smp(struct device *dev, task_context->priority = 0; task_context->initiator_request = 1; task_context->connection_rate = idev->connection_rate; - task_context->protocol_engine_index = - sci_controller_get_protocol_engine_group(ihost); - task_context->logical_port_index = sci_port_get_index(iport); + task_context->protocol_engine_index = ISCI_PEG; + task_context->logical_port_index = iport->physical_port_index; task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SMP; task_context->abort = 0; task_context->valid = SCU_TASK_CONTEXT_VALID; @@ -3195,11 +3122,10 @@ sci_io_request_construct_smp(struct device *dev, 
task_context->task_phase = 0; ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC | - (sci_controller_get_protocol_engine_group(ihost) << - SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) | - (sci_port_get_index(iport) << - SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) | - ISCI_TAG_TCI(ireq->io_tag)); + (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) | + (iport->physical_port_index << + SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) | + ISCI_TAG_TCI(ireq->io_tag)); /* * Copy the physical address for the command buffer to the SCU Task * Context command buffer should not contain command header. diff --git a/drivers/scsi/isci/request.h b/drivers/scsi/isci/request.h index 08fcf98e70f..59708402781 100644 --- a/drivers/scsi/isci/request.h +++ b/drivers/scsi/isci/request.h @@ -300,58 +300,6 @@ enum sci_base_request_states { SCI_REQ_FINAL, }; -/** - * sci_request_get_controller() - - * - * This macro will return the controller for this io request object - */ -#define sci_request_get_controller(ireq) \ - ((ireq)->owning_controller) - -/** - * sci_request_get_device() - - * - * This macro will return the device for this io request object - */ -#define sci_request_get_device(ireq) \ - ((ireq)->target_device) - -/** - * sci_request_get_port() - - * - * This macro will return the port for this io request object - */ -#define sci_request_get_port(ireq) \ - sci_remote_device_get_port(sci_request_get_device(ireq)) - -/** - * sci_request_get_post_context() - - * - * This macro returns the constructed post context result for the io request. - */ -#define sci_request_get_post_context(ireq) \ - ((ireq)->post_context) - -/** - * sci_request_get_task_context() - - * - * This is a helper macro to return the os handle for this request object. - */ -#define sci_request_get_task_context(request) \ - ((request)->task_context_buffer) - -/** - * sci_request_set_status() - - * - * This macro will set the scu hardware status and sci request completion - * status for an io request. - */ -#define sci_request_set_status(request, scu_status_code, sci_status_code) \ - { \ - (request)->scu_status = (scu_status_code); \ - (request)->sci_status = (sci_status_code); \ - } - enum sci_status sci_request_start(struct isci_request *ireq); enum sci_status sci_io_request_terminate(struct isci_request *ireq); enum sci_status @@ -381,27 +329,6 @@ sci_io_request_get_dma_addr(struct isci_request *ireq, void *virt_addr) return ireq->request_daddr + (requested_addr - base_addr); } -/** - * This function gets the status of the request object. - * @request: This parameter points to the isci_request object - * - * status of the object as a isci_request_status enum. - */ -static inline enum isci_request_status -isci_request_get_state(struct isci_request *isci_request) -{ - BUG_ON(isci_request == NULL); - - /*probably a bad sign... */ - if (isci_request->status == unallocated) - dev_warn(&isci_request->isci_host->pdev->dev, - "%s: isci_request->status == unallocated\n", - __func__); - - return isci_request->status; -} - - /** * isci_request_change_state() - This function sets the status of the request * object. diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c index d040aa2f372..20112cd5b64 100644 --- a/drivers/scsi/isci/task.c +++ b/drivers/scsi/isci/task.c @@ -654,7 +654,7 @@ static void isci_terminate_request_core(struct isci_host *ihost, * needs to be detached and freed here. 
*/ spin_lock_irqsave(&isci_request->state_lock, flags); - request_status = isci_request_get_state(isci_request); + request_status = isci_request->status; if ((isci_request->ttype == io_task) /* TMFs are in their own thread */ && ((request_status == aborted) -- cgit v1.2.3-70-g09d2 From 16ba77091b44af28b3ff3318b4a2aa4fbf7d4c24 Mon Sep 17 00:00:00 2001 From: Dan Williams <dan.j.williams@intel.com> Date: Fri, 1 Jul 2011 10:52:55 -0700 Subject: isci: merge sata.[ch] into request.c Undo some needless separation. Signed-off-by: Dan Williams <dan.j.williams@intel.com> --- drivers/scsi/isci/Makefile | 2 +- drivers/scsi/isci/request.c | 71 ++++++++------ drivers/scsi/isci/request.h | 2 - drivers/scsi/isci/sata.c | 232 -------------------------------------------- drivers/scsi/isci/sata.h | 79 --------------- drivers/scsi/isci/task.c | 100 +++++++++++++++---- drivers/scsi/isci/task.h | 12 --- 7 files changed, 120 insertions(+), 378 deletions(-) delete mode 100644 drivers/scsi/isci/sata.c delete mode 100644 drivers/scsi/isci/sata.h (limited to 'drivers/scsi/isci/request.h') diff --git a/drivers/scsi/isci/Makefile b/drivers/scsi/isci/Makefile index 42449701ad9..3359e10e0d8 100644 --- a/drivers/scsi/isci/Makefile +++ b/drivers/scsi/isci/Makefile @@ -1,5 +1,5 @@ obj-$(CONFIG_SCSI_ISCI) += isci.o -isci-objs := init.o phy.o request.o sata.o \ +isci-objs := init.o phy.o request.o \ remote_device.o port.o \ host.o task.o probe_roms.o \ remote_node_context.o \ diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c index 7c500bb6a8e..33c8ed1741e 100644 --- a/drivers/scsi/isci/request.c +++ b/drivers/scsi/isci/request.c @@ -56,7 +56,6 @@ #include "isci.h" #include "task.h" #include "request.h" -#include "sata.h" #include "scu_completion_codes.h" #include "scu_event_codes.h" #include "sas.h" @@ -1092,16 +1091,6 @@ smp_request_await_tc_event(struct isci_request *ireq, return SCI_SUCCESS; } -void sci_stp_io_request_set_ncq_tag(struct isci_request *ireq, - u16 ncq_tag) -{ - /** - * @note This could be made to return an error to the user if the user - * attempts to set the NCQ tag in the wrong state. - */ - ireq->tc->type.stp.ncq_tag = ncq_tag; -} - static struct scu_sgl_element *pio_sgl_next(struct isci_stp_request *stp_req) { struct scu_sgl_element *sgl; @@ -2410,6 +2399,29 @@ static void isci_task_save_for_upper_layer_completion( } } +static void isci_request_process_stp_response(struct sas_task *task, + void *response_buffer) +{ + struct dev_to_host_fis *d2h_reg_fis = response_buffer; + struct task_status_struct *ts = &task->task_status; + struct ata_task_resp *resp = (void *)&ts->buf[0]; + + resp->frame_len = le16_to_cpu(*(__le16 *)(response_buffer + 6)); + memcpy(&resp->ending_fis[0], response_buffer + 16, 24); + ts->buf_valid_size = sizeof(*resp); + + /** + * If the device fault bit is set in the status register, then + * set the sense data and return. 
+ */ + if (d2h_reg_fis->status & ATA_DF) + ts->stat = SAS_PROTO_RESPONSE; + else + ts->stat = SAM_STAT_GOOD; + + ts->resp = SAS_TASK_COMPLETE; +} + static void isci_request_io_request_complete(struct isci_host *ihost, struct isci_request *request, enum sci_io_status completion_status) @@ -2985,34 +2997,29 @@ static enum sci_status isci_request_ssp_request_construct( return status; } -static enum sci_status isci_request_stp_request_construct( - struct isci_request *request) +static enum sci_status isci_request_stp_request_construct(struct isci_request *ireq) { - struct sas_task *task = isci_request_access_task(request); + struct sas_task *task = isci_request_access_task(ireq); + struct host_to_dev_fis *fis = &ireq->stp.cmd; + struct ata_queued_cmd *qc = task->uldd_task; enum sci_status status; - struct host_to_dev_fis *register_fis; - dev_dbg(&request->isci_host->pdev->dev, - "%s: request = %p\n", + dev_dbg(&ireq->isci_host->pdev->dev, + "%s: ireq = %p\n", __func__, - request); - - /* Get the host_to_dev_fis from the core and copy - * the fis from the task into it. - */ - register_fis = isci_sata_task_to_fis_copy(task); + ireq); - status = sci_io_request_construct_basic_sata(request); + memcpy(fis, &task->ata_task.fis, sizeof(struct host_to_dev_fis)); + if (!task->ata_task.device_control_reg_update) + fis->flags |= 0x80; + fis->flags &= 0xF0; - /* Set the ncq tag in the fis, from the queue - * command in the task. - */ - if (isci_sata_is_task_ncq(task)) { + status = sci_io_request_construct_basic_sata(ireq); - isci_sata_set_ncq_tag( - register_fis, - task - ); + if (qc && (qc->tf.command == ATA_CMD_FPDMA_WRITE || + qc->tf.command == ATA_CMD_FPDMA_READ)) { + fis->sector_count = qc->tag << 3; + ireq->tc->type.stp.ncq_tag = qc->tag; } return status; diff --git a/drivers/scsi/isci/request.h b/drivers/scsi/isci/request.h index 59708402781..11bc279457a 100644 --- a/drivers/scsi/isci/request.h +++ b/drivers/scsi/isci/request.h @@ -435,8 +435,6 @@ enum sci_status sci_task_request_construct_ssp(struct isci_request *ireq); enum sci_status sci_task_request_construct_sata(struct isci_request *ireq); -void -sci_stp_io_request_set_ncq_tag(struct isci_request *ireq, u16 ncq_tag); void sci_smp_request_copy_response(struct isci_request *ireq); static inline int isci_task_is_ncq_recovery(struct sas_task *task) diff --git a/drivers/scsi/isci/sata.c b/drivers/scsi/isci/sata.c deleted file mode 100644 index 47b96c21548..00000000000 --- a/drivers/scsi/isci/sata.c +++ /dev/null @@ -1,232 +0,0 @@ -/* - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - * The full GNU General Public License is included in this distribution - * in the file called LICENSE.GPL. 
- * - * BSD LICENSE - * - * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#include <scsi/sas.h> -#include "isci.h" -#include "remote_device.h" -#include "task.h" -#include "request.h" -#include "sata.h" - -/** - * isci_sata_task_to_fis_copy() - This function gets the host_to_dev_fis from - * the core and copies the fis from the task into it. - * @task: This parameter is a pointer to the task struct from libsas. - * - * pointer to the host_to_dev_fis from the core request object. - */ -struct host_to_dev_fis *isci_sata_task_to_fis_copy(struct sas_task *task) -{ - struct isci_request *ireq = task->lldd_task; - struct host_to_dev_fis *fis = &ireq->stp.cmd; - - memcpy(fis, &task->ata_task.fis, sizeof(struct host_to_dev_fis)); - - if (!task->ata_task.device_control_reg_update) - fis->flags |= 0x80; - - fis->flags &= 0xF0; - - return fis; -} - -/** - * isci_sata_is_task_ncq() - This function determines if the given stp task is - * a ncq request. - * @task: This parameter is a pointer to the task struct from libsas. - * - * true if the task is ncq - */ -bool isci_sata_is_task_ncq(struct sas_task *task) -{ - struct ata_queued_cmd *qc = task->uldd_task; - - bool ret = (qc && - (qc->tf.command == ATA_CMD_FPDMA_WRITE || - qc->tf.command == ATA_CMD_FPDMA_READ)); - - return ret; -} - -/** - * isci_sata_set_ncq_tag() - This function sets the ncq tag field in the - * host_to_dev_fis equal to the tag in the queue command in the task. - * @task: This parameter is a pointer to the task struct from libsas. - * @register_fis: This parameter is a pointer to the host_to_dev_fis from the - * core request object. 
- * - */ -void isci_sata_set_ncq_tag( - struct host_to_dev_fis *register_fis, - struct sas_task *task) -{ - struct ata_queued_cmd *qc = task->uldd_task; - struct isci_request *request = task->lldd_task; - - register_fis->sector_count = qc->tag << 3; - sci_stp_io_request_set_ncq_tag(request, qc->tag); -} - -/** - * isci_request_process_stp_response() - This function sets the status and - * response, in the task struct, from the request object for the upper layer - * driver. - * @sas_task: This parameter is the task struct from the upper layer driver. - * @response_buffer: This parameter points to the response of the completed - * request. - * - * none. - */ -void isci_request_process_stp_response(struct sas_task *task, - void *response_buffer) -{ - struct dev_to_host_fis *d2h_reg_fis = response_buffer; - struct task_status_struct *ts = &task->task_status; - struct ata_task_resp *resp = (void *)&ts->buf[0]; - - resp->frame_len = le16_to_cpu(*(__le16 *)(response_buffer + 6)); - memcpy(&resp->ending_fis[0], response_buffer + 16, 24); - ts->buf_valid_size = sizeof(*resp); - - /** - * If the device fault bit is set in the status register, then - * set the sense data and return. - */ - if (d2h_reg_fis->status & ATA_DF) - ts->stat = SAS_PROTO_RESPONSE; - else - ts->stat = SAM_STAT_GOOD; - - ts->resp = SAS_TASK_COMPLETE; -} - -enum sci_status isci_sata_management_task_request_build(struct isci_request *ireq) -{ - struct isci_tmf *isci_tmf; - enum sci_status status; - - if (tmf_task != ireq->ttype) - return SCI_FAILURE; - - isci_tmf = isci_request_access_tmf(ireq); - - switch (isci_tmf->tmf_code) { - - case isci_tmf_sata_srst_high: - case isci_tmf_sata_srst_low: { - struct host_to_dev_fis *fis = &ireq->stp.cmd; - - memset(fis, 0, sizeof(*fis)); - - fis->fis_type = 0x27; - fis->flags &= ~0x80; - fis->flags &= 0xF0; - if (isci_tmf->tmf_code == isci_tmf_sata_srst_high) - fis->control |= ATA_SRST; - else - fis->control &= ~ATA_SRST; - break; - } - /* other management commnd go here... */ - default: - return SCI_FAILURE; - } - - /* core builds the protocol specific request - * based on the h2d fis. - */ - status = sci_task_request_construct_sata(ireq); - - return status; -} - -/** - * isci_task_send_lu_reset_sata() - This function is called by of the SAS - * Domain Template functions. This is one of the Task Management functoins - * called by libsas, to reset the given SAS lun. Note the assumption that - * while this call is executing, no I/O will be sent by the host to the - * device. - * @lun: This parameter specifies the lun to be reset. - * - * status, zero indicates success. - */ -int isci_task_send_lu_reset_sata( - struct isci_host *isci_host, - struct isci_remote_device *isci_device, - u8 *lun) -{ - struct isci_tmf tmf; - int ret = TMF_RESP_FUNC_FAILED; - - /* Send the soft reset to the target */ - #define ISCI_SRST_TIMEOUT_MS 25000 /* 25 second timeout. */ - isci_task_build_tmf(&tmf, isci_tmf_sata_srst_high, NULL, NULL); - - ret = isci_task_execute_tmf(isci_host, isci_device, &tmf, - ISCI_SRST_TIMEOUT_MS); - - if (ret != TMF_RESP_FUNC_COMPLETE) { - dev_warn(&isci_host->pdev->dev, - "%s: Assert SRST failed (%p) = %x", - __func__, - isci_device, - ret); - - /* Return the failure so that the LUN reset is escalated - * to a target reset. 
- */ - } - return ret; -} diff --git a/drivers/scsi/isci/sata.h b/drivers/scsi/isci/sata.h deleted file mode 100644 index 1b89f1f7577..00000000000 --- a/drivers/scsi/isci/sata.h +++ /dev/null @@ -1,79 +0,0 @@ -/* - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - * The full GNU General Public License is included in this distribution - * in the file called LICENSE.GPL. - * - * BSD LICENSE - * - * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - -struct host_to_dev_fis *isci_sata_task_to_fis_copy( - struct sas_task *task); - -bool isci_sata_is_task_ncq( - struct sas_task *task); - -void isci_sata_set_ncq_tag( - struct host_to_dev_fis *register_fis, - struct sas_task *task); - -void isci_request_process_stp_response( - struct sas_task *task, - void *response_buffer); - -u8 isci_sata_get_sat_protocol( - struct isci_request *isci_request); - -enum sci_status isci_sata_management_task_request_build( - struct isci_request *isci_request); - -int isci_task_send_lu_reset_sata( - struct isci_host *isci_host, - struct isci_remote_device *isci_device, - u8 *lun); diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c index 20112cd5b64..5d962b6b03e 100644 --- a/drivers/scsi/isci/task.c +++ b/drivers/scsi/isci/task.c @@ -61,7 +61,6 @@ #include "remote_node_context.h" #include "isci.h" #include "request.h" -#include "sata.h" #include "task.h" #include "host.h" @@ -238,6 +237,46 @@ int isci_task_execute_task(struct sas_task *task, int num, gfp_t gfp_flags) return 0; } +static enum sci_status isci_sata_management_task_request_build(struct isci_request *ireq) +{ + struct isci_tmf *isci_tmf; + enum sci_status status; + + if (tmf_task != ireq->ttype) + return SCI_FAILURE; + + isci_tmf = isci_request_access_tmf(ireq); + + switch (isci_tmf->tmf_code) { + + case isci_tmf_sata_srst_high: + case isci_tmf_sata_srst_low: { + struct host_to_dev_fis *fis = &ireq->stp.cmd; + + memset(fis, 0, sizeof(*fis)); + + fis->fis_type = 0x27; + fis->flags &= ~0x80; + fis->flags &= 0xF0; + if (isci_tmf->tmf_code == isci_tmf_sata_srst_high) + fis->control |= ATA_SRST; + else + fis->control &= ~ATA_SRST; + break; + } + /* other management commnd go here... */ + default: + return SCI_FAILURE; + } + + /* core builds the protocol specific request + * based on the h2d fis. 
+ */ + status = sci_task_request_construct_sata(ireq); + + return status; +} + static struct isci_request *isci_task_request_build(struct isci_host *ihost, struct isci_remote_device *idev, u16 tag, struct isci_tmf *isci_tmf) @@ -287,9 +326,9 @@ static struct isci_request *isci_task_request_build(struct isci_host *ihost, return ireq; } -int isci_task_execute_tmf(struct isci_host *ihost, - struct isci_remote_device *idev, - struct isci_tmf *tmf, unsigned long timeout_ms) +static int isci_task_execute_tmf(struct isci_host *ihost, + struct isci_remote_device *idev, + struct isci_tmf *tmf, unsigned long timeout_ms) { DECLARE_COMPLETION_ONSTACK(completion); enum sci_task_status status = SCI_TASK_FAILURE; @@ -401,13 +440,12 @@ int isci_task_execute_tmf(struct isci_host *ihost, return ret; } -void isci_task_build_tmf( - struct isci_tmf *tmf, - enum isci_tmf_function_codes code, - void (*tmf_sent_cb)(enum isci_tmf_cb_state, - struct isci_tmf *, - void *), - void *cb_data) +static void isci_task_build_tmf(struct isci_tmf *tmf, + enum isci_tmf_function_codes code, + void (*tmf_sent_cb)(enum isci_tmf_cb_state, + struct isci_tmf *, + void *), + void *cb_data) { memset(tmf, 0, sizeof(*tmf)); @@ -416,16 +454,14 @@ void isci_task_build_tmf( tmf->cb_data = cb_data; } -static void isci_task_build_abort_task_tmf( - struct isci_tmf *tmf, - enum isci_tmf_function_codes code, - void (*tmf_sent_cb)(enum isci_tmf_cb_state, - struct isci_tmf *, - void *), - struct isci_request *old_request) +static void isci_task_build_abort_task_tmf(struct isci_tmf *tmf, + enum isci_tmf_function_codes code, + void (*tmf_sent_cb)(enum isci_tmf_cb_state, + struct isci_tmf *, + void *), + struct isci_request *old_request) { - isci_task_build_tmf(tmf, code, tmf_sent_cb, - (void *)old_request); + isci_task_build_tmf(tmf, code, tmf_sent_cb, old_request); tmf->io_tag = old_request->io_tag; } @@ -804,6 +840,30 @@ static int isci_task_send_lu_reset_sas( return ret; } +static int isci_task_send_lu_reset_sata(struct isci_host *ihost, + struct isci_remote_device *idev, u8 *lun) +{ + int ret = TMF_RESP_FUNC_FAILED; + struct isci_tmf tmf; + + /* Send the soft reset to the target */ + #define ISCI_SRST_TIMEOUT_MS 25000 /* 25 second timeout. */ + isci_task_build_tmf(&tmf, isci_tmf_sata_srst_high, NULL, NULL); + + ret = isci_task_execute_tmf(ihost, idev, &tmf, ISCI_SRST_TIMEOUT_MS); + + if (ret != TMF_RESP_FUNC_COMPLETE) { + dev_warn(&ihost->pdev->dev, + "%s: Assert SRST failed (%p) = %x", + __func__, idev, ret); + + /* Return the failure so that the LUN reset is escalated + * to a target reset. + */ + } + return ret; +} + /** * isci_task_lu_reset() - This function is one of the SAS Domain Template * functions. This is one of the Task Management functoins called by libsas, diff --git a/drivers/scsi/isci/task.h b/drivers/scsi/isci/task.h index 42019de2380..4a7fa90287e 100644 --- a/drivers/scsi/isci/task.h +++ b/drivers/scsi/isci/task.h @@ -211,18 +211,6 @@ int isci_queuecommand( int isci_bus_reset_handler(struct scsi_cmnd *cmd); -void isci_task_build_tmf( - struct isci_tmf *tmf, - enum isci_tmf_function_codes code, - void (*tmf_sent_cb)(enum isci_tmf_cb_state, - struct isci_tmf *, - void *), - void *cb_data); - -int isci_task_execute_tmf(struct isci_host *isci_host, - struct isci_remote_device *idev, - struct isci_tmf *tmf, unsigned long timeout_ms); - /** * enum isci_completion_selection - This enum defines the possible actions to * take with respect to a given request's notification back to libsas. -- cgit v1.2.3-70-g09d2
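
For readers scanning these hunks, the unifying pattern of the first patch is that single-use accessor macros (sci_port_get_controller(), sci_request_set_status(), sci_remote_device_get_index(), and friends) are dropped in favor of direct member access, while the one helper that carries real behavior survives as a type-checked static inline with an underflow warning. A minimal sketch of that shape, using a hypothetical stand-in struct rather than the driver's real isci_remote_device and assuming an ordinary kernel build environment for WARN_ONCE():

	#include <linux/kernel.h>	/* WARN_ONCE() */
	#include <linux/types.h>	/* u32 */

	struct demo_device {		/* simplified stand-in, not the real type */
		u32 started_request_count;
	};

	/* Warn loudly on underflow instead of silently clamping at zero,
	 * which is what the removed ternary macro used to do.
	 */
	static inline void demo_decrement_request_count(struct demo_device *dev)
	{
		if (WARN_ONCE(dev->started_request_count == 0,
			      "%s: tried to decrement started_request_count past 0!?",
			      __func__))
			return;
		dev->started_request_count--;
	}

The gain over the macro form is argument type checking at compile time plus a visible one-shot warning when the counter would go below zero; trivial getters such as idev->owning_port->owning_controller need no wrapper at all.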