author     Stephen M. Cameron <stephenmcameron@gmail.com>  2014-11-15 02:26:59 +0300
committer  Christoph Hellwig <hch@lst.de>                  2014-11-20 11:11:24 +0300
commit     50a0decf75b66480aa5b076d4e1bca11bc202efe (patch)
tree       6e562079679e9310ebfd174cdbe84128e5a4b74c /drivers/scsi
parent     92084715f4d296c99ac120b3b77cf72d5c194c86 (diff)
download   linux-50a0decf75b66480aa5b076d4e1bca11bc202efe.tar.xz
hpsa: fix endianness issue with scatter gather elements
The hardware needs little-endian scatter-gather addresses and lengths, but we were not converting from CPU byte order as we should have been. On Intel this is all just a bunch of no-op macros, but it makes the code endian-clean(er).

Signed-off-by: Don Brace <don.brace@pmcs.com>
Signed-off-by: Robert Elliott <elliott@hp.com>
Reviewed-by: Webb Scales <webbnh@hp.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
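For context, the pattern this patch applies throughout hpsa.c is the standard kernel byte-order idiom: values written into controller-visible structures go through cpu_to_le32()/cpu_to_le64(), and values the CPU reads back go through le32_to_cpu()/le64_to_cpu(). On little-endian machines these macros expand to nothing, which is why the old code happened to work on x86. The sketch below is illustrative only; hpsa_fill_sg() and hpsa_sg_dma_addr() are hypothetical helpers, not functions from the driver, and the struct mirrors the post-patch SGDescriptor layout from hpsa_cmd.h.

    #include <linux/types.h>        /* u32, u64, dma_addr_t */
    #include <asm/byteorder.h>      /* cpu_to_le64(), le64_to_cpu(), ... */

    /* Post-patch layout from hpsa_cmd.h: fields hold little-endian values. */
    struct SGDescriptor {
            u64 Addr;
            u32 Len;
            u32 Ext;
    };

    /* Hypothetical helper: fill one SG element in controller byte order. */
    static inline void hpsa_fill_sg(struct SGDescriptor *sg, dma_addr_t addr,
                                    u32 len, u32 ext_flags)
    {
            sg->Addr = cpu_to_le64(addr);      /* no-op on LE, byte swap on BE */
            sg->Len  = cpu_to_le32(len);
            sg->Ext  = cpu_to_le32(ext_flags); /* e.g. HPSA_SG_LAST or HPSA_SG_CHAIN */
    }

    /* Hypothetical helper: read the DMA address back in CPU byte order. */
    static inline dma_addr_t hpsa_sg_dma_addr(const struct SGDescriptor *sg)
    {
            return (dma_addr_t)le64_to_cpu(sg->Addr);
    }

Annotating these fields as __le64/__le32 rather than plain u64/u32 would additionally let sparse flag any remaining mixed-endianness assignments; this patch keeps the plain integer types.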
Diffstat (limited to 'drivers/scsi')
-rw-r--r--  drivers/scsi/hpsa.c      | 223
-rw-r--r--  drivers/scsi/hpsa_cmd.h  |  14
2 files changed, 107 insertions(+), 130 deletions(-)
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index e788e6822b1b..b082594e7c69 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -1502,22 +1502,22 @@ static int hpsa_map_sg_chain_block(struct ctlr_info *h,
{
struct SGDescriptor *chain_sg, *chain_block;
u64 temp64;
+ u32 chain_len;
chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
chain_block = h->cmd_sg_list[c->cmdindex];
- chain_sg->Ext = HPSA_SG_CHAIN;
- chain_sg->Len = sizeof(*chain_sg) *
+ chain_sg->Ext = cpu_to_le32(HPSA_SG_CHAIN);
+ chain_len = sizeof(*chain_sg) *
(c->Header.SGTotal - h->max_cmd_sg_entries);
- temp64 = pci_map_single(h->pdev, chain_block, chain_sg->Len,
+ chain_sg->Len = cpu_to_le32(chain_len);
+ temp64 = pci_map_single(h->pdev, chain_block, chain_len,
PCI_DMA_TODEVICE);
if (dma_mapping_error(&h->pdev->dev, temp64)) {
/* prevent subsequent unmapping */
- chain_sg->Addr.lower = 0;
- chain_sg->Addr.upper = 0;
+ chain_sg->Addr = cpu_to_le64(0);
return -1;
}
- chain_sg->Addr.lower = (u32) (temp64 & 0x0FFFFFFFFULL);
- chain_sg->Addr.upper = (u32) ((temp64 >> 32) & 0x0FFFFFFFFULL);
+ chain_sg->Addr = cpu_to_le64(temp64);
return 0;
}
@@ -1525,15 +1525,13 @@ static void hpsa_unmap_sg_chain_block(struct ctlr_info *h,
struct CommandList *c)
{
struct SGDescriptor *chain_sg;
- union u64bit temp64;
- if (c->Header.SGTotal <= h->max_cmd_sg_entries)
+ if (le16_to_cpu(c->Header.SGTotal) <= h->max_cmd_sg_entries)
return;
chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
- temp64.val32.lower = chain_sg->Addr.lower;
- temp64.val32.upper = chain_sg->Addr.upper;
- pci_unmap_single(h->pdev, temp64.val, chain_sg->Len, PCI_DMA_TODEVICE);
+ pci_unmap_single(h->pdev, le64_to_cpu(chain_sg->Addr),
+ le32_to_cpu(chain_sg->Len), PCI_DMA_TODEVICE);
}
@@ -1734,8 +1732,7 @@ static void complete_scsi_command(struct CommandList *cp)
struct io_accel1_cmd *c = &h->ioaccel_cmd_pool[cp->cmdindex];
cp->Header.SGList = cp->Header.SGTotal = scsi_sg_count(cmd);
cp->Request.CDBLen = c->io_flags & IOACCEL1_IOFLAGS_CDBLEN_MASK;
- cp->Header.Tag.lower = c->Tag.lower;
- cp->Header.Tag.upper = c->Tag.upper;
+ cp->Header.tag = c->tag;
memcpy(cp->Header.LUN.LunAddrBytes, c->CISS_LUN, 8);
memcpy(cp->Request.CDB, c->CDB, cp->Request.CDBLen);
@@ -1936,14 +1933,11 @@ static void hpsa_pci_unmap(struct pci_dev *pdev,
struct CommandList *c, int sg_used, int data_direction)
{
int i;
- union u64bit addr64;
- for (i = 0; i < sg_used; i++) {
- addr64.val32.lower = c->SG[i].Addr.lower;
- addr64.val32.upper = c->SG[i].Addr.upper;
- pci_unmap_single(pdev, (dma_addr_t) addr64.val, c->SG[i].Len,
- data_direction);
- }
+ for (i = 0; i < sg_used; i++)
+ pci_unmap_single(pdev, (dma_addr_t) le64_to_cpu(c->SG[i].Addr),
+ le32_to_cpu(c->SG[i].Len),
+ data_direction);
}
static int hpsa_map_one(struct pci_dev *pdev,
@@ -1956,25 +1950,22 @@ static int hpsa_map_one(struct pci_dev *pdev,
if (buflen == 0 || data_direction == PCI_DMA_NONE) {
cp->Header.SGList = 0;
- cp->Header.SGTotal = 0;
+ cp->Header.SGTotal = cpu_to_le16(0);
return 0;
}
- addr64 = (u64) pci_map_single(pdev, buf, buflen, data_direction);
+ addr64 = pci_map_single(pdev, buf, buflen, data_direction);
if (dma_mapping_error(&pdev->dev, addr64)) {
/* Prevent subsequent unmap of something never mapped */
cp->Header.SGList = 0;
- cp->Header.SGTotal = 0;
+ cp->Header.SGTotal = cpu_to_le16(0);
return -1;
}
- cp->SG[0].Addr.lower =
- (u32) (addr64 & (u64) 0x00000000FFFFFFFF);
- cp->SG[0].Addr.upper =
- (u32) ((addr64 >> 32) & (u64) 0x00000000FFFFFFFF);
- cp->SG[0].Len = buflen;
- cp->SG[0].Ext = HPSA_SG_LAST; /* we are not chaining */
- cp->Header.SGList = (u8) 1; /* no. SGs contig in this cmd */
- cp->Header.SGTotal = (u16) 1; /* total sgs in this cmd list */
+ cp->SG[0].Addr = cpu_to_le64(addr64);
+ cp->SG[0].Len = cpu_to_le32(buflen);
+ cp->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST); /* we are not chaining */
+ cp->Header.SGList = 1; /* no. SGs contig in this cmd */
+ cp->Header.SGTotal = cpu_to_le16(1); /* total sgs in cmd list */
return 0;
}
@@ -2832,8 +2823,8 @@ static int hpsa_get_pdisk_of_ioaccel2(struct ctlr_info *h,
if (d == NULL)
return 0; /* no match */
- it_nexus = cpu_to_le32((u32) d->ioaccel_handle);
- scsi_nexus = cpu_to_le32((u32) c2a->scsi_nexus);
+ it_nexus = cpu_to_le32(d->ioaccel_handle);
+ scsi_nexus = cpu_to_le32(c2a->scsi_nexus);
find = c2a->scsi_nexus;
if (h->raid_offload_debug > 0)
@@ -3212,19 +3203,19 @@ static int hpsa_scatter_gather(struct ctlr_info *h,
}
addr64 = (u64) sg_dma_address(sg);
len = sg_dma_len(sg);
- curr_sg->Addr.lower = (u32) (addr64 & 0x0FFFFFFFFULL);
- curr_sg->Addr.upper = (u32) ((addr64 >> 32) & 0x0FFFFFFFFULL);
- curr_sg->Len = len;
- curr_sg->Ext = (i < scsi_sg_count(cmd) - 1) ? 0 : HPSA_SG_LAST;
+ curr_sg->Addr = cpu_to_le64(addr64);
+ curr_sg->Len = cpu_to_le32(len);
+ curr_sg->Ext = cpu_to_le32(0);
curr_sg++;
}
+ (--curr_sg)->Ext = cpu_to_le32(HPSA_SG_LAST);
if (use_sg + chained > h->maxSG)
h->maxSG = use_sg + chained;
if (chained) {
cp->Header.SGList = h->max_cmd_sg_entries;
- cp->Header.SGTotal = (u16) (use_sg + 1);
+ cp->Header.SGTotal = cpu_to_le16(use_sg + 1);
if (hpsa_map_sg_chain_block(h, cp)) {
scsi_dma_unmap(cmd);
return -1;
@@ -3235,7 +3226,7 @@ static int hpsa_scatter_gather(struct ctlr_info *h,
sglist_finished:
cp->Header.SGList = (u8) use_sg; /* no. SGs contig in this cmd */
- cp->Header.SGTotal = (u16) use_sg; /* total sgs in this cmd list */
+ cp->Header.SGTotal = cpu_to_le16(use_sg); /* total sgs in this cmd list */
return 0;
}
@@ -3327,17 +3318,12 @@ static int hpsa_scsi_ioaccel1_queue_command(struct ctlr_info *h,
addr64 = (u64) sg_dma_address(sg);
len = sg_dma_len(sg);
total_len += len;
- curr_sg->Addr.lower = (u32) (addr64 & 0x0FFFFFFFFULL);
- curr_sg->Addr.upper =
- (u32) ((addr64 >> 32) & 0x0FFFFFFFFULL);
- curr_sg->Len = len;
-
- if (i == (scsi_sg_count(cmd) - 1))
- curr_sg->Ext = HPSA_SG_LAST;
- else
- curr_sg->Ext = 0; /* we are not chaining */
+ curr_sg->Addr = cpu_to_le64(addr64);
+ curr_sg->Len = cpu_to_le32(len);
+ curr_sg->Ext = cpu_to_le32(0);
curr_sg++;
}
+ (--curr_sg)->Ext = cpu_to_le32(HPSA_SG_LAST);
switch (cmd->sc_data_direction) {
case DMA_TO_DEVICE:
@@ -3594,7 +3580,7 @@ static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h,
cp->data_len = cpu_to_le32(total_len);
cp->err_ptr = cpu_to_le64(c->busaddr +
offsetof(struct io_accel2_cmd, error_data));
- cp->err_len = cpu_to_le32((u32) sizeof(cp->error_data));
+ cp->err_len = cpu_to_le32(sizeof(cp->error_data));
enqueue_cmd_and_start_io(h, c);
return 0;
@@ -4023,8 +4009,8 @@ static int hpsa_scsi_queue_command_lck(struct scsi_cmnd *cmd,
c->Header.ReplyQueue = 0; /* unused in simple mode */
memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8);
- c->Header.Tag.lower = (c->cmdindex << DIRECT_LOOKUP_SHIFT);
- c->Header.Tag.lower |= DIRECT_LOOKUP_BIT;
+ c->Header.tag = cpu_to_le64((c->cmdindex << DIRECT_LOOKUP_SHIFT) |
+ DIRECT_LOOKUP_BIT);
/* Fill in the request block... */
@@ -4326,8 +4312,8 @@ static void hpsa_get_tag(struct ctlr_info *h,
if (c->cmd_type == CMD_IOACCEL1) {
struct io_accel1_cmd *cm1 = (struct io_accel1_cmd *)
&h->ioaccel_cmd_pool[c->cmdindex];
- *tagupper = cm1->Tag.upper;
- *taglower = cm1->Tag.lower;
+ *tagupper = (u32) (cm1->tag >> 32);
+ *taglower = (u32) (cm1->tag & 0x0ffffffffULL);
return;
}
if (c->cmd_type == CMD_IOACCEL2) {
@@ -4338,11 +4324,10 @@ static void hpsa_get_tag(struct ctlr_info *h,
*taglower = cm2->Tag;
return;
}
- *tagupper = c->Header.Tag.upper;
- *taglower = c->Header.Tag.lower;
+ *tagupper = (u32) (c->Header.tag >> 32);
+ *taglower = (u32) (c->Header.tag & 0x0ffffffffULL);
}
-
static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr,
struct CommandList *abort, int swizzle)
{
@@ -4429,7 +4414,7 @@ static struct CommandList *hpsa_find_cmd_in_queue_by_tag(struct ctlr_info *h,
spin_lock_irqsave(&h->lock, flags);
list_for_each_entry(c, queue_head, list) {
- if (memcmp(&c->Header.Tag, tag, 8) != 0)
+ if (memcmp(&c->Header.tag, tag, 8) != 0)
continue;
spin_unlock_irqrestore(&h->lock, flags);
return c;
@@ -4711,9 +4696,8 @@ static struct CommandList *cmd_alloc(struct ctlr_info *h)
INIT_LIST_HEAD(&c->list);
c->busaddr = (u32) cmd_dma_handle;
temp64.val = (u64) err_dma_handle;
- c->ErrDesc.Addr.lower = temp64.val32.lower;
- c->ErrDesc.Addr.upper = temp64.val32.upper;
- c->ErrDesc.Len = sizeof(*c->err_info);
+ c->ErrDesc.Addr = cpu_to_le64(err_dma_handle);
+ c->ErrDesc.Len = cpu_to_le32(sizeof(*c->err_info));
c->h = h;
return c;
@@ -4726,7 +4710,6 @@ static struct CommandList *cmd_alloc(struct ctlr_info *h)
static struct CommandList *cmd_special_alloc(struct ctlr_info *h)
{
struct CommandList *c;
- union u64bit temp64;
dma_addr_t cmd_dma_handle, err_dma_handle;
c = pci_zalloc_consistent(h->pdev, sizeof(*c), &cmd_dma_handle);
@@ -4747,10 +4730,8 @@ static struct CommandList *cmd_special_alloc(struct ctlr_info *h)
INIT_LIST_HEAD(&c->list);
c->busaddr = (u32) cmd_dma_handle;
- temp64.val = (u64) err_dma_handle;
- c->ErrDesc.Addr.lower = temp64.val32.lower;
- c->ErrDesc.Addr.upper = temp64.val32.upper;
- c->ErrDesc.Len = sizeof(*c->err_info);
+ c->ErrDesc.Addr = cpu_to_le64(err_dma_handle);
+ c->ErrDesc.Len = cpu_to_le32(sizeof(*c->err_info));
c->h = h;
return c;
@@ -4770,12 +4751,9 @@ static void cmd_free(struct ctlr_info *h, struct CommandList *c)
static void cmd_special_free(struct ctlr_info *h, struct CommandList *c)
{
- union u64bit temp64;
-
- temp64.val32.lower = c->ErrDesc.Addr.lower;
- temp64.val32.upper = c->ErrDesc.Addr.upper;
pci_free_consistent(h->pdev, sizeof(*c->err_info),
- c->err_info, (dma_addr_t) temp64.val);
+ c->err_info,
+ (dma_addr_t) le64_to_cpu(c->ErrDesc.Addr));
pci_free_consistent(h->pdev, sizeof(*c),
c, (dma_addr_t) (c->busaddr & DIRECT_LOOKUP_MASK));
}
@@ -4930,7 +4908,7 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
IOCTL_Command_struct iocommand;
struct CommandList *c;
char *buff = NULL;
- union u64bit temp64;
+ u64 temp64;
int rc = 0;
if (!argp)
@@ -4969,14 +4947,14 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
c->Header.ReplyQueue = 0; /* unused in simple mode */
if (iocommand.buf_size > 0) { /* buffer to fill */
c->Header.SGList = 1;
- c->Header.SGTotal = 1;
+ c->Header.SGTotal = cpu_to_le16(1);
} else { /* no buffers to fill */
c->Header.SGList = 0;
- c->Header.SGTotal = 0;
+ c->Header.SGTotal = cpu_to_le16(0);
}
memcpy(&c->Header.LUN, &iocommand.LUN_info, sizeof(c->Header.LUN));
/* use the kernel address the cmd block for tag */
- c->Header.Tag.lower = c->busaddr;
+ c->Header.tag = c->busaddr;
/* Fill in Request block */
memcpy(&c->Request, &iocommand.Request,
@@ -4984,19 +4962,17 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
/* Fill in the scatter gather information */
if (iocommand.buf_size > 0) {
- temp64.val = pci_map_single(h->pdev, buff,
+ temp64 = pci_map_single(h->pdev, buff,
iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
- if (dma_mapping_error(&h->pdev->dev, temp64.val)) {
- c->SG[0].Addr.lower = 0;
- c->SG[0].Addr.upper = 0;
- c->SG[0].Len = 0;
+ if (dma_mapping_error(&h->pdev->dev, (dma_addr_t) temp64)) {
+ c->SG[0].Addr = cpu_to_le64(0);
+ c->SG[0].Len = cpu_to_le32(0);
rc = -ENOMEM;
goto out;
}
- c->SG[0].Addr.lower = temp64.val32.lower;
- c->SG[0].Addr.upper = temp64.val32.upper;
- c->SG[0].Len = iocommand.buf_size;
- c->SG[0].Ext = HPSA_SG_LAST; /* we are not chaining*/
+ c->SG[0].Addr = cpu_to_le64(temp64);
+ c->SG[0].Len = cpu_to_le32(iocommand.buf_size);
+ c->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST); /* not chaining */
}
hpsa_scsi_do_simple_cmd_core_if_no_lockup(h, c);
if (iocommand.buf_size > 0)
@@ -5031,7 +5007,7 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
struct CommandList *c;
unsigned char **buff = NULL;
int *buff_size = NULL;
- union u64bit temp64;
+ u64 temp64;
BYTE sg_used = 0;
int status = 0;
int i;
@@ -5105,29 +5081,30 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
}
c->cmd_type = CMD_IOCTL_PEND;
c->Header.ReplyQueue = 0;
- c->Header.SGList = c->Header.SGTotal = sg_used;
+ c->Header.SGList = (u8) sg_used;
+ c->Header.SGTotal = cpu_to_le16(sg_used);
memcpy(&c->Header.LUN, &ioc->LUN_info, sizeof(c->Header.LUN));
- c->Header.Tag.lower = c->busaddr;
+ c->Header.tag = c->busaddr;
memcpy(&c->Request, &ioc->Request, sizeof(c->Request));
if (ioc->buf_size > 0) {
int i;
for (i = 0; i < sg_used; i++) {
- temp64.val = pci_map_single(h->pdev, buff[i],
+ temp64 = pci_map_single(h->pdev, buff[i],
buff_size[i], PCI_DMA_BIDIRECTIONAL);
- if (dma_mapping_error(&h->pdev->dev, temp64.val)) {
- c->SG[i].Addr.lower = 0;
- c->SG[i].Addr.upper = 0;
- c->SG[i].Len = 0;
+ if (dma_mapping_error(&h->pdev->dev,
+ (dma_addr_t) temp64)) {
+ c->SG[i].Addr = cpu_to_le64(0);
+ c->SG[i].Len = cpu_to_le32(0);
hpsa_pci_unmap(h->pdev, c, i,
PCI_DMA_BIDIRECTIONAL);
status = -ENOMEM;
goto cleanup0;
}
- c->SG[i].Addr.lower = temp64.val32.lower;
- c->SG[i].Addr.upper = temp64.val32.upper;
- c->SG[i].Len = buff_size[i];
- c->SG[i].Ext = i < sg_used - 1 ? 0 : HPSA_SG_LAST;
+ c->SG[i].Addr = cpu_to_le64(temp64);
+ c->SG[i].Len = cpu_to_le32(buff_size[i]);
+ c->SG[i].Ext = cpu_to_le32(0);
}
+ c->SG[--i].Ext = cpu_to_le32(HPSA_SG_LAST);
}
hpsa_scsi_do_simple_cmd_core_if_no_lockup(h, c);
if (sg_used)
@@ -5266,17 +5243,18 @@ static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
{
int pci_dir = XFER_NONE;
struct CommandList *a; /* for commands to be aborted */
+ u32 tupper, tlower;
c->cmd_type = CMD_IOCTL_PEND;
c->Header.ReplyQueue = 0;
if (buff != NULL && size > 0) {
c->Header.SGList = 1;
- c->Header.SGTotal = 1;
+ c->Header.SGTotal = cpu_to_le16(1);
} else {
c->Header.SGList = 0;
- c->Header.SGTotal = 0;
+ c->Header.SGTotal = cpu_to_le16(0);
}
- c->Header.Tag.lower = c->busaddr;
+ c->Header.tag = c->busaddr;
memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 8);
c->Request.Type.Type = cmd_type;
@@ -5374,9 +5352,10 @@ static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
break;
case HPSA_ABORT_MSG:
a = buff; /* point to command to be aborted */
- dev_dbg(&h->pdev->dev, "Abort Tag:0x%08x:%08x using request Tag:0x%08x:%08x\n",
- a->Header.Tag.upper, a->Header.Tag.lower,
- c->Header.Tag.upper, c->Header.Tag.lower);
+ dev_dbg(&h->pdev->dev, "Abort Tag:0x%016llx using request Tag:0x%016llx",
+ a->Header.tag, c->Header.tag);
+ tlower = (u32) (a->Header.tag >> 32);
+ tupper = (u32) (a->Header.tag & 0x0ffffffffULL);
c->Request.CDBLen = 16;
c->Request.Type.Type = TYPE_MSG;
c->Request.Type.Attribute = ATTR_SIMPLE;
@@ -5387,14 +5366,14 @@ static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
c->Request.CDB[2] = 0x00; /* reserved */
c->Request.CDB[3] = 0x00; /* reserved */
/* Tag to abort goes in CDB[4]-CDB[11] */
- c->Request.CDB[4] = a->Header.Tag.lower & 0xFF;
- c->Request.CDB[5] = (a->Header.Tag.lower >> 8) & 0xFF;
- c->Request.CDB[6] = (a->Header.Tag.lower >> 16) & 0xFF;
- c->Request.CDB[7] = (a->Header.Tag.lower >> 24) & 0xFF;
- c->Request.CDB[8] = a->Header.Tag.upper & 0xFF;
- c->Request.CDB[9] = (a->Header.Tag.upper >> 8) & 0xFF;
- c->Request.CDB[10] = (a->Header.Tag.upper >> 16) & 0xFF;
- c->Request.CDB[11] = (a->Header.Tag.upper >> 24) & 0xFF;
+ c->Request.CDB[4] = tlower & 0xFF;
+ c->Request.CDB[5] = (tlower >> 8) & 0xFF;
+ c->Request.CDB[6] = (tlower >> 16) & 0xFF;
+ c->Request.CDB[7] = (tlower >> 24) & 0xFF;
+ c->Request.CDB[8] = tupper & 0xFF;
+ c->Request.CDB[9] = (tupper >> 8) & 0xFF;
+ c->Request.CDB[10] = (tupper >> 16) & 0xFF;
+ c->Request.CDB[11] = (tupper >> 24) & 0xFF;
c->Request.CDB[12] = 0x00; /* reserved */
c->Request.CDB[13] = 0x00; /* reserved */
c->Request.CDB[14] = 0x00; /* reserved */
@@ -5763,9 +5742,8 @@ static int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
cmd->CommandHeader.ReplyQueue = 0;
cmd->CommandHeader.SGList = 0;
- cmd->CommandHeader.SGTotal = 0;
- cmd->CommandHeader.Tag.lower = paddr32;
- cmd->CommandHeader.Tag.upper = 0;
+ cmd->CommandHeader.SGTotal = cpu_to_le16(0);
+ cmd->CommandHeader.tag = paddr32;
memset(&cmd->CommandHeader.LUN.LunAddrBytes, 0, 8);
cmd->Request.CDBLen = 16;
@@ -5776,9 +5754,9 @@ static int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
cmd->Request.CDB[0] = opcode;
cmd->Request.CDB[1] = type;
memset(&cmd->Request.CDB[2], 0, 14); /* rest of the CDB is reserved */
- cmd->ErrorDescriptor.Addr.lower = paddr32 + sizeof(*cmd);
- cmd->ErrorDescriptor.Addr.upper = 0;
- cmd->ErrorDescriptor.Len = sizeof(struct ErrorInfo);
+ cmd->ErrorDescriptor.Addr =
+ cpu_to_le64((paddr32 + sizeof(*cmd)));
+ cmd->ErrorDescriptor.Len = cpu_to_le32(sizeof(struct ErrorInfo));
writel(paddr32, vaddr + SA5_REQUEST_PORT_OFFSET);
@@ -7429,13 +7407,12 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
cp->host_context_flags = IOACCEL1_HCFLAGS_CISS_FORMAT;
cp->timeout_sec = 0;
cp->ReplyQueue = 0;
- cp->Tag.lower = (i << DIRECT_LOOKUP_SHIFT) |
- DIRECT_LOOKUP_BIT;
- cp->Tag.upper = 0;
- cp->host_addr.lower =
- (u32) (h->ioaccel_cmd_pool_dhandle +
+ cp->tag =
+ cpu_to_le64((i << DIRECT_LOOKUP_SHIFT) |
+ DIRECT_LOOKUP_BIT);
+ cp->host_addr =
+ cpu_to_le64(h->ioaccel_cmd_pool_dhandle +
(i * sizeof(struct io_accel1_cmd)));
- cp->host_addr.upper = 0;
}
} else if (trans_support & CFGTBL_Trans_io_accel2) {
u64 cfg_offset, cfg_base_addr_index;
@@ -7709,7 +7686,7 @@ static void __attribute__((unused)) verify_offsets(void)
VERIFY_OFFSET(timeout_sec, 0x62);
VERIFY_OFFSET(ReplyQueue, 0x64);
VERIFY_OFFSET(reserved9, 0x65);
- VERIFY_OFFSET(Tag, 0x68);
+ VERIFY_OFFSET(tag, 0x68);
VERIFY_OFFSET(host_addr, 0x70);
VERIFY_OFFSET(CISS_LUN, 0x78);
VERIFY_OFFSET(SG, 0x78 + 8);
diff --git a/drivers/scsi/hpsa_cmd.h b/drivers/scsi/hpsa_cmd.h
index 9b19042ff330..575eda8a8c5e 100644
--- a/drivers/scsi/hpsa_cmd.h
+++ b/drivers/scsi/hpsa_cmd.h
@@ -314,7 +314,7 @@ struct CommandListHeader {
u8 ReplyQueue;
u8 SGList;
u16 SGTotal;
- struct vals32 Tag;
+ u64 tag;
union LUNAddr LUN;
};
@@ -330,12 +330,12 @@ struct RequestBlock {
};
struct ErrDescriptor {
- struct vals32 Addr;
+ u64 Addr;
u32 Len;
};
struct SGDescriptor {
- struct vals32 Addr;
+ u64 Addr;
u32 Len;
u32 Ext;
};
@@ -434,8 +434,8 @@ struct io_accel1_cmd {
u16 timeout_sec; /* 0x62 - 0x63 */
u8 ReplyQueue; /* 0x64 */
u8 reserved9[3]; /* 0x65 - 0x67 */
- struct vals32 Tag; /* 0x68 - 0x6F */
- struct vals32 host_addr; /* 0x70 - 0x77 */
+ u64 tag; /* 0x68 - 0x6F */
+ u64 host_addr; /* 0x70 - 0x77 */
u8 CISS_LUN[8]; /* 0x78 - 0x7F */
struct SGDescriptor SG[IOACCEL1_MAXSGENTRIES];
} __aligned(IOACCEL1_COMMANDLIST_ALIGNMENT);
@@ -555,8 +555,8 @@ struct hpsa_tmf_struct {
u8 reserved1; /* byte 3 Reserved */
u32 it_nexus; /* SCSI I-T Nexus */
u8 lun_id[8]; /* LUN ID for TMF request */
- struct vals32 Tag; /* cciss tag associated w/ request */
- struct vals32 abort_tag;/* cciss tag of SCSI cmd or task to abort */
+ u64 tag; /* cciss tag associated w/ request */
+ u64 abort_tag; /* cciss tag of SCSI cmd or task to abort */
u64 error_ptr; /* Error Pointer */
u32 error_len; /* Error Length */
};