author     Linus Torvalds <torvalds@linux-foundation.org>  2016-05-17 02:46:03 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>  2016-05-17 02:46:03 +0300
commit     bc231d9ede99518b67a77544d9084f15b898fe2e (patch)
tree       d407236d988cc82926b99ecd49d45122828108d6 /drivers/misc
parent     62a0027839a4a69bc5d2696672242019a6bb6221 (diff)
parent     3cd0b53553ce28da6fc828c601041c974e1c4dde (diff)
Merge branch 'x86-platform-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 platform updates from Ingo Molnar:
 "The main change is the addition of SGI/UV4 support"

* 'x86-platform-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (22 commits)
  x86/platform/UV: Fix incorrect nodes and pnodes for cpuless and memoryless nodes
  x86/platform/UV: Remove Obsolete GRU MMR address translation
  x86/platform/UV: Update physical address conversions for UV4
  x86/platform/UV: Build GAM reference tables
  x86/platform/UV: Support UV4 socket address changes
  x86/platform/UV: Add obtaining GAM Range Table from UV BIOS
  x86/platform/UV: Add UV4 addressing discovery function
  x86/platform/UV: Fold blade info into per node hub info structs
  x86/platform/UV: Allocate common per node hub info structs on local node
  x86/platform/UV: Move blade local processor ID to the per cpu info struct
  x86/platform/UV: Move scir info to the per cpu info struct
  x86/platform/UV: Create per cpu info structs to replace per hub info structs
  x86/platform/UV: Update MMIOH setup function to work for both UV3 and UV4
  x86/platform/UV: Clean up redunduncies after merge of UV4 MMR definitions
  x86/platform/UV: Add UV4 Specific MMR definitions
  x86/platform/UV: Prep for UV4 MMR updates
  x86/platform/UV: Add UV MMR Illegal Access Function
  x86/platform/UV: Add UV4 Specific Defines
  x86/platform/UV: Add UV Architecture Defines
  x86/platform/UV: Add Initial UV4 definitions
  ...
Diffstat (limited to 'drivers/misc')
-rw-r--r--  drivers/misc/sgi-gru/grukservices.c | 38
1 file changed, 22 insertions(+), 16 deletions(-)
diff --git a/drivers/misc/sgi-gru/grukservices.c b/drivers/misc/sgi-gru/grukservices.c
index 967b9dd24fe9..030769018461 100644
--- a/drivers/misc/sgi-gru/grukservices.c
+++ b/drivers/misc/sgi-gru/grukservices.c
@@ -718,8 +718,8 @@ cberr:
 static int send_message_put_nacked(void *cb, struct gru_message_queue_desc *mqd,
                         void *mesg, int lines)
 {
-        unsigned long m, *val = mesg, gpa, save;
-        int ret;
+        unsigned long m;
+        int ret, loops = 200;   /* experimentally determined */
 
         m = mqd->mq_gpa + (gru_get_amo_value_head(cb) << 6);
         if (lines == 2) {
@@ -735,22 +735,28 @@ static int send_message_put_nacked(void *cb, struct gru_message_queue_desc *mqd,
                 return MQE_OK;
 
         /*
-         * Send a cross-partition interrupt to the SSI that contains the target
-         * message queue. Normally, the interrupt is automatically delivered by
-         * hardware but some error conditions require explicit delivery.
-         * Use the GRU to deliver the interrupt. Otherwise partition failures
+         * Send a noop message in order to deliver a cross-partition interrupt
+         * to the SSI that contains the target message queue. Normally, the
+         * interrupt is automatically delivered by hardware following mesq
+         * operations, but some error conditions require explicit delivery.
+         * The noop message will trigger delivery. Otherwise partition failures
          * could cause unrecovered errors.
          */
-        gpa = uv_global_gru_mmr_address(mqd->interrupt_pnode, UVH_IPI_INT);
-        save = *val;
-        *val = uv_hub_ipi_value(mqd->interrupt_apicid, mqd->interrupt_vector,
-                                dest_Fixed);
-        gru_vstore_phys(cb, gpa, gru_get_tri(mesg), IAA_REGISTER, IMA);
-        ret = gru_wait(cb);
-        *val = save;
-        if (ret != CBS_IDLE)
-                return MQE_UNEXPECTED_CB_ERR;
-        return MQE_OK;
+        do {
+                ret = send_noop_message(cb, mqd, mesg);
+        } while ((ret == MQIE_AGAIN || ret == MQE_CONGESTION) && (loops-- > 0));
+
+        if (ret == MQIE_AGAIN || ret == MQE_CONGESTION) {
+                /*
+                 * Don't indicate to the app to resend the message, as it's
+                 * already been successfully sent. We simply send an OK
+                 * (rather than fail the send with MQE_UNEXPECTED_CB_ERR),
+                 * assuming that the other side is receiving enough
+                 * interrupts to get this message processed anyway.
+                 */
+                ret = MQE_OK;
+        }
+        return ret;
 }
 
 /*
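
For readers outside the GRU driver, the change above boils down to a bounded-retry, best-effort delivery pattern: retry the noop send a fixed number of times, and if transient errors persist, still report success because the message payload itself was already stored. The following is a minimal standalone C sketch of that control flow only; send_noop(), deliver_interrupt() and the STATUS_* values are hypothetical stand-ins for send_noop_message() and the driver's MQE_ and MQIE_ status codes, and the random failure model is purely illustrative.

/*
 * Sketch of the bounded-retry pattern introduced by the patch.
 * send_noop() and the STATUS_* codes are hypothetical stand-ins;
 * only the retry/fallback control flow mirrors the driver change.
 */
#include <stdio.h>
#include <stdlib.h>

enum status { STATUS_OK, STATUS_AGAIN, STATUS_CONGESTION };

/* Pretend delivery attempt that fails transiently most of the time. */
static enum status send_noop(void)
{
        int r = rand() % 10;

        if (r < 4)
                return STATUS_AGAIN;
        if (r < 7)
                return STATUS_CONGESTION;
        return STATUS_OK;
}

static enum status deliver_interrupt(void)
{
        int loops = 200;        /* bounded, as in the patch */
        enum status ret;

        /* Retry only on transient errors, up to the bound. */
        do {
                ret = send_noop();
        } while ((ret == STATUS_AGAIN || ret == STATUS_CONGESTION) && loops-- > 0);

        /*
         * The payload was already stored successfully; a leftover transient
         * error here only means the doorbell interrupt may arrive late, so
         * report success instead of asking the caller to resend.
         */
        if (ret == STATUS_AGAIN || ret == STATUS_CONGESTION)
                ret = STATUS_OK;
        return ret;
}

int main(void)
{
        printf("delivery status: %d\n", deliver_interrupt());
        return 0;
}

The bound on loops (200 in the patch, described there as experimentally determined) keeps the retry from spinning indefinitely if the remote partition stays congested, while the fallback to success avoids telling the application to resend a message that has already landed in the queue.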