author     Linus Torvalds <torvalds@linux-foundation.org>  2010-08-08 04:09:24 +0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2010-08-08 04:09:24 +0400
commit     2d53056973079e6c2ffc0d7ae3afbdd3d4f18ae3 (patch)
tree       e921596d80cd0a6434629dbd8d22c0ca3ec14b88
parent     9e50ab91d025afc17ca14a1764be2e1d0c24245d (diff)
parent     e78483c5aeb0d7fbb0e365802145f1045e62957e (diff)
download   linux-2d53056973079e6c2ffc0d7ae3afbdd3d4f18ae3.tar.xz
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/ieee1394/linux1394-2.6
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/ieee1394/linux1394-2.6: (82 commits)
  firewire: core: add forgotten dummy driver methods, remove unused ones
  firewire: add isochronous multichannel reception
  firewire: core: small clarifications in core-cdev
  firewire: core: remove unused code
  firewire: ohci: release channel in error path
  firewire: ohci: use memory barriers to order descriptor updates
  tools/firewire: nosy-dump: increment program version
  tools/firewire: nosy-dump: remove unused code
  tools/firewire: nosy-dump: use linux/firewire-constants.h
  tools/firewire: nosy-dump: break up a deeply nested function
  tools/firewire: nosy-dump: make some symbols static or const
  tools/firewire: nosy-dump: change to kernel coding style
  tools/firewire: nosy-dump: work around segfault in decode_fcp
  tools/firewire: nosy-dump: fix it on x86-64
  tools/firewire: add userspace front-end of nosy
  firewire: nosy: note ioctls in ioctl-number.txt
  firewire: nosy: use generic printk macros
  firewire: nosy: endianess fixes and annotations
  firewire: nosy: annotate __user pointers and __iomem pointers
  firewire: nosy: fix device shutdown with active client
  ...
-rw-r--r--  Documentation/ioctl/ioctl-number.txt     |    1
-rw-r--r--  MAINTAINERS                              |    1
-rw-r--r--  drivers/firewire/Kconfig                 |   24
-rw-r--r--  drivers/firewire/Makefile                |    1
-rw-r--r--  drivers/firewire/core-card.c             |  218
-rw-r--r--  drivers/firewire/core-cdev.c             |  409
-rw-r--r--  drivers/firewire/core-device.c           |   11
-rw-r--r--  drivers/firewire/core-iso.c              |   34
-rw-r--r--  drivers/firewire/core-topology.c         |   22
-rw-r--r--  drivers/firewire/core-transaction.c      |  306
-rw-r--r--  drivers/firewire/core.h                  |   24
-rw-r--r--  drivers/firewire/net.c                   |    4
-rw-r--r--  drivers/firewire/nosy-user.h             |   25
-rw-r--r--  drivers/firewire/nosy.c                  |  721
-rw-r--r--  drivers/firewire/nosy.h                  |  237
-rw-r--r--  drivers/firewire/ohci.c                  |  701
-rw-r--r--  drivers/firewire/ohci.h                  |    1
-rw-r--r--  drivers/firewire/sbp2.c                  |   13
-rw-r--r--  drivers/ieee1394/dv1394.c                |   18
-rw-r--r--  drivers/ieee1394/eth1394.c               |    3
-rw-r--r--  drivers/ieee1394/raw1394.c               |    7
-rw-r--r--  drivers/ieee1394/sbp2.c                  |   11
-rw-r--r--  drivers/ieee1394/video1394.c             |   17
-rw-r--r--  drivers/media/dvb/firewire/firedtv-fw.c  |    4
-rw-r--r--  include/linux/firewire-cdev.h            |  501
-rw-r--r--  include/linux/firewire.h                 |   62
-rw-r--r--  tools/firewire/Makefile                  |   19
-rw-r--r--  tools/firewire/decode-fcp.c              |  213
-rw-r--r--  tools/firewire/list.h                    |   62
-rw-r--r--  tools/firewire/nosy-dump.c               | 1031
-rw-r--r--  tools/firewire/nosy-dump.h               |  173
31 files changed, 4303 insertions, 571 deletions
diff --git a/Documentation/ioctl/ioctl-number.txt b/Documentation/ioctl/ioctl-number.txt
index 2ec3d7d89984..33223ff121d8 100644
--- a/Documentation/ioctl/ioctl-number.txt
+++ b/Documentation/ioctl/ioctl-number.txt
@@ -79,6 +79,7 @@ Code Seq#(hex) Include File Comments
0x22 all scsi/sg.h
'#' 00-3F IEEE 1394 Subsystem Block for the entire subsystem
'$' 00-0F linux/perf_counter.h, linux/perf_event.h
+'&' 00-07 drivers/firewire/nosy-user.h
'1' 00-1F <linux/timepps.h> PPS kit from Ulrich Windl
<ftp://ftp.de.kernel.org/pub/linux/daemons/ntp/PPS/>
'2' 01-04 linux/i2o.h
diff --git a/MAINTAINERS b/MAINTAINERS
index 1349d1cb5aca..3f49df2d977e 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2324,6 +2324,7 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/ieee1394/linux1394-2.6.git
S: Maintained
F: drivers/firewire/
F: include/linux/firewire*.h
+F: tools/firewire/
FIRMWARE LOADER (request_firmware)
S: Orphan
diff --git a/drivers/firewire/Kconfig b/drivers/firewire/Kconfig
index a9371b36a9b9..fcf3ea28340b 100644
--- a/drivers/firewire/Kconfig
+++ b/drivers/firewire/Kconfig
@@ -66,4 +66,28 @@ config FIREWIRE_NET
source "drivers/ieee1394/Kconfig"
+config FIREWIRE_NOSY
+ tristate "Nosy - a FireWire traffic sniffer for PCILynx cards"
+ depends on PCI
+ help
+ Nosy is an IEEE 1394 packet sniffer that is used for protocol
+ analysis and in development of IEEE 1394 drivers, applications,
+ or firmwares.
+
+ This driver lets you use a Texas Instruments PCILynx 1394 to PCI
+ link layer controller TSB12LV21/A/B as a low-budget bus analyzer.
+ PCILynx is a nowadays very rare IEEE 1394 controller which is
+ not OHCI 1394 compliant.
+
+ The following cards are known to be based on PCILynx or PCILynx-2:
+ IOI IOI-1394TT (PCI card), Unibrain Fireboard 400 PCI Lynx-2
+ (PCI card), Newer Technology FireWire 2 Go (CardBus card),
+ Apple Power Mac G3 blue & white (onboard controller).
+
+ To compile this driver as a module, say M here: The module will be
+ called nosy. Source code of a userspace interface to nosy, called
+ nosy-dump, can be found in tools/firewire/ of the kernel sources.
+
+ If unsure, say N.
+
endmenu
diff --git a/drivers/firewire/Makefile b/drivers/firewire/Makefile
index a8f9bb6d9fdf..3c6a7fb20aa7 100644
--- a/drivers/firewire/Makefile
+++ b/drivers/firewire/Makefile
@@ -12,3 +12,4 @@ obj-$(CONFIG_FIREWIRE) += firewire-core.o
obj-$(CONFIG_FIREWIRE_OHCI) += firewire-ohci.o
obj-$(CONFIG_FIREWIRE_SBP2) += firewire-sbp2.o
obj-$(CONFIG_FIREWIRE_NET) += firewire-net.o
+obj-$(CONFIG_FIREWIRE_NOSY) += nosy.o
diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
index 371713ff0266..be0492398ef9 100644
--- a/drivers/firewire/core-card.c
+++ b/drivers/firewire/core-card.c
@@ -204,17 +204,62 @@ void fw_core_remove_descriptor(struct fw_descriptor *desc)
}
EXPORT_SYMBOL(fw_core_remove_descriptor);
+static int reset_bus(struct fw_card *card, bool short_reset)
+{
+ int reg = short_reset ? 5 : 1;
+ int bit = short_reset ? PHY_BUS_SHORT_RESET : PHY_BUS_RESET;
+
+ return card->driver->update_phy_reg(card, reg, 0, bit);
+}
+
+void fw_schedule_bus_reset(struct fw_card *card, bool delayed, bool short_reset)
+{
+ /* We don't try hard to sort out requests of long vs. short resets. */
+ card->br_short = short_reset;
+
+ /* Use an arbitrary short delay to combine multiple reset requests. */
+ fw_card_get(card);
+ if (!schedule_delayed_work(&card->br_work,
+ delayed ? DIV_ROUND_UP(HZ, 100) : 0))
+ fw_card_put(card);
+}
+EXPORT_SYMBOL(fw_schedule_bus_reset);
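A note on the caller-visible change (illustration, grounded in the hunks below): the synchronous fw_core_initiate_bus_reset() is removed at the end of this file, and callers now queue a reset through the workqueue instead.

	static void example_request_reset(struct fw_card *card)
	{
		/* before: fw_core_initiate_bus_reset(card, 1);  -- synchronous short reset */

		/* after: queue a short reset; "delayed" = true lets br_work() coalesce
		 * several requests that arrive within roughly 10 ms */
		fw_schedule_bus_reset(card, true, true);
	}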
+
+static void br_work(struct work_struct *work)
+{
+ struct fw_card *card = container_of(work, struct fw_card, br_work.work);
+
+ /* Delay for 2s after last reset per IEEE 1394 clause 8.2.1. */
+ if (card->reset_jiffies != 0 &&
+ time_is_after_jiffies(card->reset_jiffies + 2 * HZ)) {
+ if (!schedule_delayed_work(&card->br_work, 2 * HZ))
+ fw_card_put(card);
+ return;
+ }
+
+ fw_send_phy_config(card, FW_PHY_CONFIG_NO_NODE_ID, card->generation,
+ FW_PHY_CONFIG_CURRENT_GAP_COUNT);
+ reset_bus(card, card->br_short);
+ fw_card_put(card);
+}
+
static void allocate_broadcast_channel(struct fw_card *card, int generation)
{
int channel, bandwidth = 0;
- fw_iso_resource_manage(card, generation, 1ULL << 31, &channel,
- &bandwidth, true, card->bm_transaction_data);
- if (channel == 31) {
+ if (!card->broadcast_channel_allocated) {
+ fw_iso_resource_manage(card, generation, 1ULL << 31,
+ &channel, &bandwidth, true,
+ card->bm_transaction_data);
+ if (channel != 31) {
+ fw_notify("failed to allocate broadcast channel\n");
+ return;
+ }
card->broadcast_channel_allocated = true;
- device_for_each_child(card->device, (void *)(long)generation,
- fw_device_set_broadcast_channel);
}
+
+ device_for_each_child(card->device, (void *)(long)generation,
+ fw_device_set_broadcast_channel);
}
static const char gap_count_table[] = {
@@ -224,27 +269,26 @@ static const char gap_count_table[] = {
void fw_schedule_bm_work(struct fw_card *card, unsigned long delay)
{
fw_card_get(card);
- if (!schedule_delayed_work(&card->work, delay))
+ if (!schedule_delayed_work(&card->bm_work, delay))
fw_card_put(card);
}
-static void fw_card_bm_work(struct work_struct *work)
+static void bm_work(struct work_struct *work)
{
- struct fw_card *card = container_of(work, struct fw_card, work.work);
+ struct fw_card *card = container_of(work, struct fw_card, bm_work.work);
struct fw_device *root_device, *irm_device;
struct fw_node *root_node;
- unsigned long flags;
- int root_id, new_root_id, irm_id, local_id;
+ int root_id, new_root_id, irm_id, bm_id, local_id;
int gap_count, generation, grace, rcode;
bool do_reset = false;
bool root_device_is_running;
bool root_device_is_cmc;
bool irm_is_1394_1995_only;
- spin_lock_irqsave(&card->lock, flags);
+ spin_lock_irq(&card->lock);
if (card->local_node == NULL) {
- spin_unlock_irqrestore(&card->lock, flags);
+ spin_unlock_irq(&card->lock);
goto out_put_card;
}
@@ -267,7 +311,8 @@ static void fw_card_bm_work(struct work_struct *work)
grace = time_after(jiffies, card->reset_jiffies + DIV_ROUND_UP(HZ, 8));
- if (is_next_generation(generation, card->bm_generation) ||
+ if ((is_next_generation(generation, card->bm_generation) &&
+ !card->bm_abdicate) ||
(card->bm_generation != generation && grace)) {
/*
* This first step is to figure out who is IRM and
@@ -298,21 +343,26 @@ static void fw_card_bm_work(struct work_struct *work)
card->bm_transaction_data[0] = cpu_to_be32(0x3f);
card->bm_transaction_data[1] = cpu_to_be32(local_id);
- spin_unlock_irqrestore(&card->lock, flags);
+ spin_unlock_irq(&card->lock);
rcode = fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP,
irm_id, generation, SCODE_100,
CSR_REGISTER_BASE + CSR_BUS_MANAGER_ID,
- card->bm_transaction_data,
- sizeof(card->bm_transaction_data));
+ card->bm_transaction_data, 8);
if (rcode == RCODE_GENERATION)
/* Another bus reset, BM work has been rescheduled. */
goto out;
- if (rcode == RCODE_COMPLETE &&
- card->bm_transaction_data[0] != cpu_to_be32(0x3f)) {
+ bm_id = be32_to_cpu(card->bm_transaction_data[0]);
+ spin_lock_irq(&card->lock);
+ if (rcode == RCODE_COMPLETE && generation == card->generation)
+ card->bm_node_id =
+ bm_id == 0x3f ? local_id : 0xffc0 | bm_id;
+ spin_unlock_irq(&card->lock);
+
+ if (rcode == RCODE_COMPLETE && bm_id != 0x3f) {
/* Somebody else is BM. Only act as IRM. */
if (local_id == irm_id)
allocate_broadcast_channel(card, generation);
@@ -320,7 +370,17 @@ static void fw_card_bm_work(struct work_struct *work)
goto out;
}
- spin_lock_irqsave(&card->lock, flags);
+ if (rcode == RCODE_SEND_ERROR) {
+ /*
+ * We have been unable to send the lock request due to
+ * some local problem. Let's try again later and hope
+ * that the problem has gone away by then.
+ */
+ fw_schedule_bm_work(card, DIV_ROUND_UP(HZ, 8));
+ goto out;
+ }
+
+ spin_lock_irq(&card->lock);
if (rcode != RCODE_COMPLETE) {
/*
@@ -339,7 +399,7 @@ static void fw_card_bm_work(struct work_struct *work)
* We weren't BM in the last generation, and the last
* bus reset is less than 125ms ago. Reschedule this job.
*/
- spin_unlock_irqrestore(&card->lock, flags);
+ spin_unlock_irq(&card->lock);
fw_schedule_bm_work(card, DIV_ROUND_UP(HZ, 8));
goto out;
}
@@ -362,14 +422,12 @@ static void fw_card_bm_work(struct work_struct *work)
* If we haven't probed this device yet, bail out now
* and let's try again once that's done.
*/
- spin_unlock_irqrestore(&card->lock, flags);
+ spin_unlock_irq(&card->lock);
goto out;
} else if (root_device_is_cmc) {
/*
- * FIXME: I suppose we should set the cmstr bit in the
- * STATE_CLEAR register of this node, as described in
- * 1394-1995, 8.4.2.6. Also, send out a force root
- * packet for this node.
+ * We will send out a force root packet for this
+ * node as part of the gap count optimization.
*/
new_root_id = root_id;
} else {
@@ -402,19 +460,33 @@ static void fw_card_bm_work(struct work_struct *work)
(card->gap_count != gap_count || new_root_id != root_id))
do_reset = true;
- spin_unlock_irqrestore(&card->lock, flags);
+ spin_unlock_irq(&card->lock);
if (do_reset) {
fw_notify("phy config: card %d, new root=%x, gap_count=%d\n",
card->index, new_root_id, gap_count);
fw_send_phy_config(card, new_root_id, generation, gap_count);
- fw_core_initiate_bus_reset(card, 1);
+ reset_bus(card, true);
/* Will allocate broadcast channel after the reset. */
- } else {
- if (local_id == irm_id)
- allocate_broadcast_channel(card, generation);
+ goto out;
+ }
+
+ if (root_device_is_cmc) {
+ /*
+ * Make sure that the cycle master sends cycle start packets.
+ */
+ card->bm_transaction_data[0] = cpu_to_be32(CSR_STATE_BIT_CMSTR);
+ rcode = fw_run_transaction(card, TCODE_WRITE_QUADLET_REQUEST,
+ root_id, generation, SCODE_100,
+ CSR_REGISTER_BASE + CSR_STATE_SET,
+ card->bm_transaction_data, 4);
+ if (rcode == RCODE_GENERATION)
+ goto out;
}
+ if (local_id == irm_id)
+ allocate_broadcast_channel(card, generation);
+
out:
fw_node_put(root_node);
out_put_card:
@@ -432,17 +504,23 @@ void fw_card_initialize(struct fw_card *card,
card->device = device;
card->current_tlabel = 0;
card->tlabel_mask = 0;
+ card->split_timeout_hi = 0;
+ card->split_timeout_lo = 800 << 19;
+ card->split_timeout_cycles = 800;
+ card->split_timeout_jiffies = DIV_ROUND_UP(HZ, 10);
card->color = 0;
card->broadcast_channel = BROADCAST_CHANNEL_INITIAL;
kref_init(&card->kref);
init_completion(&card->done);
INIT_LIST_HEAD(&card->transaction_list);
+ INIT_LIST_HEAD(&card->phy_receiver_list);
spin_lock_init(&card->lock);
card->local_node = NULL;
- INIT_DELAYED_WORK(&card->work, fw_card_bm_work);
+ INIT_DELAYED_WORK(&card->br_work, br_work);
+ INIT_DELAYED_WORK(&card->bm_work, bm_work);
}
EXPORT_SYMBOL(fw_card_initialize);
@@ -468,20 +546,22 @@ int fw_card_add(struct fw_card *card,
}
EXPORT_SYMBOL(fw_card_add);
-
/*
* The next few functions implement a dummy driver that is used once a card
* driver shuts down an fw_card. This allows the driver to cleanly unload,
* as all IO to the card will be handled (and failed) by the dummy driver
* instead of calling into the module. Only functions for iso context
* shutdown still need to be provided by the card driver.
+ *
+ * .read/write_csr() should never be called anymore after the dummy driver
+ * was bound since they are only used within request handler context.
+ * .set_config_rom() is never called since the card is taken out of card_list
+ * before switching to the dummy driver.
*/
-static int dummy_enable(struct fw_card *card,
- const __be32 *config_rom, size_t length)
+static int dummy_read_phy_reg(struct fw_card *card, int address)
{
- BUG();
- return -1;
+ return -ENODEV;
}
static int dummy_update_phy_reg(struct fw_card *card, int address,
@@ -490,25 +570,14 @@ static int dummy_update_phy_reg(struct fw_card *card, int address,
return -ENODEV;
}
-static int dummy_set_config_rom(struct fw_card *card,
- const __be32 *config_rom, size_t length)
-{
- /*
- * We take the card out of card_list before setting the dummy
- * driver, so this should never get called.
- */
- BUG();
- return -1;
-}
-
static void dummy_send_request(struct fw_card *card, struct fw_packet *packet)
{
- packet->callback(packet, card, -ENODEV);
+ packet->callback(packet, card, RCODE_CANCELLED);
}
static void dummy_send_response(struct fw_card *card, struct fw_packet *packet)
{
- packet->callback(packet, card, -ENODEV);
+ packet->callback(packet, card, RCODE_CANCELLED);
}
static int dummy_cancel_packet(struct fw_card *card, struct fw_packet *packet)
@@ -522,14 +591,40 @@ static int dummy_enable_phys_dma(struct fw_card *card,
return -ENODEV;
}
+static struct fw_iso_context *dummy_allocate_iso_context(struct fw_card *card,
+ int type, int channel, size_t header_size)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+static int dummy_start_iso(struct fw_iso_context *ctx,
+ s32 cycle, u32 sync, u32 tags)
+{
+ return -ENODEV;
+}
+
+static int dummy_set_iso_channels(struct fw_iso_context *ctx, u64 *channels)
+{
+ return -ENODEV;
+}
+
+static int dummy_queue_iso(struct fw_iso_context *ctx, struct fw_iso_packet *p,
+ struct fw_iso_buffer *buffer, unsigned long payload)
+{
+ return -ENODEV;
+}
+
static const struct fw_card_driver dummy_driver_template = {
- .enable = dummy_enable,
- .update_phy_reg = dummy_update_phy_reg,
- .set_config_rom = dummy_set_config_rom,
- .send_request = dummy_send_request,
- .cancel_packet = dummy_cancel_packet,
- .send_response = dummy_send_response,
- .enable_phys_dma = dummy_enable_phys_dma,
+ .read_phy_reg = dummy_read_phy_reg,
+ .update_phy_reg = dummy_update_phy_reg,
+ .send_request = dummy_send_request,
+ .send_response = dummy_send_response,
+ .cancel_packet = dummy_cancel_packet,
+ .enable_phys_dma = dummy_enable_phys_dma,
+ .allocate_iso_context = dummy_allocate_iso_context,
+ .start_iso = dummy_start_iso,
+ .set_iso_channels = dummy_set_iso_channels,
+ .queue_iso = dummy_queue_iso,
};
void fw_card_release(struct kref *kref)
@@ -545,7 +640,7 @@ void fw_core_remove_card(struct fw_card *card)
card->driver->update_phy_reg(card, 4,
PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
- fw_core_initiate_bus_reset(card, 1);
+ fw_schedule_bus_reset(card, false, true);
mutex_lock(&card_mutex);
list_del_init(&card->link);
@@ -565,12 +660,3 @@ void fw_core_remove_card(struct fw_card *card)
WARN_ON(!list_empty(&card->transaction_list));
}
EXPORT_SYMBOL(fw_core_remove_card);
-
-int fw_core_initiate_bus_reset(struct fw_card *card, int short_reset)
-{
- int reg = short_reset ? 5 : 1;
- int bit = short_reset ? PHY_BUS_SHORT_RESET : PHY_BUS_RESET;
-
- return card->driver->update_phy_reg(card, reg, 0, bit);
-}
-EXPORT_SYMBOL(fw_core_initiate_bus_reset);
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
index 5bf106b9d791..14bb7b7b5dd7 100644
--- a/drivers/firewire/core-cdev.c
+++ b/drivers/firewire/core-cdev.c
@@ -18,6 +18,7 @@
* Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
+#include <linux/bug.h>
#include <linux/compat.h>
#include <linux/delay.h>
#include <linux/device.h>
@@ -33,7 +34,7 @@
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/poll.h>
-#include <linux/sched.h>
+#include <linux/sched.h> /* required for linux/wait.h */
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
@@ -47,6 +48,13 @@
#include "core.h"
+/*
+ * ABI version history is documented in linux/firewire-cdev.h.
+ */
+#define FW_CDEV_KERNEL_VERSION 4
+#define FW_CDEV_VERSION_EVENT_REQUEST2 4
+#define FW_CDEV_VERSION_ALLOCATE_REGION_END 4
+
struct client {
u32 version;
struct fw_device *device;
@@ -63,6 +71,9 @@ struct client {
struct fw_iso_buffer buffer;
unsigned long vm_start;
+ struct list_head phy_receiver_link;
+ u64 phy_receiver_closure;
+
struct list_head link;
struct kref kref;
};
@@ -107,6 +118,7 @@ struct outbound_transaction_resource {
struct inbound_transaction_resource {
struct client_resource resource;
+ struct fw_card *card;
struct fw_request *request;
void *data;
size_t length;
@@ -171,7 +183,10 @@ struct outbound_transaction_event {
struct inbound_transaction_event {
struct event event;
- struct fw_cdev_event_request request;
+ union {
+ struct fw_cdev_event_request request;
+ struct fw_cdev_event_request2 request2;
+ } req;
};
struct iso_interrupt_event {
@@ -179,11 +194,28 @@ struct iso_interrupt_event {
struct fw_cdev_event_iso_interrupt interrupt;
};
+struct iso_interrupt_mc_event {
+ struct event event;
+ struct fw_cdev_event_iso_interrupt_mc interrupt;
+};
+
struct iso_resource_event {
struct event event;
struct fw_cdev_event_iso_resource iso_resource;
};
+struct outbound_phy_packet_event {
+ struct event event;
+ struct client *client;
+ struct fw_packet p;
+ struct fw_cdev_event_phy_packet phy_packet;
+};
+
+struct inbound_phy_packet_event {
+ struct event event;
+ struct fw_cdev_event_phy_packet phy_packet;
+};
+
static inline void __user *u64_to_uptr(__u64 value)
{
return (void __user *)(unsigned long)value;
@@ -219,6 +251,7 @@ static int fw_device_op_open(struct inode *inode, struct file *file)
idr_init(&client->resource_idr);
INIT_LIST_HEAD(&client->event_list);
init_waitqueue_head(&client->wait);
+ INIT_LIST_HEAD(&client->phy_receiver_link);
kref_init(&client->kref);
file->private_data = client;
@@ -309,7 +342,7 @@ static void fill_bus_reset_event(struct fw_cdev_event_bus_reset *event,
event->generation = client->device->generation;
event->node_id = client->device->node_id;
event->local_node_id = card->local_node->node_id;
- event->bm_node_id = 0; /* FIXME: We don't track the BM. */
+ event->bm_node_id = card->bm_node_id;
event->irm_node_id = card->irm_node->node_id;
event->root_node_id = card->root_node->node_id;
@@ -340,7 +373,7 @@ static void queue_bus_reset_event(struct client *client)
e = kzalloc(sizeof(*e), GFP_KERNEL);
if (e == NULL) {
- fw_notify("Out of memory when allocating bus reset event\n");
+ fw_notify("Out of memory when allocating event\n");
return;
}
@@ -386,6 +419,9 @@ union ioctl_arg {
struct fw_cdev_allocate_iso_resource allocate_iso_resource;
struct fw_cdev_send_stream_packet send_stream_packet;
struct fw_cdev_get_cycle_timer2 get_cycle_timer2;
+ struct fw_cdev_send_phy_packet send_phy_packet;
+ struct fw_cdev_receive_phy_packets receive_phy_packets;
+ struct fw_cdev_set_iso_channels set_iso_channels;
};
static int ioctl_get_info(struct client *client, union ioctl_arg *arg)
@@ -395,7 +431,7 @@ static int ioctl_get_info(struct client *client, union ioctl_arg *arg)
unsigned long ret = 0;
client->version = a->version;
- a->version = FW_CDEV_VERSION;
+ a->version = FW_CDEV_KERNEL_VERSION;
a->card = client->device->card->index;
down_read(&fw_device_rwsem);
@@ -554,6 +590,10 @@ static int init_request(struct client *client,
(request->length > 4096 || request->length > 512 << speed))
return -EIO;
+ if (request->tcode == TCODE_WRITE_QUADLET_REQUEST &&
+ request->length < 4)
+ return -EINVAL;
+
e = kmalloc(sizeof(*e) + request->length, GFP_KERNEL);
if (e == NULL)
return -ENOMEM;
@@ -626,28 +666,34 @@ static void release_request(struct client *client,
if (is_fcp_request(r->request))
kfree(r->data);
else
- fw_send_response(client->device->card, r->request,
- RCODE_CONFLICT_ERROR);
+ fw_send_response(r->card, r->request, RCODE_CONFLICT_ERROR);
+
+ fw_card_put(r->card);
kfree(r);
}
static void handle_request(struct fw_card *card, struct fw_request *request,
int tcode, int destination, int source,
- int generation, int speed,
- unsigned long long offset,
+ int generation, unsigned long long offset,
void *payload, size_t length, void *callback_data)
{
struct address_handler_resource *handler = callback_data;
struct inbound_transaction_resource *r;
struct inbound_transaction_event *e;
+ size_t event_size0;
void *fcp_frame = NULL;
int ret;
+ /* card may be different from handler->client->device->card */
+ fw_card_get(card);
+
r = kmalloc(sizeof(*r), GFP_ATOMIC);
e = kmalloc(sizeof(*e), GFP_ATOMIC);
- if (r == NULL || e == NULL)
+ if (r == NULL || e == NULL) {
+ fw_notify("Out of memory when allocating event\n");
goto failed;
-
+ }
+ r->card = card;
r->request = request;
r->data = payload;
r->length = length;
@@ -669,15 +715,37 @@ static void handle_request(struct fw_card *card, struct fw_request *request,
if (ret < 0)
goto failed;
- e->request.type = FW_CDEV_EVENT_REQUEST;
- e->request.tcode = tcode;
- e->request.offset = offset;
- e->request.length = length;
- e->request.handle = r->resource.handle;
- e->request.closure = handler->closure;
+ if (handler->client->version < FW_CDEV_VERSION_EVENT_REQUEST2) {
+ struct fw_cdev_event_request *req = &e->req.request;
+
+ if (tcode & 0x10)
+ tcode = TCODE_LOCK_REQUEST;
+
+ req->type = FW_CDEV_EVENT_REQUEST;
+ req->tcode = tcode;
+ req->offset = offset;
+ req->length = length;
+ req->handle = r->resource.handle;
+ req->closure = handler->closure;
+ event_size0 = sizeof(*req);
+ } else {
+ struct fw_cdev_event_request2 *req = &e->req.request2;
+
+ req->type = FW_CDEV_EVENT_REQUEST2;
+ req->tcode = tcode;
+ req->offset = offset;
+ req->source_node_id = source;
+ req->destination_node_id = destination;
+ req->card = card->index;
+ req->generation = generation;
+ req->length = length;
+ req->handle = r->resource.handle;
+ req->closure = handler->closure;
+ event_size0 = sizeof(*req);
+ }
queue_event(handler->client, &e->event,
- &e->request, sizeof(e->request), r->data, length);
+ &e->req, event_size0, r->data, length);
return;
failed:
@@ -687,6 +755,8 @@ static void handle_request(struct fw_card *card, struct fw_request *request,
if (!is_fcp_request(request))
fw_send_response(card, request, RCODE_CONFLICT_ERROR);
+
+ fw_card_put(card);
}
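For context, a minimal userspace sketch (not part of the patch) of how a client consumes the two event flavours after reading from its /dev/fw* file descriptor; handle_old() and handle_new() are hypothetical helpers.

	char buffer[256];
	/* ... read(fd, buffer, sizeof(buffer)) ... */
	union fw_cdev_event *event = (union fw_cdev_event *)buffer;

	switch (event->common.type) {
	case FW_CDEV_EVENT_REQUEST:	/* sent to clients with version < 4 */
		handle_old(&event->request);
		break;
	case FW_CDEV_EVENT_REQUEST2:	/* adds source/destination node ID, card, generation */
		handle_new(&event->request2);
		break;
	}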
static void release_address_handler(struct client *client,
@@ -711,7 +781,11 @@ static int ioctl_allocate(struct client *client, union ioctl_arg *arg)
return -ENOMEM;
region.start = a->offset;
- region.end = a->offset + a->length;
+ if (client->version < FW_CDEV_VERSION_ALLOCATE_REGION_END)
+ region.end = a->offset + a->length;
+ else
+ region.end = a->region_end;
+
r->handler.length = a->length;
r->handler.address_callback = handle_request;
r->handler.callback_data = r;
@@ -723,6 +797,7 @@ static int ioctl_allocate(struct client *client, union ioctl_arg *arg)
kfree(r);
return ret;
}
+ a->offset = r->handler.offset;
r->resource.release = release_address_handler;
ret = add_client_resource(client, &r->resource, GFP_KERNEL);
@@ -757,15 +832,19 @@ static int ioctl_send_response(struct client *client, union ioctl_arg *arg)
if (is_fcp_request(r->request))
goto out;
- if (a->length < r->length)
- r->length = a->length;
- if (copy_from_user(r->data, u64_to_uptr(a->data), r->length)) {
+ if (a->length != fw_get_response_length(r->request)) {
+ ret = -EINVAL;
+ kfree(r->request);
+ goto out;
+ }
+ if (copy_from_user(r->data, u64_to_uptr(a->data), a->length)) {
ret = -EFAULT;
kfree(r->request);
goto out;
}
- fw_send_response(client->device->card, r->request, a->rcode);
+ fw_send_response(r->card, r->request, a->rcode);
out:
+ fw_card_put(r->card);
kfree(r);
return ret;
@@ -773,8 +852,9 @@ static int ioctl_send_response(struct client *client, union ioctl_arg *arg)
static int ioctl_initiate_bus_reset(struct client *client, union ioctl_arg *arg)
{
- return fw_core_initiate_bus_reset(client->device->card,
+ fw_schedule_bus_reset(client->device->card, true,
arg->initiate_bus_reset.type == FW_CDEV_SHORT_RESET);
+ return 0;
}
static void release_descriptor(struct client *client,
@@ -845,10 +925,11 @@ static void iso_callback(struct fw_iso_context *context, u32 cycle,
struct client *client = data;
struct iso_interrupt_event *e;
- e = kzalloc(sizeof(*e) + header_length, GFP_ATOMIC);
- if (e == NULL)
+ e = kmalloc(sizeof(*e) + header_length, GFP_ATOMIC);
+ if (e == NULL) {
+ fw_notify("Out of memory when allocating event\n");
return;
-
+ }
e->interrupt.type = FW_CDEV_EVENT_ISO_INTERRUPT;
e->interrupt.closure = client->iso_closure;
e->interrupt.cycle = cycle;
@@ -858,27 +939,54 @@ static void iso_callback(struct fw_iso_context *context, u32 cycle,
sizeof(e->interrupt) + header_length, NULL, 0);
}
+static void iso_mc_callback(struct fw_iso_context *context,
+ dma_addr_t completed, void *data)
+{
+ struct client *client = data;
+ struct iso_interrupt_mc_event *e;
+
+ e = kmalloc(sizeof(*e), GFP_ATOMIC);
+ if (e == NULL) {
+ fw_notify("Out of memory when allocating event\n");
+ return;
+ }
+ e->interrupt.type = FW_CDEV_EVENT_ISO_INTERRUPT_MULTICHANNEL;
+ e->interrupt.closure = client->iso_closure;
+ e->interrupt.completed = fw_iso_buffer_lookup(&client->buffer,
+ completed);
+ queue_event(client, &e->event, &e->interrupt,
+ sizeof(e->interrupt), NULL, 0);
+}
+
static int ioctl_create_iso_context(struct client *client, union ioctl_arg *arg)
{
struct fw_cdev_create_iso_context *a = &arg->create_iso_context;
struct fw_iso_context *context;
+ fw_iso_callback_t cb;
- /* We only support one context at this time. */
- if (client->iso_context != NULL)
- return -EBUSY;
-
- if (a->channel > 63)
- return -EINVAL;
+ BUILD_BUG_ON(FW_CDEV_ISO_CONTEXT_TRANSMIT != FW_ISO_CONTEXT_TRANSMIT ||
+ FW_CDEV_ISO_CONTEXT_RECEIVE != FW_ISO_CONTEXT_RECEIVE ||
+ FW_CDEV_ISO_CONTEXT_RECEIVE_MULTICHANNEL !=
+ FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL);
switch (a->type) {
- case FW_ISO_CONTEXT_RECEIVE:
- if (a->header_size < 4 || (a->header_size & 3))
+ case FW_ISO_CONTEXT_TRANSMIT:
+ if (a->speed > SCODE_3200 || a->channel > 63)
return -EINVAL;
+
+ cb = iso_callback;
break;
- case FW_ISO_CONTEXT_TRANSMIT:
- if (a->speed > SCODE_3200)
+ case FW_ISO_CONTEXT_RECEIVE:
+ if (a->header_size < 4 || (a->header_size & 3) ||
+ a->channel > 63)
return -EINVAL;
+
+ cb = iso_callback;
+ break;
+
+ case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
+ cb = (fw_iso_callback_t)iso_mc_callback;
break;
default:
@@ -886,20 +994,37 @@ static int ioctl_create_iso_context(struct client *client, union ioctl_arg *arg)
}
context = fw_iso_context_create(client->device->card, a->type,
- a->channel, a->speed, a->header_size,
- iso_callback, client);
+ a->channel, a->speed, a->header_size, cb, client);
if (IS_ERR(context))
return PTR_ERR(context);
+ /* We only support one context at this time. */
+ spin_lock_irq(&client->lock);
+ if (client->iso_context != NULL) {
+ spin_unlock_irq(&client->lock);
+ fw_iso_context_destroy(context);
+ return -EBUSY;
+ }
client->iso_closure = a->closure;
client->iso_context = context;
+ spin_unlock_irq(&client->lock);
- /* We only support one context at this time. */
a->handle = 0;
return 0;
}
+static int ioctl_set_iso_channels(struct client *client, union ioctl_arg *arg)
+{
+ struct fw_cdev_set_iso_channels *a = &arg->set_iso_channels;
+ struct fw_iso_context *ctx = client->iso_context;
+
+ if (ctx == NULL || a->handle != 0)
+ return -EINVAL;
+
+ return fw_iso_context_set_channels(ctx, &a->channels);
+}
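A userspace sketch of driving the new multichannel reception ABI (illustration only; the FW_CDEV_IOC_* request macros are assumed to be the ones declared in linux/firewire-cdev.h, and the channel mask is arbitrary):

	struct fw_cdev_create_iso_context create = {
		.type    = FW_CDEV_ISO_CONTEXT_RECEIVE_MULTICHANNEL,
		.closure = 0,				/* returned in the interrupt_mc events */
	};
	struct fw_cdev_set_iso_channels mask = {
		.channels = (1ULL << 40) | (1ULL << 41),  /* e.g. listen on channels 40 and 41 */
		.handle   = 0,				/* only one context per file descriptor */
	};

	ioctl(fd, FW_CDEV_IOC_CREATE_ISO_CONTEXT, &create);
	ioctl(fd, FW_CDEV_IOC_SET_ISO_CHANNELS, &mask);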
+
/* Macros for decoding the iso packet control header. */
#define GET_PAYLOAD_LENGTH(v) ((v) & 0xffff)
#define GET_INTERRUPT(v) (((v) >> 16) & 0x01)
@@ -913,7 +1038,7 @@ static int ioctl_queue_iso(struct client *client, union ioctl_arg *arg)
struct fw_cdev_queue_iso *a = &arg->queue_iso;
struct fw_cdev_iso_packet __user *p, *end, *next;
struct fw_iso_context *ctx = client->iso_context;
- unsigned long payload, buffer_end, header_length;
+ unsigned long payload, buffer_end, transmit_header_bytes = 0;
u32 control;
int count;
struct {
@@ -933,7 +1058,6 @@ static int ioctl_queue_iso(struct client *client, union ioctl_arg *arg)
* use the indirect payload, the iso buffer need not be mapped
* and the a->data pointer is ignored.
*/
-
payload = (unsigned long)a->data - client->vm_start;
buffer_end = client->buffer.page_count << PAGE_SHIFT;
if (a->data == 0 || client->buffer.pages == NULL ||
@@ -942,8 +1066,10 @@ static int ioctl_queue_iso(struct client *client, union ioctl_arg *arg)
buffer_end = 0;
}
- p = (struct fw_cdev_iso_packet __user *)u64_to_uptr(a->packets);
+ if (ctx->type == FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL && payload & 3)
+ return -EINVAL;
+ p = (struct fw_cdev_iso_packet __user *)u64_to_uptr(a->packets);
if (!access_ok(VERIFY_READ, p, a->size))
return -EFAULT;
@@ -959,31 +1085,32 @@ static int ioctl_queue_iso(struct client *client, union ioctl_arg *arg)
u.packet.sy = GET_SY(control);
u.packet.header_length = GET_HEADER_LENGTH(control);
- if (ctx->type == FW_ISO_CONTEXT_TRANSMIT) {
- if (u.packet.header_length % 4 != 0)
+ switch (ctx->type) {
+ case FW_ISO_CONTEXT_TRANSMIT:
+ if (u.packet.header_length & 3)
+ return -EINVAL;
+ transmit_header_bytes = u.packet.header_length;
+ break;
+
+ case FW_ISO_CONTEXT_RECEIVE:
+ if (u.packet.header_length == 0 ||
+ u.packet.header_length % ctx->header_size != 0)
return -EINVAL;
- header_length = u.packet.header_length;
- } else {
- /*
- * We require that header_length is a multiple of
- * the fixed header size, ctx->header_size.
- */
- if (ctx->header_size == 0) {
- if (u.packet.header_length > 0)
- return -EINVAL;
- } else if (u.packet.header_length == 0 ||
- u.packet.header_length % ctx->header_size != 0) {
+ break;
+
+ case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
+ if (u.packet.payload_length == 0 ||
+ u.packet.payload_length & 3)
return -EINVAL;
- }
- header_length = 0;
+ break;
}
next = (struct fw_cdev_iso_packet __user *)
- &p->header[header_length / 4];
+ &p->header[transmit_header_bytes / 4];
if (next > end)
return -EINVAL;
if (__copy_from_user
- (u.packet.header, p->header, header_length))
+ (u.packet.header, p->header, transmit_header_bytes))
return -EFAULT;
if (u.packet.skip && ctx->type == FW_ISO_CONTEXT_TRANSMIT &&
u.packet.header_length + u.packet.payload_length > 0)
@@ -1011,6 +1138,13 @@ static int ioctl_start_iso(struct client *client, union ioctl_arg *arg)
{
struct fw_cdev_start_iso *a = &arg->start_iso;
+ BUILD_BUG_ON(
+ FW_CDEV_ISO_CONTEXT_MATCH_TAG0 != FW_ISO_CONTEXT_MATCH_TAG0 ||
+ FW_CDEV_ISO_CONTEXT_MATCH_TAG1 != FW_ISO_CONTEXT_MATCH_TAG1 ||
+ FW_CDEV_ISO_CONTEXT_MATCH_TAG2 != FW_ISO_CONTEXT_MATCH_TAG2 ||
+ FW_CDEV_ISO_CONTEXT_MATCH_TAG3 != FW_ISO_CONTEXT_MATCH_TAG3 ||
+ FW_CDEV_ISO_CONTEXT_MATCH_ALL_TAGS != FW_ISO_CONTEXT_MATCH_ALL_TAGS);
+
if (client->iso_context == NULL || a->handle != 0)
return -EINVAL;
@@ -1042,7 +1176,7 @@ static int ioctl_get_cycle_timer2(struct client *client, union ioctl_arg *arg)
local_irq_disable();
- cycle_time = card->driver->get_cycle_time(card);
+ cycle_time = card->driver->read_csr(card, CSR_CYCLE_TIME);
switch (a->clk_id) {
case CLOCK_REALTIME: getnstimeofday(&ts); break;
@@ -1323,28 +1457,135 @@ static int ioctl_send_stream_packet(struct client *client, union ioctl_arg *arg)
return init_request(client, &request, dest, a->speed);
}
+static void outbound_phy_packet_callback(struct fw_packet *packet,
+ struct fw_card *card, int status)
+{
+ struct outbound_phy_packet_event *e =
+ container_of(packet, struct outbound_phy_packet_event, p);
+
+ switch (status) {
+ /* expected: */
+ case ACK_COMPLETE: e->phy_packet.rcode = RCODE_COMPLETE; break;
+ /* should never happen with PHY packets: */
+ case ACK_PENDING: e->phy_packet.rcode = RCODE_COMPLETE; break;
+ case ACK_BUSY_X:
+ case ACK_BUSY_A:
+ case ACK_BUSY_B: e->phy_packet.rcode = RCODE_BUSY; break;
+ case ACK_DATA_ERROR: e->phy_packet.rcode = RCODE_DATA_ERROR; break;
+ case ACK_TYPE_ERROR: e->phy_packet.rcode = RCODE_TYPE_ERROR; break;
+ /* stale generation; cancelled; on certain controllers: no ack */
+ default: e->phy_packet.rcode = status; break;
+ }
+ e->phy_packet.data[0] = packet->timestamp;
+
+ queue_event(e->client, &e->event, &e->phy_packet,
+ sizeof(e->phy_packet) + e->phy_packet.length, NULL, 0);
+ client_put(e->client);
+}
+
+static int ioctl_send_phy_packet(struct client *client, union ioctl_arg *arg)
+{
+ struct fw_cdev_send_phy_packet *a = &arg->send_phy_packet;
+ struct fw_card *card = client->device->card;
+ struct outbound_phy_packet_event *e;
+
+ /* Access policy: Allow this ioctl only on local nodes' device files. */
+ if (!client->device->is_local)
+ return -ENOSYS;
+
+ e = kzalloc(sizeof(*e) + 4, GFP_KERNEL);
+ if (e == NULL)
+ return -ENOMEM;
+
+ client_get(client);
+ e->client = client;
+ e->p.speed = SCODE_100;
+ e->p.generation = a->generation;
+ e->p.header[0] = a->data[0];
+ e->p.header[1] = a->data[1];
+ e->p.header_length = 8;
+ e->p.callback = outbound_phy_packet_callback;
+ e->phy_packet.closure = a->closure;
+ e->phy_packet.type = FW_CDEV_EVENT_PHY_PACKET_SENT;
+ if (is_ping_packet(a->data))
+ e->phy_packet.length = 4;
+
+ card->driver->send_request(card, &e->p);
+
+ return 0;
+}
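A userspace sketch of the sending side (illustration only; the encoding of the first PHY quadlet is left to the caller, and FW_CDEV_IOC_SEND_PHY_PACKET is assumed from linux/firewire-cdev.h). The completion arrives as an FW_CDEV_EVENT_PHY_PACKET_SENT event carrying the rcode mapped in the callback above; inbound PHY packets are delivered as FW_CDEV_EVENT_PHY_PACKET_RECEIVED once the receive ioctl below has been issued.

	#include <sys/ioctl.h>
	#include <linux/firewire-cdev.h>

	static int send_phy_packet(int fd, __u32 first_quadlet, __u32 generation)
	{
		struct fw_cdev_send_phy_packet p = {
			.closure    = 0,
			.data       = { first_quadlet, ~first_quadlet },  /* 2nd quadlet = inverse */
			.generation = generation,	/* from the latest bus reset event */
		};

		return ioctl(fd, FW_CDEV_IOC_SEND_PHY_PACKET, &p);
	}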
+
+static int ioctl_receive_phy_packets(struct client *client, union ioctl_arg *arg)
+{
+ struct fw_cdev_receive_phy_packets *a = &arg->receive_phy_packets;
+ struct fw_card *card = client->device->card;
+
+ /* Access policy: Allow this ioctl only on local nodes' device files. */
+ if (!client->device->is_local)
+ return -ENOSYS;
+
+ spin_lock_irq(&card->lock);
+
+ list_move_tail(&client->phy_receiver_link, &card->phy_receiver_list);
+ client->phy_receiver_closure = a->closure;
+
+ spin_unlock_irq(&card->lock);
+
+ return 0;
+}
+
+void fw_cdev_handle_phy_packet(struct fw_card *card, struct fw_packet *p)
+{
+ struct client *client;
+ struct inbound_phy_packet_event *e;
+ unsigned long flags;
+
+ spin_lock_irqsave(&card->lock, flags);
+
+ list_for_each_entry(client, &card->phy_receiver_list, phy_receiver_link) {
+ e = kmalloc(sizeof(*e) + 8, GFP_ATOMIC);
+ if (e == NULL) {
+ fw_notify("Out of memory when allocating event\n");
+ break;
+ }
+ e->phy_packet.closure = client->phy_receiver_closure;
+ e->phy_packet.type = FW_CDEV_EVENT_PHY_PACKET_RECEIVED;
+ e->phy_packet.rcode = RCODE_COMPLETE;
+ e->phy_packet.length = 8;
+ e->phy_packet.data[0] = p->header[1];
+ e->phy_packet.data[1] = p->header[2];
+ queue_event(client, &e->event,
+ &e->phy_packet, sizeof(e->phy_packet) + 8, NULL, 0);
+ }
+
+ spin_unlock_irqrestore(&card->lock, flags);
+}
+
static int (* const ioctl_handlers[])(struct client *, union ioctl_arg *) = {
- ioctl_get_info,
- ioctl_send_request,
- ioctl_allocate,
- ioctl_deallocate,
- ioctl_send_response,
- ioctl_initiate_bus_reset,
- ioctl_add_descriptor,
- ioctl_remove_descriptor,
- ioctl_create_iso_context,
- ioctl_queue_iso,
- ioctl_start_iso,
- ioctl_stop_iso,
- ioctl_get_cycle_timer,
- ioctl_allocate_iso_resource,
- ioctl_deallocate_iso_resource,
- ioctl_allocate_iso_resource_once,
- ioctl_deallocate_iso_resource_once,
- ioctl_get_speed,
- ioctl_send_broadcast_request,
- ioctl_send_stream_packet,
- ioctl_get_cycle_timer2,
+ [0x00] = ioctl_get_info,
+ [0x01] = ioctl_send_request,
+ [0x02] = ioctl_allocate,
+ [0x03] = ioctl_deallocate,
+ [0x04] = ioctl_send_response,
+ [0x05] = ioctl_initiate_bus_reset,
+ [0x06] = ioctl_add_descriptor,
+ [0x07] = ioctl_remove_descriptor,
+ [0x08] = ioctl_create_iso_context,
+ [0x09] = ioctl_queue_iso,
+ [0x0a] = ioctl_start_iso,
+ [0x0b] = ioctl_stop_iso,
+ [0x0c] = ioctl_get_cycle_timer,
+ [0x0d] = ioctl_allocate_iso_resource,
+ [0x0e] = ioctl_deallocate_iso_resource,
+ [0x0f] = ioctl_allocate_iso_resource_once,
+ [0x10] = ioctl_deallocate_iso_resource_once,
+ [0x11] = ioctl_get_speed,
+ [0x12] = ioctl_send_broadcast_request,
+ [0x13] = ioctl_send_stream_packet,
+ [0x14] = ioctl_get_cycle_timer2,
+ [0x15] = ioctl_send_phy_packet,
+ [0x16] = ioctl_receive_phy_packets,
+ [0x17] = ioctl_set_iso_channels,
};
static int dispatch_ioctl(struct client *client,
@@ -1452,6 +1693,10 @@ static int fw_device_op_release(struct inode *inode, struct file *file)
struct client *client = file->private_data;
struct event *event, *next_event;
+ spin_lock_irq(&client->device->card->lock);
+ list_del(&client->phy_receiver_link);
+ spin_unlock_irq(&client->device->card->lock);
+
mutex_lock(&client->device->client_list_mutex);
list_del(&client->link);
mutex_unlock(&client->device->client_list_mutex);
diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
index 4b8523f00dce..6113b896e790 100644
--- a/drivers/firewire/core-device.c
+++ b/drivers/firewire/core-device.c
@@ -107,11 +107,11 @@ static int textual_leaf_to_string(const u32 *block, char *buf, size_t size)
}
/**
- * fw_csr_string - reads a string from the configuration ROM
- * @directory: e.g. root directory or unit directory
- * @key: the key of the preceding directory entry
- * @buf: where to put the string
- * @size: size of @buf, in bytes
+ * fw_csr_string() - reads a string from the configuration ROM
+ * @directory: e.g. root directory or unit directory
+ * @key: the key of the preceding directory entry
+ * @buf: where to put the string
+ * @size: size of @buf, in bytes
*
* The string is taken from a minimal ASCII text descriptor leaf after
* the immediate entry with @key. The string is zero-terminated.
@@ -1136,6 +1136,7 @@ static void fw_device_refresh(struct work_struct *work)
goto give_up;
}
+ fw_device_cdev_update(device);
create_units(device);
/* Userspace may want to re-read attributes. */
diff --git a/drivers/firewire/core-iso.c b/drivers/firewire/core-iso.c
index 8f5aebfb29df..c003fa4e2db1 100644
--- a/drivers/firewire/core-iso.c
+++ b/drivers/firewire/core-iso.c
@@ -118,6 +118,23 @@ void fw_iso_buffer_destroy(struct fw_iso_buffer *buffer,
}
EXPORT_SYMBOL(fw_iso_buffer_destroy);
+/* Convert DMA address to offset into virtually contiguous buffer. */
+size_t fw_iso_buffer_lookup(struct fw_iso_buffer *buffer, dma_addr_t completed)
+{
+ int i;
+ dma_addr_t address;
+ ssize_t offset;
+
+ for (i = 0; i < buffer->page_count; i++) {
+ address = page_private(buffer->pages[i]);
+ offset = (ssize_t)completed - (ssize_t)address;
+ if (offset > 0 && offset <= PAGE_SIZE)
+ return (i << PAGE_SHIFT) + offset;
+ }
+
+ return 0;
+}
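A worked example of the boundary convention (not in the patch), assuming 4 KiB pages:

	/*
	 * 'completed' points just past the last byte the controller filled in.
	 * If completed == page_private(buffer->pages[2]) + 4096, then for i == 2
	 * the offset is 4096 (> 0 and <= PAGE_SIZE) and the function returns
	 * (2 << 12) + 4096 = 12288 completed bytes.  An address exactly at the
	 * start of a page gives offset 0 for that page and is therefore
	 * attributed to the end of the preceding page.
	 */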
+
struct fw_iso_context *fw_iso_context_create(struct fw_card *card,
int type, int channel, int speed, size_t header_size,
fw_iso_callback_t callback, void *callback_data)
@@ -134,7 +151,7 @@ struct fw_iso_context *fw_iso_context_create(struct fw_card *card,
ctx->channel = channel;
ctx->speed = speed;
ctx->header_size = header_size;
- ctx->callback = callback;
+ ctx->callback.sc = callback;
ctx->callback_data = callback_data;
return ctx;
@@ -143,9 +160,7 @@ EXPORT_SYMBOL(fw_iso_context_create);
void fw_iso_context_destroy(struct fw_iso_context *ctx)
{
- struct fw_card *card = ctx->card;
-
- card->driver->free_iso_context(ctx);
+ ctx->card->driver->free_iso_context(ctx);
}
EXPORT_SYMBOL(fw_iso_context_destroy);
@@ -156,14 +171,17 @@ int fw_iso_context_start(struct fw_iso_context *ctx,
}
EXPORT_SYMBOL(fw_iso_context_start);
+int fw_iso_context_set_channels(struct fw_iso_context *ctx, u64 *channels)
+{
+ return ctx->card->driver->set_iso_channels(ctx, channels);
+}
+
int fw_iso_context_queue(struct fw_iso_context *ctx,
struct fw_iso_packet *packet,
struct fw_iso_buffer *buffer,
unsigned long payload)
{
- struct fw_card *card = ctx->card;
-
- return card->driver->queue_iso(ctx, packet, buffer, payload);
+ return ctx->card->driver->queue_iso(ctx, packet, buffer, payload);
}
EXPORT_SYMBOL(fw_iso_context_queue);
@@ -279,7 +297,7 @@ static void deallocate_channel(struct fw_card *card, int irm_id,
}
/**
- * fw_iso_resource_manage - Allocate or deallocate a channel and/or bandwidth
+ * fw_iso_resource_manage() - Allocate or deallocate a channel and/or bandwidth
*
* In parameters: card, generation, channels_mask, bandwidth, allocate
* Out parameters: channel, bandwidth
diff --git a/drivers/firewire/core-topology.c b/drivers/firewire/core-topology.c
index 93ec64cdeef7..09be1a635505 100644
--- a/drivers/firewire/core-topology.c
+++ b/drivers/firewire/core-topology.c
@@ -174,12 +174,7 @@ static inline struct fw_node *fw_node(struct list_head *l)
return list_entry(l, struct fw_node, link);
}
-/**
- * build_tree - Build the tree representation of the topology
- * @self_ids: array of self IDs to create the tree from
- * @self_id_count: the length of the self_ids array
- * @local_id: the node ID of the local node
- *
+/*
* This function builds the tree representation of the topology given
* by the self IDs from the latest bus reset. During the construction
* of the tree, the function checks that the self IDs are valid and
@@ -420,11 +415,10 @@ static void move_tree(struct fw_node *node0, struct fw_node *node1, int port)
}
}
-/**
- * update_tree - compare the old topology tree for card with the new
- * one specified by root. Queue the nodes and mark them as either
- * found, lost or updated. Update the nodes in the card topology tree
- * as we go.
+/*
+ * Compare the old topology tree for card with the new one specified by root.
+ * Queue the nodes and mark them as either found, lost or updated.
+ * Update the nodes in the card topology tree as we go.
*/
static void update_tree(struct fw_card *card, struct fw_node *root)
{
@@ -524,7 +518,7 @@ static void update_topology_map(struct fw_card *card,
}
void fw_core_handle_bus_reset(struct fw_card *card, int node_id, int generation,
- int self_id_count, u32 *self_ids)
+ int self_id_count, u32 *self_ids, bool bm_abdicate)
{
struct fw_node *local_node;
unsigned long flags;
@@ -543,7 +537,7 @@ void fw_core_handle_bus_reset(struct fw_card *card, int node_id, int generation,
spin_lock_irqsave(&card->lock, flags);
- card->broadcast_channel_allocated = false;
+ card->broadcast_channel_allocated = card->broadcast_channel_auto_allocated;
card->node_id = node_id;
/*
* Update node_id before generation to prevent anybody from using
@@ -552,6 +546,8 @@ void fw_core_handle_bus_reset(struct fw_card *card, int node_id, int generation,
smp_wmb();
card->generation = generation;
card->reset_jiffies = jiffies;
+ card->bm_node_id = 0xffff;
+ card->bm_abdicate = bm_abdicate;
fw_schedule_bm_work(card, 0);
local_node = build_tree(card, self_ids, self_id_count);
diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
index fdc33ff06dc1..ca7ca56661e0 100644
--- a/drivers/firewire/core-transaction.c
+++ b/drivers/firewire/core-transaction.c
@@ -246,7 +246,7 @@ static void fw_fill_request(struct fw_packet *packet, int tcode, int tlabel,
break;
default:
- WARN(1, KERN_ERR "wrong tcode %d", tcode);
+ WARN(1, "wrong tcode %d", tcode);
}
common:
packet->speed = speed;
@@ -273,43 +273,52 @@ static int allocate_tlabel(struct fw_card *card)
}
/**
- * This function provides low-level access to the IEEE1394 transaction
- * logic. Most C programs would use either fw_read(), fw_write() or
- * fw_lock() instead - those function are convenience wrappers for
- * this function. The fw_send_request() function is primarily
- * provided as a flexible, one-stop entry point for languages bindings
- * and protocol bindings.
+ * fw_send_request() - submit a request packet for transmission
+ * @card: interface to send the request at
+ * @t: transaction instance to which the request belongs
+ * @tcode: transaction code
+ * @destination_id: destination node ID, consisting of bus_ID and phy_ID
+ * @generation: bus generation in which request and response are valid
+ * @speed: transmission speed
+ * @offset: 48bit wide offset into destination's address space
+ * @payload: data payload for the request subaction
+ * @length: length of the payload, in bytes
+ * @callback: function to be called when the transaction is completed
+ * @callback_data: data to be passed to the transaction completion callback
*
- * FIXME: Document this function further, in particular the possible
- * values for rcode in the callback. In short, we map ACK_COMPLETE to
- * RCODE_COMPLETE, internal errors set errno and set rcode to
- * RCODE_SEND_ERROR (which is out of range for standard ieee1394
- * rcodes). All other rcodes are forwarded unchanged. For all
- * errors, payload is NULL, length is 0.
+ * Submit a request packet into the asynchronous request transmission queue.
+ * Can be called from atomic context. If you prefer a blocking API, use
+ * fw_run_transaction() in a context that can sleep.
*
- * Can not expect the callback to be called before the function
- * returns, though this does happen in some cases (ACK_COMPLETE and
- * errors).
+ * In case of lock requests, specify one of the firewire-core specific %TCODE_
+ * constants instead of %TCODE_LOCK_REQUEST in @tcode.
*
- * The payload is only used for write requests and must not be freed
- * until the callback has been called.
+ * Make sure that the value in @destination_id is not older than the one in
+ * @generation. Otherwise the request is in danger to be sent to a wrong node.
*
- * @param card the card from which to send the request
- * @param tcode the tcode for this transaction. Do not use
- * TCODE_LOCK_REQUEST directly, instead use TCODE_LOCK_MASK_SWAP
- * etc. to specify tcode and ext_tcode.
- * @param node_id the destination node ID (bus ID and PHY ID concatenated)
- * @param generation the generation for which node_id is valid
- * @param speed the speed to use for sending the request
- * @param offset the 48 bit offset on the destination node
- * @param payload the data payload for the request subaction
- * @param length the length in bytes of the data to read
- * @param callback function to be called when the transaction is completed
- * @param callback_data pointer to arbitrary data, which will be
- * passed to the callback
- *
- * In case of asynchronous stream packets i.e. TCODE_STREAM_DATA, the caller
+ * In case of asynchronous stream packets i.e. %TCODE_STREAM_DATA, the caller
* needs to synthesize @destination_id with fw_stream_packet_destination_id().
+ * It will contain tag, channel, and sy data instead of a node ID then.
+ *
+ * The payload buffer at @data is going to be DMA-mapped except in case of
+ * quadlet-sized payload or of local (loopback) requests. Hence make sure that
+ * the buffer complies with the restrictions for DMA-mapped memory. The
+ * @payload must not be freed before the @callback is called.
+ *
+ * In case of request types without payload, @data is NULL and @length is 0.
+ *
+ * After the transaction is completed successfully or unsuccessfully, the
+ * @callback will be called. Among its parameters is the response code which
+ * is either one of the rcodes per IEEE 1394 or, in case of internal errors,
+ * the firewire-core specific %RCODE_SEND_ERROR. The other firewire-core
+ * specific rcodes (%RCODE_CANCELLED, %RCODE_BUSY, %RCODE_GENERATION,
+ * %RCODE_NO_ACK) denote transaction timeout, busy responder, stale request
+ * generation, or missing ACK respectively.
+ *
+ * Note some timing corner cases: fw_send_request() may complete much earlier
+ * than when the request packet actually hits the wire. On the other hand,
+ * transaction completion and hence execution of @callback may happen even
+ * before fw_send_request() returns.
*/
void fw_send_request(struct fw_card *card, struct fw_transaction *t, int tcode,
int destination_id, int generation, int speed,
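To make the documented calling convention concrete, a minimal caller sketch (not from the patch; my_read_done() and my_start_read() are invented, and the read target is the node's CYCLE_TIME register):

	static void my_read_done(struct fw_card *card, int rcode,
				 void *payload, size_t length, void *data)
	{
		if (rcode == RCODE_COMPLETE)
			pr_info("CYCLE_TIME = %08x\n", be32_to_cpup(payload));
		else
			pr_info("read failed, rcode %d\n", rcode);
	}

	static void my_start_read(struct fw_device *dev, struct fw_transaction *t)
	{
		/* quadlet read: no request payload, response delivered to the callback */
		fw_send_request(dev->card, t, TCODE_READ_QUADLET_REQUEST,
				dev->node_id, dev->generation, dev->max_speed,
				CSR_REGISTER_BASE + CSR_CYCLE_TIME,
				NULL, 0, my_read_done, NULL);
	}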
@@ -339,7 +348,8 @@ void fw_send_request(struct fw_card *card, struct fw_transaction *t, int tcode,
setup_timer(&t->split_timeout_timer,
split_transaction_timeout_callback, (unsigned long)t);
/* FIXME: start this timer later, relative to t->timestamp */
- mod_timer(&t->split_timeout_timer, jiffies + DIV_ROUND_UP(HZ, 10));
+ mod_timer(&t->split_timeout_timer,
+ jiffies + card->split_timeout_jiffies);
t->callback = callback;
t->callback_data = callback_data;
@@ -374,9 +384,11 @@ static void transaction_callback(struct fw_card *card, int rcode,
}
/**
- * fw_run_transaction - send request and sleep until transaction is completed
+ * fw_run_transaction() - send request and sleep until transaction is completed
*
- * Returns the RCODE.
+ * Returns the RCODE. See fw_send_request() for parameter documentation.
+ * Unlike fw_send_request(), @data points to the payload of the request or/and
+ * to the payload of the response.
*/
int fw_run_transaction(struct fw_card *card, int tcode, int destination_id,
int generation, int speed, unsigned long long offset,
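And the blocking counterpart, sketched under the same assumptions:

	static int my_read_quadlet(struct fw_device *dev, __be32 *quadlet)
	{
		/* sleeps until completion; returns one of the RCODE_* values */
		return fw_run_transaction(dev->card, TCODE_READ_QUADLET_REQUEST,
					  dev->node_id, dev->generation, dev->max_speed,
					  CSR_REGISTER_BASE + CSR_CYCLE_TIME,
					  quadlet, sizeof(*quadlet));
	}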
@@ -417,9 +429,21 @@ void fw_send_phy_config(struct fw_card *card,
int node_id, int generation, int gap_count)
{
long timeout = DIV_ROUND_UP(HZ, 10);
- u32 data = PHY_IDENTIFIER(PHY_PACKET_CONFIG) |
- PHY_CONFIG_ROOT_ID(node_id) |
- PHY_CONFIG_GAP_COUNT(gap_count);
+ u32 data = PHY_IDENTIFIER(PHY_PACKET_CONFIG);
+
+ if (node_id != FW_PHY_CONFIG_NO_NODE_ID)
+ data |= PHY_CONFIG_ROOT_ID(node_id);
+
+ if (gap_count == FW_PHY_CONFIG_CURRENT_GAP_COUNT) {
+ gap_count = card->driver->read_phy_reg(card, 1);
+ if (gap_count < 0)
+ return;
+
+ gap_count &= 63;
+ if (gap_count == 63)
+ return;
+ }
+ data |= PHY_CONFIG_GAP_COUNT(gap_count);
mutex_lock(&phy_config_mutex);
@@ -494,9 +518,9 @@ static bool is_in_fcp_region(u64 offset, size_t length)
}
/**
- * fw_core_add_address_handler - register for incoming requests
- * @handler: callback
- * @region: region in the IEEE 1212 node space address range
+ * fw_core_add_address_handler() - register for incoming requests
+ * @handler: callback
+ * @region: region in the IEEE 1212 node space address range
*
* region->start, ->end, and handler->length have to be quadlet-aligned.
*
@@ -519,8 +543,8 @@ int fw_core_add_address_handler(struct fw_address_handler *handler,
int ret = -EBUSY;
if (region->start & 0xffff000000000003ULL ||
- region->end & 0xffff000000000003ULL ||
region->start >= region->end ||
+ region->end > 0x0001000000000000ULL ||
handler->length & 3 ||
handler->length == 0)
return -EINVAL;
@@ -551,7 +575,7 @@ int fw_core_add_address_handler(struct fw_address_handler *handler,
EXPORT_SYMBOL(fw_core_add_address_handler);
/**
- * fw_core_remove_address_handler - unregister an address handler
+ * fw_core_remove_address_handler() - unregister an address handler
*/
void fw_core_remove_address_handler(struct fw_address_handler *handler)
{
@@ -580,6 +604,41 @@ static void free_response_callback(struct fw_packet *packet,
kfree(request);
}
+int fw_get_response_length(struct fw_request *r)
+{
+ int tcode, ext_tcode, data_length;
+
+ tcode = HEADER_GET_TCODE(r->request_header[0]);
+
+ switch (tcode) {
+ case TCODE_WRITE_QUADLET_REQUEST:
+ case TCODE_WRITE_BLOCK_REQUEST:
+ return 0;
+
+ case TCODE_READ_QUADLET_REQUEST:
+ return 4;
+
+ case TCODE_READ_BLOCK_REQUEST:
+ data_length = HEADER_GET_DATA_LENGTH(r->request_header[3]);
+ return data_length;
+
+ case TCODE_LOCK_REQUEST:
+ ext_tcode = HEADER_GET_EXTENDED_TCODE(r->request_header[3]);
+ data_length = HEADER_GET_DATA_LENGTH(r->request_header[3]);
+ switch (ext_tcode) {
+ case EXTCODE_FETCH_ADD:
+ case EXTCODE_LITTLE_ADD:
+ return data_length;
+ default:
+ return data_length / 2;
+ }
+
+ default:
+ WARN(1, "wrong tcode %d", tcode);
+ return 0;
+ }
+}
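One subtlety behind the data_length / 2 case: lock requests other than fetch_add/little_add carry two operands, while the response carries only one.

	/*
	 * Example: a 32-bit compare_swap request has data_length 8
	 * (arg_value followed by data_value); its response carries only the
	 * 4-byte old value, hence data_length / 2.  fetch_add and little_add
	 * send a single operand, so request and response payloads match.
	 */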
+
void fw_fill_response(struct fw_packet *response, u32 *request_header,
int rcode, void *payload, size_t length)
{
@@ -631,18 +690,35 @@ void fw_fill_response(struct fw_packet *response, u32 *request_header,
break;
default:
- WARN(1, KERN_ERR "wrong tcode %d", tcode);
+ WARN(1, "wrong tcode %d", tcode);
}
response->payload_mapped = false;
}
EXPORT_SYMBOL(fw_fill_response);
-static struct fw_request *allocate_request(struct fw_packet *p)
+static u32 compute_split_timeout_timestamp(struct fw_card *card,
+ u32 request_timestamp)
+{
+ unsigned int cycles;
+ u32 timestamp;
+
+ cycles = card->split_timeout_cycles;
+ cycles += request_timestamp & 0x1fff;
+
+ timestamp = request_timestamp & ~0x1fff;
+ timestamp += (cycles / 8000) << 13;
+ timestamp |= cycles % 8000;
+
+ return timestamp;
+}
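A worked example (illustration) of the carry into the seconds field:

	/*
	 * With the default split_timeout_cycles of 800 (100 ms) and a request
	 * stamped at seconds field 1, cycle 7500:
	 *
	 *	cycles    = 800 + 7500 = 8300
	 *	timestamp = (1 << 13)			seconds field preserved
	 *	          + (8300 / 8000) << 13		carry of one second
	 *	          | (8300 % 8000)		300 remaining cycles
	 *	          = (2 << 13) | 300
	 */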
+
+static struct fw_request *allocate_request(struct fw_card *card,
+ struct fw_packet *p)
{
struct fw_request *request;
u32 *data, length;
- int request_tcode, t;
+ int request_tcode;
request_tcode = HEADER_GET_TCODE(p->header[0]);
switch (request_tcode) {
@@ -677,14 +753,9 @@ static struct fw_request *allocate_request(struct fw_packet *p)
if (request == NULL)
return NULL;
- t = (p->timestamp & 0x1fff) + 4000;
- if (t >= 8000)
- t = (p->timestamp & ~0x1fff) + 0x2000 + t - 8000;
- else
- t = (p->timestamp & ~0x1fff) + t;
-
request->response.speed = p->speed;
- request->response.timestamp = t;
+ request->response.timestamp =
+ compute_split_timeout_timestamp(card, p->timestamp);
request->response.generation = p->generation;
request->response.ack = 0;
request->response.callback = free_response_callback;
@@ -713,7 +784,8 @@ void fw_send_response(struct fw_card *card,
if (rcode == RCODE_COMPLETE)
fw_fill_response(&request->response, request->request_header,
- rcode, request->data, request->length);
+ rcode, request->data,
+ fw_get_response_length(request));
else
fw_fill_response(&request->response, request->request_header,
rcode, NULL, 0);
@@ -731,9 +803,11 @@ static void handle_exclusive_region_request(struct fw_card *card,
unsigned long flags;
int tcode, destination, source;
- tcode = HEADER_GET_TCODE(p->header[0]);
destination = HEADER_GET_DESTINATION(p->header[0]);
source = HEADER_GET_SOURCE(p->header[1]);
+ tcode = HEADER_GET_TCODE(p->header[0]);
+ if (tcode == TCODE_LOCK_REQUEST)
+ tcode = 0x10 + HEADER_GET_EXTENDED_TCODE(p->header[3]);
spin_lock_irqsave(&address_handler_lock, flags);
handler = lookup_enclosing_address_handler(&address_handler_list,
@@ -753,7 +827,7 @@ static void handle_exclusive_region_request(struct fw_card *card,
else
handler->address_callback(card, request,
tcode, destination, source,
- p->generation, p->speed, offset,
+ p->generation, offset,
request->data, request->length,
handler->callback_data);
}
@@ -791,8 +865,8 @@ static void handle_fcp_region_request(struct fw_card *card,
if (is_enclosing_handler(handler, offset, request->length))
handler->address_callback(card, NULL, tcode,
destination, source,
- p->generation, p->speed,
- offset, request->data,
+ p->generation, offset,
+ request->data,
request->length,
handler->callback_data);
}
@@ -809,7 +883,12 @@ void fw_core_handle_request(struct fw_card *card, struct fw_packet *p)
if (p->ack != ACK_PENDING && p->ack != ACK_COMPLETE)
return;
- request = allocate_request(p);
+ if (TCODE_IS_LINK_INTERNAL(HEADER_GET_TCODE(p->header[0]))) {
+ fw_cdev_handle_phy_packet(card, p);
+ return;
+ }
+
+ request = allocate_request(card, p);
if (request == NULL) {
/* FIXME: send statically allocated busy packet. */
return;
@@ -832,13 +911,12 @@ void fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
unsigned long flags;
u32 *data;
size_t data_length;
- int tcode, tlabel, destination, source, rcode;
+ int tcode, tlabel, source, rcode;
- tcode = HEADER_GET_TCODE(p->header[0]);
- tlabel = HEADER_GET_TLABEL(p->header[0]);
- destination = HEADER_GET_DESTINATION(p->header[0]);
- source = HEADER_GET_SOURCE(p->header[1]);
- rcode = HEADER_GET_RCODE(p->header[1]);
+ tcode = HEADER_GET_TCODE(p->header[0]);
+ tlabel = HEADER_GET_TLABEL(p->header[0]);
+ source = HEADER_GET_SOURCE(p->header[1]);
+ rcode = HEADER_GET_RCODE(p->header[1]);
spin_lock_irqsave(&card->lock, flags);
list_for_each_entry(t, &card->transaction_list, link) {
@@ -903,8 +981,8 @@ static const struct fw_address_region topology_map_region =
static void handle_topology_map(struct fw_card *card, struct fw_request *request,
int tcode, int destination, int source, int generation,
- int speed, unsigned long long offset,
- void *payload, size_t length, void *callback_data)
+ unsigned long long offset, void *payload, size_t length,
+ void *callback_data)
{
int start;
@@ -933,19 +1011,97 @@ static const struct fw_address_region registers_region =
{ .start = CSR_REGISTER_BASE,
.end = CSR_REGISTER_BASE | CSR_CONFIG_ROM, };
+static void update_split_timeout(struct fw_card *card)
+{
+ unsigned int cycles;
+
+ cycles = card->split_timeout_hi * 8000 + (card->split_timeout_lo >> 19);
+
+ cycles = max(cycles, 800u); /* minimum as per the spec */
+ cycles = min(cycles, 3u * 8000u); /* maximum OHCI timeout */
+
+ card->split_timeout_cycles = cycles;
+ card->split_timeout_jiffies = DIV_ROUND_UP(cycles * HZ, 8000);
+}
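
The conversion above relies on the CSR encoding also visible in handle_registers() below: SPLIT_TIMEOUT_HI keeps seconds in its low three bits and SPLIT_TIMEOUT_LO keeps 1/8000 s cycles in bits 31..19. A standalone sketch of the same conversion for a 100 ms value (illustration only; HZ is assumed to be 250 purely for the numbers):

    /* Standalone illustration of update_split_timeout() for a 100 ms timeout:
     * HI = 0 seconds, LO = 800 cycles stored in bits 31..19. */
    #include <stdint.h>
    #include <stdio.h>

    #define HZ 250  /* assumed tick rate, only for this example */

    int main(void)
    {
        uint32_t hi = 0, lo = 800u << 19;
        unsigned int cycles = hi * 8000 + (lo >> 19);

        if (cycles < 800)       /* minimum as per the spec */
            cycles = 800;
        if (cycles > 3 * 8000)  /* maximum OHCI timeout */
            cycles = 3 * 8000;

        /* DIV_ROUND_UP(cycles * HZ, 8000) */
        printf("%u cycles -> %u jiffies\n", cycles, (cycles * HZ + 7999) / 8000);
        return 0;  /* prints "800 cycles -> 25 jiffies", i.e. 100 ms at HZ=250 */
    }
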
+
static void handle_registers(struct fw_card *card, struct fw_request *request,
int tcode, int destination, int source, int generation,
- int speed, unsigned long long offset,
- void *payload, size_t length, void *callback_data)
+ unsigned long long offset, void *payload, size_t length,
+ void *callback_data)
{
int reg = offset & ~CSR_REGISTER_BASE;
__be32 *data = payload;
int rcode = RCODE_COMPLETE;
+ unsigned long flags;
switch (reg) {
+ case CSR_PRIORITY_BUDGET:
+ if (!card->priority_budget_implemented) {
+ rcode = RCODE_ADDRESS_ERROR;
+ break;
+ }
+ /* else fall through */
+
+ case CSR_NODE_IDS:
+ /*
+ * per IEEE 1394-2008 8.3.22.3, not IEEE 1394.1-2004 3.2.8
+ * and 9.6, but interoperable with IEEE 1394.1-2004 bridges
+ */
+ /* fall through */
+
+ case CSR_STATE_CLEAR:
+ case CSR_STATE_SET:
case CSR_CYCLE_TIME:
- if (TCODE_IS_READ_REQUEST(tcode) && length == 4)
- *data = cpu_to_be32(card->driver->get_cycle_time(card));
+ case CSR_BUS_TIME:
+ case CSR_BUSY_TIMEOUT:
+ if (tcode == TCODE_READ_QUADLET_REQUEST)
+ *data = cpu_to_be32(card->driver->read_csr(card, reg));
+ else if (tcode == TCODE_WRITE_QUADLET_REQUEST)
+ card->driver->write_csr(card, reg, be32_to_cpu(*data));
+ else
+ rcode = RCODE_TYPE_ERROR;
+ break;
+
+ case CSR_RESET_START:
+ if (tcode == TCODE_WRITE_QUADLET_REQUEST)
+ card->driver->write_csr(card, CSR_STATE_CLEAR,
+ CSR_STATE_BIT_ABDICATE);
+ else
+ rcode = RCODE_TYPE_ERROR;
+ break;
+
+ case CSR_SPLIT_TIMEOUT_HI:
+ if (tcode == TCODE_READ_QUADLET_REQUEST) {
+ *data = cpu_to_be32(card->split_timeout_hi);
+ } else if (tcode == TCODE_WRITE_QUADLET_REQUEST) {
+ spin_lock_irqsave(&card->lock, flags);
+ card->split_timeout_hi = be32_to_cpu(*data) & 7;
+ update_split_timeout(card);
+ spin_unlock_irqrestore(&card->lock, flags);
+ } else {
+ rcode = RCODE_TYPE_ERROR;
+ }
+ break;
+
+ case CSR_SPLIT_TIMEOUT_LO:
+ if (tcode == TCODE_READ_QUADLET_REQUEST) {
+ *data = cpu_to_be32(card->split_timeout_lo);
+ } else if (tcode == TCODE_WRITE_QUADLET_REQUEST) {
+ spin_lock_irqsave(&card->lock, flags);
+ card->split_timeout_lo =
+ be32_to_cpu(*data) & 0xfff80000;
+ update_split_timeout(card);
+ spin_unlock_irqrestore(&card->lock, flags);
+ } else {
+ rcode = RCODE_TYPE_ERROR;
+ }
+ break;
+
+ case CSR_MAINT_UTILITY:
+ if (tcode == TCODE_READ_QUADLET_REQUEST)
+ *data = card->maint_utility_register;
+ else if (tcode == TCODE_WRITE_QUADLET_REQUEST)
+ card->maint_utility_register = *data;
else
rcode = RCODE_TYPE_ERROR;
break;
@@ -975,12 +1131,6 @@ static void handle_registers(struct fw_card *card, struct fw_request *request,
BUG();
break;
- case CSR_BUSY_TIMEOUT:
- /* FIXME: Implement this. */
-
- case CSR_BUS_TIME:
- /* Useless without initialization by the bus manager. */
-
default:
rcode = RCODE_ADDRESS_ERROR;
break;
diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
index 0ecfcd95f4c5..e6239f971be6 100644
--- a/drivers/firewire/core.h
+++ b/drivers/firewire/core.h
@@ -38,6 +38,9 @@ struct fw_packet;
#define BROADCAST_CHANNEL_INITIAL (1 << 31 | 31)
#define BROADCAST_CHANNEL_VALID (1 << 30)
+#define CSR_STATE_BIT_CMSTR (1 << 8)
+#define CSR_STATE_BIT_ABDICATE (1 << 10)
+
struct fw_card_driver {
/*
* Enable the given card with the given initial config rom.
@@ -48,6 +51,7 @@ struct fw_card_driver {
int (*enable)(struct fw_card *card,
const __be32 *config_rom, size_t length);
+ int (*read_phy_reg)(struct fw_card *card, int address);
int (*update_phy_reg)(struct fw_card *card, int address,
int clear_bits, int set_bits);
@@ -75,7 +79,8 @@ struct fw_card_driver {
int (*enable_phys_dma)(struct fw_card *card,
int node_id, int generation);
- u32 (*get_cycle_time)(struct fw_card *card);
+ u32 (*read_csr)(struct fw_card *card, int csr_offset);
+ void (*write_csr)(struct fw_card *card, int csr_offset, u32 value);
struct fw_iso_context *
(*allocate_iso_context)(struct fw_card *card,
@@ -85,6 +90,8 @@ struct fw_card_driver {
int (*start_iso)(struct fw_iso_context *ctx,
s32 cycle, u32 sync, u32 tags);
+ int (*set_iso_channels)(struct fw_iso_context *ctx, u64 *channels);
+
int (*queue_iso)(struct fw_iso_context *ctx,
struct fw_iso_packet *packet,
struct fw_iso_buffer *buffer,
@@ -98,8 +105,8 @@ void fw_card_initialize(struct fw_card *card,
int fw_card_add(struct fw_card *card,
u32 max_receive, u32 link_speed, u64 guid);
void fw_core_remove_card(struct fw_card *card);
-int fw_core_initiate_bus_reset(struct fw_card *card, int short_reset);
int fw_compute_block_crc(__be32 *block);
+void fw_schedule_bus_reset(struct fw_card *card, bool delayed, bool short_reset);
void fw_schedule_bm_work(struct fw_card *card, unsigned long delay);
static inline struct fw_card *fw_card_get(struct fw_card *card)
@@ -123,6 +130,7 @@ extern const struct file_operations fw_device_ops;
void fw_device_cdev_update(struct fw_device *device);
void fw_device_cdev_remove(struct fw_device *device);
+void fw_cdev_handle_phy_packet(struct fw_card *card, struct fw_packet *p);
/* -device */
@@ -192,7 +200,7 @@ static inline void fw_node_put(struct fw_node *node)
}
void fw_core_handle_bus_reset(struct fw_card *card, int node_id,
- int generation, int self_id_count, u32 *self_ids);
+ int generation, int self_id_count, u32 *self_ids, bool bm_abdicate);
void fw_destroy_nodes(struct fw_card *card);
/*
@@ -209,6 +217,7 @@ static inline bool is_next_generation(int new_generation, int old_generation)
#define TCODE_IS_READ_REQUEST(tcode) (((tcode) & ~1) == 4)
#define TCODE_IS_BLOCK_PACKET(tcode) (((tcode) & 1) != 0)
+#define TCODE_IS_LINK_INTERNAL(tcode) ((tcode) == 0xe)
#define TCODE_IS_REQUEST(tcode) (((tcode) & 2) == 0)
#define TCODE_IS_RESPONSE(tcode) (((tcode) & 2) != 0)
#define TCODE_HAS_REQUEST_DATA(tcode) (((tcode) & 12) != 4)
@@ -218,9 +227,18 @@ static inline bool is_next_generation(int new_generation, int old_generation)
void fw_core_handle_request(struct fw_card *card, struct fw_packet *request);
void fw_core_handle_response(struct fw_card *card, struct fw_packet *packet);
+int fw_get_response_length(struct fw_request *request);
void fw_fill_response(struct fw_packet *response, u32 *request_header,
int rcode, void *payload, size_t length);
+
+#define FW_PHY_CONFIG_NO_NODE_ID -1
+#define FW_PHY_CONFIG_CURRENT_GAP_COUNT -1
void fw_send_phy_config(struct fw_card *card,
int node_id, int generation, int gap_count);
+static inline bool is_ping_packet(u32 *data)
+{
+ return (data[0] & 0xc0ffffff) == 0 && ~data[0] == data[1];
+}
+
#endif /* _FIREWIRE_CORE_H */
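
is_ping_packet() above keys on the shape of a PHY ping packet: bits 31..30 and 23..0 of the first quadlet are zero, leaving only the target PHY ID in bits 29..24, and the second quadlet is the bitwise inverse of the first. A small illustration (not from the patch):

    /* Illustration only: a ping packet addressed to PHY 3, as it would appear
     * in packet->header[] before the check above. */
    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t data[2];

        data[0] = 3u << 24;   /* type 00, phy_id 3, remaining bits zero */
        data[1] = ~data[0];   /* PHY packets repeat the first quadlet inverted */

        assert((data[0] & 0xc0ffffff) == 0 && ~data[0] == data[1]);
        return 0;
    }
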
diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c
index 7142eeec8074..da17d409a244 100644
--- a/drivers/firewire/net.c
+++ b/drivers/firewire/net.c
@@ -806,8 +806,8 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len,
static void fwnet_receive_packet(struct fw_card *card, struct fw_request *r,
int tcode, int destination, int source, int generation,
- int speed, unsigned long long offset, void *payload,
- size_t length, void *callback_data)
+ unsigned long long offset, void *payload, size_t length,
+ void *callback_data)
{
struct fwnet_device *dev = callback_data;
int rcode;
diff --git a/drivers/firewire/nosy-user.h b/drivers/firewire/nosy-user.h
new file mode 100644
index 000000000000..e48aa6200c72
--- /dev/null
+++ b/drivers/firewire/nosy-user.h
@@ -0,0 +1,25 @@
+#ifndef __nosy_user_h
+#define __nosy_user_h
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+#define NOSY_IOC_GET_STATS _IOR('&', 0, struct nosy_stats)
+#define NOSY_IOC_START _IO('&', 1)
+#define NOSY_IOC_STOP _IO('&', 2)
+#define NOSY_IOC_FILTER _IOW('&', 2, __u32)
+
+struct nosy_stats {
+ __u32 total_packet_count;
+ __u32 lost_packet_count;
+};
+
+/*
+ * Format of packets returned from the kernel driver:
+ *
+ * quadlet with timestamp (microseconds, CPU endian)
+ * quadlet-padded packet data... (little endian)
+ * quadlet with ack (little endian)
+ */
+
+#endif /* __nosy_user_h */
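
Given that framing, a minimal userspace reader looks roughly like the sketch below. It assumes the misc device shows up as /dev/nosy (the driver registers a misc device named "nosy") and that nosy-user.h is on the include path; each read() returns exactly one record, and a 4-byte record (timestamp only) is the bus-reset marker described further down in nosy.c. Error handling is mostly omitted.

    /* Minimal nosy reader sketch (userspace, illustration only). */
    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include "nosy-user.h"

    int main(void)
    {
        char buf[16 * 1024];
        int fd = open("/dev/nosy", O_RDONLY);   /* assumed device node */

        if (fd < 0 || ioctl(fd, NOSY_IOC_START) < 0)
            return 1;

        for (;;) {
            ssize_t len = read(fd, buf, sizeof buf);  /* one record per read */
            uint32_t timestamp, ack;

            if (len < 4)
                break;
            memcpy(&timestamp, buf, 4);          /* CPU endian, microseconds */
            if (len == 4) {                      /* timestamp-only record: bus reset */
                printf("%u us: bus reset\n", timestamp);
                continue;
            }
            memcpy(&ack, buf + len - 4, 4);      /* trailing ack quadlet, little endian */
            printf("%u us: %zd data bytes, ack quadlet %08x\n",
                   timestamp, len - 8, ack);
        }
        ioctl(fd, NOSY_IOC_STOP);
        close(fd);
        return 0;
    }
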
diff --git a/drivers/firewire/nosy.c b/drivers/firewire/nosy.c
new file mode 100644
index 000000000000..8528b10763ed
--- /dev/null
+++ b/drivers/firewire/nosy.c
@@ -0,0 +1,721 @@
+/*
+ * nosy - Snoop mode driver for TI PCILynx 1394 controllers
+ * Copyright (C) 2002-2007 Kristian Høgsberg
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/kref.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/poll.h>
+#include <linux/sched.h> /* required for linux/wait.h */
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/timex.h>
+#include <linux/uaccess.h>
+#include <linux/wait.h>
+
+#include <asm/atomic.h>
+#include <asm/byteorder.h>
+
+#include "nosy.h"
+#include "nosy-user.h"
+
+#define TCODE_PHY_PACKET 0x10
+#define PCI_DEVICE_ID_TI_PCILYNX 0x8000
+
+static char driver_name[] = KBUILD_MODNAME;
+
+/* this is the physical layout of a PCL, its size is 128 bytes */
+struct pcl {
+ __le32 next;
+ __le32 async_error_next;
+ u32 user_data;
+ __le32 pcl_status;
+ __le32 remaining_transfer_count;
+ __le32 next_data_buffer;
+ struct {
+ __le32 control;
+ __le32 pointer;
+ } buffer[13];
+};
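
The 128-byte figure in the comment is just arithmetic: six header quadlets plus 13 two-quadlet buffer descriptors is 32 quadlets. A hypothetical one-line build-time check, e.g. at the top of add_card(), would be:

    BUILD_BUG_ON(sizeof(struct pcl) != 128);  /* (6 + 13 * 2) * 4 bytes */
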
+
+struct packet {
+ unsigned int length;
+ char data[0];
+};
+
+struct packet_buffer {
+ char *data;
+ size_t capacity;
+ long total_packet_count, lost_packet_count;
+ atomic_t size;
+ struct packet *head, *tail;
+ wait_queue_head_t wait;
+};
+
+struct pcilynx {
+ struct pci_dev *pci_device;
+ __iomem char *registers;
+
+ struct pcl *rcv_start_pcl, *rcv_pcl;
+ __le32 *rcv_buffer;
+
+ dma_addr_t rcv_start_pcl_bus, rcv_pcl_bus, rcv_buffer_bus;
+
+ spinlock_t client_list_lock;
+ struct list_head client_list;
+
+ struct miscdevice misc;
+ struct list_head link;
+ struct kref kref;
+};
+
+static inline struct pcilynx *
+lynx_get(struct pcilynx *lynx)
+{
+ kref_get(&lynx->kref);
+
+ return lynx;
+}
+
+static void
+lynx_release(struct kref *kref)
+{
+ kfree(container_of(kref, struct pcilynx, kref));
+}
+
+static inline void
+lynx_put(struct pcilynx *lynx)
+{
+ kref_put(&lynx->kref, lynx_release);
+}
+
+struct client {
+ struct pcilynx *lynx;
+ u32 tcode_mask;
+ struct packet_buffer buffer;
+ struct list_head link;
+};
+
+static DEFINE_MUTEX(card_mutex);
+static LIST_HEAD(card_list);
+
+static int
+packet_buffer_init(struct packet_buffer *buffer, size_t capacity)
+{
+ buffer->data = kmalloc(capacity, GFP_KERNEL);
+ if (buffer->data == NULL)
+ return -ENOMEM;
+ buffer->head = (struct packet *) buffer->data;
+ buffer->tail = (struct packet *) buffer->data;
+ buffer->capacity = capacity;
+ buffer->lost_packet_count = 0;
+ atomic_set(&buffer->size, 0);
+ init_waitqueue_head(&buffer->wait);
+
+ return 0;
+}
+
+static void
+packet_buffer_destroy(struct packet_buffer *buffer)
+{
+ kfree(buffer->data);
+}
+
+static int
+packet_buffer_get(struct client *client, char __user *data, size_t user_length)
+{
+ struct packet_buffer *buffer = &client->buffer;
+ size_t length;
+ char *end;
+
+ if (wait_event_interruptible(buffer->wait,
+ atomic_read(&buffer->size) > 0) ||
+ list_empty(&client->lynx->link))
+ return -ERESTARTSYS;
+
+ if (atomic_read(&buffer->size) == 0)
+ return -ENODEV;
+
+ /* FIXME: Check length <= user_length. */
+
+ end = buffer->data + buffer->capacity;
+ length = buffer->head->length;
+
+ if (&buffer->head->data[length] < end) {
+ if (copy_to_user(data, buffer->head->data, length))
+ return -EFAULT;
+ buffer->head = (struct packet *) &buffer->head->data[length];
+ } else {
+ size_t split = end - buffer->head->data;
+
+ if (copy_to_user(data, buffer->head->data, split))
+ return -EFAULT;
+ if (copy_to_user(data + split, buffer->data, length - split))
+ return -EFAULT;
+ buffer->head = (struct packet *) &buffer->data[length - split];
+ }
+
+ /*
+ * Decrease buffer->size as the last thing, since this is what
+ * keeps the interrupt from overwriting the packet we are
+ * retrieving from the buffer.
+ */
+ atomic_sub(sizeof(struct packet) + length, &buffer->size);
+
+ return length;
+}
+
+static void
+packet_buffer_put(struct packet_buffer *buffer, void *data, size_t length)
+{
+ char *end;
+
+ buffer->total_packet_count++;
+
+ if (buffer->capacity <
+ atomic_read(&buffer->size) + sizeof(struct packet) + length) {
+ buffer->lost_packet_count++;
+ return;
+ }
+
+ end = buffer->data + buffer->capacity;
+ buffer->tail->length = length;
+
+ if (&buffer->tail->data[length] < end) {
+ memcpy(buffer->tail->data, data, length);
+ buffer->tail = (struct packet *) &buffer->tail->data[length];
+ } else {
+ size_t split = end - buffer->tail->data;
+
+ memcpy(buffer->tail->data, data, split);
+ memcpy(buffer->data, data + split, length - split);
+ buffer->tail = (struct packet *) &buffer->data[length - split];
+ }
+
+ /* Finally, adjust buffer size and wake up userspace reader. */
+
+ atomic_add(sizeof(struct packet) + length, &buffer->size);
+ wake_up_interruptible(&buffer->wait);
+}
+
+static inline void
+reg_write(struct pcilynx *lynx, int offset, u32 data)
+{
+ writel(data, lynx->registers + offset);
+}
+
+static inline u32
+reg_read(struct pcilynx *lynx, int offset)
+{
+ return readl(lynx->registers + offset);
+}
+
+static inline void
+reg_set_bits(struct pcilynx *lynx, int offset, u32 mask)
+{
+ reg_write(lynx, offset, (reg_read(lynx, offset) | mask));
+}
+
+/*
+ * Maybe the pcl programs could be set up to just append data instead
+ * of using a whole packet.
+ */
+static inline void
+run_pcl(struct pcilynx *lynx, dma_addr_t pcl_bus,
+ int dmachan)
+{
+ reg_write(lynx, DMA0_CURRENT_PCL + dmachan * 0x20, pcl_bus);
+ reg_write(lynx, DMA0_CHAN_CTRL + dmachan * 0x20,
+ DMA_CHAN_CTRL_ENABLE | DMA_CHAN_CTRL_LINK);
+}
+
+static int
+set_phy_reg(struct pcilynx *lynx, int addr, int val)
+{
+ if (addr > 15) {
+ dev_err(&lynx->pci_device->dev,
+ "PHY register address %d out of range\n", addr);
+ return -1;
+ }
+ if (val > 0xff) {
+ dev_err(&lynx->pci_device->dev,
+ "PHY register value %d out of range\n", val);
+ return -1;
+ }
+ reg_write(lynx, LINK_PHY, LINK_PHY_WRITE |
+ LINK_PHY_ADDR(addr) | LINK_PHY_WDATA(val));
+
+ return 0;
+}
+
+static int
+nosy_open(struct inode *inode, struct file *file)
+{
+ int minor = iminor(inode);
+ struct client *client;
+ struct pcilynx *tmp, *lynx = NULL;
+
+ mutex_lock(&card_mutex);
+ list_for_each_entry(tmp, &card_list, link)
+ if (tmp->misc.minor == minor) {
+ lynx = lynx_get(tmp);
+ break;
+ }
+ mutex_unlock(&card_mutex);
+ if (lynx == NULL)
+ return -ENODEV;
+
+ client = kmalloc(sizeof *client, GFP_KERNEL);
+ if (client == NULL)
+ goto fail;
+
+ client->tcode_mask = ~0;
+ client->lynx = lynx;
+ INIT_LIST_HEAD(&client->link);
+
+ if (packet_buffer_init(&client->buffer, 128 * 1024) < 0)
+ goto fail;
+
+ file->private_data = client;
+
+ return 0;
+fail:
+ kfree(client);
+ lynx_put(lynx);
+
+ return -ENOMEM;
+}
+
+static int
+nosy_release(struct inode *inode, struct file *file)
+{
+ struct client *client = file->private_data;
+ struct pcilynx *lynx = client->lynx;
+
+ spin_lock_irq(&lynx->client_list_lock);
+ list_del_init(&client->link);
+ spin_unlock_irq(&lynx->client_list_lock);
+
+ packet_buffer_destroy(&client->buffer);
+ kfree(client);
+ lynx_put(lynx);
+
+ return 0;
+}
+
+static unsigned int
+nosy_poll(struct file *file, poll_table *pt)
+{
+ struct client *client = file->private_data;
+ unsigned int ret = 0;
+
+ poll_wait(file, &client->buffer.wait, pt);
+
+ if (atomic_read(&client->buffer.size) > 0)
+ ret = POLLIN | POLLRDNORM;
+
+ if (list_empty(&client->lynx->link))
+ ret |= POLLHUP;
+
+ return ret;
+}
+
+static ssize_t
+nosy_read(struct file *file, char __user *buffer, size_t count, loff_t *offset)
+{
+ struct client *client = file->private_data;
+
+ return packet_buffer_get(client, buffer, count);
+}
+
+static long
+nosy_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ struct client *client = file->private_data;
+ spinlock_t *client_list_lock = &client->lynx->client_list_lock;
+ struct nosy_stats stats;
+
+ switch (cmd) {
+ case NOSY_IOC_GET_STATS:
+ spin_lock_irq(client_list_lock);
+ stats.total_packet_count = client->buffer.total_packet_count;
+ stats.lost_packet_count = client->buffer.lost_packet_count;
+ spin_unlock_irq(client_list_lock);
+
+ if (copy_to_user((void __user *) arg, &stats, sizeof stats))
+ return -EFAULT;
+ else
+ return 0;
+
+ case NOSY_IOC_START:
+ spin_lock_irq(client_list_lock);
+ list_add_tail(&client->link, &client->lynx->client_list);
+ spin_unlock_irq(client_list_lock);
+
+ return 0;
+
+ case NOSY_IOC_STOP:
+ spin_lock_irq(client_list_lock);
+ list_del_init(&client->link);
+ spin_unlock_irq(client_list_lock);
+
+ return 0;
+
+ case NOSY_IOC_FILTER:
+ spin_lock_irq(client_list_lock);
+ client->tcode_mask = arg;
+ spin_unlock_irq(client_list_lock);
+
+ return 0;
+
+ default:
+ return -EINVAL;
+ /* Flush buffer, configure filter. */
+ }
+}
+
+static const struct file_operations nosy_ops = {
+ .owner = THIS_MODULE,
+ .read = nosy_read,
+ .unlocked_ioctl = nosy_ioctl,
+ .poll = nosy_poll,
+ .open = nosy_open,
+ .release = nosy_release,
+};
+
+#define PHY_PACKET_SIZE 12 /* 1 payload, 1 inverse, 1 ack = 3 quadlets */
+
+static void
+packet_irq_handler(struct pcilynx *lynx)
+{
+ struct client *client;
+ u32 tcode_mask, tcode;
+ size_t length;
+ struct timeval tv;
+
+ /* FIXME: Also report rcv_speed. */
+
+ length = __le32_to_cpu(lynx->rcv_pcl->pcl_status) & 0x00001fff;
+ tcode = __le32_to_cpu(lynx->rcv_buffer[1]) >> 4 & 0xf;
+
+ do_gettimeofday(&tv);
+ lynx->rcv_buffer[0] = (__force __le32)tv.tv_usec;
+
+ if (length == PHY_PACKET_SIZE)
+ tcode_mask = 1 << TCODE_PHY_PACKET;
+ else
+ tcode_mask = 1 << tcode;
+
+ spin_lock(&lynx->client_list_lock);
+
+ list_for_each_entry(client, &lynx->client_list, link)
+ if (client->tcode_mask & tcode_mask)
+ packet_buffer_put(&client->buffer,
+ lynx->rcv_buffer, length + 4);
+
+ spin_unlock(&lynx->client_list_lock);
+}
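
packet_irq_handler() above turns each captured record into a single mask bit -- 1 << tcode for normal packets, 1 << TCODE_PHY_PACKET (0x10) for the 12-byte PHY packets -- and delivers the record only to clients whose tcode_mask has that bit set, so the NOSY_IOC_FILTER argument is a plain bitmask. A userspace sketch (illustration only, again assuming the /dev/nosy node and omitting error handling):

    /* Illustration only: restrict the capture to PHY packets, then query the
     * per-client counters. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include "nosy-user.h"

    #define TCODE_PHY_PACKET 0x10   /* same value the driver uses for the mask */

    int main(void)
    {
        struct nosy_stats stats;
        int fd = open("/dev/nosy", O_RDONLY);   /* assumed device node */

        if (fd < 0)
            return 1;
        ioctl(fd, NOSY_IOC_FILTER, 1u << TCODE_PHY_PACKET);
        ioctl(fd, NOSY_IOC_START);
        /* ... read() records for a while ... */
        ioctl(fd, NOSY_IOC_STOP);

        if (ioctl(fd, NOSY_IOC_GET_STATS, &stats) == 0)
            printf("%u packets seen, %u lost\n",
                   stats.total_packet_count, stats.lost_packet_count);
        close(fd);
        return 0;
    }
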
+
+static void
+bus_reset_irq_handler(struct pcilynx *lynx)
+{
+ struct client *client;
+ struct timeval tv;
+
+ do_gettimeofday(&tv);
+
+ spin_lock(&lynx->client_list_lock);
+
+ list_for_each_entry(client, &lynx->client_list, link)
+ packet_buffer_put(&client->buffer, &tv.tv_usec, 4);
+
+ spin_unlock(&lynx->client_list_lock);
+}
+
+static irqreturn_t
+irq_handler(int irq, void *device)
+{
+ struct pcilynx *lynx = device;
+ u32 pci_int_status;
+
+ pci_int_status = reg_read(lynx, PCI_INT_STATUS);
+
+ if (pci_int_status == ~0)
+ /* Card was ejected. */
+ return IRQ_NONE;
+
+ if ((pci_int_status & PCI_INT_INT_PEND) == 0)
+ /* Not our interrupt, bail out quickly. */
+ return IRQ_NONE;
+
+ if ((pci_int_status & PCI_INT_P1394_INT) != 0) {
+ u32 link_int_status;
+
+ link_int_status = reg_read(lynx, LINK_INT_STATUS);
+ reg_write(lynx, LINK_INT_STATUS, link_int_status);
+
+ if ((link_int_status & LINK_INT_PHY_BUSRESET) > 0)
+ bus_reset_irq_handler(lynx);
+ }
+
+ /* Clear the PCI_INT_STATUS register only after clearing the
+ * LINK_INT_STATUS register; otherwise the PCI_INT_P1394 will
+ * be set again immediately. */
+
+ reg_write(lynx, PCI_INT_STATUS, pci_int_status);
+
+ if ((pci_int_status & PCI_INT_DMA0_HLT) > 0) {
+ packet_irq_handler(lynx);
+ run_pcl(lynx, lynx->rcv_start_pcl_bus, 0);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void
+remove_card(struct pci_dev *dev)
+{
+ struct pcilynx *lynx = pci_get_drvdata(dev);
+ struct client *client;
+
+ mutex_lock(&card_mutex);
+ list_del_init(&lynx->link);
+ misc_deregister(&lynx->misc);
+ mutex_unlock(&card_mutex);
+
+ reg_write(lynx, PCI_INT_ENABLE, 0);
+ free_irq(lynx->pci_device->irq, lynx);
+
+ spin_lock_irq(&lynx->client_list_lock);
+ list_for_each_entry(client, &lynx->client_list, link)
+ wake_up_interruptible(&client->buffer.wait);
+ spin_unlock_irq(&lynx->client_list_lock);
+
+ pci_free_consistent(lynx->pci_device, sizeof(struct pcl),
+ lynx->rcv_start_pcl, lynx->rcv_start_pcl_bus);
+ pci_free_consistent(lynx->pci_device, sizeof(struct pcl),
+ lynx->rcv_pcl, lynx->rcv_pcl_bus);
+ pci_free_consistent(lynx->pci_device, PAGE_SIZE,
+ lynx->rcv_buffer, lynx->rcv_buffer_bus);
+
+ iounmap(lynx->registers);
+ pci_disable_device(dev);
+ lynx_put(lynx);
+}
+
+#define RCV_BUFFER_SIZE (16 * 1024)
+
+static int __devinit
+add_card(struct pci_dev *dev, const struct pci_device_id *unused)
+{
+ struct pcilynx *lynx;
+ u32 p, end;
+ int ret, i;
+
+ if (pci_set_dma_mask(dev, 0xffffffff)) {
+ dev_err(&dev->dev,
+ "DMA address limits not supported for PCILynx hardware\n");
+ return -ENXIO;
+ }
+ if (pci_enable_device(dev)) {
+ dev_err(&dev->dev, "Failed to enable PCILynx hardware\n");
+ return -ENXIO;
+ }
+ pci_set_master(dev);
+
+ lynx = kzalloc(sizeof *lynx, GFP_KERNEL);
+ if (lynx == NULL) {
+ dev_err(&dev->dev, "Failed to allocate control structure\n");
+ ret = -ENOMEM;
+ goto fail_disable;
+ }
+ lynx->pci_device = dev;
+ pci_set_drvdata(dev, lynx);
+
+ spin_lock_init(&lynx->client_list_lock);
+ INIT_LIST_HEAD(&lynx->client_list);
+ kref_init(&lynx->kref);
+
+ lynx->registers = ioremap_nocache(pci_resource_start(dev, 0),
+ PCILYNX_MAX_REGISTER);
+
+ lynx->rcv_start_pcl = pci_alloc_consistent(lynx->pci_device,
+ sizeof(struct pcl), &lynx->rcv_start_pcl_bus);
+ lynx->rcv_pcl = pci_alloc_consistent(lynx->pci_device,
+ sizeof(struct pcl), &lynx->rcv_pcl_bus);
+ lynx->rcv_buffer = pci_alloc_consistent(lynx->pci_device,
+ RCV_BUFFER_SIZE, &lynx->rcv_buffer_bus);
+ if (lynx->rcv_start_pcl == NULL ||
+ lynx->rcv_pcl == NULL ||
+ lynx->rcv_buffer == NULL) {
+ dev_err(&dev->dev, "Failed to allocate receive buffer\n");
+ ret = -ENOMEM;
+ goto fail_deallocate;
+ }
+ lynx->rcv_start_pcl->next = cpu_to_le32(lynx->rcv_pcl_bus);
+ lynx->rcv_pcl->next = cpu_to_le32(PCL_NEXT_INVALID);
+ lynx->rcv_pcl->async_error_next = cpu_to_le32(PCL_NEXT_INVALID);
+
+ lynx->rcv_pcl->buffer[0].control =
+ cpu_to_le32(PCL_CMD_RCV | PCL_BIGENDIAN | 2044);
+ lynx->rcv_pcl->buffer[0].pointer =
+ cpu_to_le32(lynx->rcv_buffer_bus + 4);
+ p = lynx->rcv_buffer_bus + 2048;
+ end = lynx->rcv_buffer_bus + RCV_BUFFER_SIZE;
+ for (i = 1; p < end; i++, p += 2048) {
+ lynx->rcv_pcl->buffer[i].control =
+ cpu_to_le32(PCL_CMD_RCV | PCL_BIGENDIAN | 2048);
+ lynx->rcv_pcl->buffer[i].pointer = cpu_to_le32(p);
+ }
+ lynx->rcv_pcl->buffer[i - 1].control |= cpu_to_le32(PCL_LAST_BUFF);
+
+ reg_set_bits(lynx, MISC_CONTROL, MISC_CONTROL_SWRESET);
+ /* Fix buggy cards with autoboot pin not tied low: */
+ reg_write(lynx, DMA0_CHAN_CTRL, 0);
+ reg_write(lynx, DMA_GLOBAL_REGISTER, 0x00 << 24);
+
+#if 0
+ /* now, looking for PHY register set */
+ if ((get_phy_reg(lynx, 2) & 0xe0) == 0xe0) {
+ lynx->phyic.reg_1394a = 1;
+ PRINT(KERN_INFO, lynx->id,
+ "found 1394a conform PHY (using extended register set)");
+ lynx->phyic.vendor = get_phy_vendorid(lynx);
+ lynx->phyic.product = get_phy_productid(lynx);
+ } else {
+ lynx->phyic.reg_1394a = 0;
+ PRINT(KERN_INFO, lynx->id, "found old 1394 PHY");
+ }
+#endif
+
+ /* Setup the general receive FIFO max size. */
+ reg_write(lynx, FIFO_SIZES, 255);
+
+ reg_set_bits(lynx, PCI_INT_ENABLE, PCI_INT_DMA_ALL);
+
+ reg_write(lynx, LINK_INT_ENABLE,
+ LINK_INT_PHY_TIME_OUT | LINK_INT_PHY_REG_RCVD |
+ LINK_INT_PHY_BUSRESET | LINK_INT_IT_STUCK |
+ LINK_INT_AT_STUCK | LINK_INT_SNTRJ |
+ LINK_INT_TC_ERR | LINK_INT_GRF_OVER_FLOW |
+ LINK_INT_ITF_UNDER_FLOW | LINK_INT_ATF_UNDER_FLOW);
+
+ /* Disable the L flag in self ID packets. */
+ set_phy_reg(lynx, 4, 0);
+
+ /* Put this baby into snoop mode */
+ reg_set_bits(lynx, LINK_CONTROL, LINK_CONTROL_SNOOP_ENABLE);
+
+ run_pcl(lynx, lynx->rcv_start_pcl_bus, 0);
+
+ if (request_irq(dev->irq, irq_handler, IRQF_SHARED,
+ driver_name, lynx)) {
+ dev_err(&dev->dev,
+ "Failed to allocate shared interrupt %d\n", dev->irq);
+ ret = -EIO;
+ goto fail_deallocate;
+ }
+
+ lynx->misc.parent = &dev->dev;
+ lynx->misc.minor = MISC_DYNAMIC_MINOR;
+ lynx->misc.name = "nosy";
+ lynx->misc.fops = &nosy_ops;
+
+ mutex_lock(&card_mutex);
+ ret = misc_register(&lynx->misc);
+ if (ret) {
+ dev_err(&dev->dev, "Failed to register misc char device\n");
+ mutex_unlock(&card_mutex);
+ goto fail_free_irq;
+ }
+ list_add_tail(&lynx->link, &card_list);
+ mutex_unlock(&card_mutex);
+
+ dev_info(&dev->dev,
+ "Initialized PCILynx IEEE1394 card, irq=%d\n", dev->irq);
+
+ return 0;
+
+fail_free_irq:
+ reg_write(lynx, PCI_INT_ENABLE, 0);
+ free_irq(lynx->pci_device->irq, lynx);
+
+fail_deallocate:
+ if (lynx->rcv_start_pcl)
+ pci_free_consistent(lynx->pci_device, sizeof(struct pcl),
+ lynx->rcv_start_pcl, lynx->rcv_start_pcl_bus);
+ if (lynx->rcv_pcl)
+ pci_free_consistent(lynx->pci_device, sizeof(struct pcl),
+ lynx->rcv_pcl, lynx->rcv_pcl_bus);
+ if (lynx->rcv_buffer)
+ pci_free_consistent(lynx->pci_device, PAGE_SIZE,
+ lynx->rcv_buffer, lynx->rcv_buffer_bus);
+ iounmap(lynx->registers);
+ kfree(lynx);
+
+fail_disable:
+ pci_disable_device(dev);
+
+ return ret;
+}
+
+static struct pci_device_id pci_table[] __devinitdata = {
+ {
+ .vendor = PCI_VENDOR_ID_TI,
+ .device = PCI_DEVICE_ID_TI_PCILYNX,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ { } /* Terminating entry */
+};
+
+static struct pci_driver lynx_pci_driver = {
+ .name = driver_name,
+ .id_table = pci_table,
+ .probe = add_card,
+ .remove = remove_card,
+};
+
+MODULE_AUTHOR("Kristian Hoegsberg");
+MODULE_DESCRIPTION("Snoop mode driver for TI pcilynx 1394 controllers");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, pci_table);
+
+static int __init nosy_init(void)
+{
+ return pci_register_driver(&lynx_pci_driver);
+}
+
+static void __exit nosy_cleanup(void)
+{
+ pci_unregister_driver(&lynx_pci_driver);
+
+ pr_info("Unloaded %s\n", driver_name);
+}
+
+module_init(nosy_init);
+module_exit(nosy_cleanup);
diff --git a/drivers/firewire/nosy.h b/drivers/firewire/nosy.h
new file mode 100644
index 000000000000..078ff27f4756
--- /dev/null
+++ b/drivers/firewire/nosy.h
@@ -0,0 +1,237 @@
+/*
+ * Chip register definitions for PCILynx chipset. Based on pcilynx.h
+ * from the Linux 1394 drivers, but modified a bit so the names here
+ * match the specification exactly (even though they have weird names,
+ * like xxx_OVER_FLOW, or arbitrary abbreviations like SNTRJ for "sent
+ * reject" etc.)
+ */
+
+#define PCILYNX_MAX_REGISTER 0xfff
+#define PCILYNX_MAX_MEMORY 0xffff
+
+#define PCI_LATENCY_CACHELINE 0x0c
+
+#define MISC_CONTROL 0x40
+#define MISC_CONTROL_SWRESET (1<<0)
+
+#define SERIAL_EEPROM_CONTROL 0x44
+
+#define PCI_INT_STATUS 0x48
+#define PCI_INT_ENABLE 0x4c
+/* status and enable have identical bit numbers */
+#define PCI_INT_INT_PEND (1<<31)
+#define PCI_INT_FRC_INT (1<<30)
+#define PCI_INT_SLV_ADR_PERR (1<<28)
+#define PCI_INT_SLV_DAT_PERR (1<<27)
+#define PCI_INT_MST_DAT_PERR (1<<26)
+#define PCI_INT_MST_DEV_TO (1<<25)
+#define PCI_INT_INT_SLV_TO (1<<23)
+#define PCI_INT_AUX_TO (1<<18)
+#define PCI_INT_AUX_INT (1<<17)
+#define PCI_INT_P1394_INT (1<<16)
+#define PCI_INT_DMA4_PCL (1<<9)
+#define PCI_INT_DMA4_HLT (1<<8)
+#define PCI_INT_DMA3_PCL (1<<7)
+#define PCI_INT_DMA3_HLT (1<<6)
+#define PCI_INT_DMA2_PCL (1<<5)
+#define PCI_INT_DMA2_HLT (1<<4)
+#define PCI_INT_DMA1_PCL (1<<3)
+#define PCI_INT_DMA1_HLT (1<<2)
+#define PCI_INT_DMA0_PCL (1<<1)
+#define PCI_INT_DMA0_HLT (1<<0)
+/* all DMA interrupts combined: */
+#define PCI_INT_DMA_ALL 0x3ff
+
+#define PCI_INT_DMA_HLT(chan) (1 << (chan * 2))
+#define PCI_INT_DMA_PCL(chan) (1 << (chan * 2 + 1))
+
+#define LBUS_ADDR 0xb4
+#define LBUS_ADDR_SEL_RAM (0x0<<16)
+#define LBUS_ADDR_SEL_ROM (0x1<<16)
+#define LBUS_ADDR_SEL_AUX (0x2<<16)
+#define LBUS_ADDR_SEL_ZV (0x3<<16)
+
+#define GPIO_CTRL_A 0xb8
+#define GPIO_CTRL_B 0xbc
+#define GPIO_DATA_BASE 0xc0
+
+#define DMA_BREG(base, chan) (base + chan * 0x20)
+#define DMA_SREG(base, chan) (base + chan * 0x10)
+
+#define PCL_NEXT_INVALID (1<<0)
+
+/* transfer commands */
+#define PCL_CMD_RCV (0x1<<24)
+#define PCL_CMD_RCV_AND_UPDATE (0xa<<24)
+#define PCL_CMD_XMT (0x2<<24)
+#define PCL_CMD_UNFXMT (0xc<<24)
+#define PCL_CMD_PCI_TO_LBUS (0x8<<24)
+#define PCL_CMD_LBUS_TO_PCI (0x9<<24)
+
+/* aux commands */
+#define PCL_CMD_NOP (0x0<<24)
+#define PCL_CMD_LOAD (0x3<<24)
+#define PCL_CMD_STOREQ (0x4<<24)
+#define PCL_CMD_STORED (0xb<<24)
+#define PCL_CMD_STORE0 (0x5<<24)
+#define PCL_CMD_STORE1 (0x6<<24)
+#define PCL_CMD_COMPARE (0xe<<24)
+#define PCL_CMD_SWAP_COMPARE (0xf<<24)
+#define PCL_CMD_ADD (0xd<<24)
+#define PCL_CMD_BRANCH (0x7<<24)
+
+/* BRANCH condition codes */
+#define PCL_COND_DMARDY_SET (0x1<<20)
+#define PCL_COND_DMARDY_CLEAR (0x2<<20)
+
+#define PCL_GEN_INTR (1<<19)
+#define PCL_LAST_BUFF (1<<18)
+#define PCL_LAST_CMD (PCL_LAST_BUFF)
+#define PCL_WAITSTAT (1<<17)
+#define PCL_BIGENDIAN (1<<16)
+#define PCL_ISOMODE (1<<12)
+
+#define DMA0_PREV_PCL 0x100
+#define DMA1_PREV_PCL 0x120
+#define DMA2_PREV_PCL 0x140
+#define DMA3_PREV_PCL 0x160
+#define DMA4_PREV_PCL 0x180
+#define DMA_PREV_PCL(chan) (DMA_BREG(DMA0_PREV_PCL, chan))
+
+#define DMA0_CURRENT_PCL 0x104
+#define DMA1_CURRENT_PCL 0x124
+#define DMA2_CURRENT_PCL 0x144
+#define DMA3_CURRENT_PCL 0x164
+#define DMA4_CURRENT_PCL 0x184
+#define DMA_CURRENT_PCL(chan) (DMA_BREG(DMA0_CURRENT_PCL, chan))
+
+#define DMA0_CHAN_STAT 0x10c
+#define DMA1_CHAN_STAT 0x12c
+#define DMA2_CHAN_STAT 0x14c
+#define DMA3_CHAN_STAT 0x16c
+#define DMA4_CHAN_STAT 0x18c
+#define DMA_CHAN_STAT(chan) (DMA_BREG(DMA0_CHAN_STAT, chan))
+/* CHAN_STATUS registers share bits */
+#define DMA_CHAN_STAT_SELFID (1<<31)
+#define DMA_CHAN_STAT_ISOPKT (1<<30)
+#define DMA_CHAN_STAT_PCIERR (1<<29)
+#define DMA_CHAN_STAT_PKTERR (1<<28)
+#define DMA_CHAN_STAT_PKTCMPL (1<<27)
+#define DMA_CHAN_STAT_SPECIALACK (1<<14)
+
+#define DMA0_CHAN_CTRL 0x110
+#define DMA1_CHAN_CTRL 0x130
+#define DMA2_CHAN_CTRL 0x150
+#define DMA3_CHAN_CTRL 0x170
+#define DMA4_CHAN_CTRL 0x190
+#define DMA_CHAN_CTRL(chan) (DMA_BREG(DMA0_CHAN_CTRL, chan))
+/* CHAN_CTRL registers share bits */
+#define DMA_CHAN_CTRL_ENABLE (1<<31)
+#define DMA_CHAN_CTRL_BUSY (1<<30)
+#define DMA_CHAN_CTRL_LINK (1<<29)
+
+#define DMA0_READY 0x114
+#define DMA1_READY 0x134
+#define DMA2_READY 0x154
+#define DMA3_READY 0x174
+#define DMA4_READY 0x194
+#define DMA_READY(chan) (DMA_BREG(DMA0_READY, chan))
+
+#define DMA_GLOBAL_REGISTER 0x908
+
+#define FIFO_SIZES 0xa00
+
+#define FIFO_CONTROL 0xa10
+#define FIFO_CONTROL_GRF_FLUSH (1<<4)
+#define FIFO_CONTROL_ITF_FLUSH (1<<3)
+#define FIFO_CONTROL_ATF_FLUSH (1<<2)
+
+#define FIFO_XMIT_THRESHOLD 0xa14
+
+#define DMA0_WORD0_CMP_VALUE 0xb00
+#define DMA1_WORD0_CMP_VALUE 0xb10
+#define DMA2_WORD0_CMP_VALUE 0xb20
+#define DMA3_WORD0_CMP_VALUE 0xb30
+#define DMA4_WORD0_CMP_VALUE 0xb40
+#define DMA_WORD0_CMP_VALUE(chan) (DMA_SREG(DMA0_WORD0_CMP_VALUE, chan))
+
+#define DMA0_WORD0_CMP_ENABLE 0xb04
+#define DMA1_WORD0_CMP_ENABLE 0xb14
+#define DMA2_WORD0_CMP_ENABLE 0xb24
+#define DMA3_WORD0_CMP_ENABLE 0xb34
+#define DMA4_WORD0_CMP_ENABLE 0xb44
+#define DMA_WORD0_CMP_ENABLE(chan) (DMA_SREG(DMA0_WORD0_CMP_ENABLE, chan))
+
+#define DMA0_WORD1_CMP_VALUE 0xb08
+#define DMA1_WORD1_CMP_VALUE 0xb18
+#define DMA2_WORD1_CMP_VALUE 0xb28
+#define DMA3_WORD1_CMP_VALUE 0xb38
+#define DMA4_WORD1_CMP_VALUE 0xb48
+#define DMA_WORD1_CMP_VALUE(chan) (DMA_SREG(DMA0_WORD1_CMP_VALUE, chan))
+
+#define DMA0_WORD1_CMP_ENABLE 0xb0c
+#define DMA1_WORD1_CMP_ENABLE 0xb1c
+#define DMA2_WORD1_CMP_ENABLE 0xb2c
+#define DMA3_WORD1_CMP_ENABLE 0xb3c
+#define DMA4_WORD1_CMP_ENABLE 0xb4c
+#define DMA_WORD1_CMP_ENABLE(chan) (DMA_SREG(DMA0_WORD1_CMP_ENABLE, chan))
+/* word 1 compare enable flags */
+#define DMA_WORD1_CMP_MATCH_OTHERBUS (1<<15)
+#define DMA_WORD1_CMP_MATCH_BROADCAST (1<<14)
+#define DMA_WORD1_CMP_MATCH_BUS_BCAST (1<<13)
+#define DMA_WORD1_CMP_MATCH_LOCAL_NODE (1<<12)
+#define DMA_WORD1_CMP_MATCH_EXACT (1<<11)
+#define DMA_WORD1_CMP_ENABLE_SELF_ID (1<<10)
+#define DMA_WORD1_CMP_ENABLE_MASTER (1<<8)
+
+#define LINK_ID 0xf00
+#define LINK_ID_BUS(id) (id<<22)
+#define LINK_ID_NODE(id) (id<<16)
+
+#define LINK_CONTROL 0xf04
+#define LINK_CONTROL_BUSY (1<<29)
+#define LINK_CONTROL_TX_ISO_EN (1<<26)
+#define LINK_CONTROL_RX_ISO_EN (1<<25)
+#define LINK_CONTROL_TX_ASYNC_EN (1<<24)
+#define LINK_CONTROL_RX_ASYNC_EN (1<<23)
+#define LINK_CONTROL_RESET_TX (1<<21)
+#define LINK_CONTROL_RESET_RX (1<<20)
+#define LINK_CONTROL_CYCMASTER (1<<11)
+#define LINK_CONTROL_CYCSOURCE (1<<10)
+#define LINK_CONTROL_CYCTIMEREN (1<<9)
+#define LINK_CONTROL_RCV_CMP_VALID (1<<7)
+#define LINK_CONTROL_SNOOP_ENABLE (1<<6)
+
+#define CYCLE_TIMER 0xf08
+
+#define LINK_PHY 0xf0c
+#define LINK_PHY_READ (1<<31)
+#define LINK_PHY_WRITE (1<<30)
+#define LINK_PHY_ADDR(addr) (addr<<24)
+#define LINK_PHY_WDATA(data) (data<<16)
+#define LINK_PHY_RADDR(addr) (addr<<8)
+
+#define LINK_INT_STATUS 0xf14
+#define LINK_INT_ENABLE 0xf18
+/* status and enable have identical bit numbers */
+#define LINK_INT_LINK_INT (1<<31)
+#define LINK_INT_PHY_TIME_OUT (1<<30)
+#define LINK_INT_PHY_REG_RCVD (1<<29)
+#define LINK_INT_PHY_BUSRESET (1<<28)
+#define LINK_INT_TX_RDY (1<<26)
+#define LINK_INT_RX_DATA_RDY (1<<25)
+#define LINK_INT_IT_STUCK (1<<20)
+#define LINK_INT_AT_STUCK (1<<19)
+#define LINK_INT_SNTRJ (1<<17)
+#define LINK_INT_HDR_ERR (1<<16)
+#define LINK_INT_TC_ERR (1<<15)
+#define LINK_INT_CYC_SEC (1<<11)
+#define LINK_INT_CYC_STRT (1<<10)
+#define LINK_INT_CYC_DONE (1<<9)
+#define LINK_INT_CYC_PEND (1<<8)
+#define LINK_INT_CYC_LOST (1<<7)
+#define LINK_INT_CYC_ARB_FAILED (1<<6)
+#define LINK_INT_GRF_OVER_FLOW (1<<5)
+#define LINK_INT_ITF_UNDER_FLOW (1<<4)
+#define LINK_INT_ATF_UNDER_FLOW (1<<3)
+#define LINK_INT_IARB_FAILED (1<<0)
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index 9f627e758cfc..7f03540cabe8 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -18,6 +18,7 @@
* Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
+#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/device.h>
@@ -32,11 +33,13 @@
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
+#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
+#include <linux/time.h>
#include <asm/byteorder.h>
#include <asm/page.h>
@@ -170,6 +173,10 @@ struct fw_ohci {
int generation;
int request_generation; /* for timestamping incoming requests */
unsigned quirks;
+ unsigned int pri_req_max;
+ u32 bus_time;
+ bool is_root;
+ bool csr_state_setclear_abdicate;
/*
* Spinlock for accessing fw_ohci data. Never call out of
@@ -177,16 +184,20 @@ struct fw_ohci {
*/
spinlock_t lock;
+ struct mutex phy_reg_mutex;
+
struct ar_context ar_request_ctx;
struct ar_context ar_response_ctx;
struct context at_request_ctx;
struct context at_response_ctx;
- u32 it_context_mask;
+ u32 it_context_mask; /* unoccupied IT contexts */
struct iso_context *it_context_list;
- u64 ir_context_channels;
- u32 ir_context_mask;
+ u64 ir_context_channels; /* unoccupied channels */
+ u32 ir_context_mask; /* unoccupied IR contexts */
struct iso_context *ir_context_list;
+ u64 mc_channels; /* channels in use by the multichannel IR context */
+ bool mc_allocated;
__be32 *config_rom;
dma_addr_t config_rom_bus;
@@ -231,12 +242,14 @@ static inline struct fw_ohci *fw_ohci(struct fw_card *card)
static char ohci_driver_name[] = KBUILD_MODNAME;
+#define PCI_DEVICE_ID_JMICRON_JMB38X_FW 0x2380
#define PCI_DEVICE_ID_TI_TSB12LV22 0x8009
#define QUIRK_CYCLE_TIMER 1
#define QUIRK_RESET_PACKET 2
#define QUIRK_BE_HEADERS 4
#define QUIRK_NO_1394A 8
+#define QUIRK_NO_MSI 16
/* In case of multiple matches in ohci_quirks[], only the first one is used. */
static const struct {
@@ -247,6 +260,7 @@ static const struct {
QUIRK_NO_1394A},
{PCI_VENDOR_ID_TI, PCI_ANY_ID, QUIRK_RESET_PACKET},
{PCI_VENDOR_ID_AL, PCI_ANY_ID, QUIRK_CYCLE_TIMER},
+ {PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB38X_FW, QUIRK_NO_MSI},
{PCI_VENDOR_ID_NEC, PCI_ANY_ID, QUIRK_CYCLE_TIMER},
{PCI_VENDOR_ID_VIA, PCI_ANY_ID, QUIRK_CYCLE_TIMER},
{PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_FW, QUIRK_BE_HEADERS},
@@ -260,6 +274,7 @@ MODULE_PARM_DESC(quirks, "Chip quirks (default = 0"
", reset packet generation = " __stringify(QUIRK_RESET_PACKET)
", AR/selfID endianess = " __stringify(QUIRK_BE_HEADERS)
", no 1394a enhancements = " __stringify(QUIRK_NO_1394A)
+ ", disable MSI = " __stringify(QUIRK_NO_MSI)
")");
#define OHCI_PARAM_DEBUG_AT_AR 1
@@ -288,7 +303,7 @@ static void log_irqs(u32 evt)
!(evt & OHCI1394_busReset))
return;
- fw_notify("IRQ %08x%s%s%s%s%s%s%s%s%s%s%s%s%s\n", evt,
+ fw_notify("IRQ %08x%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n", evt,
evt & OHCI1394_selfIDComplete ? " selfID" : "",
evt & OHCI1394_RQPkt ? " AR_req" : "",
evt & OHCI1394_RSPkt ? " AR_resp" : "",
@@ -298,6 +313,7 @@ static void log_irqs(u32 evt)
evt & OHCI1394_isochTx ? " IT" : "",
evt & OHCI1394_postedWriteErr ? " postedWriteErr" : "",
evt & OHCI1394_cycleTooLong ? " cycleTooLong" : "",
+ evt & OHCI1394_cycle64Seconds ? " cycle64Seconds" : "",
evt & OHCI1394_cycleInconsistent ? " cycleInconsistent" : "",
evt & OHCI1394_regAccessFail ? " regAccessFail" : "",
evt & OHCI1394_busReset ? " busReset" : "",
@@ -305,7 +321,8 @@ static void log_irqs(u32 evt)
OHCI1394_RSPkt | OHCI1394_reqTxComplete |
OHCI1394_respTxComplete | OHCI1394_isochRx |
OHCI1394_isochTx | OHCI1394_postedWriteErr |
- OHCI1394_cycleTooLong | OHCI1394_cycleInconsistent |
+ OHCI1394_cycleTooLong | OHCI1394_cycle64Seconds |
+ OHCI1394_cycleInconsistent |
OHCI1394_regAccessFail | OHCI1394_busReset)
? " ?" : "");
}
@@ -470,12 +487,17 @@ static int read_phy_reg(struct fw_ohci *ohci, int addr)
int i;
reg_write(ohci, OHCI1394_PhyControl, OHCI1394_PhyControl_Read(addr));
- for (i = 0; i < 10; i++) {
+ for (i = 0; i < 3 + 100; i++) {
val = reg_read(ohci, OHCI1394_PhyControl);
if (val & OHCI1394_PhyControl_ReadDone)
return OHCI1394_PhyControl_ReadData(val);
- msleep(1);
+ /*
+ * Try a few times without waiting. Sleeping is necessary
+ * only when the link/PHY interface is busy.
+ */
+ if (i >= 3)
+ msleep(1);
}
fw_error("failed to read phy reg\n");
@@ -488,25 +510,23 @@ static int write_phy_reg(const struct fw_ohci *ohci, int addr, u32 val)
reg_write(ohci, OHCI1394_PhyControl,
OHCI1394_PhyControl_Write(addr, val));
- for (i = 0; i < 100; i++) {
+ for (i = 0; i < 3 + 100; i++) {
val = reg_read(ohci, OHCI1394_PhyControl);
if (!(val & OHCI1394_PhyControl_WritePending))
return 0;
- msleep(1);
+ if (i >= 3)
+ msleep(1);
}
fw_error("failed to write phy reg\n");
return -EBUSY;
}
-static int ohci_update_phy_reg(struct fw_card *card, int addr,
- int clear_bits, int set_bits)
+static int update_phy_reg(struct fw_ohci *ohci, int addr,
+ int clear_bits, int set_bits)
{
- struct fw_ohci *ohci = fw_ohci(card);
- int ret;
-
- ret = read_phy_reg(ohci, addr);
+ int ret = read_phy_reg(ohci, addr);
if (ret < 0)
return ret;
@@ -524,13 +544,38 @@ static int read_paged_phy_reg(struct fw_ohci *ohci, int page, int addr)
{
int ret;
- ret = ohci_update_phy_reg(&ohci->card, 7, PHY_PAGE_SELECT, page << 5);
+ ret = update_phy_reg(ohci, 7, PHY_PAGE_SELECT, page << 5);
if (ret < 0)
return ret;
return read_phy_reg(ohci, addr);
}
+static int ohci_read_phy_reg(struct fw_card *card, int addr)
+{
+ struct fw_ohci *ohci = fw_ohci(card);
+ int ret;
+
+ mutex_lock(&ohci->phy_reg_mutex);
+ ret = read_phy_reg(ohci, addr);
+ mutex_unlock(&ohci->phy_reg_mutex);
+
+ return ret;
+}
+
+static int ohci_update_phy_reg(struct fw_card *card, int addr,
+ int clear_bits, int set_bits)
+{
+ struct fw_ohci *ohci = fw_ohci(card);
+ int ret;
+
+ mutex_lock(&ohci->phy_reg_mutex);
+ ret = update_phy_reg(ohci, addr, clear_bits, set_bits);
+ mutex_unlock(&ohci->phy_reg_mutex);
+
+ return ret;
+}
+
static int ar_context_add_page(struct ar_context *ctx)
{
struct device *dev = ctx->ohci->card.device;
@@ -553,6 +598,7 @@ static int ar_context_add_page(struct ar_context *ctx)
ab->descriptor.res_count = cpu_to_le16(PAGE_SIZE - offset);
ab->descriptor.branch_address = 0;
+ wmb(); /* finish init of new descriptors before branch_address update */
ctx->last_buffer->descriptor.branch_address = cpu_to_le32(ab_bus | 1);
ctx->last_buffer->next = ab;
ctx->last_buffer = ab;
@@ -940,6 +986,8 @@ static void context_append(struct context *ctx,
d_bus = desc->buffer_bus + (d - desc->buffer) * sizeof(*d);
desc->used += (z + extra) * sizeof(*d);
+
+ wmb(); /* finish init of new descriptors before branch_address update */
ctx->prev->branch_address = cpu_to_le32(d_bus | z);
ctx->prev = find_branch_descriptor(d, z);
@@ -1026,6 +1074,9 @@ static int at_context_queue_packet(struct context *ctx,
header[1] = cpu_to_le32(packet->header[0]);
header[2] = cpu_to_le32(packet->header[1]);
d[0].req_count = cpu_to_le16(12);
+
+ if (is_ping_packet(packet->header))
+ d[0].control |= cpu_to_le16(DESCRIPTOR_PING);
break;
case 4:
@@ -1311,6 +1362,78 @@ static void at_context_transmit(struct context *ctx, struct fw_packet *packet)
}
+static u32 cycle_timer_ticks(u32 cycle_timer)
+{
+ u32 ticks;
+
+ ticks = cycle_timer & 0xfff;
+ ticks += 3072 * ((cycle_timer >> 12) & 0x1fff);
+ ticks += (3072 * 8000) * (cycle_timer >> 25);
+
+ return ticks;
+}
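
The constants above come straight from the cycle-timer layout: bits 11..0 are cycleOffset in ticks of the 24.576 MHz clock (3072 ticks per 125 us cycle), bits 24..12 are cycleCount (8000 cycles per second) and bits 31..25 are cycleSeconds. As a worked example, a register value encoding second 2, cycle 4000, offset 100 flattens to

    ticks = 100 + 3072 * 4000 + (3072 * 8000) * 2 = 61440100

which is the quantity the ratio test in get_cycle_time() below compares between consecutive reads.
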
+
+/*
+ * Some controllers exhibit one or more of the following bugs when updating the
+ * iso cycle timer register:
+ * - When the lowest six bits are wrapping around to zero, a read that happens
+ * at the same time will return garbage in the lowest ten bits.
+ * - When the cycleOffset field wraps around to zero, the cycleCount field is
+ * not incremented for about 60 ns.
+ * - Occasionally, the entire register reads zero.
+ *
+ * To catch these, we read the register three times and ensure that the
+ * difference between each two consecutive reads is approximately the same, i.e.
+ * less than twice the other. Furthermore, any negative difference indicates an
+ * error. (A PCI read should take at least 20 ticks of the 24.576 MHz timer to
+ * execute, so we have enough precision to compute the ratio of the differences.)
+ */
+static u32 get_cycle_time(struct fw_ohci *ohci)
+{
+ u32 c0, c1, c2;
+ u32 t0, t1, t2;
+ s32 diff01, diff12;
+ int i;
+
+ c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
+
+ if (ohci->quirks & QUIRK_CYCLE_TIMER) {
+ i = 0;
+ c1 = c2;
+ c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
+ do {
+ c0 = c1;
+ c1 = c2;
+ c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
+ t0 = cycle_timer_ticks(c0);
+ t1 = cycle_timer_ticks(c1);
+ t2 = cycle_timer_ticks(c2);
+ diff01 = t1 - t0;
+ diff12 = t2 - t1;
+ } while ((diff01 <= 0 || diff12 <= 0 ||
+ diff01 / diff12 >= 2 || diff12 / diff01 >= 2)
+ && i++ < 20);
+ }
+
+ return c2;
+}
+
+/*
+ * This function has to be called at least every 64 seconds. The bus_time
+ * field stores not only the upper 25 bits of the BUS_TIME register but also
+ * the most significant bit of the cycle timer in bit 6 so that we can detect
+ * changes in this bit.
+ */
+static u32 update_bus_time(struct fw_ohci *ohci)
+{
+ u32 cycle_time_seconds = get_cycle_time(ohci) >> 25;
+
+ if ((ohci->bus_time & 0x40) != (cycle_time_seconds & 0x40))
+ ohci->bus_time += 0x40;
+
+ return ohci->bus_time | cycle_time_seconds;
+}
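
Bit 6 (0x40) of the cached bus_time mirrors the top bit of the 7-bit cycleSeconds field, so a flip of that bit is the sign that the cycle timer crossed a 64-second boundary since the last call. Worked example: with bus_time = 0x7c0 (bit 6 set) and cycleSeconds having meanwhile wrapped from 127 to 3 (bit 6 now clear), the function advances bus_time to 0x800 and returns

    0x800 | 3 = 0x803 = 2051 seconds

so the count continues seamlessly across the hardware wrap (the previous read would have yielded 0x7c0 | 127 = 2047).
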
+
static void bus_reset_tasklet(unsigned long data)
{
struct fw_ohci *ohci = (struct fw_ohci *)data;
@@ -1319,6 +1442,7 @@ static void bus_reset_tasklet(unsigned long data)
unsigned long flags;
void *free_rom = NULL;
dma_addr_t free_rom_bus = 0;
+ bool is_new_root;
reg = reg_read(ohci, OHCI1394_NodeID);
if (!(reg & OHCI1394_NodeID_idValid)) {
@@ -1332,6 +1456,12 @@ static void bus_reset_tasklet(unsigned long data)
ohci->node_id = reg & (OHCI1394_NodeID_busNumber |
OHCI1394_NodeID_nodeNumber);
+ is_new_root = (reg & OHCI1394_NodeID_root) != 0;
+ if (!(ohci->is_root && is_new_root))
+ reg_write(ohci, OHCI1394_LinkControlSet,
+ OHCI1394_LinkControl_cycleMaster);
+ ohci->is_root = is_new_root;
+
reg = reg_read(ohci, OHCI1394_SelfIDCount);
if (reg & OHCI1394_SelfIDCount_selfIDError) {
fw_notify("inconsistent self IDs\n");
@@ -1439,7 +1569,9 @@ static void bus_reset_tasklet(unsigned long data)
self_id_count, ohci->self_id_buffer);
fw_core_handle_bus_reset(&ohci->card, ohci->node_id, generation,
- self_id_count, ohci->self_id_buffer);
+ self_id_count, ohci->self_id_buffer,
+ ohci->csr_state_setclear_abdicate);
+ ohci->csr_state_setclear_abdicate = false;
}
static irqreturn_t irq_handler(int irq, void *data)
@@ -1515,6 +1647,12 @@ static irqreturn_t irq_handler(int irq, void *data)
fw_notify("isochronous cycle inconsistent\n");
}
+ if (event & OHCI1394_cycle64Seconds) {
+ spin_lock(&ohci->lock);
+ update_bus_time(ohci);
+ spin_unlock(&ohci->lock);
+ }
+
return IRQ_HANDLED;
}
@@ -1577,7 +1715,7 @@ static int configure_1394a_enhancements(struct fw_ohci *ohci)
clear = PHY_ENABLE_ACCEL | PHY_ENABLE_MULTI;
set = 0;
}
- ret = ohci_update_phy_reg(&ohci->card, 5, clear, set);
+ ret = update_phy_reg(ohci, 5, clear, set);
if (ret < 0)
return ret;
@@ -1599,7 +1737,7 @@ static int ohci_enable(struct fw_card *card,
{
struct fw_ohci *ohci = fw_ohci(card);
struct pci_dev *dev = to_pci_dev(card->device);
- u32 lps;
+ u32 lps, seconds, version, irqs;
int i, ret;
if (software_reset(ohci)) {
@@ -1635,17 +1773,34 @@ static int ohci_enable(struct fw_card *card,
OHCI1394_HCControl_noByteSwapData);
reg_write(ohci, OHCI1394_SelfIDBuffer, ohci->self_id_bus);
- reg_write(ohci, OHCI1394_LinkControlClear,
- OHCI1394_LinkControl_rcvPhyPkt);
reg_write(ohci, OHCI1394_LinkControlSet,
OHCI1394_LinkControl_rcvSelfID |
+ OHCI1394_LinkControl_rcvPhyPkt |
OHCI1394_LinkControl_cycleTimerEnable |
OHCI1394_LinkControl_cycleMaster);
reg_write(ohci, OHCI1394_ATRetries,
OHCI1394_MAX_AT_REQ_RETRIES |
(OHCI1394_MAX_AT_RESP_RETRIES << 4) |
- (OHCI1394_MAX_PHYS_RESP_RETRIES << 8));
+ (OHCI1394_MAX_PHYS_RESP_RETRIES << 8) |
+ (200 << 16));
+
+ seconds = lower_32_bits(get_seconds());
+ reg_write(ohci, OHCI1394_IsochronousCycleTimer, seconds << 25);
+ ohci->bus_time = seconds & ~0x3f;
+
+ version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff;
+ if (version >= OHCI_VERSION_1_1) {
+ reg_write(ohci, OHCI1394_InitialChannelsAvailableHi,
+ 0xfffffffe);
+ card->broadcast_channel_auto_allocated = true;
+ }
+
+ /* Get implemented bits of the priority arbitration request counter. */
+ reg_write(ohci, OHCI1394_FairnessControl, 0x3f);
+ ohci->pri_req_max = reg_read(ohci, OHCI1394_FairnessControl) & 0x3f;
+ reg_write(ohci, OHCI1394_FairnessControl, 0);
+ card->priority_budget_implemented = ohci->pri_req_max != 0;
ar_context_run(&ohci->ar_request_ctx);
ar_context_run(&ohci->ar_response_ctx);
@@ -1653,16 +1808,6 @@ static int ohci_enable(struct fw_card *card,
reg_write(ohci, OHCI1394_PhyUpperBound, 0x00010000);
reg_write(ohci, OHCI1394_IntEventClear, ~0);
reg_write(ohci, OHCI1394_IntMaskClear, ~0);
- reg_write(ohci, OHCI1394_IntMaskSet,
- OHCI1394_selfIDComplete |
- OHCI1394_RQPkt | OHCI1394_RSPkt |
- OHCI1394_reqTxComplete | OHCI1394_respTxComplete |
- OHCI1394_isochRx | OHCI1394_isochTx |
- OHCI1394_postedWriteErr | OHCI1394_cycleTooLong |
- OHCI1394_cycleInconsistent | OHCI1394_regAccessFail |
- OHCI1394_masterIntEnable);
- if (param_debug & OHCI_PARAM_DEBUG_BUSRESETS)
- reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_busReset);
ret = configure_1394a_enhancements(ohci);
if (ret < 0)
@@ -1719,26 +1864,38 @@ static int ohci_enable(struct fw_card *card,
reg_write(ohci, OHCI1394_AsReqFilterHiSet, 0x80000000);
+ if (!(ohci->quirks & QUIRK_NO_MSI))
+ pci_enable_msi(dev);
if (request_irq(dev->irq, irq_handler,
- IRQF_SHARED, ohci_driver_name, ohci)) {
- fw_error("Failed to allocate shared interrupt %d.\n",
- dev->irq);
+ pci_dev_msi_enabled(dev) ? 0 : IRQF_SHARED,
+ ohci_driver_name, ohci)) {
+ fw_error("Failed to allocate interrupt %d.\n", dev->irq);
+ pci_disable_msi(dev);
dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
ohci->config_rom, ohci->config_rom_bus);
return -EIO;
}
+ irqs = OHCI1394_reqTxComplete | OHCI1394_respTxComplete |
+ OHCI1394_RQPkt | OHCI1394_RSPkt |
+ OHCI1394_isochTx | OHCI1394_isochRx |
+ OHCI1394_postedWriteErr |
+ OHCI1394_selfIDComplete |
+ OHCI1394_regAccessFail |
+ OHCI1394_cycle64Seconds |
+ OHCI1394_cycleInconsistent | OHCI1394_cycleTooLong |
+ OHCI1394_masterIntEnable;
+ if (param_debug & OHCI_PARAM_DEBUG_BUSRESETS)
+ irqs |= OHCI1394_busReset;
+ reg_write(ohci, OHCI1394_IntMaskSet, irqs);
+
reg_write(ohci, OHCI1394_HCControlSet,
OHCI1394_HCControl_linkEnable |
OHCI1394_HCControl_BIBimageValid);
flush_writes(ohci);
- /*
- * We are ready to go, initiate bus reset to finish the
- * initialization.
- */
-
- fw_core_initiate_bus_reset(&ohci->card, 1);
+ /* We are ready to go, reset bus to finish initialization. */
+ fw_schedule_bus_reset(&ohci->card, false, true);
return 0;
}
@@ -1813,7 +1970,7 @@ static int ohci_set_config_rom(struct fw_card *card,
* takes effect.
*/
if (ret == 0)
- fw_core_initiate_bus_reset(&ohci->card, 1);
+ fw_schedule_bus_reset(&ohci->card, true, true);
else
dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
next_config_rom, next_config_rom_bus);
@@ -1903,61 +2060,117 @@ static int ohci_enable_phys_dma(struct fw_card *card,
#endif /* CONFIG_FIREWIRE_OHCI_REMOTE_DMA */
}
-static u32 cycle_timer_ticks(u32 cycle_timer)
+static u32 ohci_read_csr(struct fw_card *card, int csr_offset)
{
- u32 ticks;
+ struct fw_ohci *ohci = fw_ohci(card);
+ unsigned long flags;
+ u32 value;
+
+ switch (csr_offset) {
+ case CSR_STATE_CLEAR:
+ case CSR_STATE_SET:
+ if (ohci->is_root &&
+ (reg_read(ohci, OHCI1394_LinkControlSet) &
+ OHCI1394_LinkControl_cycleMaster))
+ value = CSR_STATE_BIT_CMSTR;
+ else
+ value = 0;
+ if (ohci->csr_state_setclear_abdicate)
+ value |= CSR_STATE_BIT_ABDICATE;
- ticks = cycle_timer & 0xfff;
- ticks += 3072 * ((cycle_timer >> 12) & 0x1fff);
- ticks += (3072 * 8000) * (cycle_timer >> 25);
+ return value;
- return ticks;
+ case CSR_NODE_IDS:
+ return reg_read(ohci, OHCI1394_NodeID) << 16;
+
+ case CSR_CYCLE_TIME:
+ return get_cycle_time(ohci);
+
+ case CSR_BUS_TIME:
+ /*
+ * We might be called just after the cycle timer has wrapped
+ * around but just before the cycle64Seconds handler, so we
+ * better check here, too, if the bus time needs to be updated.
+ */
+ spin_lock_irqsave(&ohci->lock, flags);
+ value = update_bus_time(ohci);
+ spin_unlock_irqrestore(&ohci->lock, flags);
+ return value;
+
+ case CSR_BUSY_TIMEOUT:
+ value = reg_read(ohci, OHCI1394_ATRetries);
+ return (value >> 4) & 0x0ffff00f;
+
+ case CSR_PRIORITY_BUDGET:
+ return (reg_read(ohci, OHCI1394_FairnessControl) & 0x3f) |
+ (ohci->pri_req_max << 8);
+
+ default:
+ WARN_ON(1);
+ return 0;
+ }
}
-/*
- * Some controllers exhibit one or more of the following bugs when updating the
- * iso cycle timer register:
- * - When the lowest six bits are wrapping around to zero, a read that happens
- * at the same time will return garbage in the lowest ten bits.
- * - When the cycleOffset field wraps around to zero, the cycleCount field is
- * not incremented for about 60 ns.
- * - Occasionally, the entire register reads zero.
- *
- * To catch these, we read the register three times and ensure that the
- * difference between each two consecutive reads is approximately the same, i.e.
- * less than twice the other. Furthermore, any negative difference indicates an
- * error. (A PCI read should take at least 20 ticks of the 24.576 MHz timer to
- * execute, so we have enough precision to compute the ratio of the differences.)
- */
-static u32 ohci_get_cycle_time(struct fw_card *card)
+static void ohci_write_csr(struct fw_card *card, int csr_offset, u32 value)
{
struct fw_ohci *ohci = fw_ohci(card);
- u32 c0, c1, c2;
- u32 t0, t1, t2;
- s32 diff01, diff12;
- int i;
+ unsigned long flags;
- c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
+ switch (csr_offset) {
+ case CSR_STATE_CLEAR:
+ if ((value & CSR_STATE_BIT_CMSTR) && ohci->is_root) {
+ reg_write(ohci, OHCI1394_LinkControlClear,
+ OHCI1394_LinkControl_cycleMaster);
+ flush_writes(ohci);
+ }
+ if (value & CSR_STATE_BIT_ABDICATE)
+ ohci->csr_state_setclear_abdicate = false;
+ break;
- if (ohci->quirks & QUIRK_CYCLE_TIMER) {
- i = 0;
- c1 = c2;
- c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
- do {
- c0 = c1;
- c1 = c2;
- c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
- t0 = cycle_timer_ticks(c0);
- t1 = cycle_timer_ticks(c1);
- t2 = cycle_timer_ticks(c2);
- diff01 = t1 - t0;
- diff12 = t2 - t1;
- } while ((diff01 <= 0 || diff12 <= 0 ||
- diff01 / diff12 >= 2 || diff12 / diff01 >= 2)
- && i++ < 20);
- }
+ case CSR_STATE_SET:
+ if ((value & CSR_STATE_BIT_CMSTR) && ohci->is_root) {
+ reg_write(ohci, OHCI1394_LinkControlSet,
+ OHCI1394_LinkControl_cycleMaster);
+ flush_writes(ohci);
+ }
+ if (value & CSR_STATE_BIT_ABDICATE)
+ ohci->csr_state_setclear_abdicate = true;
+ break;
- return c2;
+ case CSR_NODE_IDS:
+ reg_write(ohci, OHCI1394_NodeID, value >> 16);
+ flush_writes(ohci);
+ break;
+
+ case CSR_CYCLE_TIME:
+ reg_write(ohci, OHCI1394_IsochronousCycleTimer, value);
+ reg_write(ohci, OHCI1394_IntEventSet,
+ OHCI1394_cycleInconsistent);
+ flush_writes(ohci);
+ break;
+
+ case CSR_BUS_TIME:
+ spin_lock_irqsave(&ohci->lock, flags);
+ ohci->bus_time = (ohci->bus_time & 0x7f) | (value & ~0x7f);
+ spin_unlock_irqrestore(&ohci->lock, flags);
+ break;
+
+ case CSR_BUSY_TIMEOUT:
+ value = (value & 0xf) | ((value & 0xf) << 4) |
+ ((value & 0xf) << 8) | ((value & 0x0ffff000) << 4);
+ reg_write(ohci, OHCI1394_ATRetries, value);
+ flush_writes(ohci);
+ break;
+
+ case CSR_PRIORITY_BUDGET:
+ reg_write(ohci, OHCI1394_FairnessControl, value & 0x3f);
+ flush_writes(ohci);
+ break;
+
+ default:
+ WARN_ON(1);
+ break;
+ }
}
static void copy_iso_headers(struct iso_context *ctx, void *p)
@@ -1992,10 +2205,9 @@ static int handle_ir_packet_per_buffer(struct context *context,
__le32 *ir_header;
void *p;
- for (pd = d; pd <= last; pd++) {
+ for (pd = d; pd <= last; pd++)
if (pd->transfer_status)
break;
- }
if (pd > last)
/* Descriptor(s) not done yet, stop iteration */
return 0;
@@ -2005,16 +2217,38 @@ static int handle_ir_packet_per_buffer(struct context *context,
if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS) {
ir_header = (__le32 *) p;
- ctx->base.callback(&ctx->base,
- le32_to_cpu(ir_header[0]) & 0xffff,
- ctx->header_length, ctx->header,
- ctx->base.callback_data);
+ ctx->base.callback.sc(&ctx->base,
+ le32_to_cpu(ir_header[0]) & 0xffff,
+ ctx->header_length, ctx->header,
+ ctx->base.callback_data);
ctx->header_length = 0;
}
return 1;
}
+/* d == last because each descriptor block is only a single descriptor. */
+static int handle_ir_buffer_fill(struct context *context,
+ struct descriptor *d,
+ struct descriptor *last)
+{
+ struct iso_context *ctx =
+ container_of(context, struct iso_context, context);
+
+ if (!last->transfer_status)
+ /* Descriptor(s) not done yet, stop iteration */
+ return 0;
+
+ if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS)
+ ctx->base.callback.mc(&ctx->base,
+ le32_to_cpu(last->data_address) +
+ le16_to_cpu(last->req_count) -
+ le16_to_cpu(last->res_count),
+ ctx->base.callback_data);
+
+ return 1;
+}
+
static int handle_it_packet(struct context *context,
struct descriptor *d,
struct descriptor *last)
@@ -2040,71 +2274,118 @@ static int handle_it_packet(struct context *context,
ctx->header_length += 4;
}
if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS) {
- ctx->base.callback(&ctx->base, le16_to_cpu(last->res_count),
- ctx->header_length, ctx->header,
- ctx->base.callback_data);
+ ctx->base.callback.sc(&ctx->base, le16_to_cpu(last->res_count),
+ ctx->header_length, ctx->header,
+ ctx->base.callback_data);
ctx->header_length = 0;
}
return 1;
}
+static void set_multichannel_mask(struct fw_ohci *ohci, u64 channels)
+{
+ u32 hi = channels >> 32, lo = channels;
+
+ reg_write(ohci, OHCI1394_IRMultiChanMaskHiClear, ~hi);
+ reg_write(ohci, OHCI1394_IRMultiChanMaskLoClear, ~lo);
+ reg_write(ohci, OHCI1394_IRMultiChanMaskHiSet, hi);
+ reg_write(ohci, OHCI1394_IRMultiChanMaskLoSet, lo);
+ mmiowb();
+ ohci->mc_channels = channels;
+}
+
static struct fw_iso_context *ohci_allocate_iso_context(struct fw_card *card,
int type, int channel, size_t header_size)
{
struct fw_ohci *ohci = fw_ohci(card);
- struct iso_context *ctx, *list;
- descriptor_callback_t callback;
- u64 *channels, dont_care = ~0ULL;
- u32 *mask, regs;
+ struct iso_context *uninitialized_var(ctx);
+ descriptor_callback_t uninitialized_var(callback);
+ u64 *uninitialized_var(channels);
+ u32 *uninitialized_var(mask), uninitialized_var(regs);
unsigned long flags;
- int index, ret = -ENOMEM;
+ int index, ret = -EBUSY;
+
+ spin_lock_irqsave(&ohci->lock, flags);
- if (type == FW_ISO_CONTEXT_TRANSMIT) {
- channels = &dont_care;
- mask = &ohci->it_context_mask;
- list = ohci->it_context_list;
+ switch (type) {
+ case FW_ISO_CONTEXT_TRANSMIT:
+ mask = &ohci->it_context_mask;
callback = handle_it_packet;
- } else {
+ index = ffs(*mask) - 1;
+ if (index >= 0) {
+ *mask &= ~(1 << index);
+ regs = OHCI1394_IsoXmitContextBase(index);
+ ctx = &ohci->it_context_list[index];
+ }
+ break;
+
+ case FW_ISO_CONTEXT_RECEIVE:
channels = &ohci->ir_context_channels;
- mask = &ohci->ir_context_mask;
- list = ohci->ir_context_list;
+ mask = &ohci->ir_context_mask;
callback = handle_ir_packet_per_buffer;
- }
+ index = *channels & 1ULL << channel ? ffs(*mask) - 1 : -1;
+ if (index >= 0) {
+ *channels &= ~(1ULL << channel);
+ *mask &= ~(1 << index);
+ regs = OHCI1394_IsoRcvContextBase(index);
+ ctx = &ohci->ir_context_list[index];
+ }
+ break;
- spin_lock_irqsave(&ohci->lock, flags);
- index = *channels & 1ULL << channel ? ffs(*mask) - 1 : -1;
- if (index >= 0) {
- *channels &= ~(1ULL << channel);
- *mask &= ~(1 << index);
+ case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
+ mask = &ohci->ir_context_mask;
+ callback = handle_ir_buffer_fill;
+ index = !ohci->mc_allocated ? ffs(*mask) - 1 : -1;
+ if (index >= 0) {
+ ohci->mc_allocated = true;
+ *mask &= ~(1 << index);
+ regs = OHCI1394_IsoRcvContextBase(index);
+ ctx = &ohci->ir_context_list[index];
+ }
+ break;
+
+ default:
+ index = -1;
+ ret = -ENOSYS;
}
+
spin_unlock_irqrestore(&ohci->lock, flags);
if (index < 0)
- return ERR_PTR(-EBUSY);
-
- if (type == FW_ISO_CONTEXT_TRANSMIT)
- regs = OHCI1394_IsoXmitContextBase(index);
- else
- regs = OHCI1394_IsoRcvContextBase(index);
+ return ERR_PTR(ret);
- ctx = &list[index];
memset(ctx, 0, sizeof(*ctx));
ctx->header_length = 0;
ctx->header = (void *) __get_free_page(GFP_KERNEL);
- if (ctx->header == NULL)
+ if (ctx->header == NULL) {
+ ret = -ENOMEM;
goto out;
-
+ }
ret = context_init(&ctx->context, ohci, regs, callback);
if (ret < 0)
goto out_with_header;
+ if (type == FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL)
+ set_multichannel_mask(ohci, 0);
+
return &ctx->base;
out_with_header:
free_page((unsigned long)ctx->header);
out:
spin_lock_irqsave(&ohci->lock, flags);
+
+ switch (type) {
+ case FW_ISO_CONTEXT_RECEIVE:
+ *channels |= 1ULL << channel;
+ break;
+
+ case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
+ ohci->mc_allocated = false;
+ break;
+ }
*mask |= 1 << index;
+
spin_unlock_irqrestore(&ohci->lock, flags);
return ERR_PTR(ret);
@@ -2115,10 +2396,11 @@ static int ohci_start_iso(struct fw_iso_context *base,
{
struct iso_context *ctx = container_of(base, struct iso_context, base);
struct fw_ohci *ohci = ctx->context.ohci;
- u32 control, match;
+ u32 control = IR_CONTEXT_ISOCH_HEADER, match;
int index;
- if (ctx->base.type == FW_ISO_CONTEXT_TRANSMIT) {
+ switch (ctx->base.type) {
+ case FW_ISO_CONTEXT_TRANSMIT:
index = ctx - ohci->it_context_list;
match = 0;
if (cycle >= 0)
@@ -2128,9 +2410,13 @@ static int ohci_start_iso(struct fw_iso_context *base,
reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 1 << index);
reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, 1 << index);
context_run(&ctx->context, match);
- } else {
+ break;
+
+ case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
+ control |= IR_CONTEXT_BUFFER_FILL|IR_CONTEXT_MULTI_CHANNEL_MODE;
+ /* fall through */
+ case FW_ISO_CONTEXT_RECEIVE:
index = ctx - ohci->ir_context_list;
- control = IR_CONTEXT_ISOCH_HEADER;
match = (tags << 28) | (sync << 8) | ctx->base.channel;
if (cycle >= 0) {
match |= (cycle & 0x07fff) << 12;
@@ -2141,6 +2427,7 @@ static int ohci_start_iso(struct fw_iso_context *base,
reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, 1 << index);
reg_write(ohci, CONTEXT_MATCH(ctx->context.regs), match);
context_run(&ctx->context, control);
+ break;
}
return 0;
@@ -2152,12 +2439,17 @@ static int ohci_stop_iso(struct fw_iso_context *base)
struct iso_context *ctx = container_of(base, struct iso_context, base);
int index;
- if (ctx->base.type == FW_ISO_CONTEXT_TRANSMIT) {
+ switch (ctx->base.type) {
+ case FW_ISO_CONTEXT_TRANSMIT:
index = ctx - ohci->it_context_list;
reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 1 << index);
- } else {
+ break;
+
+ case FW_ISO_CONTEXT_RECEIVE:
+ case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
index = ctx - ohci->ir_context_list;
reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 1 << index);
+ break;
}
flush_writes(ohci);
context_stop(&ctx->context);
@@ -2178,24 +2470,65 @@ static void ohci_free_iso_context(struct fw_iso_context *base)
spin_lock_irqsave(&ohci->lock, flags);
- if (ctx->base.type == FW_ISO_CONTEXT_TRANSMIT) {
+ switch (base->type) {
+ case FW_ISO_CONTEXT_TRANSMIT:
index = ctx - ohci->it_context_list;
ohci->it_context_mask |= 1 << index;
- } else {
+ break;
+
+ case FW_ISO_CONTEXT_RECEIVE:
index = ctx - ohci->ir_context_list;
ohci->ir_context_mask |= 1 << index;
ohci->ir_context_channels |= 1ULL << base->channel;
+ break;
+
+ case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
+ index = ctx - ohci->ir_context_list;
+ ohci->ir_context_mask |= 1 << index;
+ ohci->ir_context_channels |= ohci->mc_channels;
+ ohci->mc_channels = 0;
+ ohci->mc_allocated = false;
+ break;
}
spin_unlock_irqrestore(&ohci->lock, flags);
}
-static int ohci_queue_iso_transmit(struct fw_iso_context *base,
- struct fw_iso_packet *packet,
- struct fw_iso_buffer *buffer,
- unsigned long payload)
+static int ohci_set_iso_channels(struct fw_iso_context *base, u64 *channels)
+{
+ struct fw_ohci *ohci = fw_ohci(base->card);
+ unsigned long flags;
+ int ret;
+
+ switch (base->type) {
+ case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
+
+ spin_lock_irqsave(&ohci->lock, flags);
+
+ /* Don't allow multichannel to grab other contexts' channels. */
+ if (~ohci->ir_context_channels & ~ohci->mc_channels & *channels) {
+ *channels = ohci->ir_context_channels;
+ ret = -EBUSY;
+ } else {
+ set_multichannel_mask(ohci, *channels);
+ ret = 0;
+ }
+
+ spin_unlock_irqrestore(&ohci->lock, flags);
+
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static int queue_iso_transmit(struct iso_context *ctx,
+ struct fw_iso_packet *packet,
+ struct fw_iso_buffer *buffer,
+ unsigned long payload)
{
- struct iso_context *ctx = container_of(base, struct iso_context, base);
struct descriptor *d, *last, *pd;
struct fw_iso_packet *p;
__le32 *header;
@@ -2291,14 +2624,12 @@ static int ohci_queue_iso_transmit(struct fw_iso_context *base,
return 0;
}
-static int ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base,
- struct fw_iso_packet *packet,
- struct fw_iso_buffer *buffer,
- unsigned long payload)
+static int queue_iso_packet_per_buffer(struct iso_context *ctx,
+ struct fw_iso_packet *packet,
+ struct fw_iso_buffer *buffer,
+ unsigned long payload)
{
- struct iso_context *ctx = container_of(base, struct iso_context, base);
struct descriptor *d, *pd;
- struct fw_iso_packet *p = packet;
dma_addr_t d_bus, page_bus;
u32 z, header_z, rest;
int i, j, length;
@@ -2308,14 +2639,14 @@ static int ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base,
* The OHCI controller puts the isochronous header and trailer in the
* buffer, so we need at least 8 bytes.
*/
- packet_count = p->header_length / ctx->base.header_size;
+ packet_count = packet->header_length / ctx->base.header_size;
header_size = max(ctx->base.header_size, (size_t)8);
/* Get header size in number of descriptors. */
header_z = DIV_ROUND_UP(header_size, sizeof(*d));
page = payload >> PAGE_SHIFT;
offset = payload & ~PAGE_MASK;
- payload_per_buffer = p->payload_length / packet_count;
+ payload_per_buffer = packet->payload_length / packet_count;
for (i = 0; i < packet_count; i++) {
/* d points to the header descriptor */
@@ -2327,7 +2658,7 @@ static int ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base,
d->control = cpu_to_le16(DESCRIPTOR_STATUS |
DESCRIPTOR_INPUT_MORE);
- if (p->skip && i == 0)
+ if (packet->skip && i == 0)
d->control |= cpu_to_le16(DESCRIPTOR_WAIT);
d->req_count = cpu_to_le16(header_size);
d->res_count = d->req_count;
@@ -2360,7 +2691,7 @@ static int ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base,
pd->control = cpu_to_le16(DESCRIPTOR_STATUS |
DESCRIPTOR_INPUT_LAST |
DESCRIPTOR_BRANCH_ALWAYS);
- if (p->interrupt && i == packet_count - 1)
+ if (packet->interrupt && i == packet_count - 1)
pd->control |= cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS);
context_append(&ctx->context, d, z, header_z);
@@ -2369,6 +2700,58 @@ static int ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base,
return 0;
}
+static int queue_iso_buffer_fill(struct iso_context *ctx,
+ struct fw_iso_packet *packet,
+ struct fw_iso_buffer *buffer,
+ unsigned long payload)
+{
+ struct descriptor *d;
+ dma_addr_t d_bus, page_bus;
+ int page, offset, rest, z, i, length;
+
+ page = payload >> PAGE_SHIFT;
+ offset = payload & ~PAGE_MASK;
+ rest = packet->payload_length;
+
+ /* We need one descriptor for each page in the buffer. */
+ z = DIV_ROUND_UP(offset + rest, PAGE_SIZE);
+
+ if (WARN_ON(offset & 3 || rest & 3 || page + z > buffer->page_count))
+ return -EFAULT;
+
+ for (i = 0; i < z; i++) {
+ d = context_get_descriptors(&ctx->context, 1, &d_bus);
+ if (d == NULL)
+ return -ENOMEM;
+
+ d->control = cpu_to_le16(DESCRIPTOR_INPUT_MORE |
+ DESCRIPTOR_BRANCH_ALWAYS);
+ if (packet->skip && i == 0)
+ d->control |= cpu_to_le16(DESCRIPTOR_WAIT);
+ if (packet->interrupt && i == z - 1)
+ d->control |= cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS);
+
+ if (offset + rest < PAGE_SIZE)
+ length = rest;
+ else
+ length = PAGE_SIZE - offset;
+ d->req_count = cpu_to_le16(length);
+ d->res_count = d->req_count;
+ d->transfer_status = 0;
+
+ page_bus = page_private(buffer->pages[page]);
+ d->data_address = cpu_to_le32(page_bus + offset);
+
+ rest -= length;
+ offset = 0;
+ page++;
+
+ context_append(&ctx->context, d, 1, 0);
+ }
+
+ return 0;
+}
+
static int ohci_queue_iso(struct fw_iso_context *base,
struct fw_iso_packet *packet,
struct fw_iso_buffer *buffer,
@@ -2376,14 +2759,20 @@ static int ohci_queue_iso(struct fw_iso_context *base,
{
struct iso_context *ctx = container_of(base, struct iso_context, base);
unsigned long flags;
- int ret;
+ int ret = -ENOSYS;
spin_lock_irqsave(&ctx->context.ohci->lock, flags);
- if (base->type == FW_ISO_CONTEXT_TRANSMIT)
- ret = ohci_queue_iso_transmit(base, packet, buffer, payload);
- else
- ret = ohci_queue_iso_receive_packet_per_buffer(base, packet,
- buffer, payload);
+ switch (base->type) {
+ case FW_ISO_CONTEXT_TRANSMIT:
+ ret = queue_iso_transmit(ctx, packet, buffer, payload);
+ break;
+ case FW_ISO_CONTEXT_RECEIVE:
+ ret = queue_iso_packet_per_buffer(ctx, packet, buffer, payload);
+ break;
+ case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
+ ret = queue_iso_buffer_fill(ctx, packet, buffer, payload);
+ break;
+ }
spin_unlock_irqrestore(&ctx->context.ohci->lock, flags);
return ret;
@@ -2391,16 +2780,19 @@ static int ohci_queue_iso(struct fw_iso_context *base,
static const struct fw_card_driver ohci_driver = {
.enable = ohci_enable,
+ .read_phy_reg = ohci_read_phy_reg,
.update_phy_reg = ohci_update_phy_reg,
.set_config_rom = ohci_set_config_rom,
.send_request = ohci_send_request,
.send_response = ohci_send_response,
.cancel_packet = ohci_cancel_packet,
.enable_phys_dma = ohci_enable_phys_dma,
- .get_cycle_time = ohci_get_cycle_time,
+ .read_csr = ohci_read_csr,
+ .write_csr = ohci_write_csr,
.allocate_iso_context = ohci_allocate_iso_context,
.free_iso_context = ohci_free_iso_context,
+ .set_iso_channels = ohci_set_iso_channels,
.queue_iso = ohci_queue_iso,
.start_iso = ohci_start_iso,
.stop_iso = ohci_stop_iso,
@@ -2465,6 +2857,7 @@ static int __devinit pci_probe(struct pci_dev *dev,
pci_set_drvdata(dev, ohci);
spin_lock_init(&ohci->lock);
+ mutex_init(&ohci->phy_reg_mutex);
tasklet_init(&ohci->bus_reset_tasklet,
bus_reset_tasklet, (unsigned long)ohci);
@@ -2625,6 +3018,7 @@ static void pci_remove(struct pci_dev *dev)
context_release(&ohci->at_response_ctx);
kfree(ohci->it_context_list);
kfree(ohci->ir_context_list);
+ pci_disable_msi(dev);
pci_iounmap(dev, ohci->registers);
pci_release_region(dev, 0);
pci_disable_device(dev);
@@ -2642,6 +3036,7 @@ static int pci_suspend(struct pci_dev *dev, pm_message_t state)
software_reset(ohci);
free_irq(dev->irq, ohci);
+ pci_disable_msi(dev);
err = pci_save_state(dev);
if (err) {
fw_error("pci_save_state failed\n");
diff --git a/drivers/firewire/ohci.h b/drivers/firewire/ohci.h
index 3bc9a5d744eb..0e6c5a466908 100644
--- a/drivers/firewire/ohci.h
+++ b/drivers/firewire/ohci.h
@@ -60,6 +60,7 @@
#define OHCI1394_LinkControl_cycleSource (1 << 22)
#define OHCI1394_NodeID 0x0E8
#define OHCI1394_NodeID_idValid 0x80000000
+#define OHCI1394_NodeID_root 0x40000000
#define OHCI1394_NodeID_nodeNumber 0x0000003f
#define OHCI1394_NodeID_busNumber 0x0000ffc0
#define OHCI1394_PhyControl 0x0EC
diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c
index ca264f2fdf0c..9f76171717e5 100644
--- a/drivers/firewire/sbp2.c
+++ b/drivers/firewire/sbp2.c
@@ -410,8 +410,7 @@ static void free_orb(struct kref *kref)
static void sbp2_status_write(struct fw_card *card, struct fw_request *request,
int tcode, int destination, int source,
- int generation, int speed,
- unsigned long long offset,
+ int generation, unsigned long long offset,
void *payload, size_t length, void *callback_data)
{
struct sbp2_logical_unit *lu = callback_data;
@@ -508,8 +507,7 @@ static void sbp2_send_orb(struct sbp2_orb *orb, struct sbp2_logical_unit *lu,
fw_send_request(device->card, &orb->t, TCODE_WRITE_BLOCK_REQUEST,
node_id, generation, device->max_speed, offset,
- &orb->pointer, sizeof(orb->pointer),
- complete_transaction, orb);
+ &orb->pointer, 8, complete_transaction, orb);
}
static int sbp2_cancel_orbs(struct sbp2_logical_unit *lu)
@@ -654,7 +652,7 @@ static void sbp2_agent_reset(struct sbp2_logical_unit *lu)
fw_run_transaction(device->card, TCODE_WRITE_QUADLET_REQUEST,
lu->tgt->node_id, lu->generation, device->max_speed,
lu->command_block_agent_address + SBP2_AGENT_RESET,
- &d, sizeof(d));
+ &d, 4);
}
static void complete_agent_reset_write_no_wait(struct fw_card *card,
@@ -676,7 +674,7 @@ static void sbp2_agent_reset_no_wait(struct sbp2_logical_unit *lu)
fw_send_request(device->card, t, TCODE_WRITE_QUADLET_REQUEST,
lu->tgt->node_id, lu->generation, device->max_speed,
lu->command_block_agent_address + SBP2_AGENT_RESET,
- &d, sizeof(d), complete_agent_reset_write_no_wait, t);
+ &d, 4, complete_agent_reset_write_no_wait, t);
}
static inline void sbp2_allow_block(struct sbp2_logical_unit *lu)
@@ -866,8 +864,7 @@ static void sbp2_set_busy_timeout(struct sbp2_logical_unit *lu)
fw_run_transaction(device->card, TCODE_WRITE_QUADLET_REQUEST,
lu->tgt->node_id, lu->generation, device->max_speed,
- CSR_REGISTER_BASE + CSR_BUSY_TIMEOUT,
- &d, sizeof(d));
+ CSR_REGISTER_BASE + CSR_BUSY_TIMEOUT, &d, 4);
}
static void sbp2_reconnect(struct work_struct *work);
diff --git a/drivers/ieee1394/dv1394.c b/drivers/ieee1394/dv1394.c
index adaefabc40e9..c5a031b79d03 100644
--- a/drivers/ieee1394/dv1394.c
+++ b/drivers/ieee1394/dv1394.c
@@ -172,7 +172,7 @@ static DEFINE_SPINLOCK(dv1394_cards_lock);
static inline struct video_card* file_to_video_card(struct file *file)
{
- return (struct video_card*) file->private_data;
+ return file->private_data;
}
/*** FRAME METHODS *********************************************************/
@@ -610,7 +610,7 @@ static void frame_prepare(struct video_card *video, unsigned int this_frame)
} else {
u32 transmit_sec, transmit_cyc;
- u32 ts_cyc, ts_off;
+ u32 ts_cyc;
/* DMA is stopped, so this is the very first frame */
video->active_frame = this_frame;
@@ -636,7 +636,6 @@ static void frame_prepare(struct video_card *video, unsigned int this_frame)
transmit_sec += transmit_cyc/8000;
transmit_cyc %= 8000;
- ts_off = ct_off;
ts_cyc = transmit_cyc + 3;
ts_cyc %= 8000;
@@ -1784,7 +1783,7 @@ static int dv1394_open(struct inode *inode, struct file *file)
struct video_card *video = NULL;
if (file->private_data) {
- video = (struct video_card*) file->private_data;
+ video = file->private_data;
} else {
/* look up the card by ID */
@@ -2004,7 +2003,7 @@ static void ir_tasklet_func(unsigned long data)
int sof=0; /* start-of-frame flag */
struct frame *f;
- u16 packet_length, packet_time;
+ u16 packet_length;
int i, dbc=0;
struct DMA_descriptor_block *block = NULL;
u16 xferstatus;
@@ -2024,11 +2023,6 @@ static void ir_tasklet_func(unsigned long data)
sizeof(struct packet));
packet_length = le16_to_cpu(p->data_length);
- packet_time = le16_to_cpu(p->timestamp);
-
- irq_printk("received packet %02d, timestamp=%04x, length=%04x, sof=%02x%02x\n", video->current_packet,
- packet_time, packet_length,
- p->data[0], p->data[1]);
/* get the descriptor based on packet_buffer cursor */
f = video->frames[video->current_packet / MAX_PACKETS];
@@ -2320,7 +2314,6 @@ static void dv1394_add_host(struct hpsb_host *host)
static void dv1394_host_reset(struct hpsb_host *host)
{
- struct ti_ohci *ohci;
struct video_card *video = NULL, *tmp_vid;
unsigned long flags;
@@ -2328,9 +2321,6 @@ static void dv1394_host_reset(struct hpsb_host *host)
if (strcmp(host->driver->name, OHCI1394_DRIVER_NAME))
return;
- ohci = (struct ti_ohci *)host->hostdata;
-
-
/* find the corresponding video_cards */
spin_lock_irqsave(&dv1394_cards_lock, flags);
list_for_each_entry(tmp_vid, &dv1394_cards, list) {
diff --git a/drivers/ieee1394/eth1394.c b/drivers/ieee1394/eth1394.c
index a4e9dcb6d4a9..bc289e367e30 100644
--- a/drivers/ieee1394/eth1394.c
+++ b/drivers/ieee1394/eth1394.c
@@ -1258,7 +1258,6 @@ static void ether1394_iso(struct hpsb_iso *iso)
char *buf;
struct eth1394_host_info *hi;
struct net_device *dev;
- struct eth1394_priv *priv;
unsigned int len;
u32 specifier_id;
u16 source_id;
@@ -1288,8 +1287,6 @@ static void ether1394_iso(struct hpsb_iso *iso)
(be32_to_cpu(data[1]) & 0xff000000) >> 24;
source_id = be32_to_cpu(data[0]) >> 16;
- priv = netdev_priv(dev);
-
if (info->channel != (iso->host->csr.broadcast_channel & 0x3f)
|| specifier_id != ETHER1394_GASP_SPECIFIER_ID) {
/* This packet is not for us */
diff --git a/drivers/ieee1394/raw1394.c b/drivers/ieee1394/raw1394.c
index b563d5e9fa2e..f3401427404c 100644
--- a/drivers/ieee1394/raw1394.c
+++ b/drivers/ieee1394/raw1394.c
@@ -440,7 +440,7 @@ static struct pending_request *next_complete_req(struct file_info *fi)
static ssize_t raw1394_read(struct file *file, char __user * buffer,
size_t count, loff_t * offset_is_ignored)
{
- struct file_info *fi = (struct file_info *)file->private_data;
+ struct file_info *fi = file->private_data;
struct pending_request *req;
ssize_t ret;
@@ -1015,7 +1015,7 @@ static int arm_write(struct hpsb_host *host, int nodeid, int destid,
struct arm_addr *arm_addr = NULL;
struct arm_request *arm_req = NULL;
struct arm_response *arm_resp = NULL;
- int found = 0, size = 0, rcode = -1, length_conflict = 0;
+ int found = 0, size = 0, rcode = -1;
struct arm_request_response *arm_req_resp = NULL;
DBGMSG("arm_write called by node: %X "
@@ -1054,7 +1054,6 @@ static int arm_write(struct hpsb_host *host, int nodeid, int destid,
}
if (arm_addr->rec_length < length) {
DBGMSG("arm_write blocklength too big -> rcode_data_error");
- length_conflict = 1;
rcode = RCODE_DATA_ERROR; /* hardware error, data is unavailable */
}
if (rcode == -1) {
@@ -2245,7 +2244,7 @@ static int state_connected(struct file_info *fi, struct pending_request *req)
static ssize_t raw1394_write(struct file *file, const char __user * buffer,
size_t count, loff_t * offset_is_ignored)
{
- struct file_info *fi = (struct file_info *)file->private_data;
+ struct file_info *fi = file->private_data;
struct pending_request *req;
ssize_t retval = -EBADFD;
diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c
index 4565cb5d3d1a..d6e251a300ce 100644
--- a/drivers/ieee1394/sbp2.c
+++ b/drivers/ieee1394/sbp2.c
@@ -1350,12 +1350,11 @@ static void sbp2_parse_unit_directory(struct sbp2_lu *lu,
struct csr1212_keyval *kv;
struct csr1212_dentry *dentry;
u64 management_agent_addr;
- u32 unit_characteristics, firmware_revision, model;
+ u32 firmware_revision, model;
unsigned workarounds;
int i;
management_agent_addr = 0;
- unit_characteristics = 0;
firmware_revision = SBP2_ROM_VALUE_MISSING;
model = ud->flags & UNIT_DIRECTORY_MODEL_ID ?
ud->model_id : SBP2_ROM_VALUE_MISSING;
@@ -1372,17 +1371,15 @@ static void sbp2_parse_unit_directory(struct sbp2_lu *lu,
lu->lun = ORB_SET_LUN(kv->value.immediate);
break;
- case SBP2_UNIT_CHARACTERISTICS_KEY:
- /* FIXME: This is ignored so far.
- * See SBP-2 clause 7.4.8. */
- unit_characteristics = kv->value.immediate;
- break;
case SBP2_FIRMWARE_REVISION_KEY:
firmware_revision = kv->value.immediate;
break;
default:
+ /* FIXME: Check for SBP2_UNIT_CHARACTERISTICS_KEY
+ * mgt_ORB_timeout and ORB_size, SBP-2 clause 7.4.8. */
+
/* FIXME: Check for SBP2_DEVICE_TYPE_AND_LUN_KEY.
* Its "ordered" bit has consequences for command ORB
* list handling. See SBP-2 clauses 4.6, 7.4.11, 10.2 */
diff --git a/drivers/ieee1394/video1394.c b/drivers/ieee1394/video1394.c
index a42bd6893bcf..5c74f796d7f1 100644
--- a/drivers/ieee1394/video1394.c
+++ b/drivers/ieee1394/video1394.c
@@ -720,7 +720,7 @@ static inline unsigned video1394_buffer_state(struct dma_iso_ctx *d,
static long video1394_ioctl(struct file *file,
unsigned int cmd, unsigned long arg)
{
- struct file_ctx *ctx = (struct file_ctx *)file->private_data;
+ struct file_ctx *ctx = file->private_data;
struct ti_ohci *ohci = ctx->ohci;
unsigned long flags;
void __user *argp = (void __user *)arg;
@@ -1045,14 +1045,9 @@ static long video1394_ioctl(struct file *file,
if (get_user(qv, &p->packet_sizes))
return -EFAULT;
- psizes = kmalloc(buf_size, GFP_KERNEL);
- if (!psizes)
- return -ENOMEM;
-
- if (copy_from_user(psizes, qv, buf_size)) {
- kfree(psizes);
- return -EFAULT;
- }
+ psizes = memdup_user(qv, buf_size);
+ if (IS_ERR(psizes))
+ return PTR_ERR(psizes);
}
spin_lock_irqsave(&d->lock,flags);
@@ -1177,7 +1172,7 @@ static long video1394_ioctl(struct file *file,
static int video1394_mmap(struct file *file, struct vm_area_struct *vma)
{
- struct file_ctx *ctx = (struct file_ctx *)file->private_data;
+ struct file_ctx *ctx = file->private_data;
if (ctx->current_ctx == NULL) {
PRINT(KERN_ERR, ctx->ohci->host->id,
@@ -1244,7 +1239,7 @@ static int video1394_open(struct inode *inode, struct file *file)
static int video1394_release(struct inode *inode, struct file *file)
{
- struct file_ctx *ctx = (struct file_ctx *)file->private_data;
+ struct file_ctx *ctx = file->private_data;
struct ti_ohci *ohci = ctx->ohci;
struct list_head *lh, *next;
u64 mask;
diff --git a/drivers/media/dvb/firewire/firedtv-fw.c b/drivers/media/dvb/firewire/firedtv-fw.c
index 75afe4f81e33..7424b0493f9d 100644
--- a/drivers/media/dvb/firewire/firedtv-fw.c
+++ b/drivers/media/dvb/firewire/firedtv-fw.c
@@ -194,8 +194,8 @@ static const struct firedtv_backend backend = {
static void handle_fcp(struct fw_card *card, struct fw_request *request,
int tcode, int destination, int source, int generation,
- int speed, unsigned long long offset,
- void *payload, size_t length, void *callback_data)
+ unsigned long long offset, void *payload, size_t length,
+ void *callback_data)
{
struct firedtv *f, *fdtv = NULL;
struct fw_device *device;
diff --git a/include/linux/firewire-cdev.h b/include/linux/firewire-cdev.h
index 68f883b30a53..68c642d8843d 100644
--- a/include/linux/firewire-cdev.h
+++ b/include/linux/firewire-cdev.h
@@ -30,12 +30,18 @@
#include <linux/types.h>
#include <linux/firewire-constants.h>
-#define FW_CDEV_EVENT_BUS_RESET 0x00
-#define FW_CDEV_EVENT_RESPONSE 0x01
-#define FW_CDEV_EVENT_REQUEST 0x02
-#define FW_CDEV_EVENT_ISO_INTERRUPT 0x03
-#define FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED 0x04
-#define FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED 0x05
+#define FW_CDEV_EVENT_BUS_RESET 0x00
+#define FW_CDEV_EVENT_RESPONSE 0x01
+#define FW_CDEV_EVENT_REQUEST 0x02
+#define FW_CDEV_EVENT_ISO_INTERRUPT 0x03
+#define FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED 0x04
+#define FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED 0x05
+
+/* available since kernel version 2.6.36 */
+#define FW_CDEV_EVENT_REQUEST2 0x06
+#define FW_CDEV_EVENT_PHY_PACKET_SENT 0x07
+#define FW_CDEV_EVENT_PHY_PACKET_RECEIVED 0x08
+#define FW_CDEV_EVENT_ISO_INTERRUPT_MULTICHANNEL 0x09
/**
* struct fw_cdev_event_common - Common part of all fw_cdev_event_ types
@@ -68,6 +74,10 @@ struct fw_cdev_event_common {
* This event is sent when the bus the device belongs to goes through a bus
* reset. It provides information about the new bus configuration, such as
* new node ID for this device, new root ID, and others.
+ *
+ * If @bm_node_id is 0xffff right after a bus reset, it can be reread by an
+ * %FW_CDEV_IOC_GET_INFO ioctl once bus manager selection has finished.
+ * Kernels with ABI version < 4 do not set @bm_node_id.
*/
struct fw_cdev_event_bus_reset {
__u64 closure;
@@ -82,8 +92,9 @@ struct fw_cdev_event_bus_reset {
/**
* struct fw_cdev_event_response - Sent when a response packet was received
- * @closure: See &fw_cdev_event_common;
- * set by %FW_CDEV_IOC_SEND_REQUEST ioctl
+ * @closure: See &fw_cdev_event_common; set by %FW_CDEV_IOC_SEND_REQUEST
+ * or %FW_CDEV_IOC_SEND_BROADCAST_REQUEST
+ * or %FW_CDEV_IOC_SEND_STREAM_PACKET ioctl
* @type: See &fw_cdev_event_common; always %FW_CDEV_EVENT_RESPONSE
* @rcode: Response code returned by the remote node
* @length: Data length, i.e. the response's payload size in bytes
@@ -93,6 +104,11 @@ struct fw_cdev_event_bus_reset {
* sent by %FW_CDEV_IOC_SEND_REQUEST ioctl. The payload data for responses
* carrying data (read and lock responses) follows immediately and can be
* accessed through the @data field.
+ *
+ * The event is also generated after conclusions of transactions that do not
+ * involve response packets. This includes unified write transactions,
+ * broadcast write transactions, and transmission of asynchronous stream
+ * packets. @rcode indicates success or failure of such transmissions.
*/
struct fw_cdev_event_response {
__u64 closure;
@@ -103,11 +119,46 @@ struct fw_cdev_event_response {
};
/**
- * struct fw_cdev_event_request - Sent on incoming request to an address region
+ * struct fw_cdev_event_request - Old version of &fw_cdev_event_request2
* @closure: See &fw_cdev_event_common; set by %FW_CDEV_IOC_ALLOCATE ioctl
* @type: See &fw_cdev_event_common; always %FW_CDEV_EVENT_REQUEST
+ * @tcode: See &fw_cdev_event_request2
+ * @offset: See &fw_cdev_event_request2
+ * @handle: See &fw_cdev_event_request2
+ * @length: See &fw_cdev_event_request2
+ * @data: See &fw_cdev_event_request2
+ *
+ * This event is sent instead of &fw_cdev_event_request2 if the kernel or
+ * the client implements ABI version <= 3.
+ *
+ * Unlike &fw_cdev_event_request2, the sender identity cannot be established,
+ * broadcast write requests cannot be distinguished from unicast writes, and
+ * @tcode of lock requests is %TCODE_LOCK_REQUEST.
+ *
+ * Requests to the FCP_REQUEST or FCP_RESPONSE register are responded to as
+ * with &fw_cdev_event_request2, except in kernel 2.6.32 and older which send
+ * the response packet of the client's %FW_CDEV_IOC_SEND_RESPONSE ioctl.
+ */
+struct fw_cdev_event_request {
+ __u64 closure;
+ __u32 type;
+ __u32 tcode;
+ __u64 offset;
+ __u32 handle;
+ __u32 length;
+ __u32 data[0];
+};
+
+/**
+ * struct fw_cdev_event_request2 - Sent on incoming request to an address region
+ * @closure: See &fw_cdev_event_common; set by %FW_CDEV_IOC_ALLOCATE ioctl
+ * @type: See &fw_cdev_event_common; always %FW_CDEV_EVENT_REQUEST2
* @tcode: Transaction code of the incoming request
* @offset: The offset into the 48-bit per-node address space
+ * @source_node_id: Sender node ID
+ * @destination_node_id: Destination node ID
+ * @card: The index of the card from which the request came
+ * @generation: Bus generation in which the request is valid
* @handle: Reference to the kernel-side pending request
* @length: Data length, i.e. the request's payload size in bytes
* @data: Incoming data, if any
@@ -120,12 +171,42 @@ struct fw_cdev_event_response {
*
* The payload data for requests carrying data (write and lock requests)
* follows immediately and can be accessed through the @data field.
+ *
+ * Unlike &fw_cdev_event_request, @tcode of lock requests is one of the
+ * firewire-core specific %TCODE_LOCK_MASK_SWAP...%TCODE_LOCK_VENDOR_DEPENDENT,
+ * i.e. encodes the extended transaction code.
+ *
+ * @card may differ from &fw_cdev_get_info.card because requests are received
+ * from all cards of the Linux host. @source_node_id, @destination_node_id, and
+ * @generation pertain to that card. Destination node ID and bus generation may
+ * therefore differ from the corresponding fields of the last
+ * &fw_cdev_event_bus_reset.
+ *
+ * @destination_node_id may also differ from the current node ID because of a
+ * non-local bus ID part or in case of a broadcast write request. Note, a
+ * client must call an %FW_CDEV_IOC_SEND_RESPONSE ioctl even in case of a
+ * broadcast write request; the kernel will then release the kernel-side pending
+ * request but will not actually send a response packet.
+ *
+ * In case of a write request to FCP_REQUEST or FCP_RESPONSE, the kernel already
+ * sent a write response immediately after the request was received; in this
+ * case the client must still call an %FW_CDEV_IOC_SEND_RESPONSE ioctl to
+ * release the kernel-side pending request, though another response won't be
+ * sent.
+ *
+ * If the client subsequently needs to initiate requests to the sender node of
+ * an &fw_cdev_event_request2, it needs to use a device file with matching
+ * card index, node ID, and generation for outbound requests.
*/
-struct fw_cdev_event_request {
+struct fw_cdev_event_request2 {
__u64 closure;
__u32 type;
__u32 tcode;
__u64 offset;
+ __u32 source_node_id;
+ __u32 destination_node_id;
+ __u32 card;
+ __u32 generation;
__u32 handle;
__u32 length;
__u32 data[0];
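
For illustration only, a minimal userspace sketch of answering such a request as the comment above describes (the function name and error handling are placeholders, not part of this patch):

	/* needs <stdio.h>, <sys/ioctl.h>, <linux/firewire-cdev.h>, <linux/firewire-constants.h> */
	static void answer_write_request(int fd, const struct fw_cdev_event_request2 *rq)
	{
		struct fw_cdev_send_response rsp = {
			.rcode  = RCODE_COMPLETE,
			.length = 0,          /* a write response carries no payload */
			.data   = 0,
			.handle = rq->handle, /* releases the kernel-side pending request */
		};

		if (ioctl(fd, FW_CDEV_IOC_SEND_RESPONSE, &rsp) < 0)
			perror("FW_CDEV_IOC_SEND_RESPONSE");
	}
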
@@ -141,26 +222,43 @@ struct fw_cdev_event_request {
* @header: Stripped headers, if any
*
* This event is sent when the controller has completed an &fw_cdev_iso_packet
- * with the %FW_CDEV_ISO_INTERRUPT bit set. In the receive case, the headers
- * stripped of all packets up until and including the interrupt packet are
- * returned in the @header field. The amount of header data per packet is as
- * specified at iso context creation by &fw_cdev_create_iso_context.header_size.
+ * with the %FW_CDEV_ISO_INTERRUPT bit set.
*
- * In version 1 of this ABI, header data consisted of the 1394 isochronous
- * packet header, followed by quadlets from the packet payload if
- * &fw_cdev_create_iso_context.header_size > 4.
+ * Isochronous transmit events (context type %FW_CDEV_ISO_CONTEXT_TRANSMIT):
*
- * In version 2 of this ABI, header data consist of the 1394 isochronous
- * packet header, followed by a timestamp quadlet if
- * &fw_cdev_create_iso_context.header_size > 4, followed by quadlets from the
- * packet payload if &fw_cdev_create_iso_context.header_size > 8.
+ * In version 3 and some implementations of version 2 of the ABI, &header_length
+ * is a multiple of 4 and &header contains timestamps of all packets up until
+ * the interrupt packet. The format of the timestamps is as described below for
+ * isochronous reception. In version 1 of the ABI, &header_length was 0.
*
- * Behaviour of ver. 1 of this ABI is no longer available since ABI ver. 2.
+ * Isochronous receive events (context type %FW_CDEV_ISO_CONTEXT_RECEIVE):
+ *
+ * The headers stripped of all packets up until and including the interrupt
+ * packet are returned in the @header field. The amount of header data per
+ * packet is as specified at iso context creation by
+ * &fw_cdev_create_iso_context.header_size.
+ *
+ * Hence, _interrupt.header_length / _context.header_size is the number of
+ * packets received in this interrupt event. The client can now iterate
+ * through the mmap()'ed DMA buffer according to this number of packets and
+ * to the buffer sizes as the client specified in &fw_cdev_queue_iso.
+ *
+ * Since version 2 of this ABI, the portion for each packet in _interrupt.header
+ * consists of the 1394 isochronous packet header, followed by a timestamp
+ * quadlet if &fw_cdev_create_iso_context.header_size > 4, followed by quadlets
+ * from the packet payload if &fw_cdev_create_iso_context.header_size > 8.
*
- * Format of 1394 iso packet header: 16 bits len, 2 bits tag, 6 bits channel,
- * 4 bits tcode, 4 bits sy, in big endian byte order. Format of timestamp:
- * 16 bits invalid, 3 bits cycleSeconds, 13 bits cycleCount, in big endian byte
- * order.
+ * Format of 1394 iso packet header: 16 bits data_length, 2 bits tag, 6 bits
+ * channel, 4 bits tcode, 4 bits sy, in big endian byte order.
+ * data_length is the actual received size of the packet without the four
+ * 1394 iso packet header bytes.
+ *
+ * Format of timestamp: 16 bits invalid, 3 bits cycleSeconds, 13 bits
+ * cycleCount, in big endian byte order.
+ *
+ * In version 1 of the ABI, no timestamp quadlet was inserted; instead, payload
+ * data followed directly after the 1394 iso header if header_size > 4.
+ * Behaviour of ver. 1 of this ABI is no longer available since ABI ver. 2.
*/
struct fw_cdev_event_iso_interrupt {
__u64 closure;
@@ -171,6 +269,43 @@ struct fw_cdev_event_iso_interrupt {
};
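
As an illustrative sketch of the per-packet header layout described above (single-channel reception, ABI v2+ layout; the function name is a placeholder, and the userspace endian helper be32toh() from <endian.h> is assumed):

	/* needs <stddef.h>, <stdint.h>, <endian.h>, <linux/firewire-cdev.h> */
	static void walk_ir_headers(const struct fw_cdev_event_iso_interrupt *ev,
				    size_t header_size /* as passed at context creation */)
	{
		size_t i, n = ev->header_length / header_size;

		for (i = 0; i < n; i++) {
			const uint32_t *h = (const void *)((const char *)ev->header + i * header_size);
			uint32_t iso_hdr     = be32toh(h[0]);
			uint32_t data_length = iso_hdr >> 16;		/* payload bytes on the wire */
			uint32_t channel     = (iso_hdr >> 8) & 0x3f;
			/* h[1] is the timestamp quadlet if header_size > 4 */
			(void)data_length; (void)channel;
		}
	}
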
/**
+ * struct fw_cdev_event_iso_interrupt_mc - An iso buffer chunk was completed
+ * @closure: See &fw_cdev_event_common;
+ * set by %FW_CDEV_CREATE_ISO_CONTEXT ioctl
+ * @type: %FW_CDEV_EVENT_ISO_INTERRUPT_MULTICHANNEL
+ * @completed: Offset into the receive buffer; data before this offset is valid
+ *
+ * This event is sent in multichannel contexts (context type
+ * %FW_CDEV_ISO_CONTEXT_RECEIVE_MULTICHANNEL) for &fw_cdev_iso_packet buffer
+ * chunks that have the %FW_CDEV_ISO_INTERRUPT bit set. Whether this happens
+ * when a packet is completed and/or when a buffer chunk is completed depends
+ * on the hardware implementation.
+ *
+ * The buffer is continuously filled with the following data, per packet:
+ * - the 1394 iso packet header as described at &fw_cdev_event_iso_interrupt,
+ * but in little endian byte order,
+ * - packet payload (as many bytes as specified in the data_length field of
+ * the 1394 iso packet header) in big endian byte order,
+ * - 0...3 padding bytes as needed to align the following trailer quadlet,
+ * - trailer quadlet, containing the reception timestamp as described at
+ * &fw_cdev_event_iso_interrupt, but in little endian byte order.
+ *
+ * Hence the per-packet size is data_length (rounded up to a multiple of 4) + 8.
+ * When processing the data, stop before a packet that would cross the
+ * @completed offset.
+ *
+ * A packet near the end of a buffer chunk will typically spill over into the
+ * next queued buffer chunk. It is the responsibility of the client to check
+ * for this condition, assemble a broken-up packet from its parts, and not to
+ * re-queue any buffer chunks in which as yet unread packet parts reside.
+ */
+struct fw_cdev_event_iso_interrupt_mc {
+ __u64 closure;
+ __u32 type;
+ __u32 completed;
+};
+
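
A sketch of walking that buffer format, under the assumptions that @pos is the buffer offset where the first unprocessed packet starts and that the le32toh() helper from <endian.h> is available (function and parameter names are placeholders):

	/* needs <stdint.h>, <string.h>, <endian.h>, <linux/firewire-cdev.h> */
	static uint32_t walk_mc_packets(const uint8_t *buf, uint32_t pos,
					const struct fw_cdev_event_iso_interrupt_mc *ev)
	{
		while (pos + 4 <= ev->completed) {
			uint32_t iso_hdr;

			memcpy(&iso_hdr, buf + pos, 4);
			iso_hdr = le32toh(iso_hdr);
			uint32_t data_length = iso_hdr >> 16;
			uint32_t packet_size = 8 + ((data_length + 3) & ~3u);

			if (pos + packet_size > ev->completed)
				break;	/* packet spills over into the next buffer chunk */
			/* payload: buf + pos + 4, data_length bytes, big endian;
			 * trailer timestamp quadlet: last 4 bytes of the packet, little endian */
			pos += packet_size;
		}
		return pos;	/* offset of the first incomplete packet */
	}
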
+/**
* struct fw_cdev_event_iso_resource - Iso resources were allocated or freed
* @closure: See &fw_cdev_event_common;
* set by %FW_CDEV_IOC_(DE)ALLOCATE_ISO_RESOURCE(_ONCE) ioctl
@@ -200,15 +335,45 @@ struct fw_cdev_event_iso_resource {
};
/**
+ * struct fw_cdev_event_phy_packet - A PHY packet was transmitted or received
+ * @closure: See &fw_cdev_event_common; set by %FW_CDEV_IOC_SEND_PHY_PACKET
+ * or %FW_CDEV_IOC_RECEIVE_PHY_PACKETS ioctl
+ * @type: %FW_CDEV_EVENT_PHY_PACKET_SENT or %..._RECEIVED
+ * @rcode: %RCODE_..., indicates success or failure of transmission
+ * @length: Data length in bytes
+ * @data: Incoming data
+ *
+ * If @type is %FW_CDEV_EVENT_PHY_PACKET_SENT, @length is 0 and @data empty,
+ * except in case of a ping packet: Then, @length is 4, and @data[0] is the
+ * ping time in 49.152MHz clocks if @rcode is %RCODE_COMPLETE.
+ *
+ * If @type is %FW_CDEV_EVENT_PHY_PACKET_RECEIVED, @length is 8 and @data
+ * consists of the two PHY packet quadlets, in host byte order.
+ */
+struct fw_cdev_event_phy_packet {
+ __u64 closure;
+ __u32 type;
+ __u32 rcode;
+ __u32 length;
+ __u32 data[0];
+};
+
+/**
* union fw_cdev_event - Convenience union of fw_cdev_event_ types
- * @common: Valid for all types
- * @bus_reset: Valid if @common.type == %FW_CDEV_EVENT_BUS_RESET
- * @response: Valid if @common.type == %FW_CDEV_EVENT_RESPONSE
- * @request: Valid if @common.type == %FW_CDEV_EVENT_REQUEST
- * @iso_interrupt: Valid if @common.type == %FW_CDEV_EVENT_ISO_INTERRUPT
- * @iso_resource: Valid if @common.type ==
+ * @common: Valid for all types
+ * @bus_reset: Valid if @common.type == %FW_CDEV_EVENT_BUS_RESET
+ * @response: Valid if @common.type == %FW_CDEV_EVENT_RESPONSE
+ * @request: Valid if @common.type == %FW_CDEV_EVENT_REQUEST
+ * @request2: Valid if @common.type == %FW_CDEV_EVENT_REQUEST2
+ * @iso_interrupt: Valid if @common.type == %FW_CDEV_EVENT_ISO_INTERRUPT
+ * @iso_interrupt_mc: Valid if @common.type ==
+ * %FW_CDEV_EVENT_ISO_INTERRUPT_MULTICHANNEL
+ * @iso_resource: Valid if @common.type ==
* %FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED or
* %FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED
+ * @phy_packet: Valid if @common.type ==
+ * %FW_CDEV_EVENT_PHY_PACKET_SENT or
+ * %FW_CDEV_EVENT_PHY_PACKET_RECEIVED
*
* Convenience union for userspace use. Events could be read(2) into an
* appropriately aligned char buffer and then cast to this union for further
@@ -223,8 +388,11 @@ union fw_cdev_event {
struct fw_cdev_event_bus_reset bus_reset;
struct fw_cdev_event_response response;
struct fw_cdev_event_request request;
+ struct fw_cdev_event_request2 request2; /* added in 2.6.36 */
struct fw_cdev_event_iso_interrupt iso_interrupt;
- struct fw_cdev_event_iso_resource iso_resource;
+ struct fw_cdev_event_iso_interrupt_mc iso_interrupt_mc; /* added in 2.6.36 */
+ struct fw_cdev_event_iso_resource iso_resource; /* added in 2.6.30 */
+ struct fw_cdev_event_phy_packet phy_packet; /* added in 2.6.36 */
};
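
A minimal read-and-dispatch loop along the lines described in this comment (fd is assumed to be an open /dev/fw* file descriptor; buffer size and the helper's name are arbitrary):

	/* needs <unistd.h>, <linux/firewire-cdev.h> */
	static void poll_one_event(int fd)
	{
		char buf[16 * 1024] __attribute__((aligned(8)));
		ssize_t len = read(fd, buf, sizeof(buf));
		const union fw_cdev_event *e = (const void *)buf;

		if (len < (ssize_t)sizeof(struct fw_cdev_event_common))
			return;

		switch (e->common.type) {
		case FW_CDEV_EVENT_BUS_RESET:
			/* e->bus_reset is valid */
			break;
		case FW_CDEV_EVENT_REQUEST2:
			/* e->request2 is valid; answer via FW_CDEV_IOC_SEND_RESPONSE */
			break;
		case FW_CDEV_EVENT_ISO_INTERRUPT_MULTICHANNEL:
			/* data up to e->iso_interrupt_mc.completed may be processed */
			break;
		default:
			break;
		}
	}
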
/* available since kernel version 2.6.22 */
@@ -256,23 +424,46 @@ union fw_cdev_event {
/* available since kernel version 2.6.34 */
#define FW_CDEV_IOC_GET_CYCLE_TIMER2 _IOWR('#', 0x14, struct fw_cdev_get_cycle_timer2)
+/* available since kernel version 2.6.36 */
+#define FW_CDEV_IOC_SEND_PHY_PACKET _IOWR('#', 0x15, struct fw_cdev_send_phy_packet)
+#define FW_CDEV_IOC_RECEIVE_PHY_PACKETS _IOW('#', 0x16, struct fw_cdev_receive_phy_packets)
+#define FW_CDEV_IOC_SET_ISO_CHANNELS _IOW('#', 0x17, struct fw_cdev_set_iso_channels)
+
/*
- * FW_CDEV_VERSION History
+ * ABI version history
* 1 (2.6.22) - initial version
+ * (2.6.24) - added %FW_CDEV_IOC_GET_CYCLE_TIMER
* 2 (2.6.30) - changed &fw_cdev_event_iso_interrupt.header if
* &fw_cdev_create_iso_context.header_size is 8 or more
+ * - added %FW_CDEV_IOC_*_ISO_RESOURCE*,
+ * %FW_CDEV_IOC_GET_SPEED, %FW_CDEV_IOC_SEND_BROADCAST_REQUEST,
+ * %FW_CDEV_IOC_SEND_STREAM_PACKET
* (2.6.32) - added time stamp to xmit &fw_cdev_event_iso_interrupt
* (2.6.33) - IR has always packet-per-buffer semantics now, not one of
* dual-buffer or packet-per-buffer depending on hardware
+ * - shared use and auto-response for FCP registers
* 3 (2.6.34) - made &fw_cdev_get_cycle_timer reliable
+ * - added %FW_CDEV_IOC_GET_CYCLE_TIMER2
+ * 4 (2.6.36) - added %FW_CDEV_EVENT_REQUEST2, %FW_CDEV_EVENT_PHY_PACKET_*,
+ * and &fw_cdev_allocate.region_end
+ * - implemented &fw_cdev_event_bus_reset.bm_node_id
+ * - added %FW_CDEV_IOC_SEND_PHY_PACKET, _RECEIVE_PHY_PACKETS
+ * - added %FW_CDEV_EVENT_ISO_INTERRUPT_MULTICHANNEL,
+ * %FW_CDEV_ISO_CONTEXT_RECEIVE_MULTICHANNEL, and
+ * %FW_CDEV_IOC_SET_ISO_CHANNELS
*/
-#define FW_CDEV_VERSION 3
+#define FW_CDEV_VERSION 3 /* Meaningless; don't use this macro. */
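
The version handshake described at &fw_cdev_get_info below then looks roughly like this sketch (fd is an assumed open device file descriptor; the client hard-codes the ABI version it was written against instead of using the macro):

	/* needs <sys/ioctl.h>, <linux/firewire-cdev.h> */
	struct fw_cdev_get_info info = {
		.version = 4,	/* ABI version this client implements, not FW_CDEV_VERSION */
	};

	if (ioctl(fd, FW_CDEV_IOC_GET_INFO, &info) == 0 && info.version >= 4) {
		/* kernel supports FW_CDEV_EVENT_REQUEST2, the PHY packet ioctls,
		 * multichannel reception, fw_cdev_allocate.region_end, ... */
	}
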
/**
* struct fw_cdev_get_info - General purpose information ioctl
- * @version: The version field is just a running serial number.
- * We never break backwards compatibility, but may add more
- * structs and ioctls in later revisions.
+ * @version: The version field is just a running serial number. Both an
+ * input parameter (ABI version implemented by the client) and
+ * output parameter (ABI version implemented by the kernel).
+ * A client must not fill in the %FW_CDEV_VERSION defined in an
+ * included kernel header file, but rather the actual ABI version
+ * for which the client was implemented. This is necessary for forward
+ * compatibility. We never break backwards compatibility, but
+ * may add more structs, events, and ioctls in later revisions.
* @rom_length: If @rom is non-zero, at most rom_length bytes of configuration
* ROM will be copied into that user space address. In either
* case, @rom_length is updated with the actual length of the
@@ -339,28 +530,48 @@ struct fw_cdev_send_response {
};
/**
- * struct fw_cdev_allocate - Allocate a CSR address range
+ * struct fw_cdev_allocate - Allocate a CSR in an address range
* @offset: Start offset of the address range
* @closure: To be passed back to userspace in request events
- * @length: Length of the address range, in bytes
+ * @length: Length of the CSR, in bytes
* @handle: Handle to the allocation, written by the kernel
+ * @region_end: First address above the address range (added in ABI v4, 2.6.36)
*
* Allocate an address range in the 48-bit address space on the local node
* (the controller). This allows userspace to listen for requests with an
- * offset within that address range. When the kernel receives a request
- * within the range, an &fw_cdev_event_request event will be written back.
- * The @closure field is passed back to userspace in the response event.
+ * offset within that address range. Every time the kernel receives a
+ * request within the range, an &fw_cdev_event_request2 event will be emitted.
+ * (If the kernel or the client implements ABI version <= 3, an
+ * &fw_cdev_event_request will be generated instead.)
+ *
+ * The @closure field is passed back to userspace in these request events.
* The @handle field is an out parameter, returning a handle to the allocated
* range to be used for later deallocation of the range.
*
* The address range is allocated on all local nodes. The address allocation
- * is exclusive except for the FCP command and response registers.
+ * is exclusive except for the FCP command and response registers. If an
+ * exclusive address region is already in use, the ioctl fails with errno set
+ * to %EBUSY.
+ *
+ * If kernel and client implement ABI version >= 4, the kernel looks up a free
+ * spot of size @length inside [@offset..@region_end) and, if found, writes
+ * the start address of the new CSR back in @offset. I.e. @offset is an
+ * in and out parameter. If this automatic placement of a CSR in a bigger
+ * address range is not desired, the client simply needs to set @region_end
+ * = @offset + @length.
+ *
+ * If the kernel or the client implements ABI version <= 3, @region_end is
+ * ignored and effectively assumed to be @offset + @length.
+ *
+ * @region_end is only present in a kernel header >= 2.6.36. If necessary,
+ * this can for example be tested by #ifdef FW_CDEV_EVENT_REQUEST2.
*/
struct fw_cdev_allocate {
__u64 offset;
__u64 closure;
__u32 length;
__u32 handle;
+ __u64 region_end; /* available since kernel version 2.6.36 */
};
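
A hedged sketch of using the new @region_end placement (fd is an assumed open device file descriptor; the offsets and length are arbitrary example values):

	/* needs <stdio.h>, <sys/ioctl.h>, <linux/firewire-cdev.h> */
	struct fw_cdev_allocate a = {
		.offset     = 0x0000fffff0010000ULL,	/* lower bound of the search window */
		.closure    = 0,			/* passed back in request events */
		.length     = 0x40,
		.region_end = 0x0000fffff0020000ULL,	/* exclusive upper bound, ABI v4+ */
	};

	if (ioctl(fd, FW_CDEV_IOC_ALLOCATE, &a) == 0)
		printf("CSR placed at offset 0x%llx, handle %u\n",
		       (unsigned long long)a.offset, a.handle);
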
/**
@@ -382,9 +593,14 @@ struct fw_cdev_deallocate {
* Initiate a bus reset for the bus this device is on. The bus reset can be
* either the original (long) bus reset or the arbitrated (short) bus reset
* introduced in 1394a-2000.
+ *
+ * The ioctl returns immediately. A subsequent &fw_cdev_event_bus_reset
+ * indicates when the reset actually happened. Since ABI v4, this may be
+ * considerably later than the ioctl because the kernel ensures a grace period
+ * between subsequent bus resets as per IEEE 1394 bus management specification.
*/
struct fw_cdev_initiate_bus_reset {
- __u32 type; /* FW_CDEV_SHORT_RESET or FW_CDEV_LONG_RESET */
+ __u32 type;
};
/**
@@ -408,9 +624,10 @@ struct fw_cdev_initiate_bus_reset {
*
* @immediate, @key, and @data array elements are CPU-endian quadlets.
*
- * If successful, the kernel adds the descriptor and writes back a handle to the
- * kernel-side object to be used for later removal of the descriptor block and
- * immediate key.
+ * If successful, the kernel adds the descriptor and writes back a @handle to
+ * the kernel-side object to be used for later removal of the descriptor block
+ * and immediate key. The kernel will also generate a bus reset to signal the
+ * change of the configuration ROM to other nodes.
*
* This ioctl affects the configuration ROMs of all local nodes.
* The ioctl only succeeds on device files which represent a local node.
@@ -429,38 +646,50 @@ struct fw_cdev_add_descriptor {
* descriptor was added
*
* Remove a descriptor block and accompanying immediate key from the local
- * nodes' configuration ROMs.
+ * nodes' configuration ROMs. The kernel will also generate a bus reset to
+ * signal the change of the configuration ROM to other nodes.
*/
struct fw_cdev_remove_descriptor {
__u32 handle;
};
-#define FW_CDEV_ISO_CONTEXT_TRANSMIT 0
-#define FW_CDEV_ISO_CONTEXT_RECEIVE 1
+#define FW_CDEV_ISO_CONTEXT_TRANSMIT 0
+#define FW_CDEV_ISO_CONTEXT_RECEIVE 1
+#define FW_CDEV_ISO_CONTEXT_RECEIVE_MULTICHANNEL 2 /* added in 2.6.36 */
/**
- * struct fw_cdev_create_iso_context - Create a context for isochronous IO
- * @type: %FW_CDEV_ISO_CONTEXT_TRANSMIT or %FW_CDEV_ISO_CONTEXT_RECEIVE
- * @header_size: Header size to strip for receive contexts
- * @channel: Channel to bind to
- * @speed: Speed for transmit contexts
- * @closure: To be returned in &fw_cdev_event_iso_interrupt
+ * struct fw_cdev_create_iso_context - Create a context for isochronous I/O
+ * @type: %FW_CDEV_ISO_CONTEXT_TRANSMIT or %FW_CDEV_ISO_CONTEXT_RECEIVE or
+ * %FW_CDEV_ISO_CONTEXT_RECEIVE_MULTICHANNEL
+ * @header_size: Header size to strip in single-channel reception
+ * @channel: Channel to bind to in single-channel reception or transmission
+ * @speed: Transmission speed
+ * @closure: To be returned in &fw_cdev_event_iso_interrupt or
+ * &fw_cdev_event_iso_interrupt_multichannel
* @handle: Handle to context, written back by kernel
*
* Prior to sending or receiving isochronous I/O, a context must be created.
* The context records information about the transmit or receive configuration
* and typically maps to an underlying hardware resource. A context is set up
* for either sending or receiving. It is bound to a specific isochronous
- * channel.
+ * @channel.
+ *
+ * In case of multichannel reception, @header_size and @channel are ignored
+ * and the channels are selected by %FW_CDEV_IOC_SET_ISO_CHANNELS.
+ *
+ * For %FW_CDEV_ISO_CONTEXT_RECEIVE contexts, @header_size must be at least 4
+ * and must be a multiple of 4. It is ignored in other context types.
+ *
+ * @speed is ignored in receive context types.
*
* If a context was successfully created, the kernel writes back a handle to the
* context, which must be passed in for subsequent operations on that context.
*
- * For receive contexts, @header_size must be at least 4 and must be a multiple
- * of 4.
- *
- * Note that the effect of a @header_size > 4 depends on
- * &fw_cdev_get_info.version, as documented at &fw_cdev_event_iso_interrupt.
+ * Limitations:
+ * No more than one iso context can be created per fd.
+ * The total number of contexts that all userspace and kernelspace drivers can
+ * create on a card at a time is a hardware limit, typically 4 or 8 contexts per
+ * direction, and of them at most one multichannel receive context.
*/
struct fw_cdev_create_iso_context {
__u32 type;
@@ -471,6 +700,22 @@ struct fw_cdev_create_iso_context {
__u32 handle;
};
+/**
+ * struct fw_cdev_set_iso_channels - Select channels in multichannel reception
+ * @channels: Bitmask of channels to listen to
+ * @handle: Handle of the multichannel receive context
+ *
+ * @channels is the bitwise or of 1ULL << n for each channel n to listen to.
+ *
+ * The ioctl fails with errno %EBUSY if there is already another receive context
+ * on a channel in @channels. In that case, the bitmask of all unoccupied
+ * channels is returned in @channels.
+ */
+struct fw_cdev_set_iso_channels {
+ __u64 channels;
+ __u32 handle;
+};
+
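
Putting the two structures above together, creating a multichannel receive context and selecting its channels might look like this sketch (fd is an assumed open device file descriptor; the channel numbers are arbitrary):

	/* needs <stdio.h>, <sys/ioctl.h>, <linux/firewire-cdev.h> */
	struct fw_cdev_create_iso_context cc = {
		.type = FW_CDEV_ISO_CONTEXT_RECEIVE_MULTICHANNEL,
		/* header_size, channel and speed are ignored for this context type */
	};
	if (ioctl(fd, FW_CDEV_IOC_CREATE_ISO_CONTEXT, &cc) < 0)
		perror("FW_CDEV_IOC_CREATE_ISO_CONTEXT");

	struct fw_cdev_set_iso_channels sic = {
		.channels = (1ULL << 31) | (1ULL << 32),	/* listen to channels 31 and 32 */
		.handle   = cc.handle,
	};
	if (ioctl(fd, FW_CDEV_IOC_SET_ISO_CHANNELS, &sic) < 0)
		perror("FW_CDEV_IOC_SET_ISO_CHANNELS");	/* EBUSY: sic.channels now lists free channels */
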
#define FW_CDEV_ISO_PAYLOAD_LENGTH(v) (v)
#define FW_CDEV_ISO_INTERRUPT (1 << 16)
#define FW_CDEV_ISO_SKIP (1 << 17)
@@ -481,42 +726,72 @@ struct fw_cdev_create_iso_context {
/**
* struct fw_cdev_iso_packet - Isochronous packet
- * @control: Contains the header length (8 uppermost bits), the sy field
- * (4 bits), the tag field (2 bits), a sync flag (1 bit),
- * a skip flag (1 bit), an interrupt flag (1 bit), and the
+ * @control: Contains the header length (8 uppermost bits),
+ * the sy field (4 bits), the tag field (2 bits), a sync flag
+ * or a skip flag (1 bit), an interrupt flag (1 bit), and the
* payload length (16 lowermost bits)
- * @header: Header and payload
+ * @header: Header and payload in case of a transmit context.
*
* &struct fw_cdev_iso_packet is used to describe isochronous packet queues.
- *
* Use the FW_CDEV_ISO_ macros to fill in @control.
+ * The @header array is empty in case of receive contexts.
+ *
+ * Context type %FW_CDEV_ISO_CONTEXT_TRANSMIT:
+ *
+ * @control.HEADER_LENGTH must be a multiple of 4. It specifies the numbers of
+ * bytes in @header that will be prepended to the packet's payload. These bytes
+ * are copied into the kernel and will not be accessed after the ioctl has
+ * returned.
+ *
+ * The @control.SY and TAG fields are copied to the iso packet header. These
+ * fields are specified by IEEE 1394a and IEC 61883-1.
+ *
+ * The @control.SKIP flag specifies that no packet is to be sent in a frame.
+ * When using this, all other fields except @control.INTERRUPT must be zero.
+ *
+ * When a packet with the @control.INTERRUPT flag set has been completed, an
+ * &fw_cdev_event_iso_interrupt event will be sent.
+ *
+ * Context type %FW_CDEV_ISO_CONTEXT_RECEIVE:
+ *
+ * @control.HEADER_LENGTH must be a multiple of the context's header_size.
+ * If the HEADER_LENGTH is larger than the context's header_size, multiple
+ * packets are queued for this entry.
+ *
+ * The @control.SY and TAG fields are ignored.
+ *
+ * If the @control.SYNC flag is set, the context drops all packets until a
+ * packet with a sy field is received which matches &fw_cdev_start_iso.sync.
+ *
+ * @control.PAYLOAD_LENGTH defines how many payload bytes can be received for
+ * one packet (in addition to payload quadlets that have been defined as headers
+ * and are stripped and returned in the &fw_cdev_event_iso_interrupt structure).
+ * If more bytes are received, the additional bytes are dropped. If fewer bytes
+ * are received, the remaining bytes in this part of the payload buffer will not
+ * be written to, not even by the next packet. I.e., packets received in
+ * consecutive frames will not necessarily be consecutive in memory. If an
+ * entry has queued multiple packets, the PAYLOAD_LENGTH is divided equally
+ * among them.
*
- * For transmit packets, the header length must be a multiple of 4 and specifies
- * the numbers of bytes in @header that will be prepended to the packet's
- * payload; these bytes are copied into the kernel and will not be accessed
- * after the ioctl has returned. The sy and tag fields are copied to the iso
- * packet header (these fields are specified by IEEE 1394a and IEC 61883-1).
- * The skip flag specifies that no packet is to be sent in a frame; when using
- * this, all other fields except the interrupt flag must be zero.
- *
- * For receive packets, the header length must be a multiple of the context's
- * header size; if the header length is larger than the context's header size,
- * multiple packets are queued for this entry. The sy and tag fields are
- * ignored. If the sync flag is set, the context drops all packets until
- * a packet with a matching sy field is received (the sync value to wait for is
- * specified in the &fw_cdev_start_iso structure). The payload length defines
- * how many payload bytes can be received for one packet (in addition to payload
- * quadlets that have been defined as headers and are stripped and returned in
- * the &fw_cdev_event_iso_interrupt structure). If more bytes are received, the
- * additional bytes are dropped. If less bytes are received, the remaining
- * bytes in this part of the payload buffer will not be written to, not even by
- * the next packet, i.e., packets received in consecutive frames will not
- * necessarily be consecutive in memory. If an entry has queued multiple
- * packets, the payload length is divided equally among them.
- *
- * When a packet with the interrupt flag set has been completed, the
+ * When a packet with the @control.INTERRUPT flag set has been completed, an
* &fw_cdev_event_iso_interrupt event will be sent. An entry that has queued
* multiple receive packets is completed when its last packet is completed.
+ *
+ * Context type %FW_CDEV_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
+ *
+ * Here, &fw_cdev_iso_packet would be more aptly named _iso_buffer_chunk since
+ * it specifies a chunk of the mmap()'ed buffer, while the number and alignment
+ * of packets to be placed into the buffer chunk are not known beforehand.
+ *
+ * @control.PAYLOAD_LENGTH is the size of the buffer chunk and specifies room
+ * for header, payload, padding, and trailer bytes of one or more packets.
+ * It must be a multiple of 4.
+ *
+ * @control.HEADER_LENGTH, TAG and SY are ignored. SYNC is treated as described
+ * for single-channel reception.
+ *
+ * When a buffer chunk with the @control.INTERRUPT flag set has been filled
+ * entirely, an &fw_cdev_event_iso_interrupt_mc event will be sent.
*/
struct fw_cdev_iso_packet {
__u32 control;
@@ -525,9 +800,9 @@ struct fw_cdev_iso_packet {
/**
* struct fw_cdev_queue_iso - Queue isochronous packets for I/O
- * @packets: Userspace pointer to packet data
+ * @packets: Userspace pointer to an array of &fw_cdev_iso_packet
* @data: Pointer into mmap()'ed payload buffer
- * @size: Size of packet data in bytes
+ * @size: Size of the @packets array, in bytes
* @handle: Isochronous context handle
*
* Queue a number of isochronous packets for reception or transmission.
@@ -540,6 +815,9 @@ struct fw_cdev_iso_packet {
* The kernel may or may not queue all packets, but will write back updated
* values of the @packets, @data and @size fields, so the ioctl can be
* resubmitted easily.
+ *
+ * In case of a multichannel receive context, @data must be quadlet-aligned
+ * relative to the buffer start.
*/
struct fw_cdev_queue_iso {
__u64 packets;
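
A sketch of queueing one buffer chunk into a multichannel receive context, tying the two structures above together (fd, buffer — the address returned by mmap() on fd — and cc.handle from the earlier context-creation sketch are assumptions; sizes are example values):

	/* needs <stdint.h>, <stdio.h>, <sys/ioctl.h>, <linux/firewire-cdev.h> */
	struct fw_cdev_iso_packet chunk = {
		.control = FW_CDEV_ISO_PAYLOAD_LENGTH(32768) | FW_CDEV_ISO_INTERRUPT,
	};
	struct fw_cdev_queue_iso q = {
		.packets = (__u64)(uintptr_t)&chunk,
		.data    = (__u64)(uintptr_t)buffer,	/* start of the mmap()'ed buffer */
		.size    = sizeof(chunk),		/* size of the packet descriptor array */
		.handle  = cc.handle,			/* multichannel context from above */
	};
	if (ioctl(fd, FW_CDEV_IOC_QUEUE_ISO, &q) < 0)
		perror("FW_CDEV_IOC_QUEUE_ISO");
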
@@ -698,4 +976,39 @@ struct fw_cdev_send_stream_packet {
__u32 speed;
};
+/**
+ * struct fw_cdev_send_phy_packet - send a PHY packet
+ * @closure: Passed back to userspace in the PHY-packet-sent event
+ * @data: First and second quadlet of the PHY packet
+ * @generation: The bus generation where packet is valid
+ *
+ * The %FW_CDEV_IOC_SEND_PHY_PACKET ioctl sends a PHY packet to all nodes
+ * on the same card as this device. After transmission, an
+ * %FW_CDEV_EVENT_PHY_PACKET_SENT event is generated.
+ *
+ * The payload @data[] shall be specified in host byte order. Usually,
+ * @data[1] needs to be the bitwise inverse of @data[0]. VersaPHY packets
+ * are an exception to this rule.
+ *
+ * The ioctl is only permitted on device files which represent a local node.
+ */
+struct fw_cdev_send_phy_packet {
+ __u64 closure;
+ __u32 data[2];
+ __u32 generation;
+};
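A hedged userspace sketch of this ioctl; obtaining the current bus generation (e.g. via FW_CDEV_IOC_GET_INFO) is not shown here and is assumed to happen elsewhere.

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/firewire-cdev.h>

/* Send one PHY packet whose first quadlet is 'quadlet'; the second quadlet
 * is set to the bitwise inverse, as required for non-VersaPHY packets. */
static int send_phy_packet(int fd, uint32_t quadlet, uint32_t generation)
{
	struct fw_cdev_send_phy_packet req = {
		.closure    = 0,
		.data       = { quadlet, ~quadlet },
		.generation = generation,
	};

	return ioctl(fd, FW_CDEV_IOC_SEND_PHY_PACKET, &req);
}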
+
+/**
+ * struct fw_cdev_receive_phy_packets - start reception of PHY packets
+ * @closure: Passed back to userspace in phy packet events
+ *
+ * This ioctl enables delivery of %FW_CDEV_EVENT_PHY_PACKET_RECEIVED events
+ * for incoming PHY packets from any node on the same bus as the device.
+ *
+ * The ioctl is only permitted on device files which represent a local node.
+ */
+struct fw_cdev_receive_phy_packets {
+ __u64 closure;
+};
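Enabling reception is a similarly small call; the ioctl constant name FW_CDEV_IOC_RECEIVE_PHY_PACKETS is assumed from the surrounding header rather than visible in this hunk (headers as in the sketches above).

static int enable_phy_packet_reception(int fd, uint64_t closure)
{
	struct fw_cdev_receive_phy_packets req = { .closure = closure };

	return ioctl(fd, FW_CDEV_IOC_RECEIVE_PHY_PACKETS, &req);
}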
+
#endif /* _LINUX_FIREWIRE_CDEV_H */
diff --git a/include/linux/firewire.h b/include/linux/firewire.h
index 72e2b8ac2a5a..1cd637ef62d2 100644
--- a/include/linux/firewire.h
+++ b/include/linux/firewire.h
@@ -32,11 +32,13 @@
#define CSR_CYCLE_TIME 0x200
#define CSR_BUS_TIME 0x204
#define CSR_BUSY_TIMEOUT 0x210
+#define CSR_PRIORITY_BUDGET 0x218
#define CSR_BUS_MANAGER_ID 0x21c
#define CSR_BANDWIDTH_AVAILABLE 0x220
#define CSR_CHANNELS_AVAILABLE 0x224
#define CSR_CHANNELS_AVAILABLE_HI 0x224
#define CSR_CHANNELS_AVAILABLE_LO 0x228
+#define CSR_MAINT_UTILITY 0x230
#define CSR_BROADCAST_CHANNEL 0x234
#define CSR_CONFIG_ROM 0x400
#define CSR_CONFIG_ROM_END 0x800
@@ -89,6 +91,11 @@ struct fw_card {
struct list_head transaction_list;
unsigned long reset_jiffies;
+ u32 split_timeout_hi;
+ u32 split_timeout_lo;
+ unsigned int split_timeout_cycles;
+ unsigned int split_timeout_jiffies;
+
unsigned long long guid;
unsigned max_receive;
int link_speed;
@@ -104,18 +111,28 @@ struct fw_card {
bool beta_repeaters_present;
int index;
-
struct list_head link;
- /* Work struct for BM duties. */
- struct delayed_work work;
+ struct list_head phy_receiver_list;
+
+ struct delayed_work br_work; /* bus reset job */
+ bool br_short;
+
+ struct delayed_work bm_work; /* bus manager job */
int bm_retries;
int bm_generation;
__be32 bm_transaction_data[2];
+ int bm_node_id;
+ bool bm_abdicate;
+
+ bool priority_budget_implemented; /* controller feature */
+ bool broadcast_channel_auto_allocated; /* controller feature */
bool broadcast_channel_allocated;
u32 broadcast_channel;
__be32 topology_map[(CSR_TOPOLOGY_MAP_END - CSR_TOPOLOGY_MAP) / 4];
+
+ __be32 maint_utility_register;
};
struct fw_attribute_group {
@@ -252,7 +269,7 @@ typedef void (*fw_transaction_callback_t)(struct fw_card *card, int rcode,
typedef void (*fw_address_callback_t)(struct fw_card *card,
struct fw_request *request,
int tcode, int destination, int source,
- int generation, int speed,
+ int generation,
unsigned long long offset,
void *data, size_t length,
void *callback_data);
@@ -269,10 +286,10 @@ struct fw_packet {
u32 timestamp;
/*
- * This callback is called when the packet transmission has
- * completed; for successful transmission, the status code is
- * the ack received from the destination, otherwise it's a
- * negative errno: ENOMEM, ESTALE, ETIMEDOUT, ENODEV, EIO.
+ * This callback is called when the packet transmission has completed.
+ * For successful transmission, the status code is the ack received
+ * from the destination. Otherwise it is one of the juju-specific
+ * rcodes: RCODE_SEND_ERROR, _CANCELLED, _BUSY, _GENERATION, _NO_ACK.
* The callback can be called from tasklet context and thus
* must never block.
*/
@@ -355,17 +372,19 @@ void fw_core_remove_descriptor(struct fw_descriptor *desc);
* scatter-gather streaming (e.g. assembling video frame automatically).
*/
struct fw_iso_packet {
- u16 payload_length; /* Length of indirect payload. */
- u32 interrupt:1; /* Generate interrupt on this packet */
- u32 skip:1; /* Set to not send packet at all. */
- u32 tag:2;
- u32 sy:4;
- u32 header_length:8; /* Length of immediate header. */
- u32 header[0];
+ u16 payload_length; /* Length of indirect payload */
+ u32 interrupt:1; /* Generate interrupt on this packet */
+ u32 skip:1; /* tx: Set to not send packet at all */
+ /* rx: Sync bit, wait for matching sy */
+ u32 tag:2; /* tx: Tag in packet header */
+ u32 sy:4; /* tx: Sy in packet header */
+ u32 header_length:8; /* Length of immediate header */
+ u32 header[0]; /* tx: Top of 1394 isoch. data_block */
};
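A hedged in-kernel sketch of filling the transmit-side fields above; the wrapper struct that provides storage behind the zero-length header[] array is an illustrative convention, not something introduced by this patch.

#include <linux/firewire.h>

struct demo_iso_tx {
	struct fw_iso_packet packet;	/* header[] storage follows directly */
	u32 header[2];
};

static void demo_fill_tx(struct demo_iso_tx *d)
{
	d->packet.payload_length = 480;			/* bytes of indirect payload */
	d->packet.interrupt = 1;			/* complete with a callback */
	d->packet.skip = 0;
	d->packet.tag = 1;
	d->packet.sy = 0;
	d->packet.header_length = sizeof(d->header);	/* two immediate quadlets */
	d->header[0] = 0;				/* illustrative values only */
	d->header[1] = 0;
}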
-#define FW_ISO_CONTEXT_TRANSMIT 0
-#define FW_ISO_CONTEXT_RECEIVE 1
+#define FW_ISO_CONTEXT_TRANSMIT 0
+#define FW_ISO_CONTEXT_RECEIVE 1
+#define FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL 2
#define FW_ISO_CONTEXT_MATCH_TAG0 1
#define FW_ISO_CONTEXT_MATCH_TAG1 2
@@ -389,24 +408,31 @@ struct fw_iso_buffer {
int fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card,
int page_count, enum dma_data_direction direction);
void fw_iso_buffer_destroy(struct fw_iso_buffer *buffer, struct fw_card *card);
+size_t fw_iso_buffer_lookup(struct fw_iso_buffer *buffer, dma_addr_t completed);
struct fw_iso_context;
typedef void (*fw_iso_callback_t)(struct fw_iso_context *context,
u32 cycle, size_t header_length,
void *header, void *data);
+typedef void (*fw_iso_mc_callback_t)(struct fw_iso_context *context,
+ dma_addr_t completed, void *data);
struct fw_iso_context {
struct fw_card *card;
int type;
int channel;
int speed;
size_t header_size;
- fw_iso_callback_t callback;
+ union {
+ fw_iso_callback_t sc;
+ fw_iso_mc_callback_t mc;
+ } callback;
void *callback_data;
};
struct fw_iso_context *fw_iso_context_create(struct fw_card *card,
int type, int channel, int speed, size_t header_size,
fw_iso_callback_t callback, void *callback_data);
+int fw_iso_context_set_channels(struct fw_iso_context *ctx, u64 *channels);
int fw_iso_context_queue(struct fw_iso_context *ctx,
struct fw_iso_packet *packet,
struct fw_iso_buffer *buffer,
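A hedged kernel-side sketch tying the declarations above together: create a multichannel receive context, select channels, and map a completed DMA address back to a buffer offset in the mc callback. Error handling and the callback-cast convention are assumptions, not taken from this patch.

#include <linux/err.h>
#include <linux/firewire.h>

static void demo_mc_callback(struct fw_iso_context *ctx,
			     dma_addr_t completed, void *data)
{
	struct fw_iso_buffer *buffer = data;	/* assumed passed as callback_data */
	size_t filled = fw_iso_buffer_lookup(buffer, completed);

	/* Packets are now available up to byte offset 'filled' in the buffer. */
	(void)filled;
}

static struct fw_iso_context *demo_create_mc(struct fw_card *card,
					     struct fw_iso_buffer *buffer)
{
	u64 channels = (1ULL << 1) | (1ULL << 5);	/* listen on channels 1 and 5 */
	struct fw_iso_context *ctx;
	int err;

	ctx = fw_iso_context_create(card, FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL,
				    /* channel */ 0, /* speed */ 0,
				    /* header_size */ 0,
				    (fw_iso_callback_t)demo_mc_callback, buffer);
	if (IS_ERR(ctx))
		return ctx;

	err = fw_iso_context_set_channels(ctx, &channels);
	if (err < 0)
		return ERR_PTR(err);	/* a real caller would also free ctx here */

	return ctx;
}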
diff --git a/tools/firewire/Makefile b/tools/firewire/Makefile
new file mode 100644
index 000000000000..81767adaae7d
--- /dev/null
+++ b/tools/firewire/Makefile
@@ -0,0 +1,19 @@
+prefix = /usr
+nosy-dump-version = 0.4
+
+CC = gcc
+
+all : nosy-dump
+
+nosy-dump : CFLAGS = -Wall -O2 -g
+nosy-dump : CPPFLAGS = -DVERSION=\"$(nosy-dump-version)\" -I../../drivers/firewire
+nosy-dump : LDFLAGS = -g
+nosy-dump : LDLIBS = -lpopt
+
+nosy-dump : nosy-dump.o decode-fcp.o
+
+clean :
+ rm -rf *.o nosy-dump
+
+install :
+ install nosy-dump $(prefix)/bin/nosy-dump
diff --git a/tools/firewire/decode-fcp.c b/tools/firewire/decode-fcp.c
new file mode 100644
index 000000000000..e41223b6a4c8
--- /dev/null
+++ b/tools/firewire/decode-fcp.c
@@ -0,0 +1,213 @@
+#include <linux/firewire-constants.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "list.h"
+#include "nosy-dump.h"
+
+#define CSR_FCP_COMMAND 0xfffff0000b00ull
+#define CSR_FCP_RESPONSE 0xfffff0000d00ull
+
+static const char * const ctype_names[] = {
+ [0x0] = "control", [0x8] = "not implemented",
+ [0x1] = "status", [0x9] = "accepted",
+ [0x2] = "specific inquiry", [0xa] = "rejected",
+ [0x3] = "notify", [0xb] = "in transition",
+ [0x4] = "general inquiry", [0xc] = "stable",
+ [0x5] = "(reserved 0x05)", [0xd] = "changed",
+ [0x6] = "(reserved 0x06)", [0xe] = "(reserved 0x0e)",
+ [0x7] = "(reserved 0x07)", [0xf] = "interim",
+};
+
+static const char * const subunit_type_names[] = {
+ [0x00] = "monitor", [0x10] = "(reserved 0x10)",
+ [0x01] = "audio", [0x11] = "(reserved 0x11)",
+ [0x02] = "printer", [0x12] = "(reserved 0x12)",
+ [0x03] = "disc", [0x13] = "(reserved 0x13)",
+ [0x04] = "tape recorder/player",[0x14] = "(reserved 0x14)",
+ [0x05] = "tuner", [0x15] = "(reserved 0x15)",
+ [0x06] = "ca", [0x16] = "(reserved 0x16)",
+ [0x07] = "camera", [0x17] = "(reserved 0x17)",
+ [0x08] = "(reserved 0x08)", [0x18] = "(reserved 0x18)",
+ [0x09] = "panel", [0x19] = "(reserved 0x19)",
+ [0x0a] = "bulletin board", [0x1a] = "(reserved 0x1a)",
+ [0x0b] = "camera storage", [0x1b] = "(reserved 0x1b)",
+ [0x0c] = "(reserved 0x0c)", [0x1c] = "vendor unique",
+ [0x0d] = "(reserved 0x0d)", [0x1d] = "all subunit types",
+ [0x0e] = "(reserved 0x0e)", [0x1e] = "subunit_type extended to next byte",
+ [0x0f] = "(reserved 0x0f)", [0x1f] = "unit",
+};
+
+struct avc_enum {
+ int value;
+ const char *name;
+};
+
+struct avc_field {
+ const char *name; /* Short name for field. */
+ int offset; /* Location of field, specified in bits; */
+ /* negative means from end of packet. */
+ int width; /* Width of field, 0 means use data_length. */
+ struct avc_enum *names;
+};
+
+struct avc_opcode_info {
+ const char *name;
+ struct avc_field fields[8];
+};
+
+struct avc_enum power_field_names[] = {
+ { 0x70, "on" },
+ { 0x60, "off" },
+ { }
+};
+
+static const struct avc_opcode_info opcode_info[256] = {
+
+ /* TA Document 1999026 */
+ /* AV/C Digital Interface Command Set General Specification 4.0 */
+ [0xb2] = { "power", {
+ { "state", 0, 8, power_field_names }
+ }
+ },
+ [0x30] = { "unit info", {
+ { "foo", 0, 8 },
+ { "unit_type", 8, 5 },
+ { "unit", 13, 3 },
+ { "company id", 16, 24 },
+ }
+ },
+ [0x31] = { "subunit info" },
+ [0x01] = { "reserve" },
+ [0xb0] = { "version" },
+ [0x00] = { "vendor dependent" },
+ [0x02] = { "plug info" },
+ [0x12] = { "channel usage" },
+ [0x24] = { "connect" },
+ [0x20] = { "connect av" },
+ [0x22] = { "connections" },
+ [0x11] = { "digital input" },
+ [0x10] = { "digital output" },
+ [0x25] = { "disconnect" },
+ [0x21] = { "disconnect av" },
+ [0x19] = { "input plug signal format" },
+ [0x18] = { "output plug signal format" },
+ [0x1f] = { "general bus setup" },
+
+ /* TA Document 1999025 */
+ /* AV/C Descriptor Mechanism Specification Version 1.0 */
+ [0x0c] = { "create descriptor" },
+ [0x08] = { "open descriptor" },
+ [0x09] = { "read descriptor" },
+ [0x0a] = { "write descriptor" },
+ [0x05] = { "open info block" },
+ [0x06] = { "read info block" },
+ [0x07] = { "write info block" },
+ [0x0b] = { "search descriptor" },
+ [0x0d] = { "object number select" },
+
+ /* TA Document 1999015 */
+ /* AV/C Command Set for Rate Control of Isochronous Data Flow 1.0 */
+ [0xb3] = { "rate", {
+ { "subfunction", 0, 8 },
+ { "result", 8, 8 },
+ { "plug_type", 16, 8 },
+ { "plug_id", 16, 8 },
+ }
+ },
+
+ /* TA Document 1999008 */
+ /* AV/C Audio Subunit Specification 1.0 */
+ [0xb8] = { "function block" },
+
+ /* TA Document 2001001 */
+ /* AV/C Panel Subunit Specification 1.1 */
+ [0x7d] = { "gui update" },
+ [0x7e] = { "push gui data" },
+ [0x7f] = { "user action" },
+ [0x7c] = { "pass through" },
+
+ /* */
+ [0x26] = { "asynchronous connection" },
+};
+
+struct avc_frame {
+ uint32_t operand0:8;
+ uint32_t opcode:8;
+ uint32_t subunit_id:3;
+ uint32_t subunit_type:5;
+ uint32_t ctype:4;
+ uint32_t cts:4;
+};
+
+static void
+decode_avc(struct link_transaction *t)
+{
+ struct avc_frame *frame =
+ (struct avc_frame *) t->request->packet.write_block.data;
+ const struct avc_opcode_info *info;
+ const char *name;
+ char buffer[32];
+ int i;
+
+ info = &opcode_info[frame->opcode];
+ if (info->name == NULL) {
+ snprintf(buffer, sizeof(buffer),
+ "(unknown opcode 0x%02x)", frame->opcode);
+ name = buffer;
+ } else {
+ name = info->name;
+ }
+
+ printf("av/c %s, subunit_type=%s, subunit_id=%d, opcode=%s",
+ ctype_names[frame->ctype], subunit_type_names[frame->subunit_type],
+ frame->subunit_id, name);
+
+ for (i = 0; info->fields[i].name != NULL; i++)
+ printf(", %s", info->fields[i].name);
+
+ printf("\n");
+}
+
+int
+decode_fcp(struct link_transaction *t)
+{
+ struct avc_frame *frame =
+ (struct avc_frame *) t->request->packet.write_block.data;
+ unsigned long long offset =
+ ((unsigned long long) t->request->packet.common.offset_high << 32) |
+ t->request->packet.common.offset_low;
+
+ if (t->request->packet.common.tcode != TCODE_WRITE_BLOCK_REQUEST)
+ return 0;
+
+ if (offset == CSR_FCP_COMMAND || offset == CSR_FCP_RESPONSE) {
+ switch (frame->cts) {
+ case 0x00:
+ decode_avc(t);
+ break;
+ case 0x01:
+ printf("cal fcp frame (cts=0x01)\n");
+ break;
+ case 0x02:
+ printf("ehs fcp frame (cts=0x02)\n");
+ break;
+ case 0x03:
+ printf("havi fcp frame (cts=0x03)\n");
+ break;
+ case 0x0e:
+ printf("vendor specific fcp frame (cts=0x0e)\n");
+ break;
+ case 0x0f:
+ printf("extended cts\n");
+ break;
+ default:
+ printf("reserved fcp frame (cts=0x%02x)\n", frame->cts);
+ break;
+ }
+ return 1;
+ }
+
+ return 0;
+}
+
diff --git a/tools/firewire/list.h b/tools/firewire/list.h
new file mode 100644
index 000000000000..41f4bdadf634
--- /dev/null
+++ b/tools/firewire/list.h
@@ -0,0 +1,62 @@
+struct list {
+ struct list *next, *prev;
+};
+
+static inline void
+list_init(struct list *list)
+{
+ list->next = list;
+ list->prev = list;
+}
+
+static inline int
+list_empty(struct list *list)
+{
+ return list->next == list;
+}
+
+static inline void
+list_insert(struct list *link, struct list *new_link)
+{
+ new_link->prev = link->prev;
+ new_link->next = link;
+ new_link->prev->next = new_link;
+ new_link->next->prev = new_link;
+}
+
+static inline void
+list_append(struct list *list, struct list *new_link)
+{
+ list_insert((struct list *)list, new_link);
+}
+
+static inline void
+list_prepend(struct list *list, struct list *new_link)
+{
+ list_insert(list->next, new_link);
+}
+
+static inline void
+list_remove(struct list *link)
+{
+ link->prev->next = link->next;
+ link->next->prev = link->prev;
+}
+
+#define list_entry(link, type, member) \
+ ((type *)((char *)(link)-(unsigned long)(&((type *)0)->member)))
+
+#define list_head(list, type, member) \
+ list_entry((list)->next, type, member)
+
+#define list_tail(list, type, member) \
+ list_entry((list)->prev, type, member)
+
+#define list_next(elm, member) \
+ list_entry((elm)->member.next, typeof(*elm), member)
+
+#define list_for_each_entry(pos, list, member) \
+ for (pos = list_head(list, typeof(*pos), member); \
+ &pos->member != (list); \
+ pos = list_next(pos, member))
+
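A brief usage sketch of these helpers; struct item is hypothetical.

struct item {
	int value;
	struct list link;	/* embedded list node */
};

static int sum_items(struct list *items)
{
	struct item *pos;
	int sum = 0;

	list_for_each_entry(pos, items, link)
		sum += pos->value;

	return sum;
}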
diff --git a/tools/firewire/nosy-dump.c b/tools/firewire/nosy-dump.c
new file mode 100644
index 000000000000..f93b776370b6
--- /dev/null
+++ b/tools/firewire/nosy-dump.c
@@ -0,0 +1,1031 @@
+/*
+ * nosy-dump - Interface to snoop mode driver for TI PCILynx 1394 controllers
+ * Copyright (C) 2002-2006 Kristian Høgsberg
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <byteswap.h>
+#include <endian.h>
+#include <fcntl.h>
+#include <linux/firewire-constants.h>
+#include <poll.h>
+#include <popt.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/ioctl.h>
+#include <sys/time.h>
+#include <termios.h>
+#include <unistd.h>
+
+#include "list.h"
+#include "nosy-dump.h"
+#include "nosy-user.h"
+
+enum {
+ PACKET_FIELD_DETAIL = 0x01,
+ PACKET_FIELD_DATA_LENGTH = 0x02,
+ /* Marks the fields we print in transaction view. */
+ PACKET_FIELD_TRANSACTION = 0x04,
+};
+
+static void print_packet(uint32_t *data, size_t length);
+static void decode_link_packet(struct link_packet *packet, size_t length,
+ int include_flags, int exclude_flags);
+static int run = 1;
+sig_t sys_sigint_handler;
+
+static char *option_nosy_device = "/dev/nosy";
+static char *option_view = "packet";
+static char *option_output;
+static char *option_input;
+static int option_hex;
+static int option_iso;
+static int option_cycle_start;
+static int option_version;
+static int option_verbose;
+
+enum {
+ VIEW_TRANSACTION,
+ VIEW_PACKET,
+ VIEW_STATS,
+};
+
+static const struct poptOption options[] = {
+ {
+ .longName = "device",
+ .shortName = 'd',
+ .argInfo = POPT_ARG_STRING,
+ .arg = &option_nosy_device,
+ .descrip = "Path to nosy device.",
+ .argDescrip = "DEVICE"
+ },
+ {
+ .longName = "view",
+ .argInfo = POPT_ARG_STRING,
+ .arg = &option_view,
+ .descrip = "Specify view of bus traffic: packet, transaction or stats.",
+ .argDescrip = "VIEW"
+ },
+ {
+ .longName = "hex",
+ .shortName = 'x',
+ .argInfo = POPT_ARG_NONE,
+ .arg = &option_hex,
+ .descrip = "Print each packet in hex.",
+ },
+ {
+ .longName = "iso",
+ .argInfo = POPT_ARG_NONE,
+ .arg = &option_iso,
+ .descrip = "Print iso packets.",
+ },
+ {
+ .longName = "cycle-start",
+ .argInfo = POPT_ARG_NONE,
+ .arg = &option_cycle_start,
+ .descrip = "Print cycle start packets.",
+ },
+ {
+ .longName = "verbose",
+ .shortName = 'v',
+ .argInfo = POPT_ARG_NONE,
+ .arg = &option_verbose,
+ .descrip = "Verbose packet view.",
+ },
+ {
+ .longName = "output",
+ .shortName = 'o',
+ .argInfo = POPT_ARG_STRING,
+ .arg = &option_output,
+ .descrip = "Log to output file.",
+ .argDescrip = "FILENAME"
+ },
+ {
+ .longName = "input",
+ .shortName = 'i',
+ .argInfo = POPT_ARG_STRING,
+ .arg = &option_input,
+ .descrip = "Decode log from file.",
+ .argDescrip = "FILENAME"
+ },
+ {
+ .longName = "version",
+ .argInfo = POPT_ARG_NONE,
+ .arg = &option_version,
+ .descrip = "Print version information.",
+ },
+ POPT_AUTOHELP
+ POPT_TABLEEND
+};
+
+/* Allow all ^C except the first to interrupt the program in the usual way. */
+static void
+sigint_handler(int signal_num)
+{
+ if (run == 1) {
+ run = 0;
+ signal(SIGINT, SIG_DFL);
+ }
+}
+
+static struct subaction *
+subaction_create(uint32_t *data, size_t length)
+{
+ struct subaction *sa;
+
+ /* we put the ack in the subaction struct for easy access. */
+ sa = malloc(sizeof *sa - sizeof sa->packet + length);
+ sa->ack = data[length / 4 - 1];
+ sa->length = length;
+ memcpy(&sa->packet, data, length);
+
+ return sa;
+}
+
+static void
+subaction_destroy(struct subaction *sa)
+{
+ free(sa);
+}
+
+static struct list pending_transaction_list = {
+ &pending_transaction_list, &pending_transaction_list
+};
+
+static struct link_transaction *
+link_transaction_lookup(int request_node, int response_node, int tlabel)
+{
+ struct link_transaction *t;
+
+ list_for_each_entry(t, &pending_transaction_list, link) {
+ if (t->request_node == request_node &&
+ t->response_node == response_node &&
+ t->tlabel == tlabel)
+ return t;
+ }
+
+ t = malloc(sizeof *t);
+ t->request_node = request_node;
+ t->response_node = response_node;
+ t->tlabel = tlabel;
+ list_init(&t->request_list);
+ list_init(&t->response_list);
+
+ list_append(&pending_transaction_list, &t->link);
+
+ return t;
+}
+
+static void
+link_transaction_destroy(struct link_transaction *t)
+{
+ struct subaction *sa;
+
+ while (!list_empty(&t->request_list)) {
+ sa = list_head(&t->request_list, struct subaction, link);
+ list_remove(&sa->link);
+ subaction_destroy(sa);
+ }
+ while (!list_empty(&t->response_list)) {
+ sa = list_head(&t->response_list, struct subaction, link);
+ list_remove(&sa->link);
+ subaction_destroy(sa);
+ }
+ free(t);
+}
+
+struct protocol_decoder {
+ const char *name;
+ int (*decode)(struct link_transaction *t);
+};
+
+static const struct protocol_decoder protocol_decoders[] = {
+ { "FCP", decode_fcp }
+};
+
+static void
+handle_transaction(struct link_transaction *t)
+{
+ struct subaction *sa;
+ int i;
+
+ if (!t->request) {
+ printf("BUG in handle_transaction\n");
+ return;
+ }
+
+ for (i = 0; i < array_length(protocol_decoders); i++)
+ if (protocol_decoders[i].decode(t))
+ break;
+
+ /* HACK: decode only fcp right now. */
+ return;
+
+ decode_link_packet(&t->request->packet, t->request->length,
+ PACKET_FIELD_TRANSACTION, 0);
+ if (t->response)
+ decode_link_packet(&t->response->packet, t->request->length,
+ PACKET_FIELD_TRANSACTION, 0);
+ else
+ printf("[no response]");
+
+ if (option_verbose) {
+ list_for_each_entry(sa, &t->request_list, link)
+ print_packet((uint32_t *) &sa->packet, sa->length);
+ list_for_each_entry(sa, &t->response_list, link)
+ print_packet((uint32_t *) &sa->packet, sa->length);
+ }
+ printf("\r\n");
+
+ link_transaction_destroy(t);
+}
+
+static void
+clear_pending_transaction_list(void)
+{
+ struct link_transaction *t;
+
+ while (!list_empty(&pending_transaction_list)) {
+ t = list_head(&pending_transaction_list,
+ struct link_transaction, link);
+ list_remove(&t->link);
+ link_transaction_destroy(t);
+ /* print unfinished transactions */
+ }
+}
+
+static const char * const tcode_names[] = {
+ [0x0] = "write_quadlet_request", [0x6] = "read_quadlet_response",
+ [0x1] = "write_block_request", [0x7] = "read_block_response",
+ [0x2] = "write_response", [0x8] = "cycle_start",
+ [0x3] = "reserved", [0x9] = "lock_request",
+ [0x4] = "read_quadlet_request", [0xa] = "iso_data",
+ [0x5] = "read_block_request", [0xb] = "lock_response",
+};
+
+static const char * const ack_names[] = {
+ [0x0] = "no ack", [0x8] = "reserved (0x08)",
+ [0x1] = "ack_complete", [0x9] = "reserved (0x09)",
+ [0x2] = "ack_pending", [0xa] = "reserved (0x0a)",
+ [0x3] = "reserved (0x03)", [0xb] = "reserved (0x0b)",
+ [0x4] = "ack_busy_x", [0xc] = "reserved (0x0c)",
+ [0x5] = "ack_busy_a", [0xd] = "ack_data_error",
+ [0x6] = "ack_busy_b", [0xe] = "ack_type_error",
+ [0x7] = "reserved (0x07)", [0xf] = "reserved (0x0f)",
+};
+
+static const char * const rcode_names[] = {
+ [0x0] = "complete", [0x4] = "conflict_error",
+ [0x1] = "reserved (0x01)", [0x5] = "data_error",
+ [0x2] = "reserved (0x02)", [0x6] = "type_error",
+ [0x3] = "reserved (0x03)", [0x7] = "address_error",
+};
+
+static const char * const retry_names[] = {
+ [0x0] = "retry_1",
+ [0x1] = "retry_x",
+ [0x2] = "retry_a",
+ [0x3] = "retry_b",
+};
+
+enum {
+ PACKET_RESERVED,
+ PACKET_REQUEST,
+ PACKET_RESPONSE,
+ PACKET_OTHER,
+};
+
+struct packet_info {
+ const char *name;
+ int type;
+ int response_tcode;
+ const struct packet_field *fields;
+ int field_count;
+};
+
+struct packet_field {
+ const char *name; /* Short name for field. */
+ int offset; /* Location of field, specified in bits; */
+ /* negative means from end of packet. */
+ int width; /* Width of field, 0 means use data_length. */
+ int flags; /* Show options. */
+ const char * const *value_names;
+};
+
+#define COMMON_REQUEST_FIELDS \
+ { "dest", 0, 16, PACKET_FIELD_TRANSACTION }, \
+ { "tl", 16, 6 }, \
+ { "rt", 22, 2, PACKET_FIELD_DETAIL, retry_names }, \
+ { "tcode", 24, 4, PACKET_FIELD_TRANSACTION, tcode_names }, \
+ { "pri", 28, 4, PACKET_FIELD_DETAIL }, \
+ { "src", 32, 16, PACKET_FIELD_TRANSACTION }, \
+ { "offs", 48, 48, PACKET_FIELD_TRANSACTION }
+
+#define COMMON_RESPONSE_FIELDS \
+ { "dest", 0, 16 }, \
+ { "tl", 16, 6 }, \
+ { "rt", 22, 2, PACKET_FIELD_DETAIL, retry_names }, \
+ { "tcode", 24, 4, 0, tcode_names }, \
+ { "pri", 28, 4, PACKET_FIELD_DETAIL }, \
+ { "src", 32, 16 }, \
+ { "rcode", 48, 4, PACKET_FIELD_TRANSACTION, rcode_names }
+
+static const struct packet_field read_quadlet_request_fields[] = {
+ COMMON_REQUEST_FIELDS,
+ { "crc", 96, 32, PACKET_FIELD_DETAIL },
+ { "ack", 156, 4, 0, ack_names },
+};
+
+static const struct packet_field read_quadlet_response_fields[] = {
+ COMMON_RESPONSE_FIELDS,
+ { "data", 96, 32, PACKET_FIELD_TRANSACTION },
+ { "crc", 128, 32, PACKET_FIELD_DETAIL },
+ { "ack", 188, 4, 0, ack_names },
+};
+
+static const struct packet_field read_block_request_fields[] = {
+ COMMON_REQUEST_FIELDS,
+ { "data_length", 96, 16, PACKET_FIELD_TRANSACTION },
+ { "extended_tcode", 112, 16 },
+ { "crc", 128, 32, PACKET_FIELD_DETAIL },
+ { "ack", 188, 4, 0, ack_names },
+};
+
+static const struct packet_field block_response_fields[] = {
+ COMMON_RESPONSE_FIELDS,
+ { "data_length", 96, 16, PACKET_FIELD_DATA_LENGTH },
+ { "extended_tcode", 112, 16 },
+ { "crc", 128, 32, PACKET_FIELD_DETAIL },
+ { "data", 160, 0, PACKET_FIELD_TRANSACTION },
+ { "crc", -64, 32, PACKET_FIELD_DETAIL },
+ { "ack", -4, 4, 0, ack_names },
+};
+
+static const struct packet_field write_quadlet_request_fields[] = {
+ COMMON_REQUEST_FIELDS,
+ { "data", 96, 32, PACKET_FIELD_TRANSACTION },
+ { "ack", -4, 4, 0, ack_names },
+};
+
+static const struct packet_field block_request_fields[] = {
+ COMMON_REQUEST_FIELDS,
+ { "data_length", 96, 16, PACKET_FIELD_DATA_LENGTH | PACKET_FIELD_TRANSACTION },
+ { "extended_tcode", 112, 16, PACKET_FIELD_TRANSACTION },
+ { "crc", 128, 32, PACKET_FIELD_DETAIL },
+ { "data", 160, 0, PACKET_FIELD_TRANSACTION },
+ { "crc", -64, 32, PACKET_FIELD_DETAIL },
+ { "ack", -4, 4, 0, ack_names },
+};
+
+static const struct packet_field write_response_fields[] = {
+ COMMON_RESPONSE_FIELDS,
+ { "reserved", 64, 32, PACKET_FIELD_DETAIL },
+ { "ack", -4, 4, 0, ack_names },
+};
+
+static const struct packet_field iso_data_fields[] = {
+ { "data_length", 0, 16, PACKET_FIELD_DATA_LENGTH },
+ { "tag", 16, 2 },
+ { "channel", 18, 6 },
+ { "tcode", 24, 4, 0, tcode_names },
+ { "sy", 28, 4 },
+ { "crc", 32, 32, PACKET_FIELD_DETAIL },
+ { "data", 64, 0 },
+ { "crc", -64, 32, PACKET_FIELD_DETAIL },
+ { "ack", -4, 4, 0, ack_names },
+};
+
+static const struct packet_info packet_info[] = {
+ {
+ .name = "write_quadlet_request",
+ .type = PACKET_REQUEST,
+ .response_tcode = TCODE_WRITE_RESPONSE,
+ .fields = write_quadlet_request_fields,
+ .field_count = array_length(write_quadlet_request_fields)
+ },
+ {
+ .name = "write_block_request",
+ .type = PACKET_REQUEST,
+ .response_tcode = TCODE_WRITE_RESPONSE,
+ .fields = block_request_fields,
+ .field_count = array_length(block_request_fields)
+ },
+ {
+ .name = "write_response",
+ .type = PACKET_RESPONSE,
+ .fields = write_response_fields,
+ .field_count = array_length(write_response_fields)
+ },
+ {
+ .name = "reserved",
+ .type = PACKET_RESERVED,
+ },
+ {
+ .name = "read_quadlet_request",
+ .type = PACKET_REQUEST,
+ .response_tcode = TCODE_READ_QUADLET_RESPONSE,
+ .fields = read_quadlet_request_fields,
+ .field_count = array_length(read_quadlet_request_fields)
+ },
+ {
+ .name = "read_block_request",
+ .type = PACKET_REQUEST,
+ .response_tcode = TCODE_READ_BLOCK_RESPONSE,
+ .fields = read_block_request_fields,
+ .field_count = array_length(read_block_request_fields)
+ },
+ {
+ .name = "read_quadlet_response",
+ .type = PACKET_RESPONSE,
+ .fields = read_quadlet_response_fields,
+ .field_count = array_length(read_quadlet_response_fields)
+ },
+ {
+ .name = "read_block_response",
+ .type = PACKET_RESPONSE,
+ .fields = block_response_fields,
+ .field_count = array_length(block_response_fields)
+ },
+ {
+ .name = "cycle_start",
+ .type = PACKET_OTHER,
+ .fields = write_quadlet_request_fields,
+ .field_count = array_length(write_quadlet_request_fields)
+ },
+ {
+ .name = "lock_request",
+ .type = PACKET_REQUEST,
+ .fields = block_request_fields,
+ .field_count = array_length(block_request_fields)
+ },
+ {
+ .name = "iso_data",
+ .type = PACKET_OTHER,
+ .fields = iso_data_fields,
+ .field_count = array_length(iso_data_fields)
+ },
+ {
+ .name = "lock_response",
+ .type = PACKET_RESPONSE,
+ .fields = block_response_fields,
+ .field_count = array_length(block_response_fields)
+ },
+};
+
+static int
+handle_request_packet(uint32_t *data, size_t length)
+{
+ struct link_packet *p = (struct link_packet *) data;
+ struct subaction *sa, *prev;
+ struct link_transaction *t;
+
+ t = link_transaction_lookup(p->common.source, p->common.destination,
+ p->common.tlabel);
+ sa = subaction_create(data, length);
+ t->request = sa;
+
+ if (!list_empty(&t->request_list)) {
+ prev = list_tail(&t->request_list,
+ struct subaction, link);
+
+ if (!ACK_BUSY(prev->ack)) {
+ /*
+ * error, we should only see ack_busy_* before the
+ * ack_pending/ack_complete -- this is an ack_pending
+ * instead (ack_complete would have finished the
+ * transaction).
+ */
+ }
+
+ if (prev->packet.common.tcode != sa->packet.common.tcode ||
+ prev->packet.common.tlabel != sa->packet.common.tlabel) {
+ /* memcmp() ? */
+ /* error, these should match for retries. */
+ }
+ }
+
+ list_append(&t->request_list, &sa->link);
+
+ switch (sa->ack) {
+ case ACK_COMPLETE:
+ if (p->common.tcode != TCODE_WRITE_QUADLET_REQUEST &&
+ p->common.tcode != TCODE_WRITE_BLOCK_REQUEST)
+ /* error, unified transactions only allowed for write */;
+ list_remove(&t->link);
+ handle_transaction(t);
+ break;
+
+ case ACK_NO_ACK:
+ case ACK_DATA_ERROR:
+ case ACK_TYPE_ERROR:
+ list_remove(&t->link);
+ handle_transaction(t);
+ break;
+
+ case ACK_PENDING:
+ /* request subaction phase over, wait for response. */
+ break;
+
+ case ACK_BUSY_X:
+ case ACK_BUSY_A:
+ case ACK_BUSY_B:
+ /* ok, wait for retry. */
+ /* check that retry protocol is respected. */
+ break;
+ }
+
+ return 1;
+}
+
+static int
+handle_response_packet(uint32_t *data, size_t length)
+{
+ struct link_packet *p = (struct link_packet *) data;
+ struct subaction *sa, *prev;
+ struct link_transaction *t;
+
+ t = link_transaction_lookup(p->common.destination, p->common.source,
+ p->common.tlabel);
+ if (list_empty(&t->request_list)) {
+ /* unsolicited response */
+ }
+
+ sa = subaction_create(data, length);
+ t->response = sa;
+
+ if (!list_empty(&t->response_list)) {
+ prev = list_tail(&t->response_list, struct subaction, link);
+
+ if (!ACK_BUSY(prev->ack)) {
+ /*
+ * error, we should only see ack_busy_* before the
+ * ack_pending/ack_complete
+ */
+ }
+
+ if (prev->packet.common.tcode != sa->packet.common.tcode ||
+ prev->packet.common.tlabel != sa->packet.common.tlabel) {
+ /* use memcmp() instead? */
+ /* error, these should match for retries. */
+ }
+ } else {
+ prev = list_tail(&t->request_list, struct subaction, link);
+ if (prev->ack != ACK_PENDING) {
+ /*
+ * error, should not get response unless last request got
+ * ack_pending.
+ */
+ }
+
+ if (packet_info[prev->packet.common.tcode].response_tcode !=
+ sa->packet.common.tcode) {
+ /* error, tcode mismatch */
+ }
+ }
+
+ list_append(&t->response_list, &sa->link);
+
+ switch (sa->ack) {
+ case ACK_COMPLETE:
+ case ACK_NO_ACK:
+ case ACK_DATA_ERROR:
+ case ACK_TYPE_ERROR:
+ list_remove(&t->link);
+ handle_transaction(t);
+ /* transaction complete, remove t from pending list. */
+ break;
+
+ case ACK_PENDING:
+ /* error for responses. */
+ break;
+
+ case ACK_BUSY_X:
+ case ACK_BUSY_A:
+ case ACK_BUSY_B:
+ /* no problem, wait for next retry */
+ break;
+ }
+
+ return 1;
+}
+
+static int
+handle_packet(uint32_t *data, size_t length)
+{
+ if (length == 0) {
+ printf("bus reset\r\n");
+ clear_pending_transaction_list();
+ } else if (length > sizeof(struct phy_packet)) {
+ struct link_packet *p = (struct link_packet *) data;
+
+ switch (packet_info[p->common.tcode].type) {
+ case PACKET_REQUEST:
+ return handle_request_packet(data, length);
+
+ case PACKET_RESPONSE:
+ return handle_response_packet(data, length);
+
+ case PACKET_OTHER:
+ case PACKET_RESERVED:
+ return 0;
+ }
+ }
+
+ return 1;
+}
+
+static unsigned int
+get_bits(struct link_packet *packet, int offset, int width)
+{
+ uint32_t *data = (uint32_t *) packet;
+ uint32_t index, shift, mask;
+
+ index = offset / 32 + 1;
+ shift = 32 - (offset & 31) - width;
+ mask = width == 32 ? ~0 : (1 << width) - 1;
+
+ return (data[index] >> shift) & mask;
+}
+
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+#define byte_index(i) ((i) ^ 3)
+#elif __BYTE_ORDER == __BIG_ENDIAN
+#define byte_index(i) (i)
+#else
+#error unsupported byte order.
+#endif
+
+static void
+dump_data(unsigned char *data, int length)
+{
+ int i, print_length;
+
+ if (length > 128)
+ print_length = 128;
+ else
+ print_length = length;
+
+ for (i = 0; i < print_length; i++)
+ printf("%s%02hhx",
+ (i % 4 == 0 && i != 0) ? " " : "",
+ data[byte_index(i)]);
+
+ if (print_length < length)
+ printf(" (%d more bytes)", length - print_length);
+}
+
+static void
+decode_link_packet(struct link_packet *packet, size_t length,
+ int include_flags, int exclude_flags)
+{
+ const struct packet_info *pi;
+ int data_length = 0;
+ int i;
+
+ pi = &packet_info[packet->common.tcode];
+
+ for (i = 0; i < pi->field_count; i++) {
+ const struct packet_field *f = &pi->fields[i];
+ int offset;
+
+ if (f->flags & exclude_flags)
+ continue;
+ if (include_flags && !(f->flags & include_flags))
+ continue;
+
+ if (f->offset < 0)
+ offset = length * 8 + f->offset - 32;
+ else
+ offset = f->offset;
+
+ if (f->value_names != NULL) {
+ uint32_t bits;
+
+ bits = get_bits(packet, offset, f->width);
+ printf("%s", f->value_names[bits]);
+ } else if (f->width == 0) {
+ printf("%s=[", f->name);
+ dump_data((unsigned char *) packet + (offset / 8 + 4), data_length);
+ printf("]");
+ } else {
+ unsigned long long bits;
+ int high_width, low_width;
+
+ if ((offset & ~31) != ((offset + f->width - 1) & ~31)) {
+ /* Bit field spans quadlet boundary. */
+ high_width = ((offset + 31) & ~31) - offset;
+ low_width = f->width - high_width;
+
+ bits = get_bits(packet, offset, high_width);
+ bits = (bits << low_width) |
+ get_bits(packet, offset + high_width, low_width);
+ } else {
+ bits = get_bits(packet, offset, f->width);
+ }
+
+ printf("%s=0x%0*llx", f->name, (f->width + 3) / 4, bits);
+
+ if (f->flags & PACKET_FIELD_DATA_LENGTH)
+ data_length = bits;
+ }
+
+ if (i < pi->field_count - 1)
+ printf(", ");
+ }
+}
+
+static void
+print_packet(uint32_t *data, size_t length)
+{
+ int i;
+
+ printf("%6u ", data[0]);
+
+ if (length == 4) {
+ printf("bus reset");
+ } else if (length < sizeof(struct phy_packet)) {
+ printf("short packet: ");
+ for (i = 1; i < length / 4; i++)
+ printf("%s%08x", i == 1 ? "[" : " ", data[i]);
+ printf("]");
+
+ } else if (length == sizeof(struct phy_packet) && data[1] == ~data[2]) {
+ struct phy_packet *pp = (struct phy_packet *) data;
+
+ /* PHY packets are 3 quadlets: the 1-quadlet payload,
+ * the bitwise inverse of the payload, and the snoop
+ * mode ack. */
+
+ switch (pp->common.identifier) {
+ case PHY_PACKET_CONFIGURATION:
+ if (!pp->phy_config.set_root && !pp->phy_config.set_gap_count) {
+ printf("ext phy config: phy_id=%02x", pp->phy_config.root_id);
+ } else {
+ printf("phy config:");
+ if (pp->phy_config.set_root)
+ printf(" set_root_id=%02x", pp->phy_config.root_id);
+ if (pp->phy_config.set_gap_count)
+ printf(" set_gap_count=%d", pp->phy_config.gap_count);
+ }
+ break;
+
+ case PHY_PACKET_LINK_ON:
+ printf("link-on packet, phy_id=%02x", pp->link_on.phy_id);
+ break;
+
+ case PHY_PACKET_SELF_ID:
+ if (pp->self_id.extended) {
+ printf("extended self id: phy_id=%02x, seq=%d",
+ pp->ext_self_id.phy_id, pp->ext_self_id.sequence);
+ } else {
+ static const char * const speed_names[] = {
+ "S100", "S200", "S400", "BETA"
+ };
+ printf("self id: phy_id=%02x, link %s, gap_count=%d, speed=%s%s%s",
+ pp->self_id.phy_id,
+ (pp->self_id.link_active ? "active" : "not active"),
+ pp->self_id.gap_count,
+ speed_names[pp->self_id.phy_speed],
+ (pp->self_id.contender ? ", irm contender" : ""),
+ (pp->self_id.initiated_reset ? ", initiator" : ""));
+ }
+ break;
+ default:
+ printf("unknown phy packet: ");
+ for (i = 1; i < length / 4; i++)
+ printf("%s%08x", i == 1 ? "[" : " ", data[i]);
+ printf("]");
+ break;
+ }
+ } else {
+ struct link_packet *packet = (struct link_packet *) data;
+
+ decode_link_packet(packet, length, 0,
+ option_verbose ? 0 : PACKET_FIELD_DETAIL);
+ }
+
+ if (option_hex) {
+ printf(" [");
+ dump_data((unsigned char *) data + 4, length - 4);
+ printf("]");
+ }
+
+ printf("\r\n");
+}
+
+#define HIDE_CURSOR "\033[?25l"
+#define SHOW_CURSOR "\033[?25h"
+#define CLEAR "\033[H\033[2J"
+
+static void
+print_stats(uint32_t *data, size_t length)
+{
+ static int bus_reset_count, short_packet_count, phy_packet_count;
+ static int tcode_count[16];
+ static struct timeval last_update;
+ struct timeval now;
+ int i;
+
+ if (length == 0)
+ bus_reset_count++;
+ else if (length < sizeof(struct phy_packet))
+ short_packet_count++;
+ else if (length == sizeof(struct phy_packet) && data[1] == ~data[2])
+ phy_packet_count++;
+ else {
+ struct link_packet *packet = (struct link_packet *) data;
+ tcode_count[packet->common.tcode]++;
+ }
+
+ gettimeofday(&now, NULL);
+ if (now.tv_sec <= last_update.tv_sec &&
+ now.tv_usec < last_update.tv_usec + 500000)
+ return;
+
+ last_update = now;
+ printf(CLEAR HIDE_CURSOR
+ " bus resets : %8d\n"
+ " short packets : %8d\n"
+ " phy packets : %8d\n",
+ bus_reset_count, short_packet_count, phy_packet_count);
+
+ for (i = 0; i < array_length(packet_info); i++)
+ if (packet_info[i].type != PACKET_RESERVED)
+ printf(" %-24s: %8d\n", packet_info[i].name, tcode_count[i]);
+ printf(SHOW_CURSOR "\n");
+}
+
+static struct termios saved_attributes;
+
+static void
+reset_input_mode(void)
+{
+ tcsetattr(STDIN_FILENO, TCSANOW, &saved_attributes);
+}
+
+static void
+set_input_mode(void)
+{
+ struct termios tattr;
+
+ /* Make sure stdin is a terminal. */
+ if (!isatty(STDIN_FILENO)) {
+ fprintf(stderr, "Not a terminal.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ /* Save the terminal attributes so we can restore them later. */
+ tcgetattr(STDIN_FILENO, &saved_attributes);
+ atexit(reset_input_mode);
+
+ /* Set the funny terminal modes. */
+ tcgetattr(STDIN_FILENO, &tattr);
+ tattr.c_lflag &= ~(ICANON|ECHO); /* Clear ICANON and ECHO. */
+ tattr.c_cc[VMIN] = 1;
+ tattr.c_cc[VTIME] = 0;
+ tcsetattr(STDIN_FILENO, TCSAFLUSH, &tattr);
+}
+
+int main(int argc, const char *argv[])
+{
+ uint32_t buf[128 * 1024];
+ uint32_t filter;
+ int length, retval, view;
+ int fd = -1;
+ FILE *output = NULL, *input = NULL;
+ poptContext con;
+ char c;
+ struct pollfd pollfds[2];
+
+ sys_sigint_handler = signal(SIGINT, sigint_handler);
+
+ con = poptGetContext(NULL, argc, argv, options, 0);
+ retval = poptGetNextOpt(con);
+ if (retval < -1) {
+ poptPrintUsage(con, stdout, 0);
+ return -1;
+ }
+
+ if (option_version) {
+ printf("dump tool for nosy sniffer, version %s\n", VERSION);
+ return 0;
+ }
+
+ if (__BYTE_ORDER != __LITTLE_ENDIAN)
+ fprintf(stderr, "warning: nosy has only been tested on little "
+ "endian machines\n");
+
+ if (option_input != NULL) {
+ input = fopen(option_input, "r");
+ if (input == NULL) {
+ fprintf(stderr, "Could not open %s, %m\n", option_input);
+ return -1;
+ }
+ } else {
+ fd = open(option_nosy_device, O_RDWR);
+ if (fd < 0) {
+ fprintf(stderr, "Could not open %s, %m\n", option_nosy_device);
+ return -1;
+ }
+ set_input_mode();
+ }
+
+ if (strcmp(option_view, "transaction") == 0)
+ view = VIEW_TRANSACTION;
+ else if (strcmp(option_view, "stats") == 0)
+ view = VIEW_STATS;
+ else
+ view = VIEW_PACKET;
+
+ if (option_output) {
+ output = fopen(option_output, "w");
+ if (output == NULL) {
+ fprintf(stderr, "Could not open %s, %m\n", option_output);
+ return -1;
+ }
+ }
+
+ setvbuf(stdout, NULL, _IOLBF, BUFSIZ);
+
+ filter = ~0;
+ if (!option_iso)
+ filter &= ~(1 << TCODE_STREAM_DATA);
+ if (!option_cycle_start)
+ filter &= ~(1 << TCODE_CYCLE_START);
+ if (view == VIEW_STATS)
+ filter = ~(1 << TCODE_CYCLE_START);
+
+ ioctl(fd, NOSY_IOC_FILTER, filter);
+
+ ioctl(fd, NOSY_IOC_START);
+
+ pollfds[0].fd = fd;
+ pollfds[0].events = POLLIN;
+ pollfds[1].fd = STDIN_FILENO;
+ pollfds[1].events = POLLIN;
+
+ while (run) {
+ if (input != NULL) {
+ if (fread(&length, sizeof length, 1, input) != 1)
+ return 0;
+ fread(buf, 1, length, input);
+ } else {
+ poll(pollfds, 2, -1);
+ if (pollfds[1].revents) {
+ read(STDIN_FILENO, &c, sizeof c);
+ switch (c) {
+ case 'q':
+ if (output != NULL)
+ fclose(output);
+ return 0;
+ }
+ }
+
+ if (pollfds[0].revents)
+ length = read(fd, buf, sizeof buf);
+ else
+ continue;
+ }
+
+ if (output != NULL) {
+ fwrite(&length, sizeof length, 1, output);
+ fwrite(buf, 1, length, output);
+ }
+
+ switch (view) {
+ case VIEW_TRANSACTION:
+ handle_packet(buf, length);
+ break;
+ case VIEW_PACKET:
+ print_packet(buf, length);
+ break;
+ case VIEW_STATS:
+ print_stats(buf, length);
+ break;
+ }
+ }
+
+ if (output != NULL)
+ fclose(output);
+
+ close(fd);
+
+ poptFreeContext(con);
+
+ return 0;
+}
diff --git a/tools/firewire/nosy-dump.h b/tools/firewire/nosy-dump.h
new file mode 100644
index 000000000000..3a4b5b33ba5d
--- /dev/null
+++ b/tools/firewire/nosy-dump.h
@@ -0,0 +1,173 @@
+#ifndef __nosy_dump_h__
+#define __nosy_dump_h__
+
+#define array_length(array) (sizeof(array) / sizeof(array[0]))
+
+#define ACK_NO_ACK 0x0
+#define ACK_DONE(a) ((a >> 2) == 0)
+#define ACK_BUSY(a) ((a >> 2) == 1)
+#define ACK_ERROR(a) ((a >> 2) == 3)
+
+#include <stdint.h>
+
+struct phy_packet {
+ uint32_t timestamp;
+ union {
+ struct {
+ uint32_t zero:24;
+ uint32_t phy_id:6;
+ uint32_t identifier:2;
+ } common, link_on;
+
+ struct {
+ uint32_t zero:16;
+ uint32_t gap_count:6;
+ uint32_t set_gap_count:1;
+ uint32_t set_root:1;
+ uint32_t root_id:6;
+ uint32_t identifier:2;
+ } phy_config;
+
+ struct {
+ uint32_t more_packets:1;
+ uint32_t initiated_reset:1;
+ uint32_t port2:2;
+ uint32_t port1:2;
+ uint32_t port0:2;
+ uint32_t power_class:3;
+ uint32_t contender:1;
+ uint32_t phy_delay:2;
+ uint32_t phy_speed:2;
+ uint32_t gap_count:6;
+ uint32_t link_active:1;
+ uint32_t extended:1;
+ uint32_t phy_id:6;
+ uint32_t identifier:2;
+ } self_id;
+
+ struct {
+ uint32_t more_packets:1;
+ uint32_t reserved1:1;
+ uint32_t porth:2;
+ uint32_t portg:2;
+ uint32_t portf:2;
+ uint32_t porte:2;
+ uint32_t portd:2;
+ uint32_t portc:2;
+ uint32_t portb:2;
+ uint32_t porta:2;
+ uint32_t reserved0:2;
+ uint32_t sequence:3;
+ uint32_t extended:1;
+ uint32_t phy_id:6;
+ uint32_t identifier:2;
+ } ext_self_id;
+ };
+ uint32_t inverted;
+ uint32_t ack;
+};
+
+#define TCODE_PHY_PACKET 0x10
+
+#define PHY_PACKET_CONFIGURATION 0x00
+#define PHY_PACKET_LINK_ON 0x01
+#define PHY_PACKET_SELF_ID 0x02
+
+struct link_packet {
+ uint32_t timestamp;
+ union {
+ struct {
+ uint32_t priority:4;
+ uint32_t tcode:4;
+ uint32_t rt:2;
+ uint32_t tlabel:6;
+ uint32_t destination:16;
+
+ uint32_t offset_high:16;
+ uint32_t source:16;
+
+ uint32_t offset_low;
+ } common;
+
+ struct {
+ uint32_t common[3];
+ uint32_t crc;
+ } read_quadlet;
+
+ struct {
+ uint32_t common[3];
+ uint32_t data;
+ uint32_t crc;
+ } read_quadlet_response;
+
+ struct {
+ uint32_t common[3];
+ uint32_t extended_tcode:16;
+ uint32_t data_length:16;
+ uint32_t crc;
+ } read_block;
+
+ struct {
+ uint32_t common[3];
+ uint32_t extended_tcode:16;
+ uint32_t data_length:16;
+ uint32_t crc;
+ uint32_t data[0];
+ /* crc and ack follow. */
+ } read_block_response;
+
+ struct {
+ uint32_t common[3];
+ uint32_t data;
+ uint32_t crc;
+ } write_quadlet;
+
+ struct {
+ uint32_t common[3];
+ uint32_t extended_tcode:16;
+ uint32_t data_length:16;
+ uint32_t crc;
+ uint32_t data[0];
+ /* crc and ack follow. */
+ } write_block;
+
+ struct {
+ uint32_t common[3];
+ uint32_t crc;
+ } write_response;
+
+ struct {
+ uint32_t common[3];
+ uint32_t data;
+ uint32_t crc;
+ } cycle_start;
+
+ struct {
+ uint32_t sy:4;
+ uint32_t tcode:4;
+ uint32_t channel:6;
+ uint32_t tag:2;
+ uint32_t data_length:16;
+
+ uint32_t crc;
+ } iso_data;
+ };
+};
+
+struct subaction {
+ uint32_t ack;
+ size_t length;
+ struct list link;
+ struct link_packet packet;
+};
+
+struct link_transaction {
+ int request_node, response_node, tlabel;
+ struct subaction *request, *response;
+ struct list request_list, response_list;
+ struct list link;
+};
+
+int decode_fcp(struct link_transaction *t);
+
+#endif /* __nosy_dump_h__ */