linux1394-2.6.git tree vs. linus v2.6.29-rc3-git1 on 20090130 by jarod
---
firewire-git/drivers/firewire/fw-card.c | 68 -
firewire-git/drivers/firewire/fw-cdev.c | 1014 +++++++++++++++++--------
firewire-git/drivers/firewire/fw-device.c | 43 -
firewire-git/drivers/firewire/fw-device.h | 7
firewire-git/drivers/firewire/fw-iso.c | 225 ++++-
firewire-git/drivers/firewire/fw-ohci.c | 236 ++---
firewire-git/drivers/firewire/fw-sbp2.c | 57 -
firewire-git/drivers/firewire/fw-topology.c | 28
firewire-git/drivers/firewire/fw-topology.h | 19
firewire-git/drivers/firewire/fw-transaction.c | 151 +--
firewire-git/drivers/firewire/fw-transaction.h | 125 ---
include/linux/firewire-cdev.h | 170 +++-
12 files changed, 1359 insertions(+), 784 deletions(-)
diff -Naurp linux-2.6-git/drivers/firewire/fw-card.c firewire-git/drivers/firewire/fw-card.c
--- linux-2.6-git/drivers/firewire/fw-card.c 2009-01-30 13:39:02.989651512 -0500
+++ firewire-git/drivers/firewire/fw-card.c 2009-01-30 13:35:51.859771884 -0500
@@ -63,8 +63,7 @@ static int descriptor_count;
#define BIB_CMC ((1) << 30)
#define BIB_IMC ((1) << 31)

-static u32 *
-generate_config_rom(struct fw_card *card, size_t *config_rom_length)
+static u32 *generate_config_rom(struct fw_card *card, size_t *config_rom_length)
{
struct fw_descriptor *desc;
static u32 config_rom[256];
@@ -128,8 +127,7 @@ generate_config_rom(struct fw_card *card
return config_rom;
}

-static void
-update_config_roms(void)
+static void update_config_roms(void)
{
struct fw_card *card;
u32 *config_rom;
@@ -141,8 +139,7 @@ update_config_roms(void)
}
}

-int
-fw_core_add_descriptor(struct fw_descriptor *desc)
+int fw_core_add_descriptor(struct fw_descriptor *desc)
{
size_t i;

@@ -171,8 +168,7 @@ fw_core_add_descriptor(struct fw_descrip
return 0;
}

-void
-fw_core_remove_descriptor(struct fw_descriptor *desc)
+void fw_core_remove_descriptor(struct fw_descriptor *desc)
{
mutex_lock(&card_mutex);

@@ -189,8 +185,7 @@ static const char gap_count_table[] = {
63, 5, 7, 8, 10, 13, 16, 18, 21, 24, 26, 29, 32, 35, 37, 40
};

-void
-fw_schedule_bm_work(struct fw_card *card, unsigned long delay)
+void fw_schedule_bm_work(struct fw_card *card, unsigned long delay)
{
int scheduled;

@@ -200,8 +195,7 @@ fw_schedule_bm_work(struct fw_card *card
fw_card_put(card);
}

-static void
-fw_card_bm_work(struct work_struct *work)
+static void fw_card_bm_work(struct work_struct *work)
{
struct fw_card *card = container_of(work, struct fw_card, work.work);
struct fw_device *root_device;
@@ -371,17 +365,16 @@ fw_card_bm_work(struct work_struct *work
fw_card_put(card);
}

-static void
-flush_timer_callback(unsigned long data)
+static void flush_timer_callback(unsigned long data)
{
struct fw_card *card = (struct fw_card *)data;

fw_flush_transactions(card);
}

-void
-fw_card_initialize(struct fw_card *card, const struct fw_card_driver *driver,
- struct device *device)
+void fw_card_initialize(struct fw_card *card,
+ const struct fw_card_driver *driver,
+ struct device *device)
{
static atomic_t index = ATOMIC_INIT(-1);

@@ -406,9 +399,8 @@ fw_card_initialize(struct fw_card *card,
}
EXPORT_SYMBOL(fw_card_initialize);

-int
-fw_card_add(struct fw_card *card,
- u32 max_receive, u32 link_speed, u64 guid)
+int fw_card_add(struct fw_card *card,
+ u32 max_receive, u32 link_speed, u64 guid)
{
u32 *config_rom;
size_t length;
@@ -435,23 +427,20 @@ EXPORT_SYMBOL(fw_card_add);
* dummy driver just fails all IO.
*/

-static int
-dummy_enable(struct fw_card *card, u32 *config_rom, size_t length)
+static int dummy_enable(struct fw_card *card, u32 *config_rom, size_t length)
{
BUG();
return -1;
}

-static int
-dummy_update_phy_reg(struct fw_card *card, int address,
- int clear_bits, int set_bits)
+static int dummy_update_phy_reg(struct fw_card *card, int address,
+ int clear_bits, int set_bits)
{
return -ENODEV;
}

-static int
-dummy_set_config_rom(struct fw_card *card,
- u32 *config_rom, size_t length)
+static int dummy_set_config_rom(struct fw_card *card,
+ u32 *config_rom, size_t length)
{
/*
* We take the card out of card_list before setting the dummy
@@ -461,27 +450,23 @@ dummy_set_config_rom(struct fw_card *car
return -1;
}

-static void
-dummy_send_request(struct fw_card *card, struct fw_packet *packet)
+static void dummy_send_request(struct fw_card *card, struct fw_packet *packet)
{
packet->callback(packet, card, -ENODEV);
}

-static void
-dummy_send_response(struct fw_card *card, struct fw_packet *packet)
+static void dummy_send_response(struct fw_card *card, struct fw_packet *packet)
{
packet->callback(packet, card, -ENODEV);
}

-static int
-dummy_cancel_packet(struct fw_card *card, struct fw_packet *packet)
+static int dummy_cancel_packet(struct fw_card *card, struct fw_packet *packet)
{
return -ENOENT;
}

-static int
-dummy_enable_phys_dma(struct fw_card *card,
- int node_id, int generation)
+static int dummy_enable_phys_dma(struct fw_card *card,
+ int node_id, int generation)
{
return -ENODEV;
}
@@ -496,16 +481,14 @@ static struct fw_card_driver dummy_drive
.enable_phys_dma = dummy_enable_phys_dma,
};

-void
-fw_card_release(struct kref *kref)
+void fw_card_release(struct kref *kref)
{
struct fw_card *card = container_of(kref, struct fw_card, kref);

complete(&card->done);
}

-void
-fw_core_remove_card(struct fw_card *card)
+void fw_core_remove_card(struct fw_card *card)
{
card->driver->update_phy_reg(card, 4,
PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
@@ -529,8 +512,7 @@ fw_core_remove_card(struct fw_card *card
}
EXPORT_SYMBOL(fw_core_remove_card);

-int
-fw_core_initiate_bus_reset(struct fw_card *card, int short_reset)
+int fw_core_initiate_bus_reset(struct fw_card *card, int short_reset)
{
int reg = short_reset ? 5 : 1;
int bit = short_reset ? PHY_BUS_SHORT_RESET : PHY_BUS_RESET;
diff -Naurp linux-2.6-git/drivers/firewire/fw-cdev.c firewire-git/drivers/firewire/fw-cdev.c
|
||
|
--- linux-2.6-git/drivers/firewire/fw-cdev.c 2008-11-04 11:19:19.000000000 -0500
|
||
|
+++ firewire-git/drivers/firewire/fw-cdev.c 2009-01-30 13:35:51.860646788 -0500
|
||
|
@@ -18,87 +18,162 @@
|
||
|
* Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
|
||
|
*/
|
||
|
|
||
|
-#include <linux/module.h>
|
||
|
-#include <linux/kernel.h>
|
||
|
-#include <linux/wait.h>
|
||
|
-#include <linux/errno.h>
|
||
|
+#include <linux/compat.h>
|
||
|
+#include <linux/delay.h>
|
||
|
#include <linux/device.h>
|
||
|
-#include <linux/vmalloc.h>
|
||
|
+#include <linux/errno.h>
|
||
|
+#include <linux/firewire-cdev.h>
|
||
|
+#include <linux/idr.h>
|
||
|
+#include <linux/jiffies.h>
|
||
|
+#include <linux/kernel.h>
|
||
|
+#include <linux/kref.h>
|
||
|
+#include <linux/mm.h>
|
||
|
+#include <linux/module.h>
|
||
|
+#include <linux/mutex.h>
|
||
|
#include <linux/poll.h>
|
||
|
#include <linux/preempt.h>
|
||
|
+#include <linux/spinlock.h>
|
||
|
#include <linux/time.h>
|
||
|
-#include <linux/delay.h>
|
||
|
-#include <linux/mm.h>
|
||
|
-#include <linux/idr.h>
|
||
|
-#include <linux/compat.h>
|
||
|
-#include <linux/firewire-cdev.h>
|
||
|
+#include <linux/vmalloc.h>
|
||
|
+#include <linux/wait.h>
|
||
|
+#include <linux/workqueue.h>
|
||
|
+
|
||
|
#include <asm/system.h>
|
||
|
#include <asm/uaccess.h>
|
||
|
-#include "fw-transaction.h"
|
||
|
-#include "fw-topology.h"
|
||
|
+
|
||
|
#include "fw-device.h"
|
||
|
+#include "fw-topology.h"
|
||
|
+#include "fw-transaction.h"
|
||
|
+
|
||
|
+struct client {
|
||
|
+ u32 version;
|
||
|
+ struct fw_device *device;
|
||
|
+
|
||
|
+ spinlock_t lock;
|
||
|
+ bool in_shutdown;
|
||
|
+ struct idr resource_idr;
|
||
|
+ struct list_head event_list;
|
||
|
+ wait_queue_head_t wait;
|
||
|
+ u64 bus_reset_closure;
|
||
|
+
|
||
|
+ struct fw_iso_context *iso_context;
|
||
|
+ u64 iso_closure;
|
||
|
+ struct fw_iso_buffer buffer;
|
||
|
+ unsigned long vm_start;
|
||
|
|
||
|
-struct client;
|
||
|
-struct client_resource {
|
||
|
struct list_head link;
|
||
|
- void (*release)(struct client *client, struct client_resource *r);
|
||
|
- u32 handle;
|
||
|
+ struct kref kref;
|
||
|
+};
|
||
|
+
|
||
|
+static inline void client_get(struct client *client)
|
||
|
+{
|
||
|
+ kref_get(&client->kref);
|
||
|
+}
|
||
|
+
|
||
|
+static void client_release(struct kref *kref)
|
||
|
+{
|
||
|
+ struct client *client = container_of(kref, struct client, kref);
|
||
|
+
|
||
|
+ fw_device_put(client->device);
|
||
|
+ kfree(client);
|
||
|
+}
|
||
|
+
|
||
|
+static void client_put(struct client *client)
|
||
|
+{
|
||
|
+ kref_put(&client->kref, client_release);
|
||
|
+}
|
||
|
+
|
||
|
+struct client_resource;
|
||
|
+typedef void (*client_resource_release_fn_t)(struct client *,
|
||
|
+ struct client_resource *);
|
||
|
+struct client_resource {
|
||
|
+ client_resource_release_fn_t release;
|
||
|
+ int handle;
|
||
|
+};
|
||
|
+
|
||
|
+struct address_handler_resource {
|
||
|
+ struct client_resource resource;
|
||
|
+ struct fw_address_handler handler;
|
||
|
+ __u64 closure;
|
||
|
+ struct client *client;
|
||
|
+};
|
||
|
+
|
||
|
+struct outbound_transaction_resource {
|
||
|
+ struct client_resource resource;
|
||
|
+ struct fw_transaction transaction;
|
||
|
+};
|
||
|
+
|
||
|
+struct inbound_transaction_resource {
|
||
|
+ struct client_resource resource;
|
||
|
+ struct fw_request *request;
|
||
|
+ void *data;
|
||
|
+ size_t length;
|
||
|
};
|
||
|
|
||
|
+struct descriptor_resource {
|
||
|
+ struct client_resource resource;
|
||
|
+ struct fw_descriptor descriptor;
|
||
|
+ u32 data[0];
|
||
|
+};
|
||
|
+
|
||
|
+struct iso_resource {
|
||
|
+ struct client_resource resource;
|
||
|
+ struct client *client;
|
||
|
+ /* Schedule work and access todo only with client->lock held. */
|
||
|
+ struct delayed_work work;
|
||
|
+ enum {ISO_RES_ALLOC, ISO_RES_REALLOC, ISO_RES_DEALLOC,
|
||
|
+ ISO_RES_ALLOC_ONCE, ISO_RES_DEALLOC_ONCE,} todo;
|
||
|
+ int generation;
|
||
|
+ u64 channels;
|
||
|
+ s32 bandwidth;
|
||
|
+ struct iso_resource_event *e_alloc, *e_dealloc;
|
||
|
+};
|
||
|
+
|
||
|
+static void schedule_iso_resource(struct iso_resource *);
|
||
|
+static void release_iso_resource(struct client *, struct client_resource *);
|
||
|
+
|
||
|
/*
|
||
|
* dequeue_event() just kfree()'s the event, so the event has to be
|
||
|
- * the first field in the struct.
|
||
|
+ * the first field in a struct XYZ_event.
|
||
|
*/
|
||
|
-
|
||
|
struct event {
|
||
|
struct { void *data; size_t size; } v[2];
|
||
|
struct list_head link;
|
||
|
};
|
||
|
|
||
|
-struct bus_reset {
|
||
|
+struct bus_reset_event {
|
||
|
struct event event;
|
||
|
struct fw_cdev_event_bus_reset reset;
|
||
|
};
|
||
|
|
||
|
-struct response {
|
||
|
+struct outbound_transaction_event {
|
||
|
struct event event;
|
||
|
- struct fw_transaction transaction;
|
||
|
struct client *client;
|
||
|
- struct client_resource resource;
|
||
|
+ struct outbound_transaction_resource r;
|
||
|
struct fw_cdev_event_response response;
|
||
|
};
|
||
|
|
||
|
-struct iso_interrupt {
|
||
|
+struct inbound_transaction_event {
|
||
|
struct event event;
|
||
|
- struct fw_cdev_event_iso_interrupt interrupt;
|
||
|
+ struct fw_cdev_event_request request;
|
||
|
};
|
||
|
|
||
|
-struct client {
|
||
|
- u32 version;
|
||
|
- struct fw_device *device;
|
||
|
- spinlock_t lock;
|
||
|
- u32 resource_handle;
|
||
|
- struct list_head resource_list;
|
||
|
- struct list_head event_list;
|
||
|
- wait_queue_head_t wait;
|
||
|
- u64 bus_reset_closure;
|
||
|
-
|
||
|
- struct fw_iso_context *iso_context;
|
||
|
- u64 iso_closure;
|
||
|
- struct fw_iso_buffer buffer;
|
||
|
- unsigned long vm_start;
|
||
|
+struct iso_interrupt_event {
|
||
|
+ struct event event;
|
||
|
+ struct fw_cdev_event_iso_interrupt interrupt;
|
||
|
+};
|
||
|
|
||
|
- struct list_head link;
|
||
|
+struct iso_resource_event {
|
||
|
+ struct event event;
|
||
|
+ struct fw_cdev_event_iso_resource resource;
|
||
|
};
|
||
|
|
||
|
-static inline void __user *
|
||
|
-u64_to_uptr(__u64 value)
|
||
|
+static inline void __user *u64_to_uptr(__u64 value)
|
||
|
{
|
||
|
return (void __user *)(unsigned long)value;
|
||
|
}
|
||
|
|
||
|
-static inline __u64
|
||
|
-uptr_to_u64(void __user *ptr)
|
||
|
+static inline __u64 uptr_to_u64(void __user *ptr)
|
||
|
{
|
||
|
return (__u64)(unsigned long)ptr;
|
||
|
}
|
||
|
@@ -107,7 +182,6 @@ static int fw_device_op_open(struct inod
|
||
|
{
|
||
|
struct fw_device *device;
|
||
|
struct client *client;
|
||
|
- unsigned long flags;
|
||
|
|
||
|
device = fw_device_get_by_devt(inode->i_rdev);
|
||
|
if (device == NULL)
|
||
|
@@ -125,16 +199,17 @@ static int fw_device_op_open(struct inod
|
||
|
}
|
||
|
|
||
|
client->device = device;
|
||
|
- INIT_LIST_HEAD(&client->event_list);
|
||
|
- INIT_LIST_HEAD(&client->resource_list);
|
||
|
spin_lock_init(&client->lock);
|
||
|
+ idr_init(&client->resource_idr);
|
||
|
+ INIT_LIST_HEAD(&client->event_list);
|
||
|
init_waitqueue_head(&client->wait);
|
||
|
+ kref_init(&client->kref);
|
||
|
|
||
|
file->private_data = client;
|
||
|
|
||
|
- spin_lock_irqsave(&device->card->lock, flags);
|
||
|
+ mutex_lock(&device->client_list_mutex);
|
||
|
list_add_tail(&client->link, &device->client_list);
|
||
|
- spin_unlock_irqrestore(&device->card->lock, flags);
|
||
|
+ mutex_unlock(&device->client_list_mutex);
|
||
|
|
||
|
return 0;
|
||
|
}
|
||
|
@@ -150,68 +225,69 @@ static void queue_event(struct client *c
|
||
|
event->v[1].size = size1;
|
||
|
|
||
|
spin_lock_irqsave(&client->lock, flags);
|
||
|
- list_add_tail(&event->link, &client->event_list);
|
||
|
+ if (client->in_shutdown)
|
||
|
+ kfree(event);
|
||
|
+ else
|
||
|
+ list_add_tail(&event->link, &client->event_list);
|
||
|
spin_unlock_irqrestore(&client->lock, flags);
|
||
|
|
||
|
wake_up_interruptible(&client->wait);
|
||
|
}
|
||
|
|
||
|
-static int
|
||
|
-dequeue_event(struct client *client, char __user *buffer, size_t count)
|
||
|
+static int dequeue_event(struct client *client,
|
||
|
+ char __user *buffer, size_t count)
|
||
|
{
|
||
|
- unsigned long flags;
|
||
|
struct event *event;
|
||
|
size_t size, total;
|
||
|
- int i, retval;
|
||
|
+ int i, ret;
|
||
|
|
||
|
- retval = wait_event_interruptible(client->wait,
|
||
|
- !list_empty(&client->event_list) ||
|
||
|
- fw_device_is_shutdown(client->device));
|
||
|
- if (retval < 0)
|
||
|
- return retval;
|
||
|
+ ret = wait_event_interruptible(client->wait,
|
||
|
+ !list_empty(&client->event_list) ||
|
||
|
+ fw_device_is_shutdown(client->device));
|
||
|
+ if (ret < 0)
|
||
|
+ return ret;
|
||
|
|
||
|
if (list_empty(&client->event_list) &&
|
||
|
fw_device_is_shutdown(client->device))
|
||
|
return -ENODEV;
|
||
|
|
||
|
- spin_lock_irqsave(&client->lock, flags);
|
||
|
- event = container_of(client->event_list.next, struct event, link);
|
||
|
+ spin_lock_irq(&client->lock);
|
||
|
+ event = list_first_entry(&client->event_list, struct event, link);
|
||
|
list_del(&event->link);
|
||
|
- spin_unlock_irqrestore(&client->lock, flags);
|
||
|
+ spin_unlock_irq(&client->lock);
|
||
|
|
||
|
total = 0;
|
||
|
for (i = 0; i < ARRAY_SIZE(event->v) && total < count; i++) {
|
||
|
size = min(event->v[i].size, count - total);
|
||
|
if (copy_to_user(buffer + total, event->v[i].data, size)) {
|
||
|
- retval = -EFAULT;
|
||
|
+ ret = -EFAULT;
|
||
|
goto out;
|
||
|
}
|
||
|
total += size;
|
||
|
}
|
||
|
- retval = total;
|
||
|
+ ret = total;
|
||
|
|
||
|
out:
|
||
|
kfree(event);
|
||
|
|
||
|
- return retval;
|
||
|
+ return ret;
|
||
|
}
|
||
|
|
||
|
-static ssize_t
|
||
|
-fw_device_op_read(struct file *file,
|
||
|
- char __user *buffer, size_t count, loff_t *offset)
|
||
|
+static ssize_t fw_device_op_read(struct file *file, char __user *buffer,
|
||
|
+ size_t count, loff_t *offset)
|
||
|
{
|
||
|
struct client *client = file->private_data;
|
||
|
|
||
|
return dequeue_event(client, buffer, count);
|
||
|
}
|
||
|
|
||
|
-/* caller must hold card->lock so that node pointers can be dereferenced here */
|
||
|
-static void
|
||
|
-fill_bus_reset_event(struct fw_cdev_event_bus_reset *event,
|
||
|
- struct client *client)
|
||
|
+static void fill_bus_reset_event(struct fw_cdev_event_bus_reset *event,
|
||
|
+ struct client *client)
|
||
|
{
|
||
|
struct fw_card *card = client->device->card;
|
||
|
|
||
|
+ spin_lock_irq(&card->lock);
|
||
|
+
|
||
|
event->closure = client->bus_reset_closure;
|
||
|
event->type = FW_CDEV_EVENT_BUS_RESET;
|
||
|
event->generation = client->device->generation;
|
||
|
@@ -220,39 +296,49 @@ fill_bus_reset_event(struct fw_cdev_even
|
||
|
event->bm_node_id = 0; /* FIXME: We don't track the BM. */
|
||
|
event->irm_node_id = card->irm_node->node_id;
|
||
|
event->root_node_id = card->root_node->node_id;
|
||
|
+
|
||
|
+ spin_unlock_irq(&card->lock);
|
||
|
}
|
||
|
|
||
|
-static void
|
||
|
-for_each_client(struct fw_device *device,
|
||
|
- void (*callback)(struct client *client))
|
||
|
+static void for_each_client(struct fw_device *device,
|
||
|
+ void (*callback)(struct client *client))
|
||
|
{
|
||
|
- struct fw_card *card = device->card;
|
||
|
struct client *c;
|
||
|
- unsigned long flags;
|
||
|
-
|
||
|
- spin_lock_irqsave(&card->lock, flags);
|
||
|
|
||
|
+ mutex_lock(&device->client_list_mutex);
|
||
|
list_for_each_entry(c, &device->client_list, link)
|
||
|
callback(c);
|
||
|
+ mutex_unlock(&device->client_list_mutex);
|
||
|
+}
|
||
|
+
|
||
|
+static int schedule_reallocations(int id, void *p, void *data)
|
||
|
+{
|
||
|
+ struct client_resource *r = p;
|
||
|
|
||
|
- spin_unlock_irqrestore(&card->lock, flags);
|
||
|
+ if (r->release == release_iso_resource)
|
||
|
+ schedule_iso_resource(container_of(r,
|
||
|
+ struct iso_resource, resource));
|
||
|
+ return 0;
|
||
|
}
|
||
|
|
||
|
-static void
|
||
|
-queue_bus_reset_event(struct client *client)
|
||
|
+static void queue_bus_reset_event(struct client *client)
|
||
|
{
|
||
|
- struct bus_reset *bus_reset;
|
||
|
+ struct bus_reset_event *e;
|
||
|
|
||
|
- bus_reset = kzalloc(sizeof(*bus_reset), GFP_ATOMIC);
|
||
|
- if (bus_reset == NULL) {
|
||
|
+ e = kzalloc(sizeof(*e), GFP_KERNEL);
|
||
|
+ if (e == NULL) {
|
||
|
fw_notify("Out of memory when allocating bus reset event\n");
|
||
|
return;
|
||
|
}
|
||
|
|
||
|
- fill_bus_reset_event(&bus_reset->reset, client);
|
||
|
+ fill_bus_reset_event(&e->reset, client);
|
||
|
+
|
||
|
+ queue_event(client, &e->event,
|
||
|
+ &e->reset, sizeof(e->reset), NULL, 0);
|
||
|
|
||
|
- queue_event(client, &bus_reset->event,
|
||
|
- &bus_reset->reset, sizeof(bus_reset->reset), NULL, 0);
|
||
|
+ spin_lock_irq(&client->lock);
|
||
|
+ idr_for_each(&client->resource_idr, schedule_reallocations, client);
|
||
|
+ spin_unlock_irq(&client->lock);
|
||
|
}
|
||
|
|
||
|
void fw_device_cdev_update(struct fw_device *device)
|
||
|
@@ -274,11 +360,11 @@ static int ioctl_get_info(struct client
|
||
|
{
|
||
|
struct fw_cdev_get_info *get_info = buffer;
|
||
|
struct fw_cdev_event_bus_reset bus_reset;
|
||
|
- struct fw_card *card = client->device->card;
|
||
|
unsigned long ret = 0;
|
||
|
|
||
|
client->version = get_info->version;
|
||
|
get_info->version = FW_CDEV_VERSION;
|
||
|
+ get_info->card = client->device->card->index;
|
||
|
|
||
|
down_read(&fw_device_rwsem);
|
||
|
|
||
|
@@ -300,49 +386,61 @@ static int ioctl_get_info(struct client
|
||
|
client->bus_reset_closure = get_info->bus_reset_closure;
|
||
|
if (get_info->bus_reset != 0) {
|
||
|
void __user *uptr = u64_to_uptr(get_info->bus_reset);
|
||
|
- unsigned long flags;
|
||
|
|
||
|
- spin_lock_irqsave(&card->lock, flags);
|
||
|
fill_bus_reset_event(&bus_reset, client);
|
||
|
- spin_unlock_irqrestore(&card->lock, flags);
|
||
|
-
|
||
|
if (copy_to_user(uptr, &bus_reset, sizeof(bus_reset)))
|
||
|
return -EFAULT;
|
||
|
}
|
||
|
|
||
|
- get_info->card = card->index;
|
||
|
-
|
||
|
return 0;
|
||
|
}
|
||
|
|
||
|
-static void
|
||
|
-add_client_resource(struct client *client, struct client_resource *resource)
|
||
|
+static int add_client_resource(struct client *client,
|
||
|
+ struct client_resource *resource, gfp_t gfp_mask)
|
||
|
{
|
||
|
unsigned long flags;
|
||
|
+ int ret;
|
||
|
+
|
||
|
+ retry:
|
||
|
+ if (idr_pre_get(&client->resource_idr, gfp_mask | __GFP_ZERO) == 0)
|
||
|
+ return -ENOMEM;
|
||
|
|
||
|
spin_lock_irqsave(&client->lock, flags);
|
||
|
- list_add_tail(&resource->link, &client->resource_list);
|
||
|
- resource->handle = client->resource_handle++;
|
||
|
+ if (client->in_shutdown)
|
||
|
+ ret = -ECANCELED;
|
||
|
+ else
|
||
|
+ ret = idr_get_new(&client->resource_idr, resource,
|
||
|
+ &resource->handle);
|
||
|
+ if (ret >= 0) {
|
||
|
+ client_get(client);
|
||
|
+ if (resource->release == release_iso_resource)
|
||
|
+ schedule_iso_resource(container_of(resource,
|
||
|
+ struct iso_resource, resource));
|
||
|
+ }
|
||
|
spin_unlock_irqrestore(&client->lock, flags);
|
||
|
+
|
||
|
+ if (ret == -EAGAIN)
|
||
|
+ goto retry;
|
||
|
+
|
||
|
+ return ret < 0 ? ret : 0;
|
||
|
}
|
||
|
|
||
|
-static int
|
||
|
-release_client_resource(struct client *client, u32 handle,
|
||
|
- struct client_resource **resource)
|
||
|
+static int release_client_resource(struct client *client, u32 handle,
|
||
|
+ client_resource_release_fn_t release,
|
||
|
+ struct client_resource **resource)
|
||
|
{
|
||
|
struct client_resource *r;
|
||
|
- unsigned long flags;
|
||
|
|
||
|
- spin_lock_irqsave(&client->lock, flags);
|
||
|
- list_for_each_entry(r, &client->resource_list, link) {
|
||
|
- if (r->handle == handle) {
|
||
|
- list_del(&r->link);
|
||
|
- break;
|
||
|
- }
|
||
|
- }
|
||
|
- spin_unlock_irqrestore(&client->lock, flags);
|
||
|
+ spin_lock_irq(&client->lock);
|
||
|
+ if (client->in_shutdown)
|
||
|
+ r = NULL;
|
||
|
+ else
|
||
|
+ r = idr_find(&client->resource_idr, handle);
|
||
|
+ if (r && r->release == release)
|
||
|
+ idr_remove(&client->resource_idr, handle);
|
||
|
+ spin_unlock_irq(&client->lock);
|
||
|
|
||
|
- if (&r->link == &client->resource_list)
|
||
|
+ if (!(r && r->release == release))
|
||
|
return -EINVAL;
|
||
|
|
||
|
if (resource)
|
||
|
@@ -350,203 +448,242 @@ release_client_resource(struct client *c
|
||
|
else
|
||
|
r->release(client, r);
|
||
|
|
||
|
+ client_put(client);
|
||
|
+
|
||
|
return 0;
|
||
|
}
|
||
|
|
||
|
-static void
|
||
|
-release_transaction(struct client *client, struct client_resource *resource)
|
||
|
+static void release_transaction(struct client *client,
|
||
|
+ struct client_resource *resource)
|
||
|
{
|
||
|
- struct response *response =
|
||
|
- container_of(resource, struct response, resource);
|
||
|
+ struct outbound_transaction_resource *r = container_of(resource,
|
||
|
+ struct outbound_transaction_resource, resource);
|
||
|
|
||
|
- fw_cancel_transaction(client->device->card, &response->transaction);
|
||
|
+ fw_cancel_transaction(client->device->card, &r->transaction);
|
||
|
}
|
||
|
|
||
|
-static void
|
||
|
-complete_transaction(struct fw_card *card, int rcode,
|
||
|
- void *payload, size_t length, void *data)
|
||
|
+static void complete_transaction(struct fw_card *card, int rcode,
|
||
|
+ void *payload, size_t length, void *data)
|
||
|
{
|
||
|
- struct response *response = data;
|
||
|
- struct client *client = response->client;
|
||
|
+ struct outbound_transaction_event *e = data;
|
||
|
+ struct fw_cdev_event_response *rsp = &e->response;
|
||
|
+ struct client *client = e->client;
|
||
|
unsigned long flags;
|
||
|
- struct fw_cdev_event_response *r = &response->response;
|
||
|
|
||
|
- if (length < r->length)
|
||
|
- r->length = length;
|
||
|
+ if (length < rsp->length)
|
||
|
+ rsp->length = length;
|
||
|
if (rcode == RCODE_COMPLETE)
|
||
|
- memcpy(r->data, payload, r->length);
|
||
|
+ memcpy(rsp->data, payload, rsp->length);
|
||
|
|
||
|
spin_lock_irqsave(&client->lock, flags);
|
||
|
- list_del(&response->resource.link);
|
||
|
+ /*
|
||
|
+ * 1. If called while in shutdown, the idr tree must be left untouched.
|
||
|
+ * The idr handle will be removed and the client reference will be
|
||
|
+ * dropped later.
|
||
|
+ * 2. If the call chain was release_client_resource ->
|
||
|
+ * release_transaction -> complete_transaction (instead of a normal
|
||
|
+ * conclusion of the transaction), i.e. if this resource was already
|
||
|
+ * unregistered from the idr, the client reference will be dropped
|
||
|
+ * by release_client_resource and we must not drop it here.
|
||
|
+ */
|
||
|
+ if (!client->in_shutdown &&
|
||
|
+ idr_find(&client->resource_idr, e->r.resource.handle)) {
|
||
|
+ idr_remove(&client->resource_idr, e->r.resource.handle);
|
||
|
+ /* Drop the idr's reference */
|
||
|
+ client_put(client);
|
||
|
+ }
|
||
|
spin_unlock_irqrestore(&client->lock, flags);
|
||
|
|
||
|
- r->type = FW_CDEV_EVENT_RESPONSE;
|
||
|
- r->rcode = rcode;
|
||
|
+ rsp->type = FW_CDEV_EVENT_RESPONSE;
|
||
|
+ rsp->rcode = rcode;
|
||
|
|
||
|
/*
|
||
|
- * In the case that sizeof(*r) doesn't align with the position of the
|
||
|
+ * In the case that sizeof(*rsp) doesn't align with the position of the
|
||
|
* data, and the read is short, preserve an extra copy of the data
|
||
|
* to stay compatible with a pre-2.6.27 bug. Since the bug is harmless
|
||
|
* for short reads and some apps depended on it, this is both safe
|
||
|
* and prudent for compatibility.
|
||
|
*/
|
||
|
- if (r->length <= sizeof(*r) - offsetof(typeof(*r), data))
|
||
|
- queue_event(client, &response->event, r, sizeof(*r),
|
||
|
- r->data, r->length);
|
||
|
+ if (rsp->length <= sizeof(*rsp) - offsetof(typeof(*rsp), data))
|
||
|
+ queue_event(client, &e->event, rsp, sizeof(*rsp),
|
||
|
+ rsp->data, rsp->length);
|
||
|
else
|
||
|
- queue_event(client, &response->event, r, sizeof(*r) + r->length,
|
||
|
+ queue_event(client, &e->event, rsp, sizeof(*rsp) + rsp->length,
|
||
|
NULL, 0);
|
||
|
+
|
||
|
+ /* Drop the transaction callback's reference */
|
||
|
+ client_put(client);
|
||
|
}
|
||
|
|
||
|
-static int ioctl_send_request(struct client *client, void *buffer)
|
||
|
+static int init_request(struct client *client,
|
||
|
+ struct fw_cdev_send_request *request,
|
||
|
+ int destination_id, int speed)
|
||
|
{
|
||
|
- struct fw_device *device = client->device;
|
||
|
- struct fw_cdev_send_request *request = buffer;
|
||
|
- struct response *response;
|
||
|
+ struct outbound_transaction_event *e;
|
||
|
+ int ret;
|
||
|
|
||
|
- /* What is the biggest size we'll accept, really? */
|
||
|
- if (request->length > 4096)
|
||
|
- return -EINVAL;
|
||
|
+ if (request->length > 4096 || request->length > 512 << speed)
|
||
|
+ return -EIO;
|
||
|
|
||
|
- response = kmalloc(sizeof(*response) + request->length, GFP_KERNEL);
|
||
|
- if (response == NULL)
|
||
|
+ e = kmalloc(sizeof(*e) + request->length, GFP_KERNEL);
|
||
|
+ if (e == NULL)
|
||
|
return -ENOMEM;
|
||
|
|
||
|
- response->client = client;
|
||
|
- response->response.length = request->length;
|
||
|
- response->response.closure = request->closure;
|
||
|
+ e->client = client;
|
||
|
+ e->response.length = request->length;
|
||
|
+ e->response.closure = request->closure;
|
||
|
|
||
|
if (request->data &&
|
||
|
- copy_from_user(response->response.data,
|
||
|
+ copy_from_user(e->response.data,
|
||
|
u64_to_uptr(request->data), request->length)) {
|
||
|
- kfree(response);
|
||
|
- return -EFAULT;
|
||
|
+ ret = -EFAULT;
|
||
|
+ goto failed;
|
||
|
}
|
||
|
|
||
|
- response->resource.release = release_transaction;
|
||
|
- add_client_resource(client, &response->resource);
|
||
|
-
|
||
|
- fw_send_request(device->card, &response->transaction,
|
||
|
- request->tcode & 0x1f,
|
||
|
- device->node->node_id,
|
||
|
- request->generation,
|
||
|
- device->max_speed,
|
||
|
- request->offset,
|
||
|
- response->response.data, request->length,
|
||
|
- complete_transaction, response);
|
||
|
+ e->r.resource.release = release_transaction;
|
||
|
+ ret = add_client_resource(client, &e->r.resource, GFP_KERNEL);
|
||
|
+ if (ret < 0)
|
||
|
+ goto failed;
|
||
|
+
|
||
|
+ /* Get a reference for the transaction callback */
|
||
|
+ client_get(client);
|
||
|
+
|
||
|
+ fw_send_request(client->device->card, &e->r.transaction,
|
||
|
+ request->tcode & 0x1f, destination_id,
|
||
|
+ request->generation, speed, request->offset,
|
||
|
+ e->response.data, request->length,
|
||
|
+ complete_transaction, e);
|
||
|
|
||
|
if (request->data)
|
||
|
return sizeof(request) + request->length;
|
||
|
else
|
||
|
return sizeof(request);
|
||
|
+ failed:
|
||
|
+ kfree(e);
|
||
|
+
|
||
|
+ return ret;
|
||
|
}
|
||
|
|
||
|
-struct address_handler {
|
||
|
- struct fw_address_handler handler;
|
||
|
- __u64 closure;
|
||
|
- struct client *client;
|
||
|
- struct client_resource resource;
|
||
|
-};
|
||
|
+static int ioctl_send_request(struct client *client, void *buffer)
|
||
|
+{
|
||
|
+ struct fw_cdev_send_request *request = buffer;
|
||
|
|
||
|
-struct request {
|
||
|
- struct fw_request *request;
|
||
|
- void *data;
|
||
|
- size_t length;
|
||
|
- struct client_resource resource;
|
||
|
-};
|
||
|
+ switch (request->tcode) {
|
||
|
+ case TCODE_WRITE_QUADLET_REQUEST:
|
||
|
+ case TCODE_WRITE_BLOCK_REQUEST:
|
||
|
+ case TCODE_READ_QUADLET_REQUEST:
|
||
|
+ case TCODE_READ_BLOCK_REQUEST:
|
||
|
+ case TCODE_LOCK_MASK_SWAP:
|
||
|
+ case TCODE_LOCK_COMPARE_SWAP:
|
||
|
+ case TCODE_LOCK_FETCH_ADD:
|
||
|
+ case TCODE_LOCK_LITTLE_ADD:
|
||
|
+ case TCODE_LOCK_BOUNDED_ADD:
|
||
|
+ case TCODE_LOCK_WRAP_ADD:
|
||
|
+ case TCODE_LOCK_VENDOR_DEPENDENT:
|
||
|
+ break;
|
||
|
+ default:
|
||
|
+ return -EINVAL;
|
||
|
+ }
|
||
|
|
||
|
-struct request_event {
|
||
|
- struct event event;
|
||
|
- struct fw_cdev_event_request request;
|
||
|
-};
|
||
|
+ return init_request(client, request, client->device->node->node_id,
|
||
|
+ client->device->max_speed);
|
||
|
+}
|
||
|
|
||
|
-static void
|
||
|
-release_request(struct client *client, struct client_resource *resource)
|
||
|
+static void release_request(struct client *client,
|
||
|
+ struct client_resource *resource)
|
||
|
{
|
||
|
- struct request *request =
|
||
|
- container_of(resource, struct request, resource);
|
||
|
+ struct inbound_transaction_resource *r = container_of(resource,
|
||
|
+ struct inbound_transaction_resource, resource);
|
||
|
|
||
|
- fw_send_response(client->device->card, request->request,
|
||
|
+ fw_send_response(client->device->card, r->request,
|
||
|
RCODE_CONFLICT_ERROR);
|
||
|
- kfree(request);
|
||
|
+ kfree(r);
|
||
|
}
|
||
|
|
||
|
-static void
|
||
|
-handle_request(struct fw_card *card, struct fw_request *r,
|
||
|
- int tcode, int destination, int source,
|
||
|
- int generation, int speed,
|
||
|
- unsigned long long offset,
|
||
|
- void *payload, size_t length, void *callback_data)
|
||
|
-{
|
||
|
- struct address_handler *handler = callback_data;
|
||
|
- struct request *request;
|
||
|
- struct request_event *e;
|
||
|
- struct client *client = handler->client;
|
||
|
+static void handle_request(struct fw_card *card, struct fw_request *request,
|
||
|
+ int tcode, int destination, int source,
|
||
|
+ int generation, int speed,
|
||
|
+ unsigned long long offset,
|
||
|
+ void *payload, size_t length, void *callback_data)
|
||
|
+{
|
||
|
+ struct address_handler_resource *handler = callback_data;
|
||
|
+ struct inbound_transaction_resource *r;
|
||
|
+ struct inbound_transaction_event *e;
|
||
|
+ int ret;
|
||
|
|
||
|
- request = kmalloc(sizeof(*request), GFP_ATOMIC);
|
||
|
+ r = kmalloc(sizeof(*r), GFP_ATOMIC);
|
||
|
e = kmalloc(sizeof(*e), GFP_ATOMIC);
|
||
|
- if (request == NULL || e == NULL) {
|
||
|
- kfree(request);
|
||
|
- kfree(e);
|
||
|
- fw_send_response(card, r, RCODE_CONFLICT_ERROR);
|
||
|
- return;
|
||
|
- }
|
||
|
-
|
||
|
- request->request = r;
|
||
|
- request->data = payload;
|
||
|
- request->length = length;
|
||
|
+ if (r == NULL || e == NULL)
|
||
|
+ goto failed;
|
||
|
|
||
|
- request->resource.release = release_request;
|
||
|
- add_client_resource(client, &request->resource);
|
||
|
+ r->request = request;
|
||
|
+ r->data = payload;
|
||
|
+ r->length = length;
|
||
|
+
|
||
|
+ r->resource.release = release_request;
|
||
|
+ ret = add_client_resource(handler->client, &r->resource, GFP_ATOMIC);
|
||
|
+ if (ret < 0)
|
||
|
+ goto failed;
|
||
|
|
||
|
e->request.type = FW_CDEV_EVENT_REQUEST;
|
||
|
e->request.tcode = tcode;
|
||
|
e->request.offset = offset;
|
||
|
e->request.length = length;
|
||
|
- e->request.handle = request->resource.handle;
|
||
|
+ e->request.handle = r->resource.handle;
|
||
|
e->request.closure = handler->closure;
|
||
|
|
||
|
- queue_event(client, &e->event,
|
||
|
+ queue_event(handler->client, &e->event,
|
||
|
&e->request, sizeof(e->request), payload, length);
|
||
|
+ return;
|
||
|
+
|
||
|
+ failed:
|
||
|
+ kfree(r);
|
||
|
+ kfree(e);
|
||
|
+ fw_send_response(card, request, RCODE_CONFLICT_ERROR);
|
||
|
}
|
||
|
|
||
|
-static void
|
||
|
-release_address_handler(struct client *client,
|
||
|
- struct client_resource *resource)
|
||
|
+static void release_address_handler(struct client *client,
|
||
|
+ struct client_resource *resource)
|
||
|
{
|
||
|
- struct address_handler *handler =
|
||
|
- container_of(resource, struct address_handler, resource);
|
||
|
+ struct address_handler_resource *r =
|
||
|
+ container_of(resource, struct address_handler_resource, resource);
|
||
|
|
||
|
- fw_core_remove_address_handler(&handler->handler);
|
||
|
- kfree(handler);
|
||
|
+ fw_core_remove_address_handler(&r->handler);
|
||
|
+ kfree(r);
|
||
|
}
|
||
|
|
||
|
static int ioctl_allocate(struct client *client, void *buffer)
|
||
|
{
|
||
|
struct fw_cdev_allocate *request = buffer;
|
||
|
- struct address_handler *handler;
|
||
|
+ struct address_handler_resource *r;
|
||
|
struct fw_address_region region;
|
||
|
+ int ret;
|
||
|
|
||
|
- handler = kmalloc(sizeof(*handler), GFP_KERNEL);
|
||
|
- if (handler == NULL)
|
||
|
+ r = kmalloc(sizeof(*r), GFP_KERNEL);
|
||
|
+ if (r == NULL)
|
||
|
return -ENOMEM;
|
||
|
|
||
|
region.start = request->offset;
|
||
|
region.end = request->offset + request->length;
|
||
|
- handler->handler.length = request->length;
|
||
|
- handler->handler.address_callback = handle_request;
|
||
|
- handler->handler.callback_data = handler;
|
||
|
- handler->closure = request->closure;
|
||
|
- handler->client = client;
|
||
|
-
|
||
|
- if (fw_core_add_address_handler(&handler->handler, ®ion) < 0) {
|
||
|
- kfree(handler);
|
||
|
- return -EBUSY;
|
||
|
+ r->handler.length = request->length;
|
||
|
+ r->handler.address_callback = handle_request;
|
||
|
+ r->handler.callback_data = r;
|
||
|
+ r->closure = request->closure;
|
||
|
+ r->client = client;
|
||
|
+
|
||
|
+ ret = fw_core_add_address_handler(&r->handler, ®ion);
|
||
|
+ if (ret < 0) {
|
||
|
+ kfree(r);
|
||
|
+ return ret;
|
||
|
}
|
||
|
|
||
|
- handler->resource.release = release_address_handler;
|
||
|
- add_client_resource(client, &handler->resource);
|
||
|
- request->handle = handler->resource.handle;
|
||
|
+ r->resource.release = release_address_handler;
|
||
|
+ ret = add_client_resource(client, &r->resource, GFP_KERNEL);
|
||
|
+ if (ret < 0) {
|
||
|
+ release_address_handler(client, &r->resource);
|
||
|
+ return ret;
|
||
|
+ }
|
||
|
+ request->handle = r->resource.handle;
|
||
|
|
||
|
return 0;
|
||
|
}
|
||
|
@@ -555,18 +692,22 @@ static int ioctl_deallocate(struct clien
|
||
|
{
|
||
|
struct fw_cdev_deallocate *request = buffer;
|
||
|
|
||
|
- return release_client_resource(client, request->handle, NULL);
|
||
|
+ return release_client_resource(client, request->handle,
|
||
|
+ release_address_handler, NULL);
|
||
|
}
|
||
|
|
||
|
static int ioctl_send_response(struct client *client, void *buffer)
|
||
|
{
|
||
|
struct fw_cdev_send_response *request = buffer;
|
||
|
struct client_resource *resource;
|
||
|
- struct request *r;
|
||
|
+ struct inbound_transaction_resource *r;
|
||
|
|
||
|
- if (release_client_resource(client, request->handle, &resource) < 0)
|
||
|
+ if (release_client_resource(client, request->handle,
|
||
|
+ release_request, &resource) < 0)
|
||
|
return -EINVAL;
|
||
|
- r = container_of(resource, struct request, resource);
|
||
|
+
|
||
|
+ r = container_of(resource, struct inbound_transaction_resource,
|
||
|
+ resource);
|
||
|
if (request->length < r->length)
|
||
|
r->length = request->length;
|
||
|
if (copy_from_user(r->data, u64_to_uptr(request->data), r->length))
|
||
|
@@ -588,85 +729,84 @@ static int ioctl_initiate_bus_reset(stru
|
||
|
return fw_core_initiate_bus_reset(client->device->card, short_reset);
|
||
|
}
|
||
|
|
||
|
-struct descriptor {
|
||
|
- struct fw_descriptor d;
|
||
|
- struct client_resource resource;
|
||
|
- u32 data[0];
|
||
|
-};
|
||
|
-
|
||
|
static void release_descriptor(struct client *client,
|
||
|
struct client_resource *resource)
|
||
|
{
|
||
|
- struct descriptor *descriptor =
|
||
|
- container_of(resource, struct descriptor, resource);
|
||
|
+ struct descriptor_resource *r =
|
||
|
+ container_of(resource, struct descriptor_resource, resource);
|
||
|
|
||
|
- fw_core_remove_descriptor(&descriptor->d);
|
||
|
- kfree(descriptor);
|
||
|
+ fw_core_remove_descriptor(&r->descriptor);
|
||
|
+ kfree(r);
|
||
|
}
|
||
|
|
||
|
static int ioctl_add_descriptor(struct client *client, void *buffer)
|
||
|
{
|
||
|
struct fw_cdev_add_descriptor *request = buffer;
|
||
|
- struct descriptor *descriptor;
|
||
|
- int retval;
|
||
|
+ struct descriptor_resource *r;
|
||
|
+ int ret;
|
||
|
|
||
|
if (request->length > 256)
|
||
|
return -EINVAL;
|
||
|
|
||
|
- descriptor =
|
||
|
- kmalloc(sizeof(*descriptor) + request->length * 4, GFP_KERNEL);
|
||
|
- if (descriptor == NULL)
|
||
|
+ r = kmalloc(sizeof(*r) + request->length * 4, GFP_KERNEL);
|
||
|
+ if (r == NULL)
|
||
|
return -ENOMEM;
|
||
|
|
||
|
- if (copy_from_user(descriptor->data,
|
||
|
+ if (copy_from_user(r->data,
|
||
|
u64_to_uptr(request->data), request->length * 4)) {
|
||
|
- kfree(descriptor);
|
||
|
- return -EFAULT;
|
||
|
+ ret = -EFAULT;
|
||
|
+ goto failed;
|
||
|
}
|
||
|
|
||
|
- descriptor->d.length = request->length;
|
||
|
- descriptor->d.immediate = request->immediate;
|
||
|
- descriptor->d.key = request->key;
|
||
|
- descriptor->d.data = descriptor->data;
|
||
|
-
|
||
|
- retval = fw_core_add_descriptor(&descriptor->d);
|
||
|
- if (retval < 0) {
|
||
|
- kfree(descriptor);
|
||
|
- return retval;
|
||
|
+ r->descriptor.length = request->length;
|
||
|
+ r->descriptor.immediate = request->immediate;
|
||
|
+ r->descriptor.key = request->key;
|
||
|
+ r->descriptor.data = r->data;
|
||
|
+
|
||
|
+ ret = fw_core_add_descriptor(&r->descriptor);
|
||
|
+ if (ret < 0)
|
||
|
+ goto failed;
|
||
|
+
|
||
|
+ r->resource.release = release_descriptor;
|
||
|
+ ret = add_client_resource(client, &r->resource, GFP_KERNEL);
|
||
|
+ if (ret < 0) {
|
||
|
+ fw_core_remove_descriptor(&r->descriptor);
|
||
|
+ goto failed;
|
||
|
}
|
||
|
-
|
||
|
- descriptor->resource.release = release_descriptor;
|
||
|
- add_client_resource(client, &descriptor->resource);
|
||
|
- request->handle = descriptor->resource.handle;
|
||
|
+ request->handle = r->resource.handle;
|
||
|
|
||
|
return 0;
|
||
|
+ failed:
|
||
|
+ kfree(r);
|
||
|
+
|
||
|
+ return ret;
|
||
|
}
|
||
|
|
||
|
static int ioctl_remove_descriptor(struct client *client, void *buffer)
|
||
|
{
|
||
|
struct fw_cdev_remove_descriptor *request = buffer;
|
||
|
|
||
|
- return release_client_resource(client, request->handle, NULL);
|
||
|
+ return release_client_resource(client, request->handle,
|
||
|
+ release_descriptor, NULL);
|
||
|
}
|
||
|
|
||
|
-static void
|
||
|
-iso_callback(struct fw_iso_context *context, u32 cycle,
|
||
|
- size_t header_length, void *header, void *data)
|
||
|
+static void iso_callback(struct fw_iso_context *context, u32 cycle,
|
||
|
+ size_t header_length, void *header, void *data)
|
||
|
{
|
||
|
struct client *client = data;
|
||
|
- struct iso_interrupt *irq;
|
||
|
+ struct iso_interrupt_event *e;
|
||
|
|
||
|
- irq = kzalloc(sizeof(*irq) + header_length, GFP_ATOMIC);
|
||
|
- if (irq == NULL)
|
||
|
+ e = kzalloc(sizeof(*e) + header_length, GFP_ATOMIC);
|
||
|
+ if (e == NULL)
|
||
|
return;
|
||
|
|
||
|
- irq->interrupt.type = FW_CDEV_EVENT_ISO_INTERRUPT;
|
||
|
- irq->interrupt.closure = client->iso_closure;
|
||
|
- irq->interrupt.cycle = cycle;
|
||
|
- irq->interrupt.header_length = header_length;
|
||
|
- memcpy(irq->interrupt.header, header, header_length);
|
||
|
- queue_event(client, &irq->event, &irq->interrupt,
|
||
|
- sizeof(irq->interrupt) + header_length, NULL, 0);
|
||
|
+ e->interrupt.type = FW_CDEV_EVENT_ISO_INTERRUPT;
|
||
|
+ e->interrupt.closure = client->iso_closure;
|
||
|
+ e->interrupt.cycle = cycle;
|
||
|
+ e->interrupt.header_length = header_length;
|
||
|
+ memcpy(e->interrupt.header, header, header_length);
|
||
|
+ queue_event(client, &e->event, &e->interrupt,
|
||
|
+ sizeof(e->interrupt) + header_length, NULL, 0);
|
||
|
}
|
||
|
|
||
|
static int ioctl_create_iso_context(struct client *client, void *buffer)
|
||
|
@@ -871,6 +1011,237 @@ static int ioctl_get_cycle_timer(struct
|
||
|
return 0;
|
||
|
}
|
||
|
|
||
|
+static void iso_resource_work(struct work_struct *work)
|
||
|
+{
|
||
|
+ struct iso_resource_event *e;
|
||
|
+ struct iso_resource *r =
|
||
|
+ container_of(work, struct iso_resource, work.work);
|
||
|
+ struct client *client = r->client;
|
||
|
+ int generation, channel, bandwidth, todo;
|
||
|
+ bool skip, free, success;
|
||
|
+
|
||
|
+ spin_lock_irq(&client->lock);
|
||
|
+ generation = client->device->generation;
|
||
|
+ todo = r->todo;
|
||
|
+ /* Allow 1000ms grace period for other reallocations. */
|
||
|
+ if (todo == ISO_RES_ALLOC &&
|
||
|
+ time_is_after_jiffies(client->device->card->reset_jiffies + HZ)) {
|
||
|
+ if (schedule_delayed_work(&r->work, DIV_ROUND_UP(HZ, 3)))
|
||
|
+ client_get(client);
|
||
|
+ skip = true;
|
||
|
+ } else {
|
||
|
+ /* We could be called twice within the same generation. */
|
||
|
+ skip = todo == ISO_RES_REALLOC &&
|
||
|
+ r->generation == generation;
|
||
|
+ }
|
||
|
+ free = todo == ISO_RES_DEALLOC ||
|
||
|
+ todo == ISO_RES_ALLOC_ONCE ||
|
||
|
+ todo == ISO_RES_DEALLOC_ONCE;
|
||
|
+ r->generation = generation;
|
||
|
+ spin_unlock_irq(&client->lock);
|
||
|
+
|
||
|
+ if (skip)
|
||
|
+ goto out;
|
||
|
+
|
||
|
+ bandwidth = r->bandwidth;
|
||
|
+
|
||
|
+ fw_iso_resource_manage(client->device->card, generation,
|
||
|
+ r->channels, &channel, &bandwidth,
|
||
|
+ todo == ISO_RES_ALLOC ||
|
||
|
+ todo == ISO_RES_REALLOC ||
|
||
|
+ todo == ISO_RES_ALLOC_ONCE);
|
||
|
+ /*
|
||
|
+ * Is this generation outdated already? As long as this resource sticks
|
||
|
+ * in the idr, it will be scheduled again for a newer generation or at
|
||
|
+ * shutdown.
|
||
|
+ */
|
||
|
+ if (channel == -EAGAIN &&
|
||
|
+ (todo == ISO_RES_ALLOC || todo == ISO_RES_REALLOC))
|
||
|
+ goto out;
|
||
|
+
|
||
|
+ success = channel >= 0 || bandwidth > 0;
|
||
|
+
|
||
|
+ spin_lock_irq(&client->lock);
|
||
|
+ /*
|
||
|
+ * Transit from allocation to reallocation, except if the client
|
||
|
+ * requested deallocation in the meantime.
|
||
|
+ */
|
||
|
+ if (r->todo == ISO_RES_ALLOC)
|
||
|
+ r->todo = ISO_RES_REALLOC;
|
||
|
+ /*
|
||
|
+ * Allocation or reallocation failure? Pull this resource out of the
|
||
|
+ * idr and prepare for deletion, unless the client is shutting down.
|
||
|
+ */
|
||
|
+ if (r->todo == ISO_RES_REALLOC && !success &&
|
||
|
+ !client->in_shutdown &&
|
||
|
+ idr_find(&client->resource_idr, r->resource.handle)) {
|
||
|
+ idr_remove(&client->resource_idr, r->resource.handle);
|
||
|
+ client_put(client);
|
||
|
+ free = true;
|
||
|
+ }
|
||
|
+ spin_unlock_irq(&client->lock);
|
||
|
+
|
||
|
+ if (todo == ISO_RES_ALLOC && channel >= 0)
|
||
|
+ r->channels = 1ULL << channel;
|
||
|
+
|
||
|
+ if (todo == ISO_RES_REALLOC && success)
|
||
|
+ goto out;
|
||
|
+
|
||
|
+ if (todo == ISO_RES_ALLOC || todo == ISO_RES_ALLOC_ONCE) {
|
||
|
+ e = r->e_alloc;
|
||
|
+ r->e_alloc = NULL;
|
||
|
+ } else {
|
||
|
+ e = r->e_dealloc;
|
||
|
+ r->e_dealloc = NULL;
|
||
|
+ }
|
||
|
+ e->resource.handle = r->resource.handle;
|
||
|
+ e->resource.channel = channel;
|
||
|
+ e->resource.bandwidth = bandwidth;
|
||
|
+
|
||
|
+ queue_event(client, &e->event,
|
||
|
+ &e->resource, sizeof(e->resource), NULL, 0);
|
||
|
+
|
||
|
+ if (free) {
|
||
|
+ cancel_delayed_work(&r->work);
|
||
|
+ kfree(r->e_alloc);
|
||
|
+ kfree(r->e_dealloc);
|
||
|
+ kfree(r);
|
||
|
+ }
|
||
|
+ out:
|
||
|
+ client_put(client);
|
||
|
+}
|
||
|
+
|
||
|
+static void schedule_iso_resource(struct iso_resource *r)
|
||
|
+{
|
||
|
+ client_get(r->client);
|
||
|
+ if (!schedule_delayed_work(&r->work, 0))
|
||
|
+ client_put(r->client);
|
||
|
+}
|
||
|
+
|
||
|
+static void release_iso_resource(struct client *client,
|
||
|
+ struct client_resource *resource)
|
||
|
+{
|
||
|
+ struct iso_resource *r =
|
||
|
+ container_of(resource, struct iso_resource, resource);
|
||
|
+
|
||
|
+ spin_lock_irq(&client->lock);
|
||
|
+ r->todo = ISO_RES_DEALLOC;
|
||
|
+ schedule_iso_resource(r);
|
||
|
+ spin_unlock_irq(&client->lock);
|
||
|
+}
|
||
|
+
|
||
|
+static int init_iso_resource(struct client *client,
|
||
|
+ struct fw_cdev_allocate_iso_resource *request, int todo)
|
||
|
+{
|
||
|
+ struct iso_resource_event *e1, *e2;
|
||
|
+ struct iso_resource *r;
|
||
|
+ int ret;
|
||
|
+
|
||
|
+ if ((request->channels == 0 && request->bandwidth == 0) ||
|
||
|
+ request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
|
||
|
+ request->bandwidth < 0)
|
||
|
+ return -EINVAL;
|
||
|
+
|
||
|
+ r = kmalloc(sizeof(*r), GFP_KERNEL);
|
||
|
+ e1 = kmalloc(sizeof(*e1), GFP_KERNEL);
|
||
|
+ e2 = kmalloc(sizeof(*e2), GFP_KERNEL);
|
||
|
+ if (r == NULL || e1 == NULL || e2 == NULL) {
|
||
|
+ ret = -ENOMEM;
|
||
|
+ goto fail;
|
||
|
+ }
|
||
|
+
|
||
|
+ INIT_DELAYED_WORK(&r->work, iso_resource_work);
|
||
|
+ r->client = client;
|
||
|
+ r->todo = todo;
|
||
|
+ r->generation = -1;
|
||
|
+ r->channels = request->channels;
|
||
|
+ r->bandwidth = request->bandwidth;
|
||
|
+ r->e_alloc = e1;
|
||
|
+ r->e_dealloc = e2;
|
||
|
+
|
||
|
+ e1->resource.closure = request->closure;
|
||
|
+ e1->resource.type = FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED;
|
||
|
+ e2->resource.closure = request->closure;
|
||
|
+ e2->resource.type = FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED;
|
||
|
+
|
||
|
+ if (todo == ISO_RES_ALLOC) {
|
||
|
+ r->resource.release = release_iso_resource;
|
||
|
+ ret = add_client_resource(client, &r->resource, GFP_KERNEL);
|
||
|
+ if (ret < 0)
|
||
|
+ goto fail;
|
||
|
+ } else {
|
||
|
+ r->resource.release = NULL;
|
||
|
+ r->resource.handle = -1;
|
||
|
+ schedule_iso_resource(r);
|
||
|
+ }
|
||
|
+ request->handle = r->resource.handle;
|
||
|
+
|
||
|
+ return 0;
|
||
|
+ fail:
|
||
|
+ kfree(r);
|
||
|
+ kfree(e1);
|
||
|
+ kfree(e2);
|
||
|
+
|
||
|
+ return ret;
|
||
|
+}
|
||
|
+
|
||
|
+static int ioctl_allocate_iso_resource(struct client *client, void *buffer)
|
||
|
+{
|
||
|
+ struct fw_cdev_allocate_iso_resource *request = buffer;
|
||
|
+
|
||
|
+ return init_iso_resource(client, request, ISO_RES_ALLOC);
|
||
|
+}
|
||
|
+
|
||
|
+static int ioctl_deallocate_iso_resource(struct client *client, void *buffer)
|
||
|
+{
|
||
|
+ struct fw_cdev_deallocate *request = buffer;
|
||
|
+
|
||
|
+ return release_client_resource(client, request->handle,
|
||
|
+ release_iso_resource, NULL);
|
||
|
+}
|
||
|
+
|
||
|
+static int ioctl_allocate_iso_resource_once(struct client *client, void *buffer)
|
||
|
+{
|
||
|
+ struct fw_cdev_allocate_iso_resource *request = buffer;
|
||
|
+
|
||
|
+ return init_iso_resource(client, request, ISO_RES_ALLOC_ONCE);
|
||
|
+}
|
||
|
+
|
||
|
+static int ioctl_deallocate_iso_resource_once(struct client *client, void *buffer)
|
||
|
+{
|
||
|
+ struct fw_cdev_allocate_iso_resource *request = buffer;
|
||
|
+
|
||
|
+ return init_iso_resource(client, request, ISO_RES_DEALLOC_ONCE);
|
||
|
+}
|
||
|
+
|
||
|
+static int ioctl_get_speed(struct client *client, void *buffer)
|
||
|
+{
|
||
|
+ struct fw_cdev_get_speed *request = buffer;
|
||
|
+
|
||
|
+ request->max_speed = client->device->max_speed;
|
||
|
+
|
||
|
+ return 0;
|
||
|
+}
|
||
|
+
|
||
|
+static int ioctl_send_broadcast_request(struct client *client, void *buffer)
|
||
|
+{
|
||
|
+ struct fw_cdev_send_request *request = buffer;
|
||
|
+
|
||
|
+ switch (request->tcode) {
|
||
|
+ case TCODE_WRITE_QUADLET_REQUEST:
|
||
|
+ case TCODE_WRITE_BLOCK_REQUEST:
|
||
|
+ break;
|
||
|
+ default:
|
||
|
+ return -EINVAL;
|
||
|
+ }
|
||
|
+
|
||
|
+ /* Security policy: Only allow accesses to Units Space. */
|
||
|
+ if (request->offset < CSR_REGISTER_BASE + CSR_CONFIG_ROM_END)
|
||
|
+ return -EACCES;
|
||
|
+
|
||
|
+ return init_request(client, request, LOCAL_BUS | 0x3f, SCODE_100);
|
||
|
+}
|
||
|
+
|
||
|
static int (* const ioctl_handlers[])(struct client *client, void *buffer) = {
|
||
|
ioctl_get_info,
|
||
|
ioctl_send_request,
|
||
|
@@ -885,13 +1256,19 @@ static int (* const ioctl_handlers[])(st
|
||
|
ioctl_start_iso,
|
||
|
ioctl_stop_iso,
|
||
|
ioctl_get_cycle_timer,
|
||
|
+ ioctl_allocate_iso_resource,
|
||
|
+ ioctl_deallocate_iso_resource,
|
||
|
+ ioctl_allocate_iso_resource_once,
|
||
|
+ ioctl_deallocate_iso_resource_once,
|
||
|
+ ioctl_get_speed,
|
||
|
+ ioctl_send_broadcast_request,
|
||
|
};
|
||
|
|
||
|
-static int
|
||
|
-dispatch_ioctl(struct client *client, unsigned int cmd, void __user *arg)
|
||
|
+static int dispatch_ioctl(struct client *client,
|
||
|
+ unsigned int cmd, void __user *arg)
|
||
|
{
|
||
|
char buffer[256];
|
||
|
- int retval;
|
||
|
+ int ret;
|
||
|
|
||
|
if (_IOC_TYPE(cmd) != '#' ||
|
||
|
_IOC_NR(cmd) >= ARRAY_SIZE(ioctl_handlers))
|
||
|
@@ -903,9 +1280,9 @@ dispatch_ioctl(struct client *client, un
|
||
|
return -EFAULT;
|
||
|
}
|
||
|
|
||
|
- retval = ioctl_handlers[_IOC_NR(cmd)](client, buffer);
|
||
|
- if (retval < 0)
|
||
|
- return retval;
|
||
|
+ ret = ioctl_handlers[_IOC_NR(cmd)](client, buffer);
|
||
|
+ if (ret < 0)
|
||
|
+ return ret;
|
||
|
|
||
|
if (_IOC_DIR(cmd) & _IOC_READ) {
|
||
|
if (_IOC_SIZE(cmd) > sizeof(buffer) ||
|
||
|
@@ -913,12 +1290,11 @@ dispatch_ioctl(struct client *client, un
|
||
|
return -EFAULT;
|
||
|
}
|
||
|
|
||
|
- return retval;
|
||
|
+ return ret;
|
||
|
}
|
||
|
|
||
|
-static long
|
||
|
-fw_device_op_ioctl(struct file *file,
|
||
|
- unsigned int cmd, unsigned long arg)
|
||
|
+static long fw_device_op_ioctl(struct file *file,
|
||
|
+ unsigned int cmd, unsigned long arg)
|
||
|
{
|
||
|
struct client *client = file->private_data;
|
||
|
|
||
|
@@ -929,9 +1305,8 @@ fw_device_op_ioctl(struct file *file,
|
||
|
}
|
||
|
|
||
|
#ifdef CONFIG_COMPAT
|
||
|
-static long
|
||
|
-fw_device_op_compat_ioctl(struct file *file,
|
||
|
- unsigned int cmd, unsigned long arg)
|
||
|
+static long fw_device_op_compat_ioctl(struct file *file,
|
||
|
+ unsigned int cmd, unsigned long arg)
|
||
|
{
|
||
|
struct client *client = file->private_data;
|
||
|
|
||
|
@@ -947,7 +1322,7 @@ static int fw_device_op_mmap(struct file
|
||
|
struct client *client = file->private_data;
|
||
|
enum dma_data_direction direction;
|
||
|
unsigned long size;
|
||
|
- int page_count, retval;
|
||
|
+ int page_count, ret;
|
||
|
|
||
|
if (fw_device_is_shutdown(client->device))
|
||
|
return -ENODEV;
|
||
|
@@ -973,48 +1348,57 @@ static int fw_device_op_mmap(struct file
|
||
|
else
|
||
|
direction = DMA_FROM_DEVICE;
|
||
|
|
||
|
- retval = fw_iso_buffer_init(&client->buffer, client->device->card,
|
||
|
- page_count, direction);
|
||
|
- if (retval < 0)
|
||
|
- return retval;
|
||
|
+ ret = fw_iso_buffer_init(&client->buffer, client->device->card,
|
||
|
+ page_count, direction);
|
||
|
+ if (ret < 0)
|
||
|
+ return ret;
|
||
|
|
||
|
- retval = fw_iso_buffer_map(&client->buffer, vma);
|
||
|
- if (retval < 0)
|
||
|
+ ret = fw_iso_buffer_map(&client->buffer, vma);
|
||
|
+ if (ret < 0)
|
||
|
fw_iso_buffer_destroy(&client->buffer, client->device->card);
|
||
|
|
||
|
- return retval;
|
||
|
+ return ret;
|
||
|
+}
|
||
|
+
|
||
|
+static int shutdown_resource(int id, void *p, void *data)
|
||
|
+{
|
||
|
+ struct client_resource *r = p;
|
||
|
+ struct client *client = data;
|
||
|
+
|
||
|
+ r->release(client, r);
|
||
|
+ client_put(client);
|
||
|
+
|
||
|
+ return 0;
|
||
|
}
|
||
|
|
||
|
static int fw_device_op_release(struct inode *inode, struct file *file)
|
||
|
{
|
||
|
struct client *client = file->private_data;
|
||
|
struct event *e, *next_e;
|
||
|
- struct client_resource *r, *next_r;
|
||
|
- unsigned long flags;
|
||
|
|
||
|
- if (client->buffer.pages)
|
||
|
- fw_iso_buffer_destroy(&client->buffer, client->device->card);
|
||
|
+ mutex_lock(&client->device->client_list_mutex);
|
||
|
+ list_del(&client->link);
|
||
|
+ mutex_unlock(&client->device->client_list_mutex);
|
||
|
|
||
|
if (client->iso_context)
|
||
|
fw_iso_context_destroy(client->iso_context);
|
||
|
|
||
|
- list_for_each_entry_safe(r, next_r, &client->resource_list, link)
|
||
|
- r->release(client, r);
|
||
|
+ if (client->buffer.pages)
|
||
|
+ fw_iso_buffer_destroy(&client->buffer, client->device->card);
|
||
|
|
||
|
- /*
|
||
|
- * FIXME: We should wait for the async tasklets to stop
|
||
|
- * running before freeing the memory.
|
||
|
- */
|
||
|
+ /* Freeze client->resource_idr and client->event_list */
|
||
|
+ spin_lock_irq(&client->lock);
|
||
|
+ client->in_shutdown = true;
|
||
|
+ spin_unlock_irq(&client->lock);
|
||
|
+
|
||
|
+ idr_for_each(&client->resource_idr, shutdown_resource, client);
|
||
|
+ idr_remove_all(&client->resource_idr);
|
||
|
+ idr_destroy(&client->resource_idr);
|
||
|
|
||
|
list_for_each_entry_safe(e, next_e, &client->event_list, link)
|
||
|
kfree(e);
|
||
|
|
||
|
- spin_lock_irqsave(&client->device->card->lock, flags);
|
||
|
- list_del(&client->link);
|
||
|
- spin_unlock_irqrestore(&client->device->card->lock, flags);
|
||
|
-
|
||
|
- fw_device_put(client->device);
|
||
|
- kfree(client);
|
||
|
+ client_put(client);
|
||
|
|
||
|
return 0;
|
||
|
}
|
||
|
diff -Naurp linux-2.6-git/drivers/firewire/fw-device.c firewire-git/drivers/firewire/fw-device.c
--- linux-2.6-git/drivers/firewire/fw-device.c 2009-01-30 13:39:02.989651512 -0500
+++ firewire-git/drivers/firewire/fw-device.c 2009-01-30 13:35:51.860646788 -0500
@@ -27,8 +27,10 @@
#include <linux/idr.h>
#include <linux/jiffies.h>
#include <linux/string.h>
+#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/semaphore.h>
+#include <linux/spinlock.h>
#include <asm/system.h>
#include <linux/ctype.h>
#include "fw-transaction.h"
@@ -132,8 +134,7 @@ static int get_modalias(struct fw_unit *
vendor, model, specifier_id, version);
}

-static int
-fw_unit_uevent(struct device *dev, struct kobj_uevent_env *env)
+static int fw_unit_uevent(struct device *dev, struct kobj_uevent_env *env)
{
struct fw_unit *unit = fw_unit(dev);
char modalias[64];
@@ -191,8 +192,8 @@ struct config_rom_attribute {
u32 key;
};

-static ssize_t
-show_immediate(struct device *dev, struct device_attribute *dattr, char *buf)
+static ssize_t show_immediate(struct device *dev,
+ struct device_attribute *dattr, char *buf)
{
struct config_rom_attribute *attr =
container_of(dattr, struct config_rom_attribute, attr);
@@ -223,8 +224,8 @@ show_immediate(struct device *dev, struc
#define IMMEDIATE_ATTR(name, key) \
{ __ATTR(name, S_IRUGO, show_immediate, NULL), key }

-static ssize_t
-show_text_leaf(struct device *dev, struct device_attribute *dattr, char *buf)
+static ssize_t show_text_leaf(struct device *dev,
+ struct device_attribute *dattr, char *buf)
{
struct config_rom_attribute *attr =
container_of(dattr, struct config_rom_attribute, attr);
@@ -293,10 +294,9 @@ static struct config_rom_attribute confi
TEXT_LEAF_ATTR(hardware_version_name, CSR_HARDWARE_VERSION),
};

-static void
-init_fw_attribute_group(struct device *dev,
- struct device_attribute *attrs,
- struct fw_attribute_group *group)
+static void init_fw_attribute_group(struct device *dev,
+ struct device_attribute *attrs,
+ struct fw_attribute_group *group)
{
struct device_attribute *attr;
int i, j;
@@ -319,9 +319,8 @@ init_fw_attribute_group(struct device *d
dev->groups = group->groups;
}

-static ssize_t
-modalias_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t modalias_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct fw_unit *unit = fw_unit(dev);
int length;
@@ -332,9 +331,8 @@ modalias_show(struct device *dev,
return length + 1;
}

-static ssize_t
-rom_index_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t rom_index_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct fw_device *device = fw_device(dev->parent);
struct fw_unit *unit = fw_unit(dev);
@@ -349,8 +347,8 @@ static struct device_attribute fw_unit_a
__ATTR_NULL,
};

-static ssize_t
-config_rom_show(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t config_rom_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct fw_device *device = fw_device(dev);
size_t length;
@@ -363,8 +361,8 @@ config_rom_show(struct device *dev, stru
return length;
}

-static ssize_t
-guid_show(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t guid_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct fw_device *device = fw_device(dev);
int ret;
@@ -383,8 +381,8 @@ static struct device_attribute fw_device
__ATTR_NULL,
};

-static int
-read_rom(struct fw_device *device, int generation, int index, u32 *data)
+static int read_rom(struct fw_device *device,
+ int generation, int index, u32 *data)
{
int rcode;

@@ -1004,6 +1002,7 @@ void fw_node_event(struct fw_card *card,
device->node = fw_node_get(node);
device->node_id = node->node_id;
device->generation = card->generation;
+ mutex_init(&device->client_list_mutex);
INIT_LIST_HEAD(&device->client_list);

/*
|
||
|
diff -Naurp linux-2.6-git/drivers/firewire/fw-device.h firewire-git/drivers/firewire/fw-device.h
--- linux-2.6-git/drivers/firewire/fw-device.h	2009-01-30 13:39:02.989651512 -0500
+++ firewire-git/drivers/firewire/fw-device.h	2009-01-30 13:35:51.860646788 -0500
@@ -23,6 +23,7 @@
 #include <linux/cdev.h>
 #include <linux/idr.h>
 #include <linux/rwsem.h>
+#include <linux/mutex.h>
 #include <asm/atomic.h>
 
 enum fw_device_state {
@@ -64,7 +65,10 @@ struct fw_device {
 	bool cmc;
 	struct fw_card *card;
 	struct device device;
+
+	struct mutex client_list_mutex;
 	struct list_head client_list;
+
 	u32 *config_rom;
 	size_t config_rom_length;
 	int config_rom_retries;
@@ -176,8 +180,7 @@ struct fw_driver {
 	const struct fw_device_id *id_table;
 };
 
-static inline struct fw_driver *
-fw_driver(struct device_driver *drv)
+static inline struct fw_driver *fw_driver(struct device_driver *drv)
 {
 	return container_of(drv, struct fw_driver, driver);
 }
diff -Naurp linux-2.6-git/drivers/firewire/fw-iso.c firewire-git/drivers/firewire/fw-iso.c
--- linux-2.6-git/drivers/firewire/fw-iso.c	2008-11-04 11:18:33.000000000 -0500
+++ firewire-git/drivers/firewire/fw-iso.c	2009-01-30 13:35:51.860646788 -0500
@@ -1,5 +1,7 @@
 /*
- * Isochronous IO functionality
+ * Isochronous I/O functionality:
+ *  - Isochronous DMA context management
+ *  - Isochronous bus resource management (channels, bandwidth), client side
  *
  * Copyright (C) 2006 Kristian Hoegsberg <krh@bitplanet.net>
  *
@@ -18,21 +20,25 @@
  * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  */
 
-#include <linux/kernel.h>
-#include <linux/module.h>
 #include <linux/dma-mapping.h>
-#include <linux/vmalloc.h>
+#include <linux/errno.h>
+#include <linux/firewire-constants.h>
+#include <linux/kernel.h>
 #include <linux/mm.h>
+#include <linux/spinlock.h>
+#include <linux/vmalloc.h>
 
-#include "fw-transaction.h"
 #include "fw-topology.h"
-#include "fw-device.h"
+#include "fw-transaction.h"
 
-int
-fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card,
-		   int page_count, enum dma_data_direction direction)
+/*
+ * Isochronous DMA context management
+ */
+
+int fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card,
+		       int page_count, enum dma_data_direction direction)
 {
-	int i, j, retval = -ENOMEM;
+	int i, j;
 	dma_addr_t address;
 
 	buffer->page_count = page_count;
@@ -69,19 +75,19 @@ fw_iso_buffer_init(struct fw_iso_buffer
 	kfree(buffer->pages);
  out:
 	buffer->pages = NULL;
-	return retval;
+	return -ENOMEM;
 }
 
 int fw_iso_buffer_map(struct fw_iso_buffer *buffer, struct vm_area_struct *vma)
 {
 	unsigned long uaddr;
-	int i, retval;
+	int i, ret;
 
 	uaddr = vma->vm_start;
 	for (i = 0; i < buffer->page_count; i++) {
-		retval = vm_insert_page(vma, uaddr, buffer->pages[i]);
-		if (retval)
-			return retval;
+		ret = vm_insert_page(vma, uaddr, buffer->pages[i]);
+		if (ret)
+			return ret;
 		uaddr += PAGE_SIZE;
 	}
 
@@ -105,14 +111,14 @@ void fw_iso_buffer_destroy(struct fw_iso
 	buffer->pages = NULL;
 }
 
-struct fw_iso_context *
-fw_iso_context_create(struct fw_card *card, int type,
-		      int channel, int speed, size_t header_size,
-		      fw_iso_callback_t callback, void *callback_data)
+struct fw_iso_context *fw_iso_context_create(struct fw_card *card,
+		int type, int channel, int speed, size_t header_size,
+		fw_iso_callback_t callback, void *callback_data)
 {
 	struct fw_iso_context *ctx;
 
-	ctx = card->driver->allocate_iso_context(card, type, header_size);
+	ctx = card->driver->allocate_iso_context(card,
+						 type, channel, header_size);
 	if (IS_ERR(ctx))
 		return ctx;
 
@@ -134,25 +140,186 @@ void fw_iso_context_destroy(struct fw_is
 	card->driver->free_iso_context(ctx);
 }
 
-int
-fw_iso_context_start(struct fw_iso_context *ctx, int cycle, int sync, int tags)
+int fw_iso_context_start(struct fw_iso_context *ctx,
+			 int cycle, int sync, int tags)
 {
 	return ctx->card->driver->start_iso(ctx, cycle, sync, tags);
 }
 
-int
-fw_iso_context_queue(struct fw_iso_context *ctx,
-		     struct fw_iso_packet *packet,
-		     struct fw_iso_buffer *buffer,
-		     unsigned long payload)
+int fw_iso_context_queue(struct fw_iso_context *ctx,
+			 struct fw_iso_packet *packet,
+			 struct fw_iso_buffer *buffer,
+			 unsigned long payload)
 {
 	struct fw_card *card = ctx->card;
 
 	return card->driver->queue_iso(ctx, packet, buffer, payload);
 }
 
-int
-fw_iso_context_stop(struct fw_iso_context *ctx)
+int fw_iso_context_stop(struct fw_iso_context *ctx)
 {
 	return ctx->card->driver->stop_iso(ctx);
 }
+
+/*
+ * Isochronous bus resource management (channels, bandwidth), client side
+ */
+
+static int manage_bandwidth(struct fw_card *card, int irm_id, int generation,
+			    int bandwidth, bool allocate)
+{
+	__be32 data[2];
+	int try, new, old = allocate ? BANDWIDTH_AVAILABLE_INITIAL : 0;
+
+	/*
+	 * On a 1394a IRM with low contention, try < 1 is enough.
+	 * On a 1394-1995 IRM, we need at least try < 2.
+	 * Let's just do try < 5.
+	 */
+	for (try = 0; try < 5; try++) {
+		new = allocate ? old - bandwidth : old + bandwidth;
+		if (new < 0 || new > BANDWIDTH_AVAILABLE_INITIAL)
+			break;
+
+		data[0] = cpu_to_be32(old);
+		data[1] = cpu_to_be32(new);
+		switch (fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP,
+				irm_id, generation, SCODE_100,
+				CSR_REGISTER_BASE + CSR_BANDWIDTH_AVAILABLE,
+				data, sizeof(data))) {
+		case RCODE_GENERATION:
+			/* A generation change frees all bandwidth. */
+			return allocate ? -EAGAIN : bandwidth;
+
+		case RCODE_COMPLETE:
+			if (be32_to_cpup(data) == old)
+				return bandwidth;
+
+			old = be32_to_cpup(data);
+			/* Fall through. */
+		}
+	}
+
+	return -EIO;
+}
+
+static int manage_channel(struct fw_card *card, int irm_id, int generation,
+			  u32 channels_mask, u64 offset, bool allocate)
+{
+	__be32 data[2], c, all, old;
+	int i, retry = 5;
+
+	old = all = allocate ? cpu_to_be32(~0) : 0;
+
+	for (i = 0; i < 32; i++) {
+		if (!(channels_mask & 1 << i))
+			continue;
+
+		c = cpu_to_be32(1 << (31 - i));
+		if ((old & c) != (all & c))
+			continue;
+
+		data[0] = old;
+		data[1] = old ^ c;
+		switch (fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP,
+					   irm_id, generation, SCODE_100,
+					   offset, data, sizeof(data))) {
+		case RCODE_GENERATION:
+			/* A generation change frees all channels. */
+			return allocate ? -EAGAIN : i;
+
+		case RCODE_COMPLETE:
+			if (data[0] == old)
+				return i;
+
+			old = data[0];
+
+			/* Is the IRM 1394a-2000 compliant? */
+			if ((data[0] & c) == (data[1] & c))
+				continue;
+
+			/* 1394-1995 IRM, fall through to retry. */
+		default:
+			if (retry--)
+				i--;
+		}
+	}
+
+	return -EIO;
+}
+
+static void deallocate_channel(struct fw_card *card, int irm_id,
+			       int generation, int channel)
+{
+	u32 mask;
+	u64 offset;
+
+	mask = channel < 32 ? 1 << channel : 1 << (channel - 32);
+	offset = channel < 32 ? CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_HI :
+				CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_LO;
+
+	manage_channel(card, irm_id, generation, mask, offset, false);
+}
+
+/**
+ * fw_iso_resource_manage - Allocate or deallocate a channel and/or bandwidth
+ *
+ * In parameters: card, generation, channels_mask, bandwidth, allocate
+ * Out parameters: channel, bandwidth
+ * This function blocks (sleeps) during communication with the IRM.
+ *
+ * Allocates or deallocates at most one channel out of channels_mask.
+ * channels_mask is a bitfield with MSB for channel 63 and LSB for channel 0.
+ * (Note, the IRM's CHANNELS_AVAILABLE is a big-endian bitfield with MSB for
+ * channel 0 and LSB for channel 63.)
+ * Allocates or deallocates as many bandwidth allocation units as specified.
+ *
+ * Returns channel < 0 if no channel was allocated or deallocated.
+ * Returns bandwidth = 0 if no bandwidth was allocated or deallocated.
+ *
+ * If generation is stale, deallocations succeed but allocations fail with
+ * channel = -EAGAIN.
+ *
+ * If channel allocation fails, no bandwidth will be allocated either.
+ * If bandwidth allocation fails, no channel will be allocated either.
+ * But deallocations of channel and bandwidth are tried independently
+ * of each other's success.
+ */
+void fw_iso_resource_manage(struct fw_card *card, int generation,
+			    u64 channels_mask, int *channel, int *bandwidth,
+			    bool allocate)
+{
+	u32 channels_hi = channels_mask;	/* channels 31...0 */
+	u32 channels_lo = channels_mask >> 32;	/* channels 63...32 */
+	int irm_id, ret, c = -EINVAL;
+
+	spin_lock_irq(&card->lock);
+	irm_id = card->irm_node->node_id;
+	spin_unlock_irq(&card->lock);
+
+	if (channels_hi)
+		c = manage_channel(card, irm_id, generation, channels_hi,
+		    CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_HI, allocate);
+	if (channels_lo && c < 0) {
+		c = manage_channel(card, irm_id, generation, channels_lo,
+		    CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_LO, allocate);
+		if (c >= 0)
+			c += 32;
+	}
+	*channel = c;
+
+	if (allocate && channels_mask != 0 && c < 0)
+		*bandwidth = 0;
+
+	if (*bandwidth == 0)
+		return;
+
+	ret = manage_bandwidth(card, irm_id, generation, *bandwidth, allocate);
+	if (ret < 0)
+		*bandwidth = 0;
+
+	if (allocate && ret < 0 && c >= 0) {
+		deallocate_channel(card, irm_id, generation, c);
+		*channel = ret;
+	}
+}
diff -Naurp linux-2.6-git/drivers/firewire/fw-ohci.c firewire-git/drivers/firewire/fw-ohci.c
--- linux-2.6-git/drivers/firewire/fw-ohci.c	2009-01-30 13:39:02.990772025 -0500
+++ firewire-git/drivers/firewire/fw-ohci.c	2009-01-30 13:35:51.861646907 -0500
@@ -205,6 +205,7 @@ struct fw_ohci {
 
 	u32 it_context_mask;
 	struct iso_context *it_context_list;
+	u64 ir_context_channels;
 	u32 ir_context_mask;
 	struct iso_context *ir_context_list;
 };
@@ -441,9 +442,8 @@ static inline void flush_writes(const st
 	reg_read(ohci, OHCI1394_Version);
 }
 
-static int
-ohci_update_phy_reg(struct fw_card *card, int addr,
-		    int clear_bits, int set_bits)
+static int ohci_update_phy_reg(struct fw_card *card, int addr,
+			       int clear_bits, int set_bits)
 {
 	struct fw_ohci *ohci = fw_ohci(card);
 	u32 val, old;
@@ -658,8 +658,8 @@ static void ar_context_tasklet(unsigned
 	}
 }
 
-static int
-ar_context_init(struct ar_context *ctx, struct fw_ohci *ohci, u32 regs)
+static int ar_context_init(struct ar_context *ctx,
+			   struct fw_ohci *ohci, u32 regs)
 {
 	struct ar_buffer ab;
 
@@ -690,8 +690,7 @@ static void ar_context_run(struct ar_con
 	flush_writes(ctx->ohci);
 }
 
-static struct descriptor *
-find_branch_descriptor(struct descriptor *d, int z)
+static struct descriptor *find_branch_descriptor(struct descriptor *d, int z)
 {
 	int b, key;
 
@@ -751,8 +750,7 @@ static void context_tasklet(unsigned lon
  * Allocate a new buffer and add it to the list of free buffers for this
  * context.  Must be called with ohci->lock held.
  */
-static int
-context_add_buffer(struct context *ctx)
+static int context_add_buffer(struct context *ctx)
 {
 	struct descriptor_buffer *desc;
 	dma_addr_t uninitialized_var(bus_addr);
@@ -781,9 +779,8 @@ context_add_buffer(struct context *ctx)
 	return 0;
 }
 
-static int
-context_init(struct context *ctx, struct fw_ohci *ohci,
-	     u32 regs, descriptor_callback_t callback)
+static int context_init(struct context *ctx, struct fw_ohci *ohci,
+			u32 regs, descriptor_callback_t callback)
 {
 	ctx->ohci = ohci;
 	ctx->regs = regs;
@@ -814,8 +811,7 @@ context_init(struct context *ctx, struct
 	return 0;
 }
 
-static void
-context_release(struct context *ctx)
+static void context_release(struct context *ctx)
 {
 	struct fw_card *card = &ctx->ohci->card;
 	struct descriptor_buffer *desc, *tmp;
@@ -827,8 +823,8 @@ context_release(struct context *ctx)
 }
 
 /* Must be called with ohci->lock held */
-static struct descriptor *
-context_get_descriptors(struct context *ctx, int z, dma_addr_t *d_bus)
+static struct descriptor *context_get_descriptors(struct context *ctx,
+						  int z, dma_addr_t *d_bus)
 {
 	struct descriptor *d = NULL;
 	struct descriptor_buffer *desc = ctx->buffer_tail;
@@ -912,8 +908,8 @@ struct driver_data {
  * Must always be called with the ochi->lock held to ensure proper
 * generation handling and locking around packet queue manipulation.
 */
-static int
-at_context_queue_packet(struct context *ctx, struct fw_packet *packet)
+static int at_context_queue_packet(struct context *ctx,
+				   struct fw_packet *packet)
 {
 	struct fw_ohci *ohci = ctx->ohci;
 	dma_addr_t d_bus, uninitialized_var(payload_bus);
@@ -1095,8 +1091,8 @@ static int handle_at_packet(struct conte
 #define HEADER_GET_DATA_LENGTH(q)	(((q) >> 16) & 0xffff)
 #define HEADER_GET_EXTENDED_TCODE(q)	(((q) >> 0) & 0xffff)
 
-static void
-handle_local_rom(struct fw_ohci *ohci, struct fw_packet *packet, u32 csr)
+static void handle_local_rom(struct fw_ohci *ohci,
+			     struct fw_packet *packet, u32 csr)
 {
 	struct fw_packet response;
 	int tcode, length, i;
@@ -1122,8 +1118,8 @@ handle_local_rom(struct fw_ohci *ohci, s
 	fw_core_handle_response(&ohci->card, &response);
 }
 
-static void
-handle_local_lock(struct fw_ohci *ohci, struct fw_packet *packet, u32 csr)
+static void handle_local_lock(struct fw_ohci *ohci,
+			      struct fw_packet *packet, u32 csr)
 {
 	struct fw_packet response;
 	int tcode, length, ext_tcode, sel;
@@ -1164,8 +1160,7 @@ handle_local_lock(struct fw_ohci *ohci,
 	fw_core_handle_response(&ohci->card, &response);
 }
 
-static void
-handle_local_request(struct context *ctx, struct fw_packet *packet)
+static void handle_local_request(struct context *ctx, struct fw_packet *packet)
 {
 	u64 offset;
 	u32 csr;
@@ -1205,11 +1200,10 @@ handle_local_request(struct context *ctx
 	}
 }
 
-static void
-at_context_transmit(struct context *ctx, struct fw_packet *packet)
+static void at_context_transmit(struct context *ctx, struct fw_packet *packet)
 {
 	unsigned long flags;
-	int retval;
+	int ret;
 
 	spin_lock_irqsave(&ctx->ohci->lock, flags);
 
@@ -1220,10 +1214,10 @@ at_context_transmit(struct context *ctx,
 		return;
 	}
 
-	retval = at_context_queue_packet(ctx, packet);
+	ret = at_context_queue_packet(ctx, packet);
 	spin_unlock_irqrestore(&ctx->ohci->lock, flags);
 
-	if (retval < 0)
+	if (ret < 0)
 		packet->callback(packet, &ctx->ohci->card, packet->ack);
 
 }
@@ -1590,12 +1584,12 @@ static int ohci_enable(struct fw_card *c
 	return 0;
 }
 
-static int
-ohci_set_config_rom(struct fw_card *card, u32 *config_rom, size_t length)
+static int ohci_set_config_rom(struct fw_card *card,
+			       u32 *config_rom, size_t length)
 {
 	struct fw_ohci *ohci;
 	unsigned long flags;
-	int retval = -EBUSY;
+	int ret = -EBUSY;
 	__be32 *next_config_rom;
 	dma_addr_t uninitialized_var(next_config_rom_bus);
 
@@ -1649,7 +1643,7 @@ ohci_set_config_rom(struct fw_card *card
 
 		reg_write(ohci, OHCI1394_ConfigROMmap,
 			  ohci->next_config_rom_bus);
-		retval = 0;
+		ret = 0;
 	}
 
 	spin_unlock_irqrestore(&ohci->lock, flags);
@@ -1661,13 +1655,13 @@ ohci_set_config_rom(struct fw_card *card
 	 * controller could need to access it before the bus reset
 	 * takes effect.
 	 */
-	if (retval == 0)
+	if (ret == 0)
 		fw_core_initiate_bus_reset(&ohci->card, 1);
 	else
 		dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
 				  next_config_rom, next_config_rom_bus);
 
-	return retval;
+	return ret;
 }
 
 static void ohci_send_request(struct fw_card *card, struct fw_packet *packet)
@@ -1689,7 +1683,7 @@ static int ohci_cancel_packet(struct fw_
 	struct fw_ohci *ohci = fw_ohci(card);
 	struct context *ctx = &ohci->at_request_ctx;
 	struct driver_data *driver_data = packet->driver_data;
-	int retval = -ENOENT;
+	int ret = -ENOENT;
 
 	tasklet_disable(&ctx->tasklet);
 
@@ -1704,23 +1698,22 @@ static int ohci_cancel_packet(struct fw_
 	driver_data->packet = NULL;
 	packet->ack = RCODE_CANCELLED;
 	packet->callback(packet, &ohci->card, packet->ack);
-	retval = 0;
-
+	ret = 0;
 out:
 	tasklet_enable(&ctx->tasklet);
 
-	return retval;
+	return ret;
 }
 
-static int
-ohci_enable_phys_dma(struct fw_card *card, int node_id, int generation)
+static int ohci_enable_phys_dma(struct fw_card *card,
+				int node_id, int generation)
 {
 #ifdef CONFIG_FIREWIRE_OHCI_REMOTE_DMA
 	return 0;
 #else
 	struct fw_ohci *ohci = fw_ohci(card);
 	unsigned long flags;
-	int n, retval = 0;
+	int n, ret = 0;
 
 	/*
 	 * FIXME:  Make sure this bitmask is cleared when we clear the busReset
@@ -1730,7 +1723,7 @@ ohci_enable_phys_dma(struct fw_card *car
 	spin_lock_irqsave(&ohci->lock, flags);
 
 	if (ohci->generation != generation) {
-		retval = -ESTALE;
+		ret = -ESTALE;
 		goto out;
 	}
 
@@ -1748,12 +1741,12 @@ ohci_enable_phys_dma(struct fw_card *car
 	flush_writes(ohci);
 out:
 	spin_unlock_irqrestore(&ohci->lock, flags);
-	return retval;
+
+	return ret;
 #endif /* CONFIG_FIREWIRE_OHCI_REMOTE_DMA */
 }
 
-static u64
-ohci_get_bus_time(struct fw_card *card)
+static u64 ohci_get_bus_time(struct fw_card *card)
 {
 	struct fw_ohci *ohci = fw_ohci(card);
 	u32 cycle_time;
@@ -1765,6 +1758,28 @@ ohci_get_bus_time(struct fw_card *card)
 	return bus_time;
 }
 
+static void copy_iso_headers(struct iso_context *ctx, void *p)
+{
+	int i = ctx->header_length;
+
+	if (i + ctx->base.header_size > PAGE_SIZE)
+		return;
+
+	/*
+	 * The iso header is byteswapped to little endian by
+	 * the controller, but the remaining header quadlets
+	 * are big endian.  We want to present all the headers
+	 * as big endian, so we have to swap the first quadlet.
+	 */
+	if (ctx->base.header_size > 0)
+		*(u32 *) (ctx->header + i) = __swab32(*(u32 *) (p + 4));
+	if (ctx->base.header_size > 4)
+		*(u32 *) (ctx->header + i + 4) = __swab32(*(u32 *) p);
+	if (ctx->base.header_size > 8)
+		memcpy(ctx->header + i + 8, p + 8, ctx->base.header_size - 8);
+	ctx->header_length += ctx->base.header_size;
+}
+
 static int handle_ir_dualbuffer_packet(struct context *context,
 				       struct descriptor *d,
 				       struct descriptor *last)
@@ -1775,7 +1790,6 @@ static int handle_ir_dualbuffer_packet(s
 	__le32 *ir_header;
 	size_t header_length;
 	void *p, *end;
-	int i;
 
 	if (db->first_res_count != 0 && db->second_res_count != 0) {
 		if (ctx->excess_bytes <= le16_to_cpu(db->second_req_count)) {
@@ -1788,25 +1802,14 @@ static int handle_ir_dualbuffer_packet(s
 	header_length = le16_to_cpu(db->first_req_count) -
 		le16_to_cpu(db->first_res_count);
 
-	i = ctx->header_length;
 	p = db + 1;
 	end = p + header_length;
-	while (p < end && i + ctx->base.header_size <= PAGE_SIZE) {
-		/*
-		 * The iso header is byteswapped to little endian by
-		 * the controller, but the remaining header quadlets
-		 * are big endian.  We want to present all the headers
-		 * as big endian, so we have to swap the first
-		 * quadlet.
-		 */
-		*(u32 *) (ctx->header + i) = __swab32(*(u32 *) (p + 4));
-		memcpy(ctx->header + i + 4, p + 8, ctx->base.header_size - 4);
-		i += ctx->base.header_size;
+	while (p < end) {
+		copy_iso_headers(ctx, p);
 		ctx->excess_bytes +=
 			(le32_to_cpu(*(__le32 *)(p + 4)) >> 16) & 0xffff;
-		p += ctx->base.header_size + 4;
+		p += max(ctx->base.header_size, (size_t)8);
 	}
-	ctx->header_length = i;
 
 	ctx->excess_bytes -= le16_to_cpu(db->second_req_count) -
 		le16_to_cpu(db->second_res_count);
@@ -1832,7 +1835,6 @@ static int handle_ir_packet_per_buffer(s
 	struct descriptor *pd;
 	__le32 *ir_header;
 	void *p;
-	int i;
 
 	for (pd = d; pd <= last; pd++) {
 		if (pd->transfer_status)
@@ -1842,21 +1844,8 @@ static int handle_ir_packet_per_buffer(s
 		/* Descriptor(s) not done yet, stop iteration */
 		return 0;
 
-	i = ctx->header_length;
-	p = last + 1;
-
-	if (ctx->base.header_size > 0 &&
-			i + ctx->base.header_size <= PAGE_SIZE) {
-		/*
-		 * The iso header is byteswapped to little endian by
-		 * the controller, but the remaining header quadlets
-		 * are big endian.  We want to present all the headers
-		 * as big endian, so we have to swap the first quadlet.
-		 */
-		*(u32 *) (ctx->header + i) = __swab32(*(u32 *) (p + 4));
-		memcpy(ctx->header + i + 4, p + 8, ctx->base.header_size - 4);
-		ctx->header_length += ctx->base.header_size;
-	}
+	p = last + 1;
+	copy_iso_headers(ctx, p);
 
 	if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS) {
 		ir_header = (__le32 *) p;
@@ -1888,21 +1877,24 @@ static int handle_it_packet(struct conte
 	return 1;
 }
 
-static struct fw_iso_context *
-ohci_allocate_iso_context(struct fw_card *card, int type, size_t header_size)
+static struct fw_iso_context *ohci_allocate_iso_context(struct fw_card *card,
+				int type, int channel, size_t header_size)
 {
 	struct fw_ohci *ohci = fw_ohci(card);
 	struct iso_context *ctx, *list;
 	descriptor_callback_t callback;
+	u64 *channels, dont_care = ~0ULL;
 	u32 *mask, regs;
 	unsigned long flags;
-	int index, retval = -ENOMEM;
+	int index, ret = -ENOMEM;
 
 	if (type == FW_ISO_CONTEXT_TRANSMIT) {
+		channels = &dont_care;
 		mask = &ohci->it_context_mask;
 		list = ohci->it_context_list;
 		callback = handle_it_packet;
 	} else {
+		channels = &ohci->ir_context_channels;
 		mask = &ohci->ir_context_mask;
 		list = ohci->ir_context_list;
 		if (ohci->use_dualbuffer)
@@ -1912,9 +1904,11 @@ ohci_allocate_iso_context(struct fw_card
 	}
 
 	spin_lock_irqsave(&ohci->lock, flags);
-	index = ffs(*mask) - 1;
-	if (index >= 0)
+	index = *channels & 1ULL << channel ? ffs(*mask) - 1 : -1;
+	if (index >= 0) {
+		*channels &= ~(1ULL << channel);
 		*mask &= ~(1 << index);
+	}
	spin_unlock_irqrestore(&ohci->lock, flags);
 
 	if (index < 0)
@@ -1932,8 +1926,8 @@ ohci_allocate_iso_context(struct fw_card
 	if (ctx->header == NULL)
 		goto out;
 
-	retval = context_init(&ctx->context, ohci, regs, callback);
-	if (retval < 0)
+	ret = context_init(&ctx->context, ohci, regs, callback);
+	if (ret < 0)
 		goto out_with_header;
 
 	return &ctx->base;
@@ -1945,7 +1939,7 @@ ohci_allocate_iso_context(struct fw_card
 	*mask |= 1 << index;
 	spin_unlock_irqrestore(&ohci->lock, flags);
 
-	return ERR_PTR(retval);
+	return ERR_PTR(ret);
 }
 
 static int ohci_start_iso(struct fw_iso_context *base,
@@ -2024,16 +2018,16 @@ static void ohci_free_iso_context(struct
 	} else {
 		index = ctx - ohci->ir_context_list;
 		ohci->ir_context_mask |= 1 << index;
+		ohci->ir_context_channels |= 1ULL << base->channel;
 	}
 
 	spin_unlock_irqrestore(&ohci->lock, flags);
 }
 
-static int
-ohci_queue_iso_transmit(struct fw_iso_context *base,
-			struct fw_iso_packet *packet,
-			struct fw_iso_buffer *buffer,
-			unsigned long payload)
+static int ohci_queue_iso_transmit(struct fw_iso_context *base,
+				   struct fw_iso_packet *packet,
+				   struct fw_iso_buffer *buffer,
+				   unsigned long payload)
 {
 	struct iso_context *ctx = container_of(base, struct iso_context, base);
 	struct descriptor *d, *last, *pd;
@@ -2128,11 +2122,10 @@ ohci_queue_iso_transmit(struct fw_iso_co
 	return 0;
 }
 
-static int
-ohci_queue_iso_receive_dualbuffer(struct fw_iso_context *base,
-				  struct fw_iso_packet *packet,
-				  struct fw_iso_buffer *buffer,
-				  unsigned long payload)
+static int ohci_queue_iso_receive_dualbuffer(struct fw_iso_context *base,
+					     struct fw_iso_packet *packet,
+					     struct fw_iso_buffer *buffer,
+					     unsigned long payload)
 {
 	struct iso_context *ctx = container_of(base, struct iso_context, base);
 	struct db_descriptor *db = NULL;
@@ -2151,11 +2144,11 @@ ohci_queue_iso_receive_dualbuffer(struct
 	z = 2;
 
 	/*
-	 * The OHCI controller puts the status word in the header
-	 * buffer too, so we need 4 extra bytes per packet.
+	 * The OHCI controller puts the isochronous header and trailer in the
+	 * buffer, so we need at least 8 bytes.
 	 */
 	packet_count = p->header_length / ctx->base.header_size;
-	header_size = packet_count * (ctx->base.header_size + 4);
+	header_size = packet_count * max(ctx->base.header_size, (size_t)8);
 
 	/* Get header size in number of descriptors. */
 	header_z = DIV_ROUND_UP(header_size, sizeof(*d));
@@ -2173,7 +2166,8 @@ ohci_queue_iso_receive_dualbuffer(struct
 		db = (struct db_descriptor *) d;
 		db->control = cpu_to_le16(DESCRIPTOR_STATUS |
 					  DESCRIPTOR_BRANCH_ALWAYS);
-		db->first_size = cpu_to_le16(ctx->base.header_size + 4);
+		db->first_size =
+		    cpu_to_le16(max(ctx->base.header_size, (size_t)8));
 		if (p->skip && rest == p->payload_length) {
 			db->control |= cpu_to_le16(DESCRIPTOR_WAIT);
 			db->first_req_count = db->first_size;
@@ -2208,11 +2202,10 @@ ohci_queue_iso_receive_dualbuffer(struct
 	return 0;
 }
 
-static int
-ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base,
-					 struct fw_iso_packet *packet,
-					 struct fw_iso_buffer *buffer,
-					 unsigned long payload)
+static int ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base,
+					struct fw_iso_packet *packet,
+					struct fw_iso_buffer *buffer,
+					unsigned long payload)
 {
 	struct iso_context *ctx = container_of(base, struct iso_context, base);
 	struct descriptor *d = NULL, *pd = NULL;
@@ -2223,11 +2216,11 @@ ohci_queue_iso_receive_packet_per_buffer
 	int page, offset, packet_count, header_size, payload_per_buffer;
 
 	/*
-	 * The OHCI controller puts the status word in the
-	 * buffer too, so we need 4 extra bytes per packet.
+	 * The OHCI controller puts the isochronous header and trailer in the
+	 * buffer, so we need at least 8 bytes.
 	 */
 	packet_count = p->header_length / ctx->base.header_size;
-	header_size = ctx->base.header_size + 4;
+	header_size = max(ctx->base.header_size, (size_t)8);
 
 	/* Get header size in number of descriptors. */
 	header_z = DIV_ROUND_UP(header_size, sizeof(*d));
@@ -2286,29 +2279,27 @@ ohci_queue_iso_receive_packet_per_buffer
 	return 0;
 }
 
-static int
-ohci_queue_iso(struct fw_iso_context *base,
-	       struct fw_iso_packet *packet,
-	       struct fw_iso_buffer *buffer,
-	       unsigned long payload)
+static int ohci_queue_iso(struct fw_iso_context *base,
+			  struct fw_iso_packet *packet,
+			  struct fw_iso_buffer *buffer,
+			  unsigned long payload)
 {
 	struct iso_context *ctx = container_of(base, struct iso_context, base);
 	unsigned long flags;
-	int retval;
+	int ret;
 
 	spin_lock_irqsave(&ctx->context.ohci->lock, flags);
 	if (base->type == FW_ISO_CONTEXT_TRANSMIT)
-		retval = ohci_queue_iso_transmit(base, packet, buffer, payload);
+		ret = ohci_queue_iso_transmit(base, packet, buffer, payload);
 	else if (ctx->context.ohci->use_dualbuffer)
-		retval = ohci_queue_iso_receive_dualbuffer(base, packet,
-							   buffer, payload);
+		ret = ohci_queue_iso_receive_dualbuffer(base, packet,
+							buffer, payload);
 	else
-		retval = ohci_queue_iso_receive_packet_per_buffer(base, packet,
-								  buffer,
-								  payload);
+		ret = ohci_queue_iso_receive_packet_per_buffer(base, packet,
+							       buffer, payload);
 	spin_unlock_irqrestore(&ctx->context.ohci->lock, flags);
 
-	return retval;
+	return ret;
 }
 
 static const struct fw_card_driver ohci_driver = {
@@ -2357,8 +2348,8 @@ static void ohci_pmac_off(struct pci_dev
 #define ohci_pmac_off(dev)
 #endif /* CONFIG_PPC_PMAC */
 
-static int __devinit
-pci_probe(struct pci_dev *dev, const struct pci_device_id *ent)
+static int __devinit pci_probe(struct pci_dev *dev,
+			       const struct pci_device_id *ent)
 {
 	struct fw_ohci *ohci;
 	u32 bus_options, max_receive, link_speed, version;
@@ -2440,6 +2431,7 @@ pci_probe(struct pci_dev *dev, const str
 	ohci->it_context_list = kzalloc(size, GFP_KERNEL);
 
 	reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, ~0);
+	ohci->ir_context_channels = ~0ULL;
 	ohci->ir_context_mask = reg_read(ohci, OHCI1394_IsoXmitIntMaskSet);
 	reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, ~0);
 	size = sizeof(struct iso_context) * hweight32(ohci->ir_context_mask);
diff -Naurp linux-2.6-git/drivers/firewire/fw-sbp2.c firewire-git/drivers/firewire/fw-sbp2.c
--- linux-2.6-git/drivers/firewire/fw-sbp2.c	2009-01-30 13:39:02.991771976 -0500
+++ firewire-git/drivers/firewire/fw-sbp2.c	2009-01-30 13:35:51.861646907 -0500
@@ -392,20 +392,18 @@ static const struct {
 	}
 };
 
-static void
-free_orb(struct kref *kref)
+static void free_orb(struct kref *kref)
 {
 	struct sbp2_orb *orb = container_of(kref, struct sbp2_orb, kref);
 
 	kfree(orb);
 }
 
-static void
-sbp2_status_write(struct fw_card *card, struct fw_request *request,
-		  int tcode, int destination, int source,
-		  int generation, int speed,
-		  unsigned long long offset,
-		  void *payload, size_t length, void *callback_data)
+static void sbp2_status_write(struct fw_card *card, struct fw_request *request,
+		int tcode, int destination, int source,
+		int generation, int speed,
+		unsigned long long offset,
+		void *payload, size_t length, void *callback_data)
 {
 	struct sbp2_logical_unit *lu = callback_data;
 	struct sbp2_orb *orb;
@@ -451,9 +449,8 @@ sbp2_status_write(struct fw_card *card,
 	fw_send_response(card, request, RCODE_COMPLETE);
 }
 
-static void
-complete_transaction(struct fw_card *card, int rcode,
-		     void *payload, size_t length, void *data)
+static void complete_transaction(struct fw_card *card, int rcode,
+				 void *payload, size_t length, void *data)
 {
 	struct sbp2_orb *orb = data;
 	unsigned long flags;
@@ -482,9 +479,8 @@ complete_transaction(struct fw_card *car
 	kref_put(&orb->kref, free_orb);
 }
 
-static void
-sbp2_send_orb(struct sbp2_orb *orb, struct sbp2_logical_unit *lu,
-	      int node_id, int generation, u64 offset)
+static void sbp2_send_orb(struct sbp2_orb *orb, struct sbp2_logical_unit *lu,
+			  int node_id, int generation, u64 offset)
 {
 	struct fw_device *device = fw_device(lu->tgt->unit->device.parent);
 	unsigned long flags;
@@ -531,8 +527,8 @@ static int sbp2_cancel_orbs(struct sbp2_
 	return retval;
 }
 
-static void
-complete_management_orb(struct sbp2_orb *base_orb, struct sbp2_status *status)
+static void complete_management_orb(struct sbp2_orb *base_orb,
+				    struct sbp2_status *status)
 {
 	struct sbp2_management_orb *orb =
 		container_of(base_orb, struct sbp2_management_orb, base);
@@ -542,10 +538,9 @@ complete_management_orb(struct sbp2_orb
 	complete(&orb->done);
 }
 
-static int
-sbp2_send_management_orb(struct sbp2_logical_unit *lu, int node_id,
-			 int generation, int function, int lun_or_login_id,
-			 void *response)
+static int sbp2_send_management_orb(struct sbp2_logical_unit *lu, int node_id,
+				    int generation, int function,
+				    int lun_or_login_id, void *response)
 {
 	struct fw_device *device = fw_device(lu->tgt->unit->device.parent);
 	struct sbp2_management_orb *orb;
@@ -652,9 +647,8 @@ static void sbp2_agent_reset(struct sbp2
 			   &d, sizeof(d));
 }
 
-static void
-complete_agent_reset_write_no_wait(struct fw_card *card, int rcode,
-				   void *payload, size_t length, void *data)
+static void complete_agent_reset_write_no_wait(struct fw_card *card,
+		int rcode, void *payload, size_t length, void *data)
 {
 	kfree(data);
 }
@@ -1299,8 +1293,7 @@ static void sbp2_unmap_scatterlist(struc
 			 sizeof(orb->page_table), DMA_TO_DEVICE);
 }
 
-static unsigned int
-sbp2_status_to_sense_data(u8 *sbp2_status, u8 *sense_data)
+static unsigned int sbp2_status_to_sense_data(u8 *sbp2_status, u8 *sense_data)
 {
 	int sam_status;
 
@@ -1337,8 +1330,8 @@ sbp2_status_to_sense_data(u8 *sbp2_statu
 	}
 }
 
-static void
-complete_command_orb(struct sbp2_orb *base_orb, struct sbp2_status *status)
+static void complete_command_orb(struct sbp2_orb *base_orb,
+				 struct sbp2_status *status)
 {
 	struct sbp2_command_orb *orb =
 		container_of(base_orb, struct sbp2_command_orb, base);
@@ -1384,9 +1377,8 @@ complete_command_orb(struct sbp2_orb *ba
 		orb->done(orb->cmd);
 }
 
-static int
-sbp2_map_scatterlist(struct sbp2_command_orb *orb, struct fw_device *device,
-		     struct sbp2_logical_unit *lu)
+static int sbp2_map_scatterlist(struct sbp2_command_orb *orb,
+		struct fw_device *device, struct sbp2_logical_unit *lu)
 {
 	struct scatterlist *sg = scsi_sglist(orb->cmd);
 	int i, n;
@@ -1584,9 +1576,8 @@ static int sbp2_scsi_abort(struct scsi_c
  * This is the concatenation of target port identifier and logical unit
 * identifier as per SAM-2...SAM-4 annex A.
 */
-static ssize_t
-sbp2_sysfs_ieee1394_id_show(struct device *dev, struct device_attribute *attr,
-			    char *buf)
+static ssize_t sbp2_sysfs_ieee1394_id_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
 {
 	struct scsi_device *sdev = to_scsi_device(dev);
 	struct sbp2_logical_unit *lu;
diff -Naurp linux-2.6-git/drivers/firewire/fw-topology.c firewire-git/drivers/firewire/fw-topology.c
--- linux-2.6-git/drivers/firewire/fw-topology.c	2009-01-30 13:39:02.991771976 -0500
+++ firewire-git/drivers/firewire/fw-topology.c	2009-01-30 13:35:51.862647087 -0500
@@ -314,9 +314,8 @@ typedef void (*fw_node_callback_t)(struc
 				    struct fw_node * node,
 				    struct fw_node * parent);
 
-static void
-for_each_fw_node(struct fw_card *card, struct fw_node *root,
-		 fw_node_callback_t callback)
+static void for_each_fw_node(struct fw_card *card, struct fw_node *root,
+			     fw_node_callback_t callback)
 {
 	struct list_head list;
 	struct fw_node *node, *next, *child, *parent;
@@ -349,9 +348,8 @@ for_each_fw_node(struct fw_card *card, s
 		fw_node_put(node);
 }
 
-static void
-report_lost_node(struct fw_card *card,
-		 struct fw_node *node, struct fw_node *parent)
+static void report_lost_node(struct fw_card *card,
+			     struct fw_node *node, struct fw_node *parent)
 {
 	fw_node_event(card, node, FW_NODE_DESTROYED);
 	fw_node_put(node);
@@ -360,9 +358,8 @@ report_lost_node(struct fw_card *card,
 	card->bm_retries = 0;
 }
 
-static void
-report_found_node(struct fw_card *card,
-		  struct fw_node *node, struct fw_node *parent)
+static void report_found_node(struct fw_card *card,
+			      struct fw_node *node, struct fw_node *parent)
 {
 	int b_path = (node->phy_speed == SCODE_BETA);
 
@@ -415,8 +412,7 @@ static void move_tree(struct fw_node *no
  * found, lost or updated.  Update the nodes in the card topology tree
 * as we go.
 */
-static void
-update_tree(struct fw_card *card, struct fw_node *root)
+static void update_tree(struct fw_card *card, struct fw_node *root)
 {
 	struct list_head list0, list1;
 	struct fw_node *node0, *node1, *next1;
@@ -497,8 +493,8 @@ update_tree(struct fw_card *card, struct
 	}
 }
 
-static void
-update_topology_map(struct fw_card *card, u32 *self_ids, int self_id_count)
+static void update_topology_map(struct fw_card *card,
+				u32 *self_ids, int self_id_count)
 {
 	int node_count;
 
@@ -510,10 +506,8 @@ update_topology_map(struct fw_card *card
 	fw_compute_block_crc(card->topology_map);
 }
 
-void
-fw_core_handle_bus_reset(struct fw_card *card,
-			 int node_id, int generation,
-			 int self_id_count, u32 * self_ids)
+void fw_core_handle_bus_reset(struct fw_card *card, int node_id, int generation,
+			      int self_id_count, u32 *self_ids)
 {
 	struct fw_node *local_node;
 	unsigned long flags;
diff -Naurp linux-2.6-git/drivers/firewire/fw-topology.h firewire-git/drivers/firewire/fw-topology.h
--- linux-2.6-git/drivers/firewire/fw-topology.h	2008-11-04 11:18:33.000000000 -0500
+++ firewire-git/drivers/firewire/fw-topology.h	2009-01-30 13:35:51.862647087 -0500
@@ -19,6 +19,11 @@
 #ifndef __fw_topology_h
 #define __fw_topology_h
 
+#include <linux/list.h>
+#include <linux/slab.h>
+
+#include <asm/atomic.h>
+
 enum {
 	FW_NODE_CREATED,
 	FW_NODE_UPDATED,
@@ -51,26 +56,22 @@ struct fw_node {
 	struct fw_node *ports[0];
 };
 
-static inline struct fw_node *
-fw_node_get(struct fw_node *node)
+static inline struct fw_node *fw_node_get(struct fw_node *node)
 {
 	atomic_inc(&node->ref_count);
 
 	return node;
 }
 
-static inline void
-fw_node_put(struct fw_node *node)
+static inline void fw_node_put(struct fw_node *node)
 {
 	if (atomic_dec_and_test(&node->ref_count))
 		kfree(node);
 }
 
-void
-fw_destroy_nodes(struct fw_card *card);
-
-int
-fw_compute_block_crc(u32 *block);
+struct fw_card;
+void fw_destroy_nodes(struct fw_card *card);
 
+int fw_compute_block_crc(u32 *block);
 
 #endif /* __fw_topology_h */
diff -Naurp linux-2.6-git/drivers/firewire/fw-transaction.c firewire-git/drivers/firewire/fw-transaction.c
--- linux-2.6-git/drivers/firewire/fw-transaction.c	2009-01-30 13:39:02.991771976 -0500
+++ firewire-git/drivers/firewire/fw-transaction.c	2009-01-30 13:35:51.862647087 -0500
@@ -64,10 +64,9 @@
 #define PHY_CONFIG_ROOT_ID(node_id)	((((node_id) & 0x3f) << 24) | (1 << 23))
 #define PHY_IDENTIFIER(id)		((id) << 30)
 
-static int
-close_transaction(struct fw_transaction *transaction,
-		  struct fw_card *card, int rcode,
-		  u32 *payload, size_t length)
+static int close_transaction(struct fw_transaction *transaction,
+			     struct fw_card *card, int rcode,
+			     u32 *payload, size_t length)
 {
 	struct fw_transaction *t;
 	unsigned long flags;
@@ -94,9 +93,8 @@ close_transaction(struct fw_transaction
  * Only valid for transactions that are potentially pending (ie have
 * been sent).
 */
-int
-fw_cancel_transaction(struct fw_card *card,
-		      struct fw_transaction *transaction)
+int fw_cancel_transaction(struct fw_card *card,
+			  struct fw_transaction *transaction)
 {
 	/*
	 * Cancel the packet transmission if it's still queued.  That
@@ -116,9 +114,8 @@ fw_cancel_transaction(struct fw_card *ca
 }
 EXPORT_SYMBOL(fw_cancel_transaction);
 
-static void
-transmit_complete_callback(struct fw_packet *packet,
-			   struct fw_card *card, int status)
+static void transmit_complete_callback(struct fw_packet *packet,
+				       struct fw_card *card, int status)
 {
 	struct fw_transaction *t =
 	    container_of(packet, struct fw_transaction, packet);
@@ -151,8 +148,7 @@ transmit_complete_callback(struct fw_pac
 	}
 }
 
-static void
-fw_fill_request(struct fw_packet *packet, int tcode, int tlabel,
+static void fw_fill_request(struct fw_packet *packet, int tcode, int tlabel,
 		int destination_id, int source_id, int generation, int speed,
 		unsigned long long offset, void *payload, size_t length)
 {
@@ -247,12 +243,10 @@ fw_fill_request(struct fw_packet *packet
  * @param callback_data pointer to arbitrary data, which will be
 *   passed to the callback
 */
-void
-fw_send_request(struct fw_card *card, struct fw_transaction *t,
-		int tcode, int destination_id, int generation, int speed,
-		unsigned long long offset,
-		void *payload, size_t length,
-		fw_transaction_callback_t callback, void *callback_data)
+void fw_send_request(struct fw_card *card, struct fw_transaction *t, int tcode,
+		     int destination_id, int generation, int speed,
+		     unsigned long long offset, void *payload, size_t length,
+		     fw_transaction_callback_t callback, void *callback_data)
 {
 	unsigned long flags;
 	int tlabel;
@@ -322,8 +316,8 @@ static void transaction_callback(struct
  * Returns the RCODE.
 */
 int fw_run_transaction(struct fw_card *card, int tcode, int destination_id,
-		int generation, int speed, unsigned long long offset,
-		void *data, size_t length)
+		       int generation, int speed, unsigned long long offset,
+		       void *data, size_t length)
 {
 	struct transaction_callback_data d;
 	struct fw_transaction t;
@@ -399,9 +393,8 @@ void fw_flush_transactions(struct fw_car
 	}
 }
 
-static struct fw_address_handler *
-lookup_overlapping_address_handler(struct list_head *list,
-				   unsigned long long offset, size_t length)
+static struct fw_address_handler *lookup_overlapping_address_handler(
+	struct list_head *list, unsigned long long offset, size_t length)
 {
 	struct fw_address_handler *handler;
 
@@ -414,9 +407,8 @@ lookup_overlapping_address_handler(struc
 	return NULL;
 }
 
-static struct fw_address_handler *
-lookup_enclosing_address_handler(struct list_head *list,
-				 unsigned long long offset, size_t length)
+static struct fw_address_handler *lookup_enclosing_address_handler(
+	struct list_head *list, unsigned long long offset, size_t length)
 {
 	struct fw_address_handler *handler;
 
@@ -449,36 +441,44 @@ const struct fw_address_region fw_unit_s
 #endif  /*  0  */
 
 /**
- * Allocate a range of addresses in the node space of the OHCI
- * controller.  When a request is received that falls within the
- * specified address range, the specified callback is invoked.  The
- * parameters passed to the callback give the details of the
- * particular request.
+ * fw_core_add_address_handler - register for incoming requests
+ * @handler: callback
+ * @region: region in the IEEE 1212 node space address range
+ *
+ * region->start, ->end, and handler->length have to be quadlet-aligned.
+ *
+ * When a request is received that falls within the specified address range,
+ * the specified callback is invoked.  The parameters passed to the callback
+ * give the details of the particular request.
 *
 * Return value:  0 on success, non-zero otherwise.
 * The start offset of the handler's address region is determined by
 * fw_core_add_address_handler() and is returned in handler->offset.
- * The offset is quadlet-aligned.
 */
-int
-fw_core_add_address_handler(struct fw_address_handler *handler,
-			    const struct fw_address_region *region)
+int fw_core_add_address_handler(struct fw_address_handler *handler,
+				const struct fw_address_region *region)
 {
 	struct fw_address_handler *other;
 	unsigned long flags;
 	int ret = -EBUSY;
 
+	if (region->start & 0xffff000000000003ULL ||
+	    region->end   & 0xffff000000000003ULL ||
+	    region->start >= region->end ||
+	    handler->length & 3 ||
+	    handler->length == 0)
+		return -EINVAL;
+
 	spin_lock_irqsave(&address_handler_lock, flags);
 
-	handler->offset = roundup(region->start, 4);
+	handler->offset = region->start;
 	while (handler->offset + handler->length <= region->end) {
 		other =
 		    lookup_overlapping_address_handler(&address_handler_list,
						       handler->offset,
						       handler->length);
 		if (other != NULL) {
-			handler->offset =
-			    roundup(other->offset + other->length, 4);
+			handler->offset += other->length;
 		} else {
 			list_add_tail(&handler->link, &address_handler_list);
 			ret = 0;
@@ -493,12 +493,7 @@ fw_core_add_address_handler(struct fw_ad
 EXPORT_SYMBOL(fw_core_add_address_handler);
 
 /**
- * Deallocate a range of addresses allocated with fw_allocate.  This
- * will call the associated callback one last time with a the special
- * tcode TCODE_DEALLOCATE, to let the client destroy the registered
- * callback data.  For convenience, the callback parameters offset and
- * length are set to the start and the length respectively for the
- * deallocated region, payload is set to NULL.
+ * fw_core_remove_address_handler - unregister an address handler
 */
 void fw_core_remove_address_handler(struct fw_address_handler *handler)
 {
@@ -518,9 +513,8 @@ struct fw_request {
 	u32 data[0];
 };
 
-static void
-free_response_callback(struct fw_packet *packet,
-		       struct fw_card *card, int status)
+static void free_response_callback(struct fw_packet *packet,
+				   struct fw_card *card, int status)
 {
 	struct fw_request *request;
 
@@ -528,9 +522,8 @@ free_response_callback(struct fw_packet
 	kfree(request);
 }
 
-void
-fw_fill_response(struct fw_packet *response, u32 *request_header,
-		 int rcode, void *payload, size_t length)
+void fw_fill_response(struct fw_packet *response, u32 *request_header,
+		      int rcode, void *payload, size_t length)
 {
 	int tcode, tlabel, extended_tcode, source, destination;
 
@@ -588,8 +581,7 @@ fw_fill_response(struct fw_packet *respo
 }
 EXPORT_SYMBOL(fw_fill_response);
 
-static struct fw_request *
-allocate_request(struct fw_packet *p)
+static struct fw_request *allocate_request(struct fw_packet *p)
 {
 	struct fw_request *request;
 	u32 *data, length;
@@ -649,8 +641,8 @@ allocate_request(struct fw_packet *p)
 	return request;
 }
 
-void
-fw_send_response(struct fw_card *card, struct fw_request *request, int rcode)
+void fw_send_response(struct fw_card *card,
+		      struct fw_request *request, int rcode)
 {
 	/* unified transaction or broadcast transaction: don't respond */
 	if (request->ack != ACK_PENDING ||
@@ -670,8 +662,7 @@ fw_send_response(struct fw_card *card, s
 }
 EXPORT_SYMBOL(fw_send_response);
 
-void
-fw_core_handle_request(struct fw_card *card, struct fw_packet *p)
+void fw_core_handle_request(struct fw_card *card, struct fw_packet *p)
 {
 	struct fw_address_handler *handler;
 	struct fw_request *request;
@@ -719,8 +710,7 @@ fw_core_handle_request(struct fw_card *c
 }
 EXPORT_SYMBOL(fw_core_handle_request);
 
-void
-fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
+void fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
 {
 	struct fw_transaction *t;
 	unsigned long flags;
@@ -793,12 +783,10 @@ static const struct fw_address_region to
 	{ .start = CSR_REGISTER_BASE | CSR_TOPOLOGY_MAP,
 	  .end   = CSR_REGISTER_BASE | CSR_TOPOLOGY_MAP_END, };
 
-static void
-handle_topology_map(struct fw_card *card, struct fw_request *request,
-		    int tcode, int destination, int source,
-		    int generation, int speed,
-		    unsigned long long offset,
-		    void *payload, size_t length, void *callback_data)
+static void handle_topology_map(struct fw_card *card, struct fw_request *request,
+		int tcode, int destination, int source, int generation,
+		int speed, unsigned long long offset,
+		void *payload, size_t length, void *callback_data)
 {
 	int i, start, end;
 	__be32 *map;
@@ -832,12 +820,10 @@ static const struct fw_address_region re
 	{ .start = CSR_REGISTER_BASE,
 	  .end   = CSR_REGISTER_BASE | CSR_CONFIG_ROM, };
 
-static void
-handle_registers(struct fw_card *card, struct fw_request *request,
-		 int tcode, int destination, int source,
-		 int generation, int speed,
-		 unsigned long long offset,
-		 void *payload, size_t length, void *callback_data)
+static void handle_registers(struct fw_card *card, struct fw_request *request,
+		int tcode, int destination, int source, int generation,
+		int speed, unsigned long long offset,
+		void *payload, size_t length, void *callback_data)
 {
 	int reg = offset & ~CSR_REGISTER_BASE;
 	unsigned long long bus_time;
@@ -939,11 +925,11 @@ static struct fw_descriptor model_id_des
 
 static int __init fw_core_init(void)
 {
-	int retval;
+	int ret;
 
-	retval = bus_register(&fw_bus_type);
-	if (retval < 0)
-		return retval;
+	ret = bus_register(&fw_bus_type);
+	if (ret < 0)
+		return ret;
 
 	fw_cdev_major = register_chrdev(0, "firewire", &fw_device_ops);
 	if (fw_cdev_major < 0) {
@@ -951,19 +937,10 @@ static int __init fw_core_init(void)
 		return fw_cdev_major;
 	}
 
-	retval = fw_core_add_address_handler(&topology_map,
-					     &topology_map_region);
-	BUG_ON(retval < 0);
-
-	retval = fw_core_add_address_handler(&registers,
-					     &registers_region);
-	BUG_ON(retval < 0);
-
-	/* Add the vendor textual descriptor. */
-	retval = fw_core_add_descriptor(&vendor_id_descriptor);
-	BUG_ON(retval < 0);
-	retval = fw_core_add_descriptor(&model_id_descriptor);
-	BUG_ON(retval < 0);
+	fw_core_add_address_handler(&topology_map, &topology_map_region);
+	fw_core_add_address_handler(&registers, &registers_region);
+	fw_core_add_descriptor(&vendor_id_descriptor);
+	fw_core_add_descriptor(&model_id_descriptor);
 
 	return 0;
 }
diff -Naurp linux-2.6-git/drivers/firewire/fw-transaction.h firewire-git/drivers/firewire/fw-transaction.h
|
||
|
--- linux-2.6-git/drivers/firewire/fw-transaction.h 2009-01-30 13:39:02.992772636 -0500
|
||
|
+++ firewire-git/drivers/firewire/fw-transaction.h 2009-01-30 13:35:51.862647087 -0500
|
||
|
@@ -82,14 +82,14 @@
|
||
|
#define CSR_SPEED_MAP 0x2000
|
||
|
#define CSR_SPEED_MAP_END 0x3000
|
||
|
|
||
|
+#define BANDWIDTH_AVAILABLE_INITIAL 4915
|
||
|
#define BROADCAST_CHANNEL_INITIAL (1 << 31 | 31)
|
||
|
#define BROADCAST_CHANNEL_VALID (1 << 30)
|
||
|
|
||
|
#define fw_notify(s, args...) printk(KERN_NOTICE KBUILD_MODNAME ": " s, ## args)
|
||
|
#define fw_error(s, args...) printk(KERN_ERR KBUILD_MODNAME ": " s, ## args)
|
||
|
|
||
|
-static inline void
|
||
|
-fw_memcpy_from_be32(void *_dst, void *_src, size_t size)
|
||
|
+static inline void fw_memcpy_from_be32(void *_dst, void *_src, size_t size)
|
||
|
{
|
||
|
u32 *dst = _dst;
|
||
|
__be32 *src = _src;
|
||
|
@@ -99,8 +99,7 @@ fw_memcpy_from_be32(void *_dst, void *_s
|
||
|
dst[i] = be32_to_cpu(src[i]);
|
||
|
}
|
||
|
|
||
|
-static inline void
|
||
|
-fw_memcpy_to_be32(void *_dst, void *_src, size_t size)
|
||
|
+static inline void fw_memcpy_to_be32(void *_dst, void *_src, size_t size)
|
||
|
{
|
||
|
fw_memcpy_from_be32(_dst, _src, size);
|
||
|
}
|
||
|
@@ -125,8 +124,7 @@ typedef void (*fw_packet_callback_t)(str
|
||
|
struct fw_card *card, int status);
|
||
|
|
||
|
typedef void (*fw_transaction_callback_t)(struct fw_card *card, int rcode,
|
||
|
- void *data,
|
||
|
- size_t length,
|
||
|
+ void *data, size_t length,
|
||
|
void *callback_data);
|
||
|
|
||
|
/*
|
||
|
@@ -141,12 +139,6 @@ typedef void (*fw_address_callback_t)(st
|
||
|
void *data, size_t length,
|
||
|
void *callback_data);
|
||
|
|
||
|
-typedef void (*fw_bus_reset_callback_t)(struct fw_card *handle,
|
||
|
- int node_id, int generation,
|
||
|
- u32 *self_ids,
|
||
|
- int self_id_count,
|
||
|
- void *callback_data);
|
||
|
-
|
||
|
struct fw_packet {
|
||
|
int speed;
|
||
|
int generation;
|
||
|
@@ -187,12 +179,6 @@ struct fw_transaction {
|
||
|
void *callback_data;
|
||
|
};
|
||
|
|
||
|
-static inline struct fw_packet *
|
||
|
-fw_packet(struct list_head *l)
|
||
|
-{
|
||
|
- return list_entry(l, struct fw_packet, link);
|
||
|
-}
|
||
|
-
|
||
|
struct fw_address_handler {
|
||
|
u64 offset;
|
||
|
size_t length;
|
||
|
@@ -201,7 +187,6 @@ struct fw_address_handler {
|
||
|
struct list_head link;
|
||
|
};
|
||
|
|
||
|
-
|
||
|
struct fw_address_region {
|
||
|
u64 start;
|
||
|
u64 end;
|
||
|
@@ -315,10 +300,8 @@ struct fw_iso_packet {
|
||
|
struct fw_iso_context;
|
||
|
|
||
|
typedef void (*fw_iso_callback_t)(struct fw_iso_context *context,
|
||
|
- u32 cycle,
|
||
|
- size_t header_length,
|
||
|
- void *header,
|
||
|
- void *data);
|
||
|
+ u32 cycle, size_t header_length,
|
||
|
+ void *header, void *data);
|
||
|
|
||
|
/*
|
||
|
* An iso buffer is just a set of pages mapped for DMA in the
|
||
|
@@ -344,36 +327,25 @@ struct fw_iso_context {
|
||
|
void *callback_data;
|
||
|
};
|
||
|
|
||
|
-int
|
||
|
-fw_iso_buffer_init(struct fw_iso_buffer *buffer,
|
||
|
- struct fw_card *card,
|
||
|
- int page_count,
|
||
|
- enum dma_data_direction direction);
|
||
|
-int
|
||
|
-fw_iso_buffer_map(struct fw_iso_buffer *buffer, struct vm_area_struct *vma);
|
||
|
-void
|
||
|
-fw_iso_buffer_destroy(struct fw_iso_buffer *buffer, struct fw_card *card);
|
||
|
-
|
||
|
-struct fw_iso_context *
|
||
|
-fw_iso_context_create(struct fw_card *card, int type,
|
||
|
- int channel, int speed, size_t header_size,
|
||
|
- fw_iso_callback_t callback, void *callback_data);
|
||
|
-
|
||
|
-void
|
||
|
-fw_iso_context_destroy(struct fw_iso_context *ctx);
|
||
|
-
|
||
|
-int
|
||
|
-fw_iso_context_queue(struct fw_iso_context *ctx,
|
||
|
- struct fw_iso_packet *packet,
|
||
|
- struct fw_iso_buffer *buffer,
|
||
|
- unsigned long payload);
|
||
|
-
|
||
|
-int
|
||
|
-fw_iso_context_start(struct fw_iso_context *ctx,
|
||
|
- int cycle, int sync, int tags);
|
||
|
+int fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card,
|
||
|
+ int page_count, enum dma_data_direction direction);
|
||
|
+int fw_iso_buffer_map(struct fw_iso_buffer *buffer, struct vm_area_struct *vma);
|
||
|
+void fw_iso_buffer_destroy(struct fw_iso_buffer *buffer, struct fw_card *card);
|
||
|
+
|
||
|
+struct fw_iso_context *fw_iso_context_create(struct fw_card *card,
|
||
|
+ int type, int channel, int speed, size_t header_size,
|
||
|
+ fw_iso_callback_t callback, void *callback_data);
|
||
|
+int fw_iso_context_queue(struct fw_iso_context *ctx,
|
||
|
+ struct fw_iso_packet *packet,
|
||
|
+ struct fw_iso_buffer *buffer,
|
||
|
+ unsigned long payload);
|
||
|
+int fw_iso_context_start(struct fw_iso_context *ctx,
|
||
|
+ int cycle, int sync, int tags);
|
||
|
+int fw_iso_context_stop(struct fw_iso_context *ctx);
|
||
|
+void fw_iso_context_destroy(struct fw_iso_context *ctx);
|
||
|
|
||
|
-int
|
||
|
-fw_iso_context_stop(struct fw_iso_context *ctx);
|
||
|
+void fw_iso_resource_manage(struct fw_card *card, int generation,
|
||
|
+ u64 channels_mask, int *channel, int *bandwidth, bool allocate);
|
||
|
|
||
|
struct fw_card_driver {
|
||
|
/*
|
||
|
@@ -415,7 +387,7 @@ struct fw_card_driver {
|
||
|
|
||
|
struct fw_iso_context *
|
||
|
(*allocate_iso_context)(struct fw_card *card,
|
||
|
- int type, size_t header_size);
|
||
|
+ int type, int channel, size_t header_size);
|
||
|
void (*free_iso_context)(struct fw_iso_context *ctx);
|
||
|
|
||
|
int (*start_iso)(struct fw_iso_context *ctx,
|
||
|
@@ -429,24 +401,18 @@ struct fw_card_driver {
|
||
|
int (*stop_iso)(struct fw_iso_context *ctx);
|
||
|
};
|
||
|
|
||
|
-int
|
||
|
-fw_core_initiate_bus_reset(struct fw_card *card, int short_reset);
|
||
|
+int fw_core_initiate_bus_reset(struct fw_card *card, int short_reset);
|
||
|
|
||
|
-void
|
||
|
-fw_send_request(struct fw_card *card, struct fw_transaction *t,
|
||
|
+void fw_send_request(struct fw_card *card, struct fw_transaction *t,
|
||
|
int tcode, int destination_id, int generation, int speed,
|
||
|
unsigned long long offset, void *data, size_t length,
|
||
|
fw_transaction_callback_t callback, void *callback_data);
|
||
|
-
|
||
|
-int fw_run_transaction(struct fw_card *card, int tcode, int destination_id,
|
||
|
- int generation, int speed, unsigned long long offset,
|
||
|
- void *data, size_t length);
|
||
|
-
|
||
|
int fw_cancel_transaction(struct fw_card *card,
|
||
|
struct fw_transaction *transaction);
|
||
|
-
|
||
|
void fw_flush_transactions(struct fw_card *card);
|
||
|
-
|
||
|
+int fw_run_transaction(struct fw_card *card, int tcode, int destination_id,
|
||
|
+ int generation, int speed, unsigned long long offset,
|
||
|
+ void *data, size_t length);
|
||
|
void fw_send_phy_config(struct fw_card *card,
|
||
|
int node_id, int generation, int gap_count);
|
||
|
|
||
|
@@ -454,29 +420,18 @@ void fw_send_phy_config(struct fw_card *
|
||
|
* Called by the topology code to inform the device code of node
|
||
|
* activity; found, lost, or updated nodes.
|
||
|
*/
|
||
|
-void
|
||
|
-fw_node_event(struct fw_card *card, struct fw_node *node, int event);
|
||
|
+void fw_node_event(struct fw_card *card, struct fw_node *node, int event);
|
||
|
|
||
|
/* API used by card level drivers */
|
||
|
|
||
|
-void
|
||
|
-fw_card_initialize(struct fw_card *card, const struct fw_card_driver *driver,
|
||
|
- struct device *device);
|
||
|
-int
|
||
|
-fw_card_add(struct fw_card *card,
|
||
|
- u32 max_receive, u32 link_speed, u64 guid);
|
||
|
-
|
||
|
-void
|
||
|
-fw_core_remove_card(struct fw_card *card);
|
||
|
-
|
||
|
-void
|
||
|
-fw_core_handle_bus_reset(struct fw_card *card,
|
||
|
- int node_id, int generation,
|
||
|
- int self_id_count, u32 *self_ids);
|
||
|
-void
|
||
|
-fw_core_handle_request(struct fw_card *card, struct fw_packet *request);
|
||
|
-
|
||
|
-void
|
||
|
-fw_core_handle_response(struct fw_card *card, struct fw_packet *packet);
|
||
|
+void fw_card_initialize(struct fw_card *card,
|
||
|
+ const struct fw_card_driver *driver, struct device *device);
|
||
|
+int fw_card_add(struct fw_card *card,
|
||
|
+ u32 max_receive, u32 link_speed, u64 guid);
|
||
|
+void fw_core_remove_card(struct fw_card *card);
|
||
|
+void fw_core_handle_bus_reset(struct fw_card *card, int node_id,
|
||
|
+ int generation, int self_id_count, u32 *self_ids);
|
||
|
+void fw_core_handle_request(struct fw_card *card, struct fw_packet *request);
|
||
|
+void fw_core_handle_response(struct fw_card *card, struct fw_packet *packet);
|
||
|
|
||
|
#endif /* __fw_transaction_h */
|
||
|
--- linux-2.6-git/include/linux/firewire-cdev.h 2008-11-04 11:19:21.000000000 -0500
|
||
|
+++ firewire-git/include/linux/firewire-cdev.h 2009-01-30 13:35:54.327647015 -0500
|
||
|
@@ -25,10 +25,12 @@
|
||
|
#include <linux/types.h>
|
||
|
#include <linux/firewire-constants.h>
|
||
|
|
||
|
-#define FW_CDEV_EVENT_BUS_RESET 0x00
|
||
|
-#define FW_CDEV_EVENT_RESPONSE 0x01
|
||
|
-#define FW_CDEV_EVENT_REQUEST 0x02
|
||
|
-#define FW_CDEV_EVENT_ISO_INTERRUPT 0x03
|
||
|
+#define FW_CDEV_EVENT_BUS_RESET 0x00
|
||
|
+#define FW_CDEV_EVENT_RESPONSE 0x01
|
||
|
+#define FW_CDEV_EVENT_REQUEST 0x02
|
||
|
+#define FW_CDEV_EVENT_ISO_INTERRUPT 0x03
|
||
|
+#define FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED 0x04
|
||
|
+#define FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED 0x05
|
||
|
|
||
|
/**
|
||
|
* struct fw_cdev_event_common - Common part of all fw_cdev_event_ types
|
||
|
@@ -136,7 +138,24 @@ struct fw_cdev_event_request {
|
||
|
* This event is sent when the controller has completed an &fw_cdev_iso_packet
|
||
|
* with the %FW_CDEV_ISO_INTERRUPT bit set. In the receive case, the headers
|
||
|
* stripped of all packets up until and including the interrupt packet are
|
||
|
- * returned in the @header field.
|
||
|
+ * returned in the @header field. The amount of header data per packet is as
|
||
|
+ * specified at iso context creation by &fw_cdev_create_iso_context.header_size.
|
||
|
+ *
|
||
|
+ * In version 1 of this ABI, header data consisted of the 1394 isochronous
|
||
|
+ * packet header, followed by quadlets from the packet payload if
|
||
|
+ * &fw_cdev_create_iso_context.header_size > 4.
|
||
|
+ *
|
||
|
+ * In version 2 of this ABI, header data consist of the 1394 isochronous
|
||
|
+ * packet header, followed by a timestamp quadlet if
|
||
|
+ * &fw_cdev_create_iso_context.header_size > 4, followed by quadlets from the
|
||
|
+ * packet payload if &fw_cdev_create_iso_context.header_size > 8.
|
||
|
+ *
|
||
|
+ * Behaviour of ver. 1 of this ABI is no longer available since ABI ver. 2.
|
||
|
+ *
|
||
|
+ * Format of 1394 iso packet header: 16 bits len, 2 bits tag, 6 bits channel,
|
||
|
+ * 4 bits tcode, 4 bits sy, in big endian byte order. Format of timestamp:
|
||
|
+ * 16 bits invalid, 3 bits cycleSeconds, 13 bits cycleCount, in big endian byte
|
||
|
+ * order.
|
||
|
*/
|
||
|
struct fw_cdev_event_iso_interrupt {
|
||
|
__u64 closure;
|
||
|
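
To make the version 2 header layout above concrete, a receive-side client could unpack the per-packet header data roughly as follows. This is a minimal sketch, not part of the patch: it assumes an iso context created with header_size = 8 (one stripped packet header quadlet plus one timestamp quadlet per packet), and the handler name is hypothetical.

#include <arpa/inet.h>			/* ntohl() */
#include <linux/firewire-cdev.h>

/* Sketch: walk the header data of one iso interrupt event, assuming
 * header_size = 8, i.e. 8 bytes of header per received packet. */
static void handle_iso_interrupt(const struct fw_cdev_event_iso_interrupt *e)
{
	__u32 i, npackets = e->header_length / 8;

	for (i = 0; i < npackets; i++) {
		__u32 hdr = ntohl(e->header[2 * i]);		/* big endian on the bus */
		__u32 ts  = ntohl(e->header[2 * i + 1]);

		unsigned int data_length = hdr >> 16;		/* 16 bits len	   */
		unsigned int tag	 = (hdr >> 14) & 0x3;	/*  2 bits tag	   */
		unsigned int channel	 = (hdr >> 8) & 0x3f;	/*  6 bits channel */
		unsigned int sy		 = hdr & 0xf;		/*  4 bits sy	   */
		unsigned int cycle_count = ts & 0x1fff;		/* 13 bits cycleCount */

		/* ... hand data_length, tag, channel, sy, cycle_count to the app ... */
		(void)data_length; (void)tag; (void)channel; (void)sy; (void)cycle_count;
	}
}
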
@@ -147,12 +166,44 @@ struct fw_cdev_event_iso_interrupt {
 };
 
 /**
+ * struct fw_cdev_event_iso_resource - Iso resources were allocated or freed
+ * @closure: See &fw_cdev_event_common;
+ *	     set by %FW_CDEV_IOC_(DE)ALLOCATE_ISO_RESOURCE(_ONCE) ioctl
+ * @type: %FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED or
+ *	  %FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED
+ * @handle: Reference by which an allocated resource can be deallocated
+ * @channel: Isochronous channel which was (de)allocated, if any
+ * @bandwidth: Bandwidth allocation units which were (de)allocated, if any
+ *
+ * An %FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED event is sent after an isochronous
+ * resource was allocated at the IRM.  The client has to check @channel and
+ * @bandwidth for whether the allocation actually succeeded.
+ *
+ * An %FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED event is sent after an isochronous
+ * resource was deallocated at the IRM.  It is also sent when automatic
+ * reallocation after a bus reset failed.
+ *
+ * @channel is <0 if no channel was (de)allocated or if reallocation failed.
+ * @bandwidth is 0 if no bandwidth was (de)allocated or if reallocation failed.
+ */
+struct fw_cdev_event_iso_resource {
+	__u64 closure;
+	__u32 type;
+	__u32 handle;
+	__s32 channel;
+	__s32 bandwidth;
+};
+
+/**
  * union fw_cdev_event - Convenience union of fw_cdev_event_ types
  * @common: Valid for all types
  * @bus_reset: Valid if @common.type == %FW_CDEV_EVENT_BUS_RESET
  * @response: Valid if @common.type == %FW_CDEV_EVENT_RESPONSE
  * @request: Valid if @common.type == %FW_CDEV_EVENT_REQUEST
  * @iso_interrupt: Valid if @common.type == %FW_CDEV_EVENT_ISO_INTERRUPT
+ * @iso_resource: Valid if @common.type ==
+ *				%FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED or
+ *				%FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED
  *
  * Convenience union for userspace use.  Events could be read(2) into an
  * appropriately aligned char buffer and then cast to this union for further
@@ -163,13 +214,15 @@ struct fw_cdev_event_iso_interrupt {
  * not fit will be discarded so that the next read(2) will return a new event.
  */
 union fw_cdev_event {
-	struct fw_cdev_event_common common;
-	struct fw_cdev_event_bus_reset bus_reset;
-	struct fw_cdev_event_response response;
-	struct fw_cdev_event_request request;
-	struct fw_cdev_event_iso_interrupt iso_interrupt;
+	struct fw_cdev_event_common		common;
+	struct fw_cdev_event_bus_reset		bus_reset;
+	struct fw_cdev_event_response		response;
+	struct fw_cdev_event_request		request;
+	struct fw_cdev_event_iso_interrupt	iso_interrupt;
+	struct fw_cdev_event_iso_resource	iso_resource;
 };
 
+/* available since kernel version 2.6.22 */
 #define FW_CDEV_IOC_GET_INFO		_IOWR('#', 0x00, struct fw_cdev_get_info)
 #define FW_CDEV_IOC_SEND_REQUEST	_IOW('#', 0x01, struct fw_cdev_send_request)
 #define FW_CDEV_IOC_ALLOCATE		_IOWR('#', 0x02, struct fw_cdev_allocate)
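
The read(2)-and-cast pattern described in the union's documentation could look roughly like this in a client. A minimal sketch, not part of the patch; fd is assumed to be an open /dev/fw* character device and the dispatch cases are left empty.

#include <unistd.h>
#include <linux/firewire-cdev.h>

/* Sketch: read one event into an aligned buffer large enough for trailing
 * iso header data, then dispatch on the common type field. */
static int dispatch_one_event(int fd)
{
	char buf[16 * 1024] __attribute__((aligned(8)));
	union fw_cdev_event *e = (union fw_cdev_event *)buf;
	ssize_t len = read(fd, buf, sizeof(buf));

	if (len < 0)
		return -1;

	switch (e->common.type) {
	case FW_CDEV_EVENT_BUS_RESET:
		/* e->bus_reset is valid */
		break;
	case FW_CDEV_EVENT_ISO_INTERRUPT:
		/* e->iso_interrupt is valid; header data follows the struct */
		break;
	case FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED:
	case FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED:
		/* e->iso_resource is valid */
		break;
	default:
		break;
	}
	return 0;
}
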
@@ -178,18 +231,29 @@ union fw_cdev_event {
 #define FW_CDEV_IOC_INITIATE_BUS_RESET	_IOW('#', 0x05, struct fw_cdev_initiate_bus_reset)
 #define FW_CDEV_IOC_ADD_DESCRIPTOR	_IOWR('#', 0x06, struct fw_cdev_add_descriptor)
 #define FW_CDEV_IOC_REMOVE_DESCRIPTOR	_IOW('#', 0x07, struct fw_cdev_remove_descriptor)
-
 #define FW_CDEV_IOC_CREATE_ISO_CONTEXT	_IOWR('#', 0x08, struct fw_cdev_create_iso_context)
 #define FW_CDEV_IOC_QUEUE_ISO		_IOWR('#', 0x09, struct fw_cdev_queue_iso)
 #define FW_CDEV_IOC_START_ISO		_IOW('#', 0x0a, struct fw_cdev_start_iso)
 #define FW_CDEV_IOC_STOP_ISO		_IOW('#', 0x0b, struct fw_cdev_stop_iso)
+
+/* available since kernel version 2.6.24 */
 #define FW_CDEV_IOC_GET_CYCLE_TIMER	_IOR('#', 0x0c, struct fw_cdev_get_cycle_timer)
 
-/* FW_CDEV_VERSION History
- *
- * 1	Feb 18, 2007:  Initial version.
+/* available since kernel version 2.6.30 */
+#define FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE	_IOWR('#', 0x0d, struct fw_cdev_allocate_iso_resource)
+#define FW_CDEV_IOC_DEALLOCATE_ISO_RESOURCE	_IOW('#', 0x0e, struct fw_cdev_deallocate)
+#define FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE_ONCE	_IOW('#', 0x0f, struct fw_cdev_allocate_iso_resource)
+#define FW_CDEV_IOC_DEALLOCATE_ISO_RESOURCE_ONCE _IOW('#', 0x10, struct fw_cdev_allocate_iso_resource)
+#define FW_CDEV_IOC_GET_SPEED			_IOR('#', 0x11, struct fw_cdev_get_speed)
+#define FW_CDEV_IOC_SEND_BROADCAST_REQUEST	_IOW('#', 0x12, struct fw_cdev_send_request)
+
+/*
+ * FW_CDEV_VERSION History
+ *  1  (2.6.22)  - initial version
+ *  2  (2.6.30)  - changed &fw_cdev_event_iso_interrupt.header if
+ *                 &fw_cdev_create_iso_context.header_size is 8 or more
  */
-#define FW_CDEV_VERSION 1
+#define FW_CDEV_VERSION 2
 
 /**
  * struct fw_cdev_get_info - General purpose information ioctl
@@ -201,7 +265,7 @@ union fw_cdev_event {
  * case, @rom_length is updated with the actual length of the
  * configuration ROM.
  * @rom: If non-zero, address of a buffer to be filled by a copy of the
- *       local node's configuration ROM
+ *       device's configuration ROM
  * @bus_reset: If non-zero, address of a buffer to be filled by a
  *             &struct fw_cdev_event_bus_reset with the current state
  *             of the bus.  This does not cause a bus reset to happen.
@@ -229,7 +293,7 @@ struct fw_cdev_get_info {
  * Send a request to the device.  This ioctl implements all outgoing requests.
  * Both quadlet and block request specify the payload as a pointer to the data
 * in the @data field.  Once the transaction completes, the kernel writes an
- * &fw_cdev_event_request event back.  The @closure field is passed back to
+ * &fw_cdev_event_response event back.  The @closure field is passed back to
 * user space in the response event.
 */
 struct fw_cdev_send_request {
@@ -284,9 +348,9 @@ struct fw_cdev_allocate {
 };
 
 /**
- * struct fw_cdev_deallocate - Free an address range allocation
- * @handle: Handle to the address range, as returned by the kernel when the
- *	    range was allocated
+ * struct fw_cdev_deallocate - Free a CSR address range or isochronous resource
+ * @handle: Handle to the address range or iso resource, as returned by the
+ *	    kernel when the range or resource was allocated
 */
 struct fw_cdev_deallocate {
 	__u32 handle;
@@ -370,6 +434,9 @@ struct fw_cdev_remove_descriptor {
 *
 * If a context was successfully created, the kernel writes back a handle to the
 * context, which must be passed in for subsequent operations on that context.
+ *
+ * Note that the effect of a @header_size > 4 depends on
+ * &fw_cdev_get_info.version, as documented at &fw_cdev_event_iso_interrupt.
 */
 struct fw_cdev_create_iso_context {
 	__u32 type;
@@ -473,10 +540,73 @@ struct fw_cdev_stop_iso {
 * The %FW_CDEV_IOC_GET_CYCLE_TIMER ioctl reads the isochronous cycle timer
 * and also the system clock.  This allows to express the receive time of an
 * isochronous packet as a system time with microsecond accuracy.
+ *
+ * @cycle_timer consists of 7 bits cycleSeconds, 13 bits cycleCount, and
+ * 12 bits cycleOffset, in host byte order.
 */
 struct fw_cdev_get_cycle_timer {
	__u64 local_time;
	__u32 cycle_timer;
 };
 
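As a small illustration of the 7/13/12 bit split documented above, a caller of %FW_CDEV_IOC_GET_CYCLE_TIMER might decode @cycle_timer like this. A minimal sketch under those assumptions; the helper name is hypothetical and error handling is reduced to a bare return.

#include <sys/ioctl.h>
#include <linux/firewire-cdev.h>

/* Sketch: read the cycle timer and split it into its three fields.
 * cycle_timer, in host byte order: bits [31:25] cycleSeconds,
 * [24:12] cycleCount, [11:0] cycleOffset. */
static int read_cycle_timer(int fd)
{
	struct fw_cdev_get_cycle_timer ctr;

	if (ioctl(fd, FW_CDEV_IOC_GET_CYCLE_TIMER, &ctr) < 0)
		return -1;

	unsigned int cycle_seconds = (ctr.cycle_timer >> 25) & 0x7f;	/*  7 bits */
	unsigned int cycle_count   = (ctr.cycle_timer >> 12) & 0x1fff;	/* 13 bits */
	unsigned int cycle_offset  =  ctr.cycle_timer & 0xfff;		/* 12 bits */

	/* ctr.local_time holds the system time sampled with the cycle timer,
	 * with microsecond accuracy as described above. */
	(void)cycle_seconds; (void)cycle_count; (void)cycle_offset;
	return 0;
}
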
+/**
+ * struct fw_cdev_allocate_iso_resource - (De)allocate a channel or bandwidth
+ * @closure: Passed back to userspace in correponding iso resource events
+ * @channels: Isochronous channels of which one is to be (de)allocated
+ * @bandwidth: Isochronous bandwidth units to be (de)allocated
+ * @handle: Handle to the allocation, written by the kernel (only valid in
+ *	    case of %FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE ioctls)
+ *
+ * The %FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE ioctl initiates allocation of an
+ * isochronous channel and/or of isochronous bandwidth at the isochronous
+ * resource manager (IRM).  Only one of the channels specified in @channels is
+ * allocated.  An %FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED is sent after
+ * communication with the IRM, indicating success or failure in the event data.
+ * The kernel will automatically reallocate the resources after bus resets.
+ * Should a reallocation fail, an %FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED event
+ * will be sent.  The kernel will also automatically deallocate the resources
+ * when the file descriptor is closed.
+ *
+ * The %FW_CDEV_IOC_DEALLOCATE_ISO_RESOURCE ioctl can be used to initiate
+ * deallocation of resources which were allocated as described above.
+ * An %FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED event concludes this operation.
+ *
+ * The %FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE_ONCE ioctl is a variant of allocation
+ * without automatic re- or deallocation.
+ * An %FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED event concludes this operation,
+ * indicating success or failure in its data.
+ *
+ * The %FW_CDEV_IOC_DEALLOCATE_ISO_RESOURCE_ONCE ioctl works like
+ * %FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE_ONCE except that resources are freed
+ * instead of allocated.
+ * An %FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED event concludes this operation.
+ *
+ * To summarize, %FW_CDEV_IOC_DEALLOCATE_ISO_RESOURCE allocates iso resources
+ * for the lifetime of the fd or handle.
+ * In contrast, %FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE_ONCE allocates iso resources
+ * for the duration of a bus generation.
+ *
+ * @channels is a host-endian bitfield with the least significant bit
+ * representing channel 0 and the most significant bit representing channel 63:
+ * 1ULL << c for each channel c that is a candidate for (de)allocation.
+ *
+ * @bandwidth is expressed in bandwidth allocation units, i.e. the time to send
+ * one quadlet of data (payload or header data) at speed S1600.
+ */
+struct fw_cdev_allocate_iso_resource {
+	__u64 closure;
+	__u64 channels;
+	__u32 bandwidth;
+	__u32 handle;
+};
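
A rough usage sketch of the allocation interface documented above, not part of the patch: it assumes an open /dev/fw* file descriptor, asks for one channel out of 0..7 plus an arbitrary example amount of bandwidth, and skips unrelated events while waiting for the confirmation.

#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/firewire-cdev.h>

/* Sketch: request an iso channel and bandwidth at the IRM, then wait for
 * the confirmation event.  Error handling and the surrounding event loop
 * are elided. */
static int allocate_iso_resource(int fd)
{
	struct fw_cdev_allocate_iso_resource req = {
		.closure   = 0,
		.channels  = 0xffULL,	/* any of channels 0..7: 1ULL << c */
		.bandwidth = 2048,	/* example value, in allocation units */
	};
	char buf[4096] __attribute__((aligned(8)));
	union fw_cdev_event *e = (union fw_cdev_event *)buf;

	if (ioctl(fd, FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE, &req) < 0)
		return -1;

	/* The result arrives asynchronously as an event. */
	do {
		if (read(fd, buf, sizeof(buf)) < 0)
			return -1;
	} while (e->common.type != FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED);

	if (e->iso_resource.channel < 0 || e->iso_resource.bandwidth == 0)
		return -1;	/* allocation failed at the IRM */

	/* req.handle can later be passed to FW_CDEV_IOC_DEALLOCATE_ISO_RESOURCE
	 * via struct fw_cdev_deallocate. */
	return 0;
}
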
+
+/**
+ * struct fw_cdev_get_speed - Query maximum speed to or from this device
+ * @max_speed: Speed code; minimum of the device's link speed, the local node's
+ *	       link speed, and all PHY port speeds between the two links
+ */
+struct fw_cdev_get_speed {
+	__u32 max_speed;
+};
+
 #endif /* _LINUX_FIREWIRE_CDEV_H */