Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/ieee1394/linux1394-2.6

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/ieee1394/linux1394-2.6: (31 commits)
  firewire: fw-sbp2: fix DMA mapping of management ORBs
  firewire: fw-sbp2: fix DMA mapping of command ORBs
  firewire: fw-sbp2: fix DMA mapping of S/G tables
  firewire: fw-sbp2: add a boundary check
  firewire: fw-sbp2: correctly align page tables
  firewire: fw-sbp2: memset wants string.h
  firewire: fw-sbp2: use correct speed in sbp2_agent_reset
  firewire: fw-sbp2: correctly dereference by container_of
  firewire: Document userspace ioctl interface.
  firewire: fw-sbp2: implement nonexclusive login
  firewire: fw-sbp2: let SCSI shutdown commands through before logout
  firewire: fw-sbp2: implement max sectors limit for some old bridges
  firewire: simplify a struct type
  firewire: support S100B...S400B and link slower than PHY
  firewire: optimize gap count with 1394b leaf nodes
  firewire: remove unused macro
  firewire: missing newline in printk
  firewire: fw-sbp2: remove unused struct member
  ieee1394: remove old isochronous ABI
  ieee1394: sbp2: change some module parameters from int to bool
  ...
Linus Torvalds 2007-07-09 15:50:56 -07:00
Parents: 36b774102e 7aa484815f
Commit: 71780f59e1
33 changed files with 745 additions and 883 deletions

View file

@ -0,0 +1,16 @@
What: legacy isochronous ABI of raw1394 (1st generation iso ABI)
Date: June 2007 (scheduled), removed in kernel v2.6.23
Contact: linux1394-devel@lists.sourceforge.net
Description:
The two request types RAW1394_REQ_ISO_SEND, RAW1394_REQ_ISO_LISTEN have
been deprecated for quite some time. They are very inefficient as they
come with high interrupt load and several layers of callbacks for each
packet. Because of these deficiencies, the video1394 and dv1394 drivers
and the 3rd-generation isochronous ABI in raw1394 (rawiso) were created.
Users:
libraw1394 users via the long deprecated API raw1394_iso_write,
raw1394_start_iso_write, raw1394_start_iso_rcv, raw1394_stop_iso_rcv
libdc1394, which optionally uses these old libraw1394 calls
alternatively to the more efficient video1394 ABI
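For orientation, a minimal sketch of what migration away from this ABI looks like in user space, assuming libraw1394's 3rd-generation rawiso interface (raw1394_iso_recv_init, raw1394_iso_recv_start, raw1394_loop_iterate); this is an illustration under that assumption, not part of the commit:

/* Hypothetical receive loop replacing RAW1394_REQ_ISO_LISTEN;
 * error handling trimmed for brevity. */
#include <libraw1394/raw1394.h>

static enum raw1394_iso_disposition
recv_cb(raw1394handle_t h, unsigned char *data, unsigned int len,
        unsigned char channel, unsigned char tag, unsigned char sy,
        unsigned int cycle, unsigned int dropped)
{
        /* Called per buffered packet, without one interrupt per packet. */
        return RAW1394_ISO_OK;
}

int listen_on_channel(raw1394handle_t h, unsigned char channel)
{
        /* 64 packets buffered, 2048 bytes each, IRQs batched by the core. */
        if (raw1394_iso_recv_init(h, recv_cb, 64, 2048, channel,
                                  RAW1394_DMA_BUFFERFILL, -1) < 0)
                return -1;
        if (raw1394_iso_recv_start(h, -1, -1, 0) < 0)
                return -1;
        while (raw1394_loop_iterate(h) == 0)
                ;
        return 0;
}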

View file

@ -49,16 +49,6 @@ Who: Adrian Bunk <bunk@stusta.de>
---------------------------
What: raw1394: requests of type RAW1394_REQ_ISO_SEND, RAW1394_REQ_ISO_LISTEN
When: June 2007
Why: Deprecated in favour of the more efficient and robust rawiso interface.
Affected are applications which use the deprecated part of libraw1394
(raw1394_iso_write, raw1394_start_iso_write, raw1394_start_iso_rcv,
raw1394_stop_iso_rcv) or bypass libraw1394.
Who: Dan Dennedy <dan@dennedy.org>, Stefan Richter <stefanr@s5r6.in-berlin.de>
---------------------------
What: old NCR53C9x driver
When: October 2007
Why: Replaced by the much better esp_scsi driver. Actual low-level

View file

@ -336,8 +336,11 @@ fw_card_bm_work(struct work_struct *work)
}
pick_me:
/* Now figure out what gap count to set. */
if (card->topology_type == FW_TOPOLOGY_A &&
/*
* Pick a gap count from 1394a table E-1. The table doesn't cover
* the typically much larger 1394b beta repeater delays though.
*/
if (!card->beta_repeaters_present &&
card->root_node->max_hops < ARRAY_SIZE(gap_count_table))
gap_count = gap_count_table[card->root_node->max_hops];
else

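To make the lookup concrete, a sketch of the whole decision; the table values are assumed from memory of this driver (ultimately IEEE 1394a table E-1, indexed by worst-case hop count) and should be checked against the spec rather than read as a quotation of the patch:

static const int gap_count_table[] = {
        63, 5, 7, 8, 10, 13, 16, 18, 21, 24, 26, 29,
        32, 35, 37, 40, 43, 46, 48, 51, 54, 57, 59, 62
};

static int pick_gap_count(int max_hops, int beta_repeaters_present)
{
        /* Fall back to the maximum (63) whenever the table does not
         * apply: unknown 1394b repeater delays, or more hops than the
         * table covers. */
        if (beta_repeaters_present ||
            max_hops >= (int)(sizeof(gap_count_table) /
                              sizeof(gap_count_table[0])))
                return 63;
        return gap_count_table[max_hops];
}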
View file

@ -397,7 +397,7 @@ static int ioctl_send_request(struct client *client, void *buffer)
request->tcode & 0x1f,
device->node->node_id,
request->generation,
device->node->max_speed,
device->max_speed,
request->offset,
response->response.data, request->length,
complete_transaction, response);

View file

@ -401,8 +401,7 @@ static int read_rom(struct fw_device *device, int index, u32 * data)
offset = 0xfffff0000400ULL + index * 4;
fw_send_request(device->card, &t, TCODE_READ_QUADLET_REQUEST,
device->node_id,
device->generation, SCODE_100,
device->node_id, device->generation, device->max_speed,
offset, NULL, 4, complete_transaction, &callback_data);
wait_for_completion(&callback_data.done);
@ -418,6 +417,8 @@ static int read_bus_info_block(struct fw_device *device)
u32 stack[16], sp, key;
int i, end, length;
device->max_speed = SCODE_100;
/* First read the bus info block. */
for (i = 0; i < 5; i++) {
if (read_rom(device, i, &rom[i]) != RCODE_COMPLETE)
@ -434,6 +435,33 @@ static int read_bus_info_block(struct fw_device *device)
return -1;
}
device->max_speed = device->node->max_speed;
/*
* Determine the speed of
* - devices with link speed less than PHY speed,
* - devices with 1394b PHY (unless only connected to 1394a PHYs),
* - all devices if there are 1394b repeaters.
* Note, we cannot use the bus info block's link_spd as starting point
* because some buggy firmwares set it lower than necessary and because
* 1394-1995 nodes do not have the field.
*/
if ((rom[2] & 0x7) < device->max_speed ||
device->max_speed == SCODE_BETA ||
device->card->beta_repeaters_present) {
u32 dummy;
/* for S1600 and S3200 */
if (device->max_speed == SCODE_BETA)
device->max_speed = device->card->link_speed;
while (device->max_speed > SCODE_100) {
if (read_rom(device, 0, &dummy) == RCODE_COMPLETE)
break;
device->max_speed--;
}
}
/*
* Now parse the config rom. The config rom is a recursive
* directory structure so we parse it using a stack of
@ -680,8 +708,10 @@ static void fw_device_init(struct work_struct *work)
FW_DEVICE_RUNNING) == FW_DEVICE_SHUTDOWN)
fw_device_shutdown(&device->work.work);
else
fw_notify("created new fw device %s (%d config rom retries)\n",
device->device.bus_id, device->config_rom_retries);
fw_notify("created new fw device %s "
"(%d config rom retries, S%d00)\n",
device->device.bus_id, device->config_rom_retries,
1 << device->max_speed);
/*
* Reschedule the IRM work if we just finished reading the

View file

@ -40,6 +40,7 @@ struct fw_device {
struct fw_node *node;
int node_id;
int generation;
unsigned max_speed;
struct fw_card *card;
struct device device;
struct list_head link;

View file

@ -1934,12 +1934,12 @@ static int pci_suspend(struct pci_dev *pdev, pm_message_t state)
free_irq(pdev->irq, ohci);
err = pci_save_state(pdev);
if (err) {
fw_error("pci_save_state failed with %d", err);
fw_error("pci_save_state failed\n");
return err;
}
err = pci_set_power_state(pdev, pci_choose_state(pdev, state));
if (err) {
fw_error("pci_set_power_state failed with %d", err);
fw_error("pci_set_power_state failed\n");
return err;
}
@ -1955,7 +1955,7 @@ static int pci_resume(struct pci_dev *pdev)
pci_restore_state(pdev);
err = pci_enable_device(pdev);
if (err) {
fw_error("pci_enable_device failed with %d", err);
fw_error("pci_enable_device failed\n");
return err;
}

View file

@ -30,10 +30,13 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mod_devicetable.h>
#include <linux/device.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/blkdev.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <scsi/scsi.h>
@ -46,6 +49,18 @@
#include "fw-topology.h"
#include "fw-device.h"
/*
* So far only bridges from Oxford Semiconductor are known to support
* concurrent logins. Depending on firmware, four or two concurrent logins
* are possible on OXFW911 and newer Oxsemi bridges.
*
* Concurrent logins are useful together with cluster filesystems.
*/
static int sbp2_param_exclusive_login = 1;
module_param_named(exclusive_login, sbp2_param_exclusive_login, bool, 0644);
MODULE_PARM_DESC(exclusive_login, "Exclusive login to sbp2 device "
"(default = Y, use N for concurrent initiators)");
/* I don't know why the SCSI stack doesn't define something like this... */
typedef void (*scsi_done_fn_t)(struct scsi_cmnd *);
@ -154,7 +169,7 @@ struct sbp2_orb {
#define MANAGEMENT_ORB_LUN(v) ((v))
#define MANAGEMENT_ORB_FUNCTION(v) ((v) << 16)
#define MANAGEMENT_ORB_RECONNECT(v) ((v) << 20)
#define MANAGEMENT_ORB_EXCLUSIVE ((1) << 28)
#define MANAGEMENT_ORB_EXCLUSIVE(v) ((v) ? 1 << 28 : 0)
#define MANAGEMENT_ORB_REQUEST_FORMAT(v) ((v) << 29)
#define MANAGEMENT_ORB_NOTIFY ((1) << 31)
@ -205,9 +220,8 @@ struct sbp2_command_orb {
scsi_done_fn_t done;
struct fw_unit *unit;
struct sbp2_pointer page_table[SG_ALL];
struct sbp2_pointer page_table[SG_ALL] __attribute__((aligned(8)));
dma_addr_t page_table_bus;
dma_addr_t request_buffer_bus;
};
/*
@ -347,8 +361,7 @@ sbp2_send_orb(struct sbp2_orb *orb, struct fw_unit *unit,
spin_unlock_irqrestore(&device->card->lock, flags);
fw_send_request(device->card, &orb->t, TCODE_WRITE_BLOCK_REQUEST,
node_id, generation,
device->node->max_speed, offset,
node_id, generation, device->max_speed, offset,
&orb->pointer, sizeof(orb->pointer),
complete_transaction, orb);
}
@ -383,7 +396,7 @@ static void
complete_management_orb(struct sbp2_orb *base_orb, struct sbp2_status *status)
{
struct sbp2_management_orb *orb =
(struct sbp2_management_orb *)base_orb;
container_of(base_orb, struct sbp2_management_orb, base);
if (status)
memcpy(&orb->status, status, sizeof(*status));
@ -403,21 +416,11 @@ sbp2_send_management_orb(struct fw_unit *unit, int node_id, int generation,
if (orb == NULL)
return -ENOMEM;
/*
* The sbp2 device is going to send a block read request to
* read out the request from host memory, so map it for dma.
*/
orb->base.request_bus =
dma_map_single(device->card->device, &orb->request,
sizeof(orb->request), DMA_TO_DEVICE);
if (dma_mapping_error(orb->base.request_bus))
goto out;
orb->response_bus =
dma_map_single(device->card->device, &orb->response,
sizeof(orb->response), DMA_FROM_DEVICE);
if (dma_mapping_error(orb->response_bus))
goto out;
goto fail_mapping_response;
orb->request.response.high = 0;
orb->request.response.low = orb->response_bus;
@ -432,14 +435,9 @@ sbp2_send_management_orb(struct fw_unit *unit, int node_id, int generation,
orb->request.status_fifo.high = sd->address_handler.offset >> 32;
orb->request.status_fifo.low = sd->address_handler.offset;
/*
* FIXME: Yeah, ok this isn't elegant, we hardwire exclusive
* login and 1 second reconnect time. The reconnect setting
* is probably fine, but the exclusive login should be an option.
*/
if (function == SBP2_LOGIN_REQUEST) {
orb->request.misc |=
MANAGEMENT_ORB_EXCLUSIVE |
MANAGEMENT_ORB_EXCLUSIVE(sbp2_param_exclusive_login) |
MANAGEMENT_ORB_RECONNECT(0);
}
@ -448,6 +446,12 @@ sbp2_send_management_orb(struct fw_unit *unit, int node_id, int generation,
init_completion(&orb->done);
orb->base.callback = complete_management_orb;
orb->base.request_bus =
dma_map_single(device->card->device, &orb->request,
sizeof(orb->request), DMA_TO_DEVICE);
if (dma_mapping_error(orb->base.request_bus))
goto fail_mapping_request;
sbp2_send_orb(&orb->base, unit,
node_id, generation, sd->management_agent_address);
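This reordering is the substance of the management-ORB DMA fix: all ORB fields are written while the CPU still owns the buffer, dma_map_single() runs only right before the ORB is handed to the target, and the error paths unwind in reverse. Condensed to its pattern (a sketch distilled from the hunks above, not the complete driver code):

/* Map last, after the ORB is fully initialized; unmap via reversed labels. */
orb->response_bus = dma_map_single(device->card->device, &orb->response,
                                   sizeof(orb->response), DMA_FROM_DEVICE);
if (dma_mapping_error(orb->response_bus))
        goto fail_mapping_response;

/* ... fill in request fields: response address, function, status FIFO ... */

orb->base.request_bus = dma_map_single(device->card->device, &orb->request,
                                       sizeof(orb->request), DMA_TO_DEVICE);
if (dma_mapping_error(orb->base.request_bus))
        goto fail_mapping_request;

sbp2_send_orb(&orb->base, unit, node_id, generation,
              sd->management_agent_address);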
@ -479,9 +483,10 @@ sbp2_send_management_orb(struct fw_unit *unit, int node_id, int generation,
out:
dma_unmap_single(device->card->device, orb->base.request_bus,
sizeof(orb->request), DMA_TO_DEVICE);
fail_mapping_request:
dma_unmap_single(device->card->device, orb->response_bus,
sizeof(orb->response), DMA_FROM_DEVICE);
fail_mapping_response:
if (response)
fw_memcpy_from_be32(response,
orb->response, sizeof(orb->response));
@ -511,7 +516,7 @@ static int sbp2_agent_reset(struct fw_unit *unit)
return -ENOMEM;
fw_send_request(device->card, t, TCODE_WRITE_QUADLET_REQUEST,
sd->node_id, sd->generation, SCODE_400,
sd->node_id, sd->generation, device->max_speed,
sd->command_block_agent_address + SBP2_AGENT_RESET,
&zero, sizeof(zero), complete_agent_reset_write, t);
@ -521,17 +526,15 @@ static int sbp2_agent_reset(struct fw_unit *unit)
static void sbp2_reconnect(struct work_struct *work);
static struct scsi_host_template scsi_driver_template;
static void
release_sbp2_device(struct kref *kref)
static void release_sbp2_device(struct kref *kref)
{
struct sbp2_device *sd = container_of(kref, struct sbp2_device, kref);
struct Scsi_Host *host =
container_of((void *)sd, struct Scsi_Host, hostdata[0]);
scsi_remove_host(host);
sbp2_send_management_orb(sd->unit, sd->node_id, sd->generation,
SBP2_LOGOUT_REQUEST, sd->login_id, NULL);
scsi_remove_host(host);
fw_core_remove_address_handler(&sd->address_handler);
fw_notify("removed sbp2 unit %s\n", sd->unit->device.bus_id);
put_device(&sd->unit->device);
@ -833,7 +836,8 @@ sbp2_status_to_sense_data(u8 *sbp2_status, u8 *sense_data)
static void
complete_command_orb(struct sbp2_orb *base_orb, struct sbp2_status *status)
{
struct sbp2_command_orb *orb = (struct sbp2_command_orb *)base_orb;
struct sbp2_command_orb *orb =
container_of(base_orb, struct sbp2_command_orb, base);
struct fw_unit *unit = orb->unit;
struct fw_device *device = fw_device(unit->device.parent);
struct scatterlist *sg;
@ -880,12 +884,7 @@ complete_command_orb(struct sbp2_orb *base_orb, struct sbp2_status *status)
if (orb->page_table_bus != 0)
dma_unmap_single(device->card->device, orb->page_table_bus,
sizeof(orb->page_table_bus), DMA_TO_DEVICE);
if (orb->request_buffer_bus != 0)
dma_unmap_single(device->card->device, orb->request_buffer_bus,
sizeof(orb->request_buffer_bus),
DMA_FROM_DEVICE);
sizeof(orb->page_table), DMA_TO_DEVICE);
orb->cmd->result = result;
orb->done(orb->cmd);
@ -900,7 +899,6 @@ static int sbp2_command_orb_map_scatterlist(struct sbp2_command_orb *orb)
struct fw_device *device = fw_device(unit->device.parent);
struct scatterlist *sg;
int sg_len, l, i, j, count;
size_t size;
dma_addr_t sg_addr;
sg = (struct scatterlist *)orb->cmd->request_buffer;
@ -935,6 +933,11 @@ static int sbp2_command_orb_map_scatterlist(struct sbp2_command_orb *orb)
sg_len = sg_dma_len(sg + i);
sg_addr = sg_dma_address(sg + i);
while (sg_len) {
/* FIXME: This won't get us out of the pinch. */
if (unlikely(j >= ARRAY_SIZE(orb->page_table))) {
fw_error("page table overflow\n");
goto fail_page_table;
}
l = min(sg_len, SBP2_MAX_SG_ELEMENT_LENGTH);
orb->page_table[j].low = sg_addr;
orb->page_table[j].high = (l << 16);
@ -944,7 +947,13 @@ static int sbp2_command_orb_map_scatterlist(struct sbp2_command_orb *orb)
}
}
size = sizeof(orb->page_table[0]) * j;
fw_memcpy_to_be32(orb->page_table, orb->page_table,
sizeof(orb->page_table[0]) * j);
orb->page_table_bus =
dma_map_single(device->card->device, orb->page_table,
sizeof(orb->page_table), DMA_TO_DEVICE);
if (dma_mapping_error(orb->page_table_bus))
goto fail_page_table;
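/* Note, added for clarity (not in the patch): the fw_memcpy_to_be32()
 * byte-swap now happens while the CPU still owns the page table, before
 * dma_map_single() transfers ownership to the device. Mapping here and the
 * unmap in complete_command_orb() also use the same size,
 * sizeof(orb->page_table), where the old code unmapped only
 * sizeof(orb->page_table_bus). */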
/*
* The data_descriptor pointer is the one case where we need
@ -953,20 +962,12 @@ static int sbp2_command_orb_map_scatterlist(struct sbp2_command_orb *orb)
* initiator (i.e. us), but data_descriptor can refer to data
* on other nodes so we need to put our ID in descriptor.high.
*/
orb->page_table_bus =
dma_map_single(device->card->device, orb->page_table,
size, DMA_TO_DEVICE);
if (dma_mapping_error(orb->page_table_bus))
goto fail_page_table;
orb->request.data_descriptor.high = sd->address_high;
orb->request.data_descriptor.low = orb->page_table_bus;
orb->request.misc |=
COMMAND_ORB_PAGE_TABLE_PRESENT |
COMMAND_ORB_DATA_SIZE(j);
fw_memcpy_to_be32(orb->page_table, orb->page_table, size);
return 0;
fail_page_table:
@ -991,7 +992,7 @@ static int sbp2_scsi_queuecommand(struct scsi_cmnd *cmd, scsi_done_fn_t done)
* transfer direction not handled.
*/
if (cmd->sc_data_direction == DMA_BIDIRECTIONAL) {
fw_error("Cannot handle DMA_BIDIRECTIONAL - rejecting command");
fw_error("Can't handle DMA_BIDIRECTIONAL, rejecting command\n");
cmd->result = DID_ERROR << 16;
done(cmd);
return 0;
@ -1005,11 +1006,6 @@ static int sbp2_scsi_queuecommand(struct scsi_cmnd *cmd, scsi_done_fn_t done)
/* Initialize rcode to something not RCODE_COMPLETE. */
orb->base.rcode = -1;
orb->base.request_bus =
dma_map_single(device->card->device, &orb->request,
sizeof(orb->request), DMA_TO_DEVICE);
if (dma_mapping_error(orb->base.request_bus))
goto fail_mapping;
orb->unit = unit;
orb->done = done;
@ -1024,8 +1020,8 @@ static int sbp2_scsi_queuecommand(struct scsi_cmnd *cmd, scsi_done_fn_t done)
* if we set this to max_speed + 7, we get the right value.
*/
orb->request.misc =
COMMAND_ORB_MAX_PAYLOAD(device->node->max_speed + 7) |
COMMAND_ORB_SPEED(device->node->max_speed) |
COMMAND_ORB_MAX_PAYLOAD(device->max_speed + 7) |
COMMAND_ORB_SPEED(device->max_speed) |
COMMAND_ORB_NOTIFY;
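/*
 * Gloss on the "+ 7", added for clarity (not in the patch): SBP-2
 * encodes max_payload p as 2^(p + 2) bytes, and the asynchronous
 * payload ceiling is 512 bytes = 2^9 at S100, doubling with each speed
 * step, so p = speed + 7 yields 2^(speed + 9): 512 at S100, 1024 at
 * S200, 2048 at S400.
 */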
if (cmd->sc_data_direction == DMA_FROM_DEVICE)
@ -1036,7 +1032,7 @@ static int sbp2_scsi_queuecommand(struct scsi_cmnd *cmd, scsi_done_fn_t done)
COMMAND_ORB_DIRECTION(SBP2_DIRECTION_TO_MEDIA);
if (cmd->use_sg && sbp2_command_orb_map_scatterlist(orb) < 0)
goto fail_map_payload;
goto fail_mapping;
fw_memcpy_to_be32(&orb->request, &orb->request, sizeof(orb->request));
@ -1045,15 +1041,17 @@ static int sbp2_scsi_queuecommand(struct scsi_cmnd *cmd, scsi_done_fn_t done)
memcpy(orb->request.command_block, cmd->cmnd, COMMAND_SIZE(*cmd->cmnd));
orb->base.callback = complete_command_orb;
orb->base.request_bus =
dma_map_single(device->card->device, &orb->request,
sizeof(orb->request), DMA_TO_DEVICE);
if (dma_mapping_error(orb->base.request_bus))
goto fail_mapping;
sbp2_send_orb(&orb->base, unit, sd->node_id, sd->generation,
sd->command_block_agent_address + SBP2_ORB_POINTER);
return 0;
fail_map_payload:
dma_unmap_single(device->card->device, orb->base.request_bus,
sizeof(orb->request), DMA_TO_DEVICE);
fail_mapping:
kfree(orb);
fail_alloc:
@ -1087,7 +1085,8 @@ static int sbp2_scsi_slave_configure(struct scsi_device *sdev)
fw_notify("setting fix_capacity for %s\n", unit->device.bus_id);
sdev->fix_capacity = 1;
}
if (sd->workarounds & SBP2_WORKAROUND_128K_MAX_TRANS)
blk_queue_max_sectors(sdev->request_queue, 128 * 1024 / 512);
return 0;
}
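(In the new workaround above, 128 * 1024 / 512 = 256: the affected bridges' 128 KiB transfer ceiling expressed in the 512-byte sectors that blk_queue_max_sectors() expects.)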

View file

@ -135,17 +135,17 @@ static void update_hop_count(struct fw_node *node)
int i;
for (i = 0; i < node->port_count; i++) {
if (node->ports[i].node == NULL)
if (node->ports[i] == NULL)
continue;
if (node->ports[i].node->max_hops > max_child_hops)
max_child_hops = node->ports[i].node->max_hops;
if (node->ports[i]->max_hops > max_child_hops)
max_child_hops = node->ports[i]->max_hops;
if (node->ports[i].node->max_depth > depths[0]) {
if (node->ports[i]->max_depth > depths[0]) {
depths[1] = depths[0];
depths[0] = node->ports[i].node->max_depth;
} else if (node->ports[i].node->max_depth > depths[1])
depths[1] = node->ports[i].node->max_depth;
depths[0] = node->ports[i]->max_depth;
} else if (node->ports[i]->max_depth > depths[1])
depths[1] = node->ports[i]->max_depth;
}
node->max_depth = depths[0] + 1;
@ -172,7 +172,8 @@ static struct fw_node *build_tree(struct fw_card *card,
struct list_head stack, *h;
u32 *next_sid, *end, q;
int i, port_count, child_port_count, phy_id, parent_count, stack_depth;
int gap_count, topology_type;
int gap_count;
bool beta_repeaters_present;
local_node = NULL;
node = NULL;
@ -182,7 +183,7 @@ static struct fw_node *build_tree(struct fw_card *card,
phy_id = 0;
irm_node = NULL;
gap_count = SELF_ID_GAP_COUNT(*sid);
topology_type = 0;
beta_repeaters_present = false;
while (sid < end) {
next_sid = count_ports(sid, &port_count, &child_port_count);
@ -214,7 +215,7 @@ static struct fw_node *build_tree(struct fw_card *card,
node = fw_node_create(q, port_count, card->color);
if (node == NULL) {
fw_error("Out of memory while building topology.");
fw_error("Out of memory while building topology.\n");
return NULL;
}
@ -224,11 +225,6 @@ static struct fw_node *build_tree(struct fw_card *card,
if (SELF_ID_CONTENDER(q))
irm_node = node;
if (node->phy_speed == SCODE_BETA)
topology_type |= FW_TOPOLOGY_B;
else
topology_type |= FW_TOPOLOGY_A;
parent_count = 0;
for (i = 0; i < port_count; i++) {
@ -249,12 +245,12 @@ static struct fw_node *build_tree(struct fw_card *card,
break;
case SELFID_PORT_CHILD:
node->ports[i].node = child;
node->ports[i] = child;
/*
* Fix up parent reference for this
* child node.
*/
child->ports[child->color].node = node;
child->ports[child->color] = node;
child->color = card->color;
child = fw_node(child->link.next);
break;
@ -278,6 +274,10 @@ static struct fw_node *build_tree(struct fw_card *card,
list_add_tail(&node->link, &stack);
stack_depth += 1 - child_port_count;
if (node->phy_speed == SCODE_BETA &&
parent_count + child_port_count > 1)
beta_repeaters_present = true;
/*
* If not all PHYs report the same gap count
* setting, we fall back to 63 which will force a gap
@ -295,7 +295,7 @@ static struct fw_node *build_tree(struct fw_card *card,
card->root_node = node;
card->irm_node = irm_node;
card->gap_count = gap_count;
card->topology_type = topology_type;
card->beta_repeaters_present = beta_repeaters_present;
return local_node;
}
@ -321,7 +321,7 @@ for_each_fw_node(struct fw_card *card, struct fw_node *root,
node->color = card->color;
for (i = 0; i < node->port_count; i++) {
child = node->ports[i].node;
child = node->ports[i];
if (!child)
continue;
if (child->color == card->color)
@ -382,11 +382,11 @@ static void move_tree(struct fw_node *node0, struct fw_node *node1, int port)
struct fw_node *tree;
int i;
tree = node1->ports[port].node;
node0->ports[port].node = tree;
tree = node1->ports[port];
node0->ports[port] = tree;
for (i = 0; i < tree->port_count; i++) {
if (tree->ports[i].node == node1) {
tree->ports[i].node = node0;
if (tree->ports[i] == node1) {
tree->ports[i] = node0;
break;
}
}
@ -437,19 +437,17 @@ update_tree(struct fw_card *card, struct fw_node *root)
card->irm_node = node0;
for (i = 0; i < node0->port_count; i++) {
if (node0->ports[i].node && node1->ports[i].node) {
if (node0->ports[i] && node1->ports[i]) {
/*
* This port didn't change, queue the
* connected node for further
* investigation.
*/
if (node0->ports[i].node->color == card->color)
if (node0->ports[i]->color == card->color)
continue;
list_add_tail(&node0->ports[i].node->link,
&list0);
list_add_tail(&node1->ports[i].node->link,
&list1);
} else if (node0->ports[i].node) {
list_add_tail(&node0->ports[i]->link, &list0);
list_add_tail(&node1->ports[i]->link, &list1);
} else if (node0->ports[i]) {
/*
* The nodes connected here were
* unplugged; unref the lost nodes and
@ -457,10 +455,10 @@ update_tree(struct fw_card *card, struct fw_node *root)
* them.
*/
for_each_fw_node(card, node0->ports[i].node,
for_each_fw_node(card, node0->ports[i],
report_lost_node);
node0->ports[i].node = NULL;
} else if (node1->ports[i].node) {
node0->ports[i] = NULL;
} else if (node1->ports[i]) {
/*
* One or more nodes were connected to
* this port. Move the new nodes into
@ -468,7 +466,7 @@ update_tree(struct fw_card *card, struct fw_node *root)
* callbacks for them.
*/
move_tree(node0, node1, i);
for_each_fw_node(card, node0->ports[i].node,
for_each_fw_node(card, node0->ports[i],
report_found_node);
}
}

View file

@ -19,12 +19,6 @@
#ifndef __fw_topology_h
#define __fw_topology_h
enum {
FW_TOPOLOGY_A = 0x01,
FW_TOPOLOGY_B = 0x02,
FW_TOPOLOGY_MIXED = 0x03,
};
enum {
FW_NODE_CREATED = 0x00,
FW_NODE_UPDATED = 0x01,
@ -33,21 +27,16 @@ enum {
FW_NODE_LINK_OFF = 0x04,
};
struct fw_port {
struct fw_node *node;
unsigned speed : 3; /* S100, S200, ... S3200 */
};
struct fw_node {
u16 node_id;
u8 color;
u8 port_count;
unsigned link_on : 1;
unsigned initiated_reset : 1;
unsigned b_path : 1;
u8 phy_speed : 3; /* As in the self ID packet. */
u8 max_speed : 5; /* Minimum of all phy-speeds and port speeds on
* the path from the local node to this node. */
u8 link_on : 1;
u8 initiated_reset : 1;
u8 b_path : 1;
u8 phy_speed : 2; /* As in the self ID packet. */
u8 max_speed : 2; /* Minimum of all phy-speeds on the path from the
* local node to this node. */
u8 max_depth : 4; /* Maximum depth to any leaf node */
u8 max_hops : 4; /* Max hops in this sub tree */
atomic_t ref_count;
@ -58,7 +47,7 @@ struct fw_node {
/* Upper layer specific data. */
void *data;
struct fw_port ports[0];
struct fw_node *ports[0];
};
static inline struct fw_node *

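The ports[0] member above is the usual zero-length-array idiom: the node and its trailing port-pointer array share a single allocation. A minimal sketch of the pattern (hypothetical helper; fw_node_create() in the driver does the equivalent):

#include <linux/slab.h>

static struct fw_node *alloc_node(int port_count)
{
        struct fw_node *node;

        /* The struct and its trailing array come from one allocation. */
        node = kzalloc(sizeof(*node) + port_count * sizeof(node->ports[0]),
                       GFP_ATOMIC);
        if (node == NULL)
                return NULL;
        node->port_count = port_count;
        return node;
}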
View file

@ -81,7 +81,6 @@
#define fw_notify(s, args...) printk(KERN_NOTICE KBUILD_MODNAME ": " s, ## args)
#define fw_error(s, args...) printk(KERN_ERR KBUILD_MODNAME ": " s, ## args)
#define fw_debug(s, args...) printk(KERN_DEBUG KBUILD_MODNAME ": " s, ## args)
static inline void
fw_memcpy_from_be32(void *_dst, void *_src, size_t size)
@ -246,7 +245,7 @@ struct fw_card {
struct fw_node *irm_node;
int color;
int gap_count;
int topology_type;
bool beta_repeaters_present;
int index;

View file

@ -2280,7 +2280,7 @@ static void dv1394_remove_host(struct hpsb_host *host)
} while (video);
if (found_ohci_card)
class_device_destroy(hpsb_protocol_class, MKDEV(IEEE1394_MAJOR,
device_destroy(hpsb_protocol_class, MKDEV(IEEE1394_MAJOR,
IEEE1394_MINOR_BLOCK_DV1394 * 16 + (host->id << 2)));
}
@ -2295,9 +2295,9 @@ static void dv1394_add_host(struct hpsb_host *host)
ohci = (struct ti_ohci *)host->hostdata;
class_device_create(hpsb_protocol_class, NULL, MKDEV(
IEEE1394_MAJOR, IEEE1394_MINOR_BLOCK_DV1394 * 16 + (id<<2)),
NULL, "dv1394-%d", id);
device_create(hpsb_protocol_class, NULL, MKDEV(
IEEE1394_MAJOR, IEEE1394_MINOR_BLOCK_DV1394 * 16 + (id<<2)),
"dv1394-%d", id);
dv1394_init(ohci, DV1394_NTSC, MODE_RECEIVE);
dv1394_init(ohci, DV1394_NTSC, MODE_TRANSMIT);

View file

@ -599,9 +599,7 @@ static void ether1394_add_host(struct hpsb_host *host)
}
SET_MODULE_OWNER(dev);
/* This used to be &host->device in Linux 2.6.20 and before. */
SET_NETDEV_DEV(dev, host->device.parent);
SET_NETDEV_DEV(dev, &host->device);
priv = netdev_priv(dev);
INIT_LIST_HEAD(&priv->ip_node_list);

View file

@ -483,37 +483,6 @@ int hpsb_unregister_addrspace(struct hpsb_highlevel *hl, struct hpsb_host *host,
return retval;
}
/**
* hpsb_listen_channel - enable receiving a certain isochronous channel
*
* Reception is handled through the @hl's iso_receive op.
*/
int hpsb_listen_channel(struct hpsb_highlevel *hl, struct hpsb_host *host,
unsigned int channel)
{
if (channel > 63) {
HPSB_ERR("%s called with invalid channel", __FUNCTION__);
return -EINVAL;
}
if (host->iso_listen_count[channel]++ == 0)
return host->driver->devctl(host, ISO_LISTEN_CHANNEL, channel);
return 0;
}
/**
* hpsb_unlisten_channel - disable receiving a certain isochronous channel
*/
void hpsb_unlisten_channel(struct hpsb_highlevel *hl, struct hpsb_host *host,
unsigned int channel)
{
if (channel > 63) {
HPSB_ERR("%s called with invalid channel", __FUNCTION__);
return;
}
if (--host->iso_listen_count[channel] == 0)
host->driver->devctl(host, ISO_UNLISTEN_CHANNEL, channel);
}
static void init_hpsb_highlevel(struct hpsb_host *host)
{
INIT_LIST_HEAD(&dummy_zero_addr.host_list);
@ -570,20 +539,6 @@ void highlevel_host_reset(struct hpsb_host *host)
read_unlock_irqrestore(&hl_irqs_lock, flags);
}
void highlevel_iso_receive(struct hpsb_host *host, void *data, size_t length)
{
unsigned long flags;
struct hpsb_highlevel *hl;
int channel = (((quadlet_t *)data)[0] >> 8) & 0x3f;
read_lock_irqsave(&hl_irqs_lock, flags);
list_for_each_entry(hl, &hl_irqs, irq_list) {
if (hl->iso_receive)
hl->iso_receive(host, channel, data, length);
}
read_unlock_irqrestore(&hl_irqs_lock, flags);
}
void highlevel_fcp_request(struct hpsb_host *host, int nodeid, int direction,
void *data, size_t length)
{

View file

@ -26,9 +26,7 @@ struct hpsb_address_serve {
struct hpsb_highlevel {
const char *name;
/* Any of the following pointers can legally be NULL, except for
* iso_receive which can only be NULL when you don't request
* channels. */
/* Any of the following pointers can legally be NULL. */
/* New host initialized. Will also be called during
* hpsb_register_highlevel for all hosts already installed. */
@ -43,13 +41,6 @@ struct hpsb_highlevel {
* You can not expect to be able to do stock hpsb_reads. */
void (*host_reset)(struct hpsb_host *host);
/* An isochronous packet was received. Channel contains the channel
* number for your convenience, it is also contained in the included
* packet header (first quadlet, CRCs are missing). You may get called
* for channel/host combinations you did not request. */
void (*iso_receive)(struct hpsb_host *host, int channel,
quadlet_t *data, size_t length);
/* A write request was received on either the FCP_COMMAND (direction =
* 0) or the FCP_RESPONSE (direction = 1) register. The cts arg
* contains the cts field (first byte of data). */
@ -109,7 +100,6 @@ int highlevel_lock(struct hpsb_host *host, int nodeid, quadlet_t *store,
int highlevel_lock64(struct hpsb_host *host, int nodeid, octlet_t *store,
u64 addr, octlet_t data, octlet_t arg, int ext_tcode,
u16 flags);
void highlevel_iso_receive(struct hpsb_host *host, void *data, size_t length);
void highlevel_fcp_request(struct hpsb_host *host, int nodeid, int direction,
void *data, size_t length);
@ -125,10 +115,6 @@ int hpsb_register_addrspace(struct hpsb_highlevel *hl, struct hpsb_host *host,
struct hpsb_address_ops *ops, u64 start, u64 end);
int hpsb_unregister_addrspace(struct hpsb_highlevel *hl, struct hpsb_host *host,
u64 start);
int hpsb_listen_channel(struct hpsb_highlevel *hl, struct hpsb_host *host,
unsigned int channel);
void hpsb_unlisten_channel(struct hpsb_highlevel *hl, struct hpsb_host *host,
unsigned int channel);
void *hpsb_get_hostinfo(struct hpsb_highlevel *hl, struct hpsb_host *host);
void *hpsb_create_hostinfo(struct hpsb_highlevel *hl, struct hpsb_host *host,

View file

@ -154,15 +154,16 @@ struct hpsb_host *hpsb_alloc_host(struct hpsb_host_driver *drv, size_t extra,
memcpy(&h->device, &nodemgr_dev_template_host, sizeof(h->device));
h->device.parent = dev;
set_dev_node(&h->device, dev_to_node(dev));
snprintf(h->device.bus_id, BUS_ID_SIZE, "fw-host%d", h->id);
h->class_dev.dev = &h->device;
h->class_dev.class = &hpsb_host_class;
snprintf(h->class_dev.class_id, BUS_ID_SIZE, "fw-host%d", h->id);
h->host_dev.parent = &h->device;
h->host_dev.class = &hpsb_host_class;
snprintf(h->host_dev.bus_id, BUS_ID_SIZE, "fw-host%d", h->id);
if (device_register(&h->device))
goto fail;
if (class_device_register(&h->class_dev)) {
if (device_register(&h->host_dev)) {
device_unregister(&h->device);
goto fail;
}
@ -202,7 +203,7 @@ void hpsb_remove_host(struct hpsb_host *host)
host->driver = &dummy_driver;
highlevel_remove_host(host);
class_device_unregister(&host->class_dev);
device_unregister(&host->host_dev);
device_unregister(&host->device);
}

View file

@ -28,8 +28,6 @@ struct hpsb_host {
struct timer_list timeout;
unsigned long timeout_interval;
unsigned char iso_listen_count[64];
int node_count; /* number of identified nodes on this bus */
int selfid_count; /* total number of SelfIDs received */
int nodes_active; /* number of nodes with active link layer */
@ -57,7 +55,7 @@ struct hpsb_host {
struct hpsb_host_driver *driver;
struct pci_dev *pdev;
struct device device;
struct class_device class_dev;
struct device host_dev;
struct delayed_work delayed_reset;
unsigned config_roms:31;
@ -99,12 +97,6 @@ enum devctl_cmd {
/* Cancel all outstanding async requests without resetting the bus.
* Return void. */
CANCEL_REQUESTS,
/* Start or stop receiving isochronous channel in arg. Return void.
* This acts as an optimization hint, hosts are not required not to
* listen on unrequested channels. */
ISO_LISTEN_CHANNEL,
ISO_UNLISTEN_CHANNEL
};
enum isoctl_cmd {

View file

@ -1028,11 +1028,6 @@ void hpsb_packet_received(struct hpsb_host *host, quadlet_t *data, size_t size,
handle_incoming_packet(host, tcode, data, size, write_acked);
break;
case TCODE_ISO_DATA:
highlevel_iso_receive(host, data, size);
break;
case TCODE_CYCLE_START:
/* simply ignore this packet if it is passed on */
break;
@ -1316,7 +1311,6 @@ EXPORT_SYMBOL(hpsb_make_streampacket);
EXPORT_SYMBOL(hpsb_make_lockpacket);
EXPORT_SYMBOL(hpsb_make_lock64packet);
EXPORT_SYMBOL(hpsb_make_phypacket);
EXPORT_SYMBOL(hpsb_make_isopacket);
EXPORT_SYMBOL(hpsb_read);
EXPORT_SYMBOL(hpsb_write);
EXPORT_SYMBOL(hpsb_packet_success);
@ -1327,8 +1321,6 @@ EXPORT_SYMBOL(hpsb_unregister_highlevel);
EXPORT_SYMBOL(hpsb_register_addrspace);
EXPORT_SYMBOL(hpsb_unregister_addrspace);
EXPORT_SYMBOL(hpsb_allocate_and_register_addrspace);
EXPORT_SYMBOL(hpsb_listen_channel);
EXPORT_SYMBOL(hpsb_unlisten_channel);
EXPORT_SYMBOL(hpsb_get_hostinfo);
EXPORT_SYMBOL(hpsb_create_hostinfo);
EXPORT_SYMBOL(hpsb_destroy_hostinfo);

View file

@ -24,9 +24,8 @@ struct hpsb_packet {
nodeid_t node_id;
/* Async and Iso types should be clear, raw means send-as-is, do not
* CRC! Byte swapping shall still be done in this case. */
enum { hpsb_async, hpsb_iso, hpsb_raw } __attribute__((packed)) type;
/* hpsb_raw = send as-is, do not CRC (but still byte-swap it) */
enum { hpsb_async, hpsb_raw } __attribute__((packed)) type;
/* Okay, this is core internal and a no care for hosts.
* queued = queued for sending
@ -37,7 +36,7 @@ struct hpsb_packet {
hpsb_unused, hpsb_queued, hpsb_pending, hpsb_complete
} __attribute__((packed)) state;
/* These are core internal. */
/* These are core-internal. */
signed char tlabel;
signed char ack_code;
unsigned char tcode;
@ -62,11 +61,15 @@ struct hpsb_packet {
/* Store jiffies for implementing bus timeouts. */
unsigned long sendtime;
/* Sizes are in bytes. *data can be DMA-mapped. */
/* Core-internal. */
size_t allocated_data_size; /* as allocated */
/* Sizes are in bytes. To be set by caller of hpsb_alloc_packet. */
size_t data_size; /* as filled in */
size_t header_size; /* as filled in, not counting the CRC */
quadlet_t *data;
/* Buffers */
quadlet_t *data; /* can be DMA-mapped */
quadlet_t header[5];
quadlet_t embedded_data[0]; /* keep as last member */
};
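(The "keep as last member" constraint exists because hpsb_alloc_packet() lays the payload out in the same allocation directly behind the struct; the new allocated_data_size vs. data_size split then records buffer capacity versus the amount actually filled in.)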

View file

@ -89,18 +89,6 @@ static void fill_async_lock(struct hpsb_packet *packet, u64 addr, int extcode,
packet->expect_response = 1;
}
static void fill_iso_packet(struct hpsb_packet *packet, int length, int channel,
int tag, int sync)
{
packet->header[0] = (length << 16) | (tag << 14) | (channel << 8)
| (TCODE_ISO_DATA << 4) | sync;
packet->header_size = 4;
packet->data_size = length;
packet->type = hpsb_iso;
packet->tcode = TCODE_ISO_DATA;
}
static void fill_phy_packet(struct hpsb_packet *packet, quadlet_t data)
{
packet->header[0] = data;
@ -491,24 +479,6 @@ struct hpsb_packet *hpsb_make_phypacket(struct hpsb_host *host, quadlet_t data)
return p;
}
struct hpsb_packet *hpsb_make_isopacket(struct hpsb_host *host,
int length, int channel,
int tag, int sync)
{
struct hpsb_packet *p;
p = hpsb_alloc_packet(length);
if (!p)
return NULL;
p->host = host;
fill_iso_packet(p, length, channel, tag, sync);
p->generation = get_hpsb_generation(host);
return p;
}
/*
* FIXME - these functions should probably read from / write to user space to
* avoid in kernel buffers for user space callers

View file

@ -19,8 +19,6 @@ struct hpsb_packet *hpsb_make_lock64packet(struct hpsb_host *host,
nodeid_t node, u64 addr, int extcode,
octlet_t *data, octlet_t arg);
struct hpsb_packet *hpsb_make_phypacket(struct hpsb_host *host, quadlet_t data);
struct hpsb_packet *hpsb_make_isopacket(struct hpsb_host *host, int length,
int channel, int tag, int sync);
struct hpsb_packet *hpsb_make_writepacket(struct hpsb_host *host,
nodeid_t node, u64 addr,
quadlet_t *buffer, size_t length);

View file

@ -19,6 +19,7 @@
#include <linux/mutex.h>
#include <linux/freezer.h>
#include <asm/atomic.h>
#include <asm/semaphore.h>
#include "csr.h"
#include "highlevel.h"
@ -145,8 +146,6 @@ static struct csr1212_bus_ops nodemgr_csr_ops = {
* but now we are much simpler because of the LDM.
*/
static DEFINE_MUTEX(nodemgr_serialize);
struct host_info {
struct hpsb_host *host;
struct list_head list;
@ -154,7 +153,7 @@ struct host_info {
};
static int nodemgr_bus_match(struct device * dev, struct device_driver * drv);
static int nodemgr_uevent(struct class_device *cdev, char **envp, int num_envp,
static int nodemgr_uevent(struct device *dev, char **envp, int num_envp,
char *buffer, int buffer_size);
static void nodemgr_resume_ne(struct node_entry *ne);
static void nodemgr_remove_ne(struct node_entry *ne);
@ -165,37 +164,38 @@ struct bus_type ieee1394_bus_type = {
.match = nodemgr_bus_match,
};
static void host_cls_release(struct class_device *class_dev)
static void host_cls_release(struct device *dev)
{
put_device(&container_of((class_dev), struct hpsb_host, class_dev)->device);
put_device(&container_of((dev), struct hpsb_host, host_dev)->device);
}
struct class hpsb_host_class = {
.name = "ieee1394_host",
.release = host_cls_release,
.dev_release = host_cls_release,
};
static void ne_cls_release(struct class_device *class_dev)
static void ne_cls_release(struct device *dev)
{
put_device(&container_of((class_dev), struct node_entry, class_dev)->device);
put_device(&container_of((dev), struct node_entry, node_dev)->device);
}
static struct class nodemgr_ne_class = {
.name = "ieee1394_node",
.release = ne_cls_release,
.dev_release = ne_cls_release,
};
static void ud_cls_release(struct class_device *class_dev)
static void ud_cls_release(struct device *dev)
{
put_device(&container_of((class_dev), struct unit_directory, class_dev)->device);
put_device(&container_of((dev), struct unit_directory, unit_dev)->device);
}
/* The name here is only so that unit directory hotplug works with old
* style hotplug, which only ever did unit directories anyway. */
* style hotplug, which only ever did unit directories anyway.
*/
static struct class nodemgr_ud_class = {
.name = "ieee1394",
.release = ud_cls_release,
.uevent = nodemgr_uevent,
.dev_release = ud_cls_release,
.dev_uevent = nodemgr_uevent,
};
static struct hpsb_highlevel nodemgr_highlevel;
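All three conversions above follow the same class_device-to-device migration pattern: the embedded struct class_device becomes a plain struct device (host_dev, node_dev, unit_dev) parented to the existing struct device, the class gains .dev_release/.dev_uevent in place of .release/.uevent, and container_of() on the new member recovers the owning object.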
@ -730,11 +730,11 @@ static DEFINE_MUTEX(nodemgr_serialize_remove_uds);
static void nodemgr_remove_uds(struct node_entry *ne)
{
struct class_device *cdev;
struct device *dev;
struct unit_directory *tmp, *ud;
/* Iteration over nodemgr_ud_class.children has to be protected by
* nodemgr_ud_class.sem, but class_device_unregister() will eventually
/* Iteration over nodemgr_ud_class.devices has to be protected by
* nodemgr_ud_class.sem, but device_unregister() will eventually
* take nodemgr_ud_class.sem too. Therefore pick out one ud at a time,
* release the semaphore, and then unregister the ud. Since this code
* may be called from other contexts besides the knodemgrds, protect the
@ -744,9 +744,9 @@ static void nodemgr_remove_uds(struct node_entry *ne)
for (;;) {
ud = NULL;
down(&nodemgr_ud_class.sem);
list_for_each_entry(cdev, &nodemgr_ud_class.children, node) {
tmp = container_of(cdev, struct unit_directory,
class_dev);
list_for_each_entry(dev, &nodemgr_ud_class.devices, node) {
tmp = container_of(dev, struct unit_directory,
unit_dev);
if (tmp->ne == ne) {
ud = tmp;
break;
@ -755,7 +755,7 @@ static void nodemgr_remove_uds(struct node_entry *ne)
up(&nodemgr_ud_class.sem);
if (ud == NULL)
break;
class_device_unregister(&ud->class_dev);
device_unregister(&ud->unit_dev);
device_unregister(&ud->device);
}
mutex_unlock(&nodemgr_serialize_remove_uds);
@ -772,10 +772,9 @@ static void nodemgr_remove_ne(struct node_entry *ne)
HPSB_DEBUG("Node removed: ID:BUS[" NODE_BUS_FMT "] GUID[%016Lx]",
NODE_BUS_ARGS(ne->host, ne->nodeid), (unsigned long long)ne->guid);
nodemgr_remove_uds(ne);
class_device_unregister(&ne->class_dev);
device_unregister(&ne->node_dev);
device_unregister(dev);
put_device(dev);
@ -783,7 +782,9 @@ static void nodemgr_remove_ne(struct node_entry *ne)
static int __nodemgr_remove_host_dev(struct device *dev, void *data)
{
nodemgr_remove_ne(container_of(dev, struct node_entry, device));
if (dev->bus == &ieee1394_bus_type)
nodemgr_remove_ne(container_of(dev, struct node_entry,
device));
return 0;
}
@ -850,14 +851,14 @@ static struct node_entry *nodemgr_create_node(octlet_t guid, struct csr1212_csr
snprintf(ne->device.bus_id, BUS_ID_SIZE, "%016Lx",
(unsigned long long)(ne->guid));
ne->class_dev.dev = &ne->device;
ne->class_dev.class = &nodemgr_ne_class;
snprintf(ne->class_dev.class_id, BUS_ID_SIZE, "%016Lx",
(unsigned long long)(ne->guid));
ne->node_dev.parent = &ne->device;
ne->node_dev.class = &nodemgr_ne_class;
snprintf(ne->node_dev.bus_id, BUS_ID_SIZE, "%016Lx",
(unsigned long long)(ne->guid));
if (device_register(&ne->device))
goto fail_devreg;
if (class_device_register(&ne->class_dev))
if (device_register(&ne->node_dev))
goto fail_classdevreg;
get_device(&ne->device);
@ -885,12 +886,12 @@ fail_alloc:
static struct node_entry *find_entry_by_guid(u64 guid)
{
struct class_device *cdev;
struct device *dev;
struct node_entry *ne, *ret_ne = NULL;
down(&nodemgr_ne_class.sem);
list_for_each_entry(cdev, &nodemgr_ne_class.children, node) {
ne = container_of(cdev, struct node_entry, class_dev);
list_for_each_entry(dev, &nodemgr_ne_class.devices, node) {
ne = container_of(dev, struct node_entry, node_dev);
if (ne->guid == guid) {
ret_ne = ne;
@ -906,12 +907,12 @@ static struct node_entry *find_entry_by_guid(u64 guid)
static struct node_entry *find_entry_by_nodeid(struct hpsb_host *host,
nodeid_t nodeid)
{
struct class_device *cdev;
struct device *dev;
struct node_entry *ne, *ret_ne = NULL;
down(&nodemgr_ne_class.sem);
list_for_each_entry(cdev, &nodemgr_ne_class.children, node) {
ne = container_of(cdev, struct node_entry, class_dev);
list_for_each_entry(dev, &nodemgr_ne_class.devices, node) {
ne = container_of(dev, struct node_entry, node_dev);
if (ne->host == host && ne->nodeid == nodeid) {
ret_ne = ne;
@ -935,14 +936,14 @@ static void nodemgr_register_device(struct node_entry *ne,
snprintf(ud->device.bus_id, BUS_ID_SIZE, "%s-%u",
ne->device.bus_id, ud->id);
ud->class_dev.dev = &ud->device;
ud->class_dev.class = &nodemgr_ud_class;
snprintf(ud->class_dev.class_id, BUS_ID_SIZE, "%s-%u",
ud->unit_dev.parent = &ud->device;
ud->unit_dev.class = &nodemgr_ud_class;
snprintf(ud->unit_dev.bus_id, BUS_ID_SIZE, "%s-%u",
ne->device.bus_id, ud->id);
if (device_register(&ud->device))
goto fail_devreg;
if (class_device_register(&ud->class_dev))
if (device_register(&ud->unit_dev))
goto fail_classdevreg;
get_device(&ud->device);
@ -1159,7 +1160,7 @@ static void nodemgr_process_root_directory(struct host_info *hi, struct node_ent
#ifdef CONFIG_HOTPLUG
static int nodemgr_uevent(struct class_device *cdev, char **envp, int num_envp,
static int nodemgr_uevent(struct device *dev, char **envp, int num_envp,
char *buffer, int buffer_size)
{
struct unit_directory *ud;
@ -1169,10 +1170,10 @@ static int nodemgr_uevent(struct class_device *cdev, char **envp, int num_envp,
/* ieee1394:venNmoNspNverN */
char buf[8 + 1 + 3 + 8 + 2 + 8 + 2 + 8 + 3 + 8 + 1];
if (!cdev)
if (!dev)
return -ENODEV;
ud = container_of(cdev, struct unit_directory, class_dev);
ud = container_of(dev, struct unit_directory, unit_dev);
if (ud->ne->in_limbo || ud->ignore_driver)
return -ENODEV;
@ -1207,7 +1208,7 @@ do { \
#else
static int nodemgr_uevent(struct class_device *cdev, char **envp, int num_envp,
static int nodemgr_uevent(struct device *dev, char **envp, int num_envp,
char *buffer, int buffer_size)
{
return -ENODEV;
@ -1378,8 +1379,10 @@ static void nodemgr_node_scan(struct host_info *hi, int generation)
static void nodemgr_suspend_ne(struct node_entry *ne)
{
struct class_device *cdev;
struct device *dev;
struct unit_directory *ud;
struct device_driver *drv;
int error;
HPSB_DEBUG("Node suspended: ID:BUS[" NODE_BUS_FMT "] GUID[%016Lx]",
NODE_BUS_ARGS(ne->host, ne->nodeid), (unsigned long long)ne->guid);
@ -1388,15 +1391,24 @@ static void nodemgr_suspend_ne(struct node_entry *ne)
WARN_ON(device_create_file(&ne->device, &dev_attr_ne_in_limbo));
down(&nodemgr_ud_class.sem);
list_for_each_entry(cdev, &nodemgr_ud_class.children, node) {
ud = container_of(cdev, struct unit_directory, class_dev);
list_for_each_entry(dev, &nodemgr_ud_class.devices, node) {
ud = container_of(dev, struct unit_directory, unit_dev);
if (ud->ne != ne)
continue;
if (ud->device.driver &&
(!ud->device.driver->suspend ||
ud->device.driver->suspend(&ud->device, PMSG_SUSPEND)))
drv = get_driver(ud->device.driver);
if (!drv)
continue;
error = 1; /* release if suspend is not implemented */
if (drv->suspend) {
down(&ud->device.sem);
error = drv->suspend(&ud->device, PMSG_SUSPEND);
up(&ud->device.sem);
}
if (error)
device_release_driver(&ud->device);
put_driver(drv);
}
up(&nodemgr_ud_class.sem);
}
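(In the rewritten loop above, get_driver()/put_driver() pin the driver while its callback runs, ud->device.sem is held across ->suspend() just as the driver core holds it, and the error = 1 default preserves the old behavior of releasing drivers that implement no suspend hook.)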
@ -1404,20 +1416,29 @@ static void nodemgr_suspend_ne(struct node_entry *ne)
static void nodemgr_resume_ne(struct node_entry *ne)
{
struct class_device *cdev;
struct device *dev;
struct unit_directory *ud;
struct device_driver *drv;
ne->in_limbo = 0;
device_remove_file(&ne->device, &dev_attr_ne_in_limbo);
down(&nodemgr_ud_class.sem);
list_for_each_entry(cdev, &nodemgr_ud_class.children, node) {
ud = container_of(cdev, struct unit_directory, class_dev);
list_for_each_entry(dev, &nodemgr_ud_class.devices, node) {
ud = container_of(dev, struct unit_directory, unit_dev);
if (ud->ne != ne)
continue;
if (ud->device.driver && ud->device.driver->resume)
ud->device.driver->resume(&ud->device);
drv = get_driver(ud->device.driver);
if (!drv)
continue;
if (drv->resume) {
down(&ud->device.sem);
drv->resume(&ud->device);
up(&ud->device.sem);
}
put_driver(drv);
}
up(&nodemgr_ud_class.sem);
@ -1428,23 +1449,32 @@ static void nodemgr_resume_ne(struct node_entry *ne)
static void nodemgr_update_pdrv(struct node_entry *ne)
{
struct device *dev;
struct unit_directory *ud;
struct device_driver *drv;
struct hpsb_protocol_driver *pdrv;
struct class_device *cdev;
int error;
down(&nodemgr_ud_class.sem);
list_for_each_entry(cdev, &nodemgr_ud_class.children, node) {
ud = container_of(cdev, struct unit_directory, class_dev);
list_for_each_entry(dev, &nodemgr_ud_class.devices, node) {
ud = container_of(dev, struct unit_directory, unit_dev);
if (ud->ne != ne)
continue;
if (ud->device.driver) {
pdrv = container_of(ud->device.driver,
struct hpsb_protocol_driver,
driver);
if (pdrv->update && pdrv->update(ud))
device_release_driver(&ud->device);
drv = get_driver(ud->device.driver);
if (!drv)
continue;
error = 0;
pdrv = container_of(drv, struct hpsb_protocol_driver, driver);
if (pdrv->update) {
down(&ud->device.sem);
error = pdrv->update(ud);
up(&ud->device.sem);
}
if (error)
device_release_driver(&ud->device);
put_driver(drv);
}
up(&nodemgr_ud_class.sem);
}
@ -1509,7 +1539,7 @@ static void nodemgr_probe_ne(struct host_info *hi, struct node_entry *ne, int ge
static void nodemgr_node_probe(struct host_info *hi, int generation)
{
struct hpsb_host *host = hi->host;
struct class_device *cdev;
struct device *dev;
struct node_entry *ne;
/* Do some processing of the nodes we've probed. This pulls them
@ -1522,13 +1552,13 @@ static void nodemgr_node_probe(struct host_info *hi, int generation)
* improvement...) */
down(&nodemgr_ne_class.sem);
list_for_each_entry(cdev, &nodemgr_ne_class.children, node) {
ne = container_of(cdev, struct node_entry, class_dev);
list_for_each_entry(dev, &nodemgr_ne_class.devices, node) {
ne = container_of(dev, struct node_entry, node_dev);
if (!ne->needs_probe)
nodemgr_probe_ne(hi, ne, generation);
}
list_for_each_entry(cdev, &nodemgr_ne_class.children, node) {
ne = container_of(cdev, struct node_entry, class_dev);
list_for_each_entry(dev, &nodemgr_ne_class.devices, node) {
ne = container_of(dev, struct node_entry, node_dev);
if (ne->needs_probe)
nodemgr_probe_ne(hi, ne, generation);
}
@ -1686,18 +1716,12 @@ static int nodemgr_host_thread(void *__hi)
if (kthread_should_stop())
goto exit;
if (mutex_lock_interruptible(&nodemgr_serialize)) {
if (try_to_freeze())
continue;
goto exit;
}
/* Pause for 1/4 second in 1/16 second intervals,
* to make sure things settle down. */
g = get_hpsb_generation(host);
for (i = 0; i < 4 ; i++) {
if (msleep_interruptible(63) || kthread_should_stop())
goto unlock_exit;
goto exit;
/* Now get the generation in which the node ID's we collect
* are valid. During the bus scan we will use this generation
@ -1715,7 +1739,6 @@ static int nodemgr_host_thread(void *__hi)
if (!nodemgr_check_irm_capability(host, reset_cycles) ||
!nodemgr_do_irm_duties(host, reset_cycles)) {
reset_cycles++;
mutex_unlock(&nodemgr_serialize);
continue;
}
reset_cycles = 0;
@ -1732,11 +1755,7 @@ static int nodemgr_host_thread(void *__hi)
/* Update some of our sysfs symlinks */
nodemgr_update_host_dev_links(host);
mutex_unlock(&nodemgr_serialize);
}
unlock_exit:
mutex_unlock(&nodemgr_serialize);
exit:
HPSB_VERBOSE("NodeMgr: Exiting thread");
return 0;
@ -1756,13 +1775,13 @@ exit:
*/
int nodemgr_for_each_host(void *data, int (*cb)(struct hpsb_host *, void *))
{
struct class_device *cdev;
struct device *dev;
struct hpsb_host *host;
int error = 0;
down(&hpsb_host_class.sem);
list_for_each_entry(cdev, &hpsb_host_class.children, node) {
host = container_of(cdev, struct hpsb_host, class_dev);
list_for_each_entry(dev, &hpsb_host_class.devices, node) {
host = container_of(dev, struct hpsb_host, host_dev);
if ((error = cb(host, data)))
break;

View file

@ -84,7 +84,7 @@ struct unit_directory {
int length; /* Number of quadlets */
struct device device;
struct class_device class_dev;
struct device unit_dev;
struct csr1212_keyval *ud_kv;
u32 lun; /* logical unit number immediate value */
@ -107,7 +107,7 @@ struct node_entry {
u32 capabilities;
struct device device;
struct class_device class_dev;
struct device node_dev;
/* Means this node is not attached anymore */
int in_limbo;

View file

@ -138,19 +138,6 @@ printk(KERN_INFO "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->
#define DBGMSG(fmt, args...) do {} while (0)
#endif
#ifdef CONFIG_IEEE1394_OHCI_DMA_DEBUG
#define OHCI_DMA_ALLOC(fmt, args...) \
HPSB_ERR("%s(%s)alloc(%d): "fmt, OHCI1394_DRIVER_NAME, __FUNCTION__, \
++global_outstanding_dmas, ## args)
#define OHCI_DMA_FREE(fmt, args...) \
HPSB_ERR("%s(%s)free(%d): "fmt, OHCI1394_DRIVER_NAME, __FUNCTION__, \
--global_outstanding_dmas, ## args)
static int global_outstanding_dmas = 0;
#else
#define OHCI_DMA_ALLOC(fmt, args...) do {} while (0)
#define OHCI_DMA_FREE(fmt, args...) do {} while (0)
#endif
/* print general (card independent) information */
#define PRINT_G(level, fmt, args...) \
printk(level "%s: " fmt "\n" , OHCI1394_DRIVER_NAME , ## args)
@ -170,7 +157,6 @@ static void dma_trm_reset(struct dma_trm_ctx *d);
static int alloc_dma_rcv_ctx(struct ti_ohci *ohci, struct dma_rcv_ctx *d,
enum context_type type, int ctx, int num_desc,
int buf_size, int split_buf_size, int context_base);
static void stop_dma_rcv_ctx(struct dma_rcv_ctx *d);
static void free_dma_rcv_ctx(struct dma_rcv_ctx *d);
static int alloc_dma_trm_ctx(struct ti_ohci *ohci, struct dma_trm_ctx *d,
@ -533,9 +519,6 @@ static void ohci_initialize(struct ti_ohci *ohci)
initialize_dma_trm_ctx(&ohci->at_req_context);
initialize_dma_trm_ctx(&ohci->at_resp_context);
/* Initialize IR Legacy DMA channel mask */
ohci->ir_legacy_channels = 0;
/* Accept AR requests from all nodes */
reg_write(ohci, OHCI1394_AsReqFilterHiSet, 0x80000000);
@ -733,7 +716,6 @@ static void insert_packet(struct ti_ohci *ohci,
pci_map_single(ohci->dev, packet->data,
packet->data_size,
PCI_DMA_TODEVICE));
OHCI_DMA_ALLOC("single, block transmit packet");
d->prg_cpu[idx]->end.branchAddress = 0;
d->prg_cpu[idx]->end.status = 0;
@ -783,7 +765,6 @@ static void insert_packet(struct ti_ohci *ohci,
d->prg_cpu[idx]->end.address = cpu_to_le32(
pci_map_single(ohci->dev, packet->data,
packet->data_size, PCI_DMA_TODEVICE));
OHCI_DMA_ALLOC("single, iso transmit packet");
d->prg_cpu[idx]->end.branchAddress = 0;
d->prg_cpu[idx]->end.status = 0;
@ -884,36 +865,9 @@ static int ohci_transmit(struct hpsb_host *host, struct hpsb_packet *packet)
return -EOVERFLOW;
}
/* Decide whether we have an iso, a request, or a response packet */
if (packet->type == hpsb_raw)
d = &ohci->at_req_context;
else if ((packet->tcode == TCODE_ISO_DATA) && (packet->type == hpsb_iso)) {
/* The legacy IT DMA context is initialized on first
* use. However, the alloc cannot be run from
* interrupt context, so we bail out if that is the
* case. I don't see anyone sending ISO packets from
* interrupt context anyway... */
if (ohci->it_legacy_context.ohci == NULL) {
if (in_interrupt()) {
PRINT(KERN_ERR,
"legacy IT context cannot be initialized during interrupt");
return -EINVAL;
}
if (alloc_dma_trm_ctx(ohci, &ohci->it_legacy_context,
DMA_CTX_ISO, 0, IT_NUM_DESC,
OHCI1394_IsoXmitContextBase) < 0) {
PRINT(KERN_ERR,
"error initializing legacy IT context");
return -ENOMEM;
}
initialize_dma_trm_ctx(&ohci->it_legacy_context);
}
d = &ohci->it_legacy_context;
} else if ((packet->tcode & 0x02) && (packet->tcode != TCODE_ISO_DATA))
else if ((packet->tcode & 0x02) && (packet->tcode != TCODE_ISO_DATA))
d = &ohci->at_resp_context;
else
d = &ohci->at_req_context;
@ -932,9 +886,7 @@ static int ohci_transmit(struct hpsb_host *host, struct hpsb_packet *packet)
static int ohci_devctl(struct hpsb_host *host, enum devctl_cmd cmd, int arg)
{
struct ti_ohci *ohci = host->hostdata;
int retval = 0;
unsigned long flags;
int phy_reg;
int retval = 0, phy_reg;
switch (cmd) {
case RESET_BUS:
@ -1027,117 +979,6 @@ static int ohci_devctl(struct hpsb_host *host, enum devctl_cmd cmd, int arg)
dma_trm_reset(&ohci->at_resp_context);
break;
case ISO_LISTEN_CHANNEL:
{
u64 mask;
struct dma_rcv_ctx *d = &ohci->ir_legacy_context;
int ir_legacy_active;
if (arg<0 || arg>63) {
PRINT(KERN_ERR,
"%s: IS0 listen channel %d is out of range",
__FUNCTION__, arg);
return -EFAULT;
}
mask = (u64)0x1<<arg;
spin_lock_irqsave(&ohci->IR_channel_lock, flags);
if (ohci->ISO_channel_usage & mask) {
PRINT(KERN_ERR,
"%s: IS0 listen channel %d is already used",
__FUNCTION__, arg);
spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
return -EFAULT;
}
ir_legacy_active = ohci->ir_legacy_channels;
ohci->ISO_channel_usage |= mask;
ohci->ir_legacy_channels |= mask;
spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
if (!ir_legacy_active) {
if (ohci1394_register_iso_tasklet(ohci,
&ohci->ir_legacy_tasklet) < 0) {
PRINT(KERN_ERR, "No IR DMA context available");
return -EBUSY;
}
/* the IR context can be assigned to any DMA context
* by ohci1394_register_iso_tasklet */
d->ctx = ohci->ir_legacy_tasklet.context;
d->ctrlSet = OHCI1394_IsoRcvContextControlSet +
32*d->ctx;
d->ctrlClear = OHCI1394_IsoRcvContextControlClear +
32*d->ctx;
d->cmdPtr = OHCI1394_IsoRcvCommandPtr + 32*d->ctx;
d->ctxtMatch = OHCI1394_IsoRcvContextMatch + 32*d->ctx;
initialize_dma_rcv_ctx(&ohci->ir_legacy_context, 1);
if (printk_ratelimit())
DBGMSG("IR legacy activated");
}
spin_lock_irqsave(&ohci->IR_channel_lock, flags);
if (arg>31)
reg_write(ohci, OHCI1394_IRMultiChanMaskHiSet,
1<<(arg-32));
else
reg_write(ohci, OHCI1394_IRMultiChanMaskLoSet,
1<<arg);
spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
DBGMSG("Listening enabled on channel %d", arg);
break;
}
case ISO_UNLISTEN_CHANNEL:
{
u64 mask;
if (arg<0 || arg>63) {
PRINT(KERN_ERR,
"%s: IS0 unlisten channel %d is out of range",
__FUNCTION__, arg);
return -EFAULT;
}
mask = (u64)0x1<<arg;
spin_lock_irqsave(&ohci->IR_channel_lock, flags);
if (!(ohci->ISO_channel_usage & mask)) {
PRINT(KERN_ERR,
"%s: IS0 unlisten channel %d is not used",
__FUNCTION__, arg);
spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
return -EFAULT;
}
ohci->ISO_channel_usage &= ~mask;
ohci->ir_legacy_channels &= ~mask;
if (arg>31)
reg_write(ohci, OHCI1394_IRMultiChanMaskHiClear,
1<<(arg-32));
else
reg_write(ohci, OHCI1394_IRMultiChanMaskLoClear,
1<<arg);
spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
DBGMSG("Listening disabled on channel %d", arg);
if (ohci->ir_legacy_channels == 0) {
stop_dma_rcv_ctx(&ohci->ir_legacy_context);
DBGMSG("ISO legacy receive context stopped");
}
break;
}
default:
PRINT_G(KERN_ERR, "ohci_devctl cmd %d not implemented yet",
cmd);
@ -2869,12 +2710,10 @@ static void dma_trm_tasklet (unsigned long data)
list_del_init(&packet->driver_list);
hpsb_packet_sent(ohci->host, packet, ack);
if (datasize) {
if (datasize)
pci_unmap_single(ohci->dev,
cpu_to_le32(d->prg_cpu[d->sent_ind]->end.address),
datasize, PCI_DMA_TODEVICE);
OHCI_DMA_FREE("single Xmit data packet");
}
d->sent_ind = (d->sent_ind+1)%d->num_desc;
d->free_prgs++;
@ -2885,22 +2724,6 @@ static void dma_trm_tasklet (unsigned long data)
spin_unlock_irqrestore(&d->lock, flags);
}
static void stop_dma_rcv_ctx(struct dma_rcv_ctx *d)
{
if (d->ctrlClear) {
ohci1394_stop_context(d->ohci, d->ctrlClear, NULL);
if (d->type == DMA_CTX_ISO) {
/* disable interrupts */
reg_write(d->ohci, OHCI1394_IsoRecvIntMaskClear, 1 << d->ctx);
ohci1394_unregister_iso_tasklet(d->ohci, &d->ohci->ir_legacy_tasklet);
} else {
tasklet_kill(&d->task);
}
}
}
static void free_dma_rcv_ctx(struct dma_rcv_ctx *d)
{
int i;
@ -2913,23 +2736,19 @@ static void free_dma_rcv_ctx(struct dma_rcv_ctx *d)
if (d->buf_cpu) {
for (i=0; i<d->num_desc; i++)
if (d->buf_cpu[i] && d->buf_bus[i]) {
if (d->buf_cpu[i] && d->buf_bus[i])
pci_free_consistent(
ohci->dev, d->buf_size,
d->buf_cpu[i], d->buf_bus[i]);
OHCI_DMA_FREE("consistent dma_rcv buf[%d]", i);
}
kfree(d->buf_cpu);
kfree(d->buf_bus);
}
if (d->prg_cpu) {
for (i=0; i<d->num_desc; i++)
if (d->prg_cpu[i] && d->prg_bus[i]) {
pci_pool_free(d->prg_pool, d->prg_cpu[i], d->prg_bus[i]);
OHCI_DMA_FREE("consistent dma_rcv prg[%d]", i);
}
if (d->prg_cpu[i] && d->prg_bus[i])
pci_pool_free(d->prg_pool, d->prg_cpu[i],
d->prg_bus[i]);
pci_pool_destroy(d->prg_pool);
OHCI_DMA_FREE("dma_rcv prg pool");
kfree(d->prg_cpu);
kfree(d->prg_bus);
}
@ -2998,13 +2817,10 @@ alloc_dma_rcv_ctx(struct ti_ohci *ohci, struct dma_rcv_ctx *d,
}
num_allocs++;
OHCI_DMA_ALLOC("dma_rcv prg pool");
for (i=0; i<d->num_desc; i++) {
d->buf_cpu[i] = pci_alloc_consistent(ohci->dev,
d->buf_size,
d->buf_bus+i);
OHCI_DMA_ALLOC("consistent dma_rcv buf[%d]", i);
if (d->buf_cpu[i] != NULL) {
memset(d->buf_cpu[i], 0, d->buf_size);
@ -3016,7 +2832,6 @@ alloc_dma_rcv_ctx(struct ti_ohci *ohci, struct dma_rcv_ctx *d,
}
d->prg_cpu[i] = pci_pool_alloc(d->prg_pool, GFP_KERNEL, d->prg_bus+i);
OHCI_DMA_ALLOC("pool dma_rcv prg[%d]", i);
if (d->prg_cpu[i] != NULL) {
memset(d->prg_cpu[i], 0, sizeof(struct dma_cmd));
@ -3030,18 +2845,11 @@ alloc_dma_rcv_ctx(struct ti_ohci *ohci, struct dma_rcv_ctx *d,
spin_lock_init(&d->lock);
if (type == DMA_CTX_ISO) {
ohci1394_init_iso_tasklet(&ohci->ir_legacy_tasklet,
OHCI_ISO_MULTICHANNEL_RECEIVE,
dma_rcv_tasklet, (unsigned long) d);
} else {
d->ctrlSet = context_base + OHCI1394_ContextControlSet;
d->ctrlClear = context_base + OHCI1394_ContextControlClear;
d->cmdPtr = context_base + OHCI1394_ContextCommandPtr;
tasklet_init (&d->task, dma_rcv_tasklet, (unsigned long) d);
}
d->ctrlSet = context_base + OHCI1394_ContextControlSet;
d->ctrlClear = context_base + OHCI1394_ContextControlClear;
d->cmdPtr = context_base + OHCI1394_ContextCommandPtr;
tasklet_init(&d->task, dma_rcv_tasklet, (unsigned long) d);
return 0;
}
@ -3057,12 +2865,10 @@ static void free_dma_trm_ctx(struct dma_trm_ctx *d)
if (d->prg_cpu) {
for (i=0; i<d->num_desc; i++)
if (d->prg_cpu[i] && d->prg_bus[i]) {
pci_pool_free(d->prg_pool, d->prg_cpu[i], d->prg_bus[i]);
OHCI_DMA_FREE("pool dma_trm prg[%d]", i);
}
if (d->prg_cpu[i] && d->prg_bus[i])
pci_pool_free(d->prg_pool, d->prg_cpu[i],
d->prg_bus[i]);
pci_pool_destroy(d->prg_pool);
OHCI_DMA_FREE("dma_trm prg pool");
kfree(d->prg_cpu);
kfree(d->prg_bus);
}
@ -3108,11 +2914,8 @@ alloc_dma_trm_ctx(struct ti_ohci *ohci, struct dma_trm_ctx *d,
}
num_allocs++;
OHCI_DMA_ALLOC("dma_rcv prg pool");
for (i = 0; i < d->num_desc; i++) {
d->prg_cpu[i] = pci_pool_alloc(d->prg_pool, GFP_KERNEL, d->prg_bus+i);
OHCI_DMA_ALLOC("pool dma_trm prg[%d]", i);
if (d->prg_cpu[i] != NULL) {
memset(d->prg_cpu[i], 0, sizeof(struct at_dma_prg));
@ -3127,28 +2930,10 @@ alloc_dma_trm_ctx(struct ti_ohci *ohci, struct dma_trm_ctx *d,
spin_lock_init(&d->lock);
/* initialize tasklet */
if (type == DMA_CTX_ISO) {
ohci1394_init_iso_tasklet(&ohci->it_legacy_tasklet, OHCI_ISO_TRANSMIT,
dma_trm_tasklet, (unsigned long) d);
if (ohci1394_register_iso_tasklet(ohci,
&ohci->it_legacy_tasklet) < 0) {
PRINT(KERN_ERR, "No IT DMA context available");
free_dma_trm_ctx(d);
return -EBUSY;
}
/* IT can be assigned to any context by register_iso_tasklet */
d->ctx = ohci->it_legacy_tasklet.context;
d->ctrlSet = OHCI1394_IsoXmitContextControlSet + 16 * d->ctx;
d->ctrlClear = OHCI1394_IsoXmitContextControlClear + 16 * d->ctx;
d->cmdPtr = OHCI1394_IsoXmitCommandPtr + 16 * d->ctx;
} else {
d->ctrlSet = context_base + OHCI1394_ContextControlSet;
d->ctrlClear = context_base + OHCI1394_ContextControlClear;
d->cmdPtr = context_base + OHCI1394_ContextCommandPtr;
tasklet_init (&d->task, dma_trm_tasklet, (unsigned long)d);
}
d->ctrlSet = context_base + OHCI1394_ContextControlSet;
d->ctrlClear = context_base + OHCI1394_ContextControlClear;
d->cmdPtr = context_base + OHCI1394_ContextCommandPtr;
tasklet_init(&d->task, dma_trm_tasklet, (unsigned long)d);
return 0;
}
@ -3294,7 +3079,6 @@ static int __devinit ohci1394_pci_probe(struct pci_dev *dev,
ohci->csr_config_rom_cpu =
pci_alloc_consistent(ohci->dev, OHCI_CONFIG_ROM_LEN,
&ohci->csr_config_rom_bus);
OHCI_DMA_ALLOC("consistent csr_config_rom");
if (ohci->csr_config_rom_cpu == NULL)
FAIL(-ENOMEM, "Failed to allocate buffer config rom");
ohci->init_state = OHCI_INIT_HAVE_CONFIG_ROM_BUFFER;
@ -3303,8 +3087,6 @@ static int __devinit ohci1394_pci_probe(struct pci_dev *dev,
ohci->selfid_buf_cpu =
pci_alloc_consistent(ohci->dev, OHCI1394_SI_DMA_BUF_SIZE,
&ohci->selfid_buf_bus);
OHCI_DMA_ALLOC("consistent selfid_buf");
if (ohci->selfid_buf_cpu == NULL)
FAIL(-ENOMEM, "Failed to allocate DMA buffer for self-id packets");
ohci->init_state = OHCI_INIT_HAVE_SELFID_BUFFER;
@ -3377,20 +3159,6 @@ static int __devinit ohci1394_pci_probe(struct pci_dev *dev,
ohci->ISO_channel_usage = 0;
spin_lock_init(&ohci->IR_channel_lock);
/* Allocate the IR DMA context right here so we don't have
* to do it in interrupt path - note that this doesn't
* waste much memory and avoids the jugglery required to
* allocate it in IRQ path. */
if (alloc_dma_rcv_ctx(ohci, &ohci->ir_legacy_context,
DMA_CTX_ISO, 0, IR_NUM_DESC,
IR_BUF_SIZE, IR_SPLIT_BUF_SIZE,
OHCI1394_IsoRcvContextBase) < 0) {
FAIL(-ENOMEM, "Cannot allocate IR Legacy DMA context");
}
/* We hopefully don't have to pre-allocate IT DMA like we did
* for IR DMA above. Allocate it on-demand and mark inactive. */
ohci->it_legacy_context.ohci = NULL;
spin_lock_init(&ohci->event_lock);
/*
@ -3483,20 +3251,16 @@ static void ohci1394_pci_remove(struct pci_dev *pdev)
free_dma_rcv_ctx(&ohci->ar_resp_context);
free_dma_trm_ctx(&ohci->at_req_context);
free_dma_trm_ctx(&ohci->at_resp_context);
free_dma_rcv_ctx(&ohci->ir_legacy_context);
free_dma_trm_ctx(&ohci->it_legacy_context);
case OHCI_INIT_HAVE_SELFID_BUFFER:
pci_free_consistent(ohci->dev, OHCI1394_SI_DMA_BUF_SIZE,
ohci->selfid_buf_cpu,
ohci->selfid_buf_bus);
OHCI_DMA_FREE("consistent selfid_buf");
case OHCI_INIT_HAVE_CONFIG_ROM_BUFFER:
pci_free_consistent(ohci->dev, OHCI_CONFIG_ROM_LEN,
ohci->csr_config_rom_cpu,
ohci->csr_config_rom_bus);
OHCI_DMA_FREE("consistent csr_config_rom");
case OHCI_INIT_HAVE_IOMAPPING:
iounmap(ohci->registers);


@ -190,23 +190,10 @@ struct ti_ohci {
unsigned long ir_multichannel_used; /* ditto */
spinlock_t IR_channel_lock;
/* iso receive (legacy API) */
u64 ir_legacy_channels; /* note: this differs from ISO_channel_usage;
it only accounts for channels listened to
by the legacy API, so that we can know when
it is safe to free the legacy API context */
struct dma_rcv_ctx ir_legacy_context;
struct ohci1394_iso_tasklet ir_legacy_tasklet;
/* iso transmit */
int nb_iso_xmit_ctx;
unsigned long it_ctx_usage; /* use test_and_set_bit() for atomicity */
/* iso transmit (legacy API) */
struct dma_trm_ctx it_legacy_context;
struct ohci1394_iso_tasklet it_legacy_tasklet;
u64 ISO_channel_usage;
/* IEEE-1394 part follows */
@ -221,7 +208,6 @@ struct ti_ohci {
/* Tasklets for iso receive and transmit, used by video1394
* and dv1394 */
struct list_head iso_tasklet_list;
spinlock_t iso_tasklet_list_lock;


@ -477,7 +477,11 @@ static void send_next(struct ti_lynx *lynx, int what)
struct lynx_send_data *d;
struct hpsb_packet *packet;
#if 0 /* has been removed from ieee1394 core */
d = (what == hpsb_iso ? &lynx->iso_send : &lynx->async);
#else
d = &lynx->async;
#endif
if (!list_empty(&d->pcl_queue)) {
PRINT(KERN_ERR, lynx->id, "trying to queue a new packet in nonempty fifo");
BUG();
@ -511,9 +515,11 @@ static void send_next(struct ti_lynx *lynx, int what)
case hpsb_async:
pcl.buffer[0].control |= PCL_CMD_XMT;
break;
#if 0 /* has been removed from ieee1394 core */
case hpsb_iso:
pcl.buffer[0].control |= PCL_CMD_XMT | PCL_ISOMODE;
break;
#endif
case hpsb_raw:
pcl.buffer[0].control |= PCL_CMD_UNFXMT;
break;
@ -542,9 +548,11 @@ static int lynx_transmit(struct hpsb_host *host, struct hpsb_packet *packet)
case hpsb_raw:
d = &lynx->async;
break;
#if 0 /* has been removed from ieee1394 core */
case hpsb_iso:
d = &lynx->iso_send;
break;
#endif
default:
PRINT(KERN_ERR, lynx->id, "invalid packet type %d",
packet->type);
@ -797,7 +805,7 @@ static int lynx_devctl(struct hpsb_host *host, enum devctl_cmd cmd, int arg)
}
break;
#if 0 /* has been removed from ieee1394 core */
case ISO_LISTEN_CHANNEL:
spin_lock_irqsave(&lynx->iso_rcv.lock, flags);
@ -819,7 +827,7 @@ static int lynx_devctl(struct hpsb_host *host, enum devctl_cmd cmd, int arg)
spin_unlock_irqrestore(&lynx->iso_rcv.lock, flags);
break;
#endif
default:
PRINT(KERN_ERR, lynx->id, "unknown devctl command %d", cmd);
retval = -1;
@ -1009,11 +1017,11 @@ static irqreturn_t lynx_irq_handler(int irq, void *dev_id)
pci_unmap_single(lynx->dev, lynx->iso_send.data_dma,
packet->data_size, PCI_DMA_TODEVICE);
}
#if 0 /* has been removed from ieee1394 core */
if (!list_empty(&lynx->iso_send.queue)) {
send_next(lynx, hpsb_iso);
}
#endif
spin_unlock(&lynx->iso_send.queue_lock);
if (pcl.pcl_status & DMA_CHAN_STAT_PKTCMPL) {


@ -36,11 +36,6 @@ struct file_info {
u8 __user *fcp_buffer;
/* old ISO API */
u64 listen_channels;
quadlet_t __user *iso_buffer;
size_t iso_buffer_length;
u8 notification; /* (busreset-notification) RAW1394_NOTIFY_OFF/ON */
/* new rawiso API */


@ -98,21 +98,6 @@ static struct hpsb_address_ops arm_ops = {
static void queue_complete_cb(struct pending_request *req);
#include <asm/current.h>
static void print_old_iso_deprecation(void)
{
static pid_t p;
if (p == current->pid)
return;
p = current->pid;
printk(KERN_WARNING "raw1394: WARNING - Program \"%s\" uses unsupported"
" isochronous request types which will be removed in a next"
" kernel release\n", current->comm);
printk(KERN_WARNING "raw1394: Update your software to use libraw1394's"
" newer interface\n");
}
static struct pending_request *__alloc_pending_request(gfp_t flags)
{
struct pending_request *req;
@ -297,67 +282,6 @@ static void host_reset(struct hpsb_host *host)
spin_unlock_irqrestore(&host_info_lock, flags);
}
static void iso_receive(struct hpsb_host *host, int channel, quadlet_t * data,
size_t length)
{
unsigned long flags;
struct host_info *hi;
struct file_info *fi;
struct pending_request *req, *req_next;
struct iso_block_store *ibs = NULL;
LIST_HEAD(reqs);
if ((atomic_read(&iso_buffer_size) + length) > iso_buffer_max) {
HPSB_INFO("dropped iso packet");
return;
}
spin_lock_irqsave(&host_info_lock, flags);
hi = find_host_info(host);
if (hi != NULL) {
list_for_each_entry(fi, &hi->file_info_list, list) {
if (!(fi->listen_channels & (1ULL << channel)))
continue;
req = __alloc_pending_request(GFP_ATOMIC);
if (!req)
break;
if (!ibs) {
ibs = kmalloc(sizeof(*ibs) + length,
GFP_ATOMIC);
if (!ibs) {
kfree(req);
break;
}
atomic_add(length, &iso_buffer_size);
atomic_set(&ibs->refcount, 0);
ibs->data_size = length;
memcpy(ibs->data, data, length);
}
atomic_inc(&ibs->refcount);
req->file_info = fi;
req->ibs = ibs;
req->data = ibs->data;
req->req.type = RAW1394_REQ_ISO_RECEIVE;
req->req.generation = get_hpsb_generation(host);
req->req.misc = 0;
req->req.recvb = ptr2int(fi->iso_buffer);
req->req.length = min(length, fi->iso_buffer_length);
list_add_tail(&req->list, &reqs);
}
}
spin_unlock_irqrestore(&host_info_lock, flags);
list_for_each_entry_safe(req, req_next, &reqs, list)
queue_complete_req(req);
}
static void fcp_request(struct hpsb_host *host, int nodeid, int direction,
int cts, u8 * data, size_t length)
{
@ -434,7 +358,11 @@ struct compat_raw1394_req {
__u64 sendb;
__u64 recvb;
} __attribute__((packed));
}
#if defined(CONFIG_X86_64) || defined(CONFIG_IA64)
__attribute__((packed))
#endif
;
static const char __user *raw1394_compat_write(const char __user *buf)
{
@ -459,7 +387,7 @@ static const char __user *raw1394_compat_write(const char __user *buf)
static int
raw1394_compat_read(const char __user *buf, struct raw1394_request *r)
{
struct compat_raw1394_req __user *cr = (typeof(cr)) r;
struct compat_raw1394_req __user *cr = (typeof(cr)) buf;
if (!access_ok(VERIFY_WRITE, cr, sizeof(struct compat_raw1394_req)) ||
P(type) ||
P(error) ||
@ -587,7 +515,7 @@ static int state_opened(struct file_info *fi, struct pending_request *req)
req->req.length = 0;
queue_complete_req(req);
return sizeof(struct raw1394_request);
return 0;
}
static int state_initialized(struct file_info *fi, struct pending_request *req)
@ -601,7 +529,7 @@ static int state_initialized(struct file_info *fi, struct pending_request *req)
req->req.generation = atomic_read(&internal_generation);
req->req.length = 0;
queue_complete_req(req);
return sizeof(struct raw1394_request);
return 0;
}
switch (req->req.type) {
@ -673,44 +601,7 @@ out_set_card:
}
queue_complete_req(req);
return sizeof(struct raw1394_request);
}
static void handle_iso_listen(struct file_info *fi, struct pending_request *req)
{
int channel = req->req.misc;
if ((channel > 63) || (channel < -64)) {
req->req.error = RAW1394_ERROR_INVALID_ARG;
} else if (channel >= 0) {
/* allocate channel req.misc */
if (fi->listen_channels & (1ULL << channel)) {
req->req.error = RAW1394_ERROR_ALREADY;
} else {
if (hpsb_listen_channel
(&raw1394_highlevel, fi->host, channel)) {
req->req.error = RAW1394_ERROR_ALREADY;
} else {
fi->listen_channels |= 1ULL << channel;
fi->iso_buffer = int2ptr(req->req.recvb);
fi->iso_buffer_length = req->req.length;
}
}
} else {
/* deallocate channel (one's complement neg) req.misc */
channel = ~channel;
if (fi->listen_channels & (1ULL << channel)) {
hpsb_unlisten_channel(&raw1394_highlevel, fi->host,
channel);
fi->listen_channels &= ~(1ULL << channel);
} else {
req->req.error = RAW1394_ERROR_INVALID_ARG;
}
}
req->req.length = 0;
queue_complete_req(req);
return 0;
}
static void handle_fcp_listen(struct file_info *fi, struct pending_request *req)
@ -865,7 +756,7 @@ static int handle_async_request(struct file_info *fi,
if (req->req.error) {
req->req.length = 0;
queue_complete_req(req);
return sizeof(struct raw1394_request);
return 0;
}
hpsb_set_packet_complete_task(packet,
@ -883,51 +774,7 @@ static int handle_async_request(struct file_info *fi,
hpsb_free_tlabel(packet);
queue_complete_req(req);
}
return sizeof(struct raw1394_request);
}
static int handle_iso_send(struct file_info *fi, struct pending_request *req,
int channel)
{
unsigned long flags;
struct hpsb_packet *packet;
packet = hpsb_make_isopacket(fi->host, req->req.length, channel & 0x3f,
(req->req.misc >> 16) & 0x3,
req->req.misc & 0xf);
if (!packet)
return -ENOMEM;
packet->speed_code = req->req.address & 0x3;
req->packet = packet;
if (copy_from_user(packet->data, int2ptr(req->req.sendb),
req->req.length)) {
req->req.error = RAW1394_ERROR_MEMFAULT;
req->req.length = 0;
queue_complete_req(req);
return sizeof(struct raw1394_request);
}
req->req.length = 0;
hpsb_set_packet_complete_task(packet,
(void (*)(void *))queue_complete_req,
req);
spin_lock_irqsave(&fi->reqlists_lock, flags);
list_add_tail(&req->list, &fi->req_pending);
spin_unlock_irqrestore(&fi->reqlists_lock, flags);
/* Update the generation of the packet just before sending. */
packet->generation = req->req.generation;
if (hpsb_send_packet(packet) < 0) {
req->req.error = RAW1394_ERROR_SEND_ERROR;
queue_complete_req(req);
}
return sizeof(struct raw1394_request);
return 0;
}
static int handle_async_send(struct file_info *fi, struct pending_request *req)
@ -943,7 +790,7 @@ static int handle_async_send(struct file_info *fi, struct pending_request *req)
req->req.error = RAW1394_ERROR_INVALID_ARG;
req->req.length = 0;
queue_complete_req(req);
return sizeof(struct raw1394_request);
return 0;
}
data_size = req->req.length - header_length;
@ -957,7 +804,7 @@ static int handle_async_send(struct file_info *fi, struct pending_request *req)
req->req.error = RAW1394_ERROR_MEMFAULT;
req->req.length = 0;
queue_complete_req(req);
return sizeof(struct raw1394_request);
return 0;
}
if (copy_from_user
@ -966,7 +813,7 @@ static int handle_async_send(struct file_info *fi, struct pending_request *req)
req->req.error = RAW1394_ERROR_MEMFAULT;
req->req.length = 0;
queue_complete_req(req);
return sizeof(struct raw1394_request);
return 0;
}
packet->type = hpsb_async;
@ -994,7 +841,7 @@ static int handle_async_send(struct file_info *fi, struct pending_request *req)
queue_complete_req(req);
}
return sizeof(struct raw1394_request);
return 0;
}
static int arm_read(struct hpsb_host *host, int nodeid, quadlet_t * buffer,
@ -1869,7 +1716,7 @@ static int arm_register(struct file_info *fi, struct pending_request *req)
spin_lock_irqsave(&host_info_lock, flags);
list_add_tail(&addr->addr_list, &fi->addr_list);
spin_unlock_irqrestore(&host_info_lock, flags);
return sizeof(struct raw1394_request);
return 0;
}
retval =
hpsb_register_addrspace(&raw1394_highlevel, fi->host, &arm_ops,
@ -1887,7 +1734,7 @@ static int arm_register(struct file_info *fi, struct pending_request *req)
return (-EALREADY);
}
free_pending_request(req); /* immediate success or fail */
return sizeof(struct raw1394_request);
return 0;
}
static int arm_unregister(struct file_info *fi, struct pending_request *req)
@ -1955,7 +1802,7 @@ static int arm_unregister(struct file_info *fi, struct pending_request *req)
vfree(addr->addr_space_buffer);
kfree(addr);
free_pending_request(req); /* immediate success or fail */
return sizeof(struct raw1394_request);
return 0;
}
retval =
hpsb_unregister_addrspace(&raw1394_highlevel, fi->host,
@ -1971,7 +1818,7 @@ static int arm_unregister(struct file_info *fi, struct pending_request *req)
vfree(addr->addr_space_buffer);
kfree(addr);
free_pending_request(req); /* immediate success or fail */
return sizeof(struct raw1394_request);
return 0;
}
/* Copy data from ARM buffer(s) to user buffer. */
@ -2013,7 +1860,7 @@ static int arm_get_buf(struct file_info *fi, struct pending_request *req)
* queue no response, and therefore nobody
* will free it. */
free_pending_request(req);
return sizeof(struct raw1394_request);
return 0;
} else {
DBGMSG("arm_get_buf request exceeded mapping");
spin_unlock_irqrestore(&host_info_lock, flags);
@ -2065,7 +1912,7 @@ static int arm_set_buf(struct file_info *fi, struct pending_request *req)
* queue no response, and therefore nobody
* will free it. */
free_pending_request(req);
return sizeof(struct raw1394_request);
return 0;
} else {
DBGMSG("arm_set_buf request exceeded mapping");
spin_unlock_irqrestore(&host_info_lock, flags);
@ -2086,7 +1933,7 @@ static int reset_notification(struct file_info *fi, struct pending_request *req)
(req->req.misc == RAW1394_NOTIFY_ON)) {
fi->notification = (u8) req->req.misc;
free_pending_request(req); /* we have to free the request, because we queue no response, and therefore nobody will free it */
return sizeof(struct raw1394_request);
return 0;
}
/* error EINVAL (22) invalid argument */
return (-EINVAL);
@ -2119,12 +1966,12 @@ static int write_phypacket(struct file_info *fi, struct pending_request *req)
req->req.length = 0;
queue_complete_req(req);
}
return sizeof(struct raw1394_request);
return 0;
}
static int get_config_rom(struct file_info *fi, struct pending_request *req)
{
int ret = sizeof(struct raw1394_request);
int ret = 0;
quadlet_t *data = kmalloc(req->req.length, GFP_KERNEL);
int status;
@ -2154,7 +2001,7 @@ static int get_config_rom(struct file_info *fi, struct pending_request *req)
static int update_config_rom(struct file_info *fi, struct pending_request *req)
{
int ret = sizeof(struct raw1394_request);
int ret = 0;
quadlet_t *data = kmalloc(req->req.length, GFP_KERNEL);
if (!data)
return -ENOMEM;
@ -2221,7 +2068,7 @@ static int modify_config_rom(struct file_info *fi, struct pending_request *req)
hpsb_update_config_rom_image(fi->host);
free_pending_request(req);
return sizeof(struct raw1394_request);
return 0;
}
}
@ -2286,7 +2133,7 @@ static int modify_config_rom(struct file_info *fi, struct pending_request *req)
/* we have to free the request, because we queue no response,
* and therefore nobody will free it */
free_pending_request(req);
return sizeof(struct raw1394_request);
return 0;
} else {
for (dentry =
fi->csr1212_dirs[dr]->value.directory.dentries_head;
@ -2311,11 +2158,7 @@ static int state_connected(struct file_info *fi, struct pending_request *req)
case RAW1394_REQ_ECHO:
queue_complete_req(req);
return sizeof(struct raw1394_request);
case RAW1394_REQ_ISO_SEND:
print_old_iso_deprecation();
return handle_iso_send(fi, req, node);
return 0;
case RAW1394_REQ_ARM_REGISTER:
return arm_register(fi, req);
@ -2332,27 +2175,30 @@ static int state_connected(struct file_info *fi, struct pending_request *req)
case RAW1394_REQ_RESET_NOTIFY:
return reset_notification(fi, req);
case RAW1394_REQ_ISO_SEND:
case RAW1394_REQ_ISO_LISTEN:
print_old_iso_deprecation();
handle_iso_listen(fi, req);
return sizeof(struct raw1394_request);
printk(KERN_DEBUG "raw1394: old iso ABI has been removed\n");
req->req.error = RAW1394_ERROR_COMPAT;
req->req.misc = RAW1394_KERNELAPI_VERSION;
queue_complete_req(req);
return 0;
case RAW1394_REQ_FCP_LISTEN:
handle_fcp_listen(fi, req);
return sizeof(struct raw1394_request);
return 0;
case RAW1394_REQ_RESET_BUS:
if (req->req.misc == RAW1394_LONG_RESET) {
DBGMSG("busreset called (type: LONG)");
hpsb_reset_bus(fi->host, LONG_RESET);
free_pending_request(req); /* we have to free the request, because we queue no response, and therefore nobody will free it */
return sizeof(struct raw1394_request);
return 0;
}
if (req->req.misc == RAW1394_SHORT_RESET) {
DBGMSG("busreset called (type: SHORT)");
hpsb_reset_bus(fi->host, SHORT_RESET);
free_pending_request(req); /* we have to free the request, because we queue no response, and therefore nobody will free it */
return sizeof(struct raw1394_request);
return 0;
}
/* error EINVAL (22) invalid argument */
return (-EINVAL);
@ -2371,7 +2217,7 @@ static int state_connected(struct file_info *fi, struct pending_request *req)
req->req.generation = get_hpsb_generation(fi->host);
req->req.length = 0;
queue_complete_req(req);
return sizeof(struct raw1394_request);
return 0;
}
switch (req->req.type) {
@ -2384,7 +2230,7 @@ static int state_connected(struct file_info *fi, struct pending_request *req)
if (req->req.length == 0) {
req->req.error = RAW1394_ERROR_INVALID_ARG;
queue_complete_req(req);
return sizeof(struct raw1394_request);
return 0;
}
return handle_async_request(fi, req, node);
@ -2395,7 +2241,7 @@ static ssize_t raw1394_write(struct file *file, const char __user * buffer,
{
struct file_info *fi = (struct file_info *)file->private_data;
struct pending_request *req;
ssize_t retval = 0;
ssize_t retval = -EBADFD;
#ifdef CONFIG_COMPAT
if (count == sizeof(struct compat_raw1394_req) &&
@ -2437,6 +2283,9 @@ static ssize_t raw1394_write(struct file *file, const char __user * buffer,
if (retval < 0) {
free_pending_request(req);
} else {
BUG_ON(retval);
retval = count;
}
return retval;
@ -2802,6 +2651,103 @@ static int raw1394_ioctl(struct inode *inode, struct file *file,
return -EINVAL;
}
#ifdef CONFIG_COMPAT
struct raw1394_iso_packets32 {
__u32 n_packets;
compat_uptr_t infos;
} __attribute__((packed));
struct raw1394_cycle_timer32 {
__u32 cycle_timer;
__u64 local_time;
}
#if defined(CONFIG_X86_64) || defined(CONFIG_IA64)
__attribute__((packed))
#endif
;
#define RAW1394_IOC_ISO_RECV_PACKETS32 \
_IOW ('#', 0x25, struct raw1394_iso_packets32)
#define RAW1394_IOC_ISO_XMIT_PACKETS32 \
_IOW ('#', 0x27, struct raw1394_iso_packets32)
#define RAW1394_IOC_GET_CYCLE_TIMER32 \
_IOR ('#', 0x30, struct raw1394_cycle_timer32)
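
For orientation only (illustrative, not part of the patch): the 32-bit
variants above differ from their native counterparts in pointer width.
The shim below copies @n_packets across unchanged and widens @infos from
a 4-byte compat_uptr_t to a native pointer. A sketch of the native struct;
the pointee type is an assumption here, see raw1394.h for the real thing:

	/* native counterpart of raw1394_iso_packets32 (sketch) */
	struct raw1394_iso_packets {
		__u32 n_packets;
		struct raw1394_iso_packet_info __user *infos;	/* 8 bytes on 64-bit */
	};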
static long raw1394_iso_xmit_recv_packets32(struct file *file, unsigned int cmd,
struct raw1394_iso_packets32 __user *arg)
{
compat_uptr_t infos32;
void *infos;
long err = -EFAULT;
struct raw1394_iso_packets __user *dst = compat_alloc_user_space(sizeof(struct raw1394_iso_packets));
if (!copy_in_user(&dst->n_packets, &arg->n_packets, sizeof arg->n_packets) &&
!copy_from_user(&infos32, &arg->infos, sizeof infos32)) {
infos = compat_ptr(infos32);
if (!copy_to_user(&dst->infos, &infos, sizeof infos))
err = raw1394_ioctl(NULL, file, cmd, (unsigned long)dst);
}
return err;
}
static long raw1394_read_cycle_timer32(struct file_info *fi, void __user * uaddr)
{
struct raw1394_cycle_timer32 ct;
int err;
err = hpsb_read_cycle_timer(fi->host, &ct.cycle_timer, &ct.local_time);
if (!err)
if (copy_to_user(uaddr, &ct, sizeof(ct)))
err = -EFAULT;
return err;
}
static long raw1394_compat_ioctl(struct file *file,
unsigned int cmd, unsigned long arg)
{
struct file_info *fi = file->private_data;
void __user *argp = (void __user *)arg;
long err;
lock_kernel();
switch (cmd) {
/* These requests have the same format as long as 'int' has the same size. */
case RAW1394_IOC_ISO_RECV_INIT:
case RAW1394_IOC_ISO_RECV_START:
case RAW1394_IOC_ISO_RECV_LISTEN_CHANNEL:
case RAW1394_IOC_ISO_RECV_UNLISTEN_CHANNEL:
case RAW1394_IOC_ISO_RECV_SET_CHANNEL_MASK:
case RAW1394_IOC_ISO_RECV_RELEASE_PACKETS:
case RAW1394_IOC_ISO_RECV_FLUSH:
case RAW1394_IOC_ISO_XMIT_RECV_STOP:
case RAW1394_IOC_ISO_XMIT_INIT:
case RAW1394_IOC_ISO_XMIT_START:
case RAW1394_IOC_ISO_XMIT_SYNC:
case RAW1394_IOC_ISO_GET_STATUS:
case RAW1394_IOC_ISO_SHUTDOWN:
case RAW1394_IOC_ISO_QUEUE_ACTIVITY:
err = raw1394_ioctl(NULL, file, cmd, arg);
break;
/* These requests have a different format. */
case RAW1394_IOC_ISO_RECV_PACKETS32:
err = raw1394_iso_xmit_recv_packets32(file, RAW1394_IOC_ISO_RECV_PACKETS, argp);
break;
case RAW1394_IOC_ISO_XMIT_PACKETS32:
err = raw1394_iso_xmit_recv_packets32(file, RAW1394_IOC_ISO_XMIT_PACKETS, argp);
break;
case RAW1394_IOC_GET_CYCLE_TIMER32:
err = raw1394_read_cycle_timer32(fi, argp);
break;
default:
err = -EINVAL;
break;
}
unlock_kernel();
return err;
}
#endif
static unsigned int raw1394_poll(struct file *file, poll_table * pt)
{
struct file_info *fi = file->private_data;
@ -2861,14 +2807,7 @@ static int raw1394_release(struct inode *inode, struct file *file)
if (fi->iso_state != RAW1394_ISO_INACTIVE)
raw1394_iso_shutdown(fi);
for (i = 0; i < 64; i++) {
if (fi->listen_channels & (1ULL << i)) {
hpsb_unlisten_channel(&raw1394_highlevel, fi->host, i);
}
}
spin_lock_irqsave(&host_info_lock, flags);
fi->listen_channels = 0;
fail = 0;
/* set address-entries invalid */
@ -3030,7 +2969,6 @@ static struct hpsb_highlevel raw1394_highlevel = {
.add_host = add_host,
.remove_host = remove_host,
.host_reset = host_reset,
.iso_receive = iso_receive,
.fcp_request = fcp_request,
};
@ -3041,7 +2979,9 @@ static const struct file_operations raw1394_fops = {
.write = raw1394_write,
.mmap = raw1394_mmap,
.ioctl = raw1394_ioctl,
// .compat_ioctl = ... someone needs to do this
#ifdef CONFIG_COMPAT
.compat_ioctl = raw1394_compat_ioctl,
#endif
.poll = raw1394_poll,
.open = raw1394_open,
.release = raw1394_release,
@ -3054,9 +2994,9 @@ static int __init init_raw1394(void)
hpsb_register_highlevel(&raw1394_highlevel);
if (IS_ERR
(class_device_create
(hpsb_protocol_class, NULL,
MKDEV(IEEE1394_MAJOR, IEEE1394_MINOR_BLOCK_RAW1394 * 16), NULL,
(device_create(
hpsb_protocol_class, NULL,
MKDEV(IEEE1394_MAJOR, IEEE1394_MINOR_BLOCK_RAW1394 * 16),
RAW1394_DEVICE_NAME))) {
ret = -EFAULT;
goto out_unreg;
@ -3083,9 +3023,9 @@ static int __init init_raw1394(void)
goto out;
out_dev:
class_device_destroy(hpsb_protocol_class,
MKDEV(IEEE1394_MAJOR,
IEEE1394_MINOR_BLOCK_RAW1394 * 16));
device_destroy(hpsb_protocol_class,
MKDEV(IEEE1394_MAJOR,
IEEE1394_MINOR_BLOCK_RAW1394 * 16));
out_unreg:
hpsb_unregister_highlevel(&raw1394_highlevel);
out:
@ -3094,9 +3034,9 @@ static int __init init_raw1394(void)
static void __exit cleanup_raw1394(void)
{
class_device_destroy(hpsb_protocol_class,
MKDEV(IEEE1394_MAJOR,
IEEE1394_MINOR_BLOCK_RAW1394 * 16));
device_destroy(hpsb_protocol_class,
MKDEV(IEEE1394_MAJOR,
IEEE1394_MINOR_BLOCK_RAW1394 * 16));
cdev_del(&raw1394_cdev);
hpsb_unregister_highlevel(&raw1394_highlevel);
hpsb_unregister_protocol(&raw1394_driver);


@ -17,11 +17,11 @@
#define RAW1394_REQ_ASYNC_WRITE 101
#define RAW1394_REQ_LOCK 102
#define RAW1394_REQ_LOCK64 103
#define RAW1394_REQ_ISO_SEND 104
#define RAW1394_REQ_ISO_SEND 104 /* removed ABI, now a no-op */
#define RAW1394_REQ_ASYNC_SEND 105
#define RAW1394_REQ_ASYNC_STREAM 106
#define RAW1394_REQ_ISO_LISTEN 200
#define RAW1394_REQ_ISO_LISTEN 200 /* removed ABI, now a no-op */
#define RAW1394_REQ_FCP_LISTEN 201
#define RAW1394_REQ_RESET_BUS 202
#define RAW1394_REQ_GET_ROM 203


@ -118,14 +118,13 @@ MODULE_PARM_DESC(max_speed, "Force max speed "
"(3 = 800Mb/s, 2 = 400Mb/s, 1 = 200Mb/s, 0 = 100Mb/s)");
/*
* Set serialize_io to 1 if you'd like only one scsi command sent
* down to us at a time (debugging). This might be necessary for very
* badly behaved sbp2 devices.
* Set serialize_io to 0 or N to use dynamically appended lists of command ORBs.
* This is and always has been buggy in multiple subtle ways. See above TODOs.
*/
static int sbp2_serialize_io = 1;
module_param_named(serialize_io, sbp2_serialize_io, int, 0444);
MODULE_PARM_DESC(serialize_io, "Serialize I/O coming from scsi drivers "
"(default = 1, faster = 0)");
module_param_named(serialize_io, sbp2_serialize_io, bool, 0444);
MODULE_PARM_DESC(serialize_io, "Serialize requests coming from SCSI drivers "
"(default = Y, faster but buggy = N)");
/*
* Bump up max_sectors if you'd like to support very large sized
@ -154,9 +153,9 @@ MODULE_PARM_DESC(max_sectors, "Change max sectors per I/O supported "
* are possible on OXFW911 and newer Oxsemi bridges.
*/
static int sbp2_exclusive_login = 1;
module_param_named(exclusive_login, sbp2_exclusive_login, int, 0644);
module_param_named(exclusive_login, sbp2_exclusive_login, bool, 0644);
MODULE_PARM_DESC(exclusive_login, "Exclusive login to sbp2 device "
"(default = 1)");
"(default = Y, use N for concurrent initiators)");
/*
* If any of the following workarounds is required for your device to work,


@ -67,7 +67,7 @@ struct sbp2_command_orb {
#define ORB_SET_LUN(v) ((v) & 0xffff)
#define ORB_SET_FUNCTION(v) (((v) & 0xf) << 16)
#define ORB_SET_RECONNECT(v) (((v) & 0xf) << 20)
#define ORB_SET_EXCLUSIVE(v) (((v) & 0x1) << 28)
#define ORB_SET_EXCLUSIVE(v) ((v) ? 1 << 28 : 0)
#define ORB_SET_LOGIN_RESP_LENGTH(v) ((v) & 0xffff)
#define ORB_SET_PASSWD_LENGTH(v) (((v) & 0xffff) << 16)
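
As a worked comparison of the ORB_SET_EXCLUSIVE change (illustrative, to go
with the bool module parameter conversion above):

	/*
	 * Old form: a nonzero value with bit 0 clear was silently dropped:
	 *	((2 & 0x1) << 28) == 0
	 * New form: any nonzero value requests an exclusive login:
	 *	((2) ? 1 << 28 : 0) == (1 << 28)
	 */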


@ -1340,9 +1340,9 @@ static void video1394_add_host (struct hpsb_host *host)
hpsb_set_hostinfo_key(&video1394_highlevel, host, ohci->host->id);
minor = IEEE1394_MINOR_BLOCK_VIDEO1394 * 16 + ohci->host->id;
class_device_create(hpsb_protocol_class, NULL, MKDEV(
IEEE1394_MAJOR, minor),
NULL, "%s-%d", VIDEO1394_DRIVER_NAME, ohci->host->id);
device_create(hpsb_protocol_class, NULL,
MKDEV(IEEE1394_MAJOR, minor),
"%s-%d", VIDEO1394_DRIVER_NAME, ohci->host->id);
}
@ -1351,8 +1351,8 @@ static void video1394_remove_host (struct hpsb_host *host)
struct ti_ohci *ohci = hpsb_get_hostinfo(&video1394_highlevel, host);
if (ohci)
class_device_destroy(hpsb_protocol_class, MKDEV(IEEE1394_MAJOR,
IEEE1394_MINOR_BLOCK_VIDEO1394 * 16 + ohci->host->id));
device_destroy(hpsb_protocol_class, MKDEV(IEEE1394_MAJOR,
IEEE1394_MINOR_BLOCK_VIDEO1394 * 16 + ohci->host->id));
return;
}


@ -30,16 +30,38 @@
#define FW_CDEV_EVENT_REQUEST 0x02
#define FW_CDEV_EVENT_ISO_INTERRUPT 0x03
/* The 'closure' fields are for user space to use. Data passed in the
* 'closure' field for a request will be returned in the corresponding
* event. It's a 64-bit type so that it's a fixed size type big
* enough to hold a pointer on all platforms. */
/**
* struct fw_cdev_event_common - Common part of all fw_cdev_event_ types
* @closure: For arbitrary use by userspace
* @type: Discriminates the fw_cdev_event_ types
*
* This struct may be used to access generic members of all fw_cdev_event_
* types regardless of the specific type.
*
* Data passed in the @closure field for a request will be returned in the
* corresponding event. It is big enough to hold a pointer on all platforms.
* The ioctl used to set @closure depends on the @type of event.
*/
struct fw_cdev_event_common {
__u64 closure;
__u32 type;
};
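
For illustration only (not part of the patch): a minimal userspace sketch
that reads one event from an already opened /dev/fw* file descriptor and
dispatches on the common type; error handling is elided.

	#include <stdio.h>
	#include <unistd.h>
	#include <linux/firewire-cdev.h>

	static void handle_event(int fd)
	{
		__u64 buffer[512];	/* 4 KiB, 8-byte aligned for the events */
		union fw_cdev_event *event = (union fw_cdev_event *)buffer;

		if (read(fd, buffer, sizeof(buffer)) < 0)
			return;

		switch (event->common.type) {
		case FW_CDEV_EVENT_BUS_RESET:
			printf("bus reset, generation %u\n",
			       event->bus_reset.generation);
			break;
		case FW_CDEV_EVENT_RESPONSE:
			printf("response, rcode %u, %u payload byte(s)\n",
			       event->response.rcode, event->response.length);
			break;
		default:
			break;
		}
	}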
/**
* struct fw_cdev_event_bus_reset - Sent when a bus reset occurred
* @closure: See &fw_cdev_event_common; set by %FW_CDEV_IOC_GET_INFO ioctl
* @type: See &fw_cdev_event_common; always %FW_CDEV_EVENT_BUS_RESET
* @node_id: New node ID of this node
* @local_node_id: Node ID of the local node, i.e. of the controller
* @bm_node_id: Node ID of the bus manager
* @irm_node_id: Node ID of the iso resource manager
* @root_node_id: Node ID of the root node
* @generation: New bus generation
*
* This event is sent when the bus the device belongs to goes through a bus
* reset. It provides information about the new bus configuration, such as
* new node ID for this device, new root ID, and others.
*/
struct fw_cdev_event_bus_reset {
__u64 closure;
__u32 type;
@ -51,6 +73,20 @@ struct fw_cdev_event_bus_reset {
__u32 generation;
};
/**
* struct fw_cdev_event_response - Sent when a response packet was received
* @closure: See &fw_cdev_event_common;
* set by %FW_CDEV_IOC_SEND_REQUEST ioctl
* @type: See &fw_cdev_event_common; always %FW_CDEV_EVENT_RESPONSE
* @rcode: Response code returned by the remote node
* @length: Data length, i.e. the response's payload size in bytes
* @data: Payload data, if any
*
* This event is sent when the stack receives a response to an outgoing request
* sent by %FW_CDEV_IOC_SEND_REQUEST ioctl. The payload data for responses
* carrying data (read and lock responses) follows immediately and can be
* accessed through the @data field.
*/
struct fw_cdev_event_response {
__u64 closure;
__u32 type;
@ -59,6 +95,25 @@ struct fw_cdev_event_response {
__u32 data[0];
};
/**
* struct fw_cdev_event_request - Sent on incoming request to an address region
* @closure: See &fw_cdev_event_common; set by %FW_CDEV_IOC_ALLOCATE ioctl
* @type: See &fw_cdev_event_common; always %FW_CDEV_EVENT_REQUEST
* @tcode: Transaction code of the incoming request
* @offset: The offset into the 48-bit per-node address space
* @handle: Reference to the kernel-side pending request
* @length: Data length, i.e. the request's payload size in bytes
* @data: Incoming data, if any
*
* This event is sent when the stack receives an incoming request to an address
* region registered using the %FW_CDEV_IOC_ALLOCATE ioctl. The request is
* guaranteed to be completely contained in the specified region. Userspace is
* responsible for sending the response by %FW_CDEV_IOC_SEND_RESPONSE ioctl,
* using the same @handle.
*
* The payload data for requests carrying data (write and lock requests)
* follows immediately and can be accessed through the @data field.
*/
struct fw_cdev_event_request {
__u64 closure;
__u32 type;
@ -69,14 +124,39 @@ struct fw_cdev_event_request {
__u32 data[0];
};
/**
* struct fw_cdev_event_iso_interrupt - Sent when an iso packet was completed
* @closure: See &fw_cdev_event_common;
* set by %FW_CDEV_IOC_CREATE_ISO_CONTEXT ioctl
* @type: See &fw_cdev_event_common; always %FW_CDEV_EVENT_ISO_INTERRUPT
* @cycle: Cycle counter of the interrupt packet
* @header_length: Total length of following headers, in bytes
* @header: Stripped headers, if any
*
* This event is sent when the controller has completed an &fw_cdev_iso_packet
* with the %FW_CDEV_ISO_INTERRUPT bit set. In the receive case, the headers
* stripped from all packets up to and including the interrupt packet are
* returned in the @header field.
*/
struct fw_cdev_event_iso_interrupt {
__u64 closure;
__u32 type;
__u32 cycle;
__u32 header_length; /* Length in bytes of following headers. */
__u32 header_length;
__u32 header[0];
};
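
A hedged sketch (not from the patch) of consuming such an event in
userspace; the stripped headers are quadlets, while @header_length counts
bytes:

	#include <stdio.h>
	#include <linux/firewire-cdev.h>

	static void dump_iso_headers(const struct fw_cdev_event_iso_interrupt *e)
	{
		__u32 i;

		for (i = 0; i < e->header_length / 4; i++)
			printf("cycle %u, header quadlet %u: 0x%08x\n",
			       e->cycle, i, e->header[i]);
	}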
/**
* union fw_cdev_event - Convenience union of fw_cdev_event_ types
* @common: Valid for all types
* @bus_reset: Valid if @common.type == %FW_CDEV_EVENT_BUS_RESET
* @response: Valid if @common.type == %FW_CDEV_EVENT_RESPONSE
* @request: Valid if @common.type == %FW_CDEV_EVENT_REQUEST
* @iso_interrupt: Valid if @common.type == %FW_CDEV_EVENT_ISO_INTERRUPT
*
* Convenience union for userspace use. Events could be read(2) into a char
* buffer and then cast to this union for further processing.
*/
union fw_cdev_event {
struct fw_cdev_event_common common;
struct fw_cdev_event_bus_reset bus_reset;
@ -105,35 +185,47 @@ union fw_cdev_event {
*/
#define FW_CDEV_VERSION 1
/**
* struct fw_cdev_get_info - General purpose information ioctl
* @version: The version field is just a running serial number.
* We never break backwards compatibility, but may add more
* structs and ioctls in later revisions.
* @rom_length: If @rom is non-zero, at most rom_length bytes of configuration
* ROM will be copied into that user space address. In either
* case, @rom_length is updated with the actual length of the
* configuration ROM.
* @rom: If non-zero, address of a buffer to be filled by a copy of the
* local node's configuration ROM
* @bus_reset: If non-zero, address of a buffer to be filled by a
* &struct fw_cdev_event_bus_reset with the current state
* of the bus. This does not cause a bus reset to happen.
* @bus_reset_closure: Value of &closure in this and subsequent bus reset events
* @card: The index of the card this device belongs to
*/
struct fw_cdev_get_info {
/* The version field is just a running serial number. We
* never break backwards compatibility. Userspace passes in
* the version it expects and the kernel passes back the
* highest version it can provide. Even if the structs in
* this interface are extended in a later version, the kernel
* will not copy back more data than what was present in the
* interface version userspace expects. */
__u32 version;
/* If non-zero, at most rom_length bytes of config rom will be
* copied into that user space address. In either case,
* rom_length is updated with the actual length of the config
* rom. */
__u32 rom_length;
__u64 rom;
/* If non-zero, a fw_cdev_event_bus_reset struct will be
* copied here with the current state of the bus. This does
* not cause a bus reset to happen. The value of closure in
* this and subsequent bus reset events is set to
* bus_reset_closure. */
__u64 bus_reset;
__u64 bus_reset_closure;
/* The index of the card this device belongs to. */
__u32 card;
};
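
Illustrative usage, not part of the patch (assumes an open /dev/fw*
descriptor; casting through unsigned long is one way to store a userspace
pointer in a __u64 field):

	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/firewire-cdev.h>

	static int get_bus_state(int fd, struct fw_cdev_event_bus_reset *bus_reset)
	{
		struct fw_cdev_get_info info;

		memset(&info, 0, sizeof(info));
		info.version = FW_CDEV_VERSION;	/* ABI version we were built for */
		info.rom = 0;			/* skip the config ROM copy */
		info.bus_reset = (__u64)(unsigned long)bus_reset;
		info.bus_reset_closure = 0;

		return ioctl(fd, FW_CDEV_IOC_GET_INFO, &info);
	}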
/**
* struct fw_cdev_send_request - Send an asynchronous request packet
* @tcode: Transaction code of the request
* @length: Length of outgoing payload, in bytes
* @offset: 48-bit offset at destination node
* @closure: Passed back to userspace in the response event
* @data: Userspace pointer to payload
* @generation: The bus generation where packet is valid
*
* Send a request to the device. This ioctl implements all outgoing requests.
* Both quadlet and block requests specify the payload as a pointer to the data
* in the @data field. Once the transaction completes, the kernel writes an
* &fw_cdev_event_response event back. The @closure field is passed back to
* user space in the response event.
*/
struct fw_cdev_send_request {
__u32 tcode;
__u32 length;
@ -143,6 +235,19 @@ struct fw_cdev_send_request {
__u32 generation;
};
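
A sketch of a quadlet read built on this struct (illustrative; tcode 0x4 is
the IEEE 1394 transaction code for a quadlet read request, and @generation
must come from a current bus reset event):

	#include <sys/ioctl.h>
	#include <linux/firewire-cdev.h>

	static int read_quadlet(int fd, __u64 offset, __u32 generation)
	{
		struct fw_cdev_send_request request = {
			.tcode	    = 0x4,	/* quadlet read request */
			.length	    = 4,	/* one quadlet */
			.offset	    = offset,	/* 48-bit address at the target */
			.closure    = 0,	/* echoed in the response event */
			.data	    = 0,	/* reads carry no outgoing payload */
			.generation = generation,
		};

		/* the result arrives later as an FW_CDEV_EVENT_RESPONSE */
		return ioctl(fd, FW_CDEV_IOC_SEND_REQUEST, &request);
	}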
/**
* struct fw_cdev_send_response - Send an asynchronous response packet
* @rcode: Response code as determined by the userspace handler
* @length: Length of outgoing payload, in bytes
* @data: Userspace pointer to payload
* @handle: The handle from the &fw_cdev_event_request
*
* Send a response to an incoming request. By setting up an address range using
* the %FW_CDEV_IOC_ALLOCATE ioctl, userspace can listen for incoming requests. An
* incoming request will generate an %FW_CDEV_EVENT_REQUEST, and userspace must
* send a reply using this ioctl. The event has a handle to the kernel-side
* pending transaction, which should be used with this ioctl.
*/
struct fw_cdev_send_response {
__u32 rcode;
__u32 length;
@ -150,6 +255,21 @@ struct fw_cdev_send_response {
__u32 handle;
};
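
Sketch of answering an incoming request event (illustrative; rcode 0x0 is
RCODE_COMPLETE in IEEE 1394 terms):

	#include <sys/ioctl.h>
	#include <linux/firewire-cdev.h>

	static int complete_request(int fd, const struct fw_cdev_event_request *req)
	{
		struct fw_cdev_send_response response = {
			.rcode	= 0x0,		/* RCODE_COMPLETE */
			.length	= 0,		/* no payload in this reply */
			.data	= 0,
			.handle	= req->handle,	/* ties reply to the pending transaction */
		};

		return ioctl(fd, FW_CDEV_IOC_SEND_RESPONSE, &response);
	}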
/**
* struct fw_cdev_allocate - Allocate a CSR address range
* @offset: Start offset of the address range
* @closure: To be passed back to userspace in request events
* @length: Length of the address range, in bytes
* @handle: Handle to the allocation, written by the kernel
*
* Allocate an address range in the 48-bit address space on the local node
* (the controller). This allows userspace to listen for requests with an
* offset within that address range. When the kernel receives a request
* within the range, an &fw_cdev_event_request event will be written back.
* The @closure field is passed back to userspace in the response event.
* The @handle field is an out parameter, returning a handle to the allocated
* range to be used for later deallocation of the range.
*/
struct fw_cdev_allocate {
__u64 offset;
__u64 closure;
@ -157,6 +277,11 @@ struct fw_cdev_allocate {
__u32 handle;
};
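
An illustrative allocation helper (not part of the patch; the deallocation
ioctl name is assumed from the same header):

	#include <sys/ioctl.h>
	#include <linux/firewire-cdev.h>

	static int allocate_range(int fd, __u64 offset, __u32 length, __u32 *handle)
	{
		struct fw_cdev_allocate allocate = {
			.offset	 = offset,	/* start of the CSR range */
			.closure = 0,
			.length	 = length,	/* bytes */
		};
		int err = ioctl(fd, FW_CDEV_IOC_ALLOCATE, &allocate);

		if (!err)
			*handle = allocate.handle;	/* for FW_CDEV_IOC_DEALLOCATE */
		return err;
	}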
/**
* struct fw_cdev_deallocate - Free an address range allocation
* @handle: Handle to the address range, as returned by the kernel when the
* range was allocated
*/
struct fw_cdev_deallocate {
__u32 handle;
};
@ -164,10 +289,41 @@ struct fw_cdev_deallocate {
#define FW_CDEV_LONG_RESET 0
#define FW_CDEV_SHORT_RESET 1
/**
* struct fw_cdev_initiate_bus_reset - Initiate a bus reset
* @type: %FW_CDEV_SHORT_RESET or %FW_CDEV_LONG_RESET
*
* Initiate a bus reset for the bus this device is on. The bus reset can be
* either the original (long) bus reset or the arbitrated (short) bus reset
* introduced in 1394a-2000.
*/
struct fw_cdev_initiate_bus_reset {
__u32 type;
__u32 type; /* FW_CDEV_SHORT_RESET or FW_CDEV_LONG_RESET */
};
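
Usage is a one-liner (illustrative; FW_CDEV_IOC_INITIATE_BUS_RESET is
assumed to be the corresponding ioctl number defined elsewhere in this
header):

	#include <sys/ioctl.h>
	#include <linux/firewire-cdev.h>

	static int short_bus_reset(int fd)
	{
		struct fw_cdev_initiate_bus_reset reset = {
			.type = FW_CDEV_SHORT_RESET,	/* arbitrated, 1394a-2000 */
		};

		return ioctl(fd, FW_CDEV_IOC_INITIATE_BUS_RESET, &reset);
	}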
/**
* struct fw_cdev_add_descriptor - Add contents to the local node's config ROM
* @immediate: If non-zero, immediate key to insert before pointer
* @key: Upper 8 bits of root directory pointer
* @data: Userspace pointer to contents of descriptor block
* @length: Length of descriptor block data, in bytes
* @handle: Handle to the descriptor, written by the kernel
*
* Add a descriptor block and optionally a preceding immediate key to the local
* node's configuration ROM.
*
* The @key field specifies the upper 8 bits of the descriptor root directory
* pointer and the @data and @length fields specify the contents. The @key
* should be of the form 0xXX000000. The offset part of the root directory entry
* will be filled in by the kernel.
*
* If not 0, the @immediate field specifies an immediate key which will be
* inserted before the root directory pointer.
*
* If successful, the kernel adds the descriptor and writes back a handle to the
* kernel-side object to be used for later removal of the descriptor block and
* immediate key.
*/
struct fw_cdev_add_descriptor {
__u32 immediate;
__u32 key;
@ -176,6 +332,14 @@ struct fw_cdev_add_descriptor {
__u32 handle;
};
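
A hedged sketch of adding a descriptor block (the key value is an example;
0x81 is the textual descriptor leaf pointer, and the add/remove ioctl names
are assumed from the same header):

	#include <sys/ioctl.h>
	#include <linux/firewire-cdev.h>

	static int add_descriptor(int fd, const __u32 *block, __u32 length,
				  __u32 *handle)
	{
		struct fw_cdev_add_descriptor d = {
			.immediate = 0,			/* no immediate key */
			.key	   = 0x81000000,	/* textual descriptor leaf */
			.data	   = (__u64)(unsigned long)block,
			.length	   = length,		/* see @length above */
		};
		int err = ioctl(fd, FW_CDEV_IOC_ADD_DESCRIPTOR, &d);

		if (!err)
			*handle = d.handle;	/* for FW_CDEV_IOC_REMOVE_DESCRIPTOR */
		return err;
	}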
/**
* struct fw_cdev_remove_descriptor - Remove contents from the configuration ROM
* @handle: Handle to the descriptor, as returned by the kernel when the
* descriptor was added
*
* Remove a descriptor block and accompanying immediate key from the local
* node's configuration ROM.
*/
struct fw_cdev_remove_descriptor {
__u32 handle;
};
@ -183,12 +347,24 @@ struct fw_cdev_remove_descriptor {
#define FW_CDEV_ISO_CONTEXT_TRANSMIT 0
#define FW_CDEV_ISO_CONTEXT_RECEIVE 1
#define FW_CDEV_ISO_CONTEXT_MATCH_TAG0 1
#define FW_CDEV_ISO_CONTEXT_MATCH_TAG1 2
#define FW_CDEV_ISO_CONTEXT_MATCH_TAG2 4
#define FW_CDEV_ISO_CONTEXT_MATCH_TAG3 8
#define FW_CDEV_ISO_CONTEXT_MATCH_ALL_TAGS 15
/**
* struct fw_cdev_create_iso_context - Create a context for isochronous IO
* @type: %FW_CDEV_ISO_CONTEXT_TRANSMIT or %FW_CDEV_ISO_CONTEXT_RECEIVE
* @header_size: Header size to strip for receive contexts
* @channel: Channel to bind to
* @speed: Speed to transmit at
* @closure: To be returned in &fw_cdev_event_iso_interrupt
* @handle: Handle to context, written back by kernel
*
* Prior to sending or receiving isochronous I/O, a context must be created.
* The context records information about the transmit or receive configuration
* and typically maps to an underlying hardware resource. A context is set up
* for either sending or receiving. It is bound to a specific isochronous
* channel.
*
* If a context was successfully created, the kernel writes back a handle to the
* context, which must be passed in for subsequent operations on that context.
*/
struct fw_cdev_create_iso_context {
__u32 type;
__u32 header_size;
@ -201,15 +377,49 @@ struct fw_cdev_create_iso_context {
#define FW_CDEV_ISO_PAYLOAD_LENGTH(v) (v)
#define FW_CDEV_ISO_INTERRUPT (1 << 16)
#define FW_CDEV_ISO_SKIP (1 << 17)
#define FW_CDEV_ISO_SYNC (1 << 17)
#define FW_CDEV_ISO_TAG(v) ((v) << 18)
#define FW_CDEV_ISO_SY(v) ((v) << 20)
#define FW_CDEV_ISO_HEADER_LENGTH(v) ((v) << 24)
/**
* struct fw_cdev_iso_packet - Isochronous packet
* @control: Contains the header length (8 uppermost bits), the sy field
* (4 bits), the tag field (2 bits), a sync flag (1 bit),
* a skip flag (1 bit), an interrupt flag (1 bit), and the
* payload length (16 lowermost bits)
* @header: Header and payload
*
* &struct fw_cdev_iso_packet is used to describe isochronous packet queues.
*
* Use the FW_CDEV_ISO_ macros to fill in @control. The sy and tag fields are
* specified by IEEE 1394a and IEC 61883.
*
* FIXME - finish this documentation
*/
struct fw_cdev_iso_packet {
__u32 control;
__u32 header[0];
};
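
For example, the control word for a 256-byte payload with tag 1, no extra
headers, and an interrupt on completion could be composed as follows
(illustrative):

	#include <linux/firewire-cdev.h>

	static __u32 example_control(void)
	{
		return FW_CDEV_ISO_PAYLOAD_LENGTH(256)	/* 16 low bits */
		     | FW_CDEV_ISO_TAG(1)		/* iso tag field */
		     | FW_CDEV_ISO_INTERRUPT		/* event on completion */
		     | FW_CDEV_ISO_HEADER_LENGTH(0);	/* no extra headers */
	}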
/**
* struct fw_cdev_queue_iso - Queue isochronous packets for I/O
* @packets: Userspace pointer to packet data
* @data: Pointer into mmap()'ed payload buffer
* @size: Size of packet data in bytes
* @handle: Isochronous context handle
*
* Queue a number of isochronous packets for reception or transmission.
* This ioctl takes a pointer to an array of &fw_cdev_iso_packet structs,
* which describe how to transmit from or receive into a contiguous region
* of a mmap()'ed payload buffer. As part of the packet descriptors,
* a series of headers can be supplied, which will be prepended to the
* payload during DMA.
*
* The kernel may or may not queue all packets, but will write back updated
* values of the @packets, @data and @size fields, so the ioctl can be
* resubmitted easily.
*/
struct fw_cdev_queue_iso {
__u64 packets;
__u64 data;
@ -217,6 +427,23 @@ struct fw_cdev_queue_iso {
__u32 handle;
};
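
A minimal sketch queueing a single packet (illustrative; assumes @handle
came from %FW_CDEV_IOC_CREATE_ISO_CONTEXT and @payload points into the
mmap()'ed buffer):

	#include <sys/ioctl.h>
	#include <linux/firewire-cdev.h>

	static int queue_one_packet(int fd, __u32 handle, void *payload,
				    __u32 control)
	{
		struct fw_cdev_iso_packet packet = { .control = control };
		struct fw_cdev_queue_iso queue = {
			.packets = (__u64)(unsigned long)&packet,
			.data	 = (__u64)(unsigned long)payload,
			.size	 = sizeof(packet),	/* descriptor bytes */
			.handle	 = handle,
		};

		return ioctl(fd, FW_CDEV_IOC_QUEUE_ISO, &queue);
	}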
#define FW_CDEV_ISO_CONTEXT_MATCH_TAG0 1
#define FW_CDEV_ISO_CONTEXT_MATCH_TAG1 2
#define FW_CDEV_ISO_CONTEXT_MATCH_TAG2 4
#define FW_CDEV_ISO_CONTEXT_MATCH_TAG3 8
#define FW_CDEV_ISO_CONTEXT_MATCH_ALL_TAGS 15
/**
* struct fw_cdev_start_iso - Start an isochronous transmission or reception
* @cycle: Cycle in which to start I/O. If @cycle is greater than or
* equal to 0, the I/O will start on that cycle.
* @sync: Determines the value to wait for, for receive packets that have
* the %FW_CDEV_ISO_SYNC bit set
* @tags: Tag filter bit mask. Only valid for isochronous reception.
* Determines the tag values for which packets will be accepted.
* Use FW_CDEV_ISO_CONTEXT_MATCH_ macros to set @tags.
* @handle: Isochronous context handle within which to transmit or receive
*/
struct fw_cdev_start_iso {
__s32 cycle;
__u32 sync;
@ -224,6 +451,10 @@ struct fw_cdev_start_iso {
__u32 handle;
};
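
A sketch starting a receive context immediately, accepting every tag value
(illustrative; a negative @cycle means "as soon as possible"):

	#include <sys/ioctl.h>
	#include <linux/firewire-cdev.h>

	static int start_receive(int fd, __u32 iso_handle)
	{
		struct fw_cdev_start_iso start = {
			.cycle	= -1,	/* no specific start cycle */
			.sync	= 0,
			.tags	= FW_CDEV_ISO_CONTEXT_MATCH_ALL_TAGS,
			.handle	= iso_handle,
		};

		return ioctl(fd, FW_CDEV_IOC_START_ISO, &start);
	}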
/**
* struct fw_cdev_stop_iso - Stop an isochronous transmission or reception
* @handle: Handle of isochronous context to stop
*/
struct fw_cdev_stop_iso {
__u32 handle;
};