Stefan Richter | 77c9a5d | 2009-06-05 16:26:18 +0200 | [diff] [blame] | 1 | #ifndef _FIREWIRE_CORE_H |
| 2 | #define _FIREWIRE_CORE_H |
| 3 | |
| 4 | #include <linux/dma-mapping.h> |
| 5 | #include <linux/fs.h> |
| 6 | #include <linux/list.h> |
| 7 | #include <linux/idr.h> |
| 8 | #include <linux/mm_types.h> |
| 9 | #include <linux/rwsem.h> |
| 10 | #include <linux/slab.h> |
| 11 | #include <linux/types.h> |
| 12 | |
| 13 | #include <asm/atomic.h> |
| 14 | |
| 15 | struct device; |
| 16 | struct fw_card; |
| 17 | struct fw_device; |
| 18 | struct fw_iso_buffer; |
| 19 | struct fw_iso_context; |
| 20 | struct fw_iso_packet; |
| 21 | struct fw_node; |
| 22 | struct fw_packet; |
| 23 | |
| 24 | |
| 25 | /* -card */ |
| 26 | |
| 27 | /* bitfields within the PHY registers */ |
| 28 | #define PHY_LINK_ACTIVE 0x80 |
| 29 | #define PHY_CONTENDER 0x40 |
| 30 | #define PHY_BUS_RESET 0x40 |
| 31 | #define PHY_BUS_SHORT_RESET 0x40 |
| 32 | |
| 33 | #define BANDWIDTH_AVAILABLE_INITIAL 4915 |
| 34 | #define BROADCAST_CHANNEL_INITIAL (1 << 31 | 31) |
| 35 | #define BROADCAST_CHANNEL_VALID (1 << 30) |
| 36 | |
/*
 * Operations implemented by a low-level host controller driver and
 * called by the core (see fw_card_initialize() below, which binds a
 * driver to a card).
 */
struct fw_card_driver {
	/*
	 * Enable the given card with the given initial config rom.
	 * This function is expected to activate the card, and either
	 * enable the PHY or set the link_on bit and initiate a bus
	 * reset.
	 */
	int (*enable)(struct fw_card *card, u32 *config_rom, size_t length);

	/* Read-modify-write a PHY register: clear clear_bits, then set set_bits. */
	int (*update_phy_reg)(struct fw_card *card, int address,
			      int clear_bits, int set_bits);

	/*
	 * Update the config rom for an enabled card.  This function
	 * should change the config rom that is presented on the bus
	 * and initiate a bus reset.
	 */
	int (*set_config_rom)(struct fw_card *card,
			      u32 *config_rom, size_t length);

	void (*send_request)(struct fw_card *card, struct fw_packet *packet);
	void (*send_response)(struct fw_card *card, struct fw_packet *packet);
	/* Calling cancel is valid once a packet has been submitted. */
	int (*cancel_packet)(struct fw_card *card, struct fw_packet *packet);

	/*
	 * Allow the specified node ID to do direct DMA out and in of
	 * host memory.  The card will disable this for all nodes when
	 * a bus reset happens, so drivers need to reenable this after
	 * bus reset.  Returns 0 on success, -ENODEV if the card
	 * doesn't support this, -ESTALE if the generation doesn't
	 * match.
	 */
	int (*enable_phys_dma)(struct fw_card *card,
			       int node_id, int generation);

	u64 (*get_bus_time)(struct fw_card *card);

	/* Isochronous context management, backing the fw_iso_* API below. */
	struct fw_iso_context *
	(*allocate_iso_context)(struct fw_card *card,
				int type, int channel, size_t header_size);
	void (*free_iso_context)(struct fw_iso_context *ctx);

	int (*start_iso)(struct fw_iso_context *ctx,
			 s32 cycle, u32 sync, u32 tags);

	int (*queue_iso)(struct fw_iso_context *ctx,
			 struct fw_iso_packet *packet,
			 struct fw_iso_buffer *buffer,
			 unsigned long payload);

	int (*stop_iso)(struct fw_iso_context *ctx);
};
| 90 | |
| 91 | void fw_card_initialize(struct fw_card *card, |
| 92 | const struct fw_card_driver *driver, struct device *device); |
| 93 | int fw_card_add(struct fw_card *card, |
| 94 | u32 max_receive, u32 link_speed, u64 guid); |
| 95 | void fw_core_remove_card(struct fw_card *card); |
| 96 | int fw_core_initiate_bus_reset(struct fw_card *card, int short_reset); |
| 97 | int fw_compute_block_crc(u32 *block); |
| 98 | void fw_schedule_bm_work(struct fw_card *card, unsigned long delay); |
| 99 | |
/*
 * An extra block of config ROM data, registered with the core via
 * fw_core_add_descriptor() and removed with fw_core_remove_descriptor().
 */
struct fw_descriptor {
	struct list_head link;	/* entry in the core's descriptor list */
	size_t length;		/* length of data[] — presumably in quadlets;
				 * verify against fw_core_add_descriptor() */
	u32 immediate;
	u32 key;
	const u32 *data;
};
| 107 | |
| 108 | int fw_core_add_descriptor(struct fw_descriptor *desc); |
| 109 | void fw_core_remove_descriptor(struct fw_descriptor *desc); |
| 110 | |
| 111 | |
| 112 | /* -cdev */ |
| 113 | |
| 114 | extern const struct file_operations fw_device_ops; |
| 115 | |
| 116 | void fw_device_cdev_update(struct fw_device *device); |
| 117 | void fw_device_cdev_remove(struct fw_device *device); |
| 118 | |
| 119 | |
| 120 | /* -device */ |
| 121 | |
| 122 | extern struct rw_semaphore fw_device_rwsem; |
| 123 | extern struct idr fw_device_idr; |
| 124 | extern int fw_cdev_major; |
| 125 | |
| 126 | struct fw_device *fw_device_get_by_devt(dev_t devt); |
Stefan Richter | 099d541 | 2009-06-06 18:37:25 +0200 | [diff] [blame^] | 127 | int fw_device_set_broadcast_channel(struct device *dev, void *gen); |
Stefan Richter | 77c9a5d | 2009-06-05 16:26:18 +0200 | [diff] [blame] | 128 | void fw_node_event(struct fw_card *card, struct fw_node *node, int event); |
| 129 | |
| 130 | |
| 131 | /* -iso */ |
| 132 | |
/*
 * The iso packet format allows for an immediate header/payload part
 * stored in 'header' immediately after the packet info plus an
 * indirect payload part that is pointed to by the 'payload' field.
 * Applications can use one or the other or both to implement simple
 * low-bandwidth streaming (e.g. audio) or more advanced
 * scatter-gather streaming (e.g. assembling video frames automatically).
 */
/* Description of one packet to be queued on an iso context; see above. */
struct fw_iso_packet {
	u16 payload_length;	/* Length of indirect payload. */
	u32 interrupt:1;	/* Generate interrupt on this packet */
	u32 skip:1;		/* Set to not send packet at all. */
	u32 tag:2;		/* Tag field of the iso packet header */
	u32 sy:4;		/* Sy field of the iso packet header */
	u32 header_length:8;	/* Length of immediate header. */
	u32 header[0];		/* Immediate header data (header_length long) */
};
| 150 | |
| 151 | #define FW_ISO_CONTEXT_TRANSMIT 0 |
| 152 | #define FW_ISO_CONTEXT_RECEIVE 1 |
| 153 | |
| 154 | #define FW_ISO_CONTEXT_MATCH_TAG0 1 |
| 155 | #define FW_ISO_CONTEXT_MATCH_TAG1 2 |
| 156 | #define FW_ISO_CONTEXT_MATCH_TAG2 4 |
| 157 | #define FW_ISO_CONTEXT_MATCH_TAG3 8 |
| 158 | #define FW_ISO_CONTEXT_MATCH_ALL_TAGS 15 |
| 159 | |
| 160 | /* |
| 161 | * An iso buffer is just a set of pages mapped for DMA in the |
| 162 | * specified direction. Since the pages are to be used for DMA, they |
| 163 | * are not mapped into the kernel virtual address space. We store the |
| 164 | * DMA address in the page private. The helper function |
| 165 | * fw_iso_buffer_map() will map the pages into a given vma. |
| 166 | */ |
struct fw_iso_buffer {
	enum dma_data_direction direction;	/* DMA direction of all pages */
	struct page **pages;	/* DMA address stored in each page's private field */
	int page_count;		/* number of entries in pages[] */
};
| 172 | |
| 173 | typedef void (*fw_iso_callback_t)(struct fw_iso_context *context, |
| 174 | u32 cycle, size_t header_length, |
| 175 | void *header, void *data); |
| 176 | |
/* State of one isochronous send or receive context. */
struct fw_iso_context {
	struct fw_card *card;	/* card this context was created on */
	int type;		/* FW_ISO_CONTEXT_TRANSMIT or _RECEIVE */
	int channel;
	int speed;
	size_t header_size;	/* per-packet header size requested at creation */
	fw_iso_callback_t callback;	/* completion callback */
	void *callback_data;	/* opaque cookie passed back to callback */
};
| 186 | |
| 187 | int fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card, |
| 188 | int page_count, enum dma_data_direction direction); |
| 189 | int fw_iso_buffer_map(struct fw_iso_buffer *buffer, struct vm_area_struct *vma); |
| 190 | void fw_iso_buffer_destroy(struct fw_iso_buffer *buffer, struct fw_card *card); |
| 191 | |
| 192 | struct fw_iso_context *fw_iso_context_create(struct fw_card *card, |
| 193 | int type, int channel, int speed, size_t header_size, |
| 194 | fw_iso_callback_t callback, void *callback_data); |
| 195 | int fw_iso_context_queue(struct fw_iso_context *ctx, |
| 196 | struct fw_iso_packet *packet, |
| 197 | struct fw_iso_buffer *buffer, |
| 198 | unsigned long payload); |
| 199 | int fw_iso_context_start(struct fw_iso_context *ctx, |
| 200 | int cycle, int sync, int tags); |
| 201 | int fw_iso_context_stop(struct fw_iso_context *ctx); |
| 202 | void fw_iso_context_destroy(struct fw_iso_context *ctx); |
| 203 | |
| 204 | void fw_iso_resource_manage(struct fw_card *card, int generation, |
| 205 | u64 channels_mask, int *channel, int *bandwidth, bool allocate); |
| 206 | |
| 207 | |
| 208 | /* -topology */ |
| 209 | |
/* Topology change events, passed to fw_node_event(). */
enum {
	FW_NODE_CREATED,
	FW_NODE_UPDATED,
	FW_NODE_DESTROYED,
	FW_NODE_LINK_ON,
	FW_NODE_LINK_OFF,
	FW_NODE_INITIATED_RESET,
};
| 218 | |
/*
 * One node in the bus topology tree.  Reference counted via
 * fw_node_get()/fw_node_put(); built by fw_core_handle_bus_reset().
 */
struct fw_node {
	u16 node_id;		/* node ID as used on the bus */
	u8 color;		/* NOTE(review): looks like a scratch mark used
				 * while (re)building the tree — confirm */
	u8 port_count;		/* number of entries in ports[] */
	u8 link_on:1;
	u8 initiated_reset:1;
	u8 b_path:1;
	u8 phy_speed:2;	/* As in the self ID packet. */
	u8 max_speed:2;	/* Minimum of all phy-speeds on the path from the
			 * local node to this node. */
	u8 max_depth:4;	/* Maximum depth to any leaf node */
	u8 max_hops:4;	/* Max hops in this sub tree */
	atomic_t ref_count;

	/* For serializing node topology into a list. */
	struct list_head link;

	/* Upper layer specific data. */
	void *data;

	struct fw_node *ports[0];	/* one entry per PHY port */
};
| 241 | |
| 242 | static inline struct fw_node *fw_node_get(struct fw_node *node) |
| 243 | { |
| 244 | atomic_inc(&node->ref_count); |
| 245 | |
| 246 | return node; |
| 247 | } |
| 248 | |
| 249 | static inline void fw_node_put(struct fw_node *node) |
| 250 | { |
| 251 | if (atomic_dec_and_test(&node->ref_count)) |
| 252 | kfree(node); |
| 253 | } |
| 254 | |
| 255 | void fw_core_handle_bus_reset(struct fw_card *card, int node_id, |
| 256 | int generation, int self_id_count, u32 *self_ids); |
| 257 | void fw_destroy_nodes(struct fw_card *card); |
| 258 | |
/*
 * Check whether new_generation immediately follows old_generation,
 * taking the counter roll-over at 255 (as per OHCI) into account.
 */
static inline bool is_next_generation(int new_generation, int old_generation)
{
	int expected = (old_generation + 1) & 0xff;

	return (new_generation & 0xff) == expected;
}
| 267 | |
| 268 | |
| 269 | /* -transaction */ |
| 270 | |
| 271 | #define TCODE_IS_READ_REQUEST(tcode) (((tcode) & ~1) == 4) |
| 272 | #define TCODE_IS_BLOCK_PACKET(tcode) (((tcode) & 1) != 0) |
| 273 | #define TCODE_IS_REQUEST(tcode) (((tcode) & 2) == 0) |
| 274 | #define TCODE_IS_RESPONSE(tcode) (((tcode) & 2) != 0) |
| 275 | #define TCODE_HAS_REQUEST_DATA(tcode) (((tcode) & 12) != 4) |
| 276 | #define TCODE_HAS_RESPONSE_DATA(tcode) (((tcode) & 12) != 0) |
| 277 | |
| 278 | #define LOCAL_BUS 0xffc0 |
| 279 | |
| 280 | void fw_core_handle_request(struct fw_card *card, struct fw_packet *request); |
| 281 | void fw_core_handle_response(struct fw_card *card, struct fw_packet *packet); |
| 282 | void fw_fill_response(struct fw_packet *response, u32 *request_header, |
| 283 | int rcode, void *payload, size_t length); |
| 284 | void fw_flush_transactions(struct fw_card *card); |
| 285 | void fw_send_phy_config(struct fw_card *card, |
| 286 | int node_id, int generation, int gap_count); |
| 287 | |
/*
 * Compose the destination ID of a stream packet from its tag,
 * channel and sy fields (tag in bits 14-15, channel in bits 8-13).
 */
static inline int fw_stream_packet_destination_id(int tag, int channel, int sy)
{
	int id = sy;

	id |= channel << 8;
	id |= tag << 14;

	return id;
}
| 292 | |
| 293 | #endif /* _FIREWIRE_CORE_H */ |