blob: 03e56076b5bcff2f792ef2137cb98090c018e16d [file] [log] [blame]
Greg Kroah-Hartmanb2441312017-11-01 15:07:57 +01001/* SPDX-License-Identifier: GPL-2.0 */
Andreas Noever3364f0c2014-06-03 22:04:08 +02002/*
Mika Westerberg93f36ad2017-02-19 13:48:29 +02003 * Thunderbolt driver - Tunneling support
Andreas Noever3364f0c2014-06-03 22:04:08 +02004 *
5 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
Mika Westerberg93f36ad2017-02-19 13:48:29 +02006 * Copyright (C) 2019, Intel Corporation
Andreas Noever3364f0c2014-06-03 22:04:08 +02007 */
8
Mika Westerberg1752b9f2017-02-19 10:58:35 +02009#ifndef TB_TUNNEL_H_
10#define TB_TUNNEL_H_
Andreas Noever3364f0c2014-06-03 22:04:08 +020011
12#include "tb.h"
13
/**
 * enum tb_tunnel_type - Type of a Thunderbolt tunnel
 * @TB_TUNNEL_PCI: PCIe tunnel
 * @TB_TUNNEL_DP: DisplayPort tunnel
 * @TB_TUNNEL_DMA: DMA tunnel (set up between the host NHI port and a
 *		   remote port, see tb_tunnel_alloc_dma())
 * @TB_TUNNEL_USB3: USB3 tunnel
 */
enum tb_tunnel_type {
	TB_TUNNEL_PCI,
	TB_TUNNEL_DP,
	TB_TUNNEL_DMA,
	TB_TUNNEL_USB3,
};
20
/**
 * struct tb_tunnel - Tunnel between two ports
 * @tb: Pointer to the domain
 * @src_port: Source port of the tunnel
 * @dst_port: Destination port of the tunnel. For discovered incomplete
 *	      tunnels this may be %NULL or a null adapter port instead.
 * @paths: All paths required by the tunnel
 * @npaths: Number of paths in @paths
 * @init: Optional tunnel specific initialization
 * @deinit: Optional tunnel specific de-initialization
 * @activate: Optional tunnel specific activation/deactivation
 * @consumed_bandwidth: Return how much bandwidth the tunnel consumes
 * @release_unused_bandwidth: Release all unused bandwidth
 * @reclaim_available_bandwidth: Reclaim back available bandwidth
 * @list: Tunnels are linked using this field
 * @type: Type of the tunnel
 * @max_up: Maximum upstream bandwidth (Mb/s) available for the tunnel.
 *	    Only set if the bandwidth needs to be limited.
 * @max_down: Maximum downstream bandwidth (Mb/s) available for the tunnel.
 *	      Only set if the bandwidth needs to be limited.
 * @allocated_up: Allocated upstream bandwidth (only for USB3)
 * @allocated_down: Allocated downstream bandwidth (only for USB3)
 */
struct tb_tunnel {
	struct tb *tb;
	struct tb_port *src_port;
	struct tb_port *dst_port;
	struct tb_path **paths;
	size_t npaths;
	int (*init)(struct tb_tunnel *tunnel);
	void (*deinit)(struct tb_tunnel *tunnel);
	int (*activate)(struct tb_tunnel *tunnel, bool activate);
	int (*consumed_bandwidth)(struct tb_tunnel *tunnel, int *consumed_up,
				  int *consumed_down);
	int (*release_unused_bandwidth)(struct tb_tunnel *tunnel);
	void (*reclaim_available_bandwidth)(struct tb_tunnel *tunnel,
					    int *available_up,
					    int *available_down);
	struct list_head list;
	enum tb_tunnel_type type;
	int max_up;
	int max_down;
	int allocated_up;
	int allocated_down;
};
66
/* PCIe tunnels */
struct tb_tunnel *tb_tunnel_discover_pci(struct tb *tb, struct tb_port *down,
					 bool alloc_hopid);
struct tb_tunnel *tb_tunnel_alloc_pci(struct tb *tb, struct tb_port *up,
				      struct tb_port *down);

/* DisplayPort tunnels */
struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in,
					bool alloc_hopid);
struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
				     struct tb_port *out, int max_up,
				     int max_down);

/* DMA tunnels (host NHI to remote port) */
struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi,
				      struct tb_port *dst, int transmit_path,
				      int transmit_ring, int receive_path,
				      int receive_ring);
bool tb_tunnel_match_dma(const struct tb_tunnel *tunnel, int transmit_path,
			 int transmit_ring, int receive_path, int receive_ring);

/* USB3 tunnels */
struct tb_tunnel *tb_tunnel_discover_usb3(struct tb *tb, struct tb_port *down,
					  bool alloc_hopid);
struct tb_tunnel *tb_tunnel_alloc_usb3(struct tb *tb, struct tb_port *up,
				       struct tb_port *down, int max_up,
				       int max_down);

/* Operations common to all tunnel types */
void tb_tunnel_free(struct tb_tunnel *tunnel);
int tb_tunnel_activate(struct tb_tunnel *tunnel);
int tb_tunnel_restart(struct tb_tunnel *tunnel);
void tb_tunnel_deactivate(struct tb_tunnel *tunnel);
bool tb_tunnel_is_invalid(struct tb_tunnel *tunnel);
bool tb_tunnel_port_on_path(const struct tb_tunnel *tunnel,
			    const struct tb_port *port);

/* Bandwidth management */
int tb_tunnel_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
				 int *consumed_down);
int tb_tunnel_release_unused_bandwidth(struct tb_tunnel *tunnel);
void tb_tunnel_reclaim_available_bandwidth(struct tb_tunnel *tunnel,
					   int *available_up,
					   int *available_down);
Andreas Noever3364f0c2014-06-03 22:04:08 +0200101
Mika Westerberg4f807e42018-09-17 16:30:49 +0300102static inline bool tb_tunnel_is_pci(const struct tb_tunnel *tunnel)
103{
104 return tunnel->type == TB_TUNNEL_PCI;
105}
106
107static inline bool tb_tunnel_is_dp(const struct tb_tunnel *tunnel)
108{
109 return tunnel->type == TB_TUNNEL_DP;
110}
111
Mika Westerberg44242d62018-09-28 16:35:32 +0300112static inline bool tb_tunnel_is_dma(const struct tb_tunnel *tunnel)
113{
114 return tunnel->type == TB_TUNNEL_DMA;
115}
116
Rajmohan Manie6f81852019-12-17 15:33:44 +0300117static inline bool tb_tunnel_is_usb3(const struct tb_tunnel *tunnel)
118{
119 return tunnel->type == TB_TUNNEL_USB3;
120}
121
Andreas Noever3364f0c2014-06-03 22:04:08 +0200122#endif
123