/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Thunderbolt driver - Tunneling support
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2019, Intel Corporation
 */

#ifndef TB_TUNNEL_H_
#define TB_TUNNEL_H_

#include "tb.h"

enum tb_tunnel_type {
	TB_TUNNEL_PCI,
	TB_TUNNEL_DP,
	TB_TUNNEL_DMA,
	TB_TUNNEL_USB3,
};

/**
 * struct tb_tunnel - Tunnel between two ports
 * @tb: Pointer to the domain
 * @src_port: Source port of the tunnel
 * @dst_port: Destination port of the tunnel. For discovered, incomplete
 *	      tunnels this may be %NULL or a null adapter port instead.
 * @paths: All paths required by the tunnel
 * @npaths: Number of paths in @paths
 * @init: Optional tunnel-specific initialization
 * @activate: Optional tunnel-specific activation/deactivation
 * @consumed_bandwidth: Return how much bandwidth the tunnel consumes
 * @release_unused_bandwidth: Release all unused bandwidth
 * @reclaim_available_bandwidth: Reclaim available bandwidth back for the tunnel
 * @list: Tunnels are linked using this field
 * @type: Type of the tunnel
 * @max_up: Maximum upstream bandwidth (Mb/s) available for the tunnel.
 *	    Only set if the bandwidth needs to be limited.
 * @max_down: Maximum downstream bandwidth (Mb/s) available for the tunnel.
 *	      Only set if the bandwidth needs to be limited.
 * @allocated_up: Allocated upstream bandwidth (only for USB3)
 * @allocated_down: Allocated downstream bandwidth (only for USB3)
 */
struct tb_tunnel {
	struct tb *tb;
	struct tb_port *src_port;
	struct tb_port *dst_port;
	struct tb_path **paths;
	size_t npaths;
	int (*init)(struct tb_tunnel *tunnel);
	int (*activate)(struct tb_tunnel *tunnel, bool activate);
	int (*consumed_bandwidth)(struct tb_tunnel *tunnel, int *consumed_up,
				  int *consumed_down);
	int (*release_unused_bandwidth)(struct tb_tunnel *tunnel);
	void (*reclaim_available_bandwidth)(struct tb_tunnel *tunnel,
					    int *available_up,
					    int *available_down);
	struct list_head list;
	enum tb_tunnel_type type;
	int max_up;
	int max_down;
	int allocated_up;
	int allocated_down;
};

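/*
 * The callbacks above are filled in by the type-specific constructors
 * declared below (tb_tunnel_alloc_pci(), tb_tunnel_alloc_dp(), ...), and
 * the generic tb_tunnel_*() helpers dispatch through them when they are
 * set. As a rough sketch (illustrative only, not the tunnel.c
 * implementation):
 *
 *	if (tunnel->consumed_bandwidth)
 *		ret = tunnel->consumed_bandwidth(tunnel, &up, &down);
 */
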
struct tb_tunnel *tb_tunnel_discover_pci(struct tb *tb, struct tb_port *down);
struct tb_tunnel *tb_tunnel_alloc_pci(struct tb *tb, struct tb_port *up,
				      struct tb_port *down);
struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in);
struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
				     struct tb_port *out, int max_up,
				     int max_down);
struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi,
				      struct tb_port *dst, int transmit_ring,
				      int transmit_path, int receive_ring,
				      int receive_path);
struct tb_tunnel *tb_tunnel_discover_usb3(struct tb *tb, struct tb_port *down);
struct tb_tunnel *tb_tunnel_alloc_usb3(struct tb *tb, struct tb_port *up,
				       struct tb_port *down, int max_up,
				       int max_down);

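/*
 * Example pairing of the constructors above with the helpers below
 * (illustrative only; the error codes and the list the tunnel ends up on
 * belong to the caller, here a hypothetical connection manager):
 *
 *	struct tb_tunnel *tunnel;
 *
 *	tunnel = tb_tunnel_alloc_pci(tb, up, down);
 *	if (!tunnel)
 *		return -ENOMEM;
 *
 *	if (tb_tunnel_activate(tunnel)) {
 *		tb_tunnel_free(tunnel);
 *		return -EIO;
 *	}
 *
 *	list_add_tail(&tunnel->list, &tunnel_list);
 */
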
void tb_tunnel_free(struct tb_tunnel *tunnel);
int tb_tunnel_activate(struct tb_tunnel *tunnel);
int tb_tunnel_restart(struct tb_tunnel *tunnel);
void tb_tunnel_deactivate(struct tb_tunnel *tunnel);
bool tb_tunnel_is_invalid(struct tb_tunnel *tunnel);
bool tb_tunnel_port_on_path(const struct tb_tunnel *tunnel,
			    const struct tb_port *port);
int tb_tunnel_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
				 int *consumed_down);
int tb_tunnel_release_unused_bandwidth(struct tb_tunnel *tunnel);
void tb_tunnel_reclaim_available_bandwidth(struct tb_tunnel *tunnel,
					   int *available_up,
					   int *available_down);

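/*
 * A minimal sketch of how the bandwidth helpers above fit together from a
 * connection manager's point of view (illustrative only; the real
 * sequencing lives in tb.c and handles more corner cases). available_up
 * and available_down are assumed to have been computed by the caller:
 *
 *	int consumed_up, consumed_down;
 *
 *	ret = tb_tunnel_consumed_bandwidth(tunnel, &consumed_up,
 *					   &consumed_down);
 *	if (ret)
 *		return ret;
 *
 *	ret = tb_tunnel_release_unused_bandwidth(tunnel);
 *	if (ret)
 *		return ret;
 *
 *	tb_tunnel_reclaim_available_bandwidth(tunnel, &available_up,
 *					      &available_down);
 */
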
static inline bool tb_tunnel_is_pci(const struct tb_tunnel *tunnel)
{
	return tunnel->type == TB_TUNNEL_PCI;
}

static inline bool tb_tunnel_is_dp(const struct tb_tunnel *tunnel)
{
	return tunnel->type == TB_TUNNEL_DP;
}

static inline bool tb_tunnel_is_dma(const struct tb_tunnel *tunnel)
{
	return tunnel->type == TB_TUNNEL_DMA;
}

static inline bool tb_tunnel_is_usb3(const struct tb_tunnel *tunnel)
{
	return tunnel->type == TB_TUNNEL_USB3;
}

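/*
 * Example (illustrative): the type checks are handy when walking a list
 * of tunnels, e.g. to pick out the DP tunnels whose bandwidth needs to be
 * re-evaluated. The list head used here is hypothetical:
 *
 *	struct tb_tunnel *tunnel;
 *
 *	list_for_each_entry(tunnel, &tunnel_list, list) {
 *		if (!tb_tunnel_is_dp(tunnel))
 *			continue;
 *		...
 *	}
 */
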
#endif