1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef S390_ISM_H
3#define S390_ISM_H
4
5#include <linux/spinlock.h>
6#include <linux/types.h>
7#include <linux/pci.h>
8#include <linux/dibs.h>
9#include <asm/pci_insn.h>
10
#define UTIL_STR_LEN 16
/* Generic error indication — presumably reported in ism_resp_hdr.ret; confirm in ism.c */
#define ISM_ERROR 0xFFFF

/* Number of DMBs supported per device; sizes the SBA arrays/bitmaps below */
#define ISM_NR_DMBS 1920

/*
 * Do not use the first word of the DMB bits to ensure 8 byte aligned access.
 */
#define ISM_DMB_WORD_OFFSET 1
#define ISM_DMB_BIT_OFFSET (ISM_DMB_WORD_OFFSET * 32)

/*
 * Firmware command codes, placed in ism_req_hdr.cmd. Most codes have a
 * matching request/response parameter block (union ism_* below); commands
 * without dedicated parameters use union ism_cmd_simple.
 */
#define ISM_REG_SBA 0x1
#define ISM_REG_IEQ 0x2
#define ISM_READ_GID 0x3
#define ISM_ADD_VLAN_ID 0x4
#define ISM_DEL_VLAN_ID 0x5
#define ISM_SET_VLAN 0x6
#define ISM_RESET_VLAN 0x7
#define ISM_QUERY_INFO 0x8
#define ISM_QUERY_RGID 0x9
#define ISM_REG_DMB 0xA
#define ISM_UNREG_DMB 0xB
#define ISM_SIGNAL_IEQ 0xE
#define ISM_UNREG_SBA 0x11
#define ISM_UNREG_IEQ 0x12
36
/* Event classes, reported in ism_event.type (see struct ism_event) */
enum ism_event_type {
	ISM_EVENT_BUF = 0x00,	/* buffer (DMB) related event */
	ISM_EVENT_DEV = 0x01,	/* device related event */
	ISM_EVENT_SWR = 0x02	/* NOTE(review): meaning of SWR not evident here — confirm in ism.c */
};
42
/*
 * Event codes, reported in ism_event.code. ISM_BUF_* codes accompany
 * ISM_EVENT_BUF events, ISM_DEV_* codes accompany ISM_EVENT_DEV events.
 * Values are firmware-defined and intentionally not in ascending order.
 */
enum ism_event_code {
	ISM_BUF_DMB_UNREGISTERED = 0x04,
	ISM_BUF_USING_ISM_DEV_DISABLED = 0x08,
	ISM_BUF_OWNING_ISM_DEV_IN_ERR_STATE = 0x02,
	ISM_BUF_USING_ISM_DEV_IN_ERR_STATE = 0x03,
	ISM_BUF_VLAN_MISMATCH_WITH_OWNER = 0x05,
	ISM_BUF_VLAN_MISMATCH_WITH_USER = 0x06,
	ISM_DEV_GID_DISABLED = 0x07,
	ISM_DEV_GID_ERR_STATE = 0x01
};
53
/* Common header at the start of every command request block */
struct ism_req_hdr {
	u32 cmd;	/* ISM_* command code */
	u16 : 16;	/* reserved */
	u16 len;	/* request block length — presumably sizeof(request); confirm in ism.c */
};
59
/* Common header at the start of every command response block */
struct ism_resp_hdr {
	u32 cmd;	/* ISM_* command code */
	u16 ret;	/* return code — presumably ISM_ERROR on failure; confirm in ism.c */
	u16 len;	/* response block length */
};
65
/* Parameter block for ISM_REG_SBA (register system buffer area) */
union ism_reg_sba {
	struct {
		struct ism_req_hdr hdr;
		u64 sba;	/* address of the SBA — presumably ism_dev.sba_dma_addr; confirm */
	} request;
	struct {
		struct ism_resp_hdr hdr;
	} response;
} __aligned(16);
75
/* Parameter block for ISM_REG_IEQ (register interrupt event queue) */
union ism_reg_ieq {
	struct {
		struct ism_req_hdr hdr;
		u64 ieq;	/* address of the IEQ — presumably ism_dev.ieq_dma_addr; confirm */
		u64 len;	/* queue length in bytes */
	} request;
	struct {
		struct ism_resp_hdr hdr;
	} response;
} __aligned(16);
86
/* ISM-vPCI devices provide 64-bit GIDs.
 * Map them to ISM UUID GIDs like this:
 * _________________________________________
 * | 64-bit ISM-vPCI GID | 00000000_00000000 |
 * -----------------------------------------
 * This will be interpreted as a UUID variant that is reserved
 * for NCS backward compatibility, so it will not collide with
 * proper UUIDs.
 */
union ism_read_gid {
	struct {
		struct ism_req_hdr hdr;
	} request;
	struct {
		struct ism_resp_hdr hdr;
		u64 gid;	/* this device's 64-bit ISM-vPCI GID */
	} response;
} __aligned(16);
105
/* Parameter block for ISM_QUERY_INFO (device capabilities and state) */
union ism_qi {
	struct {
		struct ism_req_hdr hdr;
	} request;
	struct {
		struct ism_resp_hdr hdr;
		u32 version;
		u32 max_len;
		u64 ism_state;
		u64 my_gid;	/* this device's GID */
		u64 sba;	/* currently registered SBA address */
		u64 ieq;	/* currently registered IEQ address */
		u32 ieq_len;
		u32 : 32;	/* reserved */
		u32 dmbs_owned;
		u32 dmbs_used;
		u32 vlan_required;
		u32 vlan_nr_ids;	/* number of valid entries in vlan_id[] */
		u16 vlan_id[64];
	} response;
} __aligned(64);
127
/* Parameter block for ISM_QUERY_RGID (probe reachability of a remote GID) */
union ism_query_rgid {
	struct {
		struct ism_req_hdr hdr;
		u64 rgid;	/* remote GID to query */
		u32 vlan_valid;	/* non-zero if vlan_id is meaningful — confirm encoding */
		u32 vlan_id;
	} request;
	struct {
		struct ism_resp_hdr hdr;
	} response;
} __aligned(16);
139
/* Parameter block for ISM_REG_DMB (register a direct memory buffer) */
union ism_reg_dmb {
	struct {
		struct ism_req_hdr hdr;
		u64 dmb;	/* buffer address */
		u32 dmb_len;	/* buffer length in bytes */
		u32 sba_idx;	/* DMB slot index within the SBA */
		u32 vlan_valid;
		u32 vlan_id;
		u64 rgid;	/* remote GID allowed to write into this DMB */
	} request;
	struct {
		struct ism_resp_hdr hdr;
		u64 dmb_tok;	/* token identifying the DMB in later commands/moves */
	} response;
} __aligned(32);
155
/* Parameter block for ISM_SIGNAL_IEQ (raise an event at a remote peer) */
union ism_sig_ieq {
	struct {
		struct ism_req_hdr hdr;
		u64 rgid;	/* GID of the peer to signal */
		u32 trigger_irq;
		u32 event_code;
		u64 info;	/* opaque payload delivered in ism_event.info — confirm */
	} request;
	struct {
		struct ism_resp_hdr hdr;
	} response;
} __aligned(32);
168
/* Parameter block for ISM_UNREG_DMB (unregister a direct memory buffer) */
union ism_unreg_dmb {
	struct {
		struct ism_req_hdr hdr;
		u64 dmb_tok;	/* token returned by ISM_REG_DMB */
	} request;
	struct {
		struct ism_resp_hdr hdr;
	} response;
} __aligned(16);
178
/* Parameter block for commands that carry no parameters beyond the headers */
union ism_cmd_simple {
	struct {
		struct ism_req_hdr hdr;
	} request;
	struct {
		struct ism_resp_hdr hdr;
	} response;
} __aligned(8);
187
/* Parameter block for ISM_ADD_VLAN_ID / ISM_DEL_VLAN_ID — confirm users in ism.c */
union ism_set_vlan_id {
	struct {
		struct ism_req_hdr hdr;
		u64 vlan_id;
	} request;
	struct {
		struct ism_resp_hdr hdr;
	} response;
} __aligned(16);
197
/* Header at the start of the interrupt event queue (see struct ism_eq) */
struct ism_eq_header {
	u64 idx;	/* device-side entry index — compare against ism_dev.ieq_idx; confirm */
	u64 ieq_len;	/* total queue length */
	u64 entry_len;	/* length of one ism_event entry */
	u64 : 64;	/* reserved */
};
204
/* One entry of the interrupt event queue */
struct ism_event {
	u32 type;	/* enum ism_event_type */
	u32 code;	/* enum ism_event_code */
	u64 tok;	/* DMB token the event refers to (buffer events) — confirm */
	u64 time;	/* timestamp — format not evident from this header */
	u64 info;	/* additional event payload */
};
212
/* Interrupt event queue: header followed by a fixed ring of 15 entries */
struct ism_eq {
	struct ism_eq_header header;
	struct ism_event entry[15];
};
217
/* System buffer area: interrupt summary plus per-DMB event bits and masks */
struct ism_sba {
	u32 s : 1; /* summary bit */
	u32 e : 1; /* event bit */
	u32 : 30;	/* reserved */
	u32 dmb_bits[ISM_NR_DMBS / 32];	/* one bit per DMB; word 0 unused (ISM_DMB_WORD_OFFSET) */
	u32 reserved[3];
	u16 dmbe_mask[ISM_NR_DMBS];	/* per-DMB element mask — confirm semantics in ism.c */
};
226
/* Driver state for one ISM PCI function */
struct ism_dev {
	spinlock_t cmd_lock; /* serializes cmds */
	struct dibs_dev *dibs;	/* associated dibs device */
	struct pci_dev *pdev;	/* underlying PCI function */
	struct ism_sba *sba;	/* system buffer area (CPU pointer) */
	dma_addr_t sba_dma_addr;	/* DMA address of the SBA */
	DECLARE_BITMAP(sba_bitmap, ISM_NR_DMBS);	/* presumably tracks in-use sba_idx slots — confirm */

	struct ism_eq *ieq;	/* interrupt event queue (CPU pointer) */
	dma_addr_t ieq_dma_addr;	/* DMA address of the IEQ */
	int ieq_idx;	/* driver's position in the IEQ — confirm in ism.c */
};
239
/*
 * Build the request operand for a write into a (remote) DMB:
 * dmb token OR'ed with the DMBE index at bit 24+, a signal flag at
 * bit 23, and the byte offset in the low bits. Callers must ensure
 * the fields do not overlap (offset < 2^23).
 */
#define ISM_CREATE_REQ(dmb, idx, sf, offset) \
	((dmb) | (idx) << 24 | (sf) << 23 | (offset))
242
243static inline void __ism_read_cmd(struct ism_dev *ism, void *data,
244 unsigned long offset, unsigned long len)
245{
246 struct zpci_dev *zdev = to_zpci(ism->pdev);
247 u64 req = ZPCI_CREATE_REQ(zdev->fh, 2, 8);
248
249 while (len > 0) {
250 __zpci_load(data, req, offset);
251 offset += 8;
252 data += 8;
253 len -= 8;
254 }
255}
256
257static inline void __ism_write_cmd(struct ism_dev *ism, void *data,
258 unsigned long offset, unsigned long len)
259{
260 struct zpci_dev *zdev = to_zpci(ism->pdev);
261 u64 req = ZPCI_CREATE_REQ(zdev->fh, 2, len);
262
263 if (len)
264 __zpci_store_block(data, req, offset);
265}
266
267static inline int __ism_move(struct ism_dev *ism, u64 dmb_req, void *data,
268 unsigned int size)
269{
270 struct zpci_dev *zdev = to_zpci(ism->pdev);
271 u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, size);
272
273 return __zpci_store_block(data, req, dmb_req);
274}
275
276#endif /* S390_ISM_H */
277

/* source: linux/drivers/s390/net/ism.h */