1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
|
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright(c) 2007-2022 Intel Corporation */
#include "qat_freebsd.h"
#include "adf_cfg.h"
#include "adf_common_drv.h"
#include "adf_accel_devices.h"
#include "icp_qat_uclo.h"
#include "icp_qat_fw.h"
#include "icp_qat_fw_init_admin.h"
#include "adf_cfg_strings.h"
#include "adf_transport_access_macros.h"
#include "adf_transport_internal.h"
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "adf_transport_internal.h"
#define ADF_ARB_NUM 4
#define ADF_ARB_REG_SIZE 0x4
#define ADF_ARB_WTR_SIZE 0x20
#define ADF_ARB_OFFSET 0x30000
#define ADF_ARB_REG_SLOT 0x1000
#define ADF_ARB_WTR_OFFSET 0x010
#define ADF_ARB_RO_EN_OFFSET 0x090
#define ADF_ARB_WQCFG_OFFSET 0x100
#define ADF_ARB_WRK_2_SER_MAP_OFFSET 0x180
#define ADF_ARB_RINGSRVARBEN_OFFSET 0x19C
#define WRITE_CSR_ARB_RINGSRVARBEN(csr_addr, index, value) \
ADF_CSR_WR(csr_addr, \
ADF_ARB_RINGSRVARBEN_OFFSET + (ADF_ARB_REG_SLOT * (index)), \
value)
#define WRITE_CSR_ARB_SARCONFIG(csr_addr, csr_offset, index, value) \
ADF_CSR_WR(csr_addr, (csr_offset) + (ADF_ARB_REG_SIZE * (index)), value)
#define READ_CSR_ARB_RINGSRVARBEN(csr_addr, index) \
ADF_CSR_RD(csr_addr, \
ADF_ARB_RINGSRVARBEN_OFFSET + (ADF_ARB_REG_SLOT * (index)))
static DEFINE_MUTEX(csr_arb_lock);
#define WRITE_CSR_ARB_WRK_2_SER_MAP( \
csr_addr, csr_offset, wrk_to_ser_map_offset, index, value) \
ADF_CSR_WR(csr_addr, \
((csr_offset) + (wrk_to_ser_map_offset)) + \
(ADF_ARB_REG_SIZE * (index)), \
value)
int
adf_init_arb(struct adf_accel_dev *accel_dev)
{
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct arb_info info;
	struct resource *csr = accel_dev->transport->banks[0].csr_addr;
	/*
	 * Use unsigned literals: (0x1 << 31) shifts a signed int into its
	 * sign bit, which is undefined behavior in C (CERT INT34-C). The
	 * resulting register value is unchanged.
	 */
	u32 arb_cfg = (0x1u << 31) | (0x4u << 4) | 0x1u;
	u32 arb;

	hw_data->get_arb_info(&info);

	/* Service arb configured for 32 bytes responses and
	 * ring flow control check enabled.
	 */
	for (arb = 0; arb < ADF_ARB_NUM; arb++)
		WRITE_CSR_ARB_SARCONFIG(csr, info.arbiter_offset, arb, arb_cfg);

	return 0;
}
/*
 * adf_init_gen2_arb() - Initialize the service arbiter on gen2 devices.
 * @accel_dev: acceleration device to configure.
 *
 * Performs the common arbiter configuration, then programs the
 * worker-thread to service-arbiter mapping supplied by the device's
 * hw_data callbacks.
 *
 * Returns 0 on success, EFAULT if no thread-to-arbiter mapping is
 * available, or the error from adf_init_arb() (positive errno,
 * FreeBSD kernel convention).
 */
int
adf_init_gen2_arb(struct adf_accel_dev *accel_dev)
{
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct arb_info info;
	struct resource *csr = accel_dev->transport->banks[0].csr_addr;
	const u32 *thd_2_arb_cfg;
	u32 i;
	int ret;

	/* Invoke common adf_init_arb; propagate any failure instead of
	 * silently discarding it.
	 */
	ret = adf_init_arb(accel_dev);
	if (ret)
		return ret;

	hw_data->get_arb_info(&info);

	/* Map worker threads to service arbiters */
	hw_data->get_arb_mapping(accel_dev, &thd_2_arb_cfg);
	if (!thd_2_arb_cfg)
		return EFAULT;

	for (i = 0; i < hw_data->num_engines; i++)
		WRITE_CSR_ARB_WRK_2_SER_MAP(csr,
					    info.arbiter_offset,
					    info.wrk_thd_2_srv_arb_map,
					    i,
					    thd_2_arb_cfg[i]);
	return 0;
}
/*
 * adf_update_ring_arb() - Refresh ring arbitration for a ring's bank.
 * @ring: ring whose owning bank is re-evaluated.
 *
 * Derives the arbitration-enable value from the bank's ring mask: the
 * low bits are masked directly and the bits above them (offset by the
 * popcount of the enable mask) are masked separately — presumably the
 * TX and RX halves of each ring pair; only positions set in both are
 * enabled. The result is written through the device's CSR ops.
 */
void
adf_update_ring_arb(struct adf_etr_ring_data *ring)
{
	struct adf_accel_dev *accel_dev = ring->bank->accel_dev;
	struct adf_hw_csr_info *csr_info = &accel_dev->hw_device->csr_info;
	struct adf_hw_csr_ops *csr_ops = &csr_info->csr_ops;
	u32 mask = csr_info->arb_enable_mask;
	int half_shift = hweight32(mask);
	u32 lo_bits, hi_bits;

	lo_bits = ring->bank->ring_mask & mask;
	hi_bits = (ring->bank->ring_mask >> half_shift) & mask;

	csr_ops->write_csr_ring_srv_arb_en(ring->bank->csr_addr,
					   ring->bank->bank_number,
					   lo_bits & hi_bits);
}
/*
 * adf_update_uio_ring_arb() - Refresh ring arbitration for a UIO bundle.
 * @bundle: UIO control bundle whose enabled-rings mask is re-evaluated.
 *
 * Same computation as adf_update_ring_arb(), but sourced from the
 * bundle's rings_enabled mask and targeted at the bundle's hardware
 * bundle number: low bits and the bits above them (offset by the
 * popcount of the enable mask) must both be set for a position to be
 * arbitrated.
 */
void
adf_update_uio_ring_arb(struct adf_uio_control_bundle *bundle)
{
	struct adf_accel_dev *accel_dev = bundle->uio_priv.accel->accel_dev;
	struct adf_hw_csr_info *csr_info = &accel_dev->hw_device->csr_info;
	struct adf_hw_csr_ops *csr_ops = &csr_info->csr_ops;
	u32 mask = csr_info->arb_enable_mask;
	int half_shift = hweight32(mask);
	u32 lo_bits, hi_bits;

	lo_bits = bundle->rings_enabled & mask;
	hi_bits = (bundle->rings_enabled >> half_shift) & mask;

	csr_ops->write_csr_ring_srv_arb_en(bundle->csr_addr,
					   bundle->hardware_bundle_number,
					   lo_bits & hi_bits);
}
/*
 * adf_enable_ring_arb() - Set arbitration-enable bits for a bank.
 * @accel_dev: device providing the CSR ops.
 * @csr_addr:  bank CSR base; a NULL value is ignored.
 * @bank_nr:   bank whose arb-enable register is updated.
 * @mask:      bits to turn on (only the low 8 bits are used).
 *
 * Read-modify-write of the arb-enable CSR, serialized by the global
 * csr_arb_lock so concurrent enable/disable calls do not lose bits.
 */
void
adf_enable_ring_arb(struct adf_accel_dev *accel_dev,
		    void *csr_addr,
		    unsigned int bank_nr,
		    unsigned int mask)
{
	struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev);
	u32 val;

	if (!csr_addr)
		return;

	mutex_lock(&csr_arb_lock);
	val = csr_ops->read_csr_ring_srv_arb_en(csr_addr, bank_nr);
	val |= mask & 0xFF;
	csr_ops->write_csr_ring_srv_arb_en(csr_addr, bank_nr, val);
	mutex_unlock(&csr_arb_lock);
}
/*
 * adf_disable_ring_arb() - Clear arbitration-enable bits for a bank.
 * @accel_dev: device providing the CSR ops.
 * @csr_addr:  bank CSR base; a NULL value is ignored.
 * @bank_nr:   bank whose arb-enable register is updated.
 * @mask:      bits to turn off.
 *
 * Counterpart of adf_enable_ring_arb(), under the same global lock.
 * NOTE(review): the update ANDs with ((~mask) & 0xFF) — by C precedence
 * this also clears any bits above bit 7 that were read back; kept
 * as-is to preserve the original behavior.
 */
void
adf_disable_ring_arb(struct adf_accel_dev *accel_dev,
		     void *csr_addr,
		     unsigned int bank_nr,
		     unsigned int mask)
{
	struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev);
	u32 val;

	if (!csr_addr)
		return;

	mutex_lock(&csr_arb_lock);
	val = csr_ops->read_csr_ring_srv_arb_en(csr_addr, bank_nr);
	val &= (~mask) & 0xFF;
	csr_ops->write_csr_ring_srv_arb_en(csr_addr, bank_nr, val);
	mutex_unlock(&csr_arb_lock);
}
/*
 * adf_exit_arb() - Tear down the hardware service arbiter.
 * @accel_dev: acceleration device whose arbiter state is reset.
 *
 * Zeroes the SARCONFIG registers, clears every worker-thread to
 * service-arbiter mapping (when the device provides a mapping
 * callback), and disables ring arbitration on every bank. Bails out
 * early if the transport layer was never initialized, so it is safe
 * on a partially set-up device.
 */
void
adf_exit_arb(struct adf_accel_dev *accel_dev)
{
struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev);
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
struct arb_info info;
struct resource *csr;
unsigned int i;
/* Nothing to undo if the transport (and thus the CSR mapping) is absent. */
if (!accel_dev->transport)
return;
csr = accel_dev->transport->banks[0].csr_addr;
hw_data->get_arb_info(&info);
/* Reset arbiter configuration */
for (i = 0; i < ADF_ARB_NUM; i++)
WRITE_CSR_ARB_SARCONFIG(csr, info.arbiter_offset, i, 0);
/* Unmap worker threads to service arbiters */
if (hw_data->get_arb_mapping) {
for (i = 0; i < hw_data->num_engines; i++)
WRITE_CSR_ARB_WRK_2_SER_MAP(csr,
info.arbiter_offset,
info.wrk_thd_2_srv_arb_map,
i,
0);
}
/* Disable arbitration on all rings */
for (i = 0; i < GET_MAX_BANKS(accel_dev); i++)
csr_ops->write_csr_ring_srv_arb_en(csr, i, 0);
}
/*
 * adf_disable_arb() - Turn off ring arbitration on every bank.
 * @accel_dev: acceleration device; NULL or transport-less devices are
 *             ignored.
 *
 * Lighter-weight than adf_exit_arb(): only the per-bank arb-enable
 * CSRs are zeroed; SARCONFIG and the thread mapping are untouched.
 */
void
adf_disable_arb(struct adf_accel_dev *accel_dev)
{
	struct adf_hw_csr_ops *csr_ops;
	struct resource *csr;
	unsigned int bank;

	if (!accel_dev || !accel_dev->transport)
		return;

	csr = accel_dev->transport->banks[0].csr_addr;
	csr_ops = GET_CSR_OPS(accel_dev);

	/* Disable arbitration on all rings */
	for (bank = 0; bank < GET_MAX_BANKS(accel_dev); bank++)
		csr_ops->write_csr_ring_srv_arb_en(csr, bank, 0);
}
|