/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2022-2024 Chelsio Communications, Inc.
 * Written by: John Baldwin <jhb@FreeBSD.org>
 */

#ifndef __NVMF_TRANSPORT_INTERNAL_H__
#define	__NVMF_TRANSPORT_INTERNAL_H__

#include <sys/memdesc.h>

/*
 * Interface between the transport-independent APIs in
 * nvmf_transport.c and individual transports.
 */

struct module;
struct nvmf_io_request;

struct nvmf_transport_ops {
	/* Queue pair management. */
	struct nvmf_qpair *(*allocate_qpair)(bool controller,
	    const struct nvmf_handoff_qpair_params *params);
	void (*free_qpair)(struct nvmf_qpair *qp);

	/* Capsule operations. */
	struct nvmf_capsule *(*allocate_capsule)(struct nvmf_qpair *qp,
	    int how);
	void (*free_capsule)(struct nvmf_capsule *nc);
	int (*transmit_capsule)(struct nvmf_capsule *nc);
	uint8_t (*validate_command_capsule)(struct nvmf_capsule *nc);

	/* Transferring controller data. */
	size_t (*capsule_data_len)(const struct nvmf_capsule *nc);
	int (*receive_controller_data)(struct nvmf_capsule *nc,
	    uint32_t data_offset, struct nvmf_io_request *io);
	u_int (*send_controller_data)(struct nvmf_capsule *nc,
	    uint32_t data_offset, struct mbuf *m, size_t len);

	enum nvmf_trtype trtype;
	int priority;
};
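
/*
 * A transport supplies one of these ops tables describing its hooks.
 * As a purely illustrative sketch (the foo_* functions are
 * hypothetical and not part of any existing transport, and
 * NVMF_TRTYPE_TCP is assumed to come from <dev/nvme/nvme.h>), a "foo"
 * transport might define:
 *
 *	static struct nvmf_transport_ops foo_ops = {
 *		.allocate_qpair = foo_allocate_qpair,
 *		.free_qpair = foo_free_qpair,
 *		.allocate_capsule = foo_allocate_capsule,
 *		.free_capsule = foo_free_capsule,
 *		.transmit_capsule = foo_transmit_capsule,
 *		.validate_command_capsule = foo_validate_command_capsule,
 *		.capsule_data_len = foo_capsule_data_len,
 *		.receive_controller_data = foo_receive_controller_data,
 *		.send_controller_data = foo_send_controller_data,
 *		.trtype = NVMF_TRTYPE_TCP,
 *		.priority = 0,
 *	};
 *
 * The table is registered with the transport layer via the
 * NVMF_TRANSPORT() macro at the bottom of this header.
 */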

/* Either an Admin or I/O Submission/Completion Queue pair. */
struct nvmf_qpair {
	struct nvmf_transport *nq_transport;
	struct nvmf_transport_ops *nq_ops;
	bool nq_controller;

	/* Callback to invoke for a received capsule. */
	nvmf_capsule_receive_t *nq_receive;
	void *nq_receive_arg;

	/* Callback to invoke for an error. */
	nvmf_qpair_error_t *nq_error;
	void *nq_error_arg;

	bool nq_admin;
};

struct nvmf_io_request {
	/*
	 * Data buffer contains io_len bytes in the backing store
	 * described by io_mem.
	 */
	struct memdesc io_mem;
	size_t	io_len;
	nvmf_io_complete_t *io_complete;
	void	*io_complete_arg;
};
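
/*
 * The memdesc may describe any backing store supported by
 * <sys/memdesc.h> (plain buffers, UIOs, mbuf chains, ...), so a
 * transport should not assume a flat virtual address.  As a
 * hypothetical sketch of copying received payload bytes into a
 * request (pdu_data, pdu_off, and pdu_len are illustrative names;
 * memdesc_copyback() is assumed to be available as the generic
 * copy-in helper):
 *
 *	memdesc_copyback(&io->io_mem, pdu_off, pdu_len, pdu_data);
 *
 * Once all io_len bytes have been transferred (or the transfer has
 * failed), the transport reports the result through
 * nvmf_complete_io_request() below.
 */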

/*
 * Fabrics Command and Response Capsules.  The Fabrics host
 * (initiator) and controller (target) drivers work with capsules that
 * are transmitted and received by a specific transport.
 */
struct nvmf_capsule {
	struct nvmf_qpair *nc_qpair;

	/* Either a SQE or CQE. */
	union {
		struct nvme_command nc_sqe;
		struct nvme_completion nc_cqe;
	};
	int	nc_qe_len;

	/*
	 * Is SQHD in received capsule valid?  False for
	 * locally-synthesized responses.
	 */
	bool	nc_sqhd_valid;

	bool	nc_send_data;
	struct nvmf_io_request nc_data;
};

static __inline void
nvmf_qpair_error(struct nvmf_qpair *nq, int error)
{
	nq->nq_error(nq->nq_error_arg, error);
}

static __inline void
nvmf_capsule_received(struct nvmf_qpair *nq, struct nvmf_capsule *nc)
{
	nq->nq_receive(nq->nq_receive_arg, nc);
}

static __inline void
nvmf_complete_io_request(struct nvmf_io_request *io, size_t xfered, int error)
{
	io->io_complete(io->io_complete_arg, xfered, error);
}
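
/*
 * Transports invoke the queue pair callbacks through these wrappers:
 * nvmf_capsule_received() from the receive path when a capsule
 * arrives, nvmf_complete_io_request() when a data transfer finishes
 * or fails, and nvmf_qpair_error() when the queue pair is no longer
 * usable.  A hypothetical sketch of a data-completion path
 * (foo_io_done and its arguments are illustrative only):
 *
 *	static void
 *	foo_io_done(struct nvmf_io_request *io, size_t xfered, bool ok)
 *	{
 *		nvmf_complete_io_request(io, xfered, ok ? 0 : EIO);
 *	}
 */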

int	nvmf_transport_module_handler(struct module *, int, void *);

#define	NVMF_TRANSPORT(name, ops)					\
static moduledata_t nvmf_transport_##name##_mod = {			\
	"nvmf/" #name,							\
	nvmf_transport_module_handler,					\
	&(ops)								\
};									\
DECLARE_MODULE(nvmf_transport_##name, nvmf_transport_##name##_mod,	\
    SI_SUB_DRIVERS, SI_ORDER_ANY);					\
MODULE_DEPEND(nvmf_transport_##name, nvmf_transport, 1, 1, 1)
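
/*
 * A transport module registers its ops table at file scope with this
 * macro, e.g. (reusing the hypothetical foo_ops table sketched
 * above):
 *
 *	NVMF_TRANSPORT(foo, foo_ops);
 *
 * This declares a kernel module named "nvmf/foo" whose module events
 * are handled by nvmf_transport_module_handler() and records a
 * dependency on the nvmf_transport module.
 */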

#endif /* !__NVMF_TRANSPORT_INTERNAL_H__ */