path: root/sys/dev/acpica/Osd/OsdSchedule.c
/*-
 * Copyright (c) 2000 Michael Smith
 * Copyright (c) 2000 BSDi
 * Copyright (c) 2007-2009 Jung-uk Kim <jkim@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * 6.3 : Scheduling services
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_acpi.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/taskqueue.h>

#include <contrib/dev/acpica/include/acpi.h>
#include <contrib/dev/acpica/include/accommon.h>

#include <dev/acpica/acpivar.h>

#define _COMPONENT	ACPI_OS_SERVICES
ACPI_MODULE_NAME("SCHEDULE")

/*
 * Allow the user to tune the maximum number of tasks we may enqueue.
 */
static int acpi_max_tasks = ACPI_MAX_TASKS;
TUNABLE_INT("debug.acpi.max_tasks", &acpi_max_tasks);

/*
 * Allow the user to tune the number of task threads we start.  It seems
 * some systems have problems with increased parallelism.
 */
static int acpi_max_threads = ACPI_MAX_THREADS;
TUNABLE_INT("debug.acpi.max_threads", &acpi_max_threads);

static MALLOC_DEFINE(M_ACPITASK, "acpitask", "ACPI deferred task");

struct acpi_task_ctx {
    struct task			at_task;
    ACPI_OSD_EXEC_CALLBACK	at_function;
    void 			*at_context;
    int				at_flag;
#define	ACPI_TASK_FREE		0
#define	ACPI_TASK_USED		1
#define	ACPI_TASK_ENQUEUED	2
};

struct taskqueue		*acpi_taskq;
static struct acpi_task_ctx	*acpi_tasks;
static int			acpi_task_count;
static int			acpi_taskq_started;

/*
 * Preallocate memory for tasks early, since malloc(9) cannot be
 * used while a spin lock is held.
 */
static void
acpi_task_init(void *arg)
{

    acpi_tasks = malloc(sizeof(*acpi_tasks) * acpi_max_tasks, M_ACPITASK,
	M_WAITOK | M_ZERO);
}

SYSINIT(acpi_tasks, SI_SUB_DRIVERS, SI_ORDER_FIRST, acpi_task_init, NULL);

/*
 * Initialize ACPI task queue.
 */
static void
acpi_taskq_init(void *arg)
{
    int i;

    acpi_taskq = taskqueue_create_fast("acpi_task", M_NOWAIT,
	&taskqueue_thread_enqueue, &acpi_taskq);
    taskqueue_start_threads(&acpi_taskq, acpi_max_threads, PWAIT, "acpi_task");
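    /*
     * Enqueue any tasks that were submitted before the queue threads
     * were started; until now they were only marked USED.
     */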
    if (acpi_task_count > 0) {
	if (bootverbose)
	    printf("AcpiOsExecute: enqueue %d pending tasks\n",
		acpi_task_count);
	for (i = 0; i < acpi_max_tasks; i++)
	    if (atomic_cmpset_int(&acpi_tasks[i].at_flag, ACPI_TASK_USED,
		ACPI_TASK_USED | ACPI_TASK_ENQUEUED))
		taskqueue_enqueue(acpi_taskq, &acpi_tasks[i].at_task);
    }
    acpi_taskq_started = 1;
}

SYSINIT(acpi_taskq, SI_SUB_CONFIGURE, SI_ORDER_SECOND, acpi_taskq_init, NULL);

/*
 * Bounce through this wrapper function, since ACPI-CA callbacks do not
 * take the taskqueue "pending" argument.
 */
static void
acpi_task_execute(void *context, int pending)
{
    struct acpi_task_ctx *at;

    at = (struct acpi_task_ctx *)context;
    at->at_function(at->at_context);
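    /* The callback has completed; release the slot for reuse. */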
    atomic_clear_int(&at->at_flag, ACPI_TASK_USED | ACPI_TASK_ENQUEUED);
    acpi_task_count--;
}

static ACPI_STATUS
acpi_task_enqueue(int priority, ACPI_OSD_EXEC_CALLBACK Function, void *Context)
{
    struct acpi_task_ctx *at;
    int i;

    /*
     * Claim a free slot by atomically moving it from FREE to USED; this
     * keeps slot allocation lock-free.
     */
    for (at = NULL, i = 0; i < acpi_max_tasks; i++)
	if (atomic_cmpset_int(&acpi_tasks[i].at_flag, ACPI_TASK_FREE,
	    ACPI_TASK_USED)) {
	    at = &acpi_tasks[i];
	    acpi_task_count++;
	    break;
	}
    if (at == NULL) {
	printf("AcpiOsExecute: failed to enqueue task, consider increasing "
	    "the debug.acpi.max_tasks tunable\n");
	return (AE_NO_MEMORY);
    }

    TASK_INIT(&at->at_task, priority, acpi_task_execute, at);
    at->at_function = Function;
    at->at_context = Context;

    /*
     * If the task queue has already been started, enqueue the task now.
     */
    if (acpi_taskq_started) {
	atomic_set_int(&at->at_flag, ACPI_TASK_ENQUEUED);
	taskqueue_enqueue(acpi_taskq, &at->at_task);
	return (AE_OK);
    }
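    /*
     * The task queue is not running yet; acpi_taskq_init() will enqueue
     * this task once the queue threads have been started.
     */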
    if (bootverbose)
	printf("AcpiOsExecute: task queue not started\n");

    return (AE_OK);
}

/*
 * This function may be called in interrupt context, i.e. when a GPE fires.
 * We allocate and queue a task for one of our taskqueue threads to process.
 */
ACPI_STATUS
AcpiOsExecute(ACPI_EXECUTE_TYPE Type, ACPI_OSD_EXEC_CALLBACK Function,
    void *Context)
{
    int pri;

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    if (Function == NULL)
	return_ACPI_STATUS (AE_BAD_PARAMETER);

    switch (Type) {
    case OSL_GPE_HANDLER:
    case OSL_NOTIFY_HANDLER:
	/*
	 * Run GPEs and Notifies at the same priority.  This prevents a
	 * Notify generated by running a GPE's method (e.g., _L00) from
	 * being pre-empted by a later GPE that arrives while the Notify
	 * handler is executing.
	 */
	pri = 10;
	break;
    case OSL_GLOBAL_LOCK_HANDLER:
    case OSL_EC_POLL_HANDLER:
    case OSL_EC_BURST_HANDLER:
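	/*
	 * The global lock and EC handlers run at an intermediate priority,
	 * between GPE/Notify handlers and the debugger thread.
	 */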
	pri = 5;
	break;
    case OSL_DEBUGGER_THREAD:
	pri = 0;
	break;
    default:
	return_ACPI_STATUS (AE_BAD_PARAMETER);
    }

    return_ACPI_STATUS (acpi_task_enqueue(pri, Function, Context));
}

void
AcpiOsSleep(UINT64 Milliseconds)
{
    int		timo;

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    timo = Milliseconds * hz / 1000;

    /*
     * If the requested sleep time is less than our hz resolution, use
     * DELAY() instead for better granularity.
     */
    if (timo > 0)
	pause("acpislp", timo);
    else
	DELAY(Milliseconds * 1000);

    return_VOID;
}

/*
 * Return the current time in 100-nanosecond units.
 */
UINT64
AcpiOsGetTimer(void)
{
    struct bintime bt;
    UINT64 t;

    /* XXX During early boot there is no (decent) timer available yet. */
    KASSERT(cold == 0, ("acpi: timer op not yet supported during boot"));

    binuptime(&bt);
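    /*
     * Convert the binary fraction of a second (bt.frac, in units of
     * 1 / 2^64 s) to 100 ns units: take the upper 32 bits of the
     * fraction and scale by 10^7 / 2^32.
     */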
    t = ((UINT64)10000000 * (uint32_t)(bt.frac >> 32)) >> 32;
    t += bt.sec * 10000000;

    return (t);
}

void
AcpiOsStall(UINT32 Microseconds)
{
    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    DELAY(Microseconds);
    return_VOID;
}

ACPI_THREAD_ID
AcpiOsGetThreadId(void)
{

    /* XXX Do not add ACPI_FUNCTION_TRACE here; it causes a recursive call. */

    /* Returning 0 is not allowed. */
    return (curthread->td_tid);
}