Skip to content
Snippets Groups Projects
Commit c02dd787 authored by Georgy Kirichenko's avatar Georgy Kirichenko Committed by Roman Tsisyk
Browse files

Remove struct tarantool_coro and coro.h

* Merge `struct tarantool_coro` into `struct fiber`
* Delete coro.c and coro.h files.

Refactoring for #2438
parent a3692871
No related branches found
No related tags found
No related merge requests found
......@@ -71,7 +71,6 @@ set (core_sources
cbus.c
fiber_pool.c
exception.cc
coro.c
reflection.c
assoc.c
rmean.c
......
......@@ -340,8 +340,8 @@ print_backtrace()
stack_top = frame; /* we don't know where the system stack top is */
stack_size = (const char *) __libc_stack_end - (const char *) frame;
} else {
stack_top = fiber()->coro.stack;
stack_size = fiber()->coro.stack_size;
stack_top = fiber()->stack;
stack_size = fiber()->stack_size;
}
fdprintf(STDERR_FILENO, "%s", backtrace(frame, stack_top, stack_size));
......
/*
* Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file.
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* 1. Redistributions of source code must retain the above
* copyright notice, this list of conditions and the
* following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY <COPYRIGHT HOLDER> ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
* <COPYRIGHT HOLDER> OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
* THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include "coro.h"
#include "trivia/config.h"
#include <unistd.h>
#include <string.h>
#include <sys/mman.h>
#include "small/slab_cache.h"
#include "third_party/valgrind/memcheck.h"
#include "diag.h"
#include "tt_pthread.h"
#if ENABLE_ASAN
#include <sanitizer/asan_interface.h>
#endif
/*
 * coro_page_size, coro_stack_size, coro_stack_direction
 *
 * Stack geometry: relative placement of stack section and guard
 * section, if any. Offsets are relative to the beginning of an aligned
 * memory block to host both stack and guard side by side.
 *
 * Note: we assume that the memory comes from a slab allocator and
 * contains a slab header at the beginning we should not touch.
 */
/*
 * Process-wide stack geometry, computed once in tarantool_coro_init():
 * the VM page size, the total size of one coro stack chunk, and the
 * direction the machine stack grows (-1 = down, +1 = up).
 */
static size_t coro_page_size;
static size_t coro_stack_size;
static int coro_stack_direction;

enum {
	/** Pages per coro stack chunk (including the guard page). */
	CORO_STACK_PAGES = 16,
};
/** Round @a ptr down to the nearest page boundary. */
static inline void *
coro_page_align_down(void *ptr)
{
	/* Clearing the low bits aligns the address downwards. */
	intptr_t mask = ~(intptr_t)(coro_page_size - 1);
	return (void *)((intptr_t)ptr & mask);
}
/** Round @a ptr up to the nearest page boundary. */
static inline void *
coro_page_align_up(void *ptr)
{
	/* Bump past the boundary, then align down (void * math is a GNU
	 * extension used throughout this file). */
	void *bumped = ptr + coro_page_size - 1;
	return coro_page_align_down(bumped);
}
/**
 * Return true if the machine stack grows towards lower addresses.
 * @a prev_stack_frame must be the caller's frame address; noinline
 * guarantees this function gets a frame of its own to compare with.
 */
static __attribute__((noinline)) bool
test_stack_grows_down(void *prev_stack_frame)
{
	void *own_frame = __builtin_frame_address(0);
	return own_frame < prev_stack_frame;
}
/**
 * One-time initialization of the coro stack geometry: query the VM
 * page size, derive the per-coro stack size, and probe the stack
 * growth direction of this machine.
 *
 * Must be called before the first tarantool_coro_create().
 */
void
tarantool_coro_init(void)
{
	coro_page_size = sysconf(_SC_PAGESIZE);
	coro_stack_size = coro_page_size * CORO_STACK_PAGES;
	/*
	 * Compare this frame's address with a callee's frame address;
	 * the callee's being lower means the stack grows down.
	 */
	coro_stack_direction =
		test_stack_grows_down(__builtin_frame_address(0)) ? -1 : 1;
}
/**
 * Allocate a stack for a coro from @a slabc, install a PROT_NONE
 * guard page on the overflow side, and initialize the coro context
 * so it will start executing f(data) on first transfer.
 *
 * The chunk begins with a slab header that must stay untouched; the
 * guard page is placed adjacent to the stack on the side it would
 * overflow towards.
 *
 * @retval 0  success
 * @retval -1 out of memory (diag is set)
 */
int
tarantool_coro_create(struct tarantool_coro *coro,
		      struct slab_cache *slabc,
		      void (*f) (void *), void *data)
{
	memset(coro, 0, sizeof(*coro));
	coro->stack_slab = (char *) slab_get(slabc, coro_stack_size);
	if (coro->stack_slab == NULL) {
		diag_set(OutOfMemory, coro_stack_size,
			 "runtime arena", "coro stack");
		return -1;
	}
	void *guard;
	/* Adjust begin and size for stack memory chunk. */
	if (coro_stack_direction < 0) {
		/*
		 * A stack grows down. First page after begin of a
		 * stack memory chunk should be protected and memory
		 * after protected page until end of memory chunk can be
		 * used for coro stack usage.
		 */
		guard = coro_page_align_up(coro->stack_slab + slab_sizeof());
		coro->stack = guard + coro_page_size;
		coro->stack_size = coro_stack_size -
				   (coro->stack - coro->stack_slab);
	} else {
		/*
		 * A stack grows up. Last page should be protected and
		 * memory from begin of chunk until protected page can
		 * be used for coro stack usage
		 */
		guard = coro_page_align_down(coro->stack_slab +
					     coro_stack_size) -
			coro_page_size;
		coro->stack = coro->stack_slab + slab_sizeof();
		coro->stack_size = guard - coro->stack;
	}
	/* Tell Valgrind this region is a stack, for sane backtraces. */
	coro->stack_id = VALGRIND_STACK_REGISTER(coro->stack,
						 (char *) coro->stack +
						 coro->stack_size);
	/* NOTE(review): mprotect() result is ignored — TODO check it. */
	mprotect(guard, coro_page_size, PROT_NONE);
	coro_create(&coro->ctx, f, data, coro->stack, coro->stack_size);
	return 0;
}
/**
 * Release resources of a coro created by tarantool_coro_create():
 * drop the Valgrind registration, unpoison the stack for ASAN,
 * unprotect the guard page and return the slab to @a slabc.
 *
 * Safe to call on a zeroed coro (stack == NULL): does nothing then.
 */
void
tarantool_coro_destroy(struct tarantool_coro *coro, struct slab_cache *slabc)
{
	if (coro->stack != NULL) {
		VALGRIND_STACK_DEREGISTER(coro->stack_id);
#if ENABLE_ASAN
		ASAN_UNPOISON_MEMORY_REGION(coro->stack, coro->stack_size);
#endif
		void *guard;
		/* Recompute the guard address exactly as in _create(). */
		if (coro_stack_direction < 0)
			guard = coro_page_align_up(coro->stack_slab + slab_sizeof());
		else
			guard = coro_page_align_down(coro->stack_slab +
						     coro_stack_size) -
				coro_page_size;
		/* Restore normal protection before giving the slab back. */
		mprotect(guard, coro_page_size, PROT_READ | PROT_WRITE);
		slab_put(slabc, coro->stack_slab);
	}
}
#ifndef TARANTOOL_CORO_H_INCLUDED
#define TARANTOOL_CORO_H_INCLUDED
/*
 * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file.
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * 1. Redistributions of source code must retain the above
 *    copyright notice, this list of conditions and the
 *    following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY <COPYRIGHT HOLDER> ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
 * <COPYRIGHT HOLDER> OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
 * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <stddef.h> /* size_t */

#include <third_party/coro/coro.h>

#if defined(__cplusplus)
extern "C" {
#endif /* defined(__cplusplus) */

/** A coroutine: a machine context plus a guarded slab-backed stack. */
struct tarantool_coro {
	coro_context ctx;
	/** Coro stack slab. */
	void *stack_slab;
	/** Coro stack addr. */
	void *stack;
	/** Coro stack size. */
	size_t stack_size;
	/** Valgrind stack id. */
	unsigned int stack_id;
};

struct slab_cache;

/**
 * One-time initialization of coro stack geometry; call before the
 * first tarantool_coro_create().
 *
 * Note: '(void)' rather than '()' — an empty parameter list declares
 * a function taking unspecified, unchecked arguments.
 */
void
tarantool_coro_init(void);

/**
 * Create a coro that starts executing f(data) on first transfer.
 * @retval 0 on success, -1 on OOM (diag is set).
 */
int
tarantool_coro_create(struct tarantool_coro *ctx,
		      struct slab_cache *cache,
		      void (*f) (void *), void *data);

/** Free the coro stack; safe on a zeroed struct. */
void
tarantool_coro_destroy(struct tarantool_coro *ctx,
		       struct slab_cache *cache);

#if defined(__cplusplus)
} /* extern "C" */
#endif /* defined(__cplusplus) */

#endif /* TARANTOOL_CORO_H_INCLUDED */
......@@ -29,6 +29,9 @@
* SUCH DAMAGE.
*/
#include "fiber.h"
#include <trivia/config.h>
#include <trivia/util.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
......@@ -39,6 +42,8 @@
#include "memory.h"
#include "trigger.h"
#include "third_party/valgrind/memcheck.h"
static int (*fiber_invoke)(fiber_func f, va_list ap);
#if ENABLE_ASAN
......@@ -78,6 +83,14 @@ static struct cord main_cord;
__thread struct cord *cord_ptr = NULL;
pthread_t main_thread_id;
static size_t page_size;
static int stack_direction;
enum {
/** The number of pages to use for fiber stack */
FIBER_STACK_PAGES = 16,
};
static void
update_last_stack_frame(struct fiber *fiber)
{
......@@ -121,9 +134,9 @@ fiber_call_impl(struct fiber *callee)
callee->flags &= ~FIBER_IS_READY;
callee->csw++;
ASAN_START_SWITCH_FIBER(asan_state, 1,
callee->coro.stack,
callee->coro.stack_size);
coro_transfer(&caller->coro.ctx, &callee->coro.ctx);
callee->stack,
callee->stack_size);
coro_transfer(&caller->ctx, &callee->ctx);
ASAN_FINISH_SWITCH_FIBER(asan_state);
}
......@@ -358,9 +371,9 @@ fiber_yield(void)
callee->flags &= ~FIBER_IS_READY;
ASAN_START_SWITCH_FIBER(asan_state,
(caller->flags & FIBER_IS_DEAD) == 0,
callee->coro.stack,
callee->coro.stack_size);
coro_transfer(&caller->coro.ctx, &callee->coro.ctx);
callee->stack,
callee->stack_size);
coro_transfer(&caller->ctx, &callee->ctx);
ASAN_FINISH_SWITCH_FIBER(asan_state);
}
......@@ -617,6 +630,80 @@ fiber_set_key(struct fiber *fiber, enum fiber_key key, void *value);
extern inline void *
fiber_get_key(struct fiber *fiber, enum fiber_key key);
/** Round @a ptr down to the nearest page boundary. */
static inline void *
page_align_down(void *ptr)
{
	/* Mask off the low bits to align the address downwards. */
	intptr_t mask = ~(intptr_t)(page_size - 1);
	return (void *)((intptr_t)ptr & mask);
}
/** Round @a ptr up to the nearest page boundary. */
static inline void *
page_align_up(void *ptr)
{
	/* Bump past the boundary, then align down (void * arithmetic
	 * is a GNU extension used throughout this file). */
	void *bumped = ptr + page_size - 1;
	return page_align_down(bumped);
}
/**
 * Allocate a stack for @a fiber from the cord's slab cache and make
 * one page adjacent to it (on the overflow side) a PROT_NONE guard.
 *
 * @param fiber       fiber to set up; fills in stack_slab, stack,
 *                    stack_size and stack_id on success.
 * @param stack_size  requested total chunk size including slab header.
 * @retval 0  success
 * @retval -1 out of memory (diag is set)
 */
static int
fiber_stack_create(struct fiber *fiber, size_t stack_size)
{
	stack_size -= slab_sizeof();
	fiber->stack_slab = slab_get(&cord()->slabc, stack_size);
	if (fiber->stack_slab == NULL) {
		diag_set(OutOfMemory, stack_size,
			 "runtime arena", "fiber stack");
		return -1;
	}
	void *guard;
	/* Adjust begin and size for stack memory chunk. */
	if (stack_direction < 0) {
		/*
		 * A stack grows down. First page after begin of a
		 * stack memory chunk should be protected and memory
		 * after protected page until end of memory chunk can be
		 * used for coro stack usage.
		 */
		guard = page_align_up(slab_data(fiber->stack_slab));
		fiber->stack = guard + page_size;
		fiber->stack_size = slab_data(fiber->stack_slab) + stack_size -
				    fiber->stack;
	} else {
		/*
		 * A stack grows up. Last page should be protected and
		 * memory from begin of chunk until protected page can
		 * be used for coro stack usage.
		 *
		 * Cast to char * first: stack_slab is a struct slab *,
		 * so raw pointer arithmetic on it would be scaled by
		 * sizeof(struct slab) and compute bogus addresses.
		 */
		guard = page_align_down((char *)fiber->stack_slab +
					stack_size) -
			page_size;
		fiber->stack = (char *)fiber->stack_slab + slab_sizeof();
		fiber->stack_size = guard - fiber->stack;
	}
	/* Tell Valgrind this region is a stack, for sane backtraces. */
	fiber->stack_id = VALGRIND_STACK_REGISTER(fiber->stack,
						  (char *)fiber->stack +
						  fiber->stack_size);
	/* NOTE(review): mprotect() result is ignored — TODO check it. */
	mprotect(guard, page_size, PROT_NONE);
	return 0;
}
/**
 * Release a stack created by fiber_stack_create(): drop the Valgrind
 * registration, unpoison the stack for ASAN, unprotect the guard page
 * and return the slab to @a slabc.
 *
 * No-op for a fiber without a stack (stack == NULL).
 */
static void
fiber_stack_destroy(struct fiber *fiber, struct slab_cache *slabc)
{
	if (fiber->stack != NULL) {
		VALGRIND_STACK_DEREGISTER(fiber->stack_id);
#if ENABLE_ASAN
		ASAN_UNPOISON_MEMORY_REGION(fiber->stack, fiber->stack_size);
#endif
		void *guard;
		/* Recompute the guard address (see fiber_stack_create). */
		if (stack_direction < 0)
			guard = page_align_down(fiber->stack - page_size);
		else
			guard = page_align_up(fiber->stack + fiber->stack_size);
		/* Restore normal protection before giving the slab back. */
		mprotect(guard, page_size, PROT_READ | PROT_WRITE);
		slab_put(slabc, fiber->stack_slab);
	}
}
/**
* Create a new fiber.
*
......@@ -648,11 +735,13 @@ fiber_new(const char *name, fiber_func f)
}
memset(fiber, 0, sizeof(struct fiber));
if (tarantool_coro_create(&fiber->coro, &cord->slabc,
fiber_loop, NULL)) {
if (fiber_stack_create(fiber, FIBER_STACK_PAGES * page_size)) {
mempool_free(&cord->fiber_mempool, fiber);
return NULL;
}
memset(&fiber->ctx, 0, sizeof(fiber->ctx));
coro_create(&fiber->ctx, fiber_loop, NULL,
fiber->stack, fiber->stack_size);
region_create(&fiber->gc, &cord->slabc);
......@@ -695,7 +784,7 @@ fiber_destroy(struct cord *cord, struct fiber *f)
trigger_destroy(&f->on_stop);
rlist_del(&f->state);
region_destroy(&f->gc);
tarantool_coro_destroy(&f->coro, &cord->slabc);
fiber_stack_destroy(f, &cord->slabc);
diag_destroy(&f->diag);
}
......@@ -746,11 +835,11 @@ cord_create(struct cord *cord, const char *name)
#if ENABLE_ASAN
/* Record stack extents */
tt_pthread_attr_getstack(cord->id, &cord->sched.coro.stack,
&cord->sched.coro.stack_size);
tt_pthread_attr_getstack(cord->id, &cord->sched.stack,
&cord->sched.stack_size);
#else
cord->sched.coro.stack = NULL;
cord->sched.coro.stack_size = 0;
cord->sched.stack = NULL;
cord->sched.stack_size = 0;
#endif
}
......@@ -1049,10 +1138,17 @@ cord_slab_cache(void)
return &cord()->slabc;
}
/**
 * Return -1 if the machine stack grows down, +1 if it grows up.
 * @a prev_stack_frame must be the caller's frame address; NOINLINE
 * guarantees this function gets a frame of its own to compare with.
 */
static NOINLINE int
check_stack_direction(void *prev_stack_frame)
{
	if (__builtin_frame_address(0) < prev_stack_frame)
		return -1;
	return 1;
}
void
fiber_init(int (*invoke)(fiber_func f, va_list ap))
{
tarantool_coro_init();
page_size = sysconf(_SC_PAGESIZE);
stack_direction = check_stack_direction(__builtin_frame_address(0));
fiber_invoke = invoke;
main_thread_id = pthread_self();
main_cord.loop = ev_default_loop(EVFLAG_AUTO | EVFLAG_ALLOCFD);
......
......@@ -38,13 +38,14 @@
#include "tt_pthread.h"
#include "third_party/tarantool_ev.h"
#include "diag.h"
#include "coro.h"
#include "trivia/util.h"
#include "small/mempool.h"
#include "small/region.h"
#include "small/rlist.h"
#include "salad/stailq.h"
#include <third_party/coro/coro.h>
#if defined(__cplusplus)
extern "C" {
#endif /* defined(__cplusplus) */
......@@ -244,7 +245,15 @@ cord_slab_cache(void);
/** \endcond public */
struct fiber {
struct tarantool_coro coro;
coro_context ctx;
/** Coro stack slab. */
struct slab *stack_slab;
/** Coro stack addr. */
void *stack;
/** Coro stack size. */
size_t stack_size;
/** Valgrind stack id. */
unsigned int stack_id;
/* A garbage-collected memory pool. */
struct region gc;
#ifdef ENABLE_BACKTRACE
......
......@@ -222,7 +222,7 @@ lbox_fiber_statof(struct fiber *f, void *cb_ctx)
lua_pushnumber(L, region_used(&f->gc));
lua_settable(L, -3);
lua_pushstring(L, "total");
lua_pushnumber(L, region_total(&f->gc) + f->coro.stack_size +
lua_pushnumber(L, region_total(&f->gc) + f->stack_size +
sizeof(struct fiber));
lua_settable(L, -3);
lua_settable(L, -3);
......@@ -233,7 +233,7 @@ lbox_fiber_statof(struct fiber *f, void *cb_ctx)
if (f != fiber()) {
backtrace_foreach(fiber_backtrace_cb,
f->last_stack_frame,
f->coro.stack, f->coro.stack_size, L);
f->stack, f->stack_size, L);
}
lua_settable(L, -3);
#endif /* ENABLE_BACKTRACE */
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment