#include "Python.h"
#ifdef STACKLESS
#include "stackless_impl.h"

/*******************************************************************

  Exception handling revised.

  Every tasklet has its own exception state.  The thread state
  exists only once for the current thread, so we need to simulate
  ownership.

  Whenever a tasklet is run for the first time, it should clear
  the exception variables and start with a clean state.  When it
  dies normally, it should clean these up.

  When a transfer occurs from one tasklet to another, the first
  tasklet should save its exception in local variables, and the
  other one should restore its own.

  When a tasklet dies with an uncaught exception, this will be
  passed on to the main tasklet.  The main tasklet must clear its
  exception state, if any, and take over those of the tasklet
  resting in peace.

  2002-08-08: This was a misconception.  This is not the current
  exception, but the one which ceval provides for exception
  handlers.  That means, sharing of this variable is totally
  unrelated to the current error, and we always swap the data.

********************************************************************/

/* Fast (C-level) and slow (Python-level) scheduling callbacks.
 * _slp_schedule_fasthook is invoked on every switch via
 * NOTIFY_SCHEDULE; _slp_schedule_hook is the Python callable that
 * slp_schedule_callback forwards to. */
slp_schedule_hook_func *_slp_schedule_fasthook;
PyObject *_slp_schedule_hook;

/* Perform a hard C-stack transfer while protecting the thread
 * state's ceval exception slots (see header comment): they are
 * saved into locals on this C stack, cleared for the target, and
 * restored when control eventually returns here.
 * Returns the result of slp_transfer (0 on success). */
static int
transfer_with_exc(PyCStackObject **cstprev, PyCStackObject *cst, PyTaskletObject *prev)
{
    PyThreadState *ts = PyThreadState_GET();
    PyObject *exc_type = ts->exc_type;
    PyObject *exc_value = ts->exc_value;
    PyObject *exc_traceback = ts->exc_traceback;
    int ret;

    ts->exc_type = ts->exc_value = ts->exc_traceback = NULL;
    ret = slp_transfer(cstprev, cst, prev);
    /* we are back on this stack: restore our exception slots */
    ts->exc_type = exc_type;
    ts->exc_value = exc_value;
    ts->exc_traceback = exc_traceback;
    return ret;
}

/* scheduler monitoring */

/* Slow-path schedule hook: calls the Python-level _slp_schedule_hook
 * with (prev, next), substituting Py_None for a missing tasklet.
 * The current error indicator and ts->st.tempval are saved around
 * the call; on hook failure the fetched error is dropped so the
 * hook's own exception remains set. */
void
slp_schedule_callback(PyTaskletObject *prev, PyTaskletObject *next)
{
    PyObject *args;

    if (prev == NULL) prev = (PyTaskletObject *)Py_None;
    if (next == NULL) next = (PyTaskletObject *)Py_None;
    args = Py_BuildValue("(OO)", prev, next);
    if (args != NULL) {
        PyObject *type, *value, *traceback, *ret, *hold;
        PyThreadState *ts = PyThreadState_GET();

        PyErr_Fetch(&type, &value, &traceback);
        /* shield tempval: the hook may schedule and clobber it */
        hold = ts->st.tempval;
        ts->st.tempval = NULL;
        ret = PyObject_Call(_slp_schedule_hook, args, NULL);
        if (ret != NULL)
            PyErr_Restore(type, value, traceback);
        else {
            /* hook raised: keep its exception, drop the saved one */
            Py_XDECREF(type);
            Py_XDECREF(value);
            Py_XDECREF(traceback);
        }
        ts->st.tempval = hold;
        Py_XDECREF(ret);
        Py_DECREF(args);
    }
}

/* Invoke the fast schedule hook, guarded by ts->st.schedlock against
 * re-entry (a callback that schedules again would recurse forever).
 * Expands to a `return RUNTIME_ERROR(..., errflag)` on recursion, so
 * errflag must match the enclosing function's error return value.
 * NOTE(review): relies on a local `ts` being in scope at the
 * expansion site. */
#define NOTIFY_SCHEDULE(prev, next, errflag) \
    if (_slp_schedule_fasthook != NULL) { \
        if (ts->st.schedlock) \
            return RUNTIME_ERROR("Recursive scheduler call due to callbacks!", errflag); \
        ts->st.schedlock = 1; \
        _slp_schedule_fasthook(prev, next); \
        ts->st.schedlock = 0; \
    }

/* Recovery path after a failed transfer in slp_schedule_task:
 * remove the bad tasklet from the runnables, reinstate prev as
 * current, and wrap the bad tasklet into the pending exception
 * value as a (value, bad_guy) tuple so callers can identify it. */
static void
kill_wrap_bad_guy(PyTaskletObject *prev, PyTaskletObject *bad_guy)
{
    /*
     * just in case a transfer didn't work, we pack the bad
     * tasklet into the exception and remove it from the runnables.
     *
     */
    PyThreadState *ts = PyThreadState_GET();
    PyObject *newval = PyTuple_New(2);

    if (bad_guy->next != NULL) {
        /* bad_guy is chained into the runnables: unlink it */
        ts->st.current = bad_guy;
        slp_current_remove();
    }
    /* restore last tasklet */
    if (prev->next == NULL)
        slp_current_insert(prev);
    ts->frame = prev->f.frame;
    ts->st.current = prev;
    if (newval != NULL) {
        /* merge bad guy into exception */
        PyObject *exc, *val, *tb;
        PyErr_Fetch(&exc, &val, &tb);
        PyTuple_SET_ITEM(newval, 0, val);
        PyTuple_SET_ITEM(newval, 1, (PyObject *)bad_guy);
        Py_INCREF(bad_guy);
        PyErr_Restore(exc, newval, tb);
    }
}

/* Hard (C-stack) switch from prev to next.
 * Saves prev's interpreter state (recursion depth, flags, frame,
 * tempval) into the tasklet, loads next's state into the thread
 * state, then performs the stack transfer — via transfer_with_exc
 * when ceval exception slots are live (see header comment).
 * prev == NULL means "no state to save" (e.g. ending tasklet).
 * Returns 0 on success, -1 on failed transfer (after
 * kill_wrap_bad_guy cleanup). */
int
slp_schedule_task(PyTaskletObject *prev, PyTaskletObject *next)
{
    PyThreadState *ts = PyThreadState_GET();
    PyCStackObject **cstprev;

    if (next == prev)
        return 0;
    /* since we change the stack we must assure that the protocol was met */
    STACKLESS_ASSERT();
    ts->st.ticker = ts->st.interval;
    if (prev != NULL) {
        /* park the running tasklet's state in its own struct */
        prev->recursion_depth = ts->recursion_depth;
        /* note: nesting_level is handled in cstack_new */
        prev->flags = ts->st.flags;
        prev->f.frame = ts->frame;
        prev->tempval = ts->st.tempval;
        cstprev = &prev->cstate;
    }
    else
        cstprev = NULL;
    /* note this should go here, since it uses tempval */
    NOTIFY_SCHEDULE(prev, next, -1);
    /* load next's state; its slots are consumed (set to NULL) */
    ts->recursion_depth = next->recursion_depth;
    ts->st.flags = next->flags;
    ts->frame = next->f.frame;
    next->f.frame = NULL;
    ts->st.tempval = next->tempval;
    next->tempval = NULL;
    if (!next->flags.blocked)
        /* that must be done by the channel */
        ts->st.current = next;
    if (ts->exc_type == Py_None) {
        /* normalize: Py_None in exc_type means "no exception" */
        Py_XDECREF(ts->exc_type);
        ts->exc_type = NULL;
    }
    ++ts->st.nesting_level;
    /* pick the transfer variant: protect ceval exception slots
     * only when one is actually set */
    if ((ts->exc_type != NULL ? transfer_with_exc : slp_transfer)(cstprev, next->cstate, prev) == 0) {
        --ts->st.nesting_level;
        return 0;
    }
    else {
        --ts->st.nesting_level;
        kill_wrap_bad_guy(prev, next);
        return -1;
    }
}

/* non-recursive scheduling */

/* Shadow frame that carries saved ceval exception slots across a
 * soft switch; restore_exception is its frame execution function. */
typedef struct _exc_frame {
    PyBaseFrameObject bf;
    PyObject *exc_type;
    PyObject *exc_value;
    PyObject *exc_traceback;
} exc_frame;

/* number of extra PyObject* slots exc_frame adds to the base frame */
#define EXC_FRAME_SIZE ((sizeof(exc_frame)-sizeof(PyBaseFrameObject))/sizeof(PyObject*))

/* Frame-exec function of an exc_frame: move the stored exception
 * slots back into the thread state, pop itself (continuing in
 * f_back), and return Py_UnwindToken to keep unwinding. */
static PyObject *
restore_exception(PyFrameObject *f)
{
    PyThreadState *ts = PyThreadState_GET();
    exc_frame *ef = (exc_frame *) f;

    f = ef->bf.f_back;
    ts->exc_type = ef->exc_type;
    ts->exc_value = ef->exc_value;
    ts->exc_traceback = ef->exc_traceback;
    ef->exc_type = ef->exc_traceback = ef->exc_value = NULL;
    Py_INCREF(f);
    Py_DECREF((PyFrameObject *)ef);
    ts->frame = f;
    return Py_UnwindToken;
}

/* jumping from a soft tasklet to a hard switched */

/* Frame-exec helper: the target tasklet was hard-switched, so a
 * soft switch cannot resume it directly.  Pop this helper frame and
 * hard-transfer into the target's saved C stack.  On success control
 * never returns here; hence: */
static PyObject *
jump_soft_to_hard(PyFrameObject *f)
{
    PyThreadState *ts = PyThreadState_GET();

    ts->frame = f->f_back;
    Py_DECREF(f);
    slp_transfer(NULL, ts->st.current->cstate, NULL);
    /* we either have an error or don't come back, so: */
    return NULL;
}

/* Try a soft (frame-based) switch from prev to next.
 * Returns 0 when soft switching is not possible (caller must hard
 * switch), 1 when the switch is prepared (caller unwinds into the
 * new ts->frame), -1 on error. */
int
slp_schedule_nr_maybe(PyTaskletObject *prev, PyTaskletObject *next)
{
    /*
     * we check if we can do a non-recursive transfer.
     *
     * retval == 0  --> can't do it
     * retval == 1  --> ready to take off
     * retval == -1 --> exception occurred
     *
     * In order to save stack space, we don't call
     * the stack switch, but leave it to the caller.
     */
    PyThreadState *ts = PyThreadState_GET();

    if (prev == next)
        return 0;
    if (!slp_enable_softswitch)
        return 0;
    if (ts->st.nesting_level != 0)
        return 0;
    /* we can't handle escape from a channel with soft switching, yet */
    if (next->flags.blocked) {
        return 0;
    }
    ts->st.ticker = ts->st.interval;
    if (prev != NULL) {
        /* save prev's state into its tasklet struct */
        prev->recursion_depth = ts->recursion_depth;
        prev->flags = ts->st.flags;
        prev->tempval = ts->st.tempval;
        ts->st.tempval = NULL;
        if (prev->cstate != ts->st.initial_stub) {
            /* soft-switched tasklets all share the initial stub stack */
            Py_DECREF(prev->cstate);
            prev->cstate = ts->st.initial_stub;
            Py_INCREF(prev->cstate);
        }
        /* handle exception */
        if (ts->exc_type == Py_None) {
            Py_XDECREF(ts->exc_type);
            ts->exc_type = NULL;
        }
        else if (ts->exc_type != NULL) {
            /* build a shadow frame */
            exc_frame *f = (exc_frame *) slp_baseframe_new(restore_exception, 1, EXC_FRAME_SIZE);
            if (f == NULL)
                return -1;
            f->exc_type = ts->exc_type;
            f->exc_value = ts->exc_value;
            f->exc_traceback = ts->exc_traceback;
            ts->frame = (PyFrameObject *) f;
            ts->exc_type = ts->exc_value = ts->exc_traceback = NULL;
        }
        prev->f.frame = ts->frame;
    }
    if (next->topframe != NULL && next->topframe->f_back == NULL) {
        /* this is a new tasklet.
           Provide the runner */
        PyFrameObject *f = next->f.frame;
        PyFrameObject *runner;

        assert(f->f_back == NULL);
        runner = ts->st.tasklet_runner;
        f->f_back = runner;
        Py_INCREF(runner);
    }
    ts->frame = next->f.frame;
    next->f.frame = NULL;
    assert(next->cstate != NULL);
    if (next->cstate->nesting_level != 0) {
        /* create a helper frame to restore the target stack */
        ts->frame = (PyFrameObject *) slp_baseframe_new(jump_soft_to_hard, 1, 0);
        if (ts->frame == NULL) {
            /* NOTE(review): prev may be NULL here — presumably
             * callers with prev == NULL cannot reach this branch;
             * verify against callers. */
            ts->frame = prev->f.frame;
            return -1;
        }
    }
    ts->st.flags = next->flags;
    ts->recursion_depth = next->recursion_depth;
    /* careful about notify, it might use retval */
    NOTIFY_SCHEDULE(prev, next, -1);
    ts->st.current = next;
    ts->st.tempval = next->tempval;
    next->tempval = NULL;
    return 1;
}

/* Copy a tasklet's saved execution state into the thread state.
 * Returns the tasklet's frame (also installed as ts->frame). */
static PyFrameObject *
load_state_from_task(PyTaskletObject *task)
{
    PyThreadState *ts = PyThreadState_GET();

    assert(PyTasklet_Check(task));
    ts->st.flags = task->flags;
    ts->recursion_depth = task->recursion_depth;
    assert(task->cstate != NULL);
    ts->st.nesting_level = task->cstate->nesting_level;
    ts->frame = task->f.frame;
    return task->f.frame;
}

/*
 * reviving main.
 * it is (now) required, that main is always present.
 * reviving just means to bring it back into play.
 */

/* Re-insert the main tasklet into the runnables chain.
 * Returns 0 on success, -1 if main is already chained (blocked). */
int
slp_revive_main(void)
{
    PyThreadState *ts = PyThreadState_GET();

    assert(ts->st.main->f.frame != NULL);
    if (ts->st.main->next == NULL) {
        Py_INCREF(ts->st.main);
        slp_current_insert(ts->st.main);
        return 0;
    }
    return -1;
}

static PyObject *
tasklet_end(PyFrameObject *runner);

/* First entry into the Stackless subsystem for this thread: wrap
 * the caller's frame f into the main tasklet and make it current.
 * Also lazily creates the shared tasklet_runner end-handler frame.
 * Returns 0 on success, 1 if a pending error forbids entry (the
 * error is left set for the caller), -1 on allocation failure. */
int
initialize_main_and_current(PyFrameObject *f)
{
    PyThreadState *ts = PyThreadState_GET();
    PyTaskletObject *task;
    PyObject *noargs;
    PyBaseFrameObject *runner;

    /* refuse executing main in an unhandled error context */
    if (! (PyErr_Occurred() == NULL || PyErr_Occurred() == Py_None) ) {
#ifdef _DEBUG
        PyObject *type, *value, *traceback;
        PyErr_Fetch(&type, &value, &traceback);
        /* extra refs so the exception survives PyErr_Print below */
        Py_XINCREF(type);
        Py_XINCREF(value);
        Py_XINCREF(traceback);
        PyErr_Restore(type, value, traceback);
        printf("Pending error while entering Stackless subsystem:\n");
        PyErr_Print();
        printf("Above exception is re-raised and passed to the caller.\n");
        PyErr_Restore(type, value, traceback);
#endif
        return 1;
    }
    /* create end-handler */
    if (ts->st.tasklet_runner == NULL) {
        runner = slp_baseframe_new(tasklet_end, 0, 0);
        if (runner == NULL)
            return -1;
        ts->st.tasklet_runner = (PyFrameObject *) runner;
        Py_INCREF(runner);
    }
    noargs = PyTuple_New(0);
    task = (PyTaskletObject *) PyTasklet_Type.tp_new(&PyTasklet_Type, noargs, NULL);
    Py_DECREF(noargs);
    if (task == NULL)
        return -1;
    /* NOTE(review): noargs is not NULL-checked before tp_new —
     * presumably tp_new tolerates/propagates it; confirm. */
    task->f.frame = f;
    task->topframe = f;
    Py_INCREF(f);
    assert(task->cstate != NULL);
    task->cstate->task = task;
    load_state_from_task(task);
    ts->st.main = task;
    Py_INCREF(task);
    slp_current_insert(task);
    ts->st.current = task;
    NOTIFY_SCHEDULE(NULL, task, -1);
    return 0;
}

/* End-handler frame function: runs when a tasklet's topframe
 * returns.  Harvests the result (turning TaskletExit into None),
 * clears and unlinks the dead tasklet, then either returns the
 * result (main exiting) or schedules the next tasklet — reviving
 * main on error or when the runnables chain is empty. */
static PyObject *
tasklet_end(PyFrameObject *runner)
{
    PyThreadState *ts = PyThreadState_GET();
    PyTaskletObject *task = ts->st.current;
    PyObject *result = ts->st.tempval;
    int ismain = task == ts->st.main;

    if (ismain) {
        /* See whether we need to adjust main's context before returning */
        if (ts->st.serial_last_jump != ts->st.serial) {
            /* little hack to be not a new tasklet */
            runner->f_back = (PyFrameObject *) Py_None;
            slp_transfer(NULL, ts->st.current->cstate, NULL);
        }
        runner->f_back = NULL;
    }
    ts->st.tempval = NULL;
    /* these may need to be updated */
    task->f.frame = task->topframe;
    task->flags = ts->st.flags;
    task->recursion_depth = 0;
    /* Deallocation might cause stack switches during tasklet_end!
     * Therefore, we need to stay reentrant and keep current alive.
     * The runner frame exists only once, unfortunately.
     * We need to use topframe as current frame in order to
     * store a cstack in case of a switch.
     */
    ts->frame = task->topframe;
    /* see whether we have a TaskletExit, which is no error */
    if (result == NULL && PyErr_Occurred() && PyErr_ExceptionMatches(PyExc_TaskletExit)) {
        PyErr_Clear();
        Py_INCREF(Py_None);
        result = Py_None;
    }
    /* notify before destroying anything */
    NOTIFY_SCHEDULE(task, NULL, NULL);
    /* clear the tasklet, while leaving it in place */
    task->f.frame = NULL; /* avoid decref */
    task->topframe = NULL; /* avoid a kill() */
    task->ob_type->tp_clear((PyObject *)task);
    /* also clear the frame but leave it in place */
    ++ts->recursion_depth;
    ts->frame->ob_type->tp_clear((PyObject *)ts->frame);
    --ts->recursion_depth;
    /* let the task dealloc the frame. Note that the frame
       *could* have been assigned already due to a switch
       during dealloc, so we just enforce it. */
    task->f.frame = ts->frame;
    ts->frame = NULL;
    /* safely clear again, now with frame */
    task->ob_type->tp_clear((PyObject *)task);
    /*
     * clean up any current exception - this tasklet is dead.
     * This only happens if we are killing tasklets in the middle
     * of their execution.
     */
    if (ts->exc_type != NULL && ts->exc_type != Py_None) {
        Py_DECREF(ts->exc_type);
        Py_XDECREF(ts->exc_value);
        Py_XDECREF(ts->exc_traceback);
        ts->exc_type = ts->exc_value = ts->exc_traceback = NULL;
    }
    /*
     * put the result back into the dead tasklet, to give
     * possible referers access to the return value
     */
    task->tempval = result;
    ts->st.current = task;
    slp_current_remove();
    Py_DECREF(task);
    /* capture all exceptions */
    if (ismain) {
        /*
         * Main wants to exit. We clean up, but leave the
         * runnables chain intact.
         */
        ts->st.main = NULL;
        result = task->tempval;
        Py_XINCREF(result);
        Py_DECREF(task);
        return result;
    }
    if (ts->st.runcount == 0) {
        /* nothing left to run: main must be revivable, else deadlock */
        if (slp_revive_main()) {
            /* main is blocked and nobody can send */
            if (ts->st.main->flags.blocked < 0)
                RUNTIME_ERROR("the main tasklet is receiving without a sender available.", NULL);
            else
                RUNTIME_ERROR("the main tasklet is sending without a receiver available.", NULL);
            /* fall through to error handling */
            result = NULL;
        }
    }
    assert(ts->st.runcount > 0 || ts->st.main->flags.blocked);
    if (result == NULL) {
        /*
         * error handling: continue in the context of the main tasklet.
         */
        slp_revive_main();
        /* propagate error */
        if (ts->st.main->tempval != NULL) {
            Py_DECREF(ts->st.main->tempval);
            ts->st.main->tempval = NULL;
        }
        /* prefer a soft switch to main; hard switch as fallback */
        switch (slp_schedule_nr_maybe(NULL, ts->st.main)) {
        case -1:
            return NULL;
        case 1:
            return Py_UnwindToken;
        }
        if (slp_schedule_task(NULL, ts->st.main))
            return NULL;
        /* we do not come back here */
    }
    assert(ts->st.current->f.frame != NULL);
    assert(ts->st.current->f.frame->ob_refcnt > 0);
    /* normal case: switch to the next runnable tasklet */
    switch (slp_schedule_nr_maybe(NULL, ts->st.current)) {
    case -1:
        return NULL;
    case 1:
        return Py_UnwindToken;
    }
    if (slp_schedule_task(NULL, ts->st.current))
        return NULL;
    result = ts->st.tempval;
    ts->st.tempval = NULL;
    return result;
}

/* the following function only has to handle "real"
   tasklets, those which need to switch the C stack.
   The "soft" tasklets are handled by frame pushing.
   It is not so much simpler than I thought :-(
 */

/* Entry point for running a hard-switched tasklet: ensure main and
 * current are initialized, chain the runner end-handler behind f,
 * and hand off to the frame dispatcher. */
PyObject *
slp_run_tasklet(PyFrameObject *f)
{
    PyThreadState *ts = PyThreadState_GET();
    PyFrameObject *runner;

    if ( (ts->st.main == NULL) && initialize_main_and_current(f)) {
        ts->frame = NULL;
        return NULL;
    }
    assert(f->f_back == NULL);
    runner = ts->st.tasklet_runner;
    f->f_back = runner;
    Py_INCREF(runner);
    return slp_frame_dispatch_top(f);
}
#endif