Skip to content
Closed
Prev Previous commit
Next Next commit
partially address review: fix tests, add asserts, add cache modification guards
  • Loading branch information
Fidget-Spinner committed Jan 29, 2022
commit 19880a919812b51167981fef4e08d6ec64730028
6 changes: 5 additions & 1 deletion Objects/typeobject.c
Original file line number Diff line number Diff line change
Expand Up @@ -8929,7 +8929,11 @@ do_super_lookup(superobject *su, PyTypeObject *su_type, PyObject *su_obj,
Py_DECREF(mro);

skip:
assert(su != NULL);
/* only happens when using manual _PySuper_Lookup, never happens in super_getattro */
if (su == NULL) {
PyErr_BadInternalCall();
return NULL;
}
return PyObject_GenericGetAttr((PyObject *)su, name);
}

Expand Down
12 changes: 8 additions & 4 deletions Python/ceval.c
Original file line number Diff line number Diff line change
Expand Up @@ -1361,6 +1361,7 @@ eval_frame_handle_pending(PyThreadState *tstate)

/* The integer overflow is checked by an assertion below. */
#define INSTR_OFFSET() ((int)(next_instr - first_instr))
/* Offset of the instruction following the current one; used to reach the
   cache entries owned by the *next* instruction (see GET_NEXT_INSTR_CACHE). */
#define NEXT_INSTR_OFFSET() ((int)(next_instr+1 - first_instr))
#define NEXTOPARG() do { \
_Py_CODEUNIT word = *next_instr; \
opcode = _Py_OPCODE(word); \
Expand Down Expand Up @@ -1486,6 +1487,9 @@ eval_frame_handle_pending(PyThreadState *tstate)
/* Specialized cache entry for the instruction currently being executed. */
#define GET_CACHE() \
    _GetSpecializedCacheEntryForInstruction(first_instr, INSTR_OFFSET(), oparg)

/* Specialized cache entry belonging to the *next* instruction. Superinstructions
   (e.g. CALL_NO_KW_SUPER*__LOAD_METHOD_CACHED) borrow the cache of the
   LOAD_METHOD that follows them, so they need its entry, keyed by the next
   instruction's own oparg rather than the current one. */
#define GET_NEXT_INSTR_CACHE() \
    _GetSpecializedCacheEntryForInstruction(first_instr, NEXT_INSTR_OFFSET(), \
                                            _Py_OPARG(*next_instr))

#define DEOPT_IF(cond, instname) if (cond) { goto instname ## _miss; }

Expand Down Expand Up @@ -5078,8 +5082,8 @@ _PyEval_EvalFrameDefault(PyThreadState *tstate, InterpreterFrame *frame, int thr
_PyAdaptiveEntry *cache0 = &caches[0].adaptive;
_PyObjectCache *cache1 = &caches[-1].obj;
_PyAdaptiveEntry *lm_adaptive = &caches[-2].adaptive;
Comment thread
Fidget-Spinner marked this conversation as resolved.
int nargs = call_shape.total_args;
assert(nargs == 0);
assert(lm_adaptive == &GET_NEXT_INSTR_CACHE()[0].adaptive);
assert(call_shape.total_args == 0);

/* CALL_NO_KW_SUPER */
PyObject *su_obj;
Expand Down Expand Up @@ -5119,8 +5123,8 @@ _PyEval_EvalFrameDefault(PyThreadState *tstate, InterpreterFrame *frame, int thr
_PyAdaptiveEntry *cache0 = &caches[0].adaptive;
_PyObjectCache *cache1 = &caches[-1].obj;
_PyAdaptiveEntry *lm_adaptive = &caches[-2].adaptive;
int nargs = call_shape.total_args;
assert(nargs == 2);
assert(lm_adaptive == &GET_NEXT_INSTR_CACHE()[0].adaptive);
assert(call_shape.total_args == 2);
assert(call_shape.kwnames == NULL);

/* CALL_NO_KW_SUPER */
Expand Down
11 changes: 10 additions & 1 deletion Python/specialize.c
Original file line number Diff line number Diff line change
Expand Up @@ -962,6 +962,12 @@ _Py_Specialize_LoadMethod(PyObject *owner, _Py_CODEUNIT *instr, PyObject *name,
_PyObjectCache *cache2 = &cache[-2].obj;

PyTypeObject *owner_cls = Py_TYPE(owner);
_Py_CODEUNIT prev_instr = _Py_OPCODE(instr[-1]);
if (prev_instr == CALL_NO_KW_SUPER_0__LOAD_METHOD_CACHED ||
prev_instr == CALL_NO_KW_SUPER_2__LOAD_METHOD_CACHED) {
/* Our own cache entries are already being used by superinstructions. */
goto fail;
}
if (PyModule_CheckExact(owner)) {
int err = specialize_module_load_attr(owner, instr, name, cache0, cache1,
LOAD_METHOD, LOAD_METHOD_MODULE);
Expand Down Expand Up @@ -1378,6 +1384,10 @@ specialize_class_call(
/* Adaptive super instruction of CALL and LOAD_METHOD_ADAPTIVE. */
if (tp == &PySuper_Type &&
kwnames == NULL &&
        /* Important: this also protects us from accidentally overwriting the
           next specialized instruction's cache. We can only use the
           subsequent LOAD_METHOD cache if it hasn't specialized yet.
        */
_Py_OPCODE(instr[1]) == LOAD_METHOD_ADAPTIVE &&
_Py_OPCODE(instr[-1]) == PRECALL_FUNCTION &&
(nargs == 0 || nargs == 2)) {
Expand Down Expand Up @@ -1430,7 +1440,6 @@ specialize_class_call(
SPEC_FAIL_CALL_STR : SPEC_FAIL_CLASS_NO_VECTORCALL);
return -1;
}

SPECIALIZATION_FAIL(CALL, SPEC_FAIL_CLASS_MUTABLE);
return -1;
}
Expand Down