Skip to content
Merged
Show file tree
Hide file tree
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Next Next commit
Add PyType vectorcall and use vectorcall in all specialized call fallbacks
  • Loading branch information
youknowone committed Mar 4, 2026
commit f584c47f5da7d94b5e8b5a098fb012aab51bd763
29 changes: 29 additions & 0 deletions crates/vm/src/builtins/type.rs
Original file line number Diff line number Diff line change
Expand Up @@ -2339,8 +2339,37 @@ fn subtype_set_dict(obj: PyObjectRef, value: PyObjectRef, vm: &VirtualMachine) -
* The magical type type
*/

/// Vectorcall implementation for `PyType` (PEP 590).
///
/// Fast path: `type(x)` — exactly one positional argument and no keyword
/// arguments on the `type` type itself — returns `x`'s class directly,
/// without materializing a `FuncArgs`. Everything else falls back to the
/// standard `PyType::call` path.
///
/// `args` holds the positional arguments (and, per the vectorcall
/// convention, any keyword values past `nargs`); `kwnames` lists the
/// keyword names, if any.
fn vectorcall_type(
    zelf_obj: &PyObject,
    args: Vec<PyObjectRef>,
    nargs: usize,
    kwnames: Option<&[PyObjectRef]>,
    vm: &VirtualMachine,
) -> PyResult {
    // This slot is only ever installed on type objects (see `init` below),
    // so the downcast cannot fail; state the invariant instead of a bare
    // unwrap so a violation produces a diagnosable panic message.
    let zelf: &Py<PyType> = zelf_obj
        .downcast_ref()
        .expect("vectorcall_type invoked on a non-type object");

    // `type(x)` fast path: callable must be exactly `type` (not a subclass,
    // which may override `__call__`/`__new__`), with a single positional
    // argument and no keyword arguments.
    if zelf.is(vm.ctx.types.type_type) {
        let no_kwargs = kwnames.is_none_or(|kw| kw.is_empty());
        if nargs == 1 && no_kwargs {
            // Safe index: no kwargs means args.len() == nargs == 1.
            return Ok(args[0].obj_type());
        }
    }

    // Fallback: build FuncArgs and go through the standard call machinery.
    let func_args = FuncArgs::from_vectorcall(&args, nargs, kwnames);
    PyType::call(zelf, func_args, vm)
}

/// Module initialization: registers `type`'s attributes and installs its
/// PEP 590 vectorcall slot.
pub(crate) fn init(ctx: &'static Context) {
    // Attach all `#[pyclass]`-generated methods/attributes to `type`.
    PyType::extend_class(ctx, ctx.types.type_type);

    // Enable the vectorcall fast path on the `type` type object itself.
    let type_slots = &ctx.types.type_type.slots;
    type_slots.vectorcall.store(Some(vectorcall_type));
}

pub(crate) fn call_slot_new(
Expand Down
47 changes: 12 additions & 35 deletions crates/vm/src/frame.rs
Original file line number Diff line number Diff line change
Expand Up @@ -3847,8 +3847,7 @@ impl ExecutingFrame<'_> {
self.deoptimize(Instruction::Call {
argc: Arg::marker(),
});
let args = self.collect_positional_args(nargs);
self.execute_call(args, vm)
self.execute_call_vectorcall(nargs, vm)
}
Instruction::CallMethodDescriptorO => {
let instr_idx = self.lasti() as usize - 1;
Expand Down Expand Up @@ -3885,8 +3884,7 @@ impl ExecutingFrame<'_> {
self.deoptimize(Instruction::Call {
argc: Arg::marker(),
});
let args = self.collect_positional_args(nargs);
self.execute_call(args, vm)
self.execute_call_vectorcall(nargs, vm)
}
Instruction::CallMethodDescriptorFast => {
let instr_idx = self.lasti() as usize - 1;
Expand Down Expand Up @@ -3924,8 +3922,7 @@ impl ExecutingFrame<'_> {
self.deoptimize(Instruction::Call {
argc: Arg::marker(),
});
let args = self.collect_positional_args(nargs);
self.execute_call(args, vm)
self.execute_call_vectorcall(nargs, vm)
}
Instruction::CallBuiltinClass => {
let instr_idx = self.lasti() as usize - 1;
Expand All @@ -3935,25 +3932,12 @@ impl ExecutingFrame<'_> {
let callable = self.nth_value(nargs + 1);
let callable_tag = callable as *const PyObject as u32;
if cached_tag == callable_tag && callable.downcast_ref::<PyType>().is_some() {
let args = self.collect_positional_args(nargs);
let self_or_null = self.pop_value_opt();
let callable = self.pop_value();
let final_args = if let Some(self_val) = self_or_null {
let mut args = args;
args.prepend_arg(self_val);
args
} else {
args
};
let result = callable.call(final_args, vm)?;
self.push_value(result);
return Ok(None);
return self.execute_call_vectorcall(nargs, vm);
}
self.deoptimize(Instruction::Call {
argc: Arg::marker(),
});
let args = self.collect_positional_args(nargs);
self.execute_call(args, vm)
self.execute_call_vectorcall(nargs, vm)
}
Instruction::CallAllocAndEnterInit => {
let instr_idx = self.lasti() as usize - 1;
Expand Down Expand Up @@ -4013,8 +3997,7 @@ impl ExecutingFrame<'_> {
self.deoptimize(Instruction::Call {
argc: Arg::marker(),
});
let args = self.collect_positional_args(nargs);
self.execute_call(args, vm)
self.execute_call_vectorcall(nargs, vm)
}
Instruction::CallMethodDescriptorFastWithKeywords => {
// Native function interface is uniform regardless of keyword support
Expand Down Expand Up @@ -4053,8 +4036,7 @@ impl ExecutingFrame<'_> {
self.deoptimize(Instruction::Call {
argc: Arg::marker(),
});
let args = self.collect_positional_args(nargs);
self.execute_call(args, vm)
self.execute_call_vectorcall(nargs, vm)
}
Instruction::CallBuiltinFastWithKeywords => {
// Native function interface is uniform regardless of keyword support
Expand Down Expand Up @@ -4087,8 +4069,7 @@ impl ExecutingFrame<'_> {
self.deoptimize(Instruction::Call {
argc: Arg::marker(),
});
let args = self.collect_positional_args(nargs);
self.execute_call(args, vm)
self.execute_call_vectorcall(nargs, vm)
}
Instruction::CallNonPyGeneral => {
let instr_idx = self.lasti() as usize - 1;
Expand All @@ -4098,14 +4079,12 @@ impl ExecutingFrame<'_> {
let callable = self.nth_value(nargs + 1);
let callable_tag = callable as *const PyObject as u32;
if cached_tag == callable_tag {
let args = self.collect_positional_args(nargs);
return self.execute_call(args, vm);
return self.execute_call_vectorcall(nargs, vm);
}
self.deoptimize(Instruction::Call {
argc: Arg::marker(),
});
let args = self.collect_positional_args(nargs);
self.execute_call(args, vm)
self.execute_call_vectorcall(nargs, vm)
}
Instruction::CallKwPy => {
let instr_idx = self.lasti() as usize - 1;
Expand Down Expand Up @@ -4197,14 +4176,12 @@ impl ExecutingFrame<'_> {
let callable = self.nth_value(nargs + 2);
let callable_tag = callable as *const PyObject as u32;
if cached_tag == callable_tag {
let args = self.collect_keyword_args(nargs);
return self.execute_call(args, vm);
return self.execute_call_kw_vectorcall(nargs, vm);
}
self.deoptimize(Instruction::CallKw {
argc: Arg::marker(),
});
let args = self.collect_keyword_args(nargs);
self.execute_call(args, vm)
self.execute_call_kw_vectorcall(nargs, vm)
}
Instruction::LoadSuperAttrAttr => {
let oparg = u32::from(arg);
Expand Down