@@ -64,31 +64,38 @@ class ExecutorBackend final : public PyTorchBackendInterface {
6464 ET_ALLOCATE_INSTANCE_OR_RETURN_ERROR (runtime_allocator, Program);
6565 new (client_program) Program (std::move (program_result.get ()));
6666
67+ Result<MethodMeta> method_meta = client_program->method_meta (" forward" );
68+ if (!method_meta.ok ()) {
69+ ET_LOG (Error, " error constructing method meta" );
70+ return method_meta.error ();
71+ }
72+
6773 // Building all different allocators for the client executor
6874 auto client_const_allocator = ET_ALLOCATE_INSTANCE_OR_RETURN_ERROR (
6975 runtime_allocator, MemoryAllocator);
7076 new (client_const_allocator) MemoryAllocator (0 , nullptr );
7177
72- size_t num_non_const_buffers = client_program->num_non_const_buffers () - 1 ;
78+ auto num_buffers = method_meta->num_non_const_buffers ();
79+ size_t num_non_const_buffers = num_buffers - 1 ;
7380
7481 uint8_t ** non_const_buffers = ET_ALLOCATE_LIST_OR_RETURN_ERROR (
7582 runtime_allocator, uint8_t *, num_non_const_buffers);
7683 MemoryAllocator* non_const_allocators = ET_ALLOCATE_LIST_OR_RETURN_ERROR (
7784 runtime_allocator, MemoryAllocator, num_non_const_buffers);
7885
79- for (size_t id = 1 ; id < client_program-> num_non_const_buffers () ; ++id) {
80- const size_t buffer_size = client_program-> get_non_const_buffer_size (id);
86+ for (size_t id = 1 ; id < num_buffers ; ++id) {
87+ auto buffer_size = method_meta-> non_const_buffer_size (id);
8188 uint8_t * buffer_i = ET_ALLOCATE_LIST_OR_RETURN_ERROR (
82- runtime_allocator, uint8_t , buffer_size);
89+ runtime_allocator, uint8_t , buffer_size. get () );
8390 non_const_buffers[id - 1 ] = buffer_i;
8491 new (&non_const_allocators[id - 1 ])
85- MemoryAllocator (static_cast <uint32_t >(buffer_size), buffer_i);
92+ MemoryAllocator (static_cast <uint32_t >(buffer_size. get () ), buffer_i);
8693 }
8794
8895 auto client_non_const_allocator = ET_ALLOCATE_INSTANCE_OR_RETURN_ERROR (
8996 runtime_allocator, HierarchicalAllocator);
90- new (client_non_const_allocator) HierarchicalAllocator (
91- client_program-> num_non_const_buffers () - 1 , non_const_allocators);
97+ new (client_non_const_allocator)
98+ HierarchicalAllocator (num_non_const_buffers , non_const_allocators);
9299
93100 // Allocate some memory from runtime allocator for the client executor, in
94101 // real case, like if it's an executor in dsp, it should allocate memory
@@ -113,46 +120,49 @@ class ExecutorBackend final : public PyTorchBackendInterface {
113120 client_runtime_allocator,
114121 client_temp_allocator);
115122
116- // Construct the client executor
117- auto client_executor =
118- ET_ALLOCATE_INSTANCE_OR_RETURN_ERROR (runtime_allocator, Executor);
119- new (client_executor) Executor (client_program, client_memory_manager);
120-
121- // Initialize the client executor
122- Error err = client_executor->init_execution_plan (" forward" );
123- if (err != Error::Ok) {
124- ET_LOG (Error, " Failed to init client executor: 0x%x" , (unsigned int )err);
125- return err;
123+ // Construct the client Method
124+ Result<Method> method_res =
125+ client_program->load_method (" forward" , client_memory_manager);
126+ if (!method_res.ok ()) {
127+ ET_LOG (
128+ Error,
129+ " Failed to load client method: 0x%x" ,
130+ (unsigned int )method_res.error ());
131+ return method_res.error ();
126132 }
127133
128- // Return the client executor so it will be passed to `execute()` as
134+ auto client_method =
135+ ET_ALLOCATE_INSTANCE_OR_RETURN_ERROR (runtime_allocator, Method);
136+ new (client_method) Method (std::move (method_res.get ()));
137+
138+ // Return the client method so it will be passed to `execute()` as
129139 // `handle`.
130- return client_executor ;
140+ return client_method ;
131141 }
132142
133143 Error execute (DelegateHandle* handle, EValue** args) const override {
134- Executor* client_executor = static_cast <Executor*>(handle);
135- auto & plan = client_executor->execution_plan ();
136- auto plan_inputs_size = plan.inputs_size ();
144+ Method* client_method = static_cast <Method*>(handle);
145+ auto num_inputs = client_method->inputs_size ();
137146 Error status = Error::Ok;
138147
139148 // Receive client executor input
140- for (size_t input_idx = 0 ; input_idx < plan_inputs_size ; input_idx++) {
141- status = plan. set_input (*args[input_idx], input_idx);
149+ for (size_t input_idx = 0 ; input_idx < num_inputs ; input_idx++) {
150+ status = client_method-> set_input (*args[input_idx], input_idx);
142151 }
143152 // Execute client executor
144- status = plan. execute ();
153+ status = client_method-> execute ();
145154
146155 // Send the client executor output
147- status = plan.get_outputs (args[plan_inputs_size], plan.outputs_size ());
156+ status = client_method->get_outputs (
157+ args[num_inputs], client_method->outputs_size ());
148158
149159 return status;
150160 }
151161
152162 void destroy (DelegateHandle* handle) const override {
153163 if (handle != nullptr ) {
154- Executor * client_executor = static_cast <Executor *>(handle);
155- client_executor->~Executor ();
164+ Method * client_executor = static_cast <Method *>(handle);
165+ client_executor->~Method ();
156166 }
157167 }
158168};