2424#include <linux/kernel.h>
2525#include <linux/mutex.h>
2626#include <linux/slab.h>
27- #include <linux/ftrace.h>
2827#include <linux/list.h>
2928#include <linux/kallsyms.h>
3029#include <linux/livepatch.h>
3130#include <linux/elf.h>
3231#include <linux/moduleloader.h>
3332#include <asm/cacheflush.h>
34-
/**
 * struct klp_ops - structure for tracking registered ftrace ops structs
 *
 * A single ftrace_ops is shared between all enabled replacement functions
 * (klp_func structs) which have the same old_addr. This allows the switch
 * between function versions to happen instantaneously by updating the klp_ops
 * struct's func_stack list. The winner is the klp_func at the top of the
 * func_stack (front of the list).
 *
 * @node:	node for the global klp_ops list
 * @func_stack:	list head for the stack of klp_func's (active func is on top)
 * @fops:	registered ftrace ops struct
 */
struct klp_ops {
	struct list_head node;
	struct list_head func_stack;
	struct ftrace_ops fops;
};
33+ #include "patch.h"
5334
5435/*
5536 * The klp_mutex protects the global lists and state transitions of any
@@ -60,28 +41,12 @@ struct klp_ops {
6041static DEFINE_MUTEX (klp_mutex );
6142
6243static LIST_HEAD (klp_patches );
63- static LIST_HEAD (klp_ops );
6444
6545static struct kobject * klp_root_kobj ;
6646
/*
 * TODO: temporary stub — intentionally a no-op until the real
 * per-task patch-state transition code lands.
 */
void klp_update_patch_state(struct task_struct *task) {}
6949
70- static struct klp_ops * klp_find_ops (unsigned long old_addr )
71- {
72- struct klp_ops * ops ;
73- struct klp_func * func ;
74-
75- list_for_each_entry (ops , & klp_ops , node ) {
76- func = list_first_entry (& ops -> func_stack , struct klp_func ,
77- stack_node );
78- if (func -> old_addr == old_addr )
79- return ops ;
80- }
81-
82- return NULL ;
83- }
84-
8550static bool klp_is_module (struct klp_object * obj )
8651{
8752 return obj -> name ;
@@ -314,171 +279,6 @@ static int klp_write_object_relocations(struct module *pmod,
314279 return ret ;
315280}
316281
/*
 * Ftrace callback installed at every patched function's entry point.
 *
 * Redirects execution to the currently active replacement by rewriting the
 * saved program counter in @regs to the new function's address.  The active
 * replacement is the first klp_func on the ops' func_stack; it is read under
 * RCU so that patching/unpatching can update the stack concurrently with
 * list_add_rcu()/list_del_rcu().
 *
 * notrace: this runs from within ftrace itself and must not be traced.
 */
static void notrace klp_ftrace_handler(unsigned long ip,
				       unsigned long parent_ip,
				       struct ftrace_ops *fops,
				       struct pt_regs *regs)
{
	struct klp_ops *ops;
	struct klp_func *func;

	/* @fops is embedded inside a klp_ops; recover the container. */
	ops = container_of(fops, struct klp_ops, fops);

	rcu_read_lock();
	func = list_first_or_null_rcu(&ops->func_stack, struct klp_func,
				      stack_node);
	/*
	 * An empty func_stack should be impossible here (the handler is
	 * unregistered before the last entry is removed); warn once and
	 * fall through to the original function if it ever happens.
	 */
	if (WARN_ON_ONCE(!func))
		goto unlock;

	/* Divert execution to the replacement function. */
	klp_arch_set_pc(regs, (unsigned long)func->new_func);
unlock:
	rcu_read_unlock();
}
337-
/*
 * Convert a function address into the appropriate ftrace location.
 *
 * The default is the identity mapping — the ftrace call site sits at the
 * function entry itself.  Some architectures have a more complicated
 * layout, so they may provide their own definition of
 * klp_get_ftrace_location, which suppresses this fallback.
 */
#ifndef klp_get_ftrace_location
static unsigned long klp_get_ftrace_location(unsigned long faddr)
{
	return faddr;
}
#endif
350-
/*
 * Remove @func from its ops' func_stack; when @func is the only entry on
 * that stack, also unregister the shared ftrace ops and free it.
 *
 * Called with klp_mutex held (walks the global klp_ops list via
 * klp_find_ops()).
 */
static void klp_unpatch_func(struct klp_func *func)
{
	struct klp_ops *ops;

	if (WARN_ON(!func->patched))
		return;
	if (WARN_ON(!func->old_addr))
		return;

	ops = klp_find_ops(func->old_addr);
	if (WARN_ON(!ops))
		return;

	if (list_is_singular(&ops->func_stack)) {
		unsigned long ftrace_loc;

		ftrace_loc = klp_get_ftrace_location(func->old_addr);
		if (WARN_ON(!ftrace_loc))
			return;

		/*
		 * Last func on the stack: unregister the handler *before*
		 * emptying func_stack, so klp_ftrace_handler() can never
		 * observe an empty list.
		 */
		WARN_ON(unregister_ftrace_function(&ops->fops));
		WARN_ON(ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0));

		list_del_rcu(&func->stack_node);
		list_del(&ops->node);
		kfree(ops);
	} else {
		/* Another replacement remains active; just pop this one. */
		list_del_rcu(&func->stack_node);
	}

	func->patched = false;
}
383-
/*
 * Arm the livepatch for one function: push @func onto the func_stack of the
 * klp_ops registered for func->old_addr, creating and registering a new
 * ftrace ops when this is the first patch for that address.
 *
 * Returns 0 on success, -EINVAL on bad state or a missing ftrace location,
 * -ENOMEM, or the error from the ftrace registration calls.
 *
 * Called with klp_mutex held.
 */
static int klp_patch_func(struct klp_func *func)
{
	struct klp_ops *ops;
	int ret;

	if (WARN_ON(!func->old_addr))
		return -EINVAL;

	if (WARN_ON(func->patched))
		return -EINVAL;

	ops = klp_find_ops(func->old_addr);
	if (!ops) {
		/* First patch of this address: build a fresh klp_ops. */
		unsigned long ftrace_loc;

		ftrace_loc = klp_get_ftrace_location(func->old_addr);
		if (!ftrace_loc) {
			pr_err("failed to find location for function '%s'\n",
			       func->old_name);
			return -EINVAL;
		}

		ops = kzalloc(sizeof(*ops), GFP_KERNEL);
		if (!ops)
			return -ENOMEM;

		/*
		 * SAVE_REGS + IPMODIFY let the handler rewrite the saved PC
		 * via klp_arch_set_pc(); DYNAMIC because this ops struct is
		 * allocated at runtime.
		 */
		ops->fops.func = klp_ftrace_handler;
		ops->fops.flags = FTRACE_OPS_FL_SAVE_REGS |
				  FTRACE_OPS_FL_DYNAMIC |
				  FTRACE_OPS_FL_IPMODIFY;

		list_add(&ops->node, &klp_ops);

		/* Publish @func on the stack before the handler can fire. */
		INIT_LIST_HEAD(&ops->func_stack);
		list_add_rcu(&func->stack_node, &ops->func_stack);

		ret = ftrace_set_filter_ip(&ops->fops, ftrace_loc, 0, 0);
		if (ret) {
			pr_err("failed to set ftrace filter for function '%s' (%d)\n",
			       func->old_name, ret);
			goto err;
		}

		ret = register_ftrace_function(&ops->fops);
		if (ret) {
			pr_err("failed to register ftrace handler for function '%s' (%d)\n",
			       func->old_name, ret);
			/* Roll back the filter set just above. */
			ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0);
			goto err;
		}

	} else {
		/* Address already hooked: stack this func on top. */
		list_add_rcu(&func->stack_node, &ops->func_stack);
	}

	func->patched = true;

	return 0;

err:
	/* Undo the list insertions and free the ops we allocated above. */
	list_del_rcu(&func->stack_node);
	list_del(&ops->node);
	kfree(ops);
	return ret;
}
450-
451- static void klp_unpatch_object (struct klp_object * obj )
452- {
453- struct klp_func * func ;
454-
455- klp_for_each_func (obj , func )
456- if (func -> patched )
457- klp_unpatch_func (func );
458-
459- obj -> patched = false;
460- }
461-
462- static int klp_patch_object (struct klp_object * obj )
463- {
464- struct klp_func * func ;
465- int ret ;
466-
467- if (WARN_ON (obj -> patched ))
468- return - EINVAL ;
469-
470- klp_for_each_func (obj , func ) {
471- ret = klp_patch_func (func );
472- if (ret ) {
473- klp_unpatch_object (obj );
474- return ret ;
475- }
476- }
477- obj -> patched = true;
478-
479- return 0 ;
480- }
481-
482282static int __klp_disable_patch (struct klp_patch * patch )
483283{
484284 struct klp_object * obj ;
0 commit comments