Fix script loading synchronization issue and add LuaUnpack submodule #1
base: lneto-1
@@ -63,17 +63,26 @@ struct xdp_rxq_info {
         struct xdp_mem_info mem;
 } ____cacheline_aligned; /* perf critical, avoid false-sharing */
 
+#ifdef CONFIG_XDP_LUA
+struct xdplua {
+        struct lua_State *L;
+        spinlock_t *lock;
+};

Reviewer: So this … and you can easily grab this, as you already do: … This might be moot if you just remove the lock anyway. Also, it should be possible, and it would be much superior, if instead of having a lua_State pointer as the per-CPU attribute you just had the value directly; that would be one less unnecessary level of indirection.

Author: Yes, maybe it would be possible not to use this struct. I'll try this out.

+
+DECLARE_PER_CPU(struct xdplua, xdplua_per_cpu);
+#endif /* CONFIG_XDP_LUA */
+
 struct xdp_buff {
         void *data;
         void *data_end;
         void *data_meta;
         void *data_hard_start;
         unsigned long handle;
         struct xdp_rxq_info *rxq;
-#ifdef CONFIG_XDPLUA
+#ifdef CONFIG_XDP_LUA
         struct sk_buff *skb;
-        struct lua_State *L;
-#endif /* CONFIG_XDPLUA */
+        struct xdplua *xdplua;
+#endif /* CONFIG_XDP_LUA */
 };
 
 struct xdp_frame {
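The hunk above appears to come from include/net/xdp.h and the hunks below from net/core/dev.c; the file names are not preserved on this page, so they are inferred from the surrounding symbols. As a reading aid, here is a minimal sketch of how the per-CPU state introduced above would typically be reached from the receive path, which runs in softirq context. The helper and its name are invented for illustration and are not part of the PR; it assumes the declarations from the diff above are in scope.

```c
#ifdef CONFIG_XDP_LUA
/* Hypothetical helper, illustration only: run a Lua chunk on this CPU's state. */
static void xdplua_run_local(const char *chunk)
{
        struct xdplua *x = this_cpu_ptr(&xdplua_per_cpu);

        spin_lock_bh(x->lock);  /* serialize against generic_xdp_lua_install_prog() */
        if (x->L && luaL_dostring(x->L, chunk))
                pr_err("xdplua: %s\n", lua_tostring(x->L, -1));
        spin_unlock_bh(x->lock);
}
#endif /* CONFIG_XDP_LUA */
```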
@@ -68,6 +68,13 @@
  *      - netif_rx() feedback
  */
 
+#ifdef CONFIG_XDP_LUA
+#include <lua.h>
+#include <lauxlib.h>
+#include <lualib.h>
+#include <luadata.h>
+#endif /* CONFIG_XDP_LUA */
+
 #include <linux/uaccess.h>
 #include <linux/bitops.h>
 #include <linux/capability.h>
@@ -143,12 +150,6 @@
 #include <linux/indirect_call_wrapper.h>
 #include <net/devlink.h>
 
-#ifdef CONFIG_XDPLUA
-#include <lua.h>
-#include <lauxlib.h>
-#include <lualib.h>
-#endif /* CONFIG_XDPLUA */
-
 #include "net-sysfs.h"
 
 #define MAX_GRO_SKBS 8
@@ -158,6 +159,9 @@
 
 static DEFINE_SPINLOCK(ptype_lock);
 static DEFINE_SPINLOCK(offload_lock);
+#ifdef CONFIG_XDP_LUA
+static DEFINE_PER_CPU(spinlock_t, lua_state_lock);
+#endif
 struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
 struct list_head ptype_all __read_mostly;       /* Taps */
 static struct list_head offload_base __read_mostly;
@@ -170,10 +174,6 @@ static int call_netdevice_notifiers_extack(unsigned long val,
                                                   struct netlink_ext_ack *extack);
 static struct napi_struct *napi_by_id(unsigned int napi_id);
 
-#ifdef CONFIG_XDPLUA
-struct list_head lua_state_cpu_list;
-#endif /* CONFIG_XDPLUA */
-
 /*
  * The @dev_base_head list is protected by @dev_base_lock and the rtnl
  * semaphore.
@@ -4381,9 +4381,9 @@ static u32 netif_receive_generic_xdp(struct sk_buff *skb,
 
         rxqueue = netif_get_rxqueue(skb);
         xdp->rxq = &rxqueue->xdp_rxq;
-#ifdef CONFIG_XDPLUA
+#ifdef CONFIG_XDP_LUA
         xdp->skb = skb;
-#endif /* CONFIG_XDPLUA */
+#endif /* CONFIG_XDP_LUA */
 
         act = bpf_prog_run_xdp(xdp_prog, xdp);
 
@@ -5196,21 +5196,29 @@ static int generic_xdp_install(struct net_device *dev, struct netdev_bpf *xdp)
         return ret;
 }
 
-#ifdef CONFIG_XDPLUA
+#ifdef CONFIG_XDP_LUA
+DEFINE_PER_CPU(struct xdplua, xdplua_per_cpu);
+
 int generic_xdp_lua_install_prog(char *lua_prog)
 {
-        struct lua_state_cpu *sc;
+        struct xdplua *sc;
+        int i;
 
-        list_for_each_entry(sc, &lua_state_cpu_list, list) {
+        for_each_possible_cpu(i) {
+                sc = per_cpu_ptr(&xdplua_per_cpu, i);
+                spin_lock_bh(sc->lock);
                 if (luaL_dostring(sc->L, lua_prog)) {
                         pr_err(KERN_INFO "error: %s\nOn cpu: %d\n",
-                                lua_tostring(sc->L, -1), sc->cpu);
+                                lua_tostring(sc->L, -1), i);
+                        spin_unlock_bh(sc->lock);
                         return -EINVAL;
                 }
+
+                spin_unlock_bh(sc->lock);
         }
         return 0;
 }
-#endif /* CONFIG_XDPLUA */
+#endif /* CONFIG_XDP_LUA */
 
 static int netif_receive_skb_internal(struct sk_buff *skb)
 {
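For context, a hypothetical caller of the rewritten loader could look like the sketch below. Only generic_xdp_lua_install_prog() comes from the diff above; the function name, the buffer handling, and the idea that the script arrives as a user-supplied text buffer are assumptions made for illustration.

```c
/* Hypothetical control-path glue, not part of the PR. */
static int xdplua_load_script_text(const char *buf, size_t len)
{
        char *prog;
        int err;

        prog = kstrndup(buf, len, GFP_KERNEL); /* copy and NUL-terminate the script */
        if (!prog)
                return -ENOMEM;

        err = generic_xdp_lua_install_prog(prog); /* load into every CPU's Lua state */
        kfree(prog);
        return err;
}
```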
@@ -9829,9 +9837,6 @@ static struct pernet_operations __net_initdata default_device_ops = {
 static int __init net_dev_init(void)
 {
         int i, rc = -ENOMEM;
-#ifdef CONFIG_XDPLUA
-        struct lua_state_cpu *new_state_cpu;
-#endif /* CONFIG_XDPLUA */
 
         BUG_ON(!dev_boot_phase);
 
@@ -9846,9 +9851,6 @@ static int __init net_dev_init(void)
                 INIT_LIST_HEAD(&ptype_base[i]);
 
         INIT_LIST_HEAD(&offload_base);
-#ifdef CONFIG_XDPLUA
-        INIT_LIST_HEAD(&lua_state_cpu_list);
-#endif /* CONFIG_XDPLUA */
 
         if (register_pernet_subsys(&netdev_net_ops))
                 goto out;
@@ -9860,6 +9862,7 @@ static int __init net_dev_init(void)
         for_each_possible_cpu(i) {
                 struct work_struct *flush = per_cpu_ptr(&flush_works, i);
                 struct softnet_data *sd = &per_cpu(softnet_data, i);
+                struct xdplua *xdplua = per_cpu_ptr(&xdplua_per_cpu, i);
 
                 INIT_WORK(flush, flush_backlog);
 
@@ -9879,26 +9882,18 @@ static int __init net_dev_init(void)
                 init_gro_hash(&sd->backlog);
                 sd->backlog.poll = process_backlog;
                 sd->backlog.weight = weight_p;
 
-#ifdef CONFIG_XDPLUA
-                new_state_cpu = (struct lua_state_cpu *)
-                        kmalloc(sizeof(lua_state_cpu), GFP_ATOMIC);
-                if (!new_state_cpu)
-                        continue;
-
-                new_state_cpu->L = luaL_newstate();
-                if (!new_state_cpu->L) {
-                        kfree(new_state_cpu);
+#ifdef CONFIG_XDP_LUA
+                xdplua->L = luaL_newstate();
+                if (!xdplua->L) {
+                        kfree(xdplua);

Reviewer: Leaving xdplua dangling.

                         continue;
                 }
-
-                luaL_openlibs(new_state_cpu->L);
-                luaL_requiref(new_state_cpu->L, "data", luaopen_data, 1);
-                lua_pop(new_state_cpu->L, 1);
-                new_state_cpu->cpu = i;
-
-                list_add(&new_state_cpu->list, &lua_state_cpu_list);
-#endif /* CONFIG_XDPLUA */
+                luaL_openlibs(xdplua->L);
+                luaL_requiref(xdplua->L, "data", luaopen_data, 1);
+                lua_pop(xdplua->L, 1);
+                xdplua->lock = per_cpu_ptr(&lua_state_lock, i);
+#endif /* CONFIG_XDP_LUA */
         }
 
         dev_boot_phase = 0;
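On the inline comment above about the error path: xdplua points into storage declared with DEFINE_PER_CPU rather than at a kmalloc() allocation, so kfree(xdplua) is not the matching cleanup and the per-CPU slot is still left half-initialized. One possible alternative, sketched below as an assumption rather than code from the PR (it reuses xdplua and i from the surrounding loop), is to leave L as NULL and have every user check it:

```c
#ifdef CONFIG_XDP_LUA
                xdplua->L = luaL_newstate();
                if (!xdplua->L) {
                        pr_warn("xdplua: no Lua state on cpu %d\n", i);
                        continue; /* xdplua->L stays NULL; users must check it */
                }
#endif /* CONFIG_XDP_LUA */
```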
Reviewer: Is `spinlock` the right concurrency primitive here? So this is a lock per Lua state, and there is a Lua state per CPU, and spinlocks are only appropriate for locking accesses between different CPUs to the same data. And even in that context, spinlocks are highly inefficient when they are taken for a long time, as would be the case while executing a Lua script. So if you ever happened upon a case where the same CPU took this lock twice, it would hang forever, so I suspect that you might not even need a lock at all.
Author: Unfortunately a lock is indeed needed. If a user attempts to load a Lua script while another script is being executed in the same Lua state, we will have concurrent accesses to the Lua state, which will cause synchronization issues. If not a `spinlock`, what would you suggest?
Reviewer: Based on your other message, I see now that what you probably need is just `local_bh_disable` and `local_bh_enable` when loading the script.
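A rough sketch of that suggestion, under the assumption that each Lua state is only ever touched by its owning CPU. The function name is invented, and dispatching the load to every CPU (for example with per-CPU work items) is left out.

```c
/* Hypothetical loader using BH exclusion instead of the per-CPU spinlock. */
static int xdplua_load_on_this_cpu(const char *lua_prog)
{
        struct xdplua *x;
        int err = 0;

        local_bh_disable();     /* keep the XDP softirq path off this CPU's state */
        x = this_cpu_ptr(&xdplua_per_cpu);
        if (luaL_dostring(x->L, lua_prog)) {
                pr_err("xdplua: %s\n", lua_tostring(x->L, -1));
                err = -EINVAL;
        }
        local_bh_enable();

        return err;
}
```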