
Commit 78dcdff

dcaratti authored and Paolo Abeni committed
net/sched: act_mirred: better wording on protection against excessive stack growth
With commit e2ca070 ("net: sched: protect against stack overflow in TC act_mirred"), act_mirred protected itself against excessive stack growth using a per-CPU counter of nested calls to tcf_mirred_act(), capped at MIRRED_RECURSION_LIMIT. However, that protection does not detect recursion/loops when the packet is enqueued to the backlog (for example, when the mirred target device has RPS or skb timestamping enabled). Change the wording from "recursion" to "nesting" to make this clearer to readers.

CC: Jamal Hadi Salim <[email protected]>
Signed-off-by: Davide Caratti <[email protected]>
Reviewed-by: Marcelo Ricardo Leitner <[email protected]>
Acked-by: Jamal Hadi Salim <[email protected]>
Signed-off-by: Paolo Abeni <[email protected]>
1 parent 5cf6c22 commit 78dcdff
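Note: the guard this patch renames is a per-CPU nesting counter around tcf_mirred_act(). Condensed from the hunks below (the mirror/redirect body and the full function signature are elided, so this is an illustrative excerpt rather than compilable code), the pattern after the rename reads roughly as follows:

#define MIRRED_NEST_LIMIT 4
static DEFINE_PER_CPU(unsigned int, mirred_nest_level);

        /* on entry to tcf_mirred_act(): bump this CPU's nesting depth */
        nest_level = __this_cpu_inc_return(mirred_nest_level);
        if (unlikely(nest_level > MIRRED_NEST_LIMIT)) {
                net_warn_ratelimited("Packet exceeded mirred recursion limit on dev %s\n",
                                     netdev_name(skb->dev));
                __this_cpu_dec(mirred_nest_level);
                return TC_ACT_SHOT;
        }

        /* ... mirror/redirect work ... */

        /* every exit path drops the nesting level again */
        __this_cpu_dec(mirred_nest_level);

Because the counter only tracks synchronous nesting on the current CPU, it cannot catch loops that go through the backlog, which is exactly why the commit message prefers "nesting" over "recursion".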

File tree: 1 file changed (+8, -8 lines)


net/sched/act_mirred.c

Lines changed: 8 additions & 8 deletions
@@ -29,8 +29,8 @@
 static LIST_HEAD(mirred_list);
 static DEFINE_SPINLOCK(mirred_list_lock);
 
-#define MIRRED_RECURSION_LIMIT 4
-static DEFINE_PER_CPU(unsigned int, mirred_rec_level);
+#define MIRRED_NEST_LIMIT 4
+static DEFINE_PER_CPU(unsigned int, mirred_nest_level);
 
 static bool tcf_mirred_is_act_redirect(int action)
 {
@@ -226,7 +226,7 @@ TC_INDIRECT_SCOPE int tcf_mirred_act(struct sk_buff *skb,
         struct sk_buff *skb2 = skb;
         bool m_mac_header_xmit;
         struct net_device *dev;
-        unsigned int rec_level;
+        unsigned int nest_level;
         int retval, err = 0;
         bool use_reinsert;
         bool want_ingress;
@@ -237,11 +237,11 @@ TC_INDIRECT_SCOPE int tcf_mirred_act(struct sk_buff *skb,
         int mac_len;
         bool at_nh;
 
-        rec_level = __this_cpu_inc_return(mirred_rec_level);
-        if (unlikely(rec_level > MIRRED_RECURSION_LIMIT)) {
+        nest_level = __this_cpu_inc_return(mirred_nest_level);
+        if (unlikely(nest_level > MIRRED_NEST_LIMIT)) {
                 net_warn_ratelimited("Packet exceeded mirred recursion limit on dev %s\n",
                                      netdev_name(skb->dev));
-                __this_cpu_dec(mirred_rec_level);
+                __this_cpu_dec(mirred_nest_level);
                 return TC_ACT_SHOT;
         }
 
@@ -310,7 +310,7 @@ TC_INDIRECT_SCOPE int tcf_mirred_act(struct sk_buff *skb,
                         err = tcf_mirred_forward(want_ingress, skb);
                         if (err)
                                 tcf_action_inc_overlimit_qstats(&m->common);
-                        __this_cpu_dec(mirred_rec_level);
+                        __this_cpu_dec(mirred_nest_level);
                         return TC_ACT_CONSUMED;
                 }
         }
@@ -322,7 +322,7 @@ TC_INDIRECT_SCOPE int tcf_mirred_act(struct sk_buff *skb,
                 if (tcf_mirred_is_act_redirect(m_eaction))
                         retval = TC_ACT_SHOT;
         }
-        __this_cpu_dec(mirred_rec_level);
+        __this_cpu_dec(mirred_nest_level);
 
         return retval;
 }
