commit 3a7ad0634f

This reverts commit 71e4128620.

mmap()/munmap() cannot be backed by kmalloced pages: we fault in:

    VM_BUG_ON_PAGE(PageSlab(page), page);
    unmap_single_vma+0x8a/0x110
    unmap_vmas+0x4b/0x90
    unmap_region+0xc9/0x140
    do_munmap+0x274/0x360
    vm_munmap+0x81/0xc0
    SyS_munmap+0x2b/0x40
    do_syscall_64+0x13e/0x1c0
    entry_SYSCALL_64_after_hwframe+0x42/0xb7

Fixes: 71e4128620 ("packet: switch kvzalloc to allocate memory")
Signed-off-by: Eric Dumazet <edumazet@google.com>
Reported-by: John Sperbeck <jsperbeck@google.com>
Bisected-by: John Sperbeck <jsperbeck@google.com>
Cc: Zhang Yu <zhangyu31@baidu.com>
Cc: Li RongQing <lirongqing@baidu.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
142 lines
3.2 KiB
C
142 lines
3.2 KiB
C
/* SPDX-License-Identifier: GPL-2.0 */
|
|
#ifndef __PACKET_INTERNAL_H__
|
|
#define __PACKET_INTERNAL_H__
|
|
|
|
#include <linux/refcount.h>
|
|
|
|
/* One entry in a packet socket's membership list (see struct
 * packet_sock::mclist).  Each entry records a device-level address
 * the socket has joined on a given interface.
 */
struct packet_mclist {
	struct packet_mclist	*next;		/* singly linked list */
	int			ifindex;	/* interface the address belongs to */
	int			count;		/* number of identical join requests */
	unsigned short		type;		/* membership type */
	unsigned short		alen;		/* valid length of addr[] */
	unsigned char		addr[MAX_ADDR_LEN];
};
|
|
|
|
/* kbdq - kernel block descriptor queue */
struct tpacket_kbdq_core {
	struct pgv	*pkbdq;		/* the block descriptor queue itself */
	unsigned int	feature_req_word;
	unsigned int	hdrlen;
	unsigned char	reset_pending_on_curr_blk;
	unsigned char   delete_blk_timer;	/* flag: stop using retire_blk_timer */
	unsigned short	kactive_blk_num;	/* block the kernel is currently filling */
	unsigned short	blk_sizeof_priv;	/* size of the per-block private area */

	/* last_kactive_blk_num:
	 * trick to see if user-space has caught up
	 * in order to avoid refreshing timer when every single pkt arrives.
	 */
	unsigned short	last_kactive_blk_num;

	char		*pkblk_start;	/* start of the current block */
	char		*pkblk_end;	/* end of the current block */
	int		kblk_size;	/* block size in bytes */
	unsigned int	max_frame_len;	/* largest frame that fits in a block */
	unsigned int	knum_blocks;	/* number of blocks in the ring */
	uint64_t	knxt_seq_num;	/* next sequence number to assign */
	char		*prev;		/* previously written frame */
	char		*nxt_offset;	/* where the next frame will be written */
	struct sk_buff	*skb;

	/* non-zero while a frame is being copied into the current block;
	 * block retirement must wait for it to drop back to zero.
	 */
	atomic_t	blk_fill_in_prog;

	/* Default is set to 8ms */
#define DEFAULT_PRB_RETIRE_TOV	(8)

	unsigned short  retire_blk_tov;		/* block retire timeout (ms) */
	unsigned short  version;
	unsigned long	tov_in_jiffies;		/* retire timeout converted to jiffies */

	/* timer to retire an outstanding block */
	struct timer_list retire_blk_timer;
};
|
|
|
|
/* One buffer of the ring's page vector.  These buffers are mmap()ed
 * into user space, so they must be backed by page-allocator pages,
 * never kmalloc/slab memory (see the revert note in the commit
 * message: remap of kmalloced pages trips VM_BUG_ON_PAGE(PageSlab)).
 */
struct pgv {
	char *buffer;
};
|
|
|
|
/* State for one mmap()ed packet ring (used for both rx_ring and
 * tx_ring in struct packet_sock).
 */
struct packet_ring_buffer {
	struct pgv		*pg_vec;	/* vector of buffers mapped to user space */

	unsigned int		head;		/* current frame cursor */
	unsigned int		frames_per_block;
	unsigned int		frame_size;	/* bytes per frame slot */
	unsigned int		frame_max;	/* highest valid frame index */

	unsigned int		pg_vec_order;	/* page order of each pg_vec buffer */
	unsigned int		pg_vec_pages;	/* pages per pg_vec buffer */
	unsigned int		pg_vec_len;	/* number of entries in pg_vec */

	/* per-cpu count of frames still in flight — NOTE(review):
	 * presumably tx completions; confirm against af_packet.c.
	 */
	unsigned int __percpu	*pending_refcnt;

	struct tpacket_kbdq_core prb_bdqc;	/* block descriptor queue state */
};
|
|
|
|
/* Serializes fanout group setup/teardown — NOTE(review): defined in
 * af_packet.c; confirm exact locking scope there.
 */
extern struct mutex fanout_mutex;
/* Maximum member sockets in one fanout group (size of arr[] below). */
#define PACKET_FANOUT_MAX	256
|
|
|
|
/* A fanout group: a set of packet sockets sharing one protocol hook,
 * among which received packets are distributed.
 */
struct packet_fanout {
	possible_net_t		net;		/* owning network namespace */
	unsigned int		num_members;	/* live entries in arr[] */
	u16			id;		/* user-chosen group id */
	u8			type;		/* distribution mode */
	u8			flags;
	union {					/* per-mode state */
		atomic_t		rr_cur;		/* round-robin cursor */
		struct bpf_prog __rcu	*bpf_prog;	/* BPF-driven distribution */
	};
	struct list_head	list;		/* linkage on the global fanout list */
	struct sock		*arr[PACKET_FANOUT_MAX];	/* member sockets */
	spinlock_t		lock;
	refcount_t		sk_ref;		/* one reference per member socket */
	/* shared rx hook, isolated on its own cacheline for the hot path */
	struct packet_type	prot_hook ____cacheline_aligned_in_smp;
};
|
|
|
|
/* Per-socket rollover state and statistics. */
struct packet_rollover {
	int			sock;		/* last socket rolled over to */
	atomic_long_t		num;		/* total rollover events */
	atomic_long_t		num_huge;
	atomic_long_t		num_failed;	/* rollovers that found no room */
#define ROLLOVER_HLEN	(L1_CACHE_BYTES / sizeof(u32))
	/* recent-target history, one cacheline to avoid false sharing */
	u32			history[ROLLOVER_HLEN] ____cacheline_aligned;
} ____cacheline_aligned_in_smp;
|
|
|
|
/* Private state of an AF_PACKET socket. */
struct packet_sock {
	/* struct sock has to be the first member of packet_sock */
	struct sock		sk;		/* pkt_sk() relies on this layout */
	struct packet_fanout	*fanout;	/* fanout group, if joined */
	union tpacket_stats_u	stats;
	struct packet_ring_buffer	rx_ring;	/* mmap()ed receive ring */
	struct packet_ring_buffer	tx_ring;	/* mmap()ed transmit ring */
	int			copy_thresh;
	spinlock_t		bind_lock;
	struct mutex		pg_vec_lock;	/* guards ring setup/teardown */
	unsigned int		running;	/* bind_lock must be held */
	unsigned int		auxdata:1,	/* writer must hold sock lock */
				origdev:1,
				has_vnet_hdr:1,
				tp_loss:1,
				tp_tx_has_off:1;
	int			pressure;
	int			ifindex;	/* bound device */
	__be16			num;		/* bound protocol number */
	struct packet_rollover	*rollover;
	struct packet_mclist	*mclist;	/* device-level memberships */
	atomic_t		mapped;		/* rings currently mmap()ed */
	enum tpacket_versions	tp_version;	/* ring frame format version */
	unsigned int		tp_hdrlen;
	unsigned int		tp_reserve;
	unsigned int		tp_tstamp;
	struct net_device __rcu	*cached_dev;
	int			(*xmit)(struct sk_buff *skb);
	/* rx hook, isolated on its own cacheline for the hot path */
	struct packet_type	prot_hook ____cacheline_aligned_in_smp;
};
|
|
|
|
/* Upcast a generic sock to its enclosing packet_sock.  Valid because
 * struct sock is the first member of struct packet_sock (documented
 * on the struct), so both share the same address.
 */
static struct packet_sock *pkt_sk(struct sock *sk)
{
	struct packet_sock *po = (struct packet_sock *)sk;

	return po;
}
|
|
|
|
#endif
|