diff --git a/drivers/net/slip.c b/drivers/net/slip.c index 19112712daf0..c79e0ad4ba02 100644 --- a/drivers/net/slip.c +++ b/drivers/net/slip.c @@ -198,18 +198,12 @@ sl_alloc_bufs(struct slip *sl, int mtu) static void sl_free_bufs(struct slip *sl) { - void * tmp; - /* Free all SLIP frame buffers. */ - tmp = xchg(&sl->rbuff, NULL); - kfree(tmp); - tmp = xchg(&sl->xbuff, NULL); - kfree(tmp); + kfree(xchg(&sl->rbuff, NULL)); + kfree(xchg(&sl->xbuff, NULL)); #ifdef SL_INCLUDE_CSLIP - tmp = xchg(&sl->cbuff, NULL); - kfree(tmp); - if ((tmp = xchg(&sl->slcomp, NULL)) != NULL) - slhc_free(tmp); + kfree(xchg(&sl->cbuff, NULL)); + slhc_free(xchg(&sl->slcomp, NULL)); #endif } diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index d89816ad642f..3a0ed7f9e801 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -164,12 +164,6 @@ struct netif_rx_stats unsigned total; unsigned dropped; unsigned time_squeeze; - unsigned throttled; - unsigned fastroute_hit; - unsigned fastroute_success; - unsigned fastroute_defer; - unsigned fastroute_deferred_out; - unsigned fastroute_latency_reduction; unsigned cpu_collision; }; @@ -562,12 +556,9 @@ static inline int unregister_gifconf(unsigned int family) struct softnet_data { - int throttle; - int cng_level; - int avg_blog; + struct net_device *output_queue; struct sk_buff_head input_pkt_queue; struct list_head poll_list; - struct net_device *output_queue; struct sk_buff *completion_queue; struct net_device backlog_dev; /* Sorry. 8) */ diff --git a/include/linux/pkt_cls.h b/include/linux/pkt_cls.h index d2aa214d6803..25d2d67c1faf 100644 --- a/include/linux/pkt_cls.h +++ b/include/linux/pkt_cls.h @@ -408,6 +408,7 @@ enum TCF_EM_NBYTE, TCF_EM_U32, TCF_EM_META, + TCF_EM_TEXT, __TCF_EM_MAX }; diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h index e68dbf0bf579..d021888b58f1 100644 --- a/include/linux/rtnetlink.h +++ b/include/linux/rtnetlink.h @@ -892,10 +892,13 @@ extern void __rta_fill(struct sk_buff *skb, int attrtype, int attrlen, const voi goto rtattr_failure; \ __rta_fill(skb, attrtype, attrlen, data); }) -#define RTA_PUT_NOHDR(skb, attrlen, data) \ +#define RTA_APPEND(skb, attrlen, data) \ ({ if (unlikely(skb_tailroom(skb) < (int)(attrlen))) \ goto rtattr_failure; \ - memcpy(skb_put(skb, RTA_ALIGN(attrlen)), data, attrlen); }) + memcpy(skb_put(skb, attrlen), data, attrlen); }) + +#define RTA_PUT_NOHDR(skb, attrlen, data) \ + RTA_APPEND(skb, RTA_ALIGN(attrlen), data) #define RTA_PUT_U8(skb, attrtype, value) \ ({ u8 _tmp = (value); \ diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index d7c839a21842..416a2e4024b2 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -27,6 +27,7 @@ #include #include #include +#include #include #define HAVE_ALLOC_SKB /* For the drivers to know */ @@ -321,6 +322,28 @@ extern void skb_over_panic(struct sk_buff *skb, int len, extern void skb_under_panic(struct sk_buff *skb, int len, void *here); +struct skb_seq_state +{ + __u32 lower_offset; + __u32 upper_offset; + __u32 frag_idx; + __u32 stepped_offset; + struct sk_buff *root_skb; + struct sk_buff *cur_skb; + __u8 *frag_data; +}; + +extern void skb_prepare_seq_read(struct sk_buff *skb, + unsigned int from, unsigned int to, + struct skb_seq_state *st); +extern unsigned int skb_seq_read(unsigned int consumed, const u8 **data, + struct skb_seq_state *st); +extern void skb_abort_seq_read(struct skb_seq_state *st); + +extern unsigned int skb_find_text(struct sk_buff *skb, unsigned int from, + unsigned int to, 
struct ts_config *config, + struct ts_state *state); + /* Internal */ #define skb_shinfo(SKB) ((struct skb_shared_info *)((SKB)->end)) diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h index 72965bfe6cfb..ebfe1250f0a4 100644 --- a/include/linux/sysctl.h +++ b/include/linux/sysctl.h @@ -243,6 +243,7 @@ enum NET_CORE_MOD_CONG=16, NET_CORE_DEV_WEIGHT=17, NET_CORE_SOMAXCONN=18, + NET_CORE_BUDGET=19, }; /* /proc/sys/net/ethernet */ diff --git a/include/linux/tc_ematch/tc_em_text.h b/include/linux/tc_ematch/tc_em_text.h new file mode 100644 index 000000000000..7cd43e99c7f5 --- /dev/null +++ b/include/linux/tc_ematch/tc_em_text.h @@ -0,0 +1,19 @@ +#ifndef __LINUX_TC_EM_TEXT_H +#define __LINUX_TC_EM_TEXT_H + +#include + +#define TC_EM_TEXT_ALGOSIZ 16 + +struct tcf_em_text +{ + char algo[TC_EM_TEXT_ALGOSIZ]; + __u16 from_offset; + __u16 to_offset; + __u16 pattern_len; + __u8 from_layer:4; + __u8 to_layer:4; + __u8 pad; +}; + +#endif diff --git a/include/linux/tcp.h b/include/linux/tcp.h index 3ea75dd6640a..dfd93d03f5d2 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -127,6 +127,7 @@ enum { #define TCP_WINDOW_CLAMP 10 /* Bound advertised window */ #define TCP_INFO 11 /* Information about this connection. */ #define TCP_QUICKACK 12 /* Block/reenable quick acks */ +#define TCP_CONGESTION 13 /* Congestion control algorithm */ #define TCPI_OPT_TIMESTAMPS 1 #define TCPI_OPT_SACK 2 diff --git a/include/linux/textsearch.h b/include/linux/textsearch.h new file mode 100644 index 000000000000..941f45ac117a --- /dev/null +++ b/include/linux/textsearch.h @@ -0,0 +1,180 @@ +#ifndef __LINUX_TEXTSEARCH_H +#define __LINUX_TEXTSEARCH_H + +#ifdef __KERNEL__ + +#include +#include +#include +#include +#include + +struct ts_config; + +/** + * TS_AUTOLOAD - Automatically load textsearch modules when needed + */ +#define TS_AUTOLOAD 1 + +/** + * struct ts_state - search state + * @offset: offset for next match + * @cb: control buffer, for persistant variables of get_next_block() + */ +struct ts_state +{ + unsigned int offset; + char cb[40]; +}; + +/** + * struct ts_ops - search module operations + * @name: name of search algorithm + * @init: initialization function to prepare a search + * @find: find the next occurrence of the pattern + * @destroy: destroy algorithm specific parts of a search configuration + * @get_pattern: return head of pattern + * @get_pattern_len: return length of pattern + * @owner: module reference to algorithm + */ +struct ts_ops +{ + const char *name; + struct ts_config * (*init)(const void *, unsigned int, int); + unsigned int (*find)(struct ts_config *, + struct ts_state *); + void (*destroy)(struct ts_config *); + void * (*get_pattern)(struct ts_config *); + unsigned int (*get_pattern_len)(struct ts_config *); + struct module *owner; + struct list_head list; +}; + +/** + * struct ts_config - search configuration + * @ops: operations of chosen algorithm + * @get_next_block: callback to fetch the next block to search in + * @finish: callback to finalize a search + */ +struct ts_config +{ + struct ts_ops *ops; + + /** + * get_next_block - fetch next block of data + * @consumed: number of bytes consumed by the caller + * @dst: destination buffer + * @conf: search configuration + * @state: search state + * + * Called repeatedly until 0 is returned. Must assign the + * head of the next block of data to &*dst and return the length + * of the block or 0 if at the end. consumed == 0 indicates + * a new search. May store/read persistant values in state->cb. 
+ */ + unsigned int (*get_next_block)(unsigned int consumed, + const u8 **dst, + struct ts_config *conf, + struct ts_state *state); + + /** + * finish - finalize/clean a series of get_next_block() calls + * @conf: search configuration + * @state: search state + * + * Called after the last use of get_next_block(), may be used + * to cleanup any leftovers. + */ + void (*finish)(struct ts_config *conf, + struct ts_state *state); +}; + +/** + * textsearch_next - continue searching for a pattern + * @conf: search configuration + * @state: search state + * + * Continues a search looking for more occurrences of the pattern. + * textsearch_find() must be called to find the first occurrence + * in order to reset the state. + * + * Returns the position of the next occurrence of the pattern or + * UINT_MAX if not match was found. + */ +static inline unsigned int textsearch_next(struct ts_config *conf, + struct ts_state *state) +{ + unsigned int ret = conf->ops->find(conf, state); + + if (conf->finish) + conf->finish(conf, state); + + return ret; +} + +/** + * textsearch_find - start searching for a pattern + * @conf: search configuration + * @state: search state + * + * Returns the position of first occurrence of the pattern or + * UINT_MAX if no match was found. + */ +static inline unsigned int textsearch_find(struct ts_config *conf, + struct ts_state *state) +{ + state->offset = 0; + return textsearch_next(conf, state); +} + +/** + * textsearch_get_pattern - return head of the pattern + * @conf: search configuration + */ +static inline void *textsearch_get_pattern(struct ts_config *conf) +{ + return conf->ops->get_pattern(conf); +} + +/** + * textsearch_get_pattern_len - return length of the pattern + * @conf: search configuration + */ +static inline unsigned int textsearch_get_pattern_len(struct ts_config *conf) +{ + return conf->ops->get_pattern_len(conf); +} + +extern int textsearch_register(struct ts_ops *); +extern int textsearch_unregister(struct ts_ops *); +extern struct ts_config *textsearch_prepare(const char *, const void *, + unsigned int, int, int); +extern void textsearch_destroy(struct ts_config *conf); +extern unsigned int textsearch_find_continuous(struct ts_config *, + struct ts_state *, + const void *, unsigned int); + + +#define TS_PRIV_ALIGNTO 8 +#define TS_PRIV_ALIGN(len) (((len) + TS_PRIV_ALIGNTO-1) & ~(TS_PRIV_ALIGNTO-1)) + +static inline struct ts_config *alloc_ts_config(size_t payload, int gfp_mask) +{ + struct ts_config *conf; + + conf = kmalloc(TS_PRIV_ALIGN(sizeof(*conf)) + payload, gfp_mask); + if (conf == NULL) + return ERR_PTR(-ENOMEM); + + memset(conf, 0, TS_PRIV_ALIGN(sizeof(*conf)) + payload); + return conf; +} + +static inline void *ts_config_priv(struct ts_config *conf) +{ + return ((u8 *) conf + TS_PRIV_ALIGN(sizeof(struct ts_config))); +} + +#endif /* __KERNEL__ */ + +#endif diff --git a/include/linux/textsearch_fsm.h b/include/linux/textsearch_fsm.h new file mode 100644 index 000000000000..fdfa078c66e5 --- /dev/null +++ b/include/linux/textsearch_fsm.h @@ -0,0 +1,48 @@ +#ifndef __LINUX_TEXTSEARCH_FSM_H +#define __LINUX_TEXTSEARCH_FSM_H + +#include + +enum { + TS_FSM_SPECIFIC, /* specific character */ + TS_FSM_WILDCARD, /* any character */ + TS_FSM_DIGIT, /* isdigit() */ + TS_FSM_XDIGIT, /* isxdigit() */ + TS_FSM_PRINT, /* isprint() */ + TS_FSM_ALPHA, /* isalpha() */ + TS_FSM_ALNUM, /* isalnum() */ + TS_FSM_ASCII, /* isascii() */ + TS_FSM_CNTRL, /* iscntrl() */ + TS_FSM_GRAPH, /* isgraph() */ + TS_FSM_LOWER, /* islower() */ + TS_FSM_UPPER, /* isupper() */ + 
TS_FSM_PUNCT, /* ispunct() */ + TS_FSM_SPACE, /* isspace() */ + __TS_FSM_TYPE_MAX, +}; +#define TS_FSM_TYPE_MAX (__TS_FSM_TYPE_MAX - 1) + +enum { + TS_FSM_SINGLE, /* 1 occurrence */ + TS_FSM_PERHAPS, /* 1 or 0 occurrence */ + TS_FSM_ANY, /* 0..n occurrences */ + TS_FSM_MULTI, /* 1..n occurrences */ + TS_FSM_HEAD_IGNORE, /* 0..n ignored occurrences at head */ + __TS_FSM_RECUR_MAX, +}; +#define TS_FSM_RECUR_MAX (__TS_FSM_RECUR_MAX - 1) + +/** + * struct ts_fsm_token - state machine token (state) + * @type: type of token + * @recur: number of recurrences + * @value: character value for TS_FSM_SPECIFIC + */ +struct ts_fsm_token +{ + __u16 type; + __u8 recur; + __u8 value; +}; + +#endif diff --git a/include/net/tcp.h b/include/net/tcp.h index e427cf35915c..ec9e20c27179 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -1162,12 +1162,14 @@ extern void tcp_init_congestion_control(struct tcp_sock *tp); extern void tcp_cleanup_congestion_control(struct tcp_sock *tp); extern int tcp_set_default_congestion_control(const char *name); extern void tcp_get_default_congestion_control(char *name); +extern int tcp_set_congestion_control(struct tcp_sock *tp, const char *name); -extern struct tcp_congestion_ops tcp_reno; +extern struct tcp_congestion_ops tcp_init_congestion_ops; extern u32 tcp_reno_ssthresh(struct tcp_sock *tp); extern void tcp_reno_cong_avoid(struct tcp_sock *tp, u32 ack, u32 rtt, u32 in_flight, int flag); extern u32 tcp_reno_min_cwnd(struct tcp_sock *tp); +extern struct tcp_congestion_ops tcp_reno; static inline void tcp_set_ca_state(struct tcp_sock *tp, u8 ca_state) { diff --git a/lib/Kconfig b/lib/Kconfig index 2d4d4e3bc4aa..455833a9e31a 100644 --- a/lib/Kconfig +++ b/lib/Kconfig @@ -63,5 +63,32 @@ config REED_SOLOMON_ENC16 config REED_SOLOMON_DEC16 boolean -endmenu +config TEXTSEARCH + boolean "Textsearch infrastructure" + default y + help + Say Y here if you want to provide a textsearch infrastructure + to other subsystems. +config TEXTSEARCH_KMP + depends on TEXTSEARCH + tristate "Knuth-Morris-Pratt" + help + Say Y here if you want to be able to search text using the + Knuth-Morris-Pratt textsearch algorithm. + + To compile this code as a module, choose M here: the + module will be called ts_kmp. + +config TEXTSEARCH_FSM + depends on TEXTSEARCH + tristate "Finite state machine" + help + Say Y here if you want to be able to search text using a + naive finite state machine approach implementing a subset + of regular expressions. + + To compile this code as a module, choose M here: the + module will be called ts_fsm. + +endmenu diff --git a/lib/Makefile b/lib/Makefile index dcb4231916e2..beed1585294c 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -36,6 +36,10 @@ obj-$(CONFIG_ZLIB_INFLATE) += zlib_inflate/ obj-$(CONFIG_ZLIB_DEFLATE) += zlib_deflate/ obj-$(CONFIG_REED_SOLOMON) += reed_solomon/ +obj-$(CONFIG_TEXTSEARCH) += textsearch.o +obj-$(CONFIG_TEXTSEARCH_KMP) += ts_kmp.o +obj-$(CONFIG_TEXTSEARCH_FSM) += ts_fsm.o + hostprogs-y := gen_crc32table clean-files := crc32table.h diff --git a/lib/textsearch.c b/lib/textsearch.c new file mode 100644 index 000000000000..1e934c196f0f --- /dev/null +++ b/lib/textsearch.c @@ -0,0 +1,317 @@ +/* + * lib/textsearch.c Generic text search interface + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ * + * Authors: Thomas Graf + * Pablo Neira Ayuso + * + * ========================================================================== + * + * INTRODUCTION + * + * The textsearch infrastructure provides text searching facilities for + * both linear and non-linear data. Individual search algorithms are + * implemented in modules and chosen by the user. + * + * ARCHITECTURE + * + * User + * +----------------+ + * | finish()|<--------------(6)-----------------+ + * |get_next_block()|<--------------(5)---------------+ | + * | | Algorithm | | + * | | +------------------------------+ + * | | | init() find() destroy() | + * | | +------------------------------+ + * | | Core API ^ ^ ^ + * | | +---------------+ (2) (4) (8) + * | (1)|----->| prepare() |---+ | | + * | (3)|----->| find()/next() |-----------+ | + * | (7)|----->| destroy() |----------------------+ + * +----------------+ +---------------+ + * + * (1) User configures a search by calling _prepare() specifying the + * search parameters such as the pattern and algorithm name. + * (2) Core requests the algorithm to allocate and initialize a search + * configuration according to the specified parameters. + * (3) User starts the search(es) by calling _find() or _next() to + * fetch subsequent occurrences. A state variable is provided + * to the algorithm to store persistent variables. + * (4) Core eventually resets the search offset and forwards the find() + * request to the algorithm. + * (5) Algorithm calls get_next_block() provided by the user continuously + * to fetch the data to be searched, block by block. + * (6) Algorithm invokes finish() after the last call to get_next_block() + * to clean up any leftovers from get_next_block(). (Optional) + * (7) User destroys the configuration by calling _destroy(). + * (8) Core notifies the algorithm to destroy algorithm specific + * allocations. (Optional) + * + * USAGE + * + * Before a search can be performed, a configuration must be created + * by calling textsearch_prepare() specifying the searching algorithm and + * the pattern to look for. The returned configuration may then be used + * an arbitrary number of times and even in parallel as long as a + * separate struct ts_state variable is provided to every instance. + * + * The actual search is performed by either calling textsearch_find_- + * continuous() for linear data or by providing your own get_next_block() + * implementation and calling textsearch_find(). Both functions return + * the position of the first occurrence of the pattern or UINT_MAX if + * no match was found. Subsequent occurrences can be found by calling + * textsearch_next() regardless of the linearity of the data. + * + * Once you're done using a configuration, it must be given back via + * textsearch_destroy().
+ * + * EXAMPLE + * + * int pos; + * struct ts_config *conf; + * struct ts_state state; + * const char *pattern = "chicken"; + * const char *example = "We dance the funky chicken"; + * + * conf = textsearch_prepare("kmp", pattern, strlen(pattern), + * GFP_KERNEL, TS_AUTOLOAD); + * if (IS_ERR(conf)) { + * err = PTR_ERR(conf); + * goto errout; + * } + * + * pos = textsearch_find_continuous(conf, &state, example, strlen(example)); + * if (pos != UINT_MAX) + * panic("Oh my god, dancing chickens at %d\n", pos); + * + * textsearch_destroy(conf); + * + * ========================================================================== + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +static LIST_HEAD(ts_ops); +static DEFINE_SPINLOCK(ts_mod_lock); + +static inline struct ts_ops *lookup_ts_algo(const char *name) +{ + struct ts_ops *o; + + rcu_read_lock(); + list_for_each_entry_rcu(o, &ts_ops, list) { + if (!strcmp(name, o->name)) { + if (!try_module_get(o->owner)) + o = NULL; + rcu_read_unlock(); + return o; + } + } + rcu_read_unlock(); + + return NULL; +} + +/** + * textsearch_register - register a textsearch module + * @ops: operations lookup table + * + * This function must be called by textsearch modules to announce + * their presence. The specified &@ops must have %name set to a + * unique identifier and the callbacks find(), init(), get_pattern(), + * and get_pattern_len() must be implemented. + * + * Returns 0 or -EEXIST if another module has already registered + * with the same name. + */ +int textsearch_register(struct ts_ops *ops) +{ + int err = -EEXIST; + struct ts_ops *o; + + if (ops->name == NULL || ops->find == NULL || ops->init == NULL || + ops->get_pattern == NULL || ops->get_pattern_len == NULL) + return -EINVAL; + + spin_lock(&ts_mod_lock); + list_for_each_entry(o, &ts_ops, list) { + if (!strcmp(ops->name, o->name)) + goto errout; + } + + list_add_tail_rcu(&ops->list, &ts_ops); + err = 0; +errout: + spin_unlock(&ts_mod_lock); + return err; +} + +/** + * textsearch_unregister - unregister a textsearch module + * @ops: operations lookup table + * + * This function must be called by textsearch modules to announce + * their disappearance, for example when the module gets unloaded. + * The &ops parameter must be the same as the one used during + * registration. + * + * Returns 0 on success or -ENOENT if no matching textsearch + * registration was found. + */ +int textsearch_unregister(struct ts_ops *ops) +{ + int err = 0; + struct ts_ops *o; + + spin_lock(&ts_mod_lock); + list_for_each_entry(o, &ts_ops, list) { + if (o == ops) { + list_del_rcu(&o->list); + goto out; + } + } + + err = -ENOENT; +out: + spin_unlock(&ts_mod_lock); + return err; +} + +struct ts_linear_state +{ + unsigned int len; + const void *data; +}; + +static unsigned int get_linear_data(unsigned int consumed, const u8 **dst, + struct ts_config *conf, + struct ts_state *state) +{ + struct ts_linear_state *st = (struct ts_linear_state *) state->cb; + + if (likely(consumed < st->len)) { + *dst = st->data + consumed; + return st->len - consumed; + } + + return 0; +} + +/** + * textsearch_find_continuous - search a pattern in continuous/linear data + * @conf: search configuration + * @state: search state + * @data: data to search in + * @len: length of data + * + * A simplified version of textsearch_find() for continuous/linear data. + * Call textsearch_next() to retrieve subsequent matches.
+ * + * Returns the position of the first occurrence of the pattern or + * UINT_MAX if no occurrence was found. + */ +unsigned int textsearch_find_continuous(struct ts_config *conf, + struct ts_state *state, + const void *data, unsigned int len) +{ + struct ts_linear_state *st = (struct ts_linear_state *) state->cb; + + conf->get_next_block = get_linear_data; + st->data = data; + st->len = len; + + return textsearch_find(conf, state); +} + +/** + * textsearch_prepare - Prepare a search + * @algo: name of search algorithm + * @pattern: pattern data + * @len: length of pattern + * @gfp_mask: allocation mask + * @flags: search flags + * + * Looks up the search algorithm module and creates a new textsearch + * configuration for the specified pattern. Upon completion all + * necessary refcnts are held and the configuration must be given back + * using textsearch_destroy() after usage. + * + * Note: The format of the pattern may not be compatible between + * the various search algorithms. + * + * Returns a new textsearch configuration according to the specified + * parameters or an ERR_PTR(). + */ +struct ts_config *textsearch_prepare(const char *algo, const void *pattern, + unsigned int len, int gfp_mask, int flags) +{ + int err = -ENOENT; + struct ts_config *conf; + struct ts_ops *ops; + + ops = lookup_ts_algo(algo); +#ifdef CONFIG_KMOD + /* + * Why not always autoload you may ask. Some users are + * in a situation where requesting a module may deadlock, + * especially when the module is located on an NFS mount. + */ + if (ops == NULL && flags & TS_AUTOLOAD) { + request_module("ts_%s", algo); + ops = lookup_ts_algo(algo); + } +#endif + + if (ops == NULL) + goto errout; + + conf = ops->init(pattern, len, gfp_mask); + if (IS_ERR(conf)) { + err = PTR_ERR(conf); + goto errout; + } + + conf->ops = ops; + return conf; + +errout: + if (ops) + module_put(ops->owner); + + return ERR_PTR(err); +} + +/** + * textsearch_destroy - destroy a search configuration + * @conf: search configuration + * + * Releases all references held by the configuration and frees + * up the memory. + */ +void textsearch_destroy(struct ts_config *conf) +{ + if (conf->ops) { + if (conf->ops->destroy) + conf->ops->destroy(conf); + module_put(conf->ops->owner); + } + + kfree(conf); +} + +EXPORT_SYMBOL(textsearch_register); +EXPORT_SYMBOL(textsearch_unregister); +EXPORT_SYMBOL(textsearch_prepare); +EXPORT_SYMBOL(textsearch_find_continuous); +EXPORT_SYMBOL(textsearch_destroy); diff --git a/lib/ts_fsm.c b/lib/ts_fsm.c new file mode 100644 index 000000000000..d27c0a072940 --- /dev/null +++ b/lib/ts_fsm.c @@ -0,0 +1,338 @@ +/* + * lib/ts_fsm.c A naive finite state machine text search approach + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + * Authors: Thomas Graf + * + * ========================================================================== + * + * A finite state machine consists of n states (struct ts_fsm_token) + * representing the pattern as a finite automaton. The data is read + * sequentially on an octet basis. Every state token specifies the number + * of recurrences and the type of value accepted which can be either a + * specific character or a ctype-based set of characters. The available + * types of recurrence are 1, (0|1), [0 n], and [1 n].
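For illustration, here is a minimal sketch of how a pattern for the "fsm" algorithm could be built from the token model just described. It uses only the struct ts_fsm_token, TS_FSM_* and textsearch_prepare() definitions introduced by this patch; the pattern and the helper name are invented for the example and are not part of the patch. The tokens match "id=" followed by one or more digits anywhere in the data; the leading TS_FSM_HEAD_IGNORE token makes the match non-strict, as explained in the next paragraph.

#include <linux/textsearch.h>
#include <linux/textsearch_fsm.h>

/* Hypothetical example pattern: match "id=" followed by 1..n digits. */
static struct ts_fsm_token id_tokens[] = {
	{ .type = TS_FSM_WILDCARD, .recur = TS_FSM_HEAD_IGNORE },
	{ .type = TS_FSM_SPECIFIC, .recur = TS_FSM_SINGLE, .value = 'i' },
	{ .type = TS_FSM_SPECIFIC, .recur = TS_FSM_SINGLE, .value = 'd' },
	{ .type = TS_FSM_SPECIFIC, .recur = TS_FSM_SINGLE, .value = '=' },
	{ .type = TS_FSM_DIGIT,    .recur = TS_FSM_MULTI },
};

static struct ts_config *prepare_id_match(void)
{
	/* For the "fsm" algorithm, the token array itself is the pattern. */
	return textsearch_prepare("fsm", id_tokens, sizeof(id_tokens),
				  GFP_KERNEL, TS_AUTOLOAD);
}

As with the other algorithms, a separate struct ts_state is still needed for every concurrent search using this configuration.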
+ * + * The algorithm differs between strict/non-strict mode specyfing + * whether the pattern has to start at the first octect. Strict mode + * is enabled by default and can be disabled by inserting + * TS_FSM_HEAD_IGNORE as the first token in the chain. + * + * The runtime performance of the algorithm should be around O(n), + * however while in strict mode the average runtime can be better. + */ + +#include +#include +#include +#include +#include +#include +#include + +struct ts_fsm +{ + unsigned int ntokens; + struct ts_fsm_token tokens[0]; +}; + +/* other values derived from ctype.h */ +#define _A 0x100 /* ascii */ +#define _W 0x200 /* wildcard */ + +/* Map to _ctype flags and some magic numbers */ +static u16 token_map[TS_FSM_TYPE_MAX+1] = { + [TS_FSM_SPECIFIC] = 0, + [TS_FSM_WILDCARD] = _W, + [TS_FSM_CNTRL] = _C, + [TS_FSM_LOWER] = _L, + [TS_FSM_UPPER] = _U, + [TS_FSM_PUNCT] = _P, + [TS_FSM_SPACE] = _S, + [TS_FSM_DIGIT] = _D, + [TS_FSM_XDIGIT] = _D | _X, + [TS_FSM_ALPHA] = _U | _L, + [TS_FSM_ALNUM] = _U | _L | _D, + [TS_FSM_PRINT] = _P | _U | _L | _D | _SP, + [TS_FSM_GRAPH] = _P | _U | _L | _D, + [TS_FSM_ASCII] = _A, +}; + +static u16 token_lookup_tbl[256] = { +_W|_A|_C, _W|_A|_C, _W|_A|_C, _W|_A|_C, /* 0- 3 */ +_W|_A|_C, _W|_A|_C, _W|_A|_C, _W|_A|_C, /* 4- 7 */ +_W|_A|_C, _W|_A|_C|_S, _W|_A|_C|_S, _W|_A|_C|_S, /* 8- 11 */ +_W|_A|_C|_S, _W|_A|_C|_S, _W|_A|_C, _W|_A|_C, /* 12- 15 */ +_W|_A|_C, _W|_A|_C, _W|_A|_C, _W|_A|_C, /* 16- 19 */ +_W|_A|_C, _W|_A|_C, _W|_A|_C, _W|_A|_C, /* 20- 23 */ +_W|_A|_C, _W|_A|_C, _W|_A|_C, _W|_A|_C, /* 24- 27 */ +_W|_A|_C, _W|_A|_C, _W|_A|_C, _W|_A|_C, /* 28- 31 */ +_W|_A|_S|_SP, _W|_A|_P, _W|_A|_P, _W|_A|_P, /* 32- 35 */ +_W|_A|_P, _W|_A|_P, _W|_A|_P, _W|_A|_P, /* 36- 39 */ +_W|_A|_P, _W|_A|_P, _W|_A|_P, _W|_A|_P, /* 40- 43 */ +_W|_A|_P, _W|_A|_P, _W|_A|_P, _W|_A|_P, /* 44- 47 */ +_W|_A|_D, _W|_A|_D, _W|_A|_D, _W|_A|_D, /* 48- 51 */ +_W|_A|_D, _W|_A|_D, _W|_A|_D, _W|_A|_D, /* 52- 55 */ +_W|_A|_D, _W|_A|_D, _W|_A|_P, _W|_A|_P, /* 56- 59 */ +_W|_A|_P, _W|_A|_P, _W|_A|_P, _W|_A|_P, /* 60- 63 */ +_W|_A|_P, _W|_A|_U|_X, _W|_A|_U|_X, _W|_A|_U|_X, /* 64- 67 */ +_W|_A|_U|_X, _W|_A|_U|_X, _W|_A|_U|_X, _W|_A|_U, /* 68- 71 */ +_W|_A|_U, _W|_A|_U, _W|_A|_U, _W|_A|_U, /* 72- 75 */ +_W|_A|_U, _W|_A|_U, _W|_A|_U, _W|_A|_U, /* 76- 79 */ +_W|_A|_U, _W|_A|_U, _W|_A|_U, _W|_A|_U, /* 80- 83 */ +_W|_A|_U, _W|_A|_U, _W|_A|_U, _W|_A|_U, /* 84- 87 */ +_W|_A|_U, _W|_A|_U, _W|_A|_U, _W|_A|_P, /* 88- 91 */ +_W|_A|_P, _W|_A|_P, _W|_A|_P, _W|_A|_P, /* 92- 95 */ +_W|_A|_P, _W|_A|_L|_X, _W|_A|_L|_X, _W|_A|_L|_X, /* 96- 99 */ +_W|_A|_L|_X, _W|_A|_L|_X, _W|_A|_L|_X, _W|_A|_L, /* 100-103 */ +_W|_A|_L, _W|_A|_L, _W|_A|_L, _W|_A|_L, /* 104-107 */ +_W|_A|_L, _W|_A|_L, _W|_A|_L, _W|_A|_L, /* 108-111 */ +_W|_A|_L, _W|_A|_L, _W|_A|_L, _W|_A|_L, /* 112-115 */ +_W|_A|_L, _W|_A|_L, _W|_A|_L, _W|_A|_L, /* 116-119 */ +_W|_A|_L, _W|_A|_L, _W|_A|_L, _W|_A|_P, /* 120-123 */ +_W|_A|_P, _W|_A|_P, _W|_A|_P, _W|_A|_C, /* 124-127 */ +_W, _W, _W, _W, /* 128-131 */ +_W, _W, _W, _W, /* 132-135 */ +_W, _W, _W, _W, /* 136-139 */ +_W, _W, _W, _W, /* 140-143 */ +_W, _W, _W, _W, /* 144-147 */ +_W, _W, _W, _W, /* 148-151 */ +_W, _W, _W, _W, /* 152-155 */ +_W, _W, _W, _W, /* 156-159 */ +_W|_S|_SP, _W|_P, _W|_P, _W|_P, /* 160-163 */ +_W|_P, _W|_P, _W|_P, _W|_P, /* 164-167 */ +_W|_P, _W|_P, _W|_P, _W|_P, /* 168-171 */ +_W|_P, _W|_P, _W|_P, _W|_P, /* 172-175 */ +_W|_P, _W|_P, _W|_P, _W|_P, /* 176-179 */ +_W|_P, _W|_P, _W|_P, _W|_P, /* 180-183 */ +_W|_P, _W|_P, _W|_P, _W|_P, /* 184-187 */ +_W|_P, _W|_P, _W|_P, _W|_P, /* 
188-191 */ +_W|_U, _W|_U, _W|_U, _W|_U, /* 192-195 */ +_W|_U, _W|_U, _W|_U, _W|_U, /* 196-199 */ +_W|_U, _W|_U, _W|_U, _W|_U, /* 200-203 */ +_W|_U, _W|_U, _W|_U, _W|_U, /* 204-207 */ +_W|_U, _W|_U, _W|_U, _W|_U, /* 208-211 */ +_W|_U, _W|_U, _W|_U, _W|_P, /* 212-215 */ +_W|_U, _W|_U, _W|_U, _W|_U, /* 216-219 */ +_W|_U, _W|_U, _W|_U, _W|_L, /* 220-223 */ +_W|_L, _W|_L, _W|_L, _W|_L, /* 224-227 */ +_W|_L, _W|_L, _W|_L, _W|_L, /* 228-231 */ +_W|_L, _W|_L, _W|_L, _W|_L, /* 232-235 */ +_W|_L, _W|_L, _W|_L, _W|_L, /* 236-239 */ +_W|_L, _W|_L, _W|_L, _W|_L, /* 240-243 */ +_W|_L, _W|_L, _W|_L, _W|_P, /* 244-247 */ +_W|_L, _W|_L, _W|_L, _W|_L, /* 248-251 */ +_W|_L, _W|_L, _W|_L, _W|_L}; /* 252-255 */ + +static inline int match_token(struct ts_fsm_token *t, u8 d) +{ + if (t->type) + return (token_lookup_tbl[d] & t->type) != 0; + else + return t->value == d; +} + +static unsigned int fsm_find(struct ts_config *conf, struct ts_state *state) +{ + struct ts_fsm *fsm = ts_config_priv(conf); + struct ts_fsm_token *cur = NULL, *next; + unsigned int match_start, block_idx = 0, tok_idx; + unsigned block_len = 0, strict, consumed = state->offset; + const u8 *data; + +#define GET_NEXT_BLOCK() \ +({ consumed += block_idx; \ + block_idx = 0; \ + block_len = conf->get_next_block(consumed, &data, conf, state); }) + +#define TOKEN_MISMATCH() \ + do { \ + if (strict) \ + goto no_match; \ + block_idx++; \ + goto startover; \ + } while(0) + +#define end_of_data() unlikely(block_idx >= block_len && !GET_NEXT_BLOCK()) + + if (end_of_data()) + goto no_match; + + strict = fsm->tokens[0].recur != TS_FSM_HEAD_IGNORE; + +startover: + match_start = consumed + block_idx; + + for (tok_idx = 0; tok_idx < fsm->ntokens; tok_idx++) { + cur = &fsm->tokens[tok_idx]; + + if (likely(tok_idx < (fsm->ntokens - 1))) + next = &fsm->tokens[tok_idx + 1]; + else + next = NULL; + + switch (cur->recur) { + case TS_FSM_SINGLE: + if (end_of_data()) + goto no_match; + + if (!match_token(cur, data[block_idx])) + TOKEN_MISMATCH(); + break; + + case TS_FSM_PERHAPS: + if (end_of_data() || + !match_token(cur, data[block_idx])) + continue; + break; + + case TS_FSM_MULTI: + if (end_of_data()) + goto no_match; + + if (!match_token(cur, data[block_idx])) + TOKEN_MISMATCH(); + + block_idx++; + /* fall through */ + + case TS_FSM_ANY: + if (next == NULL) + goto found_match; + + if (end_of_data()) + continue; + + while (!match_token(next, data[block_idx])) { + if (!match_token(cur, data[block_idx])) + TOKEN_MISMATCH(); + block_idx++; + if (end_of_data()) + goto no_match; + } + continue; + + /* + * Optimization: Prefer small local loop over jumping + * back and forth until garbage at head is munched. + */ + case TS_FSM_HEAD_IGNORE: + if (end_of_data()) + continue; + + while (!match_token(next, data[block_idx])) { + /* + * Special case, don't start over upon + * a mismatch, give the user the + * chance to specify the type of data + * allowed to be ignored. 
+ */ + if (!match_token(cur, data[block_idx])) + goto no_match; + + block_idx++; + if (end_of_data()) + goto no_match; + } + + match_start = consumed + block_idx; + continue; + } + + block_idx++; + } + + if (end_of_data()) + goto found_match; + +no_match: + return UINT_MAX; + +found_match: + state->offset = consumed + block_idx; + return match_start; +} + +static struct ts_config *fsm_init(const void *pattern, unsigned int len, + int gfp_mask) +{ + int i, err = -EINVAL; + struct ts_config *conf; + struct ts_fsm *fsm; + struct ts_fsm_token *tokens = (struct ts_fsm_token *) pattern; + unsigned int ntokens = len / sizeof(*tokens); + size_t priv_size = sizeof(*fsm) + len; + + if (len % sizeof(struct ts_fsm_token) || ntokens < 1) + goto errout; + + for (i = 0; i < ntokens; i++) { + struct ts_fsm_token *t = &tokens[i]; + + if (t->type > TS_FSM_TYPE_MAX || t->recur > TS_FSM_RECUR_MAX) + goto errout; + + if (t->recur == TS_FSM_HEAD_IGNORE && + (i != 0 || i == (ntokens - 1))) + goto errout; + } + + conf = alloc_ts_config(priv_size, gfp_mask); + if (IS_ERR(conf)) + return conf; + + fsm = ts_config_priv(conf); + fsm->ntokens = ntokens; + memcpy(fsm->tokens, pattern, len); + + for (i = 0; i < fsm->ntokens; i++) { + struct ts_fsm_token *t = &fsm->tokens[i]; + t->type = token_map[t->type]; + } + + return conf; + +errout: + return ERR_PTR(err); +} + +static void *fsm_get_pattern(struct ts_config *conf) +{ + struct ts_fsm *fsm = ts_config_priv(conf); + return fsm->tokens; +} + +static unsigned int fsm_get_pattern_len(struct ts_config *conf) +{ + struct ts_fsm *fsm = ts_config_priv(conf); + return fsm->ntokens * sizeof(struct ts_fsm_token); +} + +static struct ts_ops fsm_ops = { + .name = "fsm", + .find = fsm_find, + .init = fsm_init, + .get_pattern = fsm_get_pattern, + .get_pattern_len = fsm_get_pattern_len, + .owner = THIS_MODULE, + .list = LIST_HEAD_INIT(fsm_ops.list) +}; + +static int __init init_fsm(void) +{ + return textsearch_register(&fsm_ops); +} + +static void __exit exit_fsm(void) +{ + textsearch_unregister(&fsm_ops); +} + +MODULE_LICENSE("GPL"); + +module_init(init_fsm); +module_exit(exit_fsm); diff --git a/lib/ts_kmp.c b/lib/ts_kmp.c new file mode 100644 index 000000000000..73266b975585 --- /dev/null +++ b/lib/ts_kmp.c @@ -0,0 +1,145 @@ +/* + * lib/ts_kmp.c Knuth-Morris-Pratt text search implementation + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + * Authors: Thomas Graf + * + * ========================================================================== + * + * Implements a linear-time string-matching algorithm due to Knuth, + * Morris, and Pratt [1]. Their algorithm avoids the explicit + * computation of the transition function DELTA altogether. Its + * matching time is O(n), for n being length(text), using just an + * auxiliary function PI[1..m], for m being length(pattern), + * precomputed from the pattern in time O(m). The array PI allows + * the transition function DELTA to be computed efficiently + * "on the fly" as needed. Roughly speaking, for any state + * "q" = 0,1,...,m and any character "a" in SIGMA, the value + * PI["q"] contains the information that is independent of "a" and + * is needed to compute DELTA("q", "a") [2]. 
Since the array PI + * has only m entries, whereas DELTA has O(m|SIGMA|) entries, we + * save a factor of |SIGMA| in the preprocessing time by computing + * PI rather than DELTA. + * + * [1] Cormen, Leiserson, Rivest, Stein + * Introdcution to Algorithms, 2nd Edition, MIT Press + * [2] See finite automation theory + */ + +#include +#include +#include +#include +#include + +struct ts_kmp +{ + u8 * pattern; + unsigned int pattern_len; + unsigned int prefix_tbl[0]; +}; + +static unsigned int kmp_find(struct ts_config *conf, struct ts_state *state) +{ + struct ts_kmp *kmp = ts_config_priv(conf); + unsigned int i, q = 0, text_len, consumed = state->offset; + const u8 *text; + + for (;;) { + text_len = conf->get_next_block(consumed, &text, conf, state); + + if (unlikely(text_len == 0)) + break; + + for (i = 0; i < text_len; i++) { + while (q > 0 && kmp->pattern[q] != text[i]) + q = kmp->prefix_tbl[q - 1]; + if (kmp->pattern[q] == text[i]) + q++; + if (unlikely(q == kmp->pattern_len)) { + state->offset = consumed + i + 1; + return state->offset - kmp->pattern_len; + } + } + + consumed += text_len; + } + + return UINT_MAX; +} + +static inline void compute_prefix_tbl(const u8 *pattern, unsigned int len, + unsigned int *prefix_tbl) +{ + unsigned int k, q; + + for (k = 0, q = 1; q < len; q++) { + while (k > 0 && pattern[k] != pattern[q]) + k = prefix_tbl[k-1]; + if (pattern[k] == pattern[q]) + k++; + prefix_tbl[q] = k; + } +} + +static struct ts_config *kmp_init(const void *pattern, unsigned int len, + int gfp_mask) +{ + struct ts_config *conf; + struct ts_kmp *kmp; + unsigned int prefix_tbl_len = len * sizeof(unsigned int); + size_t priv_size = sizeof(*kmp) + len + prefix_tbl_len; + + conf = alloc_ts_config(priv_size, gfp_mask); + if (IS_ERR(conf)) + return conf; + + kmp = ts_config_priv(conf); + kmp->pattern_len = len; + compute_prefix_tbl(pattern, len, kmp->prefix_tbl); + kmp->pattern = (u8 *) kmp->prefix_tbl + prefix_tbl_len; + memcpy(kmp->pattern, pattern, len); + + return conf; +} + +static void *kmp_get_pattern(struct ts_config *conf) +{ + struct ts_kmp *kmp = ts_config_priv(conf); + return kmp->pattern; +} + +static unsigned int kmp_get_pattern_len(struct ts_config *conf) +{ + struct ts_kmp *kmp = ts_config_priv(conf); + return kmp->pattern_len; +} + +static struct ts_ops kmp_ops = { + .name = "kmp", + .find = kmp_find, + .init = kmp_init, + .get_pattern = kmp_get_pattern, + .get_pattern_len = kmp_get_pattern_len, + .owner = THIS_MODULE, + .list = LIST_HEAD_INIT(kmp_ops.list) +}; + +static int __init init_kmp(void) +{ + return textsearch_register(&kmp_ops); +} + +static void __exit exit_kmp(void) +{ + textsearch_unregister(&kmp_ops); +} + +MODULE_LICENSE("GPL"); + +module_init(init_kmp); +module_exit(exit_kmp); diff --git a/net/core/dev.c b/net/core/dev.c index ab935778ce81..7016e0c36b3d 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -115,18 +115,6 @@ #endif /* CONFIG_NET_RADIO */ #include -/* This define, if set, will randomly drop a packet when congestion - * is more than moderate. It helps fairness in the multi-interface - * case when one of them is a hog, but it kills performance for the - * single interface case so it is off now by default. - */ -#undef RAND_LIE - -/* Setting this will sample the queue lengths and thus congestion - * via a timer instead of as each packet is received. - */ -#undef OFFLINE_SAMPLE - /* * The list of packet types we will receive (as opposed to discard) * and the routines to invoke. 
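As a side note to the KMP module above, the prefix table PI described in its header comment can be reproduced with a small standalone sketch. This is an editor's illustration that mirrors compute_prefix_tbl(), not code from the patch; for the CLRS sample pattern "ababaca" it prints 0 0 1 2 3 0 1.

#include <stdio.h>
#include <string.h>

/* Same construction as compute_prefix_tbl() in ts_kmp.c above. */
static void compute_prefix_tbl(const unsigned char *pattern, unsigned int len,
			       unsigned int *prefix_tbl)
{
	unsigned int k, q;

	for (k = 0, q = 1; q < len; q++) {
		while (k > 0 && pattern[k] != pattern[q])
			k = prefix_tbl[k - 1];
		if (pattern[k] == pattern[q])
			k++;
		prefix_tbl[q] = k;
	}
}

int main(void)
{
	const unsigned char pattern[] = "ababaca";
	unsigned int pi[sizeof(pattern) - 1], i;

	/* PI[0] is 0 by definition; in ts_kmp.c the zeroed allocation covers it. */
	memset(pi, 0, sizeof(pi));
	compute_prefix_tbl(pattern, sizeof(pattern) - 1, pi);

	for (i = 0; i < sizeof(pattern) - 1; i++)
		printf("%u ", pi[i]);
	printf("\n");
	return 0;
}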
@@ -159,11 +147,6 @@ static DEFINE_SPINLOCK(ptype_lock); static struct list_head ptype_base[16]; /* 16 way hashed list */ static struct list_head ptype_all; /* Taps */ -#ifdef OFFLINE_SAMPLE -static void sample_queue(unsigned long dummy); -static struct timer_list samp_timer = TIMER_INITIALIZER(sample_queue, 0, 0); -#endif - /* * The @dev_base list is protected by @dev_base_lock and the rtln * semaphore. @@ -215,7 +198,7 @@ static struct notifier_block *netdev_chain; * Device drivers call our routines to queue packets here. We empty the * queue in the local softnet handler. */ -DEFINE_PER_CPU(struct softnet_data, softnet_data) = { 0, }; +DEFINE_PER_CPU(struct softnet_data, softnet_data) = { NULL }; #ifdef CONFIG_SYSFS extern int netdev_sysfs_init(void); @@ -1363,71 +1346,13 @@ int dev_queue_xmit(struct sk_buff *skb) Receiver routines =======================================================================*/ -int netdev_max_backlog = 300; +int netdev_max_backlog = 1000; +int netdev_budget = 300; int weight_p = 64; /* old backlog weight */ -/* These numbers are selected based on intuition and some - * experimentatiom, if you have more scientific way of doing this - * please go ahead and fix things. - */ -int no_cong_thresh = 10; -int no_cong = 20; -int lo_cong = 100; -int mod_cong = 290; DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, }; -static void get_sample_stats(int cpu) -{ -#ifdef RAND_LIE - unsigned long rd; - int rq; -#endif - struct softnet_data *sd = &per_cpu(softnet_data, cpu); - int blog = sd->input_pkt_queue.qlen; - int avg_blog = sd->avg_blog; - - avg_blog = (avg_blog >> 1) + (blog >> 1); - - if (avg_blog > mod_cong) { - /* Above moderate congestion levels. */ - sd->cng_level = NET_RX_CN_HIGH; -#ifdef RAND_LIE - rd = net_random(); - rq = rd % netdev_max_backlog; - if (rq < avg_blog) /* unlucky bastard */ - sd->cng_level = NET_RX_DROP; -#endif - } else if (avg_blog > lo_cong) { - sd->cng_level = NET_RX_CN_MOD; -#ifdef RAND_LIE - rd = net_random(); - rq = rd % netdev_max_backlog; - if (rq < avg_blog) /* unlucky bastard */ - sd->cng_level = NET_RX_CN_HIGH; -#endif - } else if (avg_blog > no_cong) - sd->cng_level = NET_RX_CN_LOW; - else /* no congestion */ - sd->cng_level = NET_RX_SUCCESS; - - sd->avg_blog = avg_blog; -} - -#ifdef OFFLINE_SAMPLE -static void sample_queue(unsigned long dummy) -{ -/* 10 ms 0r 1ms -- i don't care -- JHS */ - int next_tick = 1; - int cpu = smp_processor_id(); - - get_sample_stats(cpu); - next_tick += jiffies; - mod_timer(&samp_timer, next_tick); -} -#endif - - /** * netif_rx - post buffer to the network code * @skb: buffer to post @@ -1448,7 +1373,6 @@ static void sample_queue(unsigned long dummy) int netif_rx(struct sk_buff *skb) { - int this_cpu; struct softnet_data *queue; unsigned long flags; @@ -1464,38 +1388,22 @@ int netif_rx(struct sk_buff *skb) * short when CPU is congested, but is still operating. 
*/ local_irq_save(flags); - this_cpu = smp_processor_id(); queue = &__get_cpu_var(softnet_data); __get_cpu_var(netdev_rx_stat).total++; if (queue->input_pkt_queue.qlen <= netdev_max_backlog) { if (queue->input_pkt_queue.qlen) { - if (queue->throttle) - goto drop; - enqueue: dev_hold(skb->dev); __skb_queue_tail(&queue->input_pkt_queue, skb); -#ifndef OFFLINE_SAMPLE - get_sample_stats(this_cpu); -#endif local_irq_restore(flags); - return queue->cng_level; + return NET_RX_SUCCESS; } - if (queue->throttle) - queue->throttle = 0; - netif_rx_schedule(&queue->backlog_dev); goto enqueue; } - if (!queue->throttle) { - queue->throttle = 1; - __get_cpu_var(netdev_rx_stat).throttled++; - } - -drop: __get_cpu_var(netdev_rx_stat).dropped++; local_irq_restore(flags); @@ -1780,8 +1688,6 @@ static int process_backlog(struct net_device *backlog_dev, int *budget) smp_mb__before_clear_bit(); netif_poll_enable(backlog_dev); - if (queue->throttle) - queue->throttle = 0; local_irq_enable(); return 0; } @@ -1790,8 +1696,7 @@ static void net_rx_action(struct softirq_action *h) { struct softnet_data *queue = &__get_cpu_var(softnet_data); unsigned long start_time = jiffies; - int budget = netdev_max_backlog; - + int budget = netdev_budget; local_irq_disable(); @@ -2055,15 +1960,9 @@ static int softnet_seq_show(struct seq_file *seq, void *v) struct netif_rx_stats *s = v; seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x\n", - s->total, s->dropped, s->time_squeeze, s->throttled, - s->fastroute_hit, s->fastroute_success, s->fastroute_defer, - s->fastroute_deferred_out, -#if 0 - s->fastroute_latency_reduction -#else - s->cpu_collision -#endif - ); + s->total, s->dropped, s->time_squeeze, 0, + 0, 0, 0, 0, /* was fastroute */ + s->cpu_collision ); return 0; } @@ -3305,9 +3204,6 @@ static int __init net_dev_init(void) queue = &per_cpu(softnet_data, i); skb_queue_head_init(&queue->input_pkt_queue); - queue->throttle = 0; - queue->cng_level = 0; - queue->avg_blog = 10; /* arbitrary non-zero */ queue->completion_queue = NULL; INIT_LIST_HEAD(&queue->poll_list); set_bit(__LINK_STATE_START, &queue->backlog_dev.state); @@ -3316,11 +3212,6 @@ static int __init net_dev_init(void) atomic_set(&queue->backlog_dev.refcnt, 1); } -#ifdef OFFLINE_SAMPLE - samp_timer.expires = jiffies + (10 * HZ); - add_timer(&samp_timer); -#endif - dev_boot_phase = 0; open_softirq(NET_TX_SOFTIRQ, net_tx_action, NULL); diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 6d68c03bc051..bb73b2190ec7 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -1500,6 +1500,159 @@ void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len) skb_split_no_header(skb, skb1, len, pos); } +/** + * skb_prepare_seq_read - Prepare a sequential read of skb data + * @skb: the buffer to read + * @from: lower offset of data to be read + * @to: upper offset of data to be read + * @st: state variable + * + * Initializes the specified state variable. Must be called before + * invoking skb_seq_read() for the first time. 
+ */ +void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from, + unsigned int to, struct skb_seq_state *st) +{ + st->lower_offset = from; + st->upper_offset = to; + st->root_skb = st->cur_skb = skb; + st->frag_idx = st->stepped_offset = 0; + st->frag_data = NULL; +} + +/** + * skb_seq_read - Sequentially read skb data + * @consumed: number of bytes consumed by the caller so far + * @data: destination pointer for data to be returned + * @st: state variable + * + * Reads a block of skb data at &consumed relative to the + * lower offset specified to skb_prepare_seq_read(). Assigns + * the head of the data block to &data and returns the length + * of the block or 0 if the end of the skb data or the upper + * offset has been reached. + * + * The caller is not required to consume all of the data + * returned, i.e. &consumed is typically set to the number + * of bytes already consumed and the next call to + * skb_seq_read() will return the remaining part of the block. + * + * Note: The size of each block of data returned can be arbitrary, + * this limitation is the cost for zero-copy sequential + * reads of potentially non-linear data. + * + * Note: Fragment lists within fragments are not implemented + * at the moment, state->root_skb could be replaced with + * a stack for this purpose. + */ +unsigned int skb_seq_read(unsigned int consumed, const u8 **data, + struct skb_seq_state *st) +{ + unsigned int block_limit, abs_offset = consumed + st->lower_offset; + skb_frag_t *frag; + + if (unlikely(abs_offset >= st->upper_offset)) + return 0; + +next_skb: + block_limit = skb_headlen(st->cur_skb); + + if (abs_offset < block_limit) { + *data = st->cur_skb->data + abs_offset; + return block_limit - abs_offset; + } + + if (st->frag_idx == 0 && !st->frag_data) + st->stepped_offset += skb_headlen(st->cur_skb); + + while (st->frag_idx < skb_shinfo(st->cur_skb)->nr_frags) { + frag = &skb_shinfo(st->cur_skb)->frags[st->frag_idx]; + block_limit = frag->size + st->stepped_offset; + + if (abs_offset < block_limit) { + if (!st->frag_data) + st->frag_data = kmap_skb_frag(frag); + + *data = (u8 *) st->frag_data + frag->page_offset + + (abs_offset - st->stepped_offset); + + return block_limit - abs_offset; + } + + if (st->frag_data) { + kunmap_skb_frag(st->frag_data); + st->frag_data = NULL; + } + + st->frag_idx++; + st->stepped_offset += frag->size; + } + + if (st->cur_skb->next) { + st->cur_skb = st->cur_skb->next; + st->frag_idx = 0; + goto next_skb; + } else if (st->root_skb == st->cur_skb && + skb_shinfo(st->root_skb)->frag_list) { + st->cur_skb = skb_shinfo(st->root_skb)->frag_list; + goto next_skb; + } + + return 0; +} + +/** + * skb_abort_seq_read - Abort a sequential read of skb data + * @st: state variable + * + * Must be called if the sequential read is aborted, i.e. if + * skb_seq_read() was not called repeatedly until it + * returned 0.
+ */ +void skb_abort_seq_read(struct skb_seq_state *st) +{ + if (st->frag_data) + kunmap_skb_frag(st->frag_data); +} + +#define TS_SKB_CB(state) ((struct skb_seq_state *) &((state)->cb)) + +static unsigned int skb_ts_get_next_block(unsigned int offset, const u8 **text, + struct ts_config *conf, + struct ts_state *state) +{ + return skb_seq_read(offset, text, TS_SKB_CB(state)); +} + +static void skb_ts_finish(struct ts_config *conf, struct ts_state *state) +{ + skb_abort_seq_read(TS_SKB_CB(state)); +} + +/** + * skb_find_text - Find a text pattern in skb data + * @skb: the buffer to look in + * @from: search offset + * @to: search limit + * @config: textsearch configuration + * @state: uninitialized textsearch state variable + * + * Finds a pattern in the skb data according to the specified + * textsearch configuration. Use textsearch_next() to retrieve + * subsequent occurrences of the pattern. Returns the offset + * to the first occurrence or UINT_MAX if no match was found. + */ +unsigned int skb_find_text(struct sk_buff *skb, unsigned int from, + unsigned int to, struct ts_config *config, + struct ts_state *state) +{ + config->get_next_block = skb_ts_get_next_block; + config->finish = skb_ts_finish; + + skb_prepare_seq_read(skb, from, to, TS_SKB_CB(state)); + + return textsearch_find(config, state); +} + void __init skb_init(void) { skbuff_head_cache = kmem_cache_create("skbuff_head_cache", @@ -1538,3 +1691,7 @@ EXPORT_SYMBOL(skb_queue_tail); EXPORT_SYMBOL(skb_unlink); EXPORT_SYMBOL(skb_append); EXPORT_SYMBOL(skb_split); +EXPORT_SYMBOL(skb_prepare_seq_read); +EXPORT_SYMBOL(skb_seq_read); +EXPORT_SYMBOL(skb_abort_seq_read); +EXPORT_SYMBOL(skb_find_text); diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c index 880a88815211..8f817ad9f546 100644 --- a/net/core/sysctl_net_core.c +++ b/net/core/sysctl_net_core.c @@ -13,12 +13,8 @@ #ifdef CONFIG_SYSCTL extern int netdev_max_backlog; +extern int netdev_budget; extern int weight_p; -extern int no_cong_thresh; -extern int no_cong; -extern int lo_cong; -extern int mod_cong; -extern int netdev_fastroute; extern int net_msg_cost; extern int net_msg_burst; @@ -85,38 +81,6 @@ ctl_table core_table[] = { .mode = 0644, .proc_handler = &proc_dointvec }, - { - .ctl_name = NET_CORE_NO_CONG_THRESH, - .procname = "no_cong_thresh", - .data = &no_cong_thresh, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = &proc_dointvec - }, - { - .ctl_name = NET_CORE_NO_CONG, - .procname = "no_cong", - .data = &no_cong, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = &proc_dointvec - }, - { - .ctl_name = NET_CORE_LO_CONG, - .procname = "lo_cong", - .data = &lo_cong, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = &proc_dointvec - }, - { - .ctl_name = NET_CORE_MOD_CONG, - .procname = "mod_cong", - .data = &mod_cong, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = &proc_dointvec - }, { .ctl_name = NET_CORE_MSG_COST, .procname = "message_cost", @@ -161,6 +125,14 @@ ctl_table core_table[] = { .mode = 0644, .proc_handler = &proc_dointvec }, + { + .ctl_name = NET_CORE_BUDGET, + .procname = "netdev_budget", + .data = &netdev_budget, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = &proc_dointvec + }, { .ctl_name = 0 } }; diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index f3dbc8dc1263..882436da9a3a 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -1927,6 +1927,25 @@ int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval, return tp->af_specific->setsockopt(sk, level, optname, optval, 
optlen); + /* This is a string value all the others are int's */ + if (optname == TCP_CONGESTION) { + char name[TCP_CA_NAME_MAX]; + + if (optlen < 1) + return -EINVAL; + + val = strncpy_from_user(name, optval, + min(TCP_CA_NAME_MAX-1, optlen)); + if (val < 0) + return -EFAULT; + name[val] = 0; + + lock_sock(sk); + err = tcp_set_congestion_control(tp, name); + release_sock(sk); + return err; + } + if (optlen < sizeof(int)) return -EINVAL; @@ -2211,6 +2230,16 @@ int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval, case TCP_QUICKACK: val = !tp->ack.pingpong; break; + + case TCP_CONGESTION: + if (get_user(len, optlen)) + return -EFAULT; + len = min_t(unsigned int, len, TCP_CA_NAME_MAX); + if (put_user(len, optlen)) + return -EFAULT; + if (copy_to_user(optval, tp->ca_ops->name, len)) + return -EFAULT; + return 0; default: return -ENOPROTOOPT; }; @@ -2224,7 +2253,7 @@ int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval, extern void __skb_cb_too_small_for_tcp(int, int); -extern void tcpdiag_init(void); +extern struct tcp_congestion_ops tcp_reno; static __initdata unsigned long thash_entries; static int __init set_thash_entries(char *str) diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c index 665394a63ae4..4970d10a7785 100644 --- a/net/ipv4/tcp_cong.c +++ b/net/ipv4/tcp_cong.c @@ -21,7 +21,7 @@ static struct tcp_congestion_ops *tcp_ca_find(const char *name) { struct tcp_congestion_ops *e; - list_for_each_entry(e, &tcp_cong_list, list) { + list_for_each_entry_rcu(e, &tcp_cong_list, list) { if (strcmp(e->name, name) == 0) return e; } @@ -77,6 +77,9 @@ void tcp_init_congestion_control(struct tcp_sock *tp) { struct tcp_congestion_ops *ca; + if (tp->ca_ops != &tcp_init_congestion_ops) + return; + rcu_read_lock(); list_for_each_entry_rcu(ca, &tcp_cong_list, list) { if (try_module_get(ca->owner)) { @@ -139,6 +142,34 @@ void tcp_get_default_congestion_control(char *name) rcu_read_unlock(); } +/* Change congestion control for socket */ +int tcp_set_congestion_control(struct tcp_sock *tp, const char *name) +{ + struct tcp_congestion_ops *ca; + int err = 0; + + rcu_read_lock(); + ca = tcp_ca_find(name); + if (ca == tp->ca_ops) + goto out; + + if (!ca) + err = -ENOENT; + + else if (!try_module_get(ca->owner)) + err = -EBUSY; + + else { + tcp_cleanup_congestion_control(tp); + tp->ca_ops = ca; + if (tp->ca_ops->init) + tp->ca_ops->init(tp); + } + out: + rcu_read_unlock(); + return err; +} + /* * TCP Reno congestion control * This is special case used for fallback as well. 
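The new TCP_CONGESTION socket option handled above can be exercised from userspace roughly as follows. This is an illustrative sketch, not part of the patch; it assumes the option value 13 added by this patch and that the requested algorithm ("reno" here) is registered.

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>

#ifndef TCP_CONGESTION
#define TCP_CONGESTION 13	/* value added by this patch */
#endif

int main(void)
{
	char buf[16];			/* TCP_CA_NAME_MAX in the kernel */
	socklen_t len = sizeof(buf);
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	if (fd < 0)
		return 1;

	/* Select the congestion control algorithm for this socket. */
	if (setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, "reno", 4) < 0)
		perror("setsockopt(TCP_CONGESTION)");

	/* Read back the name of the algorithm currently in use. */
	if (getsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, buf, &len) == 0)
		printf("congestion control: %.*s\n", (int) len, buf);

	return 0;
}

Note that on the set side the kernel code above copies at most TCP_CA_NAME_MAX - 1 bytes, so longer names are silently truncated.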
@@ -192,4 +223,15 @@ struct tcp_congestion_ops tcp_reno = { .min_cwnd = tcp_reno_min_cwnd, }; -EXPORT_SYMBOL_GPL(tcp_reno); +/* Initial congestion control used (until SYN); + * really Reno under another name so we can tell the difference + * during tcp_set_default_congestion_control(). + */ +struct tcp_congestion_ops tcp_init_congestion_ops = { + .name = "", + .owner = THIS_MODULE, + .ssthresh = tcp_reno_ssthresh, + .cong_avoid = tcp_reno_cong_avoid, + .min_cwnd = tcp_reno_min_cwnd, +}; +EXPORT_SYMBOL_GPL(tcp_init_congestion_ops); diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 9122814c13ad..ebf112347a97 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -2048,7 +2048,7 @@ static int tcp_v4_init_sock(struct sock *sk) tp->mss_cache_std = tp->mss_cache = 536; tp->reordering = sysctl_tcp_reordering; - tp->ca_ops = &tcp_reno; + tp->ca_ops = &tcp_init_congestion_ops; sk->sk_state = TCP_CLOSE; diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index fce56039b0e9..9dac7fdf4726 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -2025,7 +2025,7 @@ static int tcp_v6_init_sock(struct sock *sk) sk->sk_state = TCP_CLOSE; tp->af_specific = &ipv6_specific; - tp->ca_ops = &tcp_reno; + tp->ca_ops = &tcp_init_congestion_ops; sk->sk_write_space = sk_stream_write_space; sock_set_flag(sk, SOCK_USE_WRITE_QUEUE); diff --git a/net/sched/Kconfig b/net/sched/Kconfig index b22c9beb604d..447b89e556b1 100644 --- a/net/sched/Kconfig +++ b/net/sched/Kconfig @@ -449,6 +449,18 @@ config NET_EMATCH_META To compile this code as a module, choose M here: the module will be called em_meta. +config NET_EMATCH_TEXT + tristate "Textsearch" + depends on NET_EMATCH + select TEXTSEARCH + ---help--- + Say Y here if you want to be able to classify packets based on + textsearch comparisons. Please select the appropriate textsearch + algorithms in the Library section. + + To compile this code as a module, choose M here: the + module will be called em_text. + config NET_CLS_ACT bool "Packet ACTION" depends on EXPERIMENTAL && NET_CLS && NET_QOS diff --git a/net/sched/Makefile b/net/sched/Makefile index eb3fe583eba8..8f58cecd6266 100644 --- a/net/sched/Makefile +++ b/net/sched/Makefile @@ -40,3 +40,4 @@ obj-$(CONFIG_NET_EMATCH_CMP) += em_cmp.o obj-$(CONFIG_NET_EMATCH_NBYTE) += em_nbyte.o obj-$(CONFIG_NET_EMATCH_U32) += em_u32.o obj-$(CONFIG_NET_EMATCH_META) += em_meta.o +obj-$(CONFIG_NET_EMATCH_TEXT) += em_text.o diff --git a/net/sched/em_text.c b/net/sched/em_text.c new file mode 100644 index 000000000000..873840d8d072 --- /dev/null +++ b/net/sched/em_text.c @@ -0,0 +1,157 @@ +/* + * net/sched/em_text.c Textsearch ematch + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version.
+ * + * Authors: Thomas Graf + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +struct text_match +{ + u16 from_offset; + u16 to_offset; + u8 from_layer; + u8 to_layer; + struct ts_config *config; +}; + +#define EM_TEXT_PRIV(m) ((struct text_match *) (m)->data) + +static int em_text_match(struct sk_buff *skb, struct tcf_ematch *m, + struct tcf_pkt_info *info) +{ + struct text_match *tm = EM_TEXT_PRIV(m); + int from, to; + struct ts_state state; + + from = tcf_get_base_ptr(skb, tm->from_layer) - skb->data; + from += tm->from_offset; + + to = tcf_get_base_ptr(skb, tm->to_layer) - skb->data; + to += tm->to_offset; + + return skb_find_text(skb, from, to, tm->config, &state) != UINT_MAX; +} + +static int em_text_change(struct tcf_proto *tp, void *data, int len, + struct tcf_ematch *m) +{ + struct text_match *tm; + struct tcf_em_text *conf = data; + struct ts_config *ts_conf; + int flags = 0; + + printk("Configuring text: %s from %d:%d to %d:%d len %d\n", conf->algo, conf->from_offset, + conf->from_layer, conf->to_offset, conf->to_layer, conf->pattern_len); + + if (len < sizeof(*conf) || len < (sizeof(*conf) + conf->pattern_len)) + return -EINVAL; + + if (conf->from_layer > conf->to_layer) + return -EINVAL; + + if (conf->from_layer == conf->to_layer && + conf->from_offset > conf->to_offset) + return -EINVAL; + +retry: + ts_conf = textsearch_prepare(conf->algo, (u8 *) conf + sizeof(*conf), + conf->pattern_len, GFP_KERNEL, flags); + + if (flags & TS_AUTOLOAD) + rtnl_lock(); + + if (IS_ERR(ts_conf)) { + if (PTR_ERR(ts_conf) == -ENOENT && !(flags & TS_AUTOLOAD)) { + rtnl_unlock(); + flags |= TS_AUTOLOAD; + goto retry; + } else + return PTR_ERR(ts_conf); + } else if (flags & TS_AUTOLOAD) { + textsearch_destroy(ts_conf); + return -EAGAIN; + } + + tm = kmalloc(sizeof(*tm), GFP_KERNEL); + if (tm == NULL) { + textsearch_destroy(ts_conf); + return -ENOBUFS; + } + + tm->from_offset = conf->from_offset; + tm->to_offset = conf->to_offset; + tm->from_layer = conf->from_layer; + tm->to_layer = conf->to_layer; + tm->config = ts_conf; + + m->datalen = sizeof(*tm); + m->data = (unsigned long) tm; + + return 0; +} + +static void em_text_destroy(struct tcf_proto *tp, struct tcf_ematch *m) +{ + textsearch_destroy(EM_TEXT_PRIV(m)->config); +} + +static int em_text_dump(struct sk_buff *skb, struct tcf_ematch *m) +{ + struct text_match *tm = EM_TEXT_PRIV(m); + struct tcf_em_text conf; + + strncpy(conf.algo, tm->config->ops->name, sizeof(conf.algo) - 1); + conf.from_offset = tm->from_offset; + conf.to_offset = tm->to_offset; + conf.from_layer = tm->from_layer; + conf.to_layer = tm->to_layer; + conf.pattern_len = textsearch_get_pattern_len(tm->config); + conf.pad = 0; + + RTA_PUT_NOHDR(skb, sizeof(conf), &conf); + RTA_APPEND(skb, conf.pattern_len, textsearch_get_pattern(tm->config)); + return 0; + +rtattr_failure: + return -1; +} + +static struct tcf_ematch_ops em_text_ops = { + .kind = TCF_EM_TEXT, + .change = em_text_change, + .match = em_text_match, + .destroy = em_text_destroy, + .dump = em_text_dump, + .owner = THIS_MODULE, + .link = LIST_HEAD_INIT(em_text_ops.link) +}; + +static int __init init_em_text(void) +{ + return tcf_em_register(&em_text_ops); +} + +static void __exit exit_em_text(void) +{ + tcf_em_unregister(&em_text_ops); +} + +MODULE_LICENSE("GPL"); + +module_init(init_em_text); +module_exit(exit_em_text);
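To close the loop on em_text_change() above: the ematch data it parses is a struct tcf_em_text immediately followed by the raw pattern bytes. A userspace tool would assemble that blob roughly as sketched below before wrapping it into the ematch netlink attributes. This is an editor's illustration; the helper name and parameters are invented, the blob layout is the only part taken from the patch, and it assumes the new tc_em_text.h header is visible to userspace builds.

#include <stdlib.h>
#include <string.h>
#include <linux/tc_ematch/tc_em_text.h>

/* Hypothetical helper: build the blob that em_text_change() expects. */
static void *build_em_text(const char *algo, __u8 layer,
			   __u16 from_offset, __u16 to_offset,
			   const void *pattern, __u16 pattern_len,
			   size_t *blob_len)
{
	struct tcf_em_text *conf;
	void *blob;

	*blob_len = sizeof(*conf) + pattern_len;
	blob = calloc(1, *blob_len);
	if (blob == NULL)
		return NULL;

	conf = blob;
	strncpy(conf->algo, algo, TC_EM_TEXT_ALGOSIZ - 1);
	conf->from_offset = from_offset;
	conf->to_offset = to_offset;
	conf->pattern_len = pattern_len;
	conf->from_layer = layer;
	conf->to_layer = layer;

	/* The pattern bytes follow the fixed-size header directly. */
	memcpy(conf + 1, pattern, pattern_len);

	return blob;
}

For the "kmp" algorithm the pattern is simply the byte string to search for; for "fsm" it is an array of struct ts_fsm_token as sketched earlier.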