Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/atmdev.h      |   4
-rw-r--r--  include/linux/mm.h          |   6
-rw-r--r--  include/linux/mmzone.h      |  38
-rw-r--r--  include/linux/net.h         |   1
-rw-r--r--  include/linux/netdevice.h   |  18
-rw-r--r--  include/linux/page-flags.h  | 149
-rw-r--r--  include/linux/pagemap.h     |  45
-rw-r--r--  include/linux/rcupdate.h    |  24
-rw-r--r--  include/linux/rtc.h         |   4
-rw-r--r--  include/linux/sched.h       |   2
-rw-r--r--  include/linux/security.h    |  51
-rw-r--r--  include/linux/skbuff.h      |   9
-rw-r--r--  include/linux/smp.h         |   2
-rw-r--r--  include/linux/sunrpc/svc.h  |   4
-rw-r--r--  include/linux/swap.h        |   1
-rw-r--r--  include/linux/vmstat.h      | 215
16 files changed, 350 insertions, 223 deletions
diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
index 1eb238affb1..41788a31c43 100644
--- a/include/linux/atmdev.h
+++ b/include/linux/atmdev.h
@@ -7,6 +7,7 @@
 #define LINUX_ATMDEV_H
 
+#include <linux/device.h>
 #include <linux/atmapi.h>
 #include <linux/atm.h>
 #include <linux/atmioc.h>
@@ -358,6 +359,7 @@ struct atm_dev {
 	struct proc_dir_entry *proc_entry; /* proc entry */
 	char *proc_name;		/* proc entry name */
 #endif
+	struct class_device class_dev;	/* sysfs class device */
 	struct list_head dev_list;	/* linkage */
 };
 
@@ -459,7 +461,7 @@ static inline void atm_dev_put(struct atm_dev *dev)
 		BUG_ON(!test_bit(ATM_DF_REMOVED, &dev->flags));
 		if (dev->ops->dev_close)
 			dev->ops->dev_close(dev);
-		kfree(dev);
+		class_device_put(&dev->class_dev);
 	}
 }
diff --git a/include/linux/mm.h b/include/linux/mm.h
index c41a1299b8c..75179529e39 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -36,7 +36,6 @@ extern int sysctl_legacy_va_layout;
 #include <asm/page.h>
 #include <asm/pgtable.h>
 #include <asm/processor.h>
-#include <asm/atomic.h>
 
 #define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))
@@ -515,6 +514,11 @@ static inline void set_page_links(struct page *page, unsigned long zone,
 	set_page_section(page, pfn_to_section_nr(pfn));
 }
 
+/*
+ * Some inline functions in vmstat.h depend on page_zone()
+ */
+#include <linux/vmstat.h>
+
 #ifndef CONFIG_DISCONTIGMEM
 /* The array of struct pages - for discontigmem use pgdat->lmem_map */
 extern struct page *mem_map;
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index d6120fa6911..27e748eb72b 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -46,6 +46,27 @@ struct zone_padding {
 #define ZONE_PADDING(name)
 #endif
 
+enum zone_stat_item {
+	NR_ANON_PAGES,		/* Mapped anonymous pages */
+	NR_FILE_MAPPED,		/* pagecache pages mapped into pagetables.
+				   only modified from process context */
+	NR_FILE_PAGES,
+	NR_SLAB,		/* Pages used by slab allocator */
+	NR_PAGETABLE,		/* used for pagetables */
+	NR_FILE_DIRTY,
+	NR_WRITEBACK,
+	NR_UNSTABLE_NFS,	/* NFS unstable pages */
+	NR_BOUNCE,
+#ifdef CONFIG_NUMA
+	NUMA_HIT,		/* allocated in intended node */
+	NUMA_MISS,		/* allocated in non intended node */
+	NUMA_FOREIGN,		/* was intended here, hit elsewhere */
+	NUMA_INTERLEAVE_HIT,	/* interleaver preferred this zone */
+	NUMA_LOCAL,		/* allocation from local node */
+	NUMA_OTHER,		/* allocation from other node */
+#endif
+	NR_VM_ZONE_STAT_ITEMS };
+
 struct per_cpu_pages {
 	int count;		/* number of pages in the list */
 	int high;		/* high watermark, emptying needed */
@@ -55,13 +76,8 @@ struct per_cpu_pages {
 
 struct per_cpu_pageset {
 	struct per_cpu_pages pcp[2];	/* 0: hot.  1: cold */
-#ifdef CONFIG_NUMA
-	unsigned long numa_hit;		/* allocated in intended node */
-	unsigned long numa_miss;	/* allocated in non intended node */
-	unsigned long numa_foreign;	/* was intended here, hit elsewhere */
-	unsigned long interleave_hit;	/* interleaver prefered this zone */
-	unsigned long local_node;	/* allocation from local node */
-	unsigned long other_node;	/* allocation from other node */
+#ifdef CONFIG_SMP
+	s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS];
 #endif
 } ____cacheline_aligned_in_smp;
 
@@ -165,12 +181,8 @@ struct zone {
 	/* A count of how many reclaimers are scanning this zone */
 	atomic_t		reclaim_in_progress;
 
-	/*
-	 * timestamp (in jiffies) of the last zone reclaim that did not
-	 * result in freeing of pages. This is used to avoid repeated scans
-	 * if all memory in the zone is in use.
-	 */
-	unsigned long		last_unsuccessful_zone_reclaim;
+	/* Zone statistics */
+	atomic_long_t		vm_stat[NR_VM_ZONE_STAT_ITEMS];
 
 	/*
	 * prev_priority holds the scanning priority for this zone.  It is
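The mmzone.h changes pair each zone's atomic_long_t vm_stat array with a small per-CPU s8 vm_stat_diff delta; the fold from delta to atomic lives outside these headers (in mm/vmstat.c). The following standalone C program models the idea for one counter and one CPU; the threshold value and all names here are illustrative, not taken from this patch.

/* Standalone model (not the kernel code): one counter, one CPU.
 * STAT_THRESHOLD is an assumed value; the kernel sizes the delta as
 * an s8, so folds must happen well before 127. */
#include <stdatomic.h>
#include <stdio.h>

#define STAT_THRESHOLD 32

static atomic_long vm_stat;		/* shared, exact only after folds */
static signed char vm_stat_diff;	/* this CPU's pending delta */

static void mod_state(int delta)
{
	int x = vm_stat_diff + delta;

	if (x > STAT_THRESHOLD || x < -STAT_THRESHOLD) {
		atomic_fetch_add(&vm_stat, x);	/* fold into the atomic */
		x = 0;
	}
	vm_stat_diff = (signed char)x;
}

int main(void)
{
	for (int i = 0; i < 100; i++)
		mod_state(1);
	/* Readers see the folded part only; the remainder sits in the
	 * per-CPU delta, which is why kernel readers clamp at zero. */
	printf("folded=%ld pending=%d\n",
	       (long)atomic_load(&vm_stat), vm_stat_diff);
	return 0;
}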
diff --git a/include/linux/net.h b/include/linux/net.h
index 385e68f5bd9..b20c53c7441 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -61,6 +61,7 @@ typedef enum {
 #define SOCK_ASYNC_WAITDATA	1
 #define SOCK_NOSPACE		2
 #define SOCK_PASSCRED		3
+#define SOCK_PASSSEC		4
 
 #ifndef ARCH_HAS_SOCKET_TYPES
 /**
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 03cd7551a7a..aa2d3c12c4d 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -315,6 +315,8 @@ struct net_device
 #define NETIF_F_GSO_SHIFT	16
 #define NETIF_F_TSO		(SKB_GSO_TCPV4 << NETIF_F_GSO_SHIFT)
 #define NETIF_F_UFO		(SKB_GSO_UDPV4 << NETIF_F_GSO_SHIFT)
+#define NETIF_F_GSO_ROBUST	(SKB_GSO_DODGY << NETIF_F_GSO_SHIFT)
+#define NETIF_F_TSO_ECN		(SKB_GSO_TCPV4_ECN << NETIF_F_GSO_SHIFT)
 
 #define NETIF_F_GEN_CSUM	(NETIF_F_NO_CSUM | NETIF_F_HW_CSUM)
 #define NETIF_F_ALL_CSUM	(NETIF_F_IP_CSUM | NETIF_F_GEN_CSUM)
@@ -543,7 +545,8 @@ struct packet_type {
 					 struct net_device *,
 					 struct packet_type *,
 					 struct net_device *);
-	struct sk_buff		*(*gso_segment)(struct sk_buff *skb, int sg);
+	struct sk_buff		*(*gso_segment)(struct sk_buff *skb,
+						int features);
 	void			*af_packet_priv;
 	struct list_head	list;
 };
@@ -968,7 +971,7 @@ extern int netdev_max_backlog;
 extern int		weight_p;
 extern int		netdev_set_master(struct net_device *dev, struct net_device *master);
 extern int skb_checksum_help(struct sk_buff *skb, int inward);
-extern struct sk_buff *skb_gso_segment(struct sk_buff *skb, int sg);
+extern struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features);
 #ifdef CONFIG_BUG
 extern void netdev_rx_csum_fault(struct net_device *dev);
 #else
@@ -988,11 +991,16 @@ extern void dev_seq_stop(struct seq_file *seq, void *v);
 
 extern void linkwatch_run_queue(void);
 
+static inline int skb_gso_ok(struct sk_buff *skb, int features)
+{
+	int feature = skb_shinfo(skb)->gso_size ?
+		      skb_shinfo(skb)->gso_type << NETIF_F_GSO_SHIFT : 0;
+	return (features & feature) == feature;
+}
+
 static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb)
 {
-	int feature = skb_shinfo(skb)->gso_type << NETIF_F_GSO_SHIFT;
-	return skb_shinfo(skb)->gso_size &&
-	       (dev->features & feature) != feature;
+	return !skb_gso_ok(skb, dev->features);
 }
 
 #endif /* __KERNEL__ */
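skb_gso_ok() now carries the whole policy: a packet needs software GSO only if some GSO type bit set on it is missing from the device's feature mask after shifting into the NETIF_F_GSO range. Below is a standalone C model of that bit arithmetic, using the flag values from this patch; the gso_size/gso_type parameters stand in for the skb_shinfo() fields.

/* Standalone sketch (not kernel code) of the skb_gso_ok() check. */
#include <stdio.h>

#define SKB_GSO_TCPV4      (1 << 0)
#define SKB_GSO_UDPV4      (1 << 1)
#define SKB_GSO_DODGY      (1 << 2)
#define SKB_GSO_TCPV4_ECN  (1 << 3)

#define NETIF_F_GSO_SHIFT  16
#define NETIF_F_TSO        (SKB_GSO_TCPV4 << NETIF_F_GSO_SHIFT)
#define NETIF_F_GSO_ROBUST (SKB_GSO_DODGY << NETIF_F_GSO_SHIFT)

static int gso_ok(int gso_size, int gso_type, int dev_features)
{
	int feature = gso_size ? gso_type << NETIF_F_GSO_SHIFT : 0;
	/* every GSO type bit set on the skb must be offered by the device */
	return (dev_features & feature) == feature;
}

int main(void)
{
	/* a TSO-capable device handles a plain TCPv4 GSO skb in hardware */
	printf("%d\n", gso_ok(1448, SKB_GSO_TCPV4, NETIF_F_TSO));	/* 1 */
	/* an untrusted (DODGY) skb additionally needs NETIF_F_GSO_ROBUST */
	printf("%d\n", gso_ok(1448, SKB_GSO_TCPV4 | SKB_GSO_DODGY,
			      NETIF_F_TSO));				/* 0 */
	printf("%d\n", gso_ok(1448, SKB_GSO_TCPV4 | SKB_GSO_DODGY,
			      NETIF_F_TSO | NETIF_F_GSO_ROBUST));	/* 1 */
	return 0;
}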
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 0c076d58c67..5748642e9f3 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -5,12 +5,8 @@
 #ifndef PAGE_FLAGS_H
 #define PAGE_FLAGS_H
 
-#include <linux/percpu.h>
-#include <linux/cache.h>
 #include <linux/types.h>
 
-#include <asm/pgtable.h>
-
 /*
  * Various page->flags bits:
  *
@@ -103,134 +99,6 @@
 #endif
 
 /*
- * Global page accounting.  One instance per CPU.  Only unsigned longs are
- * allowed.
- *
- * - Fields can be modified with xxx_page_state and xxx_page_state_zone at
- *   any time safely (which protects the instance from modification by
- *   interrupt.
- * - The __xxx_page_state variants can be used safely when interrupts are
- *   disabled.
- * - The __xxx_page_state variants can be used if the field is only
- *   modified from process context and protected from preemption, or only
- *   modified from interrupt context.  In this case, the field should be
- *   commented here.
- */
-struct page_state {
-	unsigned long nr_dirty;		/* Dirty writeable pages */
-	unsigned long nr_writeback;	/* Pages under writeback */
-	unsigned long nr_unstable;	/* NFS unstable pages */
-	unsigned long nr_page_table_pages;/* Pages used for pagetables */
-	unsigned long nr_mapped;	/* mapped into pagetables.
-					 * only modified from process context */
-	unsigned long nr_slab;		/* In slab */
-#define GET_PAGE_STATE_LAST nr_slab
-
-	/*
-	 * The below are zeroed by get_page_state().  Use get_full_page_state()
-	 * to add up all these.
-	 */
-	unsigned long pgpgin;		/* Disk reads */
-	unsigned long pgpgout;		/* Disk writes */
-	unsigned long pswpin;		/* swap reads */
-	unsigned long pswpout;		/* swap writes */
-
-	unsigned long pgalloc_high;	/* page allocations */
-	unsigned long pgalloc_normal;
-	unsigned long pgalloc_dma32;
-	unsigned long pgalloc_dma;
-
-	unsigned long pgfree;		/* page freeings */
-	unsigned long pgactivate;	/* pages moved inactive->active */
-	unsigned long pgdeactivate;	/* pages moved active->inactive */
-
-	unsigned long pgfault;		/* faults (major+minor) */
-	unsigned long pgmajfault;	/* faults (major only) */
-
-	unsigned long pgrefill_high;	/* inspected in refill_inactive_zone */
-	unsigned long pgrefill_normal;
-	unsigned long pgrefill_dma32;
-	unsigned long pgrefill_dma;
-
-	unsigned long pgsteal_high;	/* total highmem pages reclaimed */
-	unsigned long pgsteal_normal;
-	unsigned long pgsteal_dma32;
-	unsigned long pgsteal_dma;
-
-	unsigned long pgscan_kswapd_high;/* total highmem pages scanned */
-	unsigned long pgscan_kswapd_normal;
-	unsigned long pgscan_kswapd_dma32;
-	unsigned long pgscan_kswapd_dma;
-
-	unsigned long pgscan_direct_high;/* total highmem pages scanned */
-	unsigned long pgscan_direct_normal;
-	unsigned long pgscan_direct_dma32;
-	unsigned long pgscan_direct_dma;
-
-	unsigned long pginodesteal;	/* pages reclaimed via inode freeing */
-	unsigned long slabs_scanned;	/* slab objects scanned */
-	unsigned long kswapd_steal;	/* pages reclaimed by kswapd */
-	unsigned long kswapd_inodesteal;/* reclaimed via kswapd inode freeing */
-	unsigned long pageoutrun;	/* kswapd's calls to page reclaim */
-	unsigned long allocstall;	/* direct reclaim calls */
-
-	unsigned long pgrotated;	/* pages rotated to tail of the LRU */
-	unsigned long nr_bounce;	/* pages for bounce buffers */
-};
-
-extern void get_page_state(struct page_state *ret);
-extern void get_page_state_node(struct page_state *ret, int node);
-extern void get_full_page_state(struct page_state *ret);
-extern unsigned long read_page_state_offset(unsigned long offset);
-extern void mod_page_state_offset(unsigned long offset, unsigned long delta);
-extern void __mod_page_state_offset(unsigned long offset, unsigned long delta);
-
-#define read_page_state(member) \
-	read_page_state_offset(offsetof(struct page_state, member))
-
-#define mod_page_state(member, delta)	\
-	mod_page_state_offset(offsetof(struct page_state, member), (delta))
-
-#define __mod_page_state(member, delta)	\
-	__mod_page_state_offset(offsetof(struct page_state, member), (delta))
-
-#define inc_page_state(member)		mod_page_state(member, 1UL)
-#define dec_page_state(member)		mod_page_state(member, 0UL - 1)
-#define add_page_state(member,delta)	mod_page_state(member, (delta))
-#define sub_page_state(member,delta)	mod_page_state(member, 0UL - (delta))
-
-#define __inc_page_state(member)	__mod_page_state(member, 1UL)
-#define __dec_page_state(member)	__mod_page_state(member, 0UL - 1)
-#define __add_page_state(member,delta)	__mod_page_state(member, (delta))
-#define __sub_page_state(member,delta)	__mod_page_state(member, 0UL - (delta))
-
-#define page_state(member) (*__page_state(offsetof(struct page_state, member)))
-
-#define state_zone_offset(zone, member)					\
-({									\
-	unsigned offset;						\
-	if (is_highmem(zone))						\
-		offset = offsetof(struct page_state, member##_high);	\
-	else if (is_normal(zone))					\
-		offset = offsetof(struct page_state, member##_normal);	\
-	else if (is_dma32(zone))					\
-		offset = offsetof(struct page_state, member##_dma32);	\
-	else								\
-		offset = offsetof(struct page_state, member##_dma);	\
-	offset;								\
-})
-
-#define __mod_page_state_zone(zone, member, delta)			\
- do {									\
-	__mod_page_state_offset(state_zone_offset(zone, member), (delta)); \
- } while (0)
-
-#define mod_page_state_zone(zone, member, delta)			\
- do {									\
-	mod_page_state_offset(state_zone_offset(zone, member), (delta)); \
- } while (0)
-
-/*
  * Manipulation of page state flags
  */
 #define PageLocked(page)		\
@@ -254,7 +122,14 @@ extern void __mod_page_state_offset(unsigned long offset, unsigned long delta);
 #define TestClearPageReferenced(page) test_and_clear_bit(PG_referenced, &(page)->flags)
 
 #define PageUptodate(page)	test_bit(PG_uptodate, &(page)->flags)
-#ifndef SetPageUptodate
+#ifdef CONFIG_S390
+#define SetPageUptodate(_page) \
+	do {								      \
+		struct page *__page = (_page);				      \
+		if (!test_and_set_bit(PG_uptodate, &__page->flags))	      \
+			page_test_and_clear_dirty(_page);		      \
+	} while (0)
+#else
 #define SetPageUptodate(page)	set_bit(PG_uptodate, &(page)->flags)
 #endif
 #define ClearPageUptodate(page)	clear_bit(PG_uptodate, &(page)->flags)
@@ -306,7 +181,7 @@ extern void __mod_page_state_offset(unsigned long offset, unsigned long delta);
 	do {							\
 		if (!test_and_set_bit(PG_writeback,		\
 				&(page)->flags))		\
-			inc_page_state(nr_writeback);		\
+			inc_zone_page_state(page, NR_WRITEBACK);	\
 	} while (0)
 #define TestSetPageWriteback(page)				\
 	({							\
 		int ret;					\
@@ -314,14 +189,14 @@ extern void __mod_page_state_offset(unsigned long offset, unsigned long delta);
 		ret = test_and_set_bit(PG_writeback,		\
 					&(page)->flags);	\
 		if (!ret)					\
-			inc_page_state(nr_writeback);		\
+			inc_zone_page_state(page, NR_WRITEBACK);	\
 		ret;						\
 	})
 #define ClearPageWriteback(page)				\
 	do {							\
 		if (test_and_clear_bit(PG_writeback,		\
 				&(page)->flags))		\
-			dec_page_state(nr_writeback);		\
+			dec_zone_page_state(page, NR_WRITEBACK);	\
 	} while (0)
 #define TestClearPageWriteback(page)				\
 	({							\
 		int ret;					\
@@ -329,7 +204,7 @@ extern void __mod_page_state_offset(unsigned long offset, unsigned long delta);
 		ret = test_and_clear_bit(PG_writeback,		\
 					&(page)->flags);	\
 		if (ret)					\
-			dec_page_state(nr_writeback);		\
+			dec_zone_page_state(page, NR_WRITEBACK);	\
 		ret;						\
 	})
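The writeback macros above only touch NR_WRITEBACK when the PG_writeback bit actually changes state, so repeated sets or clears cannot skew the counter. A standalone C model of that flag/counter coupling follows; the bit position and variable names are stand-ins, not kernel definitions.

/* Standalone model (not kernel code) of the transition-gated counter. */
#include <stdatomic.h>
#include <stdio.h>

#define PG_writeback 15

static atomic_ulong flags;		/* stands in for page->flags */
static atomic_long nr_writeback;	/* stands in for NR_WRITEBACK */

static void set_page_writeback(void)
{
	unsigned long old = atomic_fetch_or(&flags, 1UL << PG_writeback);

	if (!(old & (1UL << PG_writeback)))	/* bit was clear: 0 -> 1 */
		atomic_fetch_add(&nr_writeback, 1);
}

static void clear_page_writeback(void)
{
	unsigned long old = atomic_fetch_and(&flags, ~(1UL << PG_writeback));

	if (old & (1UL << PG_writeback))	/* bit was set: 1 -> 0 */
		atomic_fetch_sub(&nr_writeback, 1);
}

int main(void)
{
	set_page_writeback();
	set_page_writeback();			/* second set: no recount */
	clear_page_writeback();
	printf("%ld\n", (long)atomic_load(&nr_writeback));	/* 0 */
	return 0;
}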
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 1245df7141a..0a2f5d27f60 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -113,51 +113,6 @@ int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
 extern void remove_from_page_cache(struct page *page);
 extern void __remove_from_page_cache(struct page *page);
 
-extern atomic_t nr_pagecache;
-
-#ifdef CONFIG_SMP
-
-#define PAGECACHE_ACCT_THRESHOLD	max(16, NR_CPUS * 2)
-DECLARE_PER_CPU(long, nr_pagecache_local);
-
-/*
- * pagecache_acct implements approximate accounting for pagecache.
- * vm_enough_memory() do not need high accuracy. Writers will keep
- * an offset in their per-cpu arena and will spill that into the
- * global count whenever the absolute value of the local count
- * exceeds the counter's threshold.
- *
- * MUST be protected from preemption.
- * current protection is mapping->page_lock.
- */
-static inline void pagecache_acct(int count)
-{
-	long *local;
-
-	local = &__get_cpu_var(nr_pagecache_local);
-	*local += count;
-	if (*local > PAGECACHE_ACCT_THRESHOLD || *local < -PAGECACHE_ACCT_THRESHOLD) {
-		atomic_add(*local, &nr_pagecache);
-		*local = 0;
-	}
-}
-
-#else
-
-static inline void pagecache_acct(int count)
-{
-	atomic_add(count, &nr_pagecache);
-}
-#endif
-
-static inline unsigned long get_page_cache_size(void)
-{
-	int ret = atomic_read(&nr_pagecache);
-	if (unlikely(ret < 0))
-		ret = 0;
-	return ret;
-}
-
 /*
  * Return byte-offset into filesystem object for page.
  */
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 48dfe00070c..b4ca73d6589 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -163,14 +163,22 @@ extern int rcu_needs_cpu(int cpu);
  *
  * It is illegal to block while in an RCU read-side critical section.
  */
-#define rcu_read_lock()		preempt_disable()
+#define rcu_read_lock() \
+	do { \
+		preempt_disable(); \
+		__acquire(RCU); \
+	} while(0)
 
 /**
  * rcu_read_unlock - marks the end of an RCU read-side critical section.
  *
  * See rcu_read_lock() for more information.
  */
-#define rcu_read_unlock()	preempt_enable()
+#define rcu_read_unlock() \
+	do { \
+		__release(RCU); \
+		preempt_enable(); \
+	} while(0)
 
 /*
  * So where is rcu_write_lock()?  It does not exist, as there is no
@@ -193,14 +201,22 @@ extern int rcu_needs_cpu(int cpu);
  * can use just rcu_read_lock().
  *
  */
-#define rcu_read_lock_bh()	local_bh_disable()
+#define rcu_read_lock_bh() \
+	do { \
+		local_bh_disable(); \
+		__acquire(RCU_BH); \
+	} while(0)
 
 /*
  * rcu_read_unlock_bh - marks the end of a softirq-only RCU critical section
 *
 * See rcu_read_lock_bh() for more information.
 */
-#define rcu_read_unlock_bh()	local_bh_enable()
+#define rcu_read_unlock_bh() \
+	do { \
+		__release(RCU_BH); \
+		local_bh_enable(); \
+	} while(0)
 
 /**
 * rcu_dereference - fetch an RCU-protected pointer in an
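The rcu_read_lock()/rcu_read_unlock() rewrite changes no generated code; the __acquire/__release markers only let sparse verify that critical sections balance. A kernel-style usage sketch of the reader side follows; struct foo and gptr are invented placeholders, not part of this patch.

/* Kernel-style sketch, not standalone: an RCU reader. */
struct foo {
	int a;
};

static struct foo *gptr;	/* updated elsewhere via rcu_assign_pointer() */

static int read_foo_a(void)
{
	struct foo *p;
	int ret = 0;

	rcu_read_lock();	/* preempt_disable() + __acquire(RCU) */
	p = rcu_dereference(gptr);
	if (p)
		ret = p->a;
	rcu_read_unlock();	/* __release(RCU) + preempt_enable() */

	return ret;		/* p must not be used past this point */
}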
 */
 struct rtc_wkalrm {
-	unsigned char enabled;	/* 0 = alarm disable, 1 = alarm disabled */
-	unsigned char pending;  /* 0 = alarm pending, 1 = alarm not pending */
+	unsigned char enabled;	/* 0 = alarm disabled, 1 = alarm enabled */
+	unsigned char pending;  /* 0 = alarm not pending, 1 = alarm pending */
 	struct rtc_time time;	/* time the alarm is set to */
 };
 
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 821f0481ebe..aaf723308ed 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1153,7 +1153,7 @@ extern int force_sig_info(int, struct siginfo *, struct task_struct *);
 extern int __kill_pg_info(int sig, struct siginfo *info, pid_t pgrp);
 extern int kill_pg_info(int, struct siginfo *, pid_t);
 extern int kill_proc_info(int, struct siginfo *, pid_t);
-extern int kill_proc_info_as_uid(int, struct siginfo *, pid_t, uid_t, uid_t);
+extern int kill_proc_info_as_uid(int, struct siginfo *, pid_t, uid_t, uid_t, u32);
 extern void do_notify_parent(struct task_struct *, int);
 extern void force_sig(int, struct task_struct *);
 extern void force_sig_specific(int, struct task_struct *);
diff --git a/include/linux/security.h b/include/linux/security.h
index 51805806f97..f75303831d0 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -67,7 +67,7 @@ struct xfrm_state;
 struct xfrm_user_sec_ctx;
 
 extern int cap_netlink_send(struct sock *sk, struct sk_buff *skb);
-extern int cap_netlink_recv(struct sk_buff *skb);
+extern int cap_netlink_recv(struct sk_buff *skb, int cap);
 
 /*
  * Values used in the task_security_ops calls
@@ -567,6 +567,9 @@ struct swap_info_struct;
 *	@p.
 *	@p contains the task_struct for the process.
 *	Return 0 if permission is granted.
+ * @task_getsecid:
+ *	Retrieve the security identifier of the process @p.
+ *	@p contains the task_struct for the process; the identifier is placed in @secid.
 * @task_setgroups:
 *	Check permission before setting the supplementary group set of the
 *	current process.
@@ -582,6 +585,10 @@ struct swap_info_struct;
 *	@p contains the task_struct of process.
 *	@ioprio contains the new ioprio value
 *	Return 0 if permission is granted.
+ * @task_getioprio:
+ *	Check permission before getting the ioprio value of @p.
+ *	@p contains the task_struct of process.
+ *	Return 0 if permission is granted.
 * @task_setrlimit:
 *	Check permission before setting the resource limits of the current
 *	process for @resource to @new_rlim.  The old resource limit values can
@@ -615,6 +622,7 @@ struct swap_info_struct;
 *	@p contains the task_struct for process.
 *	@info contains the signal information.
 *	@sig contains the signal value.
+ *	@secid contains the sid of the process where the signal originated
 *	Return 0 if permission is granted.
 * @task_wait:
 *	Check permission before allowing a process to reap a child process @p
@@ -656,6 +664,7 @@ struct swap_info_struct;
 *	Check permission before processing the received netlink message in
 *	@skb.
 *	@skb contains the sk_buff structure for the netlink message.
+ *	@cap indicates the capability required
 *	Return 0 if permission is granted.
 *
 * Security hooks for Unix domain networking.
@@ -1218,16 +1227,18 @@ struct security_operations {
 	int (*task_setpgid) (struct task_struct * p, pid_t pgid);
 	int (*task_getpgid) (struct task_struct * p);
 	int (*task_getsid) (struct task_struct * p);
+	void (*task_getsecid) (struct task_struct * p, u32 * secid);
 	int (*task_setgroups) (struct group_info *group_info);
 	int (*task_setnice) (struct task_struct * p, int nice);
 	int (*task_setioprio) (struct task_struct * p, int ioprio);
+	int (*task_getioprio) (struct task_struct * p);
 	int (*task_setrlimit) (unsigned int resource, struct rlimit * new_rlim);
 	int (*task_setscheduler) (struct task_struct * p, int policy,
 				  struct sched_param * lp);
 	int (*task_getscheduler) (struct task_struct * p);
 	int (*task_movememory) (struct task_struct * p);
 	int (*task_kill) (struct task_struct * p,
-			  struct siginfo * info, int sig);
+			  struct siginfo * info, int sig, u32 secid);
 	int (*task_wait) (struct task_struct * p);
 	int (*task_prctl) (int option, unsigned long arg2, unsigned long arg3,
 			   unsigned long arg4,
@@ -1266,7 +1277,7 @@ struct security_operations {
 			    struct sembuf * sops, unsigned nsops, int alter);
 
 	int (*netlink_send) (struct sock * sk, struct sk_buff * skb);
-	int (*netlink_recv) (struct sk_buff * skb);
+	int (*netlink_recv) (struct sk_buff * skb, int cap);
 
 	/* allow module stacking */
 	int (*register_security) (const char *name,
@@ -1838,6 +1849,11 @@ static inline int security_task_getsid (struct task_struct *p)
 	return security_ops->task_getsid (p);
 }
 
+static inline void security_task_getsecid (struct task_struct *p, u32 *secid)
+{
+	security_ops->task_getsecid (p, secid);
+}
+
 static inline int security_task_setgroups (struct group_info *group_info)
 {
 	return security_ops->task_setgroups (group_info);
@@ -1853,6 +1869,11 @@ static inline int security_task_setioprio (struct task_struct *p, int ioprio)
 	return security_ops->task_setioprio (p, ioprio);
 }
 
+static inline int security_task_getioprio (struct task_struct *p)
+{
+	return security_ops->task_getioprio (p);
+}
+
 static inline int security_task_setrlimit (unsigned int resource,
 					   struct rlimit *new_rlim)
 {
@@ -1877,9 +1898,10 @@ static inline int security_task_movememory (struct task_struct *p)
 }
 
 static inline int security_task_kill (struct task_struct *p,
-				      struct siginfo *info, int sig)
+				      struct siginfo *info, int sig,
+				      u32 secid)
 {
-	return security_ops->task_kill (p, info, sig);
+	return security_ops->task_kill (p, info, sig, secid);
 }
 
 static inline int security_task_wait (struct task_struct *p)
@@ -2032,9 +2054,9 @@ static inline int security_netlink_send(struct sock *sk, struct sk_buff * skb)
 	return security_ops->netlink_send(sk, skb);
 }
 
-static inline int security_netlink_recv(struct sk_buff * skb)
+static inline int security_netlink_recv(struct sk_buff * skb, int cap)
 {
-	return security_ops->netlink_recv(skb);
+	return security_ops->netlink_recv(skb, cap);
 }
 
 /* prototypes */
@@ -2490,6 +2512,9 @@ static inline int security_task_getsid (struct task_struct *p)
 	return 0;
 }
 
+static inline void security_task_getsecid (struct task_struct *p, u32 *secid)
+{ }
+
 static inline int security_task_setgroups (struct group_info *group_info)
 {
 	return 0;
@@ -2505,6 +2530,11 @@ static inline int security_task_setioprio (struct task_struct *p, int ioprio)
 	return 0;
 }
 
+static inline int security_task_getioprio (struct task_struct *p)
+{
+	return 0;
+}
+
 static inline int security_task_setrlimit (unsigned int resource,
 					   struct rlimit *new_rlim)
 {
@@ -2529,7 +2559,8 @@ static inline int security_task_movememory (struct task_struct *p)
 }
 
 static inline int security_task_kill (struct task_struct *p,
-				      struct siginfo *info, int sig)
+				      struct siginfo *info, int sig,
+				      u32 secid)
 {
 	return 0;
 }
@@ -2670,9 +2701,9 @@ static inline int security_netlink_send (struct sock *sk, struct sk_buff *skb)
 	return cap_netlink_send (sk, skb);
 }
 
-static inline int security_netlink_recv (struct sk_buff *skb)
+static inline int security_netlink_recv (struct sk_buff *skb, int cap)
 {
-	return cap_netlink_recv (skb);
+	return cap_netlink_recv (skb, cap);
 }
 
 static inline struct dentry *securityfs_create_dir(const char *name,
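Below is a sketch of how a security module might fill in the two new task hooks. The function bodies, the "example_" names, and the tgid-as-secid mapping are invented for illustration; they are not part of this patch.

/* Invented LSM fragment matching the hook signatures added above. */
static void example_task_getsecid(struct task_struct *p, u32 *secid)
{
	*secid = p->tgid;	/* a real LSM would return its own label id */
}

static int example_task_kill(struct task_struct *p, struct siginfo *info,
			     int sig, u32 secid)
{
	/* A nonzero secid identifies the sending context even when the
	 * signal is delivered later (e.g. after the sender's credentials
	 * changed), so a module can check it instead of current. */
	return 0;		/* 0 grants permission */
}

static int example_task_getioprio(struct task_struct *p)
{
	return 0;		/* allow reading p's io priority */
}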
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 16eef03ce0e..59918be91d0 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -172,6 +172,12 @@ enum {
 enum {
 	SKB_GSO_TCPV4 = 1 << 0,
 	SKB_GSO_UDPV4 = 1 << 1,
+
+	/* This indicates the skb is from an untrusted source. */
+	SKB_GSO_DODGY = 1 << 2,
+
+	/* This indicates the tcp segment has CWR set. */
+	SKB_GSO_TCPV4_ECN = 1 << 3,
 };
 
 /**
@@ -1298,8 +1304,7 @@ extern void	       skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
 extern void	       skb_split(struct sk_buff *skb,
 				 struct sk_buff *skb1, const u32 len);
 
-extern void	       skb_release_data(struct sk_buff *skb);
-extern struct sk_buff *skb_segment(struct sk_buff *skb, int sg);
+extern struct sk_buff *skb_segment(struct sk_buff *skb, int features);
 
 static inline void *skb_header_pointer(const struct sk_buff *skb, int offset,
 				       int len, void *buffer)
diff --git a/include/linux/smp.h b/include/linux/smp.h
index c93c3fe4308..837e8bce134 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -125,4 +125,6 @@ static inline void smp_send_reschedule(int cpu) { }
 #define put_cpu()		preempt_enable()
 #define put_cpu_no_resched()	preempt_enable_no_resched()
 
+void smp_setup_processor_id(void);
+
 #endif /* __LINUX_SMP_H */
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index 50356438454..7b27c09b560 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -159,7 +159,9 @@ struct svc_rqst {
 					 * determine what device number
 					 * to report (real or virtual)
 					 */
-
+	int		rq_sendfile_ok; /* turned off in gss privacy
+					 * to prevent encrypting page
+					 * cache pages */
 	wait_queue_head_t	rq_wait;	/* synchronization */
 };
 
diff --git a/include/linux/swap.h b/include/linux/swap.h
index c41e2d6d1ac..cf6ca6e377b 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -189,7 +189,6 @@ extern long vm_total_pages;
 
 #ifdef CONFIG_NUMA
 extern int zone_reclaim_mode;
-extern int zone_reclaim_interval;
 extern int zone_reclaim(struct zone *, gfp_t, unsigned int);
 #else
 #define zone_reclaim_mode 0
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
new file mode 100644
index 00000000000..3e0daf54133
--- /dev/null
+++ b/include/linux/vmstat.h
@@ -0,0 +1,215 @@
+#ifndef _LINUX_VMSTAT_H
+#define _LINUX_VMSTAT_H
+
+#include <linux/types.h>
+#include <linux/percpu.h>
+#include <linux/config.h>
+#include <linux/mmzone.h>
+#include <asm/atomic.h>
+
+#ifdef CONFIG_VM_EVENT_COUNTERS
+/*
+ * Light weight per cpu counter implementation.
+ *
+ * Counters should only be incremented and no critical kernel component
+ * should rely on the counter values.
+ *
+ * Counters are handled completely inline. On many platforms the code
+ * generated will simply be the increment of a global address.
+ */
+
+#define FOR_ALL_ZONES(x) x##_DMA, x##_DMA32, x##_NORMAL, x##_HIGH
+
+enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
+		FOR_ALL_ZONES(PGALLOC),
+		PGFREE, PGACTIVATE, PGDEACTIVATE,
+		PGFAULT, PGMAJFAULT,
+		FOR_ALL_ZONES(PGREFILL),
+		FOR_ALL_ZONES(PGSTEAL),
+		FOR_ALL_ZONES(PGSCAN_KSWAPD),
+		FOR_ALL_ZONES(PGSCAN_DIRECT),
+		PGINODESTEAL, SLABS_SCANNED, KSWAPD_STEAL, KSWAPD_INODESTEAL,
+		PAGEOUTRUN, ALLOCSTALL, PGROTATED,
+		NR_VM_EVENT_ITEMS
+};
+
+struct vm_event_state {
+	unsigned long event[NR_VM_EVENT_ITEMS];
+};
+
+DECLARE_PER_CPU(struct vm_event_state, vm_event_states);
+
+static inline void __count_vm_event(enum vm_event_item item)
+{
+	__get_cpu_var(vm_event_states.event[item])++;
+}
+
+static inline void count_vm_event(enum vm_event_item item)
+{
+	get_cpu_var(vm_event_states.event[item])++;
+	put_cpu();
+}
+
+static inline void __count_vm_events(enum vm_event_item item, long delta)
+{
+	__get_cpu_var(vm_event_states.event[item]) += delta;
+}
+
+static inline void count_vm_events(enum vm_event_item item, long delta)
+{
+	get_cpu_var(vm_event_states.event[item]) += delta;
+	put_cpu();
+}
+
+extern void all_vm_events(unsigned long *);
+extern void vm_events_fold_cpu(int cpu);
+
+#else
+
+/* Disable counters */
+#define get_cpu_vm_events(e)	0L
+#define count_vm_event(e)	do { } while (0)
+#define count_vm_events(e,d)	do { } while (0)
+#define __count_vm_event(e)	do { } while (0)
+#define __count_vm_events(e,d)	do { } while (0)
+#define vm_events_fold_cpu(x)	do { } while (0)
+
+#endif /* CONFIG_VM_EVENT_COUNTERS */
+
+#define __count_zone_vm_events(item, zone, delta) \
+		__count_vm_events(item##_DMA + zone_idx(zone), delta)
+
+/*
+ * Zone based page accounting with per cpu differentials.
+ */
+extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
+
+static inline void zone_page_state_add(long x, struct zone *zone,
+				 enum zone_stat_item item)
+{
+	atomic_long_add(x, &zone->vm_stat[item]);
+	atomic_long_add(x, &vm_stat[item]);
+}
+
+static inline unsigned long global_page_state(enum zone_stat_item item)
+{
+	long x = atomic_long_read(&vm_stat[item]);
+#ifdef CONFIG_SMP
+	if (x < 0)
+		x = 0;
+#endif
+	return x;
+}
+
+static inline unsigned long zone_page_state(struct zone *zone,
+					enum zone_stat_item item)
+{
+	long x = atomic_long_read(&zone->vm_stat[item]);
+#ifdef CONFIG_SMP
+	if (x < 0)
+		x = 0;
+#endif
+	return x;
+}
+
+#ifdef CONFIG_NUMA
+/*
+ * Determine the per node value of a stat item. This function
+ * is called frequently in a NUMA machine, so try to be as
+ * frugal as possible.
+ */
+static inline unsigned long node_page_state(int node,
+				 enum zone_stat_item item)
+{
+	struct zone *zones = NODE_DATA(node)->node_zones;
+
+	return
+#ifndef CONFIG_DMA_IS_NORMAL
+#if !defined(CONFIG_DMA_IS_DMA32) && BITS_PER_LONG >= 64
+		zone_page_state(&zones[ZONE_DMA32], item) +
+#endif
+		zone_page_state(&zones[ZONE_NORMAL], item) +
+#endif
+#ifdef CONFIG_HIGHMEM
+		zone_page_state(&zones[ZONE_HIGHMEM], item) +
+#endif
+		zone_page_state(&zones[ZONE_DMA], item);
+}
+
+extern void zone_statistics(struct zonelist *, struct zone *);
+
+#else
+
+#define node_page_state(node, item) global_page_state(item)
+#define zone_statistics(_zl,_z) do { } while (0)
+
+#endif /* CONFIG_NUMA */
+
+#define __add_zone_page_state(__z, __i, __d)	__mod_zone_page_state(__z, __i, __d)
+#define __sub_zone_page_state(__z, __i, __d)	__mod_zone_page_state(__z, __i, -(__d))
+
+#define add_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, __d)
+#define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d))
+
+static inline void zap_zone_vm_stats(struct zone *zone)
+{
+	memset(zone->vm_stat, 0, sizeof(zone->vm_stat));
+}
+
+extern void inc_zone_state(struct zone *, enum zone_stat_item);
+
+#ifdef CONFIG_SMP
+void __mod_zone_page_state(struct zone *, enum zone_stat_item item, int);
+void __inc_zone_page_state(struct page *, enum zone_stat_item);
+void __dec_zone_page_state(struct page *, enum zone_stat_item);
+
+void mod_zone_page_state(struct zone *, enum zone_stat_item, int);
+void inc_zone_page_state(struct page *, enum zone_stat_item);
+void dec_zone_page_state(struct page *, enum zone_stat_item);
+
+extern void inc_zone_state(struct zone *, enum zone_stat_item);
+
+void refresh_cpu_vm_stats(int);
+void refresh_vm_stats(void);
+
+#else /* CONFIG_SMP */
+
+/*
+ * We do not maintain differentials in a single processor configuration.
+ * The functions directly modify the zone and global counters.
+ */
+static inline void __mod_zone_page_state(struct zone *zone,
+			enum zone_stat_item item, int delta)
+{
+	zone_page_state_add(delta, zone, item);
+}
+
+static inline void __inc_zone_page_state(struct page *page,
+			enum zone_stat_item item)
+{
+	atomic_long_inc(&page_zone(page)->vm_stat[item]);
+	atomic_long_inc(&vm_stat[item]);
+}
+
+static inline void __dec_zone_page_state(struct page *page,
+			enum zone_stat_item item)
+{
+	atomic_long_dec(&page_zone(page)->vm_stat[item]);
+	atomic_long_dec(&vm_stat[item]);
+}
+
+/*
+ * We only use atomic operations to update counters. So there is no need to
+ * disable interrupts.
+ */
+#define inc_zone_page_state __inc_zone_page_state
+#define dec_zone_page_state __dec_zone_page_state
+#define mod_zone_page_state __mod_zone_page_state
+
+static inline void refresh_cpu_vm_stats(int cpu) { }
+static inline void refresh_vm_stats(void) { }
+#endif
+
+#endif /* _LINUX_VMSTAT_H */
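A kernel-style sketch of the new counter API from a caller's perspective follows. The helper names and calling context are invented, but the pattern is the one this header defines: __ variants for callers that already prevent preemption or run with interrupts off, and readers that tolerate unfolded per-CPU differentials.

/* Invented example: account pages entering/leaving the page cache. */
static void cache_page_accounting(struct page *page, int added)
{
	/* __ variants assume the caller is already non-preemptible
	 * (e.g. holding the mapping's lock with interrupts off). */
	if (added)
		__inc_zone_page_state(page, NR_FILE_PAGES);
	else
		__dec_zone_page_state(page, NR_FILE_PAGES);
}

/* Stands in for the removed get_page_cache_size(); this is the same
 * substitution the rest of the patch makes. Approximate under SMP,
 * since per-CPU deltas may not be folded in yet, which is why
 * global_page_state() clamps negative sums to zero. */
static unsigned long cache_pages(void)
{
	return global_page_state(NR_FILE_PAGES);
}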