-rw-r--r--Documentation/cachetlb.txt21
-rw-r--r--Documentation/drivers/edac/edac.txt34
-rw-r--r--Documentation/networking/vortex.txt81
-rw-r--r--Documentation/sound/alsa/DocBook/writing-an-alsa-driver.tmpl2
-rw-r--r--MAINTAINERS44
-rw-r--r--arch/alpha/Kconfig8
-rw-r--r--arch/alpha/kernel/osf_sys.c1
-rw-r--r--arch/alpha/lib/ev6-memchr.S2
-rw-r--r--arch/alpha/lib/fpreg.c8
-rw-r--r--arch/arm/Kconfig4
-rw-r--r--arch/arm26/Kconfig4
-rw-r--r--arch/arm26/kernel/traps.c12
-rw-r--r--arch/cris/Kconfig8
-rw-r--r--arch/frv/Kconfig4
-rw-r--r--arch/frv/mm/mmu-context.c6
-rw-r--r--arch/h8300/Kconfig8
-rw-r--r--arch/i386/Kconfig4
-rw-r--r--arch/i386/Makefile9
-rw-r--r--arch/i386/boot/Makefile36
-rw-r--r--arch/i386/kernel/acpi/boot.c8
-rw-r--r--arch/i386/kernel/cpu/cpufreq/powernow-k8.c9
-rw-r--r--arch/i386/kernel/cpu/mtrr/main.c13
-rw-r--r--arch/i386/kernel/dmi_scan.c90
-rw-r--r--arch/i386/kernel/efi.c23
-rw-r--r--arch/i386/kernel/kprobes.c253
-rw-r--r--arch/i386/kernel/microcode.c7
-rw-r--r--arch/i386/kernel/process.c8
-rw-r--r--arch/i386/kernel/setup.c8
-rw-r--r--arch/i386/kernel/traps.c17
-rw-r--r--arch/ia64/Kconfig8
-rw-r--r--arch/ia64/ia32/ia32priv.h4
-rw-r--r--arch/ia64/ia32/sys_ia32.c75
-rw-r--r--arch/ia64/kernel/Makefile3
-rw-r--r--arch/ia64/kernel/acpi.c6
-rw-r--r--arch/ia64/kernel/efi.c62
-rw-r--r--arch/ia64/kernel/kprobes.c51
-rw-r--r--arch/ia64/kernel/mca.c3
-rw-r--r--arch/ia64/kernel/process.c8
-rw-r--r--arch/ia64/kernel/setup.c10
-rw-r--r--arch/ia64/lib/Makefile2
-rw-r--r--arch/ia64/lib/bitop.c88
-rw-r--r--arch/ia64/mm/Makefile2
-rw-r--r--arch/ia64/mm/ioremap.c43
-rw-r--r--arch/ia64/sn/kernel/setup.c5
-rw-r--r--arch/m32r/Kconfig8
-rw-r--r--arch/m68k/Kconfig4
-rw-r--r--arch/m68k/bvme6000/config.c2
-rw-r--r--arch/m68knommu/Kconfig8
-rw-r--r--arch/mips/Kconfig8
-rw-r--r--arch/mips/kernel/linux32.c74
-rw-r--r--arch/mips/kernel/scall64-n32.S2
-rw-r--r--arch/mips/kernel/scall64-o32.S2
-rw-r--r--arch/mips/lasat/sysctl.c63
-rw-r--r--arch/parisc/Kconfig8
-rw-r--r--arch/parisc/kernel/sys_parisc32.c58
-rw-r--r--arch/parisc/kernel/syscall_table.S2
-rw-r--r--arch/powerpc/Kconfig4
-rw-r--r--arch/powerpc/kernel/kprobes.c66
-rw-r--r--arch/powerpc/kernel/process.c2
-rw-r--r--arch/powerpc/kernel/sys_ppc32.c73
-rw-r--r--arch/powerpc/mm/imalloc.c18
-rw-r--r--arch/powerpc/platforms/cell/spu_base.c22
-rw-r--r--arch/powerpc/platforms/powermac/cpufreq_64.c7
-rw-r--r--arch/ppc/Kconfig4
-rw-r--r--arch/s390/Kconfig4
-rw-r--r--arch/s390/crypto/crypt_s390_query.c2
-rw-r--r--arch/s390/kernel/compat_linux.c74
-rw-r--r--arch/s390/kernel/compat_wrapper.S8
-rw-r--r--arch/s390/kernel/syscalls.S2
-rw-r--r--arch/sh/Kconfig8
-rw-r--r--arch/sh64/Kconfig8
-rw-r--r--arch/sparc/Kconfig8
-rw-r--r--arch/sparc64/Kconfig8
-rw-r--r--arch/sparc64/kernel/kprobes.c69
-rw-r--r--arch/sparc64/kernel/sparc64_ksyms.c5
-rw-r--r--arch/sparc64/kernel/sys_sparc32.c74
-rw-r--r--arch/sparc64/kernel/systbls.S2
-rw-r--r--arch/sparc64/lib/Makefile2
-rw-r--r--arch/sparc64/lib/find_bit.c127
-rw-r--r--arch/um/Kconfig.i3865
-rw-r--r--arch/um/Kconfig.x86_645
-rw-r--r--arch/v850/Kconfig6
-rw-r--r--arch/x86_64/Kconfig4
-rw-r--r--arch/x86_64/Makefile17
-rw-r--r--arch/x86_64/boot/Makefile36
-rw-r--r--arch/x86_64/ia32/ia32entry.S2
-rw-r--r--arch/x86_64/ia32/sys_ia32.c77
-rw-r--r--arch/x86_64/kernel/kprobes.c65
-rw-r--r--arch/x86_64/kernel/process.c9
-rw-r--r--arch/xtensa/Kconfig8
-rw-r--r--block/Kconfig9
-rw-r--r--block/cfq-iosched.c2
-rw-r--r--drivers/acpi/osl.c64
-rw-r--r--drivers/acpi/processor_core.c2
-rw-r--r--drivers/acpi/tables.c3
-rw-r--r--drivers/block/Kconfig15
-rw-r--r--drivers/block/aoe/aoeblk.c4
-rw-r--r--drivers/block/floppy.c23
-rw-r--r--drivers/block/loop.c4
-rw-r--r--drivers/block/pktcdvd.c26
-rw-r--r--drivers/char/Kconfig1
-rw-r--r--drivers/char/ipmi/ipmi_devintf.c48
-rw-r--r--drivers/char/ipmi/ipmi_msghandler.c554
-rw-r--r--drivers/char/ipmi/ipmi_poweroff.c2
-rw-r--r--drivers/char/ipmi/ipmi_si_intf.c1057
-rw-r--r--drivers/char/ipmi/ipmi_si_sm.h3
-rw-r--r--drivers/char/ipmi/ipmi_watchdog.c2
-rw-r--r--drivers/char/mem.c18
-rw-r--r--drivers/char/tlclk.c1
-rw-r--r--drivers/edac/Kconfig10
-rw-r--r--drivers/edac/amd76x_edac.c126
-rw-r--r--drivers/edac/e752x_edac.c354
-rw-r--r--drivers/edac/e7xxx_edac.c228
-rw-r--r--drivers/edac/edac_mc.c808
-rw-r--r--drivers/edac/edac_mc.h133
-rw-r--r--drivers/edac/i82860_edac.c127
-rw-r--r--drivers/edac/i82875p_edac.c208
-rw-r--r--drivers/edac/r82600_edac.c140
-rw-r--r--drivers/firmware/efivars.c28
-rw-r--r--drivers/firmware/pcdp.c19
-rw-r--r--drivers/ieee1394/highlevel.c3
-rw-r--r--drivers/input/touchscreen/ads7846.c2
-rw-r--r--drivers/isdn/Makefile1
-rw-r--r--drivers/isdn/gigaset/Kconfig42
-rw-r--r--drivers/isdn/gigaset/Makefile6
-rw-r--r--drivers/isdn/gigaset/asyncdata.c597
-rw-r--r--drivers/isdn/gigaset/bas-gigaset.c2365
-rw-r--r--drivers/isdn/gigaset/common.c1203
-rw-r--r--drivers/isdn/gigaset/ev-layer.c1983
-rw-r--r--drivers/isdn/gigaset/gigaset.h938
-rw-r--r--drivers/isdn/gigaset/i4l.c567
-rw-r--r--drivers/isdn/gigaset/interface.c718
-rw-r--r--drivers/isdn/gigaset/isocdata.c1009
-rw-r--r--drivers/isdn/gigaset/proc.c81
-rw-r--r--drivers/isdn/gigaset/usb-gigaset.c1008
-rw-r--r--drivers/isdn/hardware/avm/avmcard.h4
-rw-r--r--drivers/isdn/i4l/Kconfig1
-rw-r--r--drivers/macintosh/smu.c9
-rw-r--r--drivers/md/bitmap.c14
-rw-r--r--drivers/md/dm-crypt.c20
-rw-r--r--drivers/md/dm-io.c13
-rw-r--r--drivers/md/dm-mpath.c3
-rw-r--r--drivers/md/dm-raid1.c14
-rw-r--r--drivers/md/dm-snap.c3
-rw-r--r--drivers/md/dm.c6
-rw-r--r--drivers/md/kcopyd.c19
-rw-r--r--drivers/md/multipath.c17
-rw-r--r--drivers/message/i2o/i2o_block.c7
-rw-r--r--drivers/net/3c59x.c245
-rw-r--r--drivers/net/sis900.c4
-rw-r--r--drivers/net/wan/dscc4.c7
-rw-r--r--drivers/parport/share.c19
-rw-r--r--drivers/pci/hotplug/rpadlpar_core.c12
-rw-r--r--drivers/pci/hotplug/sgi_hotplug.c19
-rw-r--r--drivers/pnp/isapnp/core.c7
-rw-r--r--drivers/s390/char/raw3270.c39
-rw-r--r--drivers/s390/scsi/zfcp_aux.c60
-rw-r--r--drivers/scsi/iscsi_tcp.c4
-rw-r--r--drivers/scsi/lpfc/lpfc_mem.c22
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c3
-rw-r--r--drivers/scsi/scsi_lib.c5
-rw-r--r--drivers/telephony/phonedev.c21
-rw-r--r--fs/afs/file.c6
-rw-r--r--fs/bio.c25
-rw-r--r--fs/block_dev.c7
-rw-r--r--fs/buffer.c36
-rw-r--r--fs/cifs/cifsfs.c18
-rw-r--r--fs/cifs/file.c6
-rw-r--r--fs/cifs/inode.c6
-rw-r--r--fs/cifs/readdir.c8
-rw-r--r--fs/cramfs/inode.c31
-rw-r--r--fs/dcache.c14
-rw-r--r--fs/dcookies.c25
-rw-r--r--fs/direct-io.c27
-rw-r--r--fs/dnotify.c4
-rw-r--r--fs/eventpoll.c6
-rw-r--r--fs/exec.c2
-rw-r--r--fs/ext2/inode.c14
-rw-r--r--fs/ext3/balloc.c109
-rw-r--r--fs/ext3/dir.c5
-rw-r--r--fs/ext3/inode.c582
-rw-r--r--fs/ext3/super.c6
-rw-r--r--fs/fat/inode.c2
-rw-r--r--fs/fcntl.c4
-rw-r--r--fs/hfs/inode.c13
-rw-r--r--fs/hfsplus/inode.c13
-rw-r--r--fs/inode.c8
-rw-r--r--fs/inotify.c12
-rw-r--r--fs/jbd/transaction.c13
-rw-r--r--fs/jffs2/compr_zlib.c19
-rw-r--r--fs/jfs/inode.c5
-rw-r--r--fs/jfs/jfs_logmgr.c27
-rw-r--r--fs/jfs/jfs_metapage.c11
-rw-r--r--fs/lockd/host.c19
-rw-r--r--fs/lockd/svc.c17
-rw-r--r--fs/lockd/svcsubs.c17
-rw-r--r--fs/locks.c41
-rw-r--r--fs/mpage.c104
-rw-r--r--fs/namespace.c4
-rw-r--r--fs/nfs/callback.c11
-rw-r--r--fs/nfs/file.c3
-rw-r--r--fs/nfs/read.c6
-rw-r--r--fs/nfs/write.c12
-rw-r--r--fs/nfsd/nfs4state.c47
-rw-r--r--fs/ntfs/logfile.c4
-rw-r--r--fs/ntfs/mft.c2
-rw-r--r--fs/ntfs/ntfs.h29
-rw-r--r--fs/ocfs2/aops.c2
-rw-r--r--fs/ocfs2/journal.c10
-rw-r--r--fs/ocfs2/namei.c5
-rw-r--r--fs/partitions/devfs.c12
-rw-r--r--fs/pipe.c2
-rw-r--r--fs/proc/array.c5
-rw-r--r--fs/proc/generic.c32
-rw-r--r--fs/proc/proc_devtree.c2
-rw-r--r--fs/reiserfs/inode.c9
-rw-r--r--fs/reiserfs/prints.c2
-rw-r--r--fs/super.c7
-rw-r--r--fs/xfs/linux-2.6/xfs_aops.c15
-rw-r--r--fs/xfs/linux-2.6/xfs_super.c5
-rw-r--r--include/asm-alpha/bitops.h133
-rw-r--r--include/asm-alpha/fpu.h4
-rw-r--r--include/asm-arm/bitops.h175
-rw-r--r--include/asm-arm26/bitops.h146
-rw-r--r--include/asm-cris/bitops.h235
-rw-r--r--include/asm-frv/bitops.h174
-rw-r--r--include/asm-generic/bitops.h76
-rw-r--r--include/asm-generic/bitops/__ffs.h43
-rw-r--r--include/asm-generic/bitops/atomic.h191
-rw-r--r--include/asm-generic/bitops/ext2-atomic.h22
-rw-r--r--include/asm-generic/bitops/ext2-non-atomic.h18
-rw-r--r--include/asm-generic/bitops/ffs.h41
-rw-r--r--include/asm-generic/bitops/ffz.h12
-rw-r--r--include/asm-generic/bitops/find.h13
-rw-r--r--include/asm-generic/bitops/fls.h41
-rw-r--r--include/asm-generic/bitops/fls64.h14
-rw-r--r--include/asm-generic/bitops/hweight.h11
-rw-r--r--include/asm-generic/bitops/le.h53
-rw-r--r--include/asm-generic/bitops/minix-le.h17
-rw-r--r--include/asm-generic/bitops/minix.h15
-rw-r--r--include/asm-generic/bitops/non-atomic.h111
-rw-r--r--include/asm-generic/bitops/sched.h36
-rw-r--r--include/asm-h8300/bitops.h222
-rw-r--r--include/asm-h8300/types.h3
-rw-r--r--include/asm-i386/bitops.h55
-rw-r--r--include/asm-i386/kprobes.h6
-rw-r--r--include/asm-i386/stat.h3
-rw-r--r--include/asm-i386/types.h5
-rw-r--r--include/asm-ia64/bitops.h67
-rw-r--r--include/asm-ia64/dmi.h6
-rw-r--r--include/asm-ia64/io.h22
-rw-r--r--include/asm-ia64/sn/sn_sal.h2
-rw-r--r--include/asm-m32r/bitops.h457
-rw-r--r--include/asm-m68k/bitops.h42
-rw-r--r--include/asm-m68k/stat.h3
-rw-r--r--include/asm-m68knommu/bitops.h221
-rw-r--r--include/asm-mips/bitops.h465
-rw-r--r--include/asm-mips/types.h5
-rw-r--r--include/asm-parisc/bitops.h286
-rw-r--r--include/asm-powerpc/bitops.h105
-rw-r--r--include/asm-powerpc/types.h5
-rw-r--r--include/asm-s390/bitops.h48
-rw-r--r--include/asm-s390/types.h5
-rw-r--r--include/asm-sh/bitops.h348
-rw-r--r--include/asm-sh/stat.h8
-rw-r--r--include/asm-sh/thread_info.h2
-rw-r--r--include/asm-sh/types.h5
-rw-r--r--include/asm-sh64/bitops.h390
-rw-r--r--include/asm-sparc/bitops.h388
-rw-r--r--include/asm-sparc64/bitops.h219
-rw-r--r--include/asm-v850/bitops.h220
-rw-r--r--include/asm-x86_64/bitops.h42
-rw-r--r--include/asm-xtensa/bitops.h340
-rw-r--r--include/linux/bitops.h124
-rw-r--r--include/linux/buffer_head.h26
-rw-r--r--include/linux/compat.h28
-rw-r--r--include/linux/efi.h20
-rw-r--r--include/linux/ext3_fs.h11
-rw-r--r--include/linux/fs.h24
-rw-r--r--include/linux/gameport.h4
-rw-r--r--include/linux/gigaset_dev.h32
-rw-r--r--include/linux/highmem.h12
-rw-r--r--include/linux/hpet.h36
-rw-r--r--include/linux/hrtimer.h41
-rw-r--r--include/linux/i2o.h4
-rw-r--r--include/linux/ipmi.h3
-rw-r--r--include/linux/ipmi_msgdefs.h1
-rw-r--r--include/linux/ipmi_smi.h47
-rw-r--r--include/linux/jbd.h2
-rw-r--r--include/linux/ktime.h20
-rw-r--r--include/linux/mempool.h38
-rw-r--r--include/linux/proc_fs.h5
-rw-r--r--include/linux/sched.h1
-rw-r--r--include/linux/serio.h6
-rw-r--r--include/linux/smp.h6
-rw-r--r--include/linux/stat.h2
-rw-r--r--include/linux/statfs.h10
-rw-r--r--include/linux/time.h18
-rw-r--r--include/linux/timer.h3
-rw-r--r--include/linux/timex.h2
-rw-r--r--include/linux/types.h4
-rw-r--r--init/initramfs.c2
-rw-r--r--init/main.c19
-rw-r--r--ipc/compat.c2
-rw-r--r--ipc/mqueue.c4
-rw-r--r--ipc/msg.c18
-rw-r--r--ipc/sem.c34
-rw-r--r--ipc/shm.c30
-rw-r--r--ipc/util.c29
-rw-r--r--ipc/util.h4
-rw-r--r--kernel/compat.c59
-rw-r--r--kernel/fork.c2
-rw-r--r--kernel/hrtimer.c193
-rw-r--r--kernel/irq/manage.c1
-rw-r--r--kernel/itimer.c14
-rw-r--r--kernel/kprobes.c10
-rw-r--r--kernel/posix-timers.c67
-rw-r--r--kernel/power/swap.c7
-rw-r--r--kernel/sched.c9
-rw-r--r--kernel/time.c4
-rw-r--r--lib/Makefile1
-rw-r--r--lib/bitmap.c19
-rw-r--r--lib/find_next_bit.c177
-rw-r--r--lib/hweight.c53
-rw-r--r--mm/highmem.c23
-rw-r--r--mm/memory.c2
-rw-r--r--mm/mempool.c42
-rw-r--r--net/ipv4/ipvs/ip_vs_ctl.c11
-rw-r--r--net/ipv4/netfilter/ipt_hashlimit.c9
-rw-r--r--net/netlink/genetlink.c9
-rw-r--r--net/sunrpc/sched.c12
-rw-r--r--sound/oss/cmpci.c2
-rw-r--r--sound/oss/sonicvibes.c18
-rw-r--r--sound/oss/vwsnd.c40
334 files changed, 16759 insertions, 9072 deletions
diff --git a/Documentation/cachetlb.txt b/Documentation/cachetlb.txt
index 4ae418889b8..53245c429f7 100644
--- a/Documentation/cachetlb.txt
+++ b/Documentation/cachetlb.txt
@@ -362,6 +362,27 @@ maps this page at its virtual address.
likely that you will need to flush the instruction cache
for copy_to_user_page().
+ void flush_anon_page(struct page *page, unsigned long vmaddr)
+ When the kernel needs to access the contents of an anonymous
+ page, it calls this function (currently only
+ get_user_pages()). Note: flush_dcache_page() deliberately
+ doesn't work for an anonymous page. The default
+ implementation is a nop (and should remain so for all coherent
+ architectures). For incoherent architectures, it should flush
+ the cache of the page at vmaddr in the current user process.
+
+ void flush_kernel_dcache_page(struct page *page)
+	When the kernel needs to modify a user page it has obtained
+ with kmap, it calls this function after all modifications are
+ complete (but before kunmapping it) to bring the underlying
+ page up to date. It is assumed here that the user has no
+ incoherent cached copies (i.e. the original page was obtained
+ from a mechanism like get_user_pages()). The default
+ implementation is a nop and should remain so on all coherent
+ architectures. On incoherent architectures, this should flush
+ the kernel cache for page (using page_address(page)).
+
+
void flush_icache_range(unsigned long start, unsigned long end)
When the kernel stores into addresses that it will execute
out of (eg when loading modules), this function is called.
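
The flush_kernel_dcache_page() hook added above is a nop on coherent architectures; on an incoherent one it must write back the kernel alias of the page. A minimal sketch of what such an implementation might look like, assuming a hypothetical arch-private flush_dcache_range() helper (not something this patch defines):

void flush_kernel_dcache_page(struct page *page)
{
	unsigned long kaddr = (unsigned long)page_address(page);

	/* write back the kernel mapping of the page; helper is assumed */
	flush_dcache_range(kaddr, kaddr + PAGE_SIZE);
}
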
diff --git a/Documentation/drivers/edac/edac.txt b/Documentation/drivers/edac/edac.txt
index d37191fe568..70d96a62e5e 100644
--- a/Documentation/drivers/edac/edac.txt
+++ b/Documentation/drivers/edac/edac.txt
@@ -21,7 +21,7 @@ within the computer system. In the initial release, memory Correctable Errors
Detecting CE events, then harvesting those events and reporting them,
CAN be a predictor of future UE events. With CE events, the system can
-continue to operate, but with less safety. Preventive maintainence and
+continue to operate, but with less safety. Preventive maintenance and
proactive part replacement of memory DIMMs exhibiting CEs can reduce
the likelihood of the dreaded UE events and system 'panics'.
@@ -29,13 +29,13 @@ the likelihood of the dreaded UE events and system 'panics'.
In addition, PCI Bus Parity and SERR Errors are scanned for on PCI devices
in order to determine if errors are occurring on data transfers.
The presence of PCI Parity errors must be examined with a grain of salt.
-There are several addin adapters that do NOT follow the PCI specification
+There are several add-in adapters that do NOT follow the PCI specification
with regards to Parity generation and reporting. The specification says
the vendor should tie the parity status bits to 0 if they do not intend
to generate parity. Some vendors do not do this, and thus the parity bit
can "float" giving false positives.
-The PCI Parity EDAC device has the ability to "skip" known flakey
+The PCI Parity EDAC device has the ability to "skip" known flaky
cards during the parity scan. These are set by the parity "blacklist"
interface in the sysfs for PCI Parity. (See the PCI section in the sysfs
section below.) There is also a parity "whitelist" which is used as
@@ -101,7 +101,7 @@ Memory Controller (mc) Model
First a background on the memory controller's model abstracted in EDAC.
Each mc device controls a set of DIMM memory modules. These modules are
-layed out in a Chip-Select Row (csrowX) and Channel table (chX). There can
+laid out in a Chip-Select Row (csrowX) and Channel table (chX). There can
be multiple csrows and two channels.
Memory controllers allow for several csrows, with 8 csrows being a typical value.
@@ -131,7 +131,7 @@ for memory DIMMs:
DIMM_B1
Labels for these slots are usually silk screened on the motherboard. Slots
-labeled 'A' are channel 0 in this example. Slots labled 'B'
+labeled 'A' are channel 0 in this example. Slots labeled 'B'
are channel 1. Notice that there are two csrows possible on a
physical DIMM. These csrows are allocated their csrow assignment
based on the slot into which the memory DIMM is placed. Thus, when 1 DIMM
@@ -140,7 +140,7 @@ is placed in each Channel, the csrows cross both DIMMs.
Memory DIMMs come single or dual "ranked". A rank is a populated csrow.
Thus, 2 single ranked DIMMs, placed in slots DIMM_A0 and DIMM_B0 above
will have 1 csrow, csrow0. csrow1 will be empty. On the other hand,
-when 2 dual ranked DIMMs are similiaryly placed, then both csrow0 and
+when 2 dual ranked DIMMs are similarly placed, then both csrow0 and
csrow1 will be populated. The pattern repeats itself for csrow2 and
csrow3.
@@ -246,7 +246,7 @@ Module Version read-only attribute file:
'mc_version'
- The EDAC CORE modules's version and compile date are shown here to
+ The EDAC CORE module's version and compile date are shown here to
indicate what EDAC is running.
@@ -423,7 +423,7 @@ Total memory managed by this csrow attribute file:
'size_mb'
This attribute file displays, in count of megabytes, of memory
- that this csrow contatins.
+ that this csrow contains.
Memory Type attribute file:
@@ -557,7 +557,7 @@ On Header Type 00 devices the primary status is looked at
for any parity error regardless of whether Parity is enabled on the
device. (The spec indicates parity is generated in some cases).
On Header Type 01 bridges, the secondary status register is also
-looked at to see if parity ocurred on the bus on the other side of
+looked at to see if parity occurred on the bus on the other side of
the bridge.
@@ -588,7 +588,7 @@ Panic on PCI PARITY Error:
'panic_on_pci_parity'
- This control files enables or disables panic'ing when a parity
+ This control files enables or disables panicking when a parity
error has been detected.
@@ -616,12 +616,12 @@ PCI Device Whitelist:
This control file allows for an explicit list of PCI devices to be
scanned for parity errors. Only devices found on this list will
- be examined. The list is a line of hexadecimel VENDOR and DEVICE
+ be examined. The list is a line of hexadecimal VENDOR and DEVICE
ID tuples:
1022:7450,1434:16a6
- One or more can be inserted, seperated by a comma.
+ One or more can be inserted, separated by a comma.
To write the above list doing the following as one command line:
@@ -639,11 +639,11 @@ PCI Device Blacklist:
This control file allows for a list of PCI devices to be
skipped for scanning.
- The list is a line of hexadecimel VENDOR and DEVICE ID tuples:
+ The list is a line of hexadecimal VENDOR and DEVICE ID tuples:
1022:7450,1434:16a6
- One or more can be inserted, seperated by a comma.
+ One or more can be inserted, separated by a comma.
To write the above list doing the following as one command line:
@@ -651,14 +651,14 @@ PCI Device Blacklist:
> /sys/devices/system/edac/pci/pci_parity_blacklist
- To display what the whitelist current contatins,
+ To display what the whitelist currently contains,
simply 'cat' the same file.
=======================================================================
PCI Vendor and Devices IDs can be obtained with the lspci command. Using
the -n option lspci will display the vendor and device IDs. The system
-adminstrator will have to determine which devices should be scanned or
+administrator will have to determine which devices should be scanned or
skipped.
@@ -669,5 +669,5 @@ Turn OFF a whitelist by an empty echo command:
echo > /sys/devices/system/edac/pci/pci_parity_whitelist
-and any previous blacklist will be utililzed.
+and any previous blacklist will be utilized.
diff --git a/Documentation/networking/vortex.txt b/Documentation/networking/vortex.txt
index 3759acf95b2..6091e5f6794 100644
--- a/Documentation/networking/vortex.txt
+++ b/Documentation/networking/vortex.txt
@@ -24,36 +24,44 @@ Since kernel 2.3.99-pre6, this driver incorporates the support for the
This driver supports the following hardware:
- 3c590 Vortex 10Mbps
- 3c592 EISA 10mbps Demon/Vortex
- 3c597 EISA Fast Demon/Vortex
- 3c595 Vortex 100baseTx
- 3c595 Vortex 100baseT4
- 3c595 Vortex 100base-MII
- 3Com Vortex
- 3c900 Boomerang 10baseT
- 3c900 Boomerang 10Mbps Combo
- 3c900 Cyclone 10Mbps TPO
- 3c900B Cyclone 10Mbps T
- 3c900 Cyclone 10Mbps Combo
- 3c900 Cyclone 10Mbps TPC
- 3c900B-FL Cyclone 10base-FL
- 3c905 Boomerang 100baseTx
- 3c905 Boomerang 100baseT4
- 3c905B Cyclone 100baseTx
- 3c905B Cyclone 10/100/BNC
- 3c905B-FX Cyclone 100baseFx
- 3c905C Tornado
- 3c980 Cyclone
- 3cSOHO100-TX Hurricane
- 3c555 Laptop Hurricane
- 3c575 Boomerang CardBus
- 3CCFE575 Cyclone CardBus
- 3CCFE575CT Cyclone CardBus
- 3CCFE656 Cyclone CardBus
- 3CCFEM656 Cyclone CardBus
- 3c450 Cyclone/unknown
-
+ 3c590 Vortex 10Mbps
+ 3c592 EISA 10Mbps Demon/Vortex
+ 3c597 EISA Fast Demon/Vortex
+ 3c595 Vortex 100baseTx
+ 3c595 Vortex 100baseT4
+ 3c595 Vortex 100base-MII
+ 3c900 Boomerang 10baseT
+ 3c900 Boomerang 10Mbps Combo
+ 3c900 Cyclone 10Mbps TPO
+ 3c900 Cyclone 10Mbps Combo
+ 3c900 Cyclone 10Mbps TPC
+ 3c900B-FL Cyclone 10base-FL
+ 3c905 Boomerang 100baseTx
+ 3c905 Boomerang 100baseT4
+ 3c905B Cyclone 100baseTx
+ 3c905B Cyclone 10/100/BNC
+ 3c905B-FX Cyclone 100baseFx
+ 3c905C Tornado
+ 3c920B-EMB-WNM (ATI Radeon 9100 IGP)
+ 3c980 Cyclone
+ 3c980C Python-T
+ 3cSOHO100-TX Hurricane
+ 3c555 Laptop Hurricane
+ 3c556 Laptop Tornado
+ 3c556B Laptop Hurricane
+ 3c575 [Megahertz] 10/100 LAN CardBus
+ 3c575 Boomerang CardBus
+ 3CCFE575BT Cyclone CardBus
+ 3CCFE575CT Tornado CardBus
+ 3CCFE656 Cyclone CardBus
+ 3CCFEM656B Cyclone+Winmodem CardBus
+ 3CXFEM656C Tornado+Winmodem CardBus
+ 3c450 HomePNA Tornado
+ 3c920 Tornado
+ 3c982 Hydra Dual Port A
+ 3c982 Hydra Dual Port B
+ 3c905B-T4
+ 3c920B-EMB-WNM Tornado
Module parameters
=================
@@ -293,11 +301,6 @@ Donald's wake-on-LAN page:
http://www.scyld.com/wakeonlan.html
-3Com's documentation for many NICs, including the ones supported by
-this driver is available at
-
- http://support.3com.com/partners/developer/developer_form.html
-
3Com's DOS-based application for setting up the NICs EEPROMs:
ftp://ftp.3com.com/pub/nic/3c90x/3c90xx2.exe
@@ -312,10 +315,10 @@ Autonegotiation notes
---------------------
The driver uses a one-minute heartbeat for adapting to changes in
- the external LAN environment. This means that when, for example, a
- machine is unplugged from a hubbed 10baseT LAN plugged into a
- switched 100baseT LAN, the throughput will be quite dreadful for up
- to sixty seconds. Be patient.
+ the external LAN environment if link is up and 5 seconds if link is down.
+ This means that when, for example, a machine is unplugged from a hubbed
+ 10baseT LAN plugged into a switched 100baseT LAN, the throughput
+ will be quite dreadful for up to sixty seconds. Be patient.
Cisco interoperability note from Walter Wong <wcw+@CMU.EDU>:
diff --git a/Documentation/sound/alsa/DocBook/writing-an-alsa-driver.tmpl b/Documentation/sound/alsa/DocBook/writing-an-alsa-driver.tmpl
index 6dc9d9f622c..6feef9e82b6 100644
--- a/Documentation/sound/alsa/DocBook/writing-an-alsa-driver.tmpl
+++ b/Documentation/sound/alsa/DocBook/writing-an-alsa-driver.tmpl
@@ -2836,7 +2836,7 @@ struct _snd_pcm_runtime {
<para>
Note that this callback became non-atomic since the recent version.
- You can use schedule-related fucntions safely in this callback now.
+ You can use schedule-related functions safely in this callback now.
</para>
<para>
diff --git a/MAINTAINERS b/MAINTAINERS
index 4e8fbbc5566..f27846734b0 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -882,13 +882,34 @@ W: http://ebtables.sourceforge.net/
S: Maintained
EDAC-CORE
-P: Doug Thompson
-M: norsk5@xmission.com, dthompson@linuxnetworx.com
-P: Dave Peterson
-M: dsp@llnl.gov, dave_peterson@pobox.com
-L: bluesmoke-devel@lists.sourceforge.net
-W: bluesmoke.sourceforge.net
-S: Maintained
+P: Doug Thompson
+M: norsk5@xmission.com, dthompson@linuxnetworx.com
+P: Dave Peterson
+M: dsp@llnl.gov, dave_peterson@pobox.com
+L: bluesmoke-devel@lists.sourceforge.net
+W: bluesmoke.sourceforge.net
+S: Maintained
+
+EDAC-E752X
+P: Dave Peterson
+M: dsp@llnl.gov, dave_peterson@pobox.com
+L: bluesmoke-devel@lists.sourceforge.net
+W: bluesmoke.sourceforge.net
+S: Maintained
+
+EDAC-E7XXX
+P: Dave Peterson
+M: dsp@llnl.gov, dave_peterson@pobox.com
+L: bluesmoke-devel@lists.sourceforge.net
+W: bluesmoke.sourceforge.net
+S: Maintained
+
+EDAC-R82600
+P: Tim Small
+M: tim@buttersideup.com
+L: bluesmoke-devel@lists.sourceforge.net
+W: bluesmoke.sourceforge.net
+S: Maintained
EEPRO100 NETWORK DRIVER
P: Andrey V. Savochkin
@@ -1039,6 +1060,15 @@ M: khc@pm.waw.pl
W: http://www.kernel.org/pub/linux/utils/net/hdlc/
S: Maintained
+GIGASET ISDN DRIVERS
+P: Hansjoerg Lipp
+M: hjlipp@web.de
+P: Tilman Schmidt
+M: tilman@imap.cc
+L: gigaset307x-common@lists.sourceforge.net
+W: http://gigaset307x.sourceforge.net/
+S: Maintained
+
HARDWARE MONITORING
P: Jean Delvare
M: khali@linux-fr.org
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index eedf41bf705..9bef61b3036 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -25,6 +25,10 @@ config RWSEM_XCHGADD_ALGORITHM
bool
default y
+config GENERIC_FIND_NEXT_BIT
+ bool
+ default y
+
config GENERIC_CALIBRATE_DELAY
bool
default y
@@ -447,6 +451,10 @@ config ALPHA_IRONGATE
depends on ALPHA_NAUTILUS
default y
+config GENERIC_HWEIGHT
+ bool
+ default y if !ALPHA_EV6 && !ALPHA_EV67
+
config ALPHA_AVANTI
bool
depends on ALPHA_XL || ALPHA_AVANTI_CH
diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
index 7fb14f42a12..31afe3d91ac 100644
--- a/arch/alpha/kernel/osf_sys.c
+++ b/arch/alpha/kernel/osf_sys.c
@@ -821,7 +821,6 @@ osf_setsysinfo(unsigned long op, void __user *buffer, unsigned long nbytes,
affects all sorts of things, like timeval and itimerval. */
extern struct timezone sys_tz;
-extern int do_adjtimex(struct timex *);
struct timeval32
{
diff --git a/arch/alpha/lib/ev6-memchr.S b/arch/alpha/lib/ev6-memchr.S
index a8e843dbcc2..1a5f71b9d8b 100644
--- a/arch/alpha/lib/ev6-memchr.S
+++ b/arch/alpha/lib/ev6-memchr.S
@@ -84,7 +84,7 @@ $last_quad:
beq $2, $not_found # U : U L U L
$found_it:
-#if defined(__alpha_fix__) && defined(__alpha_cix__)
+#ifdef CONFIG_ALPHA_EV67
/*
* Since we are guaranteed to have set one of the bits, we don't
* have to worry about coming back with a 0x40 out of cttz...
diff --git a/arch/alpha/lib/fpreg.c b/arch/alpha/lib/fpreg.c
index 97c4d9d7a4d..05017ba34c3 100644
--- a/arch/alpha/lib/fpreg.c
+++ b/arch/alpha/lib/fpreg.c
@@ -4,7 +4,7 @@
* (C) Copyright 1998 Linus Torvalds
*/
-#if defined(__alpha_cix__) || defined(__alpha_fix__)
+#if defined(CONFIG_ALPHA_EV6) || defined(CONFIG_ALPHA_EV67)
#define STT(reg,val) asm volatile ("ftoit $f"#reg",%0" : "=r"(val));
#else
#define STT(reg,val) asm volatile ("stt $f"#reg",%0" : "=m"(val));
@@ -53,7 +53,7 @@ alpha_read_fp_reg (unsigned long reg)
return val;
}
-#if defined(__alpha_cix__) || defined(__alpha_fix__)
+#if defined(CONFIG_ALPHA_EV6) || defined(CONFIG_ALPHA_EV67)
#define LDT(reg,val) asm volatile ("itoft %0,$f"#reg : : "r"(val));
#else
#define LDT(reg,val) asm volatile ("ldt $f"#reg",%0" : : "m"(val));
@@ -98,7 +98,7 @@ alpha_write_fp_reg (unsigned long reg, unsigned long val)
}
}
-#if defined(__alpha_cix__) || defined(__alpha_fix__)
+#if defined(CONFIG_ALPHA_EV6) || defined(CONFIG_ALPHA_EV67)
#define STS(reg,val) asm volatile ("ftois $f"#reg",%0" : "=r"(val));
#else
#define STS(reg,val) asm volatile ("sts $f"#reg",%0" : "=m"(val));
@@ -147,7 +147,7 @@ alpha_read_fp_reg_s (unsigned long reg)
return val;
}
-#if defined(__alpha_cix__) || defined(__alpha_fix__)
+#if defined(CONFIG_ALPHA_EV6) || defined(CONFIG_ALPHA_EV67)
#define LDS(reg,val) asm volatile ("itofs %0,$f"#reg : : "r"(val));
#else
#define LDS(reg,val) asm volatile ("lds $f"#reg",%0" : : "m"(val));
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 0dd24ebdf6a..bf2e72698d0 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -53,6 +53,10 @@ config RWSEM_GENERIC_SPINLOCK
config RWSEM_XCHGADD_ALGORITHM
bool
+config GENERIC_HWEIGHT
+ bool
+ default y
+
config GENERIC_CALIBRATE_DELAY
bool
default y
diff --git a/arch/arm26/Kconfig b/arch/arm26/Kconfig
index dee23d87fc5..cf4ebf4c274 100644
--- a/arch/arm26/Kconfig
+++ b/arch/arm26/Kconfig
@@ -41,6 +41,10 @@ config RWSEM_GENERIC_SPINLOCK
config RWSEM_XCHGADD_ALGORITHM
bool
+config GENERIC_HWEIGHT
+ bool
+ default y
+
config GENERIC_CALIBRATE_DELAY
bool
default y
diff --git a/arch/arm26/kernel/traps.c b/arch/arm26/kernel/traps.c
index 5847ea5d774..a79de041b50 100644
--- a/arch/arm26/kernel/traps.c
+++ b/arch/arm26/kernel/traps.c
@@ -34,7 +34,7 @@
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
-#include <asm/semaphore.h>
+#include <linux/mutex.h>
#include "ptrace.h"
@@ -207,19 +207,19 @@ void die_if_kernel(const char *str, struct pt_regs *regs, int err)
die(str, regs, err);
}
-static DECLARE_MUTEX(undef_sem);
+static DEFINE_MUTEX(undef_mutex);
static int (*undef_hook)(struct pt_regs *);
int request_undef_hook(int (*fn)(struct pt_regs *))
{
int ret = -EBUSY;
- down(&undef_sem);
+ mutex_lock(&undef_mutex);
if (undef_hook == NULL) {
undef_hook = fn;
ret = 0;
}
- up(&undef_sem);
+ mutex_unlock(&undef_mutex);
return ret;
}
@@ -228,12 +228,12 @@ int release_undef_hook(int (*fn)(struct pt_regs *))
{
int ret = -EINVAL;
- down(&undef_sem);
+ mutex_lock(&undef_mutex);
if (undef_hook == fn) {
undef_hook = NULL;
ret = 0;
}
- up(&undef_sem);
+ mutex_unlock(&undef_mutex);
return ret;
}
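
The conversion above from a declared semaphore to a mutex is the same idiom applied repeatedly elsewhere in this patch (mtrr, microcode, powernow-k8); a sketch of the pattern with hypothetical names:

#include <linux/mutex.h>

static DEFINE_MUTEX(example_mutex);	/* was: static DECLARE_MUTEX(example_sem); */

static int example_update(void)
{
	int ret = -EBUSY;

	mutex_lock(&example_mutex);	/* was: down(&example_sem); */
	/* ... critical section ... */
	ret = 0;
	mutex_unlock(&example_mutex);	/* was: up(&example_sem); */
	return ret;
}
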
diff --git a/arch/cris/Kconfig b/arch/cris/Kconfig
index b8326194973..856b665020e 100644
--- a/arch/cris/Kconfig
+++ b/arch/cris/Kconfig
@@ -16,6 +16,14 @@ config RWSEM_GENERIC_SPINLOCK
config RWSEM_XCHGADD_ALGORITHM
bool
+config GENERIC_FIND_NEXT_BIT
+ bool
+ default y
+
+config GENERIC_HWEIGHT
+ bool
+ default y
+
config GENERIC_CALIBRATE_DELAY
bool
default y
diff --git a/arch/frv/Kconfig b/arch/frv/Kconfig
index e0838371237..95a3892b8d1 100644
--- a/arch/frv/Kconfig
+++ b/arch/frv/Kconfig
@@ -17,6 +17,10 @@ config GENERIC_FIND_NEXT_BIT
bool
default y
+config GENERIC_HWEIGHT
+ bool
+ default y
+
config GENERIC_CALIBRATE_DELAY
bool
default n
diff --git a/arch/frv/mm/mmu-context.c b/arch/frv/mm/mmu-context.c
index f2c6866fc88..1530a4111e6 100644
--- a/arch/frv/mm/mmu-context.c
+++ b/arch/frv/mm/mmu-context.c
@@ -54,9 +54,9 @@ static unsigned get_cxn(mm_context_t *ctx)
/* find the first unallocated context number
* - 0 is reserved for the kernel
*/
- cxn = find_next_zero_bit(&cxn_bitmap, NR_CXN, 1);
+ cxn = find_next_zero_bit(cxn_bitmap, NR_CXN, 1);
if (cxn < NR_CXN) {
- set_bit(cxn, &cxn_bitmap);
+ set_bit(cxn, cxn_bitmap);
}
else {
/* none remaining - need to steal someone else's cxn */
@@ -138,7 +138,7 @@ void destroy_context(struct mm_struct *mm)
cxn_pinned = -1;
list_del_init(&ctx->id_link);
- clear_bit(ctx->id, &cxn_bitmap);
+ clear_bit(ctx->id, cxn_bitmap);
__flush_tlb_mm(ctx->id);
ctx->id = 0;
}
diff --git a/arch/h8300/Kconfig b/arch/h8300/Kconfig
index 98308b018a3..cabf0bfffc5 100644
--- a/arch/h8300/Kconfig
+++ b/arch/h8300/Kconfig
@@ -29,6 +29,14 @@ config RWSEM_XCHGADD_ALGORITHM
bool
default n
+config GENERIC_FIND_NEXT_BIT
+ bool
+ default y
+
+config GENERIC_HWEIGHT
+ bool
+ default y
+
config GENERIC_CALIBRATE_DELAY
bool
default y
diff --git a/arch/i386/Kconfig b/arch/i386/Kconfig
index b008fb0cd7b..f7db71d0b91 100644
--- a/arch/i386/Kconfig
+++ b/arch/i386/Kconfig
@@ -37,6 +37,10 @@ config GENERIC_IOMAP
bool
default y
+config GENERIC_HWEIGHT
+ bool
+ default y
+
config ARCH_MAY_HAVE_PC_FDC
bool
default y
diff --git a/arch/i386/Makefile b/arch/i386/Makefile
index c848a5b3039..3e4adb1e224 100644
--- a/arch/i386/Makefile
+++ b/arch/i386/Makefile
@@ -103,7 +103,7 @@ AFLAGS += $(mflags-y)
boot := arch/i386/boot
PHONY += zImage bzImage compressed zlilo bzlilo \
- zdisk bzdisk fdimage fdimage144 fdimage288 install
+ zdisk bzdisk fdimage fdimage144 fdimage288 isoimage install
all: bzImage
@@ -122,7 +122,7 @@ zlilo bzlilo: vmlinux
zdisk bzdisk: vmlinux
$(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(KBUILD_IMAGE) zdisk
-fdimage fdimage144 fdimage288: vmlinux
+fdimage fdimage144 fdimage288 isoimage: vmlinux
$(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(KBUILD_IMAGE) $@
install:
@@ -139,6 +139,9 @@ define archhelp
echo ' install to $$(INSTALL_PATH) and run lilo'
echo ' bzdisk - Create a boot floppy in /dev/fd0'
echo ' fdimage - Create a boot floppy image'
+ echo ' isoimage - Create a boot CD-ROM image'
endef
-CLEAN_FILES += arch/$(ARCH)/boot/fdimage arch/$(ARCH)/boot/mtools.conf
+CLEAN_FILES += arch/$(ARCH)/boot/fdimage \
+ arch/$(ARCH)/boot/image.iso \
+ arch/$(ARCH)/boot/mtools.conf
diff --git a/arch/i386/boot/Makefile b/arch/i386/boot/Makefile
index f136752563b..33e55476381 100644
--- a/arch/i386/boot/Makefile
+++ b/arch/i386/boot/Makefile
@@ -62,8 +62,12 @@ $(obj)/setup $(obj)/bootsect: %: %.o FORCE
$(obj)/compressed/vmlinux: FORCE
$(Q)$(MAKE) $(build)=$(obj)/compressed IMAGE_OFFSET=$(IMAGE_OFFSET) $@
-# Set this if you want to pass append arguments to the zdisk/fdimage kernel
+# Set this if you want to pass append arguments to the zdisk/fdimage/isoimage kernel
FDARGS =
+# Set this if you want an initrd included with the zdisk/fdimage/isoimage kernel
+FDINITRD =
+
+image_cmdline = default linux $(FDARGS) $(if $(FDINITRD),initrd=initrd.img,)
$(obj)/mtools.conf: $(src)/mtools.conf.in
sed -e 's|@OBJ@|$(obj)|g' < $< > $@
@@ -72,8 +76,11 @@ $(obj)/mtools.conf: $(src)/mtools.conf.in
zdisk: $(BOOTIMAGE) $(obj)/mtools.conf
MTOOLSRC=$(obj)/mtools.conf mformat a: ; sync
syslinux /dev/fd0 ; sync
- echo 'default linux $(FDARGS)' | \
+ echo '$(image_cmdline)' | \
MTOOLSRC=$(src)/mtools.conf mcopy - a:syslinux.cfg
+ if [ -f '$(FDINITRD)' ] ; then \
+ MTOOLSRC=$(obj)/mtools.conf mcopy '$(FDINITRD)' a:initrd.img ; \
+ fi
MTOOLSRC=$(obj)/mtools.conf mcopy $(BOOTIMAGE) a:linux ; sync
# These require being root or having syslinux 2.02 or higher installed
@@ -81,18 +88,39 @@ fdimage fdimage144: $(BOOTIMAGE) $(obj)/mtools.conf
dd if=/dev/zero of=$(obj)/fdimage bs=1024 count=1440
MTOOLSRC=$(obj)/mtools.conf mformat v: ; sync
syslinux $(obj)/fdimage ; sync
- echo 'default linux $(FDARGS)' | \
+ echo '$(image_cmdline)' | \
MTOOLSRC=$(obj)/mtools.conf mcopy - v:syslinux.cfg
+ if [ -f '$(FDINITRD)' ] ; then \
+ MTOOLSRC=$(obj)/mtools.conf mcopy '$(FDINITRD)' v:initrd.img ; \
+ fi
MTOOLSRC=$(obj)/mtools.conf mcopy $(BOOTIMAGE) v:linux ; sync
fdimage288: $(BOOTIMAGE) $(obj)/mtools.conf
dd if=/dev/zero of=$(obj)/fdimage bs=1024 count=2880
MTOOLSRC=$(obj)/mtools.conf mformat w: ; sync
syslinux $(obj)/fdimage ; sync
- echo 'default linux $(FDARGS)' | \
+ echo '$(image_cmdline)' | \
MTOOLSRC=$(obj)/mtools.conf mcopy - w:syslinux.cfg
+ if [ -f '$(FDINITRD)' ] ; then \
+ MTOOLSRC=$(obj)/mtools.conf mcopy '$(FDINITRD)' w:initrd.img ; \
+ fi
MTOOLSRC=$(obj)/mtools.conf mcopy $(BOOTIMAGE) w:linux ; sync
+isoimage: $(BOOTIMAGE)
+ -rm -rf $(obj)/isoimage
+ mkdir $(obj)/isoimage
+ cp `echo /usr/lib*/syslinux/isolinux.bin | awk '{ print $1; }'` \
+ $(obj)/isoimage
+ cp $(BOOTIMAGE) $(obj)/isoimage/linux
+ echo '$(image_cmdline)' > $(obj)/isoimage/isolinux.cfg
+ if [ -f '$(FDINITRD)' ] ; then \
+ cp '$(FDINITRD)' $(obj)/isoimage/initrd.img ; \
+ fi
+ mkisofs -J -r -o $(obj)/image.iso -b isolinux.bin -c boot.cat \
+ -no-emul-boot -boot-load-size 4 -boot-info-table \
+ $(obj)/isoimage
+ rm -rf $(obj)/isoimage
+
zlilo: $(BOOTIMAGE)
if [ -f $(INSTALL_PATH)/vmlinuz ]; then mv $(INSTALL_PATH)/vmlinuz $(INSTALL_PATH)/vmlinuz.old; fi
if [ -f $(INSTALL_PATH)/System.map ]; then mv $(INSTALL_PATH)/System.map $(INSTALL_PATH)/System.old; fi
diff --git a/arch/i386/kernel/acpi/boot.c b/arch/i386/kernel/acpi/boot.c
index f1a21945963..033066176b3 100644
--- a/arch/i386/kernel/acpi/boot.c
+++ b/arch/i386/kernel/acpi/boot.c
@@ -668,10 +668,10 @@ unsigned long __init acpi_find_rsdp(void)
unsigned long rsdp_phys = 0;
if (efi_enabled) {
- if (efi.acpi20)
- return __pa(efi.acpi20);
- else if (efi.acpi)
- return __pa(efi.acpi);
+ if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
+ return efi.acpi20;
+ else if (efi.acpi != EFI_INVALID_TABLE_ADDR)
+ return efi.acpi;
}
/*
* Scan memory looking for the RSDP signature. First search EBDA (low
diff --git a/arch/i386/kernel/cpu/cpufreq/powernow-k8.c b/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
index e5bc06480ff..1e70823e1cb 100644
--- a/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
@@ -40,6 +40,7 @@
#ifdef CONFIG_X86_POWERNOW_K8_ACPI
#include <linux/acpi.h>
+#include <linux/mutex.h>
#include <acpi/processor.h>
#endif
@@ -49,7 +50,7 @@
#include "powernow-k8.h"
/* serialize freq changes */
-static DECLARE_MUTEX(fidvid_sem);
+static DEFINE_MUTEX(fidvid_mutex);
static struct powernow_k8_data *powernow_data[NR_CPUS];
@@ -943,17 +944,17 @@ static int powernowk8_target(struct cpufreq_policy *pol, unsigned targfreq, unsi
if (cpufreq_frequency_table_target(pol, data->powernow_table, targfreq, relation, &newstate))
goto err_out;
- down(&fidvid_sem);
+ mutex_lock(&fidvid_mutex);
powernow_k8_acpi_pst_values(data, newstate);
if (transition_frequency(data, newstate)) {
printk(KERN_ERR PFX "transition frequency failed\n");
ret = 1;
- up(&fidvid_sem);
+ mutex_unlock(&fidvid_mutex);
goto err_out;
}
- up(&fidvid_sem);
+ mutex_unlock(&fidvid_mutex);
pol->cur = find_khz_freq_from_fid(data->currfid);
ret = 0;
diff --git a/arch/i386/kernel/cpu/mtrr/main.c b/arch/i386/kernel/cpu/mtrr/main.c
index 3b4618bed70..fff90bda473 100644
--- a/arch/i386/kernel/cpu/mtrr/main.c
+++ b/arch/i386/kernel/cpu/mtrr/main.c
@@ -36,6 +36,7 @@
#include <linux/pci.h>
#include <linux/smp.h>
#include <linux/cpu.h>
+#include <linux/mutex.h>
#include <asm/mtrr.h>
@@ -47,7 +48,7 @@
u32 num_var_ranges = 0;
unsigned int *usage_table;
-static DECLARE_MUTEX(mtrr_sem);
+static DEFINE_MUTEX(mtrr_mutex);
u32 size_or_mask, size_and_mask;
@@ -333,7 +334,7 @@ int mtrr_add_page(unsigned long base, unsigned long size,
/* No CPU hotplug when we change MTRR entries */
lock_cpu_hotplug();
/* Search for existing MTRR */
- down(&mtrr_sem);
+ mutex_lock(&mtrr_mutex);
for (i = 0; i < num_var_ranges; ++i) {
mtrr_if->get(i, &lbase, &lsize, &ltype);
if (base >= lbase + lsize)
@@ -371,7 +372,7 @@ int mtrr_add_page(unsigned long base, unsigned long size,
printk(KERN_INFO "mtrr: no more MTRRs available\n");
error = i;
out:
- up(&mtrr_sem);
+ mutex_unlock(&mtrr_mutex);
unlock_cpu_hotplug();
return error;
}
@@ -464,7 +465,7 @@ int mtrr_del_page(int reg, unsigned long base, unsigned long size)
max = num_var_ranges;
/* No CPU hotplug when we change MTRR entries */
lock_cpu_hotplug();
- down(&mtrr_sem);
+ mutex_lock(&mtrr_mutex);
if (reg < 0) {
/* Search for existing MTRR */
for (i = 0; i < max; ++i) {
@@ -503,7 +504,7 @@ int mtrr_del_page(int reg, unsigned long base, unsigned long size)
set_mtrr(reg, 0, 0, 0);
error = reg;
out:
- up(&mtrr_sem);
+ mutex_unlock(&mtrr_mutex);
unlock_cpu_hotplug();
return error;
}
@@ -685,7 +686,7 @@ void mtrr_ap_init(void)
if (!mtrr_if || !use_intel())
return;
/*
- * Ideally we should hold mtrr_sem here to avoid mtrr entries changed,
+ * Ideally we should hold mtrr_mutex here to avoid mtrr entries changed,
* but this routine will be called in cpu boot time, holding the lock
* breaks it. This routine is called in two cases: 1.very earily time
* of software resume, when there absolutely isn't mtrr entry changes;
diff --git a/arch/i386/kernel/dmi_scan.c b/arch/i386/kernel/dmi_scan.c
index ebc8dc116c4..5efceebc48d 100644
--- a/arch/i386/kernel/dmi_scan.c
+++ b/arch/i386/kernel/dmi_scan.c
@@ -3,6 +3,7 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/dmi.h>
+#include <linux/efi.h>
#include <linux/bootmem.h>
#include <linux/slab.h>
#include <asm/dmi.h>
@@ -185,47 +186,72 @@ static void __init dmi_decode(struct dmi_header *dm)
}
}
-void __init dmi_scan_machine(void)
+static int __init dmi_present(char __iomem *p)
{
u8 buf[15];
- char __iomem *p, *q;
+ memcpy_fromio(buf, p, 15);
+ if ((memcmp(buf, "_DMI_", 5) == 0) && dmi_checksum(buf)) {
+ u16 num = (buf[13] << 8) | buf[12];
+ u16 len = (buf[7] << 8) | buf[6];
+ u32 base = (buf[11] << 24) | (buf[10] << 16) |
+ (buf[9] << 8) | buf[8];
- /*
- * no iounmap() for that ioremap(); it would be a no-op, but it's
- * so early in setup that sucker gets confused into doing what
- * it shouldn't if we actually call it.
- */
- p = ioremap(0xF0000, 0x10000);
- if (p == NULL)
- goto out;
-
- for (q = p; q < p + 0x10000; q += 16) {
- memcpy_fromio(buf, q, 15);
- if ((memcmp(buf, "_DMI_", 5) == 0) && dmi_checksum(buf)) {
- u16 num = (buf[13] << 8) | buf[12];
- u16 len = (buf[7] << 8) | buf[6];
- u32 base = (buf[11] << 24) | (buf[10] << 16) |
- (buf[9] << 8) | buf[8];
-
- /*
- * DMI version 0.0 means that the real version is taken from
- * the SMBIOS version, which we don't know at this point.
- */
- if (buf[14] != 0)
- printk(KERN_INFO "DMI %d.%d present.\n",
- buf[14] >> 4, buf[14] & 0xF);
- else
- printk(KERN_INFO "DMI present.\n");
+ /*
+ * DMI version 0.0 means that the real version is taken from
+ * the SMBIOS version, which we don't know at this point.
+ */
+ if (buf[14] != 0)
+ printk(KERN_INFO "DMI %d.%d present.\n",
+ buf[14] >> 4, buf[14] & 0xF);
+ else
+ printk(KERN_INFO "DMI present.\n");
+ if (dmi_table(base,len, num, dmi_decode) == 0)
+ return 0;
+ }
+ return 1;
+}
- if (dmi_table(base,len, num, dmi_decode) == 0)
+void __init dmi_scan_machine(void)
+{
+ char __iomem *p, *q;
+ int rc;
+
+ if (efi_enabled) {
+ if (efi.smbios == EFI_INVALID_TABLE_ADDR)
+ goto out;
+
+ /* This is called as a core_initcall() because it isn't
+ * needed during early boot. This also means we can
+ * iounmap the space when we're done with it.
+ */
+ p = dmi_ioremap(efi.smbios, 32);
+ if (p == NULL)
+ goto out;
+
+ rc = dmi_present(p + 0x10); /* offset of _DMI_ string */
+ dmi_iounmap(p, 32);
+ if (!rc)
+ return;
+ }
+ else {
+ /*
+ * no iounmap() for that ioremap(); it would be a no-op, but
+ * it's so early in setup that sucker gets confused into doing
+ * what it shouldn't if we actually call it.
+ */
+ p = dmi_ioremap(0xF0000, 0x10000);
+ if (p == NULL)
+ goto out;
+
+ for (q = p; q < p + 0x10000; q += 16) {
+ rc = dmi_present(q);
+ if (!rc)
return;
}
}
-
-out: printk(KERN_INFO "DMI not present or invalid.\n");
+ out: printk(KERN_INFO "DMI not present or invalid.\n");
}
-
/**
* dmi_check_system - check system DMI data
* @list: array of dmi_system_id structures to match against
diff --git a/arch/i386/kernel/efi.c b/arch/i386/kernel/efi.c
index 7ec6cfa01fb..9202b67c4b2 100644
--- a/arch/i386/kernel/efi.c
+++ b/arch/i386/kernel/efi.c
@@ -361,7 +361,7 @@ void __init efi_init(void)
*/
c16 = (efi_char16_t *) boot_ioremap(efi.systab->fw_vendor, 2);
if (c16) {
- for (i = 0; i < sizeof(vendor) && *c16; ++i)
+ for (i = 0; i < (sizeof(vendor) - 1) && *c16; ++i)
vendor[i] = *c16++;
vendor[i] = '\0';
} else
@@ -381,29 +381,38 @@ void __init efi_init(void)
if (config_tables == NULL)
printk(KERN_ERR PFX "Could not map EFI Configuration Table!\n");
+ efi.mps = EFI_INVALID_TABLE_ADDR;
+ efi.acpi = EFI_INVALID_TABLE_ADDR;
+ efi.acpi20 = EFI_INVALID_TABLE_ADDR;
+ efi.smbios = EFI_INVALID_TABLE_ADDR;
+ efi.sal_systab = EFI_INVALID_TABLE_ADDR;
+ efi.boot_info = EFI_INVALID_TABLE_ADDR;
+ efi.hcdp = EFI_INVALID_TABLE_ADDR;
+ efi.uga = EFI_INVALID_TABLE_ADDR;
+
for (i = 0; i < num_config_tables; i++) {
if (efi_guidcmp(config_tables[i].guid, MPS_TABLE_GUID) == 0) {
- efi.mps = (void *)config_tables[i].table;
+ efi.mps = config_tables[i].table;
printk(KERN_INFO " MPS=0x%lx ", config_tables[i].table);
} else
if (efi_guidcmp(config_tables[i].guid, ACPI_20_TABLE_GUID) == 0) {
- efi.acpi20 = __va(config_tables[i].table);
+ efi.acpi20 = config_tables[i].table;
printk(KERN_INFO " ACPI 2.0=0x%lx ", config_tables[i].table);
} else
if (efi_guidcmp(config_tables[i].guid, ACPI_TABLE_GUID) == 0) {
- efi.acpi = __va(config_tables[i].table);
+ efi.acpi = config_tables[i].table;
printk(KERN_INFO " ACPI=0x%lx ", config_tables[i].table);
} else
if (efi_guidcmp(config_tables[i].guid, SMBIOS_TABLE_GUID) == 0) {
- efi.smbios = (void *) config_tables[i].table;
+ efi.smbios = config_tables[i].table;
printk(KERN_INFO " SMBIOS=0x%lx ", config_tables[i].table);
} else
if (efi_guidcmp(config_tables[i].guid, HCDP_TABLE_GUID) == 0) {
- efi.hcdp = (void *)config_tables[i].table;
+ efi.hcdp = config_tables[i].table;
printk(KERN_INFO " HCDP=0x%lx ", config_tables[i].table);
} else
if (efi_guidcmp(config_tables[i].guid, UGA_IO_PROTOCOL_GUID) == 0) {
- efi.uga = (void *)config_tables[i].table;
+ efi.uga = config_tables[i].table;
printk(KERN_INFO " UGA=0x%lx ", config_tables[i].table);
}
}
diff --git a/arch/i386/kernel/kprobes.c b/arch/i386/kernel/kprobes.c
index 7a59050242a..f19768789e8 100644
--- a/arch/i386/kernel/kprobes.c
+++ b/arch/i386/kernel/kprobes.c
@@ -35,12 +35,56 @@
#include <asm/cacheflush.h>
#include <asm/kdebug.h>
#include <asm/desc.h>
+#include <asm/uaccess.h>
void jprobe_return_end(void);
DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
+/* insert a jmp code */
+static inline void set_jmp_op(void *from, void *to)
+{
+ struct __arch_jmp_op {
+ char op;
+ long raddr;
+ } __attribute__((packed)) *jop;
+ jop = (struct __arch_jmp_op *)from;
+ jop->raddr = (long)(to) - ((long)(from) + 5);
+ jop->op = RELATIVEJUMP_INSTRUCTION;
+}
+
+/*
+ * returns non-zero if opcodes can be boosted.
+ */
+static inline int can_boost(kprobe_opcode_t opcode)
+{
+ switch (opcode & 0xf0 ) {
+ case 0x70:
+ return 0; /* can't boost conditional jump */
+ case 0x90:
+ /* can't boost call and pushf */
+ return opcode != 0x9a && opcode != 0x9c;
+ case 0xc0:
+ /* can't boost undefined opcodes and soft-interruptions */
+ return (0xc1 < opcode && opcode < 0xc6) ||
+ (0xc7 < opcode && opcode < 0xcc) || opcode == 0xcf;
+ case 0xd0:
+ /* can boost AA* and XLAT */
+ return (opcode == 0xd4 || opcode == 0xd5 || opcode == 0xd7);
+ case 0xe0:
+ /* can boost in/out and (may be) jmps */
+ return (0xe3 < opcode && opcode != 0xe8);
+ case 0xf0:
+ /* clear and set flags can be boost */
+ return (opcode == 0xf5 || (0xf7 < opcode && opcode < 0xfe));
+ default:
+ /* currently, can't boost 2 bytes opcodes */
+ return opcode != 0x0f;
+ }
+}
+
+
/*
* returns non-zero if opcode modifies the interrupt flag.
*/
@@ -65,6 +109,11 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
p->opcode = *p->addr;
+ if (can_boost(p->opcode)) {
+ p->ainsn.boostable = 0;
+ } else {
+ p->ainsn.boostable = -1;
+ }
return 0;
}
@@ -155,9 +204,13 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
{
struct kprobe *p;
int ret = 0;
- kprobe_opcode_t *addr = NULL;
- unsigned long *lp;
+ kprobe_opcode_t *addr;
struct kprobe_ctlblk *kcb;
+#ifdef CONFIG_PREEMPT
+ unsigned pre_preempt_count = preempt_count();
+#endif /* CONFIG_PREEMPT */
+
+ addr = (kprobe_opcode_t *)(regs->eip - sizeof(kprobe_opcode_t));
/*
* We don't want to be preempted for the entire
@@ -166,17 +219,6 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
preempt_disable();
kcb = get_kprobe_ctlblk();
- /* Check if the application is using LDT entry for its code segment and
- * calculate the address by reading the base address from the LDT entry.
- */
- if ((regs->xcs & 4) && (current->mm)) {
- lp = (unsigned long *) ((unsigned long)((regs->xcs >> 3) * 8)
- + (char *) current->mm->context.ldt);
- addr = (kprobe_opcode_t *) (get_desc_base(lp) + regs->eip -
- sizeof(kprobe_opcode_t));
- } else {
- addr = (kprobe_opcode_t *)(regs->eip - sizeof(kprobe_opcode_t));
- }
/* Check we're not actually recursing */
if (kprobe_running()) {
p = get_kprobe(addr);
@@ -252,6 +294,21 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
/* handler has already set things up, so skip ss setup */
return 1;
+ if (p->ainsn.boostable == 1 &&
+#ifdef CONFIG_PREEMPT
+ !(pre_preempt_count) && /*
+ * This enables booster when the direct
+ * execution path aren't preempted.
+ */
+#endif /* CONFIG_PREEMPT */
+ !p->post_handler && !p->break_handler ) {
+ /* Boost up -- we can execute copied instructions directly */
+ reset_current_kprobe();
+ regs->eip = (unsigned long)p->ainsn.insn;
+ preempt_enable_no_resched();
+ return 1;
+ }
+
ss_probe:
prepare_singlestep(p, regs);
kcb->kprobe_status = KPROBE_HIT_SS;
@@ -267,17 +324,44 @@ no_kprobe:
* here. When a retprobed function returns, this probe is hit and
* trampoline_probe_handler() runs, calling the kretprobe's handler.
*/
- void kretprobe_trampoline_holder(void)
+ void __kprobes kretprobe_trampoline_holder(void)
{
- asm volatile ( ".global kretprobe_trampoline\n"
+ asm volatile ( ".global kretprobe_trampoline\n"
"kretprobe_trampoline: \n"
- "nop\n");
- }
+ " pushf\n"
+ /* skip cs, eip, orig_eax, es, ds */
+ " subl $20, %esp\n"
+ " pushl %eax\n"
+ " pushl %ebp\n"
+ " pushl %edi\n"
+ " pushl %esi\n"
+ " pushl %edx\n"
+ " pushl %ecx\n"
+ " pushl %ebx\n"
+ " movl %esp, %eax\n"
+ " call trampoline_handler\n"
+ /* move eflags to cs */
+ " movl 48(%esp), %edx\n"
+ " movl %edx, 44(%esp)\n"
+ /* save true return address on eflags */
+ " movl %eax, 48(%esp)\n"
+ " popl %ebx\n"
+ " popl %ecx\n"
+ " popl %edx\n"
+ " popl %esi\n"
+ " popl %edi\n"
+ " popl %ebp\n"
+ " popl %eax\n"
+ /* skip eip, orig_eax, es, ds */
+ " addl $16, %esp\n"
+ " popf\n"
+ " ret\n");
+}
/*
- * Called when we hit the probe point at kretprobe_trampoline
+ * Called from kretprobe_trampoline
*/
-int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
+fastcall void *__kprobes trampoline_handler(struct pt_regs *regs)
{
struct kretprobe_instance *ri = NULL;
struct hlist_head *head;
@@ -306,8 +390,11 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
/* another task is sharing our hash bucket */
continue;
- if (ri->rp && ri->rp->handler)
+ if (ri->rp && ri->rp->handler){
+ __get_cpu_var(current_kprobe) = &ri->rp->kp;
ri->rp->handler(ri, regs);
+ __get_cpu_var(current_kprobe) = NULL;
+ }
orig_ret_address = (unsigned long)ri->ret_addr;
recycle_rp_inst(ri);
@@ -322,18 +409,10 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
}
BUG_ON(!orig_ret_address || (orig_ret_address == trampoline_address));
- regs->eip = orig_ret_address;
- reset_current_kprobe();
spin_unlock_irqrestore(&kretprobe_lock, flags);
- preempt_enable_no_resched();
- /*
- * By returning a non-zero value, we are telling
- * kprobe_handler() that we don't want the post_handler
- * to run (and have re-enabled preemption)
- */
- return 1;
+ return (void*)orig_ret_address;
}
/*
@@ -357,15 +436,17 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
* 2) If the single-stepped instruction was a call, the return address
* that is atop the stack is the address following the copied instruction.
* We need to make it the address following the original instruction.
+ *
+ * This function also checks instruction size for preparing direct execution.
*/
static void __kprobes resume_execution(struct kprobe *p,
struct pt_regs *regs, struct kprobe_ctlblk *kcb)
{
unsigned long *tos = (unsigned long *)&regs->esp;
- unsigned long next_eip = 0;
unsigned long copy_eip = (unsigned long)p->ainsn.insn;
unsigned long orig_eip = (unsigned long)p->addr;
+ regs->eflags &= ~TF_MASK;
switch (p->ainsn.insn[0]) {
case 0x9c: /* pushfl */
*tos &= ~(TF_MASK | IF_MASK);
@@ -375,37 +456,51 @@ static void __kprobes resume_execution(struct kprobe *p,
case 0xcb:
case 0xc2:
case 0xca:
- regs->eflags &= ~TF_MASK;
- /* eip is already adjusted, no more changes required*/
- return;
+ case 0xea: /* jmp absolute -- eip is correct */
+ /* eip is already adjusted, no more changes required */
+ p->ainsn.boostable = 1;
+ goto no_change;
case 0xe8: /* call relative - Fix return addr */
*tos = orig_eip + (*tos - copy_eip);
break;
case 0xff:
if ((p->ainsn.insn[1] & 0x30) == 0x10) {
/* call absolute, indirect */
- /* Fix return addr; eip is correct. */
- next_eip = regs->eip;
+ /*
+ * Fix return addr; eip is correct.
+ * But this is not boostable
+ */
*tos = orig_eip + (*tos - copy_eip);
+ goto no_change;
} else if (((p->ainsn.insn[1] & 0x31) == 0x20) || /* jmp near, absolute indirect */
((p->ainsn.insn[1] & 0x31) == 0x21)) { /* jmp far, absolute indirect */
- /* eip is correct. */
- next_eip = regs->eip;
+ /* eip is correct. And this is boostable */
+ p->ainsn.boostable = 1;
+ goto no_change;
}
- break;
- case 0xea: /* jmp absolute -- eip is correct */
- next_eip = regs->eip;
- break;
default:
break;
}
- regs->eflags &= ~TF_MASK;
- if (next_eip) {
- regs->eip = next_eip;
- } else {
- regs->eip = orig_eip + (regs->eip - copy_eip);
+ if (p->ainsn.boostable == 0) {
+ if ((regs->eip > copy_eip) &&
+ (regs->eip - copy_eip) + 5 < MAX_INSN_SIZE) {
+ /*
+ * These instructions can be executed directly if it
+ * jumps back to correct address.
+ */
+ set_jmp_op((void *)regs->eip,
+ (void *)orig_eip + (regs->eip - copy_eip));
+ p->ainsn.boostable = 1;
+ } else {
+ p->ainsn.boostable = -1;
+ }
}
+
+ regs->eip = orig_eip + (regs->eip - copy_eip);
+
+no_change:
+ return;
}
/*
@@ -453,15 +548,57 @@ static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
struct kprobe *cur = kprobe_running();
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
- if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
- return 1;
-
- if (kcb->kprobe_status & KPROBE_HIT_SS) {
- resume_execution(cur, regs, kcb);
+ switch(kcb->kprobe_status) {
+ case KPROBE_HIT_SS:
+ case KPROBE_REENTER:
+ /*
+ * We are here because the instruction being single
+ * stepped caused a page fault. We reset the current
+ * kprobe and the eip points back to the probe address
+ * and allow the page fault handler to continue as a
+ * normal page fault.
+ */
+ regs->eip = (unsigned long)cur->addr;
regs->eflags |= kcb->kprobe_old_eflags;
-
- reset_current_kprobe();
+ if (kcb->kprobe_status == KPROBE_REENTER)
+ restore_previous_kprobe(kcb);
+ else
+ reset_current_kprobe();
preempt_enable_no_resched();
+ break;
+ case KPROBE_HIT_ACTIVE:
+ case KPROBE_HIT_SSDONE:
+ /*
+ * We increment the nmissed count for accounting,
+ * we can also use npre/npostfault count for accouting
+ * these specific fault cases.
+ */
+ kprobes_inc_nmissed_count(cur);
+
+ /*
+ * We come here because instructions in the pre/post
+ * handler caused the page_fault, this could happen
+ * if handler tries to access user space by
+ * copy_from_user(), get_user() etc. Let the
+ * user-specified handler try to fix it first.
+ */
+ if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
+ return 1;
+
+ /*
+ * In case the user-specified fault handler returned
+ * zero, try to fix up.
+ */
+ if (fixup_exception(regs))
+ return 1;
+
+ /*
+ * fixup_exception() could not handle it,
+ * Let do_page_fault() fix it.
+ */
+ break;
+ default:
+ break;
}
return 0;
}
@@ -475,6 +612,9 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
struct die_args *args = (struct die_args *)data;
int ret = NOTIFY_DONE;
+ if (args->regs && user_mode(args->regs))
+ return ret;
+
switch (val) {
case DIE_INT3:
if (kprobe_handler(args->regs))
@@ -564,12 +704,7 @@ int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
return 0;
}
-static struct kprobe trampoline_p = {
- .addr = (kprobe_opcode_t *) &kretprobe_trampoline,
- .pre_handler = trampoline_probe_handler
-};
-
int __init arch_init_kprobes(void)
{
- return register_kprobe(&trampoline_p);
+ return 0;
}
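
The boosting path introduced above only applies to probes without post or break handlers. For context, a minimal probe module that can take that path might look like the sketch below; the probed symbol is a hypothetical stand-in, and the handler signature follows the 2.6-era i386 kprobes API:

#include <linux/module.h>
#include <linux/kprobes.h>

/* hypothetical probe target -- substitute any probeable kernel function */
extern void example_probed_function(void);

static int example_pre(struct kprobe *p, struct pt_regs *regs)
{
	printk(KERN_INFO "kprobe hit, eip=%lx\n", regs->eip);
	return 0;		/* let the (possibly boosted) copied insn run */
}

static struct kprobe example_kp = {
	.addr        = (kprobe_opcode_t *)example_probed_function,
	.pre_handler = example_pre,
	/* no post_handler or break_handler, so boosting is permitted */
};

static int __init example_init(void)
{
	return register_kprobe(&example_kp);
}

static void __exit example_exit(void)
{
	unregister_kprobe(&example_kp);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
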
diff --git a/arch/i386/kernel/microcode.c b/arch/i386/kernel/microcode.c
index 55bc365b875..dd780a00553 100644
--- a/arch/i386/kernel/microcode.c
+++ b/arch/i386/kernel/microcode.c
@@ -81,6 +81,7 @@
#include <linux/miscdevice.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
+#include <linux/mutex.h>
#include <asm/msr.h>
#include <asm/uaccess.h>
@@ -114,7 +115,7 @@ MODULE_LICENSE("GPL");
static DEFINE_SPINLOCK(microcode_update_lock);
/* no concurrent ->write()s are allowed on /dev/cpu/microcode */
-static DECLARE_MUTEX(microcode_sem);
+static DEFINE_MUTEX(microcode_mutex);
static void __user *user_buffer; /* user area microcode data buffer */
static unsigned int user_buffer_size; /* it's size */
@@ -444,7 +445,7 @@ static ssize_t microcode_write (struct file *file, const char __user *buf, size_
return -EINVAL;
}
- down(&microcode_sem);
+ mutex_lock(&microcode_mutex);
user_buffer = (void __user *) buf;
user_buffer_size = (int) len;
@@ -453,7 +454,7 @@ static ssize_t microcode_write (struct file *file, const char __user *buf, size_
if (!ret)
ret = (ssize_t)len;
- up(&microcode_sem);
+ mutex_unlock(&microcode_mutex);
return ret;
}
diff --git a/arch/i386/kernel/process.c b/arch/i386/kernel/process.c
index 299e6167408..24b3e745478 100644
--- a/arch/i386/kernel/process.c
+++ b/arch/i386/kernel/process.c
@@ -38,7 +38,6 @@
#include <linux/kallsyms.h>
#include <linux/ptrace.h>
#include <linux/random.h>
-#include <linux/kprobes.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
@@ -364,13 +363,6 @@ void exit_thread(void)
struct task_struct *tsk = current;
struct thread_struct *t = &tsk->thread;
- /*
- * Remove function-return probe instances associated with this task
- * and put them back on the free list. Do not insert an exit probe for
- * this function, it will be disabled by kprobe_flush_task if you do.
- */
- kprobe_flush_task(tsk);
-
/* The process may have allocated an io port bitmap... nuke it. */
if (unlikely(NULL != t->io_bitmap_ptr)) {
int cpu = get_cpu();
diff --git a/arch/i386/kernel/setup.c b/arch/i386/kernel/setup.c
index d313a11acaf..6917daa159a 100644
--- a/arch/i386/kernel/setup.c
+++ b/arch/i386/kernel/setup.c
@@ -1058,10 +1058,10 @@ static int __init
free_available_memory(unsigned long start, unsigned long end, void *arg)
{
/* check max_low_pfn */
- if (start >= ((max_low_pfn + 1) << PAGE_SHIFT))
+ if (start >= (max_low_pfn << PAGE_SHIFT))
return 0;
- if (end >= ((max_low_pfn + 1) << PAGE_SHIFT))
- end = (max_low_pfn + 1) << PAGE_SHIFT;
+ if (end >= (max_low_pfn << PAGE_SHIFT))
+ end = max_low_pfn << PAGE_SHIFT;
if (start < end)
free_bootmem(start, end - start);
@@ -1286,8 +1286,6 @@ legacy_init_iomem_resources(struct resource *code_resource, struct resource *dat
probe_roms();
for (i = 0; i < e820.nr_map; i++) {
struct resource *res;
- if (e820.map[i].addr + e820.map[i].size > 0x100000000ULL)
- continue;
res = kzalloc(sizeof(struct resource), GFP_ATOMIC);
switch (e820.map[i].type) {
case E820_RAM: res->name = "System RAM"; break;
diff --git a/arch/i386/kernel/traps.c b/arch/i386/kernel/traps.c
index de5386b01d3..4624f8ca245 100644
--- a/arch/i386/kernel/traps.c
+++ b/arch/i386/kernel/traps.c
@@ -386,8 +386,12 @@ void die(const char * str, struct pt_regs * regs, long err)
#endif
if (nl)
printk("\n");
- notify_die(DIE_OOPS, (char *)str, regs, err, 255, SIGSEGV);
- show_registers(regs);
+ if (notify_die(DIE_OOPS, str, regs, err,
+ current->thread.trap_no, SIGSEGV) !=
+ NOTIFY_STOP)
+ show_registers(regs);
+ else
+ regs = NULL;
} else
printk(KERN_EMERG "Recursive die() failure, output suppressed\n");
@@ -395,6 +399,9 @@ void die(const char * str, struct pt_regs * regs, long err)
die.lock_owner = -1;
spin_unlock_irqrestore(&die.lock, flags);
+ if (!regs)
+ return;
+
if (kexec_should_crash(current))
crash_kexec(regs);
@@ -623,7 +630,7 @@ static DEFINE_SPINLOCK(nmi_print_lock);
void die_nmi (struct pt_regs *regs, const char *msg)
{
- if (notify_die(DIE_NMIWATCHDOG, msg, regs, 0, 0, SIGINT) ==
+ if (notify_die(DIE_NMIWATCHDOG, msg, regs, 0, 2, SIGINT) ==
NOTIFY_STOP)
return;
@@ -662,7 +669,7 @@ static void default_do_nmi(struct pt_regs * regs)
reason = get_nmi_reason();
if (!(reason & 0xc0)) {
- if (notify_die(DIE_NMI_IPI, "nmi_ipi", regs, reason, 0, SIGINT)
+ if (notify_die(DIE_NMI_IPI, "nmi_ipi", regs, reason, 2, SIGINT)
== NOTIFY_STOP)
return;
#ifdef CONFIG_X86_LOCAL_APIC
@@ -678,7 +685,7 @@ static void default_do_nmi(struct pt_regs * regs)
unknown_nmi_error(reason, regs);
return;
}
- if (notify_die(DIE_NMI, "nmi", regs, reason, 0, SIGINT) == NOTIFY_STOP)
+ if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT) == NOTIFY_STOP)
return;
if (reason & 0x80)
mem_parity_error(reason, regs);
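
Note: the notify_die() calls above now pass the real trap number (2 for NMI) instead of 0. For context, a hedged sketch of a consumer on the i386 die notifier chain that these calls reach, assuming the 2.6-era register_die_notifier() API; module and function names are illustrative only:

	#include <linux/module.h>
	#include <linux/kernel.h>
	#include <linux/notifier.h>
	#include <asm/kdebug.h>

	static int my_die_event(struct notifier_block *nb, unsigned long val, void *data)
	{
		struct die_args *args = data;

		if (val == DIE_NMI || val == DIE_NMI_IPI)
			printk(KERN_WARNING "NMI notifier: %s (trap %d)\n",
			       args->str, args->trapnr);
		return NOTIFY_DONE;	/* let other consumers and the default path run */
	}

	static struct notifier_block my_die_nb = {
		.notifier_call = my_die_event,
	};

	static int __init my_nb_init(void)
	{
		return register_die_notifier(&my_die_nb);
	}
	module_init(my_nb_init);
	MODULE_LICENSE("GPL");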
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 10b6b9e7716..edffe25a477 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -34,6 +34,10 @@ config RWSEM_XCHGADD_ALGORITHM
bool
default y
+config GENERIC_FIND_NEXT_BIT
+ bool
+ default y
+
config GENERIC_CALIBRATE_DELAY
bool
default y
@@ -42,6 +46,10 @@ config TIME_INTERPOLATION
bool
default y
+config DMI
+ bool
+ default y
+
config EFI
bool
default y
diff --git a/arch/ia64/ia32/ia32priv.h b/arch/ia64/ia32/ia32priv.h
index 68ceb4e690c..ccb98ed48e5 100644
--- a/arch/ia64/ia32/ia32priv.h
+++ b/arch/ia64/ia32/ia32priv.h
@@ -29,9 +29,9 @@
struct partial_page {
struct partial_page *next; /* linked list, sorted by address */
struct rb_node pp_rb;
- /* 64K is the largest "normal" page supported by ia64 ABI. So 4K*32
+ /* 64K is the largest "normal" page supported by ia64 ABI. So 4K*64
* should suffice.*/
- unsigned int bitmap;
+ unsigned long bitmap;
unsigned int base;
};
diff --git a/arch/ia64/ia32/sys_ia32.c b/arch/ia64/ia32/sys_ia32.c
index 13e739e4c84..5366b3b23d0 100644
--- a/arch/ia64/ia32/sys_ia32.c
+++ b/arch/ia64/ia32/sys_ia32.c
@@ -25,7 +25,6 @@
#include <linux/resource.h>
#include <linux/times.h>
#include <linux/utsname.h>
-#include <linux/timex.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/sem.h>
@@ -2591,78 +2590,4 @@ sys32_setresgid(compat_gid_t rgid, compat_gid_t egid,
ssgid = (sgid == (compat_gid_t)-1) ? ((gid_t)-1) : ((gid_t)sgid);
return sys_setresgid(srgid, segid, ssgid);
}
-
-/* Handle adjtimex compatibility. */
-
-struct timex32 {
- u32 modes;
- s32 offset, freq, maxerror, esterror;
- s32 status, constant, precision, tolerance;
- struct compat_timeval time;
- s32 tick;
- s32 ppsfreq, jitter, shift, stabil;
- s32 jitcnt, calcnt, errcnt, stbcnt;
- s32 :32; s32 :32; s32 :32; s32 :32;
- s32 :32; s32 :32; s32 :32; s32 :32;
- s32 :32; s32 :32; s32 :32; s32 :32;
-};
-
-extern int do_adjtimex(struct timex *);
-
-asmlinkage long
-sys32_adjtimex(struct timex32 *utp)
-{
- struct timex txc;
- int ret;
-
- memset(&txc, 0, sizeof(struct timex));
-
- if(get_user(txc.modes, &utp->modes) ||
- __get_user(txc.offset, &utp->offset) ||
- __get_user(txc.freq, &utp->freq) ||
- __get_user(txc.maxerror, &utp->maxerror) ||
- __get_user(txc.esterror, &utp->esterror) ||
- __get_user(txc.status, &utp->status) ||
- __get_user(txc.constant, &utp->constant) ||
- __get_user(txc.precision, &utp->precision) ||
- __get_user(txc.tolerance, &utp->tolerance) ||
- __get_user(txc.time.tv_sec, &utp->time.tv_sec) ||
- __get_user(txc.time.tv_usec, &utp->time.tv_usec) ||
- __get_user(txc.tick, &utp->tick) ||
- __get_user(txc.ppsfreq, &utp->ppsfreq) ||
- __get_user(txc.jitter, &utp->jitter) ||
- __get_user(txc.shift, &utp->shift) ||
- __get_user(txc.stabil, &utp->stabil) ||
- __get_user(txc.jitcnt, &utp->jitcnt) ||
- __get_user(txc.calcnt, &utp->calcnt) ||
- __get_user(txc.errcnt, &utp->errcnt) ||
- __get_user(txc.stbcnt, &utp->stbcnt))
- return -EFAULT;
-
- ret = do_adjtimex(&txc);
-
- if(put_user(txc.modes, &utp->modes) ||
- __put_user(txc.offset, &utp->offset) ||
- __put_user(txc.freq, &utp->freq) ||
- __put_user(txc.maxerror, &utp->maxerror) ||
- __put_user(txc.esterror, &utp->esterror) ||
- __put_user(txc.status, &utp->status) ||
- __put_user(txc.constant, &utp->constant) ||
- __put_user(txc.precision, &utp->precision) ||
- __put_user(txc.tolerance, &utp->tolerance) ||
- __put_user(txc.time.tv_sec, &utp->time.tv_sec) ||
- __put_user(txc.time.tv_usec, &utp->time.tv_usec) ||
- __put_user(txc.tick, &utp->tick) ||
- __put_user(txc.ppsfreq, &utp->ppsfreq) ||
- __put_user(txc.jitter, &utp->jitter) ||
- __put_user(txc.shift, &utp->shift) ||
- __put_user(txc.stabil, &utp->stabil) ||
- __put_user(txc.jitcnt, &utp->jitcnt) ||
- __put_user(txc.calcnt, &utp->calcnt) ||
- __put_user(txc.errcnt, &utp->errcnt) ||
- __put_user(txc.stbcnt, &utp->stbcnt))
- ret = -EFAULT;
-
- return ret;
-}
#endif /* NOTYET */
diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile
index 09a0dbc17fb..59e871dae74 100644
--- a/arch/ia64/kernel/Makefile
+++ b/arch/ia64/kernel/Makefile
@@ -7,7 +7,7 @@ extra-y := head.o init_task.o vmlinux.lds
obj-y := acpi.o entry.o efi.o efi_stub.o gate-data.o fsys.o ia64_ksyms.o irq.o irq_ia64.o \
irq_lsapic.o ivt.o machvec.o pal.o patch.o process.o perfmon.o ptrace.o sal.o \
salinfo.o semaphore.o setup.o signal.o sys_ia64.o time.o traps.o unaligned.o \
- unwind.o mca.o mca_asm.o topology.o
+ unwind.o mca.o mca_asm.o topology.o dmi_scan.o
obj-$(CONFIG_IA64_BRL_EMU) += brl_emu.o
obj-$(CONFIG_IA64_GENERIC) += acpi-ext.o
@@ -30,6 +30,7 @@ obj-$(CONFIG_IA64_MCA_RECOVERY) += mca_recovery.o
obj-$(CONFIG_KPROBES) += kprobes.o jprobes.o
obj-$(CONFIG_IA64_UNCACHED_ALLOCATOR) += uncached.o
mca_recovery-y += mca_drv.o mca_drv_asm.o
+dmi_scan-y += ../../i386/kernel/dmi_scan.o
# The gate DSO image is built using a special linker script.
targets += gate.so gate-syms.o
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index a4e218ce2ed..58c93a30348 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -651,9 +651,9 @@ unsigned long __init acpi_find_rsdp(void)
{
unsigned long rsdp_phys = 0;
- if (efi.acpi20)
- rsdp_phys = __pa(efi.acpi20);
- else if (efi.acpi)
+ if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
+ rsdp_phys = efi.acpi20;
+ else if (efi.acpi != EFI_INVALID_TABLE_ADDR)
printk(KERN_WARNING PREFIX
"v1.0/r0.71 tables no longer supported\n");
return rsdp_phys;
diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c
index 9990320b6f9..12cfedce73b 100644
--- a/arch/ia64/kernel/efi.c
+++ b/arch/ia64/kernel/efi.c
@@ -458,24 +458,33 @@ efi_init (void)
printk(KERN_INFO "EFI v%u.%.02u by %s:",
efi.systab->hdr.revision >> 16, efi.systab->hdr.revision & 0xffff, vendor);
+ efi.mps = EFI_INVALID_TABLE_ADDR;
+ efi.acpi = EFI_INVALID_TABLE_ADDR;
+ efi.acpi20 = EFI_INVALID_TABLE_ADDR;
+ efi.smbios = EFI_INVALID_TABLE_ADDR;
+ efi.sal_systab = EFI_INVALID_TABLE_ADDR;
+ efi.boot_info = EFI_INVALID_TABLE_ADDR;
+ efi.hcdp = EFI_INVALID_TABLE_ADDR;
+ efi.uga = EFI_INVALID_TABLE_ADDR;
+
for (i = 0; i < (int) efi.systab->nr_tables; i++) {
if (efi_guidcmp(config_tables[i].guid, MPS_TABLE_GUID) == 0) {
- efi.mps = __va(config_tables[i].table);
+ efi.mps = config_tables[i].table;
printk(" MPS=0x%lx", config_tables[i].table);
} else if (efi_guidcmp(config_tables[i].guid, ACPI_20_TABLE_GUID) == 0) {
- efi.acpi20 = __va(config_tables[i].table);
+ efi.acpi20 = config_tables[i].table;
printk(" ACPI 2.0=0x%lx", config_tables[i].table);
} else if (efi_guidcmp(config_tables[i].guid, ACPI_TABLE_GUID) == 0) {
- efi.acpi = __va(config_tables[i].table);
+ efi.acpi = config_tables[i].table;
printk(" ACPI=0x%lx", config_tables[i].table);
} else if (efi_guidcmp(config_tables[i].guid, SMBIOS_TABLE_GUID) == 0) {
- efi.smbios = __va(config_tables[i].table);
+ efi.smbios = config_tables[i].table;
printk(" SMBIOS=0x%lx", config_tables[i].table);
} else if (efi_guidcmp(config_tables[i].guid, SAL_SYSTEM_TABLE_GUID) == 0) {
- efi.sal_systab = __va(config_tables[i].table);
+ efi.sal_systab = config_tables[i].table;
printk(" SALsystab=0x%lx", config_tables[i].table);
} else if (efi_guidcmp(config_tables[i].guid, HCDP_TABLE_GUID) == 0) {
- efi.hcdp = __va(config_tables[i].table);
+ efi.hcdp = config_tables[i].table;
printk(" HCDP=0x%lx", config_tables[i].table);
}
}
@@ -677,27 +686,34 @@ EXPORT_SYMBOL(efi_mem_attributes);
/*
* Determines whether the memory at phys_addr supports the desired
* attribute (WB, UC, etc). If this returns 1, the caller can safely
- * access *size bytes at phys_addr with the specified attribute.
+ * access size bytes at phys_addr with the specified attribute.
*/
-static int
-efi_mem_attribute_range (unsigned long phys_addr, unsigned long *size, u64 attr)
+int
+efi_mem_attribute_range (unsigned long phys_addr, unsigned long size, u64 attr)
{
+ unsigned long end = phys_addr + size;
efi_memory_desc_t *md = efi_memory_descriptor(phys_addr);
- unsigned long md_end;
- if (!md || (md->attribute & attr) != attr)
+ /*
+ * Some firmware doesn't report MMIO regions in the EFI memory
+ * map. The Intel BigSur (a.k.a. HP i2000) has this problem.
+ * On those platforms, we have to assume UC is valid everywhere.
+ */
+ if (!md || (md->attribute & attr) != attr) {
+ if (attr == EFI_MEMORY_UC && !efi_memmap_has_mmio())
+ return 1;
return 0;
+ }
do {
- md_end = efi_md_end(md);
- if (phys_addr + *size <= md_end)
+ unsigned long md_end = efi_md_end(md);
+
+ if (end <= md_end)
return 1;
md = efi_memory_descriptor(md_end);
- if (!md || (md->attribute & attr) != attr) {
- *size = md_end - phys_addr;
- return 1;
- }
+ if (!md || (md->attribute & attr) != attr)
+ return 0;
} while (md);
return 0;
}
@@ -708,7 +724,7 @@ efi_mem_attribute_range (unsigned long phys_addr, unsigned long *size, u64 attr)
* control access size.
*/
int
-valid_phys_addr_range (unsigned long phys_addr, unsigned long *size)
+valid_phys_addr_range (unsigned long phys_addr, unsigned long size)
{
return efi_mem_attribute_range(phys_addr, size, EFI_MEMORY_WB);
}
@@ -723,7 +739,7 @@ valid_phys_addr_range (unsigned long phys_addr, unsigned long *size)
* because that doesn't appear in the boot-time EFI memory map.
*/
int
-valid_mmap_phys_addr_range (unsigned long phys_addr, unsigned long *size)
+valid_mmap_phys_addr_range (unsigned long phys_addr, unsigned long size)
{
if (efi_mem_attribute_range(phys_addr, size, EFI_MEMORY_WB))
return 1;
@@ -731,14 +747,6 @@ valid_mmap_phys_addr_range (unsigned long phys_addr, unsigned long *size)
if (efi_mem_attribute_range(phys_addr, size, EFI_MEMORY_UC))
return 1;
- /*
- * Some firmware doesn't report MMIO regions in the EFI memory map.
- * The Intel BigSur (a.k.a. HP i2000) has this problem. In this
- * case, we can't use the EFI memory map to validate mmap requests.
- */
- if (!efi_memmap_has_mmio())
- return 1;
-
return 0;
}
diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c
index 50ae8c7d453..789881ca83d 100644
--- a/arch/ia64/kernel/kprobes.c
+++ b/arch/ia64/kernel/kprobes.c
@@ -34,6 +34,7 @@
#include <asm/pgtable.h>
#include <asm/kdebug.h>
#include <asm/sections.h>
+#include <asm/uaccess.h>
extern void jprobe_inst_return(void);
@@ -722,13 +723,50 @@ static int __kprobes kprobes_fault_handler(struct pt_regs *regs, int trapnr)
struct kprobe *cur = kprobe_running();
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
- if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
- return 1;
- if (kcb->kprobe_status & KPROBE_HIT_SS) {
- resume_execution(cur, regs);
- reset_current_kprobe();
+ switch(kcb->kprobe_status) {
+ case KPROBE_HIT_SS:
+ case KPROBE_REENTER:
+ /*
+ * We are here because the instruction being single
+ * stepped caused a page fault. We reset the current
+ * kprobe, point the instruction pointer back to the
+ * probe address, and allow the page fault handler
+ * to continue as a normal page fault.
+ */
+ regs->cr_iip = ((unsigned long)cur->addr) & ~0xFULL;
+ ia64_psr(regs)->ri = ((unsigned long)cur->addr) & 0xf;
+ if (kcb->kprobe_status == KPROBE_REENTER)
+ restore_previous_kprobe(kcb);
+ else
+ reset_current_kprobe();
preempt_enable_no_resched();
+ break;
+ case KPROBE_HIT_ACTIVE:
+ case KPROBE_HIT_SSDONE:
+ /*
+ * We increment the nmissed count for accounting,
+ * we can also use npre/npostfault count for accounting
+ * these specific fault cases.
+ */
+ kprobes_inc_nmissed_count(cur);
+
+ /*
+ * We come here because instructions in the pre/post
+ * handler caused the page fault; this could happen
+ * if the handler tries to access user space via
+ * copy_from_user(), get_user(), etc. Let the
+ * user-specified handler try to fix it first.
+ */
+ if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
+ return 1;
+
+ /*
+ * Let ia64_do_page_fault() fix it.
+ */
+ break;
+ default:
+ break;
}
return 0;
@@ -740,6 +778,9 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
struct die_args *args = (struct die_args *)data;
int ret = NOTIFY_DONE;
+ if (args->regs && user_mode(args->regs))
+ return ret;
+
switch(val) {
case DIE_BREAK:
/* err is break number from ia64_bad_break() */
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 87ff7fe33cf..8963171788d 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -69,6 +69,7 @@
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/workqueue.h>
+#include <linux/cpumask.h>
#include <asm/delay.h>
#include <asm/kdebug.h>
@@ -1505,7 +1506,7 @@ format_mca_init_stack(void *mca_data, unsigned long offset,
ti->cpu = cpu;
p->thread_info = ti;
p->state = TASK_UNINTERRUPTIBLE;
- __set_bit(cpu, &p->cpus_allowed);
+ cpu_set(cpu, p->cpus_allowed);
INIT_LIST_HEAD(&p->tasks);
p->parent = p->real_parent = p->group_leader = p;
INIT_LIST_HEAD(&p->children);
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
index 309d59658e5..355d57970ba 100644
--- a/arch/ia64/kernel/process.c
+++ b/arch/ia64/kernel/process.c
@@ -30,7 +30,6 @@
#include <linux/efi.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
-#include <linux/kprobes.h>
#include <asm/cpu.h>
#include <asm/delay.h>
@@ -738,13 +737,6 @@ void
exit_thread (void)
{
- /*
- * Remove function-return probe instances associated with this task
- * and put them back on the free list. Do not insert an exit probe for
- * this function, it will be disabled by kprobe_flush_task if you do.
- */
- kprobe_flush_task(current);
-
ia64_drop_fpu(current);
#ifdef CONFIG_PERFMON
/* if needed, stop monitoring and flush state to perfmon context */
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index eb388e271b2..e4dfda1eb7d 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -37,6 +37,7 @@
#include <linux/string.h>
#include <linux/threads.h>
#include <linux/tty.h>
+#include <linux/dmi.h>
#include <linux/serial.h>
#include <linux/serial_core.h>
#include <linux/efi.h>
@@ -433,7 +434,7 @@ setup_arch (char **cmdline_p)
find_memory();
/* process SAL system table: */
- ia64_sal_init(efi.sal_systab);
+ ia64_sal_init(__va(efi.sal_systab));
ia64_setup_printk_clock();
@@ -887,3 +888,10 @@ check_bugs (void)
ia64_patch_mckinley_e9((unsigned long) __start___mckinley_e9_bundles,
(unsigned long) __end___mckinley_e9_bundles);
}
+
+static int __init run_dmi_scan(void)
+{
+ dmi_scan_machine();
+ return 0;
+}
+core_initcall(run_dmi_scan);
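
Note: with dmi_scan_machine() now run from a core_initcall on ia64, generic DMI consumers work there as well. A hedged sketch of such a consumer; the vendor string and quirk table are purely illustrative:

	#include <linux/kernel.h>
	#include <linux/init.h>
	#include <linux/dmi.h>

	static struct dmi_system_id example_table[] = {
		{
			.ident = "Example ia64 box",
			.matches = {
				DMI_MATCH(DMI_SYS_VENDOR, "Example Vendor"),
			},
		},
		{ }	/* terminator */
	};

	static int __init example_dmi_init(void)
	{
		if (dmi_check_system(example_table))
			printk(KERN_INFO "example DMI quirk applies\n");
		return 0;
	}
	late_initcall(example_dmi_init);	/* after the core_initcall above */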
diff --git a/arch/ia64/lib/Makefile b/arch/ia64/lib/Makefile
index ac64664a180..d8536a2c22a 100644
--- a/arch/ia64/lib/Makefile
+++ b/arch/ia64/lib/Makefile
@@ -6,7 +6,7 @@ obj-y := io.o
lib-y := __divsi3.o __udivsi3.o __modsi3.o __umodsi3.o \
__divdi3.o __udivdi3.o __moddi3.o __umoddi3.o \
- bitop.o checksum.o clear_page.o csum_partial_copy.o \
+ checksum.o clear_page.o csum_partial_copy.o \
clear_user.o strncpy_from_user.o strlen_user.o strnlen_user.o \
flush.o ip_fast_csum.o do_csum.o \
memset.o strlen.o
diff --git a/arch/ia64/lib/bitop.c b/arch/ia64/lib/bitop.c
deleted file mode 100644
index 82e299c8464..00000000000
--- a/arch/ia64/lib/bitop.c
+++ /dev/null
@@ -1,88 +0,0 @@
-#include <linux/compiler.h>
-#include <linux/types.h>
-#include <asm/intrinsics.h>
-#include <linux/module.h>
-#include <linux/bitops.h>
-
-/*
- * Find next zero bit in a bitmap reasonably efficiently..
- */
-
-int __find_next_zero_bit (const void *addr, unsigned long size, unsigned long offset)
-{
- unsigned long *p = ((unsigned long *) addr) + (offset >> 6);
- unsigned long result = offset & ~63UL;
- unsigned long tmp;
-
- if (offset >= size)
- return size;
- size -= result;
- offset &= 63UL;
- if (offset) {
- tmp = *(p++);
- tmp |= ~0UL >> (64-offset);
- if (size < 64)
- goto found_first;
- if (~tmp)
- goto found_middle;
- size -= 64;
- result += 64;
- }
- while (size & ~63UL) {
- if (~(tmp = *(p++)))
- goto found_middle;
- result += 64;
- size -= 64;
- }
- if (!size)
- return result;
- tmp = *p;
-found_first:
- tmp |= ~0UL << size;
- if (tmp == ~0UL) /* any bits zero? */
- return result + size; /* nope */
-found_middle:
- return result + ffz(tmp);
-}
-EXPORT_SYMBOL(__find_next_zero_bit);
-
-/*
- * Find next bit in a bitmap reasonably efficiently..
- */
-int __find_next_bit(const void *addr, unsigned long size, unsigned long offset)
-{
- unsigned long *p = ((unsigned long *) addr) + (offset >> 6);
- unsigned long result = offset & ~63UL;
- unsigned long tmp;
-
- if (offset >= size)
- return size;
- size -= result;
- offset &= 63UL;
- if (offset) {
- tmp = *(p++);
- tmp &= ~0UL << offset;
- if (size < 64)
- goto found_first;
- if (tmp)
- goto found_middle;
- size -= 64;
- result += 64;
- }
- while (size & ~63UL) {
- if ((tmp = *(p++)))
- goto found_middle;
- result += 64;
- size -= 64;
- }
- if (!size)
- return result;
- tmp = *p;
- found_first:
- tmp &= ~0UL >> (64-size);
- if (tmp == 0UL) /* Are any bits set? */
- return result + size; /* Nope. */
- found_middle:
- return result + __ffs(tmp);
-}
-EXPORT_SYMBOL(__find_next_bit);
diff --git a/arch/ia64/mm/Makefile b/arch/ia64/mm/Makefile
index d78d20f0a0f..bb0a01a8187 100644
--- a/arch/ia64/mm/Makefile
+++ b/arch/ia64/mm/Makefile
@@ -2,7 +2,7 @@
# Makefile for the ia64-specific parts of the memory manager.
#
-obj-y := init.o fault.o tlb.o extable.o
+obj-y := init.o fault.o tlb.o extable.o ioremap.o
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
obj-$(CONFIG_NUMA) += numa.o
diff --git a/arch/ia64/mm/ioremap.c b/arch/ia64/mm/ioremap.c
new file mode 100644
index 00000000000..62328621f99
--- /dev/null
+++ b/arch/ia64/mm/ioremap.c
@@ -0,0 +1,43 @@
+/*
+ * (c) Copyright 2006 Hewlett-Packard Development Company, L.P.
+ * Bjorn Helgaas <bjorn.helgaas@hp.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/compiler.h>
+#include <linux/module.h>
+#include <linux/efi.h>
+#include <asm/io.h>
+
+static inline void __iomem *
+__ioremap (unsigned long offset, unsigned long size)
+{
+ return (void __iomem *) (__IA64_UNCACHED_OFFSET | offset);
+}
+
+void __iomem *
+ioremap (unsigned long offset, unsigned long size)
+{
+ if (efi_mem_attribute_range(offset, size, EFI_MEMORY_UC))
+ return __ioremap(offset, size);
+
+ if (efi_mem_attribute_range(offset, size, EFI_MEMORY_WB))
+ return phys_to_virt(offset);
+
+ /*
+ * Someday this should check ACPI resources so we
+ * can do the right thing for hot-plugged regions.
+ */
+ return __ioremap(offset, size);
+}
+EXPORT_SYMBOL(ioremap);
+
+void __iomem *
+ioremap_nocache (unsigned long offset, unsigned long size)
+{
+ return __ioremap(offset, size);
+}
+EXPORT_SYMBOL(ioremap_nocache);
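
Note: a hedged sketch of how a driver would consume the new ia64 ioremap()/ioremap_nocache(); the device base address, size and register offset below are made up for illustration:

	#include <linux/errno.h>
	#include <asm/io.h>

	#define EXDEV_BASE	0x80000000UL	/* illustrative MMIO physical base */
	#define EXDEV_SIZE	0x1000UL

	static void __iomem *exdev_regs;

	static int exdev_map(void)
	{
		/* ioremap() above picks a UC or WB mapping from the EFI memory map. */
		exdev_regs = ioremap(EXDEV_BASE, EXDEV_SIZE);
		if (!exdev_regs)
			return -ENOMEM;

		writel(1, exdev_regs + 0x10);	/* illustrative register write */
		return 0;
	}

	static void exdev_unmap(void)
	{
		iounmap(exdev_regs);
	}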
diff --git a/arch/ia64/sn/kernel/setup.c b/arch/ia64/sn/kernel/setup.c
index 8b6d5c84470..30988dfbddf 100644
--- a/arch/ia64/sn/kernel/setup.c
+++ b/arch/ia64/sn/kernel/setup.c
@@ -327,10 +327,11 @@ sn_scan_pcdp(void)
struct pcdp_interface_pci if_pci;
extern struct efi efi;
- pcdp = efi.hcdp;
- if (! pcdp)
+ if (efi.hcdp == EFI_INVALID_TABLE_ADDR)
return; /* no hcdp/pcdp table */
+ pcdp = __va(efi.hcdp);
+
if (pcdp->rev < 3)
return; /* only support PCDP (rev >= 3) */
diff --git a/arch/m32r/Kconfig b/arch/m32r/Kconfig
index a3dcc3fab4b..05c864c6c2d 100644
--- a/arch/m32r/Kconfig
+++ b/arch/m32r/Kconfig
@@ -214,6 +214,14 @@ config RWSEM_XCHGADD_ALGORITHM
bool
default n
+config GENERIC_FIND_NEXT_BIT
+ bool
+ default y
+
+config GENERIC_HWEIGHT
+ bool
+ default y
+
config GENERIC_CALIBRATE_DELAY
bool
default y
diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig
index 8849439e88d..805b81fedf8 100644
--- a/arch/m68k/Kconfig
+++ b/arch/m68k/Kconfig
@@ -17,6 +17,10 @@ config RWSEM_GENERIC_SPINLOCK
config RWSEM_XCHGADD_ALGORITHM
bool
+config GENERIC_HWEIGHT
+ bool
+ default y
+
config GENERIC_CALIBRATE_DELAY
bool
default y
diff --git a/arch/m68k/bvme6000/config.c b/arch/m68k/bvme6000/config.c
index 3ffc84f9c29..c90cb5fcc8e 100644
--- a/arch/m68k/bvme6000/config.c
+++ b/arch/m68k/bvme6000/config.c
@@ -142,7 +142,7 @@ void __init config_bvme6000(void)
/* Now do the PIT configuration */
pit->pgcr = 0x00; /* Unidirectional 8 bit, no handshake for now */
- pit->psrr = 0x18; /* PIACK and PIRQ fucntions enabled */
+ pit->psrr = 0x18; /* PIACK and PIRQ functions enabled */
pit->pacr = 0x00; /* Sub Mode 00, H2 i/p, no DMA */
pit->padr = 0x00; /* Just to be tidy! */
pit->paddr = 0x00; /* All inputs for now (safest) */
diff --git a/arch/m68knommu/Kconfig b/arch/m68knommu/Kconfig
index e50858dbc23..3cde6822ead 100644
--- a/arch/m68knommu/Kconfig
+++ b/arch/m68knommu/Kconfig
@@ -25,6 +25,14 @@ config RWSEM_XCHGADD_ALGORITHM
bool
default n
+config GENERIC_FIND_NEXT_BIT
+ bool
+ default y
+
+config GENERIC_HWEIGHT
+ bool
+ default y
+
config GENERIC_CALIBRATE_DELAY
bool
default y
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index ac2012f033d..5080ea1799a 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -801,6 +801,14 @@ config RWSEM_GENERIC_SPINLOCK
config RWSEM_XCHGADD_ALGORITHM
bool
+config GENERIC_FIND_NEXT_BIT
+ bool
+ default y
+
+config GENERIC_HWEIGHT
+ bool
+ default y
+
config GENERIC_CALIBRATE_DELAY
bool
default y
diff --git a/arch/mips/kernel/linux32.c b/arch/mips/kernel/linux32.c
index 013bc93688e..3f40c37a9ee 100644
--- a/arch/mips/kernel/linux32.c
+++ b/arch/mips/kernel/linux32.c
@@ -30,7 +30,6 @@
#include <linux/utime.h>
#include <linux/utsname.h>
#include <linux/personality.h>
-#include <linux/timex.h>
#include <linux/dnotify.h>
#include <linux/module.h>
#include <linux/binfmts.h>
@@ -1157,79 +1156,6 @@ out:
return err;
}
-/* Handle adjtimex compatibility. */
-
-struct timex32 {
- u32 modes;
- s32 offset, freq, maxerror, esterror;
- s32 status, constant, precision, tolerance;
- struct compat_timeval time;
- s32 tick;
- s32 ppsfreq, jitter, shift, stabil;
- s32 jitcnt, calcnt, errcnt, stbcnt;
- s32 :32; s32 :32; s32 :32; s32 :32;
- s32 :32; s32 :32; s32 :32; s32 :32;
- s32 :32; s32 :32; s32 :32; s32 :32;
-};
-
-extern int do_adjtimex(struct timex *);
-
-asmlinkage int sys32_adjtimex(struct timex32 __user *utp)
-{
- struct timex txc;
- int ret;
-
- memset(&txc, 0, sizeof(struct timex));
-
- if (get_user(txc.modes, &utp->modes) ||
- __get_user(txc.offset, &utp->offset) ||
- __get_user(txc.freq, &utp->freq) ||
- __get_user(txc.maxerror, &utp->maxerror) ||
- __get_user(txc.esterror, &utp->esterror) ||
- __get_user(txc.status, &utp->status) ||
- __get_user(txc.constant, &utp->constant) ||
- __get_user(txc.precision, &utp->precision) ||
- __get_user(txc.tolerance, &utp->tolerance) ||
- __get_user(txc.time.tv_sec, &utp->time.tv_sec) ||
- __get_user(txc.time.tv_usec, &utp->time.tv_usec) ||
- __get_user(txc.tick, &utp->tick) ||
- __get_user(txc.ppsfreq, &utp->ppsfreq) ||
- __get_user(txc.jitter, &utp->jitter) ||
- __get_user(txc.shift, &utp->shift) ||
- __get_user(txc.stabil, &utp->stabil) ||
- __get_user(txc.jitcnt, &utp->jitcnt) ||
- __get_user(txc.calcnt, &utp->calcnt) ||
- __get_user(txc.errcnt, &utp->errcnt) ||
- __get_user(txc.stbcnt, &utp->stbcnt))
- return -EFAULT;
-
- ret = do_adjtimex(&txc);
-
- if (put_user(txc.modes, &utp->modes) ||
- __put_user(txc.offset, &utp->offset) ||
- __put_user(txc.freq, &utp->freq) ||
- __put_user(txc.maxerror, &utp->maxerror) ||
- __put_user(txc.esterror, &utp->esterror) ||
- __put_user(txc.status, &utp->status) ||
- __put_user(txc.constant, &utp->constant) ||
- __put_user(txc.precision, &utp->precision) ||
- __put_user(txc.tolerance, &utp->tolerance) ||
- __put_user(txc.time.tv_sec, &utp->time.tv_sec) ||
- __put_user(txc.time.tv_usec, &utp->time.tv_usec) ||
- __put_user(txc.tick, &utp->tick) ||
- __put_user(txc.ppsfreq, &utp->ppsfreq) ||
- __put_user(txc.jitter, &utp->jitter) ||
- __put_user(txc.shift, &utp->shift) ||
- __put_user(txc.stabil, &utp->stabil) ||
- __put_user(txc.jitcnt, &utp->jitcnt) ||
- __put_user(txc.calcnt, &utp->calcnt) ||
- __put_user(txc.errcnt, &utp->errcnt) ||
- __put_user(txc.stbcnt, &utp->stbcnt))
- ret = -EFAULT;
-
- return ret;
-}
-
asmlinkage int sys32_sendfile(int out_fd, int in_fd, compat_off_t __user *offset,
s32 count)
{
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index 02c8267e45e..05a2c0567da 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -273,7 +273,7 @@ EXPORT(sysn32_call_table)
PTR sys_pivot_root
PTR sys32_sysctl
PTR sys_prctl
- PTR sys32_adjtimex
+ PTR compat_sys_adjtimex
PTR compat_sys_setrlimit /* 6155 */
PTR sys_chroot
PTR sys_sync
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index 797e0d87488..19c4ca481b0 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -328,7 +328,7 @@ sys_call_table:
PTR sys_setdomainname
PTR sys32_newuname
PTR sys_ni_syscall /* sys_modify_ldt */
- PTR sys32_adjtimex
+ PTR compat_sys_adjtimex
PTR sys_mprotect /* 4125 */
PTR compat_sys_sigprocmask
PTR sys_ni_syscall /* was creat_module */
diff --git a/arch/mips/lasat/sysctl.c b/arch/mips/lasat/sysctl.c
index 8ff43a1c1e9..e3d5aaa90f0 100644
--- a/arch/mips/lasat/sysctl.c
+++ b/arch/mips/lasat/sysctl.c
@@ -30,12 +30,13 @@
#include <linux/string.h>
#include <linux/net.h>
#include <linux/inet.h>
+#include <linux/mutex.h>
#include <asm/uaccess.h>
#include "sysctl.h"
#include "ds1603.h"
-static DECLARE_MUTEX(lasat_info_sem);
+static DEFINE_MUTEX(lasat_info_mutex);
/* Strategy function to write EEPROM after changing string entry */
int sysctl_lasatstring(ctl_table *table, int *name, int nlen,
@@ -43,17 +44,17 @@ int sysctl_lasatstring(ctl_table *table, int *name, int nlen,
void *newval, size_t newlen, void **context)
{
int r;
- down(&lasat_info_sem);
+ mutex_lock(&lasat_info_mutex);
r = sysctl_string(table, name,
nlen, oldval, oldlenp, newval, newlen, context);
if (r < 0) {
- up(&lasat_info_sem);
+ mutex_unlock(&lasat_info_mutex);
return r;
}
if (newval && newlen) {
lasat_write_eeprom_info();
}
- up(&lasat_info_sem);
+ mutex_unlock(&lasat_info_mutex);
return 1;
}
@@ -63,14 +64,14 @@ int proc_dolasatstring(ctl_table *table, int write, struct file *filp,
void *buffer, size_t *lenp, loff_t *ppos)
{
int r;
- down(&lasat_info_sem);
+ mutex_lock(&lasat_info_mutex);
r = proc_dostring(table, write, filp, buffer, lenp, ppos);
if ( (!write) || r) {
- up(&lasat_info_sem);
+ mutex_unlock(&lasat_info_mutex);
return r;
}
lasat_write_eeprom_info();
- up(&lasat_info_sem);
+ mutex_unlock(&lasat_info_mutex);
return 0;
}
@@ -79,14 +80,14 @@ int proc_dolasatint(ctl_table *table, int write, struct file *filp,
void *buffer, size_t *lenp, loff_t *ppos)
{
int r;
- down(&lasat_info_sem);
+ mutex_lock(&lasat_info_mutex);
r = proc_dointvec(table, write, filp, buffer, lenp, ppos);
if ( (!write) || r) {
- up(&lasat_info_sem);
+ mutex_unlock(&lasat_info_mutex);
return r;
}
lasat_write_eeprom_info();
- up(&lasat_info_sem);
+ mutex_unlock(&lasat_info_mutex);
return 0;
}
@@ -98,7 +99,7 @@ int proc_dolasatrtc(ctl_table *table, int write, struct file *filp,
void *buffer, size_t *lenp, loff_t *ppos)
{
int r;
- down(&lasat_info_sem);
+ mutex_lock(&lasat_info_mutex);
if (!write) {
rtctmp = ds1603_read();
/* check for time < 0 and set to 0 */
@@ -107,11 +108,11 @@ int proc_dolasatrtc(ctl_table *table, int write, struct file *filp,
}
r = proc_dointvec(table, write, filp, buffer, lenp, ppos);
if ( (!write) || r) {
- up(&lasat_info_sem);
+ mutex_unlock(&lasat_info_mutex);
return r;
}
ds1603_set(rtctmp);
- up(&lasat_info_sem);
+ mutex_unlock(&lasat_info_mutex);
return 0;
}
#endif
@@ -122,16 +123,16 @@ int sysctl_lasat_intvec(ctl_table *table, int *name, int nlen,
void *newval, size_t newlen, void **context)
{
int r;
- down(&lasat_info_sem);
+ mutex_lock(&lasat_info_mutex);
r = sysctl_intvec(table, name, nlen, oldval, oldlenp, newval, newlen, context);
if (r < 0) {
- up(&lasat_info_sem);
+ mutex_unlock(&lasat_info_mutex);
return r;
}
if (newval && newlen) {
lasat_write_eeprom_info();
}
- up(&lasat_info_sem);
+ mutex_unlock(&lasat_info_mutex);
return 1;
}
@@ -142,19 +143,19 @@ int sysctl_lasat_rtc(ctl_table *table, int *name, int nlen,
void *newval, size_t newlen, void **context)
{
int r;
- down(&lasat_info_sem);
+ mutex_lock(&lasat_info_mutex);
rtctmp = ds1603_read();
if (rtctmp < 0)
rtctmp = 0;
r = sysctl_intvec(table, name, nlen, oldval, oldlenp, newval, newlen, context);
if (r < 0) {
- up(&lasat_info_sem);
+ mutex_unlock(&lasat_info_mutex);
return r;
}
if (newval && newlen) {
ds1603_set(rtctmp);
}
- up(&lasat_info_sem);
+ mutex_unlock(&lasat_info_mutex);
return 1;
}
#endif
@@ -192,13 +193,13 @@ int proc_lasat_ip(ctl_table *table, int write, struct file *filp,
return 0;
}
- down(&lasat_info_sem);
+ mutex_lock(&lasat_info_mutex);
if (write) {
len = 0;
p = buffer;
while (len < *lenp) {
if(get_user(c, p++)) {
- up(&lasat_info_sem);
+ mutex_unlock(&lasat_info_mutex);
return -EFAULT;
}
if (c == 0 || c == '\n')
@@ -209,7 +210,7 @@ int proc_lasat_ip(ctl_table *table, int write, struct file *filp,
len = sizeof(proc_lasat_ipbuf) - 1;
if (copy_from_user(proc_lasat_ipbuf, buffer, len))
{
- up(&lasat_info_sem);
+ mutex_unlock(&lasat_info_mutex);
return -EFAULT;
}
proc_lasat_ipbuf[len] = 0;
@@ -230,12 +231,12 @@ int proc_lasat_ip(ctl_table *table, int write, struct file *filp,
len = *lenp;
if (len)
if(copy_to_user(buffer, proc_lasat_ipbuf, len)) {
- up(&lasat_info_sem);
+ mutex_unlock(&lasat_info_mutex);
return -EFAULT;
}
if (len < *lenp) {
if(put_user('\n', ((char *) buffer) + len)) {
- up(&lasat_info_sem);
+ mutex_unlock(&lasat_info_mutex);
return -EFAULT;
}
len++;
@@ -244,7 +245,7 @@ int proc_lasat_ip(ctl_table *table, int write, struct file *filp,
*ppos += len;
}
update_bcastaddr();
- up(&lasat_info_sem);
+ mutex_unlock(&lasat_info_mutex);
return 0;
}
#endif /* defined(CONFIG_INET) */
@@ -256,10 +257,10 @@ static int sysctl_lasat_eeprom_value(ctl_table *table, int *name, int nlen,
{
int r;
- down(&lasat_info_sem);
+ mutex_lock(&lasat_info_mutex);
r = sysctl_intvec(table, name, nlen, oldval, oldlenp, newval, newlen, context);
if (r < 0) {
- up(&lasat_info_sem);
+ mutex_unlock(&lasat_info_mutex);
return r;
}
@@ -271,7 +272,7 @@ static int sysctl_lasat_eeprom_value(ctl_table *table, int *name, int nlen,
lasat_write_eeprom_info();
lasat_init_board_info();
}
- up(&lasat_info_sem);
+ mutex_unlock(&lasat_info_mutex);
return 0;
}
@@ -280,10 +281,10 @@ int proc_lasat_eeprom_value(ctl_table *table, int write, struct file *filp,
void *buffer, size_t *lenp, loff_t *ppos)
{
int r;
- down(&lasat_info_sem);
+ mutex_lock(&lasat_info_mutex);
r = proc_dointvec(table, write, filp, buffer, lenp, ppos);
if ( (!write) || r) {
- up(&lasat_info_sem);
+ mutex_unlock(&lasat_info_mutex);
return r;
}
if (filp && filp->f_dentry)
@@ -294,7 +295,7 @@ int proc_lasat_eeprom_value(ctl_table *table, int write, struct file *filp,
lasat_board_info.li_eeprom_info.debugaccess = lasat_board_info.li_debugaccess;
}
lasat_write_eeprom_info();
- up(&lasat_info_sem);
+ mutex_unlock(&lasat_info_mutex);
return 0;
}
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index eca33cfa8a4..6b3c50964ca 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -25,6 +25,14 @@ config RWSEM_GENERIC_SPINLOCK
config RWSEM_XCHGADD_ALGORITHM
bool
+config GENERIC_FIND_NEXT_BIT
+ bool
+ default y
+
+config GENERIC_HWEIGHT
+ bool
+ default y
+
config GENERIC_CALIBRATE_DELAY
bool
default y
diff --git a/arch/parisc/kernel/sys_parisc32.c b/arch/parisc/kernel/sys_parisc32.c
index 61356901841..d286f68a3d3 100644
--- a/arch/parisc/kernel/sys_parisc32.c
+++ b/arch/parisc/kernel/sys_parisc32.c
@@ -21,7 +21,6 @@
#include <linux/times.h>
#include <linux/utsname.h>
#include <linux/time.h>
-#include <linux/timex.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/sem.h>
@@ -567,63 +566,6 @@ asmlinkage int sys32_sendfile64(int out_fd, int in_fd, compat_loff_t __user *off
}
-struct timex32 {
- unsigned int modes; /* mode selector */
- int offset; /* time offset (usec) */
- int freq; /* frequency offset (scaled ppm) */
- int maxerror; /* maximum error (usec) */
- int esterror; /* estimated error (usec) */
- int status; /* clock command/status */
- int constant; /* pll time constant */
- int precision; /* clock precision (usec) (read only) */
- int tolerance; /* clock frequency tolerance (ppm)
- * (read only)
- */
- struct compat_timeval time; /* (read only) */
- int tick; /* (modified) usecs between clock ticks */
-
- int ppsfreq; /* pps frequency (scaled ppm) (ro) */
- int jitter; /* pps jitter (us) (ro) */
- int shift; /* interval duration (s) (shift) (ro) */
- int stabil; /* pps stability (scaled ppm) (ro) */
- int jitcnt; /* jitter limit exceeded (ro) */
- int calcnt; /* calibration intervals (ro) */
- int errcnt; /* calibration errors (ro) */
- int stbcnt; /* stability limit exceeded (ro) */
-
- int :32; int :32; int :32; int :32;
- int :32; int :32; int :32; int :32;
- int :32; int :32; int :32; int :32;
-};
-
-asmlinkage long sys32_adjtimex(struct timex32 __user *txc_p32)
-{
- struct timex txc;
- struct timex32 t32;
- int ret;
- extern int do_adjtimex(struct timex *txc);
-
- if(copy_from_user(&t32, txc_p32, sizeof(struct timex32)))
- return -EFAULT;
-#undef CP
-#define CP(x) txc.x = t32.x
- CP(modes); CP(offset); CP(freq); CP(maxerror); CP(esterror);
- CP(status); CP(constant); CP(precision); CP(tolerance);
- CP(time.tv_sec); CP(time.tv_usec); CP(tick); CP(ppsfreq); CP(jitter);
- CP(shift); CP(stabil); CP(jitcnt); CP(calcnt); CP(errcnt);
- CP(stbcnt);
- ret = do_adjtimex(&txc);
-#undef CP
-#define CP(x) t32.x = txc.x
- CP(modes); CP(offset); CP(freq); CP(maxerror); CP(esterror);
- CP(status); CP(constant); CP(precision); CP(tolerance);
- CP(time.tv_sec); CP(time.tv_usec); CP(tick); CP(ppsfreq); CP(jitter);
- CP(shift); CP(stabil); CP(jitcnt); CP(calcnt); CP(errcnt);
- CP(stbcnt);
- return copy_to_user(txc_p32, &t32, sizeof(struct timex32)) ? -EFAULT : ret;
-}
-
-
struct sysinfo32 {
s32 uptime;
u32 loads[3];
diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S
index 71011eadb87..89b6c56ea0a 100644
--- a/arch/parisc/kernel/syscall_table.S
+++ b/arch/parisc/kernel/syscall_table.S
@@ -207,7 +207,7 @@
/* struct sockaddr... */
ENTRY_SAME(recvfrom)
/* struct timex contains longs */
- ENTRY_DIFF(adjtimex)
+ ENTRY_COMP(adjtimex)
ENTRY_SAME(mprotect) /* 125 */
/* old_sigset_t forced to 32 bits. Beware glibc sigset_t */
ENTRY_COMP(sigprocmask)
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index fae42da7468..a433b7126d3 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -37,6 +37,10 @@ config RWSEM_XCHGADD_ALGORITHM
bool
default y
+config GENERIC_HWEIGHT
+ bool
+ default y
+
config GENERIC_CALIBRATE_DELAY
bool
default y
diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
index cb1fe5878e8..ad7a9021220 100644
--- a/arch/powerpc/kernel/kprobes.c
+++ b/arch/powerpc/kernel/kprobes.c
@@ -30,9 +30,11 @@
#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/preempt.h>
+#include <linux/module.h>
#include <asm/cacheflush.h>
#include <asm/kdebug.h>
#include <asm/sstep.h>
+#include <asm/uaccess.h>
DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
@@ -372,17 +374,62 @@ static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
struct kprobe *cur = kprobe_running();
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-
- if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
- return 1;
-
- if (kcb->kprobe_status & KPROBE_HIT_SS) {
- resume_execution(cur, regs);
+ const struct exception_table_entry *entry;
+
+ switch(kcb->kprobe_status) {
+ case KPROBE_HIT_SS:
+ case KPROBE_REENTER:
+ /*
+ * We are here because the instruction being single
+ * stepped caused a page fault. We reset the current
+ * kprobe, point nip back to the probe address, and
+ * allow the page fault handler to continue as a
+ * normal page fault.
+ */
+ regs->nip = (unsigned long)cur->addr;
regs->msr &= ~MSR_SE;
regs->msr |= kcb->kprobe_saved_msr;
-
- reset_current_kprobe();
+ if (kcb->kprobe_status == KPROBE_REENTER)
+ restore_previous_kprobe(kcb);
+ else
+ reset_current_kprobe();
preempt_enable_no_resched();
+ break;
+ case KPROBE_HIT_ACTIVE:
+ case KPROBE_HIT_SSDONE:
+ /*
+ * We increment the nmissed count for accounting,
+ * we can also use npre/npostfault count for accounting
+ * these specific fault cases.
+ */
+ kprobes_inc_nmissed_count(cur);
+
+ /*
+ * We come here because instructions in the pre/post
+ * handler caused the page fault; this could happen
+ * if the handler tries to access user space via
+ * copy_from_user(), get_user(), etc. Let the
+ * user-specified handler try to fix it first.
+ */
+ if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
+ return 1;
+
+ /*
+ * In case the user-specified fault handler returned
+ * zero, try to fix up.
+ */
+ if ((entry = search_exception_tables(regs->nip)) != NULL) {
+ regs->nip = entry->fixup;
+ return 1;
+ }
+
+ /*
+ * The exception table fixup could not handle it;
+ * let do_page_fault() fix it.
+ */
+ break;
+ default:
+ break;
}
return 0;
}
@@ -396,6 +443,9 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
struct die_args *args = (struct die_args *)data;
int ret = NOTIFY_DONE;
+ if (args->regs && user_mode(args->regs))
+ return ret;
+
switch (val) {
case DIE_BPT:
if (kprobe_handler(args->regs))
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 1770a066c21..f698aa77127 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -35,7 +35,6 @@
#include <linux/mqueue.h>
#include <linux/hardirq.h>
#include <linux/utsname.h>
-#include <linux/kprobes.h>
#include <asm/pgtable.h>
#include <asm/uaccess.h>
@@ -460,7 +459,6 @@ void show_regs(struct pt_regs * regs)
void exit_thread(void)
{
- kprobe_flush_task(current);
discard_lazy_cpu_state();
}
diff --git a/arch/powerpc/kernel/sys_ppc32.c b/arch/powerpc/kernel/sys_ppc32.c
index cd75ab2908f..ec274e68881 100644
--- a/arch/powerpc/kernel/sys_ppc32.c
+++ b/arch/powerpc/kernel/sys_ppc32.c
@@ -24,7 +24,6 @@
#include <linux/resource.h>
#include <linux/times.h>
#include <linux/utsname.h>
-#include <linux/timex.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/sem.h>
@@ -161,78 +160,6 @@ asmlinkage long compat_sys_sysfs(u32 option, u32 arg1, u32 arg2)
return sys_sysfs((int)option, arg1, arg2);
}
-/* Handle adjtimex compatibility. */
-struct timex32 {
- u32 modes;
- s32 offset, freq, maxerror, esterror;
- s32 status, constant, precision, tolerance;
- struct compat_timeval time;
- s32 tick;
- s32 ppsfreq, jitter, shift, stabil;
- s32 jitcnt, calcnt, errcnt, stbcnt;
- s32 :32; s32 :32; s32 :32; s32 :32;
- s32 :32; s32 :32; s32 :32; s32 :32;
- s32 :32; s32 :32; s32 :32; s32 :32;
-};
-
-extern int do_adjtimex(struct timex *);
-
-asmlinkage long compat_sys_adjtimex(struct timex32 __user *utp)
-{
- struct timex txc;
- int ret;
-
- memset(&txc, 0, sizeof(struct timex));
-
- if(get_user(txc.modes, &utp->modes) ||
- __get_user(txc.offset, &utp->offset) ||
- __get_user(txc.freq, &utp->freq) ||
- __get_user(txc.maxerror, &utp->maxerror) ||
- __get_user(txc.esterror, &utp->esterror) ||
- __get_user(txc.status, &utp->status) ||
- __get_user(txc.constant, &utp->constant) ||
- __get_user(txc.precision, &utp->precision) ||
- __get_user(txc.tolerance, &utp->tolerance) ||
- __get_user(txc.time.tv_sec, &utp->time.tv_sec) ||
- __get_user(txc.time.tv_usec, &utp->time.tv_usec) ||
- __get_user(txc.tick, &utp->tick) ||
- __get_user(txc.ppsfreq, &utp->ppsfreq) ||
- __get_user(txc.jitter, &utp->jitter) ||
- __get_user(txc.shift, &utp->shift) ||
- __get_user(txc.stabil, &utp->stabil) ||
- __get_user(txc.jitcnt, &utp->jitcnt) ||
- __get_user(txc.calcnt, &utp->calcnt) ||
- __get_user(txc.errcnt, &utp->errcnt) ||
- __get_user(txc.stbcnt, &utp->stbcnt))
- return -EFAULT;
-
- ret = do_adjtimex(&txc);
-
- if(put_user(txc.modes, &utp->modes) ||
- __put_user(txc.offset, &utp->offset) ||
- __put_user(txc.freq, &utp->freq) ||
- __put_user(txc.maxerror, &utp->maxerror) ||
- __put_user(txc.esterror, &utp->esterror) ||
- __put_user(txc.status, &utp->status) ||
- __put_user(txc.constant, &utp->constant) ||
- __put_user(txc.precision, &utp->precision) ||
- __put_user(txc.tolerance, &utp->tolerance) ||
- __put_user(txc.time.tv_sec, &utp->time.tv_sec) ||
- __put_user(txc.time.tv_usec, &utp->time.tv_usec) ||
- __put_user(txc.tick, &utp->tick) ||
- __put_user(txc.ppsfreq, &utp->ppsfreq) ||
- __put_user(txc.jitter, &utp->jitter) ||
- __put_user(txc.shift, &utp->shift) ||
- __put_user(txc.stabil, &utp->stabil) ||
- __put_user(txc.jitcnt, &utp->jitcnt) ||
- __put_user(txc.calcnt, &utp->calcnt) ||
- __put_user(txc.errcnt, &utp->errcnt) ||
- __put_user(txc.stbcnt, &utp->stbcnt))
- ret = -EFAULT;
-
- return ret;
-}
-
asmlinkage long compat_sys_pause(void)
{
current->state = TASK_INTERRUPTIBLE;
diff --git a/arch/powerpc/mm/imalloc.c b/arch/powerpc/mm/imalloc.c
index 8b0c132bc16..add8c1a9af6 100644
--- a/arch/powerpc/mm/imalloc.c
+++ b/arch/powerpc/mm/imalloc.c
@@ -13,12 +13,12 @@
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
-#include <asm/semaphore.h>
+#include <linux/mutex.h>
#include <asm/cacheflush.h>
#include "mmu_decl.h"
-static DECLARE_MUTEX(imlist_sem);
+static DEFINE_MUTEX(imlist_mutex);
struct vm_struct * imlist = NULL;
static int get_free_im_addr(unsigned long size, unsigned long *im_addr)
@@ -257,7 +257,7 @@ struct vm_struct * im_get_free_area(unsigned long size)
struct vm_struct *area;
unsigned long addr;
- down(&imlist_sem);
+ mutex_lock(&imlist_mutex);
if (get_free_im_addr(size, &addr)) {
printk(KERN_ERR "%s() cannot obtain addr for size 0x%lx\n",
__FUNCTION__, size);
@@ -272,7 +272,7 @@ struct vm_struct * im_get_free_area(unsigned long size)
__FUNCTION__, addr, size);
}
next_im_done:
- up(&imlist_sem);
+ mutex_unlock(&imlist_mutex);
return area;
}
@@ -281,9 +281,9 @@ struct vm_struct * im_get_area(unsigned long v_addr, unsigned long size,
{
struct vm_struct *area;
- down(&imlist_sem);
+ mutex_lock(&imlist_mutex);
area = __im_get_area(v_addr, size, criteria);
- up(&imlist_sem);
+ mutex_unlock(&imlist_mutex);
return area;
}
@@ -297,17 +297,17 @@ void im_free(void * addr)
printk(KERN_ERR "Trying to %s bad address (%p)\n", __FUNCTION__, addr);
return;
}
- down(&imlist_sem);
+ mutex_lock(&imlist_mutex);
for (p = &imlist ; (tmp = *p) ; p = &tmp->next) {
if (tmp->addr == addr) {
*p = tmp->next;
unmap_vm_area(tmp);
kfree(tmp);
- up(&imlist_sem);
+ mutex_unlock(&imlist_mutex);
return;
}
}
- up(&imlist_sem);
+ mutex_unlock(&imlist_mutex);
printk(KERN_ERR "Trying to %s nonexistent area (%p)\n", __FUNCTION__,
addr);
}
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
index d75ae03df68..a8fa1eeeb17 100644
--- a/arch/powerpc/platforms/cell/spu_base.c
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -32,7 +32,7 @@
#include <asm/io.h>
#include <asm/prom.h>
-#include <asm/semaphore.h>
+#include <linux/mutex.h>
#include <asm/spu.h>
#include <asm/mmu_context.h>
@@ -342,7 +342,7 @@ spu_free_irqs(struct spu *spu)
}
static LIST_HEAD(spu_list);
-static DECLARE_MUTEX(spu_mutex);
+static DEFINE_MUTEX(spu_mutex);
static void spu_init_channels(struct spu *spu)
{
@@ -382,7 +382,7 @@ struct spu *spu_alloc(void)
{
struct spu *spu;
- down(&spu_mutex);
+ mutex_lock(&spu_mutex);
if (!list_empty(&spu_list)) {
spu = list_entry(spu_list.next, struct spu, list);
list_del_init(&spu->list);
@@ -391,7 +391,7 @@ struct spu *spu_alloc(void)
pr_debug("No SPU left\n");
spu = NULL;
}
- up(&spu_mutex);
+ mutex_unlock(&spu_mutex);
if (spu)
spu_init_channels(spu);
@@ -402,9 +402,9 @@ EXPORT_SYMBOL_GPL(spu_alloc);
void spu_free(struct spu *spu)
{
- down(&spu_mutex);
+ mutex_lock(&spu_mutex);
list_add_tail(&spu->list, &spu_list);
- up(&spu_mutex);
+ mutex_unlock(&spu_mutex);
}
EXPORT_SYMBOL_GPL(spu_free);
@@ -633,14 +633,14 @@ static int __init create_spu(struct device_node *spe)
spu->wbox_callback = NULL;
spu->stop_callback = NULL;
- down(&spu_mutex);
+ mutex_lock(&spu_mutex);
spu->number = number++;
ret = spu_request_irqs(spu);
if (ret)
goto out_unmap;
list_add(&spu->list, &spu_list);
- up(&spu_mutex);
+ mutex_unlock(&spu_mutex);
pr_debug(KERN_DEBUG "Using SPE %s %02x %p %p %p %p %d\n",
spu->name, spu->isrc, spu->local_store,
@@ -648,7 +648,7 @@ static int __init create_spu(struct device_node *spe)
goto out;
out_unmap:
- up(&spu_mutex);
+ mutex_unlock(&spu_mutex);
spu_unmap(spu);
out_free:
kfree(spu);
@@ -668,10 +668,10 @@ static void destroy_spu(struct spu *spu)
static void cleanup_spu_base(void)
{
struct spu *spu, *tmp;
- down(&spu_mutex);
+ mutex_lock(&spu_mutex);
list_for_each_entry_safe(spu, tmp, &spu_list, list)
destroy_spu(spu);
- up(&spu_mutex);
+ mutex_unlock(&spu_mutex);
}
module_exit(cleanup_spu_base);
diff --git a/arch/powerpc/platforms/powermac/cpufreq_64.c b/arch/powerpc/platforms/powermac/cpufreq_64.c
index a415e8d2f7a..b57e465a1b7 100644
--- a/arch/powerpc/platforms/powermac/cpufreq_64.c
+++ b/arch/powerpc/platforms/powermac/cpufreq_64.c
@@ -21,6 +21,7 @@
#include <linux/cpufreq.h>
#include <linux/init.h>
#include <linux/completion.h>
+#include <linux/mutex.h>
#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/irq.h>
@@ -90,7 +91,7 @@ static void (*g5_switch_volt)(int speed_mode);
static int (*g5_switch_freq)(int speed_mode);
static int (*g5_query_freq)(void);
-static DECLARE_MUTEX(g5_switch_mutex);
+static DEFINE_MUTEX(g5_switch_mutex);
static struct smu_sdbp_fvt *g5_fvt_table; /* table of op. points */
@@ -327,7 +328,7 @@ static int g5_cpufreq_target(struct cpufreq_policy *policy,
if (g5_pmode_cur == newstate)
return 0;
- down(&g5_switch_mutex);
+ mutex_lock(&g5_switch_mutex);
freqs.old = g5_cpu_freqs[g5_pmode_cur].frequency;
freqs.new = g5_cpu_freqs[newstate].frequency;
@@ -337,7 +338,7 @@ static int g5_cpufreq_target(struct cpufreq_policy *policy,
rc = g5_switch_freq(newstate);
cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
- up(&g5_switch_mutex);
+ mutex_unlock(&g5_switch_mutex);
return rc;
}
diff --git a/arch/ppc/Kconfig b/arch/ppc/Kconfig
index 54a0a9bb12d..3a3e302b4ea 100644
--- a/arch/ppc/Kconfig
+++ b/arch/ppc/Kconfig
@@ -19,6 +19,10 @@ config RWSEM_XCHGADD_ALGORITHM
bool
default y
+config GENERIC_HWEIGHT
+ bool
+ default y
+
config GENERIC_CALIBRATE_DELAY
bool
default y
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 2b7364ed23b..01c5c082f97 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -14,6 +14,10 @@ config RWSEM_XCHGADD_ALGORITHM
bool
default y
+config GENERIC_HWEIGHT
+ bool
+ default y
+
config GENERIC_CALIBRATE_DELAY
bool
default y
diff --git a/arch/s390/crypto/crypt_s390_query.c b/arch/s390/crypto/crypt_s390_query.c
index def02bdc44a..54fb11d7fad 100644
--- a/arch/s390/crypto/crypt_s390_query.c
+++ b/arch/s390/crypto/crypt_s390_query.c
@@ -55,7 +55,7 @@ static void query_available_functions(void)
printk(KERN_INFO "KMC_AES_256: %d\n",
crypt_s390_func_available(KMC_AES_256_ENCRYPT));
- /* query available KIMD fucntions */
+ /* query available KIMD functions */
printk(KERN_INFO "KIMD_QUERY: %d\n",
crypt_s390_func_available(KIMD_QUERY));
printk(KERN_INFO "KIMD_SHA_1: %d\n",
diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c
index cc058dc3bc8..5e14de37c17 100644
--- a/arch/s390/kernel/compat_linux.c
+++ b/arch/s390/kernel/compat_linux.c
@@ -26,7 +26,6 @@
#include <linux/resource.h>
#include <linux/times.h>
#include <linux/utsname.h>
-#include <linux/timex.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/sem.h>
@@ -705,79 +704,6 @@ asmlinkage long sys32_sendfile64(int out_fd, int in_fd,
return ret;
}
-/* Handle adjtimex compatibility. */
-
-struct timex32 {
- u32 modes;
- s32 offset, freq, maxerror, esterror;
- s32 status, constant, precision, tolerance;
- struct compat_timeval time;
- s32 tick;
- s32 ppsfreq, jitter, shift, stabil;
- s32 jitcnt, calcnt, errcnt, stbcnt;
- s32 :32; s32 :32; s32 :32; s32 :32;
- s32 :32; s32 :32; s32 :32; s32 :32;
- s32 :32; s32 :32; s32 :32; s32 :32;
-};
-
-extern int do_adjtimex(struct timex *);
-
-asmlinkage long sys32_adjtimex(struct timex32 __user *utp)
-{
- struct timex txc;
- int ret;
-
- memset(&txc, 0, sizeof(struct timex));
-
- if(get_user(txc.modes, &utp->modes) ||
- __get_user(txc.offset, &utp->offset) ||
- __get_user(txc.freq, &utp->freq) ||
- __get_user(txc.maxerror, &utp->maxerror) ||
- __get_user(txc.esterror, &utp->esterror) ||
- __get_user(txc.status, &utp->status) ||
- __get_user(txc.constant, &utp->constant) ||
- __get_user(txc.precision, &utp->precision) ||
- __get_user(txc.tolerance, &utp->tolerance) ||
- __get_user(txc.time.tv_sec, &utp->time.tv_sec) ||
- __get_user(txc.time.tv_usec, &utp->time.tv_usec) ||
- __get_user(txc.tick, &utp->tick) ||
- __get_user(txc.ppsfreq, &utp->ppsfreq) ||
- __get_user(txc.jitter, &utp->jitter) ||
- __get_user(txc.shift, &utp->shift) ||
- __get_user(txc.stabil, &utp->stabil) ||
- __get_user(txc.jitcnt, &utp->jitcnt) ||
- __get_user(txc.calcnt, &utp->calcnt) ||
- __get_user(txc.errcnt, &utp->errcnt) ||
- __get_user(txc.stbcnt, &utp->stbcnt))
- return -EFAULT;
-
- ret = do_adjtimex(&txc);
-
- if(put_user(txc.modes, &utp->modes) ||
- __put_user(txc.offset, &utp->offset) ||
- __put_user(txc.freq, &utp->freq) ||
- __put_user(txc.maxerror, &utp->maxerror) ||
- __put_user(txc.esterror, &utp->esterror) ||
- __put_user(txc.status, &utp->status) ||
- __put_user(txc.constant, &utp->constant) ||
- __put_user(txc.precision, &utp->precision) ||
- __put_user(txc.tolerance, &utp->tolerance) ||
- __put_user(txc.time.tv_sec, &utp->time.tv_sec) ||
- __put_user(txc.time.tv_usec, &utp->time.tv_usec) ||
- __put_user(txc.tick, &utp->tick) ||
- __put_user(txc.ppsfreq, &utp->ppsfreq) ||
- __put_user(txc.jitter, &utp->jitter) ||
- __put_user(txc.shift, &utp->shift) ||
- __put_user(txc.stabil, &utp->stabil) ||
- __put_user(txc.jitcnt, &utp->jitcnt) ||
- __put_user(txc.calcnt, &utp->calcnt) ||
- __put_user(txc.errcnt, &utp->errcnt) ||
- __put_user(txc.stbcnt, &utp->stbcnt))
- ret = -EFAULT;
-
- return ret;
-}
-
#ifdef CONFIG_SYSCTL
struct __sysctl_args32 {
u32 name;
diff --git a/arch/s390/kernel/compat_wrapper.S b/arch/s390/kernel/compat_wrapper.S
index 50e80138e7a..199da68bd7b 100644
--- a/arch/s390/kernel/compat_wrapper.S
+++ b/arch/s390/kernel/compat_wrapper.S
@@ -551,10 +551,10 @@ sys32_newuname_wrapper:
llgtr %r2,%r2 # struct new_utsname *
jg s390x_newuname # branch to system call
- .globl sys32_adjtimex_wrapper
-sys32_adjtimex_wrapper:
- llgtr %r2,%r2 # struct timex_emu31 *
- jg sys32_adjtimex # branch to system call
+ .globl compat_sys_adjtimex_wrapper
+compat_sys_adjtimex_wrapper:
+ llgtr %r2,%r2 # struct compat_timex *
+ jg compat_sys_adjtimex # branch to system call
.globl sys32_mprotect_wrapper
sys32_mprotect_wrapper:
diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S
index 7c88d85c359..2f56654da82 100644
--- a/arch/s390/kernel/syscalls.S
+++ b/arch/s390/kernel/syscalls.S
@@ -132,7 +132,7 @@ SYSCALL(sys_clone_glue,sys_clone_glue,sys32_clone_glue) /* 120 */
SYSCALL(sys_setdomainname,sys_setdomainname,sys32_setdomainname_wrapper)
SYSCALL(sys_newuname,s390x_newuname,sys32_newuname_wrapper)
NI_SYSCALL /* modify_ldt for i386 */
-SYSCALL(sys_adjtimex,sys_adjtimex,sys32_adjtimex_wrapper)
+SYSCALL(sys_adjtimex,sys_adjtimex,compat_sys_adjtimex_wrapper)
SYSCALL(sys_mprotect,sys_mprotect,sys32_mprotect_wrapper) /* 125 */
SYSCALL(sys_sigprocmask,sys_sigprocmask,compat_sys_sigprocmask_wrapper)
NI_SYSCALL /* old "create module" */
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index e9b275d9073..58583f45947 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -21,6 +21,14 @@ config RWSEM_GENERIC_SPINLOCK
config RWSEM_XCHGADD_ALGORITHM
bool
+config GENERIC_FIND_NEXT_BIT
+ bool
+ default y
+
+config GENERIC_HWEIGHT
+ bool
+ default y
+
config GENERIC_HARDIRQS
bool
default y
diff --git a/arch/sh64/Kconfig b/arch/sh64/Kconfig
index 07b172deb87..58c678e0666 100644
--- a/arch/sh64/Kconfig
+++ b/arch/sh64/Kconfig
@@ -21,6 +21,14 @@ config RWSEM_GENERIC_SPINLOCK
bool
default y
+config GENERIC_FIND_NEXT_BIT
+ bool
+ default y
+
+config GENERIC_HWEIGHT
+ bool
+ default y
+
config GENERIC_CALIBRATE_DELAY
bool
default y
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 7c58fc1a39c..9431e967aa4 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -150,6 +150,14 @@ config RWSEM_GENERIC_SPINLOCK
config RWSEM_XCHGADD_ALGORITHM
bool
+config GENERIC_FIND_NEXT_BIT
+ bool
+ default y
+
+config GENERIC_HWEIGHT
+ bool
+ default y
+
config GENERIC_CALIBRATE_DELAY
bool
default y
diff --git a/arch/sparc64/Kconfig b/arch/sparc64/Kconfig
index 267afddf63c..d1e2fc56648 100644
--- a/arch/sparc64/Kconfig
+++ b/arch/sparc64/Kconfig
@@ -162,6 +162,14 @@ config RWSEM_XCHGADD_ALGORITHM
bool
default y
+config GENERIC_FIND_NEXT_BIT
+ bool
+ default y
+
+config GENERIC_HWEIGHT
+ bool
+ default y if !ULTRA_HAS_POPULATION_COUNT
+
config GENERIC_CALIBRATE_DELAY
bool
default y
diff --git a/arch/sparc64/kernel/kprobes.c b/arch/sparc64/kernel/kprobes.c
index b9a9ce70e55..ffc7309e9f2 100644
--- a/arch/sparc64/kernel/kprobes.c
+++ b/arch/sparc64/kernel/kprobes.c
@@ -6,9 +6,11 @@
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/kprobes.h>
+#include <linux/module.h>
#include <asm/kdebug.h>
#include <asm/signal.h>
#include <asm/cacheflush.h>
+#include <asm/uaccess.h>
/* We do not have hardware single-stepping on sparc64.
* So we implement software single-stepping with breakpoint
@@ -302,16 +304,68 @@ static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
struct kprobe *cur = kprobe_running();
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+ const struct exception_table_entry *entry;
+
+ switch(kcb->kprobe_status) {
+ case KPROBE_HIT_SS:
+ case KPROBE_REENTER:
+ /*
+ * We are here because the instruction being
+ * single-stepped caused a page fault. We reset the
+ * current kprobe so that the tpc points back to the
+ * probe address, and allow the page fault handler to
+ * continue as a normal page fault.
+ */
+ regs->tpc = (unsigned long)cur->addr;
+ regs->tnpc = kcb->kprobe_orig_tnpc;
+ regs->tstate = ((regs->tstate & ~TSTATE_PIL) |
+ kcb->kprobe_orig_tstate_pil);
+ if (kcb->kprobe_status == KPROBE_REENTER)
+ restore_previous_kprobe(kcb);
+ else
+ reset_current_kprobe();
+ preempt_enable_no_resched();
+ break;
+ case KPROBE_HIT_ACTIVE:
+ case KPROBE_HIT_SSDONE:
+ /*
+ * We increment the nmissed count for accounting;
+ * the npre/npostfault counts could also be used to
+ * account for these specific fault cases.
+ */
+ kprobes_inc_nmissed_count(cur);
+
+ /*
+ * We come here because instructions in the pre/post
+ * handler caused the page fault. This could happen
+ * if the handler tries to access user space via
+ * copy_from_user(), get_user() etc. Let the
+ * user-specified handler try to fix it first.
+ */
+ if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
+ return 1;
- if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
- return 1;
+ /*
+ * In case the user-specified fault handler returned
+ * zero, try to fix up.
+ */
- if (kcb->kprobe_status & KPROBE_HIT_SS) {
- resume_execution(cur, regs, kcb);
+ entry = search_exception_tables(regs->tpc);
+ if (entry) {
+ regs->tpc = entry->fixup;
+ regs->tnpc = regs->tpc + 4;
+ return 1;
+ }
- reset_current_kprobe();
- preempt_enable_no_resched();
+ /*
+ * The exception table could not fix it up;
+ * let do_page_fault() handle it.
+ */
+ break;
+ default:
+ break;
}
+
return 0;
}
@@ -324,6 +378,9 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
struct die_args *args = (struct die_args *)data;
int ret = NOTIFY_DONE;
+ if (args->regs && user_mode(args->regs))
+ return ret;
+
switch (val) {
case DIE_DEBUG:
if (kprobe_handler(args->regs))
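Two behavioural changes are folded into this kprobes rework: kprobe_fault_handler() now dispatches on kcb->kprobe_status, so a fault taken while single-stepping restores the probed context (tpc/tnpc/tstate) before falling back to the normal page fault path, while a fault raised from a pre/post handler is offered first to the user fault handler and then to the exception tables; and kprobe_exceptions_notify() now returns early for traps taken in user mode, since kprobes only instrument kernel text. The same rework is applied to the x86_64 kprobes code later in this diff. The exception-table fallback amounts to roughly the following (a sketch of the idea, not the arch code itself):

    #include <linux/module.h>       /* search_exception_tables() */
    #include <asm/uaccess.h>        /* struct exception_table_entry */

    /* Returns 1 if the faulting kernel PC has a fixup stub to resume at. */
    static int fixup_kernel_fault(unsigned long *pc)
    {
            const struct exception_table_entry *e = search_exception_tables(*pc);

            if (!e)
                    return 0;       /* let do_page_fault() handle it */
            *pc = e->fixup;         /* resume at the fixup stub */
            return 1;
    }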
diff --git a/arch/sparc64/kernel/sparc64_ksyms.c b/arch/sparc64/kernel/sparc64_ksyms.c
index 9914a17651b..c7fbbcfce82 100644
--- a/arch/sparc64/kernel/sparc64_ksyms.c
+++ b/arch/sparc64/kernel/sparc64_ksyms.c
@@ -175,11 +175,6 @@ EXPORT_SYMBOL(set_bit);
EXPORT_SYMBOL(clear_bit);
EXPORT_SYMBOL(change_bit);
-/* Bit searching */
-EXPORT_SYMBOL(find_next_bit);
-EXPORT_SYMBOL(find_next_zero_bit);
-EXPORT_SYMBOL(find_next_zero_le_bit);
-
EXPORT_SYMBOL(ivector_table);
EXPORT_SYMBOL(enable_irq);
EXPORT_SYMBOL(disable_irq);
diff --git a/arch/sparc64/kernel/sys_sparc32.c b/arch/sparc64/kernel/sys_sparc32.c
index 0e41df02448..2e906bad56f 100644
--- a/arch/sparc64/kernel/sys_sparc32.c
+++ b/arch/sparc64/kernel/sys_sparc32.c
@@ -19,7 +19,6 @@
#include <linux/resource.h>
#include <linux/times.h>
#include <linux/utsname.h>
-#include <linux/timex.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/sem.h>
@@ -945,79 +944,6 @@ asmlinkage long compat_sys_sendfile64(int out_fd, int in_fd,
return ret;
}
-/* Handle adjtimex compatibility. */
-
-struct timex32 {
- u32 modes;
- s32 offset, freq, maxerror, esterror;
- s32 status, constant, precision, tolerance;
- struct compat_timeval time;
- s32 tick;
- s32 ppsfreq, jitter, shift, stabil;
- s32 jitcnt, calcnt, errcnt, stbcnt;
- s32 :32; s32 :32; s32 :32; s32 :32;
- s32 :32; s32 :32; s32 :32; s32 :32;
- s32 :32; s32 :32; s32 :32; s32 :32;
-};
-
-extern int do_adjtimex(struct timex *);
-
-asmlinkage long sys32_adjtimex(struct timex32 __user *utp)
-{
- struct timex txc;
- int ret;
-
- memset(&txc, 0, sizeof(struct timex));
-
- if (get_user(txc.modes, &utp->modes) ||
- __get_user(txc.offset, &utp->offset) ||
- __get_user(txc.freq, &utp->freq) ||
- __get_user(txc.maxerror, &utp->maxerror) ||
- __get_user(txc.esterror, &utp->esterror) ||
- __get_user(txc.status, &utp->status) ||
- __get_user(txc.constant, &utp->constant) ||
- __get_user(txc.precision, &utp->precision) ||
- __get_user(txc.tolerance, &utp->tolerance) ||
- __get_user(txc.time.tv_sec, &utp->time.tv_sec) ||
- __get_user(txc.time.tv_usec, &utp->time.tv_usec) ||
- __get_user(txc.tick, &utp->tick) ||
- __get_user(txc.ppsfreq, &utp->ppsfreq) ||
- __get_user(txc.jitter, &utp->jitter) ||
- __get_user(txc.shift, &utp->shift) ||
- __get_user(txc.stabil, &utp->stabil) ||
- __get_user(txc.jitcnt, &utp->jitcnt) ||
- __get_user(txc.calcnt, &utp->calcnt) ||
- __get_user(txc.errcnt, &utp->errcnt) ||
- __get_user(txc.stbcnt, &utp->stbcnt))
- return -EFAULT;
-
- ret = do_adjtimex(&txc);
-
- if (put_user(txc.modes, &utp->modes) ||
- __put_user(txc.offset, &utp->offset) ||
- __put_user(txc.freq, &utp->freq) ||
- __put_user(txc.maxerror, &utp->maxerror) ||
- __put_user(txc.esterror, &utp->esterror) ||
- __put_user(txc.status, &utp->status) ||
- __put_user(txc.constant, &utp->constant) ||
- __put_user(txc.precision, &utp->precision) ||
- __put_user(txc.tolerance, &utp->tolerance) ||
- __put_user(txc.time.tv_sec, &utp->time.tv_sec) ||
- __put_user(txc.time.tv_usec, &utp->time.tv_usec) ||
- __put_user(txc.tick, &utp->tick) ||
- __put_user(txc.ppsfreq, &utp->ppsfreq) ||
- __put_user(txc.jitter, &utp->jitter) ||
- __put_user(txc.shift, &utp->shift) ||
- __put_user(txc.stabil, &utp->stabil) ||
- __put_user(txc.jitcnt, &utp->jitcnt) ||
- __put_user(txc.calcnt, &utp->calcnt) ||
- __put_user(txc.errcnt, &utp->errcnt) ||
- __put_user(txc.stbcnt, &utp->stbcnt))
- ret = -EFAULT;
-
- return ret;
-}
-
/* This is just a version for 32-bit applications which does
* not force O_LARGEFILE on.
*/
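The per-architecture struct timex32 and its hand-rolled sys32_adjtimex() copy-in/copy-out are dropped here, mirroring the s390 wrapper change above and the identical removal in the x86_64 ia32 code later in this diff, in favour of a single generic compat_sys_adjtimex() operating on struct compat_timex; the 32-bit syscall tables below are rewired accordingly. The generic handler has roughly this shape (a sketch only; compat_get_timex() and compat_put_timex() are hypothetical helpers standing in for the field-by-field __get_user()/__put_user() copying that the removed code performed):

    asmlinkage long compat_sys_adjtimex(struct compat_timex __user *utp)
    {
            struct timex txc;
            int ret;

            memset(&txc, 0, sizeof(txc));
            if (compat_get_timex(&txc, utp))        /* 32-bit -> native */
                    return -EFAULT;

            ret = do_adjtimex(&txc);

            if (compat_put_timex(utp, &txc))        /* native -> 32-bit */
                    return -EFAULT;
            return ret;
    }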
diff --git a/arch/sparc64/kernel/systbls.S b/arch/sparc64/kernel/systbls.S
index c3adb7ac167..3b250f2318f 100644
--- a/arch/sparc64/kernel/systbls.S
+++ b/arch/sparc64/kernel/systbls.S
@@ -63,7 +63,7 @@ sys_call_table32:
/*200*/ .word sys32_ssetmask, sys_sigsuspend, compat_sys_newlstat, sys_uselib, compat_sys_old_readdir
.word sys32_readahead, sys32_socketcall, sys32_syslog, sys32_lookup_dcookie, sys32_fadvise64
/*210*/ .word sys32_fadvise64_64, sys32_tgkill, sys32_waitpid, sys_swapoff, sys32_sysinfo
- .word sys32_ipc, sys32_sigreturn, sys_clone, sys32_ioprio_get, sys32_adjtimex
+ .word sys32_ipc, sys32_sigreturn, sys_clone, sys32_ioprio_get, compat_sys_adjtimex
/*220*/ .word sys32_sigprocmask, sys_ni_syscall, sys32_delete_module, sys_ni_syscall, sys32_getpgid
.word sys32_bdflush, sys32_sysfs, sys_nis_syscall, sys32_setfsuid16, sys32_setfsgid16
/*230*/ .word sys32_select, compat_sys_time, sys_nis_syscall, compat_sys_stime, compat_sys_statfs64
diff --git a/arch/sparc64/lib/Makefile b/arch/sparc64/lib/Makefile
index 8812ded19f0..4a725d8985f 100644
--- a/arch/sparc64/lib/Makefile
+++ b/arch/sparc64/lib/Makefile
@@ -14,6 +14,6 @@ lib-y := PeeCeeI.o copy_page.o clear_page.o strlen.o strncmp.o \
NGmemcpy.o NGcopy_from_user.o NGcopy_to_user.o NGpatch.o \
NGpage.o NGbzero.o \
copy_in_user.o user_fixup.o memmove.o \
- mcount.o ipcsum.o rwsem.o xor.o find_bit.o delay.o
+ mcount.o ipcsum.o rwsem.o xor.o delay.o
obj-y += iomap.o
diff --git a/arch/sparc64/lib/find_bit.c b/arch/sparc64/lib/find_bit.c
deleted file mode 100644
index 6059557067b..00000000000
--- a/arch/sparc64/lib/find_bit.c
+++ /dev/null
@@ -1,127 +0,0 @@
-#include <linux/bitops.h>
-
-/**
- * find_next_bit - find the next set bit in a memory region
- * @addr: The address to base the search on
- * @offset: The bitnumber to start searching at
- * @size: The maximum size to search
- */
-unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
- unsigned long offset)
-{
- const unsigned long *p = addr + (offset >> 6);
- unsigned long result = offset & ~63UL;
- unsigned long tmp;
-
- if (offset >= size)
- return size;
- size -= result;
- offset &= 63UL;
- if (offset) {
- tmp = *(p++);
- tmp &= (~0UL << offset);
- if (size < 64)
- goto found_first;
- if (tmp)
- goto found_middle;
- size -= 64;
- result += 64;
- }
- while (size & ~63UL) {
- if ((tmp = *(p++)))
- goto found_middle;
- result += 64;
- size -= 64;
- }
- if (!size)
- return result;
- tmp = *p;
-
-found_first:
- tmp &= (~0UL >> (64 - size));
- if (tmp == 0UL) /* Are any bits set? */
- return result + size; /* Nope. */
-found_middle:
- return result + __ffs(tmp);
-}
-
-/* find_next_zero_bit() finds the first zero bit in a bit string of length
- * 'size' bits, starting the search at bit 'offset'. This is largely based
- * on Linus's ALPHA routines, which are pretty portable BTW.
- */
-
-unsigned long find_next_zero_bit(const unsigned long *addr,
- unsigned long size, unsigned long offset)
-{
- const unsigned long *p = addr + (offset >> 6);
- unsigned long result = offset & ~63UL;
- unsigned long tmp;
-
- if (offset >= size)
- return size;
- size -= result;
- offset &= 63UL;
- if (offset) {
- tmp = *(p++);
- tmp |= ~0UL >> (64-offset);
- if (size < 64)
- goto found_first;
- if (~tmp)
- goto found_middle;
- size -= 64;
- result += 64;
- }
- while (size & ~63UL) {
- if (~(tmp = *(p++)))
- goto found_middle;
- result += 64;
- size -= 64;
- }
- if (!size)
- return result;
- tmp = *p;
-
-found_first:
- tmp |= ~0UL << size;
- if (tmp == ~0UL) /* Are any bits zero? */
- return result + size; /* Nope. */
-found_middle:
- return result + ffz(tmp);
-}
-
-unsigned long find_next_zero_le_bit(unsigned long *addr, unsigned long size, unsigned long offset)
-{
- unsigned long *p = addr + (offset >> 6);
- unsigned long result = offset & ~63UL;
- unsigned long tmp;
-
- if (offset >= size)
- return size;
- size -= result;
- offset &= 63UL;
- if(offset) {
- tmp = __swab64p(p++);
- tmp |= (~0UL >> (64-offset));
- if(size < 64)
- goto found_first;
- if(~tmp)
- goto found_middle;
- size -= 64;
- result += 64;
- }
- while(size & ~63) {
- if(~(tmp = __swab64p(p++)))
- goto found_middle;
- result += 64;
- size -= 64;
- }
- if(!size)
- return result;
- tmp = __swab64p(p);
-found_first:
- tmp |= (~0UL << size);
- if (tmp == ~0UL) /* Are any bits zero? */
- return result + size; /* Nope. */
-found_middle:
- return result + ffz(tmp);
-}
diff --git a/arch/um/Kconfig.i386 b/arch/um/Kconfig.i386
index ef79ed25aec..85e6a55b3b5 100644
--- a/arch/um/Kconfig.i386
+++ b/arch/um/Kconfig.i386
@@ -52,3 +52,8 @@ config ARCH_HAS_SC_SIGNALS
config ARCH_REUSE_HOST_VSYSCALL_AREA
bool
default y
+
+config GENERIC_HWEIGHT
+ bool
+ default y
+
diff --git a/arch/um/Kconfig.x86_64 b/arch/um/Kconfig.x86_64
index aae19bc4b06..f60e9e50642 100644
--- a/arch/um/Kconfig.x86_64
+++ b/arch/um/Kconfig.x86_64
@@ -46,3 +46,8 @@ config ARCH_REUSE_HOST_VSYSCALL_AREA
config SMP_BROKEN
bool
default y
+
+config GENERIC_HWEIGHT
+ bool
+ default y
+
diff --git a/arch/v850/Kconfig b/arch/v850/Kconfig
index e7fc3e50034..37ec644603a 100644
--- a/arch/v850/Kconfig
+++ b/arch/v850/Kconfig
@@ -16,6 +16,12 @@ config RWSEM_GENERIC_SPINLOCK
config RWSEM_XCHGADD_ALGORITHM
bool
default n
+config GENERIC_FIND_NEXT_BIT
+ bool
+ default y
+config GENERIC_HWEIGHT
+ bool
+ default y
config GENERIC_CALIBRATE_DELAY
bool
default y
diff --git a/arch/x86_64/Kconfig b/arch/x86_64/Kconfig
index 6420baeb8c1..45efe0ca88f 100644
--- a/arch/x86_64/Kconfig
+++ b/arch/x86_64/Kconfig
@@ -45,6 +45,10 @@ config RWSEM_GENERIC_SPINLOCK
config RWSEM_XCHGADD_ALGORITHM
bool
+config GENERIC_HWEIGHT
+ bool
+ default y
+
config GENERIC_CALIBRATE_DELAY
bool
default y
diff --git a/arch/x86_64/Makefile b/arch/x86_64/Makefile
index 0fbc0283609..585fd4a559c 100644
--- a/arch/x86_64/Makefile
+++ b/arch/x86_64/Makefile
@@ -70,7 +70,7 @@ drivers-$(CONFIG_OPROFILE) += arch/x86_64/oprofile/
boot := arch/x86_64/boot
PHONY += bzImage bzlilo install archmrproper \
- fdimage fdimage144 fdimage288 archclean
+ fdimage fdimage144 fdimage288 isoimage archclean
#Default target when executing "make"
all: bzImage
@@ -87,7 +87,7 @@ bzlilo: vmlinux
bzdisk: vmlinux
$(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(BOOTIMAGE) zdisk
-fdimage fdimage144 fdimage288: vmlinux
+fdimage fdimage144 fdimage288 isoimage: vmlinux
$(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(BOOTIMAGE) $@
install:
@@ -99,11 +99,16 @@ archclean:
define archhelp
echo '* bzImage - Compressed kernel image (arch/$(ARCH)/boot/bzImage)'
echo ' install - Install kernel using'
- echo ' (your) ~/bin/installkernel or'
- echo ' (distribution) /sbin/installkernel or'
- echo ' install to $$(INSTALL_PATH) and run lilo'
+ echo ' (your) ~/bin/installkernel or'
+ echo ' (distribution) /sbin/installkernel or'
+ echo ' install to $$(INSTALL_PATH) and run lilo'
+ echo ' bzdisk - Create a boot floppy in /dev/fd0'
+ echo ' fdimage - Create a boot floppy image'
+ echo ' isoimage - Create a boot CD-ROM image'
endef
-CLEAN_FILES += arch/$(ARCH)/boot/fdimage arch/$(ARCH)/boot/mtools.conf
+CLEAN_FILES += arch/$(ARCH)/boot/fdimage \
+ arch/$(ARCH)/boot/image.iso \
+ arch/$(ARCH)/boot/mtools.conf
diff --git a/arch/x86_64/boot/Makefile b/arch/x86_64/boot/Makefile
index 29f8396ed15..43ee6c50c27 100644
--- a/arch/x86_64/boot/Makefile
+++ b/arch/x86_64/boot/Makefile
@@ -60,8 +60,12 @@ $(obj)/setup $(obj)/bootsect: %: %.o FORCE
$(obj)/compressed/vmlinux: FORCE
$(Q)$(MAKE) $(build)=$(obj)/compressed IMAGE_OFFSET=$(IMAGE_OFFSET) $@
-# Set this if you want to pass append arguments to the zdisk/fdimage kernel
+# Set this if you want to pass append arguments to the zdisk/fdimage/isoimage kernel
FDARGS =
+# Set this if you want an initrd included with the zdisk/fdimage/isoimage kernel
+FDINITRD =
+
+image_cmdline = default linux $(FDARGS) $(if $(FDINITRD),initrd=initrd.img,)
$(obj)/mtools.conf: $(src)/mtools.conf.in
sed -e 's|@OBJ@|$(obj)|g' < $< > $@
@@ -70,8 +74,11 @@ $(obj)/mtools.conf: $(src)/mtools.conf.in
zdisk: $(BOOTIMAGE) $(obj)/mtools.conf
MTOOLSRC=$(obj)/mtools.conf mformat a: ; sync
syslinux /dev/fd0 ; sync
- echo 'default linux $(FDARGS)' | \
+ echo '$(image_cmdline)' | \
MTOOLSRC=$(obj)/mtools.conf mcopy - a:syslinux.cfg
+ if [ -f '$(FDINITRD)' ] ; then \
+ MTOOLSRC=$(obj)/mtools.conf mcopy '$(FDINITRD)' a:initrd.img ; \
+ fi
MTOOLSRC=$(obj)/mtools.conf mcopy $(BOOTIMAGE) a:linux ; sync
# These require being root or having syslinux 2.02 or higher installed
@@ -79,18 +86,39 @@ fdimage fdimage144: $(BOOTIMAGE) $(obj)/mtools.conf
dd if=/dev/zero of=$(obj)/fdimage bs=1024 count=1440
MTOOLSRC=$(obj)/mtools.conf mformat v: ; sync
syslinux $(obj)/fdimage ; sync
- echo 'default linux $(FDARGS)' | \
+ echo '$(image_cmdline)' | \
MTOOLSRC=$(obj)/mtools.conf mcopy - v:syslinux.cfg
+ if [ -f '$(FDINITRD)' ] ; then \
+ MTOOLSRC=$(obj)/mtools.conf mcopy '$(FDINITRD)' v:initrd.img ; \
+ fi
MTOOLSRC=$(obj)/mtools.conf mcopy $(BOOTIMAGE) v:linux ; sync
fdimage288: $(BOOTIMAGE) $(obj)/mtools.conf
dd if=/dev/zero of=$(obj)/fdimage bs=1024 count=2880
MTOOLSRC=$(obj)/mtools.conf mformat w: ; sync
syslinux $(obj)/fdimage ; sync
- echo 'default linux $(FDARGS)' | \
+ echo '$(image_cmdline)' | \
MTOOLSRC=$(obj)/mtools.conf mcopy - w:syslinux.cfg
+ if [ -f '$(FDINITRD)' ] ; then \
+ MTOOLSRC=$(obj)/mtools.conf mcopy '$(FDINITRD)' w:initrd.img ; \
+ fi
MTOOLSRC=$(obj)/mtools.conf mcopy $(BOOTIMAGE) w:linux ; sync
+isoimage: $(BOOTIMAGE)
+ -rm -rf $(obj)/isoimage
+ mkdir $(obj)/isoimage
+ cp `echo /usr/lib*/syslinux/isolinux.bin | awk '{ print $1; }'` \
+ $(obj)/isoimage
+ cp $(BOOTIMAGE) $(obj)/isoimage/linux
+ echo '$(image_cmdline)' > $(obj)/isoimage/isolinux.cfg
+ if [ -f '$(FDINITRD)' ] ; then \
+ cp '$(FDINITRD)' $(obj)/isoimage/initrd.img ; \
+ fi
+ mkisofs -J -r -o $(obj)/image.iso -b isolinux.bin -c boot.cat \
+ -no-emul-boot -boot-load-size 4 -boot-info-table \
+ $(obj)/isoimage
+ rm -rf $(obj)/isoimage
+
zlilo: $(BOOTIMAGE)
if [ -f $(INSTALL_PATH)/vmlinuz ]; then mv $(INSTALL_PATH)/vmlinuz $(INSTALL_PATH)/vmlinuz.old; fi
if [ -f $(INSTALL_PATH)/System.map ]; then mv $(INSTALL_PATH)/System.map $(INSTALL_PATH)/System.old; fi
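With these hooks in place, a command along the lines of "make isoimage FDARGS='console=ttyS0' FDINITRD=/boot/initrd.img" (the kernel arguments and initrd path are only illustrative) should produce a bootable arch/x86_64/boot/image.iso: the rule copies isolinux.bin and the boot image (as "linux") into a scratch isoimage directory, writes the isolinux.cfg line built from image_cmdline, optionally copies in the initrd, runs mkisofs, and removes the scratch directory again; image.iso is also added to CLEAN_FILES in the Makefile hunk above.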
diff --git a/arch/x86_64/ia32/ia32entry.S b/arch/x86_64/ia32/ia32entry.S
index 00dee176c08..7549a4389fb 100644
--- a/arch/x86_64/ia32/ia32entry.S
+++ b/arch/x86_64/ia32/ia32entry.S
@@ -501,7 +501,7 @@ ia32_sys_call_table:
.quad sys_setdomainname
.quad sys_uname
.quad sys_modify_ldt
- .quad sys32_adjtimex
+ .quad compat_sys_adjtimex
.quad sys32_mprotect /* 125 */
.quad compat_sys_sigprocmask
.quad quiet_ni_syscall /* create_module */
diff --git a/arch/x86_64/ia32/sys_ia32.c b/arch/x86_64/ia32/sys_ia32.c
index 2b2d029f477..f182b20858e 100644
--- a/arch/x86_64/ia32/sys_ia32.c
+++ b/arch/x86_64/ia32/sys_ia32.c
@@ -30,7 +30,6 @@
#include <linux/resource.h>
#include <linux/times.h>
#include <linux/utsname.h>
-#include <linux/timex.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/sem.h>
@@ -767,82 +766,6 @@ sys32_sendfile(int out_fd, int in_fd, compat_off_t __user *offset, s32 count)
return ret;
}
-/* Handle adjtimex compatibility. */
-
-struct timex32 {
- u32 modes;
- s32 offset, freq, maxerror, esterror;
- s32 status, constant, precision, tolerance;
- struct compat_timeval time;
- s32 tick;
- s32 ppsfreq, jitter, shift, stabil;
- s32 jitcnt, calcnt, errcnt, stbcnt;
- s32 :32; s32 :32; s32 :32; s32 :32;
- s32 :32; s32 :32; s32 :32; s32 :32;
- s32 :32; s32 :32; s32 :32; s32 :32;
-};
-
-extern int do_adjtimex(struct timex *);
-
-asmlinkage long
-sys32_adjtimex(struct timex32 __user *utp)
-{
- struct timex txc;
- int ret;
-
- memset(&txc, 0, sizeof(struct timex));
-
- if (!access_ok(VERIFY_READ, utp, sizeof(struct timex32)) ||
- __get_user(txc.modes, &utp->modes) ||
- __get_user(txc.offset, &utp->offset) ||
- __get_user(txc.freq, &utp->freq) ||
- __get_user(txc.maxerror, &utp->maxerror) ||
- __get_user(txc.esterror, &utp->esterror) ||
- __get_user(txc.status, &utp->status) ||
- __get_user(txc.constant, &utp->constant) ||
- __get_user(txc.precision, &utp->precision) ||
- __get_user(txc.tolerance, &utp->tolerance) ||
- __get_user(txc.time.tv_sec, &utp->time.tv_sec) ||
- __get_user(txc.time.tv_usec, &utp->time.tv_usec) ||
- __get_user(txc.tick, &utp->tick) ||
- __get_user(txc.ppsfreq, &utp->ppsfreq) ||
- __get_user(txc.jitter, &utp->jitter) ||
- __get_user(txc.shift, &utp->shift) ||
- __get_user(txc.stabil, &utp->stabil) ||
- __get_user(txc.jitcnt, &utp->jitcnt) ||
- __get_user(txc.calcnt, &utp->calcnt) ||
- __get_user(txc.errcnt, &utp->errcnt) ||
- __get_user(txc.stbcnt, &utp->stbcnt))
- return -EFAULT;
-
- ret = do_adjtimex(&txc);
-
- if (!access_ok(VERIFY_WRITE, utp, sizeof(struct timex32)) ||
- __put_user(txc.modes, &utp->modes) ||
- __put_user(txc.offset, &utp->offset) ||
- __put_user(txc.freq, &utp->freq) ||
- __put_user(txc.maxerror, &utp->maxerror) ||
- __put_user(txc.esterror, &utp->esterror) ||
- __put_user(txc.status, &utp->status) ||
- __put_user(txc.constant, &utp->constant) ||
- __put_user(txc.precision, &utp->precision) ||
- __put_user(txc.tolerance, &utp->tolerance) ||
- __put_user(txc.time.tv_sec, &utp->time.tv_sec) ||
- __put_user(txc.time.tv_usec, &utp->time.tv_usec) ||
- __put_user(txc.tick, &utp->tick) ||
- __put_user(txc.ppsfreq, &utp->ppsfreq) ||
- __put_user(txc.jitter, &utp->jitter) ||
- __put_user(txc.shift, &utp->shift) ||
- __put_user(txc.stabil, &utp->stabil) ||
- __put_user(txc.jitcnt, &utp->jitcnt) ||
- __put_user(txc.calcnt, &utp->calcnt) ||
- __put_user(txc.errcnt, &utp->errcnt) ||
- __put_user(txc.stbcnt, &utp->stbcnt))
- ret = -EFAULT;
-
- return ret;
-}
-
asmlinkage long sys32_mmap2(unsigned long addr, unsigned long len,
unsigned long prot, unsigned long flags,
unsigned long fd, unsigned long pgoff)
diff --git a/arch/x86_64/kernel/kprobes.c b/arch/x86_64/kernel/kprobes.c
index 14f0ced613b..accbff3fec4 100644
--- a/arch/x86_64/kernel/kprobes.c
+++ b/arch/x86_64/kernel/kprobes.c
@@ -37,10 +37,12 @@
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/preempt.h>
+#include <linux/module.h>
#include <asm/cacheflush.h>
#include <asm/pgtable.h>
#include <asm/kdebug.h>
+#include <asm/uaccess.h>
void jprobe_return_end(void);
static void __kprobes arch_copy_kprobe(struct kprobe *p);
@@ -578,16 +580,62 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
struct kprobe *cur = kprobe_running();
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+ const struct exception_table_entry *fixup;
- if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
- return 1;
-
- if (kcb->kprobe_status & KPROBE_HIT_SS) {
- resume_execution(cur, regs, kcb);
+ switch (kcb->kprobe_status) {
+ case KPROBE_HIT_SS:
+ case KPROBE_REENTER:
+ /*
+ * We are here because the instruction being
+ * single-stepped caused a page fault. We reset the
+ * current kprobe so that the rip points back to the
+ * probe address, and allow the page fault handler to
+ * continue as a normal page fault.
+ */
+ regs->rip = (unsigned long)cur->addr;
regs->eflags |= kcb->kprobe_old_rflags;
-
- reset_current_kprobe();
+ if (kcb->kprobe_status == KPROBE_REENTER)
+ restore_previous_kprobe(kcb);
+ else
+ reset_current_kprobe();
preempt_enable_no_resched();
+ break;
+ case KPROBE_HIT_ACTIVE:
+ case KPROBE_HIT_SSDONE:
+ /*
+ * We increment the nmissed count for accounting;
+ * the npre/npostfault counts could also be used to
+ * account for these specific fault cases.
+ */
+ kprobes_inc_nmissed_count(cur);
+
+ /*
+ * We come here because instructions in the pre/post
+ * handler caused the page fault. This could happen
+ * if the handler tries to access user space via
+ * copy_from_user(), get_user() etc. Let the
+ * user-specified handler try to fix it first.
+ */
+ if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
+ return 1;
+
+ /*
+ * In case the user-specified fault handler returned
+ * zero, try to fix up.
+ */
+ fixup = search_exception_tables(regs->rip);
+ if (fixup) {
+ regs->rip = fixup->fixup;
+ return 1;
+ }
+
+ /*
+ * The exception-table fixup could not handle it;
+ * let do_page_fault() fix it.
+ */
+ break;
+ default:
+ break;
}
return 0;
}
@@ -601,6 +649,9 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
struct die_args *args = (struct die_args *)data;
int ret = NOTIFY_DONE;
+ if (args->regs && user_mode(args->regs))
+ return ret;
+
switch (val) {
case DIE_INT3:
if (kprobe_handler(args->regs))
diff --git a/arch/x86_64/kernel/process.c b/arch/x86_64/kernel/process.c
index 81111835722..0370720515f 100644
--- a/arch/x86_64/kernel/process.c
+++ b/arch/x86_64/kernel/process.c
@@ -35,8 +35,8 @@
#include <linux/ptrace.h>
#include <linux/utsname.h>
#include <linux/random.h>
-#include <linux/kprobes.h>
#include <linux/notifier.h>
+#include <linux/kprobes.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
@@ -353,13 +353,6 @@ void exit_thread(void)
struct task_struct *me = current;
struct thread_struct *t = &me->thread;
- /*
- * Remove function-return probe instances associated with this task
- * and put them back on the free list. Do not insert an exit probe for
- * this function, it will be disabled by kprobe_flush_task if you do.
- */
- kprobe_flush_task(me);
-
if (me->thread.io_bitmap_ptr) {
struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
index e90ef5db891..dbeb3504c3c 100644
--- a/arch/xtensa/Kconfig
+++ b/arch/xtensa/Kconfig
@@ -22,6 +22,14 @@ config RWSEM_XCHGADD_ALGORITHM
bool
default y
+config GENERIC_FIND_NEXT_BIT
+ bool
+ default y
+
+config GENERIC_HWEIGHT
+ bool
+ default y
+
config GENERIC_HARDIRQS
bool
default y
diff --git a/block/Kconfig b/block/Kconfig
index 96783645092..43ca070dc0f 100644
--- a/block/Kconfig
+++ b/block/Kconfig
@@ -23,4 +23,13 @@ config BLK_DEV_IO_TRACE
git://brick.kernel.dk/data/git/blktrace.git
+config LSF
+ bool "Support for Large Single Files"
+ depends on X86 || (MIPS && 32BIT) || PPC32 || ARCH_S390_31 || SUPERH || UML
+ default n
+ help
+ When CONFIG_LBD is disabled, say Y here if you want to
+ handle large files (bigger than 2TB); otherwise say N.
+ When CONFIG_LBD is enabled, Y is set automatically.
+
source block/Kconfig.iosched
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index c4a0d5d8d7f..bde40a6ae66 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -2191,7 +2191,7 @@ static int cfq_init_queue(request_queue_t *q, elevator_t *e)
if (!cfqd->cfq_hash)
goto out_cfqhash;
- cfqd->crq_pool = mempool_create(BLKDEV_MIN_RQ, mempool_alloc_slab, mempool_free_slab, crq_pool);
+ cfqd->crq_pool = mempool_create_slab_pool(BLKDEV_MIN_RQ, crq_pool);
if (!cfqd->crq_pool)
goto out_crqpool;
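This conversion (repeated for aoeblk and pktcdvd further down) replaces open-coded allocator callback pairs with the mempool_create_slab_pool() and mempool_create_kmalloc_pool() convenience constructors. A minimal sketch of the equivalence for the slab case, assuming an already-created cache:

    #include <linux/mempool.h>
    #include <linux/slab.h>

    static mempool_t *make_slab_pool(kmem_cache_t *cache, int min_nr)
    {
            /* New style: the slab alloc/free callbacks are implied. */
            return mempool_create_slab_pool(min_nr, cache);
            /*
             * Old style, as removed by this patch:
             *   mempool_create(min_nr, mempool_alloc_slab,
             *                  mempool_free_slab, cache);
             */
    }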
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index ac5bbaedac1..13b5fd5854a 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -156,12 +156,10 @@ acpi_status acpi_os_get_root_pointer(u32 flags, struct acpi_pointer *addr)
{
if (efi_enabled) {
addr->pointer_type = ACPI_PHYSICAL_POINTER;
- if (efi.acpi20)
- addr->pointer.physical =
- (acpi_physical_address) virt_to_phys(efi.acpi20);
- else if (efi.acpi)
- addr->pointer.physical =
- (acpi_physical_address) virt_to_phys(efi.acpi);
+ if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
+ addr->pointer.physical = efi.acpi20;
+ else if (efi.acpi != EFI_INVALID_TABLE_ADDR)
+ addr->pointer.physical = efi.acpi;
else {
printk(KERN_ERR PREFIX
"System description tables not found\n");
@@ -182,22 +180,14 @@ acpi_status
acpi_os_map_memory(acpi_physical_address phys, acpi_size size,
void __iomem ** virt)
{
- if (efi_enabled) {
- if (EFI_MEMORY_WB & efi_mem_attributes(phys)) {
- *virt = (void __iomem *)phys_to_virt(phys);
- } else {
- *virt = ioremap(phys, size);
- }
- } else {
- if (phys > ULONG_MAX) {
- printk(KERN_ERR PREFIX "Cannot map memory that high\n");
- return AE_BAD_PARAMETER;
- }
- /*
- * ioremap checks to ensure this is in reserved space
- */
- *virt = ioremap((unsigned long)phys, size);
+ if (phys > ULONG_MAX) {
+ printk(KERN_ERR PREFIX "Cannot map memory that high\n");
+ return AE_BAD_PARAMETER;
}
+ /*
+ * ioremap checks to ensure this is in reserved space
+ */
+ *virt = ioremap((unsigned long)phys, size);
if (!*virt)
return AE_NO_MEMORY;
@@ -409,18 +399,8 @@ acpi_os_read_memory(acpi_physical_address phys_addr, u32 * value, u32 width)
{
u32 dummy;
void __iomem *virt_addr;
- int iomem = 0;
- if (efi_enabled) {
- if (EFI_MEMORY_WB & efi_mem_attributes(phys_addr)) {
- /* HACK ALERT! We can use readb/w/l on real memory too.. */
- virt_addr = (void __iomem *)phys_to_virt(phys_addr);
- } else {
- iomem = 1;
- virt_addr = ioremap(phys_addr, width);
- }
- } else
- virt_addr = (void __iomem *)phys_to_virt(phys_addr);
+ virt_addr = ioremap(phys_addr, width);
if (!value)
value = &dummy;
@@ -438,10 +418,7 @@ acpi_os_read_memory(acpi_physical_address phys_addr, u32 * value, u32 width)
BUG();
}
- if (efi_enabled) {
- if (iomem)
- iounmap(virt_addr);
- }
+ iounmap(virt_addr);
return AE_OK;
}
@@ -450,18 +427,8 @@ acpi_status
acpi_os_write_memory(acpi_physical_address phys_addr, u32 value, u32 width)
{
void __iomem *virt_addr;
- int iomem = 0;
- if (efi_enabled) {
- if (EFI_MEMORY_WB & efi_mem_attributes(phys_addr)) {
- /* HACK ALERT! We can use writeb/w/l on real memory too */
- virt_addr = (void __iomem *)phys_to_virt(phys_addr);
- } else {
- iomem = 1;
- virt_addr = ioremap(phys_addr, width);
- }
- } else
- virt_addr = (void __iomem *)phys_to_virt(phys_addr);
+ virt_addr = ioremap(phys_addr, width);
switch (width) {
case 8:
@@ -477,8 +444,7 @@ acpi_os_write_memory(acpi_physical_address phys_addr, u32 value, u32 width)
BUG();
}
- if (iomem)
- iounmap(virt_addr);
+ iounmap(virt_addr);
return AE_OK;
}
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index 99a3a28594d..713b763884a 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -246,7 +246,7 @@ static int acpi_processor_errata(struct acpi_processor *pr)
}
/* --------------------------------------------------------------------------
- Common ACPI processor fucntions
+ Common ACPI processor functions
-------------------------------------------------------------------------- */
/*
diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c
index 31d4f3ffc26..7f37c7cc5ef 100644
--- a/drivers/acpi/tables.c
+++ b/drivers/acpi/tables.c
@@ -587,7 +587,8 @@ int __init acpi_table_init(void)
return -ENODEV;
}
- rsdp = (struct acpi_table_rsdp *)__va(rsdp_phys);
+ rsdp = (struct acpi_table_rsdp *)__acpi_map_table(rsdp_phys,
+ sizeof(struct acpi_table_rsdp));
if (!rsdp) {
printk(KERN_WARNING PREFIX "Unable to map RSDP\n");
return -ENODEV;
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index e57ac5a4324..875ae769902 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -400,13 +400,16 @@ config BLK_DEV_RAM_SIZE
8192.
config BLK_DEV_INITRD
- bool "Initial RAM disk (initrd) support"
+ bool "Initial RAM filesystem and RAM disk (initramfs/initrd) support"
help
- The initial RAM disk is a RAM disk that is loaded by the boot loader
- (loadlin or lilo) and that is mounted as root before the normal boot
- procedure. It is typically used to load modules needed to mount the
- "real" root file system, etc. See <file:Documentation/initrd.txt>
- for details.
+ The initial RAM filesystem is a ramfs that is loaded by the
+ boot loader (loadlin or lilo) and mounted as root
+ before the normal boot procedure. It is typically used to
+ load modules needed to mount the "real" root file system,
+ etc. See <file:Documentation/initrd.txt> for details.
+
+ If RAM disk support (BLK_DEV_RAM) is also included, this
+ also enables initial RAM disk (initrd) support.
config CDROM_PKTCDVD
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
index 32fea55fac4..393b86a3dbf 100644
--- a/drivers/block/aoe/aoeblk.c
+++ b/drivers/block/aoe/aoeblk.c
@@ -211,9 +211,7 @@ aoeblk_gdalloc(void *vp)
return;
}
- d->bufpool = mempool_create(MIN_BUFS,
- mempool_alloc_slab, mempool_free_slab,
- buf_pool_cache);
+ d->bufpool = mempool_create_slab_pool(MIN_BUFS, buf_pool_cache);
if (d->bufpool == NULL) {
printk(KERN_ERR "aoe: aoeblk_gdalloc: cannot allocate bufpool "
"for %ld.%ld\n", d->aoemajor, d->aoeminor);
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 840919bba76..d3ad9081697 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -250,6 +250,18 @@ static int irqdma_allocated;
#include <linux/cdrom.h> /* for the compatibility eject ioctl */
#include <linux/completion.h>
+/*
+ * Interrupt freeing also means /proc VFS work - don't do it
+ * from interrupt context. We push this work into keventd:
+ */
+static void fd_free_irq_fn(void *data)
+{
+ fd_free_irq();
+}
+
+static DECLARE_WORK(fd_free_irq_work, fd_free_irq_fn, NULL);
+
+
static struct request *current_req;
static struct request_queue *floppy_queue;
static void do_fd_request(request_queue_t * q);
@@ -4433,6 +4445,13 @@ static int floppy_grab_irq_and_dma(void)
return 0;
}
spin_unlock_irqrestore(&floppy_usage_lock, flags);
+
+ /*
+ * We might have scheduled a free_irq(); wait for it
+ * to drain first:
+ */
+ flush_scheduled_work();
+
if (fd_request_irq()) {
DPRINT("Unable to grab IRQ%d for the floppy driver\n",
FLOPPY_IRQ);
@@ -4522,7 +4541,7 @@ static void floppy_release_irq_and_dma(void)
if (irqdma_allocated) {
fd_disable_dma();
fd_free_dma();
- fd_free_irq();
+ schedule_work(&fd_free_irq_work);
irqdma_allocated = 0;
}
set_dor(0, ~0, 8);
@@ -4633,6 +4652,8 @@ void cleanup_module(void)
/* eject disk, if any */
fd_eject(0);
+ flush_scheduled_work(); /* fd_free_irq() might be pending */
+
wait_for_completion(&device_release);
}
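The floppy changes defer fd_free_irq() to keventd because freeing an IRQ also involves /proc VFS work that must not run from interrupt context, and then flush the queue both before re-requesting the IRQ and on module unload so a pending free cannot race with the new request. The deferral pattern, in the workqueue API of this kernel generation (handlers take a void * argument), looks roughly like this sketch:

    #include <linux/workqueue.h>

    static void deferred_cleanup(void *data)
    {
            /* Runs in process context (keventd): may sleep, touch /proc, etc. */
    }
    static DECLARE_WORK(cleanup_work, deferred_cleanup, NULL);

    static void from_atomic_context(void)
    {
            schedule_work(&cleanup_work);   /* hand the work to keventd */
    }

    static void before_reuse_or_unload(void)
    {
            flush_scheduled_work();         /* wait for queued work to drain */
    }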
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 74bf0255e98..9c3b94e8f03 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -839,7 +839,9 @@ static int loop_set_fd(struct loop_device *lo, struct file *lo_file,
set_blocksize(bdev, lo_blocksize);
- kernel_thread(loop_thread, lo, CLONE_KERNEL);
+ error = kernel_thread(loop_thread, lo, CLONE_KERNEL);
+ if (error < 0)
+ goto out_putf;
wait_for_completion(&lo->lo_done);
return 0;
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 1d261f985f3..a04f60693c3 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -230,16 +230,6 @@ static int pkt_grow_pktlist(struct pktcdvd_device *pd, int nr_packets)
return 1;
}
-static void *pkt_rb_alloc(gfp_t gfp_mask, void *data)
-{
- return kmalloc(sizeof(struct pkt_rb_node), gfp_mask);
-}
-
-static void pkt_rb_free(void *ptr, void *data)
-{
- kfree(ptr);
-}
-
static inline struct pkt_rb_node *pkt_rbtree_next(struct pkt_rb_node *node)
{
struct rb_node *n = rb_next(&node->rb_node);
@@ -2073,16 +2063,6 @@ static int pkt_close(struct inode *inode, struct file *file)
}
-static void *psd_pool_alloc(gfp_t gfp_mask, void *data)
-{
- return kmalloc(sizeof(struct packet_stacked_data), gfp_mask);
-}
-
-static void psd_pool_free(void *ptr, void *data)
-{
- kfree(ptr);
-}
-
static int pkt_end_io_read_cloned(struct bio *bio, unsigned int bytes_done, int err)
{
struct packet_stacked_data *psd = bio->bi_private;
@@ -2475,7 +2455,8 @@ static int pkt_setup_dev(struct pkt_ctrl_command *ctrl_cmd)
if (!pd)
return ret;
- pd->rb_pool = mempool_create(PKT_RB_POOL_SIZE, pkt_rb_alloc, pkt_rb_free, NULL);
+ pd->rb_pool = mempool_create_kmalloc_pool(PKT_RB_POOL_SIZE,
+ sizeof(struct pkt_rb_node));
if (!pd->rb_pool)
goto out_mem;
@@ -2639,7 +2620,8 @@ static int __init pkt_init(void)
{
int ret;
- psd_pool = mempool_create(PSD_POOL_SIZE, psd_pool_alloc, psd_pool_free, NULL);
+ psd_pool = mempool_create_kmalloc_pool(PSD_POOL_SIZE,
+ sizeof(struct packet_stacked_data));
if (!psd_pool)
return -ENOMEM;
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 5980f3e886f..facc3f1d9e3 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -187,6 +187,7 @@ config MOXA_SMARTIO
config ISI
tristate "Multi-Tech multiport card support (EXPERIMENTAL)"
depends on SERIAL_NONSTANDARD
+ select FW_LOADER
help
This is a driver for the Multi-Tech cards which provide several
serial ports. The driver is experimental and can currently only be
diff --git a/drivers/char/ipmi/ipmi_devintf.c b/drivers/char/ipmi/ipmi_devintf.c
index 7c0684deea0..932feedda26 100644
--- a/drivers/char/ipmi/ipmi_devintf.c
+++ b/drivers/char/ipmi/ipmi_devintf.c
@@ -90,7 +90,7 @@ static unsigned int ipmi_poll(struct file *file, poll_table *wait)
spin_lock_irqsave(&priv->recv_msg_lock, flags);
- if (! list_empty(&(priv->recv_msgs)))
+ if (!list_empty(&(priv->recv_msgs)))
mask |= (POLLIN | POLLRDNORM);
spin_unlock_irqrestore(&priv->recv_msg_lock, flags);
@@ -789,21 +789,53 @@ MODULE_PARM_DESC(ipmi_major, "Sets the major number of the IPMI device. By"
" interface. Other values will set the major device number"
" to that value.");
+/* Keep track of the devices that are registered. */
+struct ipmi_reg_list {
+ dev_t dev;
+ struct list_head link;
+};
+static LIST_HEAD(reg_list);
+static DEFINE_MUTEX(reg_list_mutex);
+
static struct class *ipmi_class;
-static void ipmi_new_smi(int if_num)
+static void ipmi_new_smi(int if_num, struct device *device)
{
dev_t dev = MKDEV(ipmi_major, if_num);
+ struct ipmi_reg_list *entry;
devfs_mk_cdev(dev, S_IFCHR | S_IRUSR | S_IWUSR,
"ipmidev/%d", if_num);
- class_device_create(ipmi_class, NULL, dev, NULL, "ipmi%d", if_num);
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry) {
+ printk(KERN_ERR "ipmi_devintf: Unable to create the"
+ " ipmi class device link\n");
+ return;
+ }
+ entry->dev = dev;
+
+ mutex_lock(&reg_list_mutex);
+ class_device_create(ipmi_class, NULL, dev, device, "ipmi%d", if_num);
+ list_add(&entry->link, &reg_list);
+ mutex_unlock(&reg_list_mutex);
}
static void ipmi_smi_gone(int if_num)
{
- class_device_destroy(ipmi_class, MKDEV(ipmi_major, if_num));
+ dev_t dev = MKDEV(ipmi_major, if_num);
+ struct ipmi_reg_list *entry;
+
+ mutex_lock(&reg_list_mutex);
+ list_for_each_entry(entry, &reg_list, link) {
+ if (entry->dev == dev) {
+ list_del(&entry->link);
+ kfree(entry);
+ break;
+ }
+ }
+ class_device_destroy(ipmi_class, dev);
+ mutex_unlock(&reg_list_mutex);
devfs_remove("ipmidev/%d", if_num);
}
@@ -856,6 +888,14 @@ module_init(init_ipmi_devintf);
static __exit void cleanup_ipmi(void)
{
+ struct ipmi_reg_list *entry, *entry2;
+ mutex_lock(&reg_list_mutex);
+ list_for_each_entry_safe(entry, entry2, &reg_list, link) {
+ list_del(&entry->link);
+ class_device_destroy(ipmi_class, entry->dev);
+ kfree(entry);
+ }
+ mutex_unlock(&reg_list_mutex);
class_destroy(ipmi_class);
ipmi_smi_watcher_unregister(&smi_watcher);
devfs_remove(DEVICE_NAME);
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index abd4c5118a1..b8fb87c6c29 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -48,7 +48,7 @@
#define PFX "IPMI message handler: "
-#define IPMI_DRIVER_VERSION "38.0"
+#define IPMI_DRIVER_VERSION "39.0"
static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void);
static int ipmi_init_msghandler(void);
@@ -162,6 +162,28 @@ struct ipmi_proc_entry
};
#endif
+struct bmc_device
+{
+ struct platform_device *dev;
+ struct ipmi_device_id id;
+ unsigned char guid[16];
+ int guid_set;
+
+ struct kref refcount;
+
+ /* bmc device attributes */
+ struct device_attribute device_id_attr;
+ struct device_attribute provides_dev_sdrs_attr;
+ struct device_attribute revision_attr;
+ struct device_attribute firmware_rev_attr;
+ struct device_attribute version_attr;
+ struct device_attribute add_dev_support_attr;
+ struct device_attribute manufacturer_id_attr;
+ struct device_attribute product_id_attr;
+ struct device_attribute guid_attr;
+ struct device_attribute aux_firmware_rev_attr;
+};
+
#define IPMI_IPMB_NUM_SEQ 64
#define IPMI_MAX_CHANNELS 16
struct ipmi_smi
@@ -178,9 +200,8 @@ struct ipmi_smi
/* Used for wake ups at startup. */
wait_queue_head_t waitq;
- /* The IPMI version of the BMC on the other end. */
- unsigned char version_major;
- unsigned char version_minor;
+ struct bmc_device *bmc;
+ char *my_dev_name;
/* This is the lower-layer's sender routine. */
struct ipmi_smi_handlers *handlers;
@@ -194,6 +215,9 @@ struct ipmi_smi
struct ipmi_proc_entry *proc_entries;
#endif
+ /* Driver-model device for the system interface. */
+ struct device *si_dev;
+
/* A table of sequence numbers for this interface. We use the
sequence numbers for IPMB messages that go out of the
interface to match them up with their responses. A routine
@@ -312,6 +336,7 @@ struct ipmi_smi
/* Events that were received with the proper format. */
unsigned int events;
};
+#define to_si_intf_from_dev(device) container_of(device, struct ipmi_smi, dev)
/* Used to mark an interface entry that cannot be used but is not a
* free entry, either, primarily used at creation and deletion time so
@@ -320,6 +345,15 @@ struct ipmi_smi
#define IPMI_INVALID_INTERFACE(i) (((i) == NULL) \
|| (i == IPMI_INVALID_INTERFACE_ENTRY))
+/**
+ * The driver model view of the IPMI messaging driver.
+ */
+static struct device_driver ipmidriver = {
+ .name = "ipmi",
+ .bus = &platform_bus_type
+};
+static DEFINE_MUTEX(ipmidriver_mutex);
+
#define MAX_IPMI_INTERFACES 4
static ipmi_smi_t ipmi_interfaces[MAX_IPMI_INTERFACES];
@@ -393,7 +427,7 @@ int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher)
if (IPMI_INVALID_INTERFACE(intf))
continue;
spin_unlock_irqrestore(&interfaces_lock, flags);
- watcher->new_smi(i);
+ watcher->new_smi(i, intf->si_dev);
spin_lock_irqsave(&interfaces_lock, flags);
}
spin_unlock_irqrestore(&interfaces_lock, flags);
@@ -409,14 +443,14 @@ int ipmi_smi_watcher_unregister(struct ipmi_smi_watcher *watcher)
}
static void
-call_smi_watchers(int i)
+call_smi_watchers(int i, struct device *dev)
{
struct ipmi_smi_watcher *w;
down_read(&smi_watchers_sem);
list_for_each_entry(w, &smi_watchers, link) {
if (try_module_get(w->owner)) {
- w->new_smi(i);
+ w->new_smi(i, dev);
module_put(w->owner);
}
}
@@ -844,8 +878,8 @@ void ipmi_get_version(ipmi_user_t user,
unsigned char *major,
unsigned char *minor)
{
- *major = user->intf->version_major;
- *minor = user->intf->version_minor;
+ *major = ipmi_version_major(&user->intf->bmc->id);
+ *minor = ipmi_version_minor(&user->intf->bmc->id);
}
int ipmi_set_my_address(ipmi_user_t user,
@@ -1553,7 +1587,8 @@ static int version_file_read_proc(char *page, char **start, off_t off,
ipmi_smi_t intf = data;
return sprintf(out, "%d.%d\n",
- intf->version_major, intf->version_minor);
+ ipmi_version_major(&intf->bmc->id),
+ ipmi_version_minor(&intf->bmc->id));
}
static int stat_file_read_proc(char *page, char **start, off_t off,
@@ -1712,6 +1747,470 @@ static void remove_proc_entries(ipmi_smi_t smi)
#endif /* CONFIG_PROC_FS */
}
+static int __find_bmc_guid(struct device *dev, void *data)
+{
+ unsigned char *id = data;
+ struct bmc_device *bmc = dev_get_drvdata(dev);
+ return memcmp(bmc->guid, id, 16) == 0;
+}
+
+static struct bmc_device *ipmi_find_bmc_guid(struct device_driver *drv,
+ unsigned char *guid)
+{
+ struct device *dev;
+
+ dev = driver_find_device(drv, NULL, guid, __find_bmc_guid);
+ if (dev)
+ return dev_get_drvdata(dev);
+ else
+ return NULL;
+}
+
+struct prod_dev_id {
+ unsigned int product_id;
+ unsigned char device_id;
+};
+
+static int __find_bmc_prod_dev_id(struct device *dev, void *data)
+{
+ struct prod_dev_id *id = data;
+ struct bmc_device *bmc = dev_get_drvdata(dev);
+
+ return (bmc->id.product_id == id->product_id
+ && bmc->id.device_id == id->device_id);
+}
+
+static struct bmc_device *ipmi_find_bmc_prod_dev_id(
+ struct device_driver *drv,
+ unsigned char product_id, unsigned char device_id)
+{
+ struct prod_dev_id id = {
+ .product_id = product_id,
+ .device_id = device_id,
+ };
+ struct device *dev;
+
+ dev = driver_find_device(drv, NULL, &id, __find_bmc_prod_dev_id);
+ if (dev)
+ return dev_get_drvdata(dev);
+ else
+ return NULL;
+}
+
+static ssize_t device_id_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct bmc_device *bmc = dev_get_drvdata(dev);
+
+ return snprintf(buf, 10, "%u\n", bmc->id.device_id);
+}
+
+static ssize_t provides_dev_sdrs_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct bmc_device *bmc = dev_get_drvdata(dev);
+
+ return snprintf(buf, 10, "%u\n",
+ (bmc->id.device_revision & 0x80) >> 7);
+}
+
+static ssize_t revision_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct bmc_device *bmc = dev_get_drvdata(dev);
+
+ return snprintf(buf, 20, "%u\n",
+ bmc->id.device_revision & 0x0F);
+}
+
+static ssize_t firmware_rev_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct bmc_device *bmc = dev_get_drvdata(dev);
+
+ return snprintf(buf, 20, "%u.%x\n", bmc->id.firmware_revision_1,
+ bmc->id.firmware_revision_2);
+}
+
+static ssize_t ipmi_version_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct bmc_device *bmc = dev_get_drvdata(dev);
+
+ return snprintf(buf, 20, "%u.%u\n",
+ ipmi_version_major(&bmc->id),
+ ipmi_version_minor(&bmc->id));
+}
+
+static ssize_t add_dev_support_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct bmc_device *bmc = dev_get_drvdata(dev);
+
+ return snprintf(buf, 10, "0x%02x\n",
+ bmc->id.additional_device_support);
+}
+
+static ssize_t manufacturer_id_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct bmc_device *bmc = dev_get_drvdata(dev);
+
+ return snprintf(buf, 20, "0x%6.6x\n", bmc->id.manufacturer_id);
+}
+
+static ssize_t product_id_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct bmc_device *bmc = dev_get_drvdata(dev);
+
+ return snprintf(buf, 10, "0x%4.4x\n", bmc->id.product_id);
+}
+
+static ssize_t aux_firmware_rev_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct bmc_device *bmc = dev_get_drvdata(dev);
+
+ return snprintf(buf, 21, "0x%02x 0x%02x 0x%02x 0x%02x\n",
+ bmc->id.aux_firmware_revision[3],
+ bmc->id.aux_firmware_revision[2],
+ bmc->id.aux_firmware_revision[1],
+ bmc->id.aux_firmware_revision[0]);
+}
+
+static ssize_t guid_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct bmc_device *bmc = dev_get_drvdata(dev);
+
+ return snprintf(buf, 100, "%Lx%Lx\n",
+ (long long) bmc->guid[0],
+ (long long) bmc->guid[8]);
+}
+
+static void
+cleanup_bmc_device(struct kref *ref)
+{
+ struct bmc_device *bmc;
+
+ bmc = container_of(ref, struct bmc_device, refcount);
+
+ device_remove_file(&bmc->dev->dev,
+ &bmc->device_id_attr);
+ device_remove_file(&bmc->dev->dev,
+ &bmc->provides_dev_sdrs_attr);
+ device_remove_file(&bmc->dev->dev,
+ &bmc->revision_attr);
+ device_remove_file(&bmc->dev->dev,
+ &bmc->firmware_rev_attr);
+ device_remove_file(&bmc->dev->dev,
+ &bmc->version_attr);
+ device_remove_file(&bmc->dev->dev,
+ &bmc->add_dev_support_attr);
+ device_remove_file(&bmc->dev->dev,
+ &bmc->manufacturer_id_attr);
+ device_remove_file(&bmc->dev->dev,
+ &bmc->product_id_attr);
+ if (bmc->id.aux_firmware_revision_set)
+ device_remove_file(&bmc->dev->dev,
+ &bmc->aux_firmware_rev_attr);
+ if (bmc->guid_set)
+ device_remove_file(&bmc->dev->dev,
+ &bmc->guid_attr);
+ platform_device_unregister(bmc->dev);
+ kfree(bmc);
+}
+
+static void ipmi_bmc_unregister(ipmi_smi_t intf)
+{
+ struct bmc_device *bmc = intf->bmc;
+
+ sysfs_remove_link(&intf->si_dev->kobj, "bmc");
+ if (intf->my_dev_name) {
+ sysfs_remove_link(&bmc->dev->dev.kobj, intf->my_dev_name);
+ kfree(intf->my_dev_name);
+ intf->my_dev_name = NULL;
+ }
+
+ mutex_lock(&ipmidriver_mutex);
+ kref_put(&bmc->refcount, cleanup_bmc_device);
+ mutex_unlock(&ipmidriver_mutex);
+}
+
+static int ipmi_bmc_register(ipmi_smi_t intf)
+{
+ int rv;
+ struct bmc_device *bmc = intf->bmc;
+ struct bmc_device *old_bmc;
+ int size;
+ char dummy[1];
+
+ mutex_lock(&ipmidriver_mutex);
+
+ /*
+ * Try to find out whether a bmc_device struct already
+ * represents the BMC behind this interface.
+ */
+ if (bmc->guid_set)
+ old_bmc = ipmi_find_bmc_guid(&ipmidriver, bmc->guid);
+ else
+ old_bmc = ipmi_find_bmc_prod_dev_id(&ipmidriver,
+ bmc->id.product_id,
+ bmc->id.device_id);
+
+ /*
+ * If there is already a bmc_device, free the new one;
+ * otherwise register the new BMC device.
+ */
+ if (old_bmc) {
+ kfree(bmc);
+ intf->bmc = old_bmc;
+ bmc = old_bmc;
+
+ kref_get(&bmc->refcount);
+ mutex_unlock(&ipmidriver_mutex);
+
+ printk(KERN_INFO
+ "ipmi: interfacing existing BMC (man_id: 0x%6.6x,"
+ " prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
+ bmc->id.manufacturer_id,
+ bmc->id.product_id,
+ bmc->id.device_id);
+ } else {
+ bmc->dev = platform_device_alloc("ipmi_bmc",
+ bmc->id.device_id);
+ if (! bmc->dev) {
+ printk(KERN_ERR
+ "ipmi_msghandler:"
+ " Unable to allocate platform device\n");
+ return -ENOMEM;
+ }
+ bmc->dev->dev.driver = &ipmidriver;
+ dev_set_drvdata(&bmc->dev->dev, bmc);
+ kref_init(&bmc->refcount);
+
+ rv = platform_device_register(bmc->dev);
+ mutex_unlock(&ipmidriver_mutex);
+ if (rv) {
+ printk(KERN_ERR
+ "ipmi_msghandler:"
+ " Unable to register bmc device: %d\n",
+ rv);
+ /* Don't go to out_err; you can only do that if
+ the device is registered already. */
+ return rv;
+ }
+
+ bmc->device_id_attr.attr.name = "device_id";
+ bmc->device_id_attr.attr.owner = THIS_MODULE;
+ bmc->device_id_attr.attr.mode = S_IRUGO;
+ bmc->device_id_attr.show = device_id_show;
+
+ bmc->provides_dev_sdrs_attr.attr.name = "provides_device_sdrs";
+ bmc->provides_dev_sdrs_attr.attr.owner = THIS_MODULE;
+ bmc->provides_dev_sdrs_attr.attr.mode = S_IRUGO;
+ bmc->provides_dev_sdrs_attr.show = provides_dev_sdrs_show;
+
+
+ bmc->revision_attr.attr.name = "revision";
+ bmc->revision_attr.attr.owner = THIS_MODULE;
+ bmc->revision_attr.attr.mode = S_IRUGO;
+ bmc->revision_attr.show = revision_show;
+
+ bmc->firmware_rev_attr.attr.name = "firmware_revision";
+ bmc->firmware_rev_attr.attr.owner = THIS_MODULE;
+ bmc->firmware_rev_attr.attr.mode = S_IRUGO;
+ bmc->firmware_rev_attr.show = firmware_rev_show;
+
+ bmc->version_attr.attr.name = "ipmi_version";
+ bmc->version_attr.attr.owner = THIS_MODULE;
+ bmc->version_attr.attr.mode = S_IRUGO;
+ bmc->version_attr.show = ipmi_version_show;
+
+ bmc->add_dev_support_attr.attr.name
+ = "additional_device_support";
+ bmc->add_dev_support_attr.attr.owner = THIS_MODULE;
+ bmc->add_dev_support_attr.attr.mode = S_IRUGO;
+ bmc->add_dev_support_attr.show = add_dev_support_show;
+
+ bmc->manufacturer_id_attr.attr.name = "manufacturer_id";
+ bmc->manufacturer_id_attr.attr.owner = THIS_MODULE;
+ bmc->manufacturer_id_attr.attr.mode = S_IRUGO;
+ bmc->manufacturer_id_attr.show = manufacturer_id_show;
+
+ bmc->product_id_attr.attr.name = "product_id";
+ bmc->product_id_attr.attr.owner = THIS_MODULE;
+ bmc->product_id_attr.attr.mode = S_IRUGO;
+ bmc->product_id_attr.show = product_id_show;
+
+ bmc->guid_attr.attr.name = "guid";
+ bmc->guid_attr.attr.owner = THIS_MODULE;
+ bmc->guid_attr.attr.mode = S_IRUGO;
+ bmc->guid_attr.show = guid_show;
+
+ bmc->aux_firmware_rev_attr.attr.name = "aux_firmware_revision";
+ bmc->aux_firmware_rev_attr.attr.owner = THIS_MODULE;
+ bmc->aux_firmware_rev_attr.attr.mode = S_IRUGO;
+ bmc->aux_firmware_rev_attr.show = aux_firmware_rev_show;
+
+ device_create_file(&bmc->dev->dev,
+ &bmc->device_id_attr);
+ device_create_file(&bmc->dev->dev,
+ &bmc->provides_dev_sdrs_attr);
+ device_create_file(&bmc->dev->dev,
+ &bmc->revision_attr);
+ device_create_file(&bmc->dev->dev,
+ &bmc->firmware_rev_attr);
+ device_create_file(&bmc->dev->dev,
+ &bmc->version_attr);
+ device_create_file(&bmc->dev->dev,
+ &bmc->add_dev_support_attr);
+ device_create_file(&bmc->dev->dev,
+ &bmc->manufacturer_id_attr);
+ device_create_file(&bmc->dev->dev,
+ &bmc->product_id_attr);
+ if (bmc->id.aux_firmware_revision_set)
+ device_create_file(&bmc->dev->dev,
+ &bmc->aux_firmware_rev_attr);
+ if (bmc->guid_set)
+ device_create_file(&bmc->dev->dev,
+ &bmc->guid_attr);
+
+ printk(KERN_INFO
+ "ipmi: Found new BMC (man_id: 0x%6.6x,"
+ " prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
+ bmc->id.manufacturer_id,
+ bmc->id.product_id,
+ bmc->id.device_id);
+ }
+
+ /*
+ * create symlink from system interface device to bmc device
+ * and back.
+ */
+ rv = sysfs_create_link(&intf->si_dev->kobj,
+ &bmc->dev->dev.kobj, "bmc");
+ if (rv) {
+ printk(KERN_ERR
+ "ipmi_msghandler: Unable to create bmc symlink: %d\n",
+ rv);
+ goto out_err;
+ }
+
+ size = snprintf(dummy, 0, "ipmi%d", intf->intf_num);
+ intf->my_dev_name = kmalloc(size+1, GFP_KERNEL);
+ if (!intf->my_dev_name) {
+ rv = -ENOMEM;
+ printk(KERN_ERR
+ "ipmi_msghandler: Unable to allocate link from BMC: %d\n",
+ rv);
+ goto out_err;
+ }
+ snprintf(intf->my_dev_name, size+1, "ipmi%d", intf->intf_num);
+
+ rv = sysfs_create_link(&bmc->dev->dev.kobj, &intf->si_dev->kobj,
+ intf->my_dev_name);
+ if (rv) {
+ kfree(intf->my_dev_name);
+ intf->my_dev_name = NULL;
+ printk(KERN_ERR
+ "ipmi_msghandler:"
+ " Unable to create symlink to bmc: %d\n",
+ rv);
+ goto out_err;
+ }
+
+ return 0;
+
+out_err:
+ ipmi_bmc_unregister(intf);
+ return rv;
+}
+
+static int
+send_guid_cmd(ipmi_smi_t intf, int chan)
+{
+ struct kernel_ipmi_msg msg;
+ struct ipmi_system_interface_addr si;
+
+ si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+ si.channel = IPMI_BMC_CHANNEL;
+ si.lun = 0;
+
+ msg.netfn = IPMI_NETFN_APP_REQUEST;
+ msg.cmd = IPMI_GET_DEVICE_GUID_CMD;
+ msg.data = NULL;
+ msg.data_len = 0;
+ return i_ipmi_request(NULL,
+ intf,
+ (struct ipmi_addr *) &si,
+ 0,
+ &msg,
+ intf,
+ NULL,
+ NULL,
+ 0,
+ intf->channels[0].address,
+ intf->channels[0].lun,
+ -1, 0);
+}
+
+static void
+guid_handler(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
+{
+ if ((msg->addr.addr_type != IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
+ || (msg->msg.netfn != IPMI_NETFN_APP_RESPONSE)
+ || (msg->msg.cmd != IPMI_GET_DEVICE_GUID_CMD))
+ /* Not for me */
+ return;
+
+ if (msg->msg.data[0] != 0) {
+ /* Error from getting the GUID, the BMC doesn't have one. */
+ intf->bmc->guid_set = 0;
+ goto out;
+ }
+
+ if (msg->msg.data_len < 17) {
+ intf->bmc->guid_set = 0;
+ printk(KERN_WARNING PFX
+ "guid_handler: The GUID response from the BMC was too"
+ " short; it was %d but should have been 17. Assuming"
+ " GUID is not available.\n",
+ msg->msg.data_len);
+ goto out;
+ }
+
+ memcpy(intf->bmc->guid, msg->msg.data, 16);
+ intf->bmc->guid_set = 1;
+ out:
+ wake_up(&intf->waitq);
+}
+
+static void
+get_guid(ipmi_smi_t intf)
+{
+ int rv;
+
+ intf->bmc->guid_set = 2;
+ intf->null_user_handler = guid_handler;
+ rv = send_guid_cmd(intf, 0);
+ if (rv)
+ /* Send failed, no GUID available. */
+ intf->bmc->guid_set = 0;
+ wait_event(intf->waitq, intf->bmc->guid_set != 2);
+ intf->null_user_handler = NULL;
+}
+
static int
send_channel_info_cmd(ipmi_smi_t intf, int chan)
{
@@ -1804,8 +2303,8 @@ channel_handler(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
void *send_info,
- unsigned char version_major,
- unsigned char version_minor,
+ struct ipmi_device_id *device_id,
+ struct device *si_dev,
unsigned char slave_addr,
ipmi_smi_t *new_intf)
{
@@ -1813,7 +2312,11 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
int rv;
ipmi_smi_t intf;
unsigned long flags;
+ int version_major;
+ int version_minor;
+ version_major = ipmi_version_major(device_id);
+ version_minor = ipmi_version_minor(device_id);
/* Make sure the driver is actually initialized, this handles
problems with initialization order. */
@@ -1831,10 +2334,15 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
if (!intf)
return -ENOMEM;
memset(intf, 0, sizeof(*intf));
+ intf->bmc = kzalloc(sizeof(*intf->bmc), GFP_KERNEL);
+ if (!intf->bmc) {
+ kfree(intf);
+ return -ENOMEM;
+ }
intf->intf_num = -1;
kref_init(&intf->refcount);
- intf->version_major = version_major;
- intf->version_minor = version_minor;
+ intf->bmc->id = *device_id;
+ intf->si_dev = si_dev;
for (j = 0; j < IPMI_MAX_CHANNELS; j++) {
intf->channels[j].address = IPMI_BMC_SLAVE_ADDR;
intf->channels[j].lun = 2;
@@ -1884,6 +2392,8 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
caller before sending any messages with it. */
*new_intf = intf;
+ get_guid(intf);
+
if ((version_major > 1)
|| ((version_major == 1) && (version_minor >= 5)))
{
@@ -1898,6 +2408,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
/* Wait for the channel info to be read. */
wait_event(intf->waitq,
intf->curr_channel >= IPMI_MAX_CHANNELS);
+ intf->null_user_handler = NULL;
} else {
/* Assume a single IPMB channel at zero. */
intf->channels[0].medium = IPMI_CHANNEL_MEDIUM_IPMB;
@@ -1907,6 +2418,8 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
if (rv == 0)
rv = add_proc_entries(intf, i);
+ rv = ipmi_bmc_register(intf);
+
out:
if (rv) {
if (intf->proc_dir)
@@ -1921,7 +2434,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
spin_lock_irqsave(&interfaces_lock, flags);
ipmi_interfaces[i] = intf;
spin_unlock_irqrestore(&interfaces_lock, flags);
- call_smi_watchers(i);
+ call_smi_watchers(i, intf->si_dev);
}
return rv;
@@ -1933,6 +2446,8 @@ int ipmi_unregister_smi(ipmi_smi_t intf)
struct ipmi_smi_watcher *w;
unsigned long flags;
+ ipmi_bmc_unregister(intf);
+
spin_lock_irqsave(&interfaces_lock, flags);
for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
if (ipmi_interfaces[i] == intf) {
@@ -3196,10 +3711,17 @@ static struct notifier_block panic_block = {
static int ipmi_init_msghandler(void)
{
int i;
+ int rv;
if (initialized)
return 0;
+ rv = driver_register(&ipmidriver);
+ if (rv) {
+ printk(KERN_ERR PFX "Could not register IPMI driver\n");
+ return rv;
+ }
+
printk(KERN_INFO "ipmi message handler version "
IPMI_DRIVER_VERSION "\n");
@@ -3256,6 +3778,8 @@ static __exit void cleanup_ipmi(void)
remove_proc_entry(proc_ipmi_root->name, &proc_root);
#endif /* CONFIG_PROC_FS */
+ driver_unregister(&ipmidriver);
+
initialized = 0;
/* Check for buffer leaks. */
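The net effect of the bmc_device work above, as seen from userspace, is a platform device registered via platform_device_alloc("ipmi_bmc", ...) and named after the BMC's device ID, carrying read-only attributes such as device_id, provides_device_sdrs, revision, firmware_revision, ipmi_version, additional_device_support, manufacturer_id, product_id and, when reported by the BMC, aux_firmware_revision and guid, plus a pair of symlinks: "bmc" under the system-interface device and "ipmiN" back from the BMC device. Exact sysfs paths depend on how the platform bus lays out devices, so the names above are taken from the attribute setup in this hunk rather than from a particular /sys listing.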
diff --git a/drivers/char/ipmi/ipmi_poweroff.c b/drivers/char/ipmi/ipmi_poweroff.c
index e8ed26b77d4..786a2802ca3 100644
--- a/drivers/char/ipmi/ipmi_poweroff.c
+++ b/drivers/char/ipmi/ipmi_poweroff.c
@@ -464,7 +464,7 @@ static void ipmi_poweroff_function (void)
/* Wait for an IPMI interface to be installed, the first one installed
will be grabbed by this code and used to perform the powerdown. */
-static void ipmi_po_new_smi(int if_num)
+static void ipmi_po_new_smi(int if_num, struct device *device)
{
struct ipmi_system_interface_addr smi_addr;
struct kernel_ipmi_msg send_msg;
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index e59b638766e..12f858dc999 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -52,6 +52,7 @@
#include <linux/pci.h>
#include <linux/ioport.h>
#include <linux/notifier.h>
+#include <linux/mutex.h>
#include <linux/kthread.h>
#include <asm/irq.h>
#ifdef CONFIG_HIGH_RES_TIMERS
@@ -109,21 +110,15 @@ enum si_intf_state {
enum si_type {
SI_KCS, SI_SMIC, SI_BT
};
+static char *si_to_str[] = { "KCS", "SMIC", "BT" };
-struct ipmi_device_id {
- unsigned char device_id;
- unsigned char device_revision;
- unsigned char firmware_revision_1;
- unsigned char firmware_revision_2;
- unsigned char ipmi_version;
- unsigned char additional_device_support;
- unsigned char manufacturer_id[3];
- unsigned char product_id[2];
- unsigned char aux_firmware_revision[4];
-} __attribute__((packed));
-
-#define ipmi_version_major(v) ((v)->ipmi_version & 0xf)
-#define ipmi_version_minor(v) ((v)->ipmi_version >> 4)
+#define DEVICE_NAME "ipmi_si"
+
+static struct device_driver ipmi_driver =
+{
+ .name = DEVICE_NAME,
+ .bus = &platform_bus_type
+};
struct smi_info
{
@@ -147,6 +142,9 @@ struct smi_info
int (*irq_setup)(struct smi_info *info);
void (*irq_cleanup)(struct smi_info *info);
unsigned int io_size;
+ char *addr_source; /* ACPI, PCI, SMBIOS, hardcode, default. */
+ void (*addr_source_cleanup)(struct smi_info *info);
+ void *addr_source_data;
/* Per-OEM handler, called from handle_flags().
Returns 1 when handle_flags() needs to be re-run
@@ -203,8 +201,17 @@ struct smi_info
interrupts. */
int interrupt_disabled;
+ /* From the get device id response... */
struct ipmi_device_id device_id;
+ /* Driver model stuff. */
+ struct device *dev;
+ struct platform_device *pdev;
+
+ /* True if we allocated the device, false if it came from
+ * someplace else (like PCI). */
+ int dev_registered;
+
/* Slave address, could be reported from DMI. */
unsigned char slave_addr;
@@ -224,8 +231,12 @@ struct smi_info
unsigned long incoming_messages;
struct task_struct *thread;
+
+ struct list_head link;
};
+static int try_smi_init(struct smi_info *smi);
+
static struct notifier_block *xaction_notifier_list;
static int register_xaction_notifier(struct notifier_block * nb)
{
@@ -271,13 +282,13 @@ static enum si_sm_result start_next_msg(struct smi_info *smi_info)
spin_lock(&(smi_info->msg_lock));
/* Pick the high priority queue first. */
- if (! list_empty(&(smi_info->hp_xmit_msgs))) {
+ if (!list_empty(&(smi_info->hp_xmit_msgs))) {
entry = smi_info->hp_xmit_msgs.next;
- } else if (! list_empty(&(smi_info->xmit_msgs))) {
+ } else if (!list_empty(&(smi_info->xmit_msgs))) {
entry = smi_info->xmit_msgs.next;
}
- if (! entry) {
+ if (!entry) {
smi_info->curr_msg = NULL;
rv = SI_SM_IDLE;
} else {
@@ -344,7 +355,7 @@ static void start_clear_flags(struct smi_info *smi_info)
memory, we will re-enable the interrupt. */
static inline void disable_si_irq(struct smi_info *smi_info)
{
- if ((smi_info->irq) && (! smi_info->interrupt_disabled)) {
+ if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
disable_irq_nosync(smi_info->irq);
smi_info->interrupt_disabled = 1;
}
@@ -375,7 +386,7 @@ static void handle_flags(struct smi_info *smi_info)
} else if (smi_info->msg_flags & RECEIVE_MSG_AVAIL) {
/* Messages available. */
smi_info->curr_msg = ipmi_alloc_smi_msg();
- if (! smi_info->curr_msg) {
+ if (!smi_info->curr_msg) {
disable_si_irq(smi_info);
smi_info->si_state = SI_NORMAL;
return;
@@ -394,7 +405,7 @@ static void handle_flags(struct smi_info *smi_info)
} else if (smi_info->msg_flags & EVENT_MSG_BUFFER_FULL) {
/* Events available. */
smi_info->curr_msg = ipmi_alloc_smi_msg();
- if (! smi_info->curr_msg) {
+ if (!smi_info->curr_msg) {
disable_si_irq(smi_info);
smi_info->si_state = SI_NORMAL;
return;
@@ -430,7 +441,7 @@ static void handle_transaction_done(struct smi_info *smi_info)
#endif
switch (smi_info->si_state) {
case SI_NORMAL:
- if (! smi_info->curr_msg)
+ if (!smi_info->curr_msg)
break;
smi_info->curr_msg->rsp_size
@@ -880,7 +891,7 @@ static void smi_timeout(unsigned long data)
smi_info->last_timeout_jiffies = jiffies_now;
- if ((smi_info->irq) && (! smi_info->interrupt_disabled)) {
+ if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
/* Running with interrupts, only do long timeouts. */
smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES;
spin_lock_irqsave(&smi_info->count_lock, flags);
@@ -974,15 +985,10 @@ static struct ipmi_smi_handlers handlers =
a default IO port, and 1 ACPI/SPMI address. That sets SI_MAX_DRIVERS */
#define SI_MAX_PARMS 4
-#define SI_MAX_DRIVERS ((SI_MAX_PARMS * 2) + 2)
-static struct smi_info *smi_infos[SI_MAX_DRIVERS] =
-{ NULL, NULL, NULL, NULL };
+static LIST_HEAD(smi_infos);
+static DECLARE_MUTEX(smi_infos_lock);
+static int smi_num; /* Used to sequence the SMIs */
-#define DEVICE_NAME "ipmi_si"
-
-#define DEFAULT_KCS_IO_PORT 0xca2
-#define DEFAULT_SMIC_IO_PORT 0xca9
-#define DEFAULT_BT_IO_PORT 0xe4
#define DEFAULT_REGSPACING 1
static int si_trydefaults = 1;
@@ -1053,38 +1059,23 @@ MODULE_PARM_DESC(slave_addrs, "Set the default IPMB slave address for"
" by interface number.");
+#define IPMI_IO_ADDR_SPACE 0
#define IPMI_MEM_ADDR_SPACE 1
-#define IPMI_IO_ADDR_SPACE 2
+static char *addr_space_to_str[] = { "I/O", "memory" };
-#if defined(CONFIG_ACPI) || defined(CONFIG_DMI) || defined(CONFIG_PCI)
-static int is_new_interface(int intf, u8 addr_space, unsigned long base_addr)
+static void std_irq_cleanup(struct smi_info *info)
{
- int i;
-
- for (i = 0; i < SI_MAX_PARMS; ++i) {
- /* Don't check our address. */
- if (i == intf)
- continue;
- if (si_type[i] != NULL) {
- if ((addr_space == IPMI_MEM_ADDR_SPACE &&
- base_addr == addrs[i]) ||
- (addr_space == IPMI_IO_ADDR_SPACE &&
- base_addr == ports[i]))
- return 0;
- }
- else
- break;
- }
-
- return 1;
+ if (info->si_type == SI_BT)
+ /* Disable the interrupt in the BT interface. */
+ info->io.outputb(&info->io, IPMI_BT_INTMASK_REG, 0);
+ free_irq(info->irq, info);
}
-#endif
static int std_irq_setup(struct smi_info *info)
{
int rv;
- if (! info->irq)
+ if (!info->irq)
return 0;
if (info->si_type == SI_BT) {
@@ -1093,7 +1084,7 @@ static int std_irq_setup(struct smi_info *info)
SA_INTERRUPT,
DEVICE_NAME,
info);
- if (! rv)
+ if (!rv)
/* Enable the interrupt in the BT interface. */
info->io.outputb(&info->io, IPMI_BT_INTMASK_REG,
IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
@@ -1110,88 +1101,77 @@ static int std_irq_setup(struct smi_info *info)
DEVICE_NAME, info->irq);
info->irq = 0;
} else {
+ info->irq_cleanup = std_irq_cleanup;
printk(" Using irq %d\n", info->irq);
}
return rv;
}
-static void std_irq_cleanup(struct smi_info *info)
-{
- if (! info->irq)
- return;
-
- if (info->si_type == SI_BT)
- /* Disable the interrupt in the BT interface. */
- info->io.outputb(&info->io, IPMI_BT_INTMASK_REG, 0);
- free_irq(info->irq, info);
-}
-
static unsigned char port_inb(struct si_sm_io *io, unsigned int offset)
{
- unsigned int *addr = io->info;
+ unsigned int addr = io->addr_data;
- return inb((*addr)+(offset*io->regspacing));
+ return inb(addr + (offset * io->regspacing));
}
static void port_outb(struct si_sm_io *io, unsigned int offset,
unsigned char b)
{
- unsigned int *addr = io->info;
+ unsigned int addr = io->addr_data;
- outb(b, (*addr)+(offset * io->regspacing));
+ outb(b, addr + (offset * io->regspacing));
}
static unsigned char port_inw(struct si_sm_io *io, unsigned int offset)
{
- unsigned int *addr = io->info;
+ unsigned int addr = io->addr_data;
- return (inw((*addr)+(offset * io->regspacing)) >> io->regshift) & 0xff;
+ return (inw(addr + (offset * io->regspacing)) >> io->regshift) & 0xff;
}
static void port_outw(struct si_sm_io *io, unsigned int offset,
unsigned char b)
{
- unsigned int *addr = io->info;
+ unsigned int addr = io->addr_data;
- outw(b << io->regshift, (*addr)+(offset * io->regspacing));
+ outw(b << io->regshift, addr + (offset * io->regspacing));
}
static unsigned char port_inl(struct si_sm_io *io, unsigned int offset)
{
- unsigned int *addr = io->info;
+ unsigned int addr = io->addr_data;
- return (inl((*addr)+(offset * io->regspacing)) >> io->regshift) & 0xff;
+ return (inl(addr + (offset * io->regspacing)) >> io->regshift) & 0xff;
}
static void port_outl(struct si_sm_io *io, unsigned int offset,
unsigned char b)
{
- unsigned int *addr = io->info;
+ unsigned int addr = io->addr_data;
- outl(b << io->regshift, (*addr)+(offset * io->regspacing));
+ outl(b << io->regshift, addr + (offset * io->regspacing));
}
static void port_cleanup(struct smi_info *info)
{
- unsigned int *addr = info->io.info;
- int mapsize;
+ unsigned int addr = info->io.addr_data;
+ int mapsize;
- if (addr && (*addr)) {
+ if (addr) {
mapsize = ((info->io_size * info->io.regspacing)
- (info->io.regspacing - info->io.regsize));
- release_region (*addr, mapsize);
+ release_region (addr, mapsize);
}
- kfree(info);
}
static int port_setup(struct smi_info *info)
{
- unsigned int *addr = info->io.info;
- int mapsize;
+ unsigned int addr = info->io.addr_data;
+ int mapsize;
- if (! addr || (! *addr))
+ if (!addr)
return -ENODEV;
info->io_cleanup = port_cleanup;
@@ -1225,51 +1205,11 @@ static int port_setup(struct smi_info *info)
mapsize = ((info->io_size * info->io.regspacing)
- (info->io.regspacing - info->io.regsize));
- if (request_region(*addr, mapsize, DEVICE_NAME) == NULL)
+ if (request_region(addr, mapsize, DEVICE_NAME) == NULL)
return -EIO;
return 0;
}
-static int try_init_port(int intf_num, struct smi_info **new_info)
-{
- struct smi_info *info;
-
- if (! ports[intf_num])
- return -ENODEV;
-
- if (! is_new_interface(intf_num, IPMI_IO_ADDR_SPACE,
- ports[intf_num]))
- return -ENODEV;
-
- info = kmalloc(sizeof(*info), GFP_KERNEL);
- if (! info) {
- printk(KERN_ERR "ipmi_si: Could not allocate SI data (1)\n");
- return -ENOMEM;
- }
- memset(info, 0, sizeof(*info));
-
- info->io_setup = port_setup;
- info->io.info = &(ports[intf_num]);
- info->io.addr = NULL;
- info->io.regspacing = regspacings[intf_num];
- if (! info->io.regspacing)
- info->io.regspacing = DEFAULT_REGSPACING;
- info->io.regsize = regsizes[intf_num];
- if (! info->io.regsize)
- info->io.regsize = DEFAULT_REGSPACING;
- info->io.regshift = regshifts[intf_num];
- info->irq = 0;
- info->irq_setup = NULL;
- *new_info = info;
-
- if (si_type[intf_num] == NULL)
- si_type[intf_num] = "kcs";
-
- printk("ipmi_si: Trying \"%s\" at I/O port 0x%x\n",
- si_type[intf_num], ports[intf_num]);
- return 0;
-}
-
static unsigned char intf_mem_inb(struct si_sm_io *io, unsigned int offset)
{
return readb((io->addr)+(offset * io->regspacing));
@@ -1321,7 +1261,7 @@ static void mem_outq(struct si_sm_io *io, unsigned int offset,
static void mem_cleanup(struct smi_info *info)
{
- unsigned long *addr = info->io.info;
+ unsigned long addr = info->io.addr_data;
int mapsize;
if (info->io.addr) {
@@ -1330,17 +1270,16 @@ static void mem_cleanup(struct smi_info *info)
mapsize = ((info->io_size * info->io.regspacing)
- (info->io.regspacing - info->io.regsize));
- release_mem_region(*addr, mapsize);
+ release_mem_region(addr, mapsize);
}
- kfree(info);
}
static int mem_setup(struct smi_info *info)
{
- unsigned long *addr = info->io.info;
+ unsigned long addr = info->io.addr_data;
int mapsize;
- if (! addr || (! *addr))
+ if (!addr)
return -ENODEV;
info->io_cleanup = mem_cleanup;
@@ -1380,57 +1319,83 @@ static int mem_setup(struct smi_info *info)
mapsize = ((info->io_size * info->io.regspacing)
- (info->io.regspacing - info->io.regsize));
- if (request_mem_region(*addr, mapsize, DEVICE_NAME) == NULL)
+ if (request_mem_region(addr, mapsize, DEVICE_NAME) == NULL)
return -EIO;
- info->io.addr = ioremap(*addr, mapsize);
+ info->io.addr = ioremap(addr, mapsize);
if (info->io.addr == NULL) {
- release_mem_region(*addr, mapsize);
+ release_mem_region(addr, mapsize);
return -EIO;
}
return 0;
}
-static int try_init_mem(int intf_num, struct smi_info **new_info)
+
+static __devinit void hardcode_find_bmc(void)
{
+ int i;
struct smi_info *info;
- if (! addrs[intf_num])
- return -ENODEV;
+ for (i = 0; i < SI_MAX_PARMS; i++) {
+ if (!ports[i] && !addrs[i])
+ continue;
- if (! is_new_interface(intf_num, IPMI_MEM_ADDR_SPACE,
- addrs[intf_num]))
- return -ENODEV;
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return;
- info = kmalloc(sizeof(*info), GFP_KERNEL);
- if (! info) {
- printk(KERN_ERR "ipmi_si: Could not allocate SI data (2)\n");
- return -ENOMEM;
- }
- memset(info, 0, sizeof(*info));
+ info->addr_source = "hardcoded";
- info->io_setup = mem_setup;
- info->io.info = &addrs[intf_num];
- info->io.addr = NULL;
- info->io.regspacing = regspacings[intf_num];
- if (! info->io.regspacing)
- info->io.regspacing = DEFAULT_REGSPACING;
- info->io.regsize = regsizes[intf_num];
- if (! info->io.regsize)
- info->io.regsize = DEFAULT_REGSPACING;
- info->io.regshift = regshifts[intf_num];
- info->irq = 0;
- info->irq_setup = NULL;
- *new_info = info;
+ if (!si_type[i] || strcmp(si_type[i], "kcs") == 0) {
+ info->si_type = SI_KCS;
+ } else if (strcmp(si_type[i], "smic") == 0) {
+ info->si_type = SI_SMIC;
+ } else if (strcmp(si_type[i], "bt") == 0) {
+ info->si_type = SI_BT;
+ } else {
+ printk(KERN_WARNING
+ "ipmi_si: Interface type specified "
+ "for interface %d, was invalid: %s\n",
+ i, si_type[i]);
+ kfree(info);
+ continue;
+ }
- if (si_type[intf_num] == NULL)
- si_type[intf_num] = "kcs";
+ if (ports[i]) {
+ /* An I/O port */
+ info->io_setup = port_setup;
+ info->io.addr_data = ports[i];
+ info->io.addr_type = IPMI_IO_ADDR_SPACE;
+ } else if (addrs[i]) {
+ /* A memory port */
+ info->io_setup = mem_setup;
+ info->io.addr_data = addrs[i];
+ info->io.addr_type = IPMI_MEM_ADDR_SPACE;
+ } else {
+ printk(KERN_WARNING
+ "ipmi_si: Interface type specified "
+ "for interface %d, "
+ "but port and address were not set or "
+ "set to zero.\n", i);
+ kfree(info);
+ continue;
+ }
- printk("ipmi_si: Trying \"%s\" at memory address 0x%lx\n",
- si_type[intf_num], addrs[intf_num]);
- return 0;
-}
+ info->io.addr = NULL;
+ info->io.regspacing = regspacings[i];
+ if (!info->io.regspacing)
+ info->io.regspacing = DEFAULT_REGSPACING;
+ info->io.regsize = regsizes[i];
+ if (!info->io.regsize)
+ info->io.regsize = DEFAULT_REGSPACING;
+ info->io.regshift = regshifts[i];
+ info->irq = irqs[i];
+ if (info->irq)
+ info->irq_setup = std_irq_setup;
+ try_smi_init(info);
+ }
+}
#ifdef CONFIG_ACPI
@@ -1470,11 +1435,19 @@ static u32 ipmi_acpi_gpe(void *context)
return ACPI_INTERRUPT_HANDLED;
}
+static void acpi_gpe_irq_cleanup(struct smi_info *info)
+{
+ if (!info->irq)
+ return;
+
+ acpi_remove_gpe_handler(NULL, info->irq, &ipmi_acpi_gpe);
+}
+
static int acpi_gpe_irq_setup(struct smi_info *info)
{
acpi_status status;
- if (! info->irq)
+ if (!info->irq)
return 0;
/* FIXME - is level triggered right? */
@@ -1491,19 +1464,12 @@ static int acpi_gpe_irq_setup(struct smi_info *info)
info->irq = 0;
return -EINVAL;
} else {
+ info->irq_cleanup = acpi_gpe_irq_cleanup;
printk(" Using ACPI GPE %d\n", info->irq);
return 0;
}
}
-static void acpi_gpe_irq_cleanup(struct smi_info *info)
-{
- if (! info->irq)
- return;
-
- acpi_remove_gpe_handler(NULL, info->irq, &ipmi_acpi_gpe);
-}
-
/*
* Defined at
* http://h21007.www2.hp.com/dspp/files/unprotected/devresource/Docs/TechPapers/IA64/hpspmi.pdf
@@ -1546,28 +1512,12 @@ struct SPMITable {
s8 spmi_id[1]; /* A '\0' terminated array starts here. */
};
-static int try_init_acpi(int intf_num, struct smi_info **new_info)
+static __devinit int try_init_acpi(struct SPMITable *spmi)
{
struct smi_info *info;
- acpi_status status;
- struct SPMITable *spmi;
char *io_type;
u8 addr_space;
- if (acpi_disabled)
- return -ENODEV;
-
- if (acpi_failure)
- return -ENODEV;
-
- status = acpi_get_firmware_table("SPMI", intf_num+1,
- ACPI_LOGICAL_ADDRESSING,
- (struct acpi_table_header **) &spmi);
- if (status != AE_OK) {
- acpi_failure = 1;
- return -ENODEV;
- }
-
if (spmi->IPMIlegacy != 1) {
printk(KERN_INFO "IPMI: Bad SPMI legacy %d\n", spmi->IPMIlegacy);
return -ENODEV;
@@ -1577,47 +1527,42 @@ static int try_init_acpi(int intf_num, struct smi_info **new_info)
addr_space = IPMI_MEM_ADDR_SPACE;
else
addr_space = IPMI_IO_ADDR_SPACE;
- if (! is_new_interface(-1, addr_space, spmi->addr.address))
- return -ENODEV;
+
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info) {
+ printk(KERN_ERR "ipmi_si: Could not allocate SI data (3)\n");
+ return -ENOMEM;
+ }
+
+ info->addr_source = "ACPI";
/* Figure out the interface type. */
switch (spmi->InterfaceType)
{
case 1: /* KCS */
- si_type[intf_num] = "kcs";
+ info->si_type = SI_KCS;
break;
-
case 2: /* SMIC */
- si_type[intf_num] = "smic";
+ info->si_type = SI_SMIC;
break;
-
case 3: /* BT */
- si_type[intf_num] = "bt";
+ info->si_type = SI_BT;
break;
-
default:
printk(KERN_INFO "ipmi_si: Unknown ACPI/SPMI SI type %d\n",
spmi->InterfaceType);
+ kfree(info);
return -EIO;
}
- info = kmalloc(sizeof(*info), GFP_KERNEL);
- if (! info) {
- printk(KERN_ERR "ipmi_si: Could not allocate SI data (3)\n");
- return -ENOMEM;
- }
- memset(info, 0, sizeof(*info));
-
if (spmi->InterruptType & 1) {
/* We've got a GPE interrupt. */
info->irq = spmi->GPE;
info->irq_setup = acpi_gpe_irq_setup;
- info->irq_cleanup = acpi_gpe_irq_cleanup;
} else if (spmi->InterruptType & 2) {
/* We've got an APIC/SAPIC interrupt. */
info->irq = spmi->GlobalSystemInterrupt;
info->irq_setup = std_irq_setup;
- info->irq_cleanup = std_irq_cleanup;
} else {
/* Use the default interrupt setting. */
info->irq = 0;
@@ -1626,43 +1571,60 @@ static int try_init_acpi(int intf_num, struct smi_info **new_info)
if (spmi->addr.register_bit_width) {
/* A (hopefully) properly formed register bit width. */
- regspacings[intf_num] = spmi->addr.register_bit_width / 8;
info->io.regspacing = spmi->addr.register_bit_width / 8;
} else {
- regspacings[intf_num] = DEFAULT_REGSPACING;
info->io.regspacing = DEFAULT_REGSPACING;
}
- regsizes[intf_num] = regspacings[intf_num];
- info->io.regsize = regsizes[intf_num];
- regshifts[intf_num] = spmi->addr.register_bit_offset;
- info->io.regshift = regshifts[intf_num];
+ info->io.regsize = info->io.regspacing;
+ info->io.regshift = spmi->addr.register_bit_offset;
if (spmi->addr.address_space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
io_type = "memory";
info->io_setup = mem_setup;
- addrs[intf_num] = spmi->addr.address;
- info->io.info = &(addrs[intf_num]);
+ info->io.addr_type = IPMI_MEM_ADDR_SPACE;
} else if (spmi->addr.address_space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
io_type = "I/O";
info->io_setup = port_setup;
- ports[intf_num] = spmi->addr.address;
- info->io.info = &(ports[intf_num]);
+ info->io.addr_type = IPMI_IO_ADDR_SPACE;
} else {
kfree(info);
printk("ipmi_si: Unknown ACPI I/O Address type\n");
return -EIO;
}
+ info->io.addr_data = spmi->addr.address;
- *new_info = info;
+ try_smi_init(info);
- printk("ipmi_si: ACPI/SPMI specifies \"%s\" %s SI @ 0x%lx\n",
- si_type[intf_num], io_type, (unsigned long) spmi->addr.address);
return 0;
}
+
+static __devinit void acpi_find_bmc(void)
+{
+ acpi_status status;
+ struct SPMITable *spmi;
+ int i;
+
+ if (acpi_disabled)
+ return;
+
+ if (acpi_failure)
+ return;
+
+ for (i = 0; ; i++) {
+ status = acpi_get_firmware_table("SPMI", i+1,
+ ACPI_LOGICAL_ADDRESSING,
+ (struct acpi_table_header **)
+ &spmi);
+ if (status != AE_OK)
+ return;
+
+ try_init_acpi(spmi);
+ }
+}
#endif
#ifdef CONFIG_DMI
-typedef struct dmi_ipmi_data
+struct dmi_ipmi_data
{
u8 type;
u8 addr_space;
@@ -1670,49 +1632,46 @@ typedef struct dmi_ipmi_data
u8 irq;
u8 offset;
u8 slave_addr;
-} dmi_ipmi_data_t;
-
-static dmi_ipmi_data_t dmi_data[SI_MAX_DRIVERS];
-static int dmi_data_entries;
+};
-static int __init decode_dmi(struct dmi_header *dm, int intf_num)
+static int __devinit decode_dmi(struct dmi_header *dm,
+ struct dmi_ipmi_data *dmi)
{
u8 *data = (u8 *)dm;
unsigned long base_addr;
u8 reg_spacing;
u8 len = dm->length;
- dmi_ipmi_data_t *ipmi_data = dmi_data+intf_num;
- ipmi_data->type = data[4];
+ dmi->type = data[4];
memcpy(&base_addr, data+8, sizeof(unsigned long));
if (len >= 0x11) {
if (base_addr & 1) {
/* I/O */
base_addr &= 0xFFFE;
- ipmi_data->addr_space = IPMI_IO_ADDR_SPACE;
+ dmi->addr_space = IPMI_IO_ADDR_SPACE;
}
else {
/* Memory */
- ipmi_data->addr_space = IPMI_MEM_ADDR_SPACE;
+ dmi->addr_space = IPMI_MEM_ADDR_SPACE;
}
/* If bit 4 of byte 0x10 is set, then the lsb for the address
is odd. */
- ipmi_data->base_addr = base_addr | ((data[0x10] & 0x10) >> 4);
+ dmi->base_addr = base_addr | ((data[0x10] & 0x10) >> 4);
- ipmi_data->irq = data[0x11];
+ dmi->irq = data[0x11];
/* The top two bits of byte 0x10 hold the register spacing. */
reg_spacing = (data[0x10] & 0xC0) >> 6;
switch(reg_spacing){
case 0x00: /* Byte boundaries */
- ipmi_data->offset = 1;
+ dmi->offset = 1;
break;
case 0x01: /* 32-bit boundaries */
- ipmi_data->offset = 4;
+ dmi->offset = 4;
break;
case 0x02: /* 16-byte boundaries */
- ipmi_data->offset = 16;
+ dmi->offset = 16;
break;
default:
/* Some other interface, just ignore it. */
@@ -1726,217 +1685,227 @@ static int __init decode_dmi(struct dmi_header *dm, int intf_num)
* wrong (and all that I have seen are I/O) so we just
* ignore that bit and assume I/O. Systems that use
* memory should use the newer spec, anyway. */
- ipmi_data->base_addr = base_addr & 0xfffe;
- ipmi_data->addr_space = IPMI_IO_ADDR_SPACE;
- ipmi_data->offset = 1;
- }
-
- ipmi_data->slave_addr = data[6];
-
- if (is_new_interface(-1, ipmi_data->addr_space,ipmi_data->base_addr)) {
- dmi_data_entries++;
- return 0;
+ dmi->base_addr = base_addr & 0xfffe;
+ dmi->addr_space = IPMI_IO_ADDR_SPACE;
+ dmi->offset = 1;
}
- memset(ipmi_data, 0, sizeof(dmi_ipmi_data_t));
+ dmi->slave_addr = data[6];
- return -1;
+ return 0;
}
-static void __init dmi_find_bmc(void)
+static __devinit void try_init_dmi(struct dmi_ipmi_data *ipmi_data)
{
- struct dmi_device *dev = NULL;
- int intf_num = 0;
-
- while ((dev = dmi_find_device(DMI_DEV_TYPE_IPMI, NULL, dev))) {
- if (intf_num >= SI_MAX_DRIVERS)
- break;
+ struct smi_info *info;
- decode_dmi((struct dmi_header *) dev->device_data, intf_num++);
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info) {
+ printk(KERN_ERR
+ "ipmi_si: Could not allocate SI data\n");
+ return;
}
-}
-
-static int try_init_smbios(int intf_num, struct smi_info **new_info)
-{
- struct smi_info *info;
- dmi_ipmi_data_t *ipmi_data = dmi_data+intf_num;
- char *io_type;
- if (intf_num >= dmi_data_entries)
- return -ENODEV;
+ info->addr_source = "SMBIOS";
switch (ipmi_data->type) {
- case 0x01: /* KCS */
- si_type[intf_num] = "kcs";
- break;
- case 0x02: /* SMIC */
- si_type[intf_num] = "smic";
- break;
- case 0x03: /* BT */
- si_type[intf_num] = "bt";
- break;
- default:
- return -EIO;
- }
-
- info = kmalloc(sizeof(*info), GFP_KERNEL);
- if (! info) {
- printk(KERN_ERR "ipmi_si: Could not allocate SI data (4)\n");
- return -ENOMEM;
+ case 0x01: /* KCS */
+ info->si_type = SI_KCS;
+ break;
+ case 0x02: /* SMIC */
+ info->si_type = SI_SMIC;
+ break;
+ case 0x03: /* BT */
+ info->si_type = SI_BT;
+ break;
+ default:
+ kfree(info);
+ return;
}
- memset(info, 0, sizeof(*info));
- if (ipmi_data->addr_space == 1) {
- io_type = "memory";
+ switch (ipmi_data->addr_space) {
+ case IPMI_MEM_ADDR_SPACE:
info->io_setup = mem_setup;
- addrs[intf_num] = ipmi_data->base_addr;
- info->io.info = &(addrs[intf_num]);
- } else if (ipmi_data->addr_space == 2) {
- io_type = "I/O";
+ info->io.addr_type = IPMI_MEM_ADDR_SPACE;
+ break;
+
+ case IPMI_IO_ADDR_SPACE:
info->io_setup = port_setup;
- ports[intf_num] = ipmi_data->base_addr;
- info->io.info = &(ports[intf_num]);
- } else {
+ info->io.addr_type = IPMI_IO_ADDR_SPACE;
+ break;
+
+ default:
kfree(info);
- printk("ipmi_si: Unknown SMBIOS I/O Address type.\n");
- return -EIO;
+ printk(KERN_WARNING
+ "ipmi_si: Unknown SMBIOS I/O Address type: %d.\n",
+ ipmi_data->addr_space);
+ return;
}
+ info->io.addr_data = ipmi_data->base_addr;
- regspacings[intf_num] = ipmi_data->offset;
- info->io.regspacing = regspacings[intf_num];
- if (! info->io.regspacing)
+ info->io.regspacing = ipmi_data->offset;
+ if (!info->io.regspacing)
info->io.regspacing = DEFAULT_REGSPACING;
info->io.regsize = DEFAULT_REGSPACING;
- info->io.regshift = regshifts[intf_num];
+ info->io.regshift = 0;
info->slave_addr = ipmi_data->slave_addr;
- irqs[intf_num] = ipmi_data->irq;
+ info->irq = ipmi_data->irq;
+ if (info->irq)
+ info->irq_setup = std_irq_setup;
- *new_info = info;
+ try_smi_init(info);
+}
- printk("ipmi_si: Found SMBIOS-specified state machine at %s"
- " address 0x%lx, slave address 0x%x\n",
- io_type, (unsigned long)ipmi_data->base_addr,
- ipmi_data->slave_addr);
- return 0;
+static void __devinit dmi_find_bmc(void)
+{
+ struct dmi_device *dev = NULL;
+ struct dmi_ipmi_data data;
+ int rv;
+
+ while ((dev = dmi_find_device(DMI_DEV_TYPE_IPMI, NULL, dev))) {
+ rv = decode_dmi((struct dmi_header *) dev->device_data, &data);
+ if (!rv)
+ try_init_dmi(&data);
+ }
}
#endif /* CONFIG_DMI */
#ifdef CONFIG_PCI
-#define PCI_ERMC_CLASSCODE 0x0C0700
+#define PCI_ERMC_CLASSCODE 0x0C0700
+#define PCI_ERMC_CLASSCODE_MASK 0xffffff00
+#define PCI_ERMC_CLASSCODE_TYPE_MASK 0xff
+#define PCI_ERMC_CLASSCODE_TYPE_SMIC 0x00
+#define PCI_ERMC_CLASSCODE_TYPE_KCS 0x01
+#define PCI_ERMC_CLASSCODE_TYPE_BT 0x02
+
#define PCI_HP_VENDOR_ID 0x103C
#define PCI_MMC_DEVICE_ID 0x121A
#define PCI_MMC_ADDR_CW 0x10
-/* Avoid more than one attempt to probe pci smic. */
-static int pci_smic_checked = 0;
+static void ipmi_pci_cleanup(struct smi_info *info)
+{
+ struct pci_dev *pdev = info->addr_source_data;
+
+ pci_disable_device(pdev);
+}
-static int find_pci_smic(int intf_num, struct smi_info **new_info)
+static int __devinit ipmi_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
- struct smi_info *info;
- int error;
- struct pci_dev *pci_dev = NULL;
- u16 base_addr;
- int fe_rmc = 0;
+ int rv;
+ int class_type = pdev->class & PCI_ERMC_CLASSCODE_TYPE_MASK;
+ struct smi_info *info;
+ int first_reg_offset = 0;
- if (pci_smic_checked)
- return -ENODEV;
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
- pci_smic_checked = 1;
+ info->addr_source = "PCI";
- pci_dev = pci_get_device(PCI_HP_VENDOR_ID, PCI_MMC_DEVICE_ID, NULL);
- if (! pci_dev) {
- pci_dev = pci_get_class(PCI_ERMC_CLASSCODE, NULL);
- if (pci_dev && (pci_dev->subsystem_vendor == PCI_HP_VENDOR_ID))
- fe_rmc = 1;
- else
- return -ENODEV;
- }
+ switch (class_type) {
+ case PCI_ERMC_CLASSCODE_TYPE_SMIC:
+ info->si_type = SI_SMIC;
+ break;
- error = pci_read_config_word(pci_dev, PCI_MMC_ADDR_CW, &base_addr);
- if (error)
- {
- pci_dev_put(pci_dev);
- printk(KERN_ERR
- "ipmi_si: pci_read_config_word() failed (%d).\n",
- error);
- return -ENODEV;
+ case PCI_ERMC_CLASSCODE_TYPE_KCS:
+ info->si_type = SI_KCS;
+ break;
+
+ case PCI_ERMC_CLASSCODE_TYPE_BT:
+ info->si_type = SI_BT;
+ break;
+
+ default:
+ kfree(info);
+ printk(KERN_INFO "ipmi_si: %s: Unknown IPMI type: %d\n",
+ pci_name(pdev), class_type);
+ return -ENOMEM;
}
- /* Bit 0: 1 specifies programmed I/O, 0 specifies memory mapped I/O */
- if (! (base_addr & 0x0001))
- {
- pci_dev_put(pci_dev);
- printk(KERN_ERR
- "ipmi_si: memory mapped I/O not supported for PCI"
- " smic.\n");
- return -ENODEV;
+ rv = pci_enable_device(pdev);
+ if (rv) {
+ printk(KERN_ERR "ipmi_si: %s: couldn't enable PCI device\n",
+ pci_name(pdev));
+ kfree(info);
+ return rv;
}
- base_addr &= 0xFFFE;
- if (! fe_rmc)
- /* Data register starts at base address + 1 in eRMC */
- ++base_addr;
+ info->addr_source_cleanup = ipmi_pci_cleanup;
+ info->addr_source_data = pdev;
- if (! is_new_interface(-1, IPMI_IO_ADDR_SPACE, base_addr)) {
- pci_dev_put(pci_dev);
- return -ENODEV;
- }
+ if (pdev->subsystem_vendor == PCI_HP_VENDOR_ID)
+ first_reg_offset = 1;
- info = kmalloc(sizeof(*info), GFP_KERNEL);
- if (! info) {
- pci_dev_put(pci_dev);
- printk(KERN_ERR "ipmi_si: Could not allocate SI data (5)\n");
- return -ENOMEM;
+ if (pci_resource_flags(pdev, 0) & IORESOURCE_IO) {
+ info->io_setup = port_setup;
+ info->io.addr_type = IPMI_IO_ADDR_SPACE;
+ } else {
+ info->io_setup = mem_setup;
+ info->io.addr_type = IPMI_MEM_ADDR_SPACE;
}
- memset(info, 0, sizeof(*info));
+ info->io.addr_data = pci_resource_start(pdev, 0);
- info->io_setup = port_setup;
- ports[intf_num] = base_addr;
- info->io.info = &(ports[intf_num]);
- info->io.regspacing = regspacings[intf_num];
- if (! info->io.regspacing)
- info->io.regspacing = DEFAULT_REGSPACING;
+ info->io.regspacing = DEFAULT_REGSPACING;
info->io.regsize = DEFAULT_REGSPACING;
- info->io.regshift = regshifts[intf_num];
+ info->io.regshift = 0;
- *new_info = info;
+ info->irq = pdev->irq;
+ if (info->irq)
+ info->irq_setup = std_irq_setup;
- irqs[intf_num] = pci_dev->irq;
- si_type[intf_num] = "smic";
+ info->dev = &pdev->dev;
- printk("ipmi_si: Found PCI SMIC at I/O address 0x%lx\n",
- (long unsigned int) base_addr);
+ return try_smi_init(info);
+}
- pci_dev_put(pci_dev);
+static void __devexit ipmi_pci_remove(struct pci_dev *pdev)
+{
+}
+
+#ifdef CONFIG_PM
+static int ipmi_pci_suspend(struct pci_dev *pdev, pm_message_t state)
+{
return 0;
}
-#endif /* CONFIG_PCI */
-static int try_init_plug_and_play(int intf_num, struct smi_info **new_info)
+static int ipmi_pci_resume(struct pci_dev *pdev)
{
-#ifdef CONFIG_PCI
- if (find_pci_smic(intf_num, new_info) == 0)
- return 0;
+ return 0;
+}
#endif
- /* Include other methods here. */
- return -ENODEV;
-}
+static struct pci_device_id ipmi_pci_devices[] = {
+ { PCI_DEVICE(PCI_HP_VENDOR_ID, PCI_MMC_DEVICE_ID) },
+ { PCI_DEVICE_CLASS(PCI_ERMC_CLASSCODE, PCI_ERMC_CLASSCODE_MASK) },
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci, ipmi_pci_devices);
+
+static struct pci_driver ipmi_pci_driver = {
+ .name = DEVICE_NAME,
+ .id_table = ipmi_pci_devices,
+ .probe = ipmi_pci_probe,
+ .remove = __devexit_p(ipmi_pci_remove),
+#ifdef CONFIG_PM
+ .suspend = ipmi_pci_suspend,
+ .resume = ipmi_pci_resume,
+#endif
+};
+#endif /* CONFIG_PCI */
static int try_get_dev_id(struct smi_info *smi_info)
{
- unsigned char msg[2];
- unsigned char *resp;
- unsigned long resp_len;
- enum si_sm_result smi_result;
- int rv = 0;
+ unsigned char msg[2];
+ unsigned char *resp;
+ unsigned long resp_len;
+ enum si_sm_result smi_result;
+ int rv = 0;
resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
- if (! resp)
+ if (!resp)
return -ENOMEM;
/* Do a Get Device ID command, since it comes back with some
@@ -1972,7 +1941,7 @@ static int try_get_dev_id(struct smi_info *smi_info)
/* Otherwise, we got some data. */
resp_len = smi_info->handlers->get_result(smi_info->si_sm,
resp, IPMI_MAX_MSG_LENGTH);
- if (resp_len < 6) {
+ if (resp_len < 14) {
/* That's odd, it should be longer. */
rv = -EINVAL;
goto out;
@@ -1985,8 +1954,7 @@ static int try_get_dev_id(struct smi_info *smi_info)
}
/* Record info from the get device id, in case we need it. */
- memcpy(&smi_info->device_id, &resp[3],
- min_t(unsigned long, resp_len-3, sizeof(smi_info->device_id)));
+ ipmi_demangle_device_id(resp+3, resp_len-3, &smi_info->device_id);
out:
kfree(resp);
@@ -2018,7 +1986,7 @@ static int stat_file_read_proc(char *page, char **start, off_t off,
struct smi_info *smi = data;
out += sprintf(out, "interrupts_enabled: %d\n",
- smi->irq && ! smi->interrupt_disabled);
+ smi->irq && !smi->interrupt_disabled);
out += sprintf(out, "short_timeouts: %ld\n",
smi->short_timeouts);
out += sprintf(out, "long_timeouts: %ld\n",
@@ -2089,15 +2057,14 @@ static int oem_data_avail_to_receive_msg_avail(struct smi_info *smi_info)
#define DELL_POWEREDGE_8G_BMC_DEVICE_ID 0x20
#define DELL_POWEREDGE_8G_BMC_DEVICE_REV 0x80
#define DELL_POWEREDGE_8G_BMC_IPMI_VERSION 0x51
-#define DELL_IANA_MFR_ID {0xA2, 0x02, 0x00}
+#define DELL_IANA_MFR_ID 0x0002a2
static void setup_dell_poweredge_oem_data_handler(struct smi_info *smi_info)
{
struct ipmi_device_id *id = &smi_info->device_id;
- const char mfr[3]=DELL_IANA_MFR_ID;
- if (! memcmp(mfr, id->manufacturer_id, sizeof(mfr))) {
+ if (id->manufacturer_id == DELL_IANA_MFR_ID) {
if (id->device_id == DELL_POWEREDGE_8G_BMC_DEVICE_ID &&
id->device_revision == DELL_POWEREDGE_8G_BMC_DEVICE_REV &&
- id->ipmi_version == DELL_POWEREDGE_8G_BMC_IPMI_VERSION) {
+ id->ipmi_version == DELL_POWEREDGE_8G_BMC_IPMI_VERSION) {
smi_info->oem_data_avail_handler =
oem_data_avail_to_receive_msg_avail;
}
@@ -2169,8 +2136,7 @@ static void
setup_dell_poweredge_bt_xaction_handler(struct smi_info *smi_info)
{
struct ipmi_device_id *id = &smi_info->device_id;
- const char mfr[3]=DELL_IANA_MFR_ID;
- if (! memcmp(mfr, id->manufacturer_id, sizeof(mfr)) &&
+ if (id->manufacturer_id == DELL_IANA_MFR_ID &&
smi_info->si_type == SI_BT)
register_xaction_notifier(&dell_poweredge_bt_xaction_notifier);
}
@@ -2200,62 +2166,110 @@ static inline void wait_for_timer_and_thread(struct smi_info *smi_info)
del_timer_sync(&smi_info->si_timer);
}
-/* Returns 0 if initialized, or negative on an error. */
-static int init_one_smi(int intf_num, struct smi_info **smi)
+static struct ipmi_default_vals
{
- int rv;
- struct smi_info *new_smi;
+ int type;
+ int port;
+} __devinitdata ipmi_defaults[] =
+{
+ { .type = SI_KCS, .port = 0xca2 },
+ { .type = SI_SMIC, .port = 0xca9 },
+ { .type = SI_BT, .port = 0xe4 },
+ { .port = 0 }
+};
+static __devinit void default_find_bmc(void)
+{
+ struct smi_info *info;
+ int i;
- rv = try_init_mem(intf_num, &new_smi);
- if (rv)
- rv = try_init_port(intf_num, &new_smi);
-#ifdef CONFIG_ACPI
- if (rv && si_trydefaults)
- rv = try_init_acpi(intf_num, &new_smi);
-#endif
-#ifdef CONFIG_DMI
- if (rv && si_trydefaults)
- rv = try_init_smbios(intf_num, &new_smi);
-#endif
- if (rv && si_trydefaults)
- rv = try_init_plug_and_play(intf_num, &new_smi);
+ for (i = 0; ; i++) {
+ if (!ipmi_defaults[i].port)
+ break;
- if (rv)
- return rv;
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return;
- /* So we know not to free it unless we have allocated one. */
- new_smi->intf = NULL;
- new_smi->si_sm = NULL;
- new_smi->handlers = NULL;
+ info->addr_source = NULL;
- if (! new_smi->irq_setup) {
- new_smi->irq = irqs[intf_num];
- new_smi->irq_setup = std_irq_setup;
- new_smi->irq_cleanup = std_irq_cleanup;
- }
+ info->si_type = ipmi_defaults[i].type;
+ info->io_setup = port_setup;
+ info->io.addr_data = ipmi_defaults[i].port;
+ info->io.addr_type = IPMI_IO_ADDR_SPACE;
- /* Default to KCS if no type is specified. */
- if (si_type[intf_num] == NULL) {
- if (si_trydefaults)
- si_type[intf_num] = "kcs";
- else {
- rv = -EINVAL;
- goto out_err;
+ info->io.addr = NULL;
+ info->io.regspacing = DEFAULT_REGSPACING;
+ info->io.regsize = DEFAULT_REGSPACING;
+ info->io.regshift = 0;
+
+ if (try_smi_init(info) == 0) {
+ /* Found one... */
+ printk(KERN_INFO "ipmi_si: Found default %s state"
+ " machine at %s address 0x%lx\n",
+ si_to_str[info->si_type],
+ addr_space_to_str[info->io.addr_type],
+ info->io.addr_data);
+ return;
}
}
+}
+
+static int is_new_interface(struct smi_info *info)
+{
+ struct smi_info *e;
+
+ list_for_each_entry(e, &smi_infos, link) {
+ if (e->io.addr_type != info->io.addr_type)
+ continue;
+ if (e->io.addr_data == info->io.addr_data)
+ return 0;
+ }
+
+ return 1;
+}
+
+static int try_smi_init(struct smi_info *new_smi)
+{
+ int rv;
+
+ if (new_smi->addr_source) {
+ printk(KERN_INFO "ipmi_si: Trying %s-specified %s state"
+ " machine at %s address 0x%lx, slave address 0x%x,"
+ " irq %d\n",
+ new_smi->addr_source,
+ si_to_str[new_smi->si_type],
+ addr_space_to_str[new_smi->io.addr_type],
+ new_smi->io.addr_data,
+ new_smi->slave_addr, new_smi->irq);
+ }
+
+ down(&smi_infos_lock);
+ if (!is_new_interface(new_smi)) {
+ printk(KERN_WARNING "ipmi_si: duplicate interface\n");
+ rv = -EBUSY;
+ goto out_err;
+ }
- /* Set up the state machine to use. */
- if (strcmp(si_type[intf_num], "kcs") == 0) {
+ /* So we know not to free it unless we have allocated one. */
+ new_smi->intf = NULL;
+ new_smi->si_sm = NULL;
+ new_smi->handlers = NULL;
+
+ switch (new_smi->si_type) {
+ case SI_KCS:
new_smi->handlers = &kcs_smi_handlers;
- new_smi->si_type = SI_KCS;
- } else if (strcmp(si_type[intf_num], "smic") == 0) {
+ break;
+
+ case SI_SMIC:
new_smi->handlers = &smic_smi_handlers;
- new_smi->si_type = SI_SMIC;
- } else if (strcmp(si_type[intf_num], "bt") == 0) {
+ break;
+
+ case SI_BT:
new_smi->handlers = &bt_smi_handlers;
- new_smi->si_type = SI_BT;
- } else {
+ break;
+
+ default:
/* No support for anything else yet. */
rv = -EIO;
goto out_err;
@@ -2263,7 +2277,7 @@ static int init_one_smi(int intf_num, struct smi_info **smi)
/* Allocate the state machine's data and initialize it. */
new_smi->si_sm = kmalloc(new_smi->handlers->size(), GFP_KERNEL);
- if (! new_smi->si_sm) {
+ if (!new_smi->si_sm) {
printk(" Could not allocate state machine memory\n");
rv = -ENOMEM;
goto out_err;
@@ -2284,21 +2298,29 @@ static int init_one_smi(int intf_num, struct smi_info **smi)
/* Do low-level detection first. */
if (new_smi->handlers->detect(new_smi->si_sm)) {
+ if (new_smi->addr_source)
+ printk(KERN_INFO "ipmi_si: Interface detection"
+ " failed\n");
rv = -ENODEV;
goto out_err;
}
/* Attempt a get device id command. If it fails, we probably
- don't have a SMI here. */
+ don't have a BMC here. */
rv = try_get_dev_id(new_smi);
- if (rv)
+ if (rv) {
+ if (new_smi->addr_source)
+ printk(KERN_INFO "ipmi_si: There appears to be no BMC"
+ " at this location\n");
goto out_err;
+ }
setup_oem_data_handler(new_smi);
setup_xaction_handlers(new_smi);
/* Try to claim any interrupts. */
- new_smi->irq_setup(new_smi);
+ if (new_smi->irq_setup)
+ new_smi->irq_setup(new_smi);
INIT_LIST_HEAD(&(new_smi->xmit_msgs));
INIT_LIST_HEAD(&(new_smi->hp_xmit_msgs));
@@ -2308,7 +2330,8 @@ static int init_one_smi(int intf_num, struct smi_info **smi)
new_smi->interrupt_disabled = 0;
atomic_set(&new_smi->stop_operation, 0);
- new_smi->intf_num = intf_num;
+ new_smi->intf_num = smi_num;
+ smi_num++;
/* Start clearing the flags before we enable interrupts or the
timer to avoid racing with the timer. */
@@ -2332,10 +2355,36 @@ static int init_one_smi(int intf_num, struct smi_info **smi)
new_smi->thread = kthread_run(ipmi_thread, new_smi,
"kipmi%d", new_smi->intf_num);
+ if (!new_smi->dev) {
+ /* If we don't already have a device from something
+ * else (like PCI), then register a new one. */
+ new_smi->pdev = platform_device_alloc("ipmi_si",
+ new_smi->intf_num);
+ if (!new_smi->pdev) {
+ rv = -ENOMEM;
+ printk(KERN_ERR
+ "ipmi_si_intf:"
+ " Unable to allocate platform device\n");
+ goto out_err_stop_timer;
+ }
+ new_smi->dev = &new_smi->pdev->dev;
+ new_smi->dev->driver = &ipmi_driver;
+
+ rv = platform_device_register(new_smi->pdev);
+ if (rv) {
+ printk(KERN_ERR
+ "ipmi_si_intf:"
+ " Unable to register system interface device:"
+ " %d\n",
+ rv);
+ goto out_err_stop_timer;
+ }
+ new_smi->dev_registered = 1;
+ }
+
rv = ipmi_register_smi(&handlers,
new_smi,
- ipmi_version_major(&new_smi->device_id),
- ipmi_version_minor(&new_smi->device_id),
+ &new_smi->device_id,
+ new_smi->dev,
new_smi->slave_addr,
&(new_smi->intf));
if (rv) {
@@ -2365,9 +2414,11 @@ static int init_one_smi(int intf_num, struct smi_info **smi)
goto out_err_stop_timer;
}
- *smi = new_smi;
+ list_add_tail(&new_smi->link, &smi_infos);
+
+ up(&smi_infos_lock);
- printk(" IPMI %s interface initialized\n", si_type[intf_num]);
+ printk(" IPMI %s interface initialized\n",si_to_str[new_smi->si_type]);
return 0;
@@ -2379,7 +2430,8 @@ static int init_one_smi(int intf_num, struct smi_info **smi)
if (new_smi->intf)
ipmi_unregister_smi(new_smi->intf);
- new_smi->irq_cleanup(new_smi);
+ if (new_smi->irq_cleanup)
+ new_smi->irq_cleanup(new_smi);
/* Wait until we know that we are out of any interrupt
handlers might have been running before we freed the
@@ -2391,23 +2443,41 @@ static int init_one_smi(int intf_num, struct smi_info **smi)
new_smi->handlers->cleanup(new_smi->si_sm);
kfree(new_smi->si_sm);
}
+ if (new_smi->addr_source_cleanup)
+ new_smi->addr_source_cleanup(new_smi);
if (new_smi->io_cleanup)
new_smi->io_cleanup(new_smi);
+ if (new_smi->dev_registered)
+ platform_device_unregister(new_smi->pdev);
+
+ kfree(new_smi);
+
+ up(&smi_infos_lock);
+
return rv;
}
-static __init int init_ipmi_si(void)
+static __devinit int init_ipmi_si(void)
{
- int rv = 0;
- int pos = 0;
int i;
char *str;
+ int rv;
if (initialized)
return 0;
initialized = 1;
+ /* Register the device drivers. */
+ rv = driver_register(&ipmi_driver);
+ if (rv) {
+ printk(KERN_ERR
+ "init_ipmi_si: Unable to register driver: %d\n",
+ rv);
+ return rv;
+ }
+
+
/* Parse out the si_type string into its components. */
str = si_type_str;
if (*str != '\0') {
@@ -2425,63 +2495,66 @@ static __init int init_ipmi_si(void)
printk(KERN_INFO "IPMI System Interface driver.\n");
+ hardcode_find_bmc();
+
#ifdef CONFIG_DMI
dmi_find_bmc();
#endif
- rv = init_one_smi(0, &(smi_infos[pos]));
- if (rv && ! ports[0] && si_trydefaults) {
- /* If we are trying defaults and the initial port is
- not set, then set it. */
- si_type[0] = "kcs";
- ports[0] = DEFAULT_KCS_IO_PORT;
- rv = init_one_smi(0, &(smi_infos[pos]));
- if (rv) {
- /* No KCS - try SMIC */
- si_type[0] = "smic";
- ports[0] = DEFAULT_SMIC_IO_PORT;
- rv = init_one_smi(0, &(smi_infos[pos]));
- }
- if (rv) {
- /* No SMIC - try BT */
- si_type[0] = "bt";
- ports[0] = DEFAULT_BT_IO_PORT;
- rv = init_one_smi(0, &(smi_infos[pos]));
- }
- }
- if (rv == 0)
- pos++;
+#ifdef CONFIG_ACPI
+ if (si_trydefaults)
+ acpi_find_bmc();
+#endif
- for (i = 1; i < SI_MAX_PARMS; i++) {
- rv = init_one_smi(i, &(smi_infos[pos]));
- if (rv == 0)
- pos++;
+#ifdef CONFIG_PCI
+ pci_module_init(&ipmi_pci_driver);
+#endif
+
+ if (si_trydefaults) {
+ down(&smi_infos_lock);
+ if (list_empty(&smi_infos)) {
+ /* No BMC was found, try defaults. */
+ up(&smi_infos_lock);
+ default_find_bmc();
+ } else {
+ up(&smi_infos_lock);
+ }
}
- if (smi_infos[0] == NULL) {
+ down(&smi_infos_lock);
+ if (list_empty(&smi_infos)) {
+ up(&smi_infos_lock);
+#ifdef CONFIG_PCI
+ pci_unregister_driver(&ipmi_pci_driver);
+#endif
printk("ipmi_si: Unable to find any System Interface(s)\n");
return -ENODEV;
+ } else {
+ up(&smi_infos_lock);
+ return 0;
}
-
- return 0;
}
module_init(init_ipmi_si);
-static void __exit cleanup_one_si(struct smi_info *to_clean)
+static void __devexit cleanup_one_si(struct smi_info *to_clean)
{
int rv;
unsigned long flags;
- if (! to_clean)
+ if (!to_clean)
return;
+ list_del(&to_clean->link);
+
/* Tell the timer and interrupt handlers that we are shutting
down. */
spin_lock_irqsave(&(to_clean->si_lock), flags);
spin_lock(&(to_clean->msg_lock));
atomic_inc(&to_clean->stop_operation);
- to_clean->irq_cleanup(to_clean);
+
+ if (to_clean->irq_cleanup)
+ to_clean->irq_cleanup(to_clean);
spin_unlock(&(to_clean->msg_lock));
spin_unlock_irqrestore(&(to_clean->si_lock), flags);
@@ -2511,20 +2584,34 @@ static void __exit cleanup_one_si(struct smi_info *to_clean)
kfree(to_clean->si_sm);
+ if (to_clean->addr_source_cleanup)
+ to_clean->addr_source_cleanup(to_clean);
if (to_clean->io_cleanup)
to_clean->io_cleanup(to_clean);
+
+ if (to_clean->dev_registered)
+ platform_device_unregister(to_clean->pdev);
+
+ kfree(to_clean);
}
static __exit void cleanup_ipmi_si(void)
{
- int i;
+ struct smi_info *e, *tmp_e;
- if (! initialized)
+ if (!initialized)
return;
- for (i = 0; i < SI_MAX_DRIVERS; i++) {
- cleanup_one_si(smi_infos[i]);
- }
+#ifdef CONFIG_PCI
+ pci_unregister_driver(&ipmi_pci_driver);
+#endif
+
+ down(&smi_infos_lock);
+ list_for_each_entry_safe(e, tmp_e, &smi_infos, link)
+ cleanup_one_si(e);
+ up(&smi_infos_lock);
+
+ driver_unregister(&ipmi_driver);
}
module_exit(cleanup_ipmi_si);
diff --git a/drivers/char/ipmi/ipmi_si_sm.h b/drivers/char/ipmi/ipmi_si_sm.h
index bf3d4962d6a..4b731b24dc1 100644
--- a/drivers/char/ipmi/ipmi_si_sm.h
+++ b/drivers/char/ipmi/ipmi_si_sm.h
@@ -50,11 +50,12 @@ struct si_sm_io
/* Generic info used by the actual handling routines, the
state machine shouldn't touch these. */
- void *info;
void __iomem *addr;
int regspacing;
int regsize;
int regshift;
+ int addr_type;
+ long addr_data;
};
/* Results of SMI events. */
diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c
index 1f3159eb1ed..616539310d9 100644
--- a/drivers/char/ipmi/ipmi_watchdog.c
+++ b/drivers/char/ipmi/ipmi_watchdog.c
@@ -996,7 +996,7 @@ static struct notifier_block wdog_panic_notifier = {
};
-static void ipmi_new_smi(int if_num)
+static void ipmi_new_smi(int if_num, struct device *device)
{
ipmi_register_watchdog(if_num);
}
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 26d0116b48d..5245ba1649e 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -88,21 +88,15 @@ static inline int uncached_access(struct file *file, unsigned long addr)
}
#ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE
-static inline int valid_phys_addr_range(unsigned long addr, size_t *count)
+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
{
- unsigned long end_mem;
-
- end_mem = __pa(high_memory);
- if (addr >= end_mem)
+ if (addr + count > __pa(high_memory))
return 0;
- if (*count > end_mem - addr)
- *count = end_mem - addr;
-
return 1;
}
-static inline int valid_mmap_phys_addr_range(unsigned long addr, size_t *size)
+static inline int valid_mmap_phys_addr_range(unsigned long addr, size_t size)
{
return 1;
}
@@ -119,7 +113,7 @@ static ssize_t read_mem(struct file * file, char __user * buf,
ssize_t read, sz;
char *ptr;
- if (!valid_phys_addr_range(p, &count))
+ if (!valid_phys_addr_range(p, count))
return -EFAULT;
read = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
@@ -177,7 +171,7 @@ static ssize_t write_mem(struct file * file, const char __user * buf,
unsigned long copied;
void *ptr;
- if (!valid_phys_addr_range(p, &count))
+ if (!valid_phys_addr_range(p, count))
return -EFAULT;
written = 0;
@@ -249,7 +243,7 @@ static int mmap_mem(struct file * file, struct vm_area_struct * vma)
{
size_t size = vma->vm_end - vma->vm_start;
- if (!valid_mmap_phys_addr_range(vma->vm_pgoff << PAGE_SHIFT, &size))
+ if (!valid_mmap_phys_addr_range(vma->vm_pgoff << PAGE_SHIFT, size))
return -EINVAL;
vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
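The /dev/mem change is semantic as well as cosmetic: valid_phys_addr_range() used to shrink *count so a read or write quietly stopped at high_memory, whereas it now takes the size by value and either accepts or rejects the whole range. A caller-side sketch of the new contract, mirroring the read_mem/write_mem paths above:

    /* Before: count could come back truncated and the copy proceeded.
     * After:  an out-of-range request fails up front, nothing is copied. */
    if (!valid_phys_addr_range(p, count))
            return -EFAULT;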
diff --git a/drivers/char/tlclk.c b/drivers/char/tlclk.c
index 4c272189cd4..2546637a55c 100644
--- a/drivers/char/tlclk.c
+++ b/drivers/char/tlclk.c
@@ -767,6 +767,7 @@ static int __init tlclk_init(void)
printk(KERN_ERR "tlclk: can't get major %d.\n", tlclk_major);
return ret;
}
+ tlclk_major = ret;
alarm_events = kzalloc( sizeof(struct tlclk_alarms), GFP_KERNEL);
if (!alarm_events)
goto out1;
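The one-line tlclk fix matters because register_chrdev(), when asked for major 0, allocates a free major number and returns it; without storing that return value the driver would later tear down (and publish) the wrong major. A sketch of the dynamic-major pattern the fix restores — the device name and fops identifiers are stand-ins, not the driver's actual symbols:

    ret = register_chrdev(0, "tlclk", &tlclk_fops); /* 0 => kernel picks a free major */
    if (ret < 0)
            return ret;
    tlclk_major = ret;                              /* remember the allocated major ... */
    /* ... so teardown releases the right one: */
    unregister_chrdev(tlclk_major, "tlclk");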
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index 52f3eb45d2b..b582d0cdc24 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -64,35 +64,35 @@ config EDAC_AMD76X
config EDAC_E7XXX
tristate "Intel e7xxx (e7205, e7500, e7501, e7505)"
- depends on EDAC_MM_EDAC && PCI
+ depends on EDAC_MM_EDAC && PCI && X86_32
help
Support for error detection and correction on the Intel
E7205, E7500, E7501 and E7505 server chipsets.
config EDAC_E752X
tristate "Intel e752x (e7520, e7525, e7320)"
- depends on EDAC_MM_EDAC && PCI
+ depends on EDAC_MM_EDAC && PCI && X86
help
Support for error detection and correction on the Intel
E7520, E7525, E7320 server chipsets.
config EDAC_I82875P
tristate "Intel 82875p (D82875P, E7210)"
- depends on EDAC_MM_EDAC && PCI
+ depends on EDAC_MM_EDAC && PCI && X86_32
help
Support for error detection and correction on the Intel
DP82785P and E7210 server chipsets.
config EDAC_I82860
tristate "Intel 82860"
- depends on EDAC_MM_EDAC && PCI
+ depends on EDAC_MM_EDAC && PCI && X86_32
help
Support for error detection and correction on the Intel
82860 chipset.
config EDAC_R82600
tristate "Radisys 82600 embedded chipset"
- depends on EDAC_MM_EDAC
+ depends on EDAC_MM_EDAC && PCI && X86_32
help
Support for error detection and correction on the Radisys
82600 embedded chipset.
diff --git a/drivers/edac/amd76x_edac.c b/drivers/edac/amd76x_edac.c
index 2fcc8120b53..53423ad6d4a 100644
--- a/drivers/edac/amd76x_edac.c
+++ b/drivers/edac/amd76x_edac.c
@@ -12,25 +12,26 @@
*
*/
-
#include <linux/config.h>
#include <linux/module.h>
#include <linux/init.h>
-
#include <linux/pci.h>
#include <linux/pci_ids.h>
-
#include <linux/slab.h>
-
#include "edac_mc.h"
+#define amd76x_printk(level, fmt, arg...) \
+ edac_printk(level, "amd76x", fmt, ##arg)
+
+#define amd76x_mc_printk(mci, level, fmt, arg...) \
+ edac_mc_chipset_printk(mci, level, "amd76x", fmt, ##arg)
#define AMD76X_NR_CSROWS 8
#define AMD76X_NR_CHANS 1
#define AMD76X_NR_DIMMS 4
-
/* AMD 76x register addresses - device 0 function 0 - PCI bridge */
+
#define AMD76X_ECC_MODE_STATUS 0x48 /* Mode and status of ECC (32b)
*
* 31:16 reserved
@@ -42,6 +43,7 @@
* 7:4 UE cs row
* 3:0 CE cs row
*/
+
#define AMD76X_DRAM_MODE_STATUS 0x58 /* DRAM Mode and status (32b)
*
* 31:26 clock disable 5 - 0
@@ -56,6 +58,7 @@
* 15:8 reserved
* 7:0 x4 mode enable 7 - 0
*/
+
#define AMD76X_MEM_BASE_ADDR 0xC0 /* Memory base address (8 x 32b)
*
* 31:23 chip-select base
@@ -66,29 +69,28 @@
* 0 chip-select enable
*/
-
struct amd76x_error_info {
u32 ecc_mode_status;
};
-
enum amd76x_chips {
AMD761 = 0,
AMD762
};
-
struct amd76x_dev_info {
const char *ctl_name;
};
-
static const struct amd76x_dev_info amd76x_devs[] = {
- [AMD761] = {.ctl_name = "AMD761"},
- [AMD762] = {.ctl_name = "AMD762"},
+ [AMD761] = {
+ .ctl_name = "AMD761"
+ },
+ [AMD762] = {
+ .ctl_name = "AMD762"
+ },
};
-
/**
* amd76x_get_error_info - fetch error information
* @mci: Memory controller
@@ -97,23 +99,21 @@ static const struct amd76x_dev_info amd76x_devs[] = {
* Fetch and store the AMD76x ECC status. Clear pending status
* on the chip so that further errors will be reported
*/
-
-static void amd76x_get_error_info (struct mem_ctl_info *mci,
- struct amd76x_error_info *info)
+static void amd76x_get_error_info(struct mem_ctl_info *mci,
+ struct amd76x_error_info *info)
{
pci_read_config_dword(mci->pdev, AMD76X_ECC_MODE_STATUS,
&info->ecc_mode_status);
if (info->ecc_mode_status & BIT(8))
pci_write_bits32(mci->pdev, AMD76X_ECC_MODE_STATUS,
- (u32) BIT(8), (u32) BIT(8));
+ (u32) BIT(8), (u32) BIT(8));
if (info->ecc_mode_status & BIT(9))
pci_write_bits32(mci->pdev, AMD76X_ECC_MODE_STATUS,
- (u32) BIT(9), (u32) BIT(9));
+ (u32) BIT(9), (u32) BIT(9));
}
-
/**
* amd76x_process_error_info - Error check
* @mci: Memory controller
@@ -124,8 +124,7 @@ static void amd76x_get_error_info (struct mem_ctl_info *mci,
* A return of 1 indicates an error. Also if handle_errors is true
* then attempt to handle and clean up after the error
*/
-
-static int amd76x_process_error_info (struct mem_ctl_info *mci,
+static int amd76x_process_error_info(struct mem_ctl_info *mci,
struct amd76x_error_info *info, int handle_errors)
{
int error_found;
@@ -141,9 +140,8 @@ static int amd76x_process_error_info (struct mem_ctl_info *mci,
if (handle_errors) {
row = (info->ecc_mode_status >> 4) & 0xf;
- edac_mc_handle_ue(mci,
- mci->csrows[row].first_page, 0, row,
- mci->ctl_name);
+ edac_mc_handle_ue(mci, mci->csrows[row].first_page, 0,
+ row, mci->ctl_name);
}
}
@@ -155,11 +153,11 @@ static int amd76x_process_error_info (struct mem_ctl_info *mci,
if (handle_errors) {
row = info->ecc_mode_status & 0xf;
- edac_mc_handle_ce(mci,
- mci->csrows[row].first_page, 0, 0, row, 0,
- mci->ctl_name);
+ edac_mc_handle_ce(mci, mci->csrows[row].first_page, 0,
+ 0, row, 0, mci->ctl_name);
}
}
+
return error_found;
}
@@ -170,16 +168,14 @@ static int amd76x_process_error_info (struct mem_ctl_info *mci,
* Called by the poll handlers this function reads the status
* from the controller and checks for errors.
*/
-
static void amd76x_check(struct mem_ctl_info *mci)
{
struct amd76x_error_info info;
- debugf3("MC: " __FILE__ ": %s()\n", __func__);
+ debugf3("%s()\n", __func__);
amd76x_get_error_info(mci, &info);
amd76x_process_error_info(mci, &info, 1);
}
-
/**
* amd76x_probe1 - Perform set up for detected device
* @pdev; PCI device detected
@@ -189,7 +185,6 @@ static void amd76x_check(struct mem_ctl_info *mci)
* controller status reporting. We configure and set up the
* memory controller reporting and claim the device.
*/
-
static int amd76x_probe1(struct pci_dev *pdev, int dev_idx)
{
int rc = -ENODEV;
@@ -203,12 +198,11 @@ static int amd76x_probe1(struct pci_dev *pdev, int dev_idx)
};
u32 ems;
u32 ems_mode;
+ struct amd76x_error_info discard;
- debugf0("MC: " __FILE__ ": %s()\n", __func__);
-
+ debugf0("%s()\n", __func__);
pci_read_config_dword(pdev, AMD76X_ECC_MODE_STATUS, &ems);
ems_mode = (ems >> 10) & 0x3;
-
mci = edac_mc_alloc(0, AMD76X_NR_CSROWS, AMD76X_NR_CHANS);
if (mci == NULL) {
@@ -216,16 +210,13 @@ static int amd76x_probe1(struct pci_dev *pdev, int dev_idx)
goto fail;
}
- debugf0("MC: " __FILE__ ": %s(): mci = %p\n", __func__, mci);
-
- mci->pdev = pci_dev_get(pdev);
+ debugf0("%s(): mci = %p\n", __func__, mci);
+ mci->pdev = pdev;
mci->mtype_cap = MEM_FLAG_RDDR;
-
mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED;
mci->edac_cap = ems_mode ?
- (EDAC_FLAG_EC | EDAC_FLAG_SECDED) : EDAC_FLAG_NONE;
-
- mci->mod_name = BS_MOD_STR;
+ (EDAC_FLAG_EC | EDAC_FLAG_SECDED) : EDAC_FLAG_NONE;
+ mci->mod_name = EDAC_MOD_STR;
mci->mod_ver = "$Revision: 1.4.2.5 $";
mci->ctl_name = amd76x_devs[dev_idx].ctl_name;
mci->edac_check = amd76x_check;
@@ -240,18 +231,15 @@ static int amd76x_probe1(struct pci_dev *pdev, int dev_idx)
/* find the DRAM Chip Select Base address and mask */
pci_read_config_dword(mci->pdev,
- AMD76X_MEM_BASE_ADDR + (index * 4),
- &mba);
+ AMD76X_MEM_BASE_ADDR + (index * 4), &mba);
if (!(mba & BIT(0)))
continue;
mba_base = mba & 0xff800000UL;
mba_mask = ((mba & 0xff80) << 16) | 0x7fffffUL;
-
pci_read_config_dword(mci->pdev, AMD76X_DRAM_MODE_STATUS,
- &dms);
-
+ &dms);
csrow->first_page = mba_base >> PAGE_SHIFT;
csrow->nr_pages = (mba_mask + 1) >> PAGE_SHIFT;
csrow->last_page = csrow->first_page + csrow->nr_pages - 1;
@@ -262,40 +250,33 @@ static int amd76x_probe1(struct pci_dev *pdev, int dev_idx)
csrow->edac_mode = ems_modes[ems_mode];
}
- /* clear counters */
- pci_write_bits32(mci->pdev, AMD76X_ECC_MODE_STATUS, (u32) (0x3 << 8),
- (u32) (0x3 << 8));
+ amd76x_get_error_info(mci, &discard); /* clear counters */
if (edac_mc_add_mc(mci)) {
- debugf3("MC: " __FILE__
- ": %s(): failed edac_mc_add_mc()\n", __func__);
+ debugf3("%s(): failed edac_mc_add_mc()\n", __func__);
goto fail;
}
/* get this far and it's successful */
- debugf3("MC: " __FILE__ ": %s(): success\n", __func__);
+ debugf3("%s(): success\n", __func__);
return 0;
fail:
- if (mci) {
- if(mci->pdev)
- pci_dev_put(mci->pdev);
+ if (mci != NULL)
edac_mc_free(mci);
- }
return rc;
}
/* returns count (>= 0), or negative on error */
static int __devinit amd76x_init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+ const struct pci_device_id *ent)
{
- debugf0("MC: " __FILE__ ": %s()\n", __func__);
+ debugf0("%s()\n", __func__);
/* don't need to call pci_device_enable() */
return amd76x_probe1(pdev, ent->driver_data);
}
-
/**
* amd76x_remove_one - driver shutdown
* @pdev: PCI device being handed back
@@ -304,35 +285,36 @@ static int __devinit amd76x_init_one(struct pci_dev *pdev,
* structure for the device then delete the mci and free the
* resources.
*/
-
static void __devexit amd76x_remove_one(struct pci_dev *pdev)
{
struct mem_ctl_info *mci;
- debugf0(__FILE__ ": %s()\n", __func__);
+ debugf0("%s()\n", __func__);
- if ((mci = edac_mc_find_mci_by_pdev(pdev)) == NULL)
+ if ((mci = edac_mc_del_mc(pdev)) == NULL)
return;
- if (edac_mc_del_mc(mci))
- return;
- pci_dev_put(mci->pdev);
+
edac_mc_free(mci);
}
-
static const struct pci_device_id amd76x_pci_tbl[] __devinitdata = {
- {PCI_VEND_DEV(AMD, FE_GATE_700C), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- AMD762},
- {PCI_VEND_DEV(AMD, FE_GATE_700E), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- AMD761},
- {0,} /* 0 terminated list. */
+ {
+ PCI_VEND_DEV(AMD, FE_GATE_700C), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ AMD762
+ },
+ {
+ PCI_VEND_DEV(AMD, FE_GATE_700E), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ AMD761
+ },
+ {
+ 0,
+ } /* 0 terminated list. */
};
MODULE_DEVICE_TABLE(pci, amd76x_pci_tbl);
-
static struct pci_driver amd76x_driver = {
- .name = BS_MOD_STR,
+ .name = EDAC_MOD_STR,
.probe = amd76x_init_one,
.remove = __devexit_p(amd76x_remove_one),
.id_table = amd76x_pci_tbl,
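The new amd76x_printk/amd76x_mc_printk wrappers funnel the driver's messages through edac_printk()/edac_mc_chipset_printk(), so every line carries a consistent chipset-tagged prefix instead of ad-hoc __FILE__ strings (the e752x changes that follow add the same pair). Hypothetical call sites, just to show the shape of the two macros:

    amd76x_printk(KERN_ERR, "error polling ECC status\n");
    amd76x_mc_printk(mci, KERN_WARNING, "csrow %d reported an error\n", row);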
diff --git a/drivers/edac/e752x_edac.c b/drivers/edac/e752x_edac.c
index c454ded2b06..66572c5323a 100644
--- a/drivers/edac/e752x_edac.c
+++ b/drivers/edac/e752x_edac.c
@@ -17,18 +17,19 @@
*
*/
-
#include <linux/config.h>
#include <linux/module.h>
#include <linux/init.h>
-
#include <linux/pci.h>
#include <linux/pci_ids.h>
-
#include <linux/slab.h>
-
#include "edac_mc.h"
+#define e752x_printk(level, fmt, arg...) \
+ edac_printk(level, "e752x", fmt, ##arg)
+
+#define e752x_mc_printk(mci, level, fmt, arg...) \
+ edac_mc_chipset_printk(mci, level, "e752x", fmt, ##arg)
#ifndef PCI_DEVICE_ID_INTEL_7520_0
#define PCI_DEVICE_ID_INTEL_7520_0 0x3590
@@ -56,7 +57,6 @@
#define E752X_NR_CSROWS 8 /* number of csrows */
-
/* E752X register addresses - device 0 function 0 */
#define E752X_DRB 0x60 /* DRAM row boundary register (8b) */
#define E752X_DRA 0x70 /* DRAM row attribute register (8b) */
@@ -156,7 +156,6 @@ enum e752x_chips {
E7320 = 2
};
-
struct e752x_pvt {
struct pci_dev *bridge_ck;
struct pci_dev *dev_d0f0;
@@ -170,9 +169,9 @@ struct e752x_pvt {
const struct e752x_dev_info *dev_info;
};
-
struct e752x_dev_info {
u16 err_dev;
+ u16 ctl_dev;
const char *ctl_name;
};
@@ -198,38 +197,47 @@ struct e752x_error_info {
static const struct e752x_dev_info e752x_devs[] = {
[E7520] = {
- .err_dev = PCI_DEVICE_ID_INTEL_7520_1_ERR,
- .ctl_name = "E7520"},
+ .err_dev = PCI_DEVICE_ID_INTEL_7520_1_ERR,
+ .ctl_dev = PCI_DEVICE_ID_INTEL_7520_0,
+ .ctl_name = "E7520"
+ },
[E7525] = {
- .err_dev = PCI_DEVICE_ID_INTEL_7525_1_ERR,
- .ctl_name = "E7525"},
+ .err_dev = PCI_DEVICE_ID_INTEL_7525_1_ERR,
+ .ctl_dev = PCI_DEVICE_ID_INTEL_7525_0,
+ .ctl_name = "E7525"
+ },
[E7320] = {
- .err_dev = PCI_DEVICE_ID_INTEL_7320_1_ERR,
- .ctl_name = "E7320"},
+ .err_dev = PCI_DEVICE_ID_INTEL_7320_1_ERR,
+ .ctl_dev = PCI_DEVICE_ID_INTEL_7320_0,
+ .ctl_name = "E7320"
+ },
};
-
static unsigned long ctl_page_to_phys(struct mem_ctl_info *mci,
- unsigned long page)
+ unsigned long page)
{
u32 remap;
struct e752x_pvt *pvt = (struct e752x_pvt *) mci->pvt_info;
- debugf3("MC: " __FILE__ ": %s()\n", __func__);
+ debugf3("%s()\n", __func__);
if (page < pvt->tolm)
return page;
+
if ((page >= 0x100000) && (page < pvt->remapbase))
return page;
+
remap = (page - pvt->tolm) + pvt->remapbase;
+
if (remap < pvt->remaplimit)
return remap;
- printk(KERN_ERR "Invalid page %lx - out of range\n", page);
+
+ e752x_printk(KERN_ERR, "Invalid page %lx - out of range\n", page);
return pvt->tolm - 1;
}
static void do_process_ce(struct mem_ctl_info *mci, u16 error_one,
- u32 sec1_add, u16 sec1_syndrome)
+ u32 sec1_add, u16 sec1_syndrome)
{
u32 page;
int row;
@@ -237,7 +245,7 @@ static void do_process_ce(struct mem_ctl_info *mci, u16 error_one,
int i;
struct e752x_pvt *pvt = (struct e752x_pvt *) mci->pvt_info;
- debugf3("MC: " __FILE__ ": %s()\n", __func__);
+ debugf3("%s()\n", __func__);
/* convert the addr to 4k page */
page = sec1_add >> (PAGE_SHIFT - 4);
@@ -246,36 +254,37 @@ static void do_process_ce(struct mem_ctl_info *mci, u16 error_one,
if (pvt->mc_symmetric) {
/* chip select are bits 14 & 13 */
row = ((page >> 1) & 3);
- printk(KERN_WARNING
- "Test row %d Table %d %d %d %d %d %d %d %d\n",
- row, pvt->map[0], pvt->map[1], pvt->map[2],
- pvt->map[3], pvt->map[4], pvt->map[5],
- pvt->map[6], pvt->map[7]);
+ e752x_printk(KERN_WARNING,
+ "Test row %d Table %d %d %d %d %d %d %d %d\n", row,
+ pvt->map[0], pvt->map[1], pvt->map[2], pvt->map[3],
+ pvt->map[4], pvt->map[5], pvt->map[6], pvt->map[7]);
/* test for channel remapping */
for (i = 0; i < 8; i++) {
if (pvt->map[i] == row)
break;
}
- printk(KERN_WARNING "Test computed row %d\n", i);
+
+ e752x_printk(KERN_WARNING, "Test computed row %d\n", i);
+
if (i < 8)
row = i;
else
- printk(KERN_WARNING
- "MC%d: row %d not found in remap table\n",
- mci->mc_idx, row);
+ e752x_mc_printk(mci, KERN_WARNING,
+ "row %d not found in remap table\n", row);
} else
row = edac_mc_find_csrow_by_page(mci, page);
+
/* 0 = channel A, 1 = channel B */
channel = !(error_one & 1);
if (!pvt->map_type)
row = 7 - row;
+
edac_mc_handle_ce(mci, page, 0, sec1_syndrome, row, channel,
- "e752x CE");
+ "e752x CE");
}
-
static inline void process_ce(struct mem_ctl_info *mci, u16 error_one,
u32 sec1_add, u16 sec1_syndrome, int *error_found,
int handle_error)
@@ -286,36 +295,42 @@ static inline void process_ce(struct mem_ctl_info *mci, u16 error_one,
do_process_ce(mci, error_one, sec1_add, sec1_syndrome);
}
-static void do_process_ue(struct mem_ctl_info *mci, u16 error_one, u32 ded_add,
- u32 scrb_add)
+static void do_process_ue(struct mem_ctl_info *mci, u16 error_one,
+ u32 ded_add, u32 scrb_add)
{
u32 error_2b, block_page;
int row;
struct e752x_pvt *pvt = (struct e752x_pvt *) mci->pvt_info;
- debugf3("MC: " __FILE__ ": %s()\n", __func__);
+ debugf3("%s()\n", __func__);
if (error_one & 0x0202) {
error_2b = ded_add;
+
/* convert to 4k address */
block_page = error_2b >> (PAGE_SHIFT - 4);
+
row = pvt->mc_symmetric ?
- /* chip select are bits 14 & 13 */
- ((block_page >> 1) & 3) :
- edac_mc_find_csrow_by_page(mci, block_page);
+ /* chip select are bits 14 & 13 */
+ ((block_page >> 1) & 3) :
+ edac_mc_find_csrow_by_page(mci, block_page);
+
edac_mc_handle_ue(mci, block_page, 0, row,
- "e752x UE from Read");
+ "e752x UE from Read");
}
if (error_one & 0x0404) {
error_2b = scrb_add;
+
/* convert to 4k address */
block_page = error_2b >> (PAGE_SHIFT - 4);
+
row = pvt->mc_symmetric ?
- /* chip select are bits 14 & 13 */
- ((block_page >> 1) & 3) :
- edac_mc_find_csrow_by_page(mci, block_page);
+ /* chip select are bits 14 & 13 */
+ ((block_page >> 1) & 3) :
+ edac_mc_find_csrow_by_page(mci, block_page);
+
edac_mc_handle_ue(mci, block_page, 0, row,
- "e752x UE from Scruber");
+				"e752x UE from Scrubber");
}
}
@@ -336,7 +351,7 @@ static inline void process_ue_no_info_wr(struct mem_ctl_info *mci,
if (!handle_error)
return;
- debugf3("MC: " __FILE__ ": %s()\n", __func__);
+ debugf3("%s()\n", __func__);
edac_mc_handle_ue_no_info(mci, "e752x UE log memory write");
}
@@ -348,13 +363,13 @@ static void do_process_ded_retry(struct mem_ctl_info *mci, u16 error,
struct e752x_pvt *pvt = (struct e752x_pvt *) mci->pvt_info;
error_1b = retry_add;
- page = error_1b >> (PAGE_SHIFT - 4); /* convert the addr to 4k page */
+ page = error_1b >> (PAGE_SHIFT - 4); /* convert the addr to 4k page */
row = pvt->mc_symmetric ?
- ((page >> 1) & 3) : /* chip select are bits 14 & 13 */
- edac_mc_find_csrow_by_page(mci, page);
- printk(KERN_WARNING
- "MC%d: CE page 0x%lx, row %d : Memory read retry\n",
- mci->mc_idx, (long unsigned int) page, row);
+ ((page >> 1) & 3) : /* chip select are bits 14 & 13 */
+ edac_mc_find_csrow_by_page(mci, page);
+ e752x_mc_printk(mci, KERN_WARNING,
+ "CE page 0x%lx, row %d : Memory read retry\n",
+ (long unsigned int) page, row);
}
static inline void process_ded_retry(struct mem_ctl_info *mci, u16 error,
@@ -372,8 +387,7 @@ static inline void process_threshold_ce(struct mem_ctl_info *mci, u16 error,
*error_found = 1;
if (handle_error)
- printk(KERN_WARNING "MC%d: Memory threshold CE\n",
- mci->mc_idx);
+ e752x_mc_printk(mci, KERN_WARNING, "Memory threshold CE\n");
}
static char *global_message[11] = {
@@ -391,8 +405,8 @@ static void do_global_error(int fatal, u32 errors)
for (i = 0; i < 11; i++) {
if (errors & (1 << i))
- printk(KERN_WARNING "%sError %s\n",
- fatal_message[fatal], global_message[i]);
+ e752x_printk(KERN_WARNING, "%sError %s\n",
+ fatal_message[fatal], global_message[i]);
}
}
@@ -418,8 +432,8 @@ static void do_hub_error(int fatal, u8 errors)
for (i = 0; i < 7; i++) {
if (errors & (1 << i))
- printk(KERN_WARNING "%sError %s\n",
- fatal_message[fatal], hub_message[i]);
+ e752x_printk(KERN_WARNING, "%sError %s\n",
+ fatal_message[fatal], hub_message[i]);
}
}
@@ -445,8 +459,8 @@ static void do_membuf_error(u8 errors)
for (i = 0; i < 4; i++) {
if (errors & (1 << i))
- printk(KERN_WARNING "Non-Fatal Error %s\n",
- membuf_message[i]);
+ e752x_printk(KERN_WARNING, "Non-Fatal Error %s\n",
+ membuf_message[i]);
}
}
@@ -458,8 +472,7 @@ static inline void membuf_error(u8 errors, int *error_found, int handle_error)
do_membuf_error(errors);
}
-#if 0
-char *sysbus_message[10] = {
+static char *sysbus_message[10] = {
"Addr or Request Parity",
"Data Strobe Glitch",
"Addr Strobe Glitch",
@@ -470,7 +483,6 @@ char *sysbus_message[10] = {
"Memory Parity",
"IO Subsystem Parity"
};
-#endif /* 0 */
static void do_sysbus_error(int fatal, u32 errors)
{
@@ -478,8 +490,8 @@ static void do_sysbus_error(int fatal, u32 errors)
for (i = 0; i < 10; i++) {
if (errors & (1 << i))
- printk(KERN_WARNING "%sError System Bus %s\n",
- fatal_message[fatal], global_message[i]);
+ e752x_printk(KERN_WARNING, "%sError System Bus %s\n",
+ fatal_message[fatal], sysbus_message[i]);
}
}
@@ -492,33 +504,42 @@ static inline void sysbus_error(int fatal, u32 errors, int *error_found,
do_sysbus_error(fatal, errors);
}
-static void e752x_check_hub_interface (struct e752x_error_info *info,
+static void e752x_check_hub_interface(struct e752x_error_info *info,
int *error_found, int handle_error)
{
u8 stat8;
//pci_read_config_byte(dev,E752X_HI_FERR,&stat8);
+
stat8 = info->hi_ferr;
+
if(stat8 & 0x7f) { /* Error, so process */
stat8 &= 0x7f;
+
if(stat8 & 0x2b)
hub_error(1, stat8 & 0x2b, error_found, handle_error);
+
if(stat8 & 0x54)
hub_error(0, stat8 & 0x54, error_found, handle_error);
}
+
//pci_read_config_byte(dev,E752X_HI_NERR,&stat8);
+
stat8 = info->hi_nerr;
+
if(stat8 & 0x7f) { /* Error, so process */
stat8 &= 0x7f;
+
if (stat8 & 0x2b)
hub_error(1, stat8 & 0x2b, error_found, handle_error);
+
if(stat8 & 0x54)
hub_error(0, stat8 & 0x54, error_found, handle_error);
}
}
-static void e752x_check_sysbus (struct e752x_error_info *info, int *error_found,
- int handle_error)
+static void e752x_check_sysbus(struct e752x_error_info *info,
+ int *error_found, int handle_error)
{
u32 stat32, error32;
@@ -530,27 +551,34 @@ static void e752x_check_sysbus (struct e752x_error_info *info, int *error_found,
error32 = (stat32 >> 16) & 0x3ff;
stat32 = stat32 & 0x3ff;
+
if(stat32 & 0x083)
sysbus_error(1, stat32 & 0x083, error_found, handle_error);
+
if(stat32 & 0x37c)
sysbus_error(0, stat32 & 0x37c, error_found, handle_error);
+
if(error32 & 0x083)
sysbus_error(1, error32 & 0x083, error_found, handle_error);
+
if(error32 & 0x37c)
sysbus_error(0, error32 & 0x37c, error_found, handle_error);
}
-static void e752x_check_membuf (struct e752x_error_info *info, int *error_found,
- int handle_error)
+static void e752x_check_membuf (struct e752x_error_info *info,
+ int *error_found, int handle_error)
{
u8 stat8;
stat8 = info->buf_ferr;
+
if (stat8 & 0x0f) { /* Error, so process */
stat8 &= 0x0f;
membuf_error(stat8, error_found, handle_error);
}
+
stat8 = info->buf_nerr;
+
if (stat8 & 0x0f) { /* Error, so process */
stat8 &= 0x0f;
membuf_error(stat8, error_found, handle_error);
@@ -558,7 +586,8 @@ static void e752x_check_membuf (struct e752x_error_info *info, int *error_found,
}
static void e752x_check_dram (struct mem_ctl_info *mci,
- struct e752x_error_info *info, int *error_found, int handle_error)
+ struct e752x_error_info *info, int *error_found,
+ int handle_error)
{
u16 error_one, error_next;
@@ -608,7 +637,7 @@ static void e752x_check_dram (struct mem_ctl_info *mci,
}
static void e752x_get_error_info (struct mem_ctl_info *mci,
- struct e752x_error_info *info)
+ struct e752x_error_info *info)
{
struct pci_dev *dev;
struct e752x_pvt *pvt;
@@ -616,7 +645,6 @@ static void e752x_get_error_info (struct mem_ctl_info *mci,
memset(info, 0, sizeof(*info));
pvt = (struct e752x_pvt *) mci->pvt_info;
dev = pvt->dev_d0f1;
-
pci_read_config_dword(dev, E752X_FERR_GLOBAL, &info->ferr_global);
if (info->ferr_global) {
@@ -727,7 +755,8 @@ static int e752x_process_error_info (struct mem_ctl_info *mci,
static void e752x_check(struct mem_ctl_info *mci)
{
struct e752x_error_info info;
- debugf3("MC: " __FILE__ ": %s()\n", __func__);
+
+ debugf3("%s()\n", __func__);
e752x_get_error_info(mci, &info);
e752x_process_error_info(mci, &info, 1);
}
@@ -736,23 +765,21 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx)
{
int rc = -ENODEV;
int index;
- u16 pci_data, stat;
- u32 stat32;
- u16 stat16;
+ u16 pci_data;
u8 stat8;
struct mem_ctl_info *mci = NULL;
struct e752x_pvt *pvt = NULL;
u16 ddrcsr;
u32 drc;
- int drc_chan; /* Number of channels 0=1chan,1=2chan */
- int drc_drbg; /* DRB granularity 0=64mb,1=128mb */
- int drc_ddim; /* DRAM Data Integrity Mode 0=none,2=edac */
+ int drc_chan; /* Number of channels 0=1chan,1=2chan */
+ int drc_drbg; /* DRB granularity 0=64mb, 1=128mb */
+ int drc_ddim; /* DRAM Data Integrity Mode 0=none,2=edac */
u32 dra;
unsigned long last_cumul_size;
- struct pci_dev *pres_dev;
struct pci_dev *dev = NULL;
+ struct e752x_error_info discard;
- debugf0("MC: " __FILE__ ": %s(): mci\n", __func__);
+ debugf0("%s(): mci\n", __func__);
debugf0("Starting Probe1\n");
/* enable device 0 function 1 */
@@ -776,34 +803,35 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx)
goto fail;
}
- debugf3("MC: " __FILE__ ": %s(): init mci\n", __func__);
-
+ debugf3("%s(): init mci\n", __func__);
mci->mtype_cap = MEM_FLAG_RDDR;
mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED |
EDAC_FLAG_S4ECD4ED;
/* FIXME - what if different memory types are in different csrows? */
- mci->mod_name = BS_MOD_STR;
+ mci->mod_name = EDAC_MOD_STR;
mci->mod_ver = "$Revision: 1.5.2.11 $";
mci->pdev = pdev;
- debugf3("MC: " __FILE__ ": %s(): init pvt\n", __func__);
+ debugf3("%s(): init pvt\n", __func__);
pvt = (struct e752x_pvt *) mci->pvt_info;
pvt->dev_info = &e752x_devs[dev_idx];
pvt->bridge_ck = pci_get_device(PCI_VENDOR_ID_INTEL,
- pvt->dev_info->err_dev,
- pvt->bridge_ck);
+ pvt->dev_info->err_dev,
+ pvt->bridge_ck);
+
if (pvt->bridge_ck == NULL)
pvt->bridge_ck = pci_scan_single_device(pdev->bus,
- PCI_DEVFN(0, 1));
+ PCI_DEVFN(0, 1));
+
if (pvt->bridge_ck == NULL) {
- printk(KERN_ERR "MC: error reporting device not found:"
- "vendor %x device 0x%x (broken BIOS?)\n",
- PCI_VENDOR_ID_INTEL, e752x_devs[dev_idx].err_dev);
+ e752x_printk(KERN_ERR, "error reporting device not found:"
+ "vendor %x device 0x%x (broken BIOS?)\n",
+ PCI_VENDOR_ID_INTEL, e752x_devs[dev_idx].err_dev);
goto fail;
}
- pvt->mc_symmetric = ((ddrcsr & 0x10) != 0);
- debugf3("MC: " __FILE__ ": %s(): more mci init\n", __func__);
+ pvt->mc_symmetric = ((ddrcsr & 0x10) != 0);
+ debugf3("%s(): more mci init\n", __func__);
mci->ctl_name = pvt->dev_info->ctl_name;
mci->edac_check = e752x_check;
mci->ctl_page_to_phys = ctl_page_to_phys;
@@ -820,6 +848,7 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx)
for (last_cumul_size = index = 0; index < mci->nr_csrows; index++) {
u8 value;
u32 cumul_size;
+
/* mem_dev 0=x8, 1=x4 */
int mem_dev = (dra >> (index * 4 + 2)) & 0x3;
struct csrow_info *csrow = &mci->csrows[index];
@@ -828,17 +857,18 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx)
pci_read_config_byte(mci->pdev, E752X_DRB + index, &value);
/* convert a 128 or 64 MiB DRB to a page size. */
cumul_size = value << (25 + drc_drbg - PAGE_SHIFT);
- debugf3("MC: " __FILE__ ": %s(): (%d) cumul_size 0x%x\n",
- __func__, index, cumul_size);
+ debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index,
+ cumul_size);
+
if (cumul_size == last_cumul_size)
- continue; /* not populated */
+ continue; /* not populated */
csrow->first_page = last_cumul_size;
csrow->last_page = cumul_size - 1;
csrow->nr_pages = cumul_size - last_cumul_size;
last_cumul_size = cumul_size;
- csrow->grain = 1 << 12; /* 4KiB - resolution of CELOG */
- csrow->mtype = MEM_RDDR; /* only one type supported */
+ csrow->grain = 1 << 12; /* 4KiB - resolution of CELOG */
+ csrow->mtype = MEM_RDDR; /* only one type supported */
csrow->dtype = mem_dev ? DEV_X4 : DEV_X8;
/*
@@ -862,29 +892,32 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx)
u8 value;
u8 last = 0;
u8 row = 0;
- for (index = 0; index < 8; index += 2) {
+ for (index = 0; index < 8; index += 2) {
pci_read_config_byte(mci->pdev, E752X_DRB + index,
- &value);
+ &value);
+
/* test if there is a dimm in this slot */
if (value == last) {
/* no dimm in the slot, so flag it as empty */
pvt->map[index] = 0xff;
pvt->map[index + 1] = 0xff;
- } else { /* there is a dimm in the slot */
+ } else { /* there is a dimm in the slot */
pvt->map[index] = row;
row++;
last = value;
/* test the next value to see if the dimm is
double sided */
pci_read_config_byte(mci->pdev,
- E752X_DRB + index + 1,
- &value);
+ E752X_DRB + index + 1,
+ &value);
pvt->map[index + 1] = (value == last) ?
- 0xff : /* the dimm is single sided,
- so flag as empty */
- row; /* this is a double sided dimm
- to save the next row # */
+ 0xff : /* the dimm is single sided,
+ * so flag as empty
+ */
+ row; /* this is a double sided dimm
+ * to save the next row #
+ */
row++;
last = value;
}
@@ -896,9 +929,8 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx)
pvt->map_type = ((stat8 & 0x0f) > ((stat8 >> 4) & 0x0f));
mci->edac_cap |= EDAC_FLAG_NONE;
+ debugf3("%s(): tolm, remapbase, remaplimit\n", __func__);
- debugf3("MC: " __FILE__ ": %s(): tolm, remapbase, remaplimit\n",
- __func__);
/* load the top of low memory, remap base, and remap limit vars */
pci_read_config_word(mci->pdev, E752X_TOLM, &pci_data);
pvt->tolm = ((u32) pci_data) << 4;
@@ -906,43 +938,18 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx)
pvt->remapbase = ((u32) pci_data) << 14;
pci_read_config_word(mci->pdev, E752X_REMAPLIMIT, &pci_data);
pvt->remaplimit = ((u32) pci_data) << 14;
- printk("tolm = %x, remapbase = %x, remaplimit = %x\n", pvt->tolm,
- pvt->remapbase, pvt->remaplimit);
+ e752x_printk(KERN_INFO,
+ "tolm = %x, remapbase = %x, remaplimit = %x\n", pvt->tolm,
+ pvt->remapbase, pvt->remaplimit);
if (edac_mc_add_mc(mci)) {
- debugf3("MC: " __FILE__
- ": %s(): failed edac_mc_add_mc()\n",
- __func__);
+ debugf3("%s(): failed edac_mc_add_mc()\n", __func__);
goto fail;
}
- /* Walk through the PCI table and clear errors */
- switch (dev_idx) {
- case E7520:
- dev = pci_get_device(PCI_VENDOR_ID_INTEL,
- PCI_DEVICE_ID_INTEL_7520_0, NULL);
- break;
- case E7525:
- dev = pci_get_device(PCI_VENDOR_ID_INTEL,
- PCI_DEVICE_ID_INTEL_7525_0, NULL);
- break;
- case E7320:
- dev = pci_get_device(PCI_VENDOR_ID_INTEL,
- PCI_DEVICE_ID_INTEL_7320_0, NULL);
- break;
- }
-
-
+ dev = pci_get_device(PCI_VENDOR_ID_INTEL, e752x_devs[dev_idx].ctl_dev,
+ NULL);
pvt->dev_d0f0 = dev;
- for (pres_dev = dev;
- ((struct pci_dev *) pres_dev->global_list.next != dev);
- pres_dev = (struct pci_dev *) pres_dev->global_list.next) {
- pci_read_config_dword(pres_dev, PCI_COMMAND, &stat32);
- stat = (u16) (stat32 >> 16);
- /* clear any error bits */
- if (stat32 & ((1 << 6) + (1 << 8)))
- pci_write_config_word(pres_dev, PCI_STATUS, stat);
- }
/* find the error reporting device and clear errors */
dev = pvt->dev_d0f1 = pci_dev_get(pvt->bridge_ck);
/* Turn off error disable & SMI in case the BIOS turned it on */
@@ -954,67 +961,51 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx)
pci_write_config_byte(dev, E752X_BUF_SMICMD, 0x00);
pci_write_config_byte(dev, E752X_DRAM_ERRMASK, 0x00);
pci_write_config_byte(dev, E752X_DRAM_SMICMD, 0x00);
- /* clear other MCH errors */
- pci_read_config_dword(dev, E752X_FERR_GLOBAL, &stat32);
- pci_write_config_dword(dev, E752X_FERR_GLOBAL, stat32);
- pci_read_config_dword(dev, E752X_NERR_GLOBAL, &stat32);
- pci_write_config_dword(dev, E752X_NERR_GLOBAL, stat32);
- pci_read_config_byte(dev, E752X_HI_FERR, &stat8);
- pci_write_config_byte(dev, E752X_HI_FERR, stat8);
- pci_read_config_byte(dev, E752X_HI_NERR, &stat8);
- pci_write_config_byte(dev, E752X_HI_NERR, stat8);
- pci_read_config_dword(dev, E752X_SYSBUS_FERR, &stat32);
- pci_write_config_dword(dev, E752X_SYSBUS_FERR, stat32);
- pci_read_config_byte(dev, E752X_BUF_FERR, &stat8);
- pci_write_config_byte(dev, E752X_BUF_FERR, stat8);
- pci_read_config_byte(dev, E752X_BUF_NERR, &stat8);
- pci_write_config_byte(dev, E752X_BUF_NERR, stat8);
- pci_read_config_word(dev, E752X_DRAM_FERR, &stat16);
- pci_write_config_word(dev, E752X_DRAM_FERR, stat16);
- pci_read_config_word(dev, E752X_DRAM_NERR, &stat16);
- pci_write_config_word(dev, E752X_DRAM_NERR, stat16);
+
+ e752x_get_error_info(mci, &discard); /* clear other MCH errors */
/* get this far and it's successful */
- debugf3("MC: " __FILE__ ": %s(): success\n", __func__);
+ debugf3("%s(): success\n", __func__);
return 0;
fail:
if (mci) {
if (pvt->dev_d0f0)
pci_dev_put(pvt->dev_d0f0);
+
if (pvt->dev_d0f1)
pci_dev_put(pvt->dev_d0f1);
+
if (pvt->bridge_ck)
pci_dev_put(pvt->bridge_ck);
+
edac_mc_free(mci);
}
+
return rc;
}
/* returns count (>= 0), or negative on error */
static int __devinit e752x_init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+ const struct pci_device_id *ent)
{
- debugf0("MC: " __FILE__ ": %s()\n", __func__);
+ debugf0("%s()\n", __func__);
/* wake up and enable device */
if(pci_enable_device(pdev) < 0)
return -EIO;
+
return e752x_probe1(pdev, ent->driver_data);
}
-
static void __devexit e752x_remove_one(struct pci_dev *pdev)
{
struct mem_ctl_info *mci;
struct e752x_pvt *pvt;
- debugf0(__FILE__ ": %s()\n", __func__);
-
- if ((mci = edac_mc_find_mci_by_pdev(pdev)) == NULL)
- return;
+ debugf0("%s()\n", __func__);
- if (edac_mc_del_mc(mci))
+ if ((mci = edac_mc_del_mc(pdev)) == NULL)
return;
pvt = (struct e752x_pvt *) mci->pvt_info;
@@ -1024,45 +1015,48 @@ static void __devexit e752x_remove_one(struct pci_dev *pdev)
edac_mc_free(mci);
}
-
static const struct pci_device_id e752x_pci_tbl[] __devinitdata = {
- {PCI_VEND_DEV(INTEL, 7520_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- E7520},
- {PCI_VEND_DEV(INTEL, 7525_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- E7525},
- {PCI_VEND_DEV(INTEL, 7320_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- E7320},
- {0,} /* 0 terminated list. */
+ {
+ PCI_VEND_DEV(INTEL, 7520_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ E7520
+ },
+ {
+ PCI_VEND_DEV(INTEL, 7525_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ E7525
+ },
+ {
+ PCI_VEND_DEV(INTEL, 7320_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ E7320
+ },
+ {
+ 0,
+ } /* 0 terminated list. */
};
MODULE_DEVICE_TABLE(pci, e752x_pci_tbl);
-
static struct pci_driver e752x_driver = {
- .name = BS_MOD_STR,
+ .name = EDAC_MOD_STR,
.probe = e752x_init_one,
.remove = __devexit_p(e752x_remove_one),
.id_table = e752x_pci_tbl,
};
-
static int __init e752x_init(void)
{
int pci_rc;
- debugf3("MC: " __FILE__ ": %s()\n", __func__);
+ debugf3("%s()\n", __func__);
pci_rc = pci_register_driver(&e752x_driver);
return (pci_rc < 0) ? pci_rc : 0;
}
-
static void __exit e752x_exit(void)
{
- debugf3("MC: " __FILE__ ": %s()\n", __func__);
+ debugf3("%s()\n", __func__);
pci_unregister_driver(&e752x_driver);
}
-
module_init(e752x_init);
module_exit(e752x_exit);
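ctl_page_to_phys() above folds addresses logged between top of low memory (tolm) and 4 GiB into the remap window described by remapbase and remaplimit, and passes everything else through unchanged. A self-contained sketch of that arithmetic with invented register values (all quantities are 4 KiB page numbers; the real values come from the E752X_TOLM, E752X_REMAPBASE and E752X_REMAPLIMIT reads in e752x_probe1()):

/* Sketch only: the page remapping performed by ctl_page_to_phys(),
 * reproduced outside the kernel with made-up example values.
 */
#include <stdio.h>

static unsigned long remap_page(unsigned long page, unsigned long tolm,
		unsigned long remapbase, unsigned long remaplimit)
{
	if (page < tolm)		/* below top of low memory: unchanged */
		return page;

	if (page >= 0x100000 && page < remapbase)	/* already above 4 GiB */
		return page;

	if ((page - tolm) + remapbase < remaplimit)	/* inside the remap window */
		return (page - tolm) + remapbase;

	return tolm - 1;		/* out of range, as in the driver */
}

int main(void)
{
	/* tolm = 3 GiB, remap window 4 GiB .. 5 GiB, all in 4 KiB pages */
	unsigned long tolm = 0xc0000, base = 0x100000, limit = 0x140000;

	/* a page logged at 0xd0000 (between tolm and 4 GiB) maps to 0x110000 */
	printf("0x%lx\n", remap_page(0xd0000, tolm, base, limit));
	return 0;
}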
diff --git a/drivers/edac/e7xxx_edac.c b/drivers/edac/e7xxx_edac.c
index d5e320dfc66..a9518d3e4be 100644
--- a/drivers/edac/e7xxx_edac.c
+++ b/drivers/edac/e7xxx_edac.c
@@ -11,9 +11,9 @@
* http://www.anime.net/~goemon/linux-ecc/
*
* Contributors:
- * Eric Biederman (Linux Networx)
- * Tom Zimmerman (Linux Networx)
- * Jim Garlick (Lawrence Livermore National Labs)
+ * Eric Biederman (Linux Networx)
+ * Tom Zimmerman (Linux Networx)
+ * Jim Garlick (Lawrence Livermore National Labs)
* Dave Peterson (Lawrence Livermore National Labs)
* That One Guy (Some other place)
* Wang Zhenyu (intel.com)
@@ -22,7 +22,6 @@
*
*/
-
#include <linux/config.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -31,6 +30,11 @@
#include <linux/slab.h>
#include "edac_mc.h"
+#define e7xxx_printk(level, fmt, arg...) \
+ edac_printk(level, "e7xxx", fmt, ##arg)
+
+#define e7xxx_mc_printk(mci, level, fmt, arg...) \
+ edac_mc_chipset_printk(mci, level, "e7xxx", fmt, ##arg)
#ifndef PCI_DEVICE_ID_INTEL_7205_0
#define PCI_DEVICE_ID_INTEL_7205_0 0x255d
@@ -64,11 +68,9 @@
#define PCI_DEVICE_ID_INTEL_7505_1_ERR 0x2551
#endif /* PCI_DEVICE_ID_INTEL_7505_1_ERR */
-
#define E7XXX_NR_CSROWS 8 /* number of csrows */
#define E7XXX_NR_DIMMS 8 /* FIXME - is this correct? */
-
/* E7XXX register addresses - device 0 function 0 */
#define E7XXX_DRB 0x60 /* DRAM row boundary register (8b) */
#define E7XXX_DRA 0x70 /* DRAM row attribute register (8b) */
@@ -118,7 +120,6 @@ enum e7xxx_chips {
E7205,
};
-
struct e7xxx_pvt {
struct pci_dev *bridge_ck;
u32 tolm;
@@ -127,13 +128,11 @@ struct e7xxx_pvt {
const struct e7xxx_dev_info *dev_info;
};
-
struct e7xxx_dev_info {
u16 err_dev;
const char *ctl_name;
};
-
struct e7xxx_error_info {
u8 dram_ferr;
u8 dram_nerr;
@@ -144,108 +143,110 @@ struct e7xxx_error_info {
static const struct e7xxx_dev_info e7xxx_devs[] = {
[E7500] = {
- .err_dev = PCI_DEVICE_ID_INTEL_7500_1_ERR,
- .ctl_name = "E7500"},
+ .err_dev = PCI_DEVICE_ID_INTEL_7500_1_ERR,
+ .ctl_name = "E7500"
+ },
[E7501] = {
- .err_dev = PCI_DEVICE_ID_INTEL_7501_1_ERR,
- .ctl_name = "E7501"},
+ .err_dev = PCI_DEVICE_ID_INTEL_7501_1_ERR,
+ .ctl_name = "E7501"
+ },
[E7505] = {
- .err_dev = PCI_DEVICE_ID_INTEL_7505_1_ERR,
- .ctl_name = "E7505"},
+ .err_dev = PCI_DEVICE_ID_INTEL_7505_1_ERR,
+ .ctl_name = "E7505"
+ },
[E7205] = {
- .err_dev = PCI_DEVICE_ID_INTEL_7205_1_ERR,
- .ctl_name = "E7205"},
+ .err_dev = PCI_DEVICE_ID_INTEL_7205_1_ERR,
+ .ctl_name = "E7205"
+ },
};
-
/* FIXME - is this valid for both SECDED and S4ECD4ED? */
static inline int e7xxx_find_channel(u16 syndrome)
{
- debugf3("MC: " __FILE__ ": %s()\n", __func__);
+ debugf3("%s()\n", __func__);
if ((syndrome & 0xff00) == 0)
return 0;
+
if ((syndrome & 0x00ff) == 0)
return 1;
+
if ((syndrome & 0xf000) == 0 || (syndrome & 0x0f00) == 0)
return 0;
+
return 1;
}
-
-static unsigned long
-ctl_page_to_phys(struct mem_ctl_info *mci, unsigned long page)
+static unsigned long ctl_page_to_phys(struct mem_ctl_info *mci,
+ unsigned long page)
{
u32 remap;
struct e7xxx_pvt *pvt = (struct e7xxx_pvt *) mci->pvt_info;
- debugf3("MC: " __FILE__ ": %s()\n", __func__);
+ debugf3("%s()\n", __func__);
if ((page < pvt->tolm) ||
- ((page >= 0x100000) && (page < pvt->remapbase)))
+ ((page >= 0x100000) && (page < pvt->remapbase)))
return page;
+
remap = (page - pvt->tolm) + pvt->remapbase;
+
if (remap < pvt->remaplimit)
return remap;
- printk(KERN_ERR "Invalid page %lx - out of range\n", page);
+
+ e7xxx_printk(KERN_ERR, "Invalid page %lx - out of range\n", page);
return pvt->tolm - 1;
}
-
-static void process_ce(struct mem_ctl_info *mci, struct e7xxx_error_info *info)
+static void process_ce(struct mem_ctl_info *mci,
+ struct e7xxx_error_info *info)
{
u32 error_1b, page;
u16 syndrome;
int row;
int channel;
- debugf3("MC: " __FILE__ ": %s()\n", __func__);
-
+ debugf3("%s()\n", __func__);
/* read the error address */
error_1b = info->dram_celog_add;
/* FIXME - should use PAGE_SHIFT */
- page = error_1b >> 6; /* convert the address to 4k page */
+ page = error_1b >> 6; /* convert the address to 4k page */
/* read the syndrome */
syndrome = info->dram_celog_syndrome;
/* FIXME - check for -1 */
row = edac_mc_find_csrow_by_page(mci, page);
/* convert syndrome to channel */
channel = e7xxx_find_channel(syndrome);
- edac_mc_handle_ce(mci, page, 0, syndrome, row, channel,
- "e7xxx CE");
+ edac_mc_handle_ce(mci, page, 0, syndrome, row, channel, "e7xxx CE");
}
-
static void process_ce_no_info(struct mem_ctl_info *mci)
{
- debugf3("MC: " __FILE__ ": %s()\n", __func__);
+ debugf3("%s()\n", __func__);
edac_mc_handle_ce_no_info(mci, "e7xxx CE log register overflow");
}
-
-static void process_ue(struct mem_ctl_info *mci, struct e7xxx_error_info *info)
+static void process_ue(struct mem_ctl_info *mci,
+ struct e7xxx_error_info *info)
{
u32 error_2b, block_page;
int row;
- debugf3("MC: " __FILE__ ": %s()\n", __func__);
-
+ debugf3("%s()\n", __func__);
/* read the error address */
error_2b = info->dram_uelog_add;
/* FIXME - should use PAGE_SHIFT */
- block_page = error_2b >> 6; /* convert to 4k address */
+ block_page = error_2b >> 6; /* convert to 4k address */
row = edac_mc_find_csrow_by_page(mci, block_page);
edac_mc_handle_ue(mci, block_page, 0, row, "e7xxx UE");
}
-
static void process_ue_no_info(struct mem_ctl_info *mci)
{
- debugf3("MC: " __FILE__ ": %s()\n", __func__);
+ debugf3("%s()\n", __func__);
edac_mc_handle_ue_no_info(mci, "e7xxx UE log register overflow");
}
-
static void e7xxx_get_error_info (struct mem_ctl_info *mci,
struct e7xxx_error_info *info)
{
@@ -253,31 +254,29 @@ static void e7xxx_get_error_info (struct mem_ctl_info *mci,
pvt = (struct e7xxx_pvt *) mci->pvt_info;
pci_read_config_byte(pvt->bridge_ck, E7XXX_DRAM_FERR,
- &info->dram_ferr);
+ &info->dram_ferr);
pci_read_config_byte(pvt->bridge_ck, E7XXX_DRAM_NERR,
- &info->dram_nerr);
+ &info->dram_nerr);
if ((info->dram_ferr & 1) || (info->dram_nerr & 1)) {
pci_read_config_dword(pvt->bridge_ck, E7XXX_DRAM_CELOG_ADD,
- &info->dram_celog_add);
+ &info->dram_celog_add);
pci_read_config_word(pvt->bridge_ck,
- E7XXX_DRAM_CELOG_SYNDROME, &info->dram_celog_syndrome);
+ E7XXX_DRAM_CELOG_SYNDROME,
+ &info->dram_celog_syndrome);
}
if ((info->dram_ferr & 2) || (info->dram_nerr & 2))
pci_read_config_dword(pvt->bridge_ck, E7XXX_DRAM_UELOG_ADD,
- &info->dram_uelog_add);
+ &info->dram_uelog_add);
if (info->dram_ferr & 3)
- pci_write_bits8(pvt->bridge_ck, E7XXX_DRAM_FERR, 0x03,
- 0x03);
+ pci_write_bits8(pvt->bridge_ck, E7XXX_DRAM_FERR, 0x03, 0x03);
if (info->dram_nerr & 3)
- pci_write_bits8(pvt->bridge_ck, E7XXX_DRAM_NERR, 0x03,
- 0x03);
+ pci_write_bits8(pvt->bridge_ck, E7XXX_DRAM_NERR, 0x03, 0x03);
}
-
static int e7xxx_process_error_info (struct mem_ctl_info *mci,
struct e7xxx_error_info *info, int handle_errors)
{
@@ -325,17 +324,15 @@ static int e7xxx_process_error_info (struct mem_ctl_info *mci,
return error_found;
}
-
static void e7xxx_check(struct mem_ctl_info *mci)
{
struct e7xxx_error_info info;
- debugf3("MC: " __FILE__ ": %s()\n", __func__);
+ debugf3("%s()\n", __func__);
e7xxx_get_error_info(mci, &info);
e7xxx_process_error_info(mci, &info, 1);
}
-
static int e7xxx_probe1(struct pci_dev *pdev, int dev_idx)
{
int rc = -ENODEV;
@@ -349,19 +346,20 @@ static int e7xxx_probe1(struct pci_dev *pdev, int dev_idx)
int drc_ddim; /* DRAM Data Integrity Mode 0=none,2=edac */
u32 dra;
unsigned long last_cumul_size;
+ struct e7xxx_error_info discard;
-
- debugf0("MC: " __FILE__ ": %s(): mci\n", __func__);
+ debugf0("%s(): mci\n", __func__);
/* need to find out the number of channels */
pci_read_config_dword(pdev, E7XXX_DRC, &drc);
+
/* only e7501 can be single channel */
if (dev_idx == E7501) {
drc_chan = ((drc >> 22) & 0x1);
drc_drbg = (drc >> 18) & 0x3;
}
- drc_ddim = (drc >> 20) & 0x3;
+ drc_ddim = (drc >> 20) & 0x3;
mci = edac_mc_alloc(sizeof(*pvt), E7XXX_NR_CSROWS, drc_chan + 1);
if (mci == NULL) {
@@ -369,33 +367,31 @@ static int e7xxx_probe1(struct pci_dev *pdev, int dev_idx)
goto fail;
}
- debugf3("MC: " __FILE__ ": %s(): init mci\n", __func__);
-
+ debugf3("%s(): init mci\n", __func__);
mci->mtype_cap = MEM_FLAG_RDDR;
- mci->edac_ctl_cap =
- EDAC_FLAG_NONE | EDAC_FLAG_SECDED | EDAC_FLAG_S4ECD4ED;
+ mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED |
+ EDAC_FLAG_S4ECD4ED;
/* FIXME - what if different memory types are in different csrows? */
- mci->mod_name = BS_MOD_STR;
+ mci->mod_name = EDAC_MOD_STR;
mci->mod_ver = "$Revision: 1.5.2.9 $";
mci->pdev = pdev;
- debugf3("MC: " __FILE__ ": %s(): init pvt\n", __func__);
+ debugf3("%s(): init pvt\n", __func__);
pvt = (struct e7xxx_pvt *) mci->pvt_info;
pvt->dev_info = &e7xxx_devs[dev_idx];
pvt->bridge_ck = pci_get_device(PCI_VENDOR_ID_INTEL,
- pvt->dev_info->err_dev,
- pvt->bridge_ck);
+ pvt->dev_info->err_dev,
+ pvt->bridge_ck);
+
if (!pvt->bridge_ck) {
- printk(KERN_ERR
- "MC: error reporting device not found:"
- "vendor %x device 0x%x (broken BIOS?)\n",
- PCI_VENDOR_ID_INTEL, e7xxx_devs[dev_idx].err_dev);
+ e7xxx_printk(KERN_ERR, "error reporting device not found:"
+ "vendor %x device 0x%x (broken BIOS?)\n",
+ PCI_VENDOR_ID_INTEL, e7xxx_devs[dev_idx].err_dev);
goto fail;
}
- debugf3("MC: " __FILE__ ": %s(): more mci init\n", __func__);
+ debugf3("%s(): more mci init\n", __func__);
mci->ctl_name = pvt->dev_info->ctl_name;
-
mci->edac_check = e7xxx_check;
mci->ctl_page_to_phys = ctl_page_to_phys;
@@ -418,17 +414,18 @@ static int e7xxx_probe1(struct pci_dev *pdev, int dev_idx)
pci_read_config_byte(mci->pdev, E7XXX_DRB + index, &value);
/* convert a 64 or 32 MiB DRB to a page size. */
cumul_size = value << (25 + drc_drbg - PAGE_SHIFT);
- debugf3("MC: " __FILE__ ": %s(): (%d) cumul_size 0x%x\n",
- __func__, index, cumul_size);
+ debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index,
+ cumul_size);
+
if (cumul_size == last_cumul_size)
- continue; /* not populated */
+ continue; /* not populated */
csrow->first_page = last_cumul_size;
csrow->last_page = cumul_size - 1;
csrow->nr_pages = cumul_size - last_cumul_size;
last_cumul_size = cumul_size;
- csrow->grain = 1 << 12; /* 4KiB - resolution of CELOG */
- csrow->mtype = MEM_RDDR; /* only one type supported */
+ csrow->grain = 1 << 12; /* 4KiB - resolution of CELOG */
+ csrow->mtype = MEM_RDDR; /* only one type supported */
csrow->dtype = mem_dev ? DEV_X4 : DEV_X8;
/*
@@ -449,8 +446,7 @@ static int e7xxx_probe1(struct pci_dev *pdev, int dev_idx)
mci->edac_cap |= EDAC_FLAG_NONE;
- debugf3("MC: " __FILE__ ": %s(): tolm, remapbase, remaplimit\n",
- __func__);
+ debugf3("%s(): tolm, remapbase, remaplimit\n", __func__);
/* load the top of low memory, remap base, and remap limit vars */
pci_read_config_word(mci->pdev, E7XXX_TOLM, &pci_data);
pvt->tolm = ((u32) pci_data) << 4;
@@ -458,22 +454,20 @@ static int e7xxx_probe1(struct pci_dev *pdev, int dev_idx)
pvt->remapbase = ((u32) pci_data) << 14;
pci_read_config_word(mci->pdev, E7XXX_REMAPLIMIT, &pci_data);
pvt->remaplimit = ((u32) pci_data) << 14;
- printk("tolm = %x, remapbase = %x, remaplimit = %x\n", pvt->tolm,
- pvt->remapbase, pvt->remaplimit);
+ e7xxx_printk(KERN_INFO,
+ "tolm = %x, remapbase = %x, remaplimit = %x\n", pvt->tolm,
+ pvt->remapbase, pvt->remaplimit);
/* clear any pending errors, or initial state bits */
- pci_write_bits8(pvt->bridge_ck, E7XXX_DRAM_FERR, 0x03, 0x03);
- pci_write_bits8(pvt->bridge_ck, E7XXX_DRAM_NERR, 0x03, 0x03);
+ e7xxx_get_error_info(mci, &discard);
if (edac_mc_add_mc(mci) != 0) {
- debugf3("MC: " __FILE__
- ": %s(): failed edac_mc_add_mc()\n",
- __func__);
+ debugf3("%s(): failed edac_mc_add_mc()\n", __func__);
goto fail;
}
/* get this far and it's successful */
- debugf3("MC: " __FILE__ ": %s(): success\n", __func__);
+ debugf3("%s(): success\n", __func__);
return 0;
fail:
@@ -487,62 +481,67 @@ fail:
}
/* returns count (>= 0), or negative on error */
-static int __devinit
-e7xxx_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+static int __devinit e7xxx_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
- debugf0("MC: " __FILE__ ": %s()\n", __func__);
+ debugf0("%s()\n", __func__);
/* wake up and enable device */
return pci_enable_device(pdev) ?
- -EIO : e7xxx_probe1(pdev, ent->driver_data);
+ -EIO : e7xxx_probe1(pdev, ent->driver_data);
}
-
static void __devexit e7xxx_remove_one(struct pci_dev *pdev)
{
struct mem_ctl_info *mci;
struct e7xxx_pvt *pvt;
- debugf0(__FILE__ ": %s()\n", __func__);
+ debugf0("%s()\n", __func__);
- if (((mci = edac_mc_find_mci_by_pdev(pdev)) != 0) &&
- edac_mc_del_mc(mci)) {
- pvt = (struct e7xxx_pvt *) mci->pvt_info;
- pci_dev_put(pvt->bridge_ck);
- edac_mc_free(mci);
- }
-}
+ if ((mci = edac_mc_del_mc(pdev)) == NULL)
+ return;
+ pvt = (struct e7xxx_pvt *) mci->pvt_info;
+ pci_dev_put(pvt->bridge_ck);
+ edac_mc_free(mci);
+}
static const struct pci_device_id e7xxx_pci_tbl[] __devinitdata = {
- {PCI_VEND_DEV(INTEL, 7205_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- E7205},
- {PCI_VEND_DEV(INTEL, 7500_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- E7500},
- {PCI_VEND_DEV(INTEL, 7501_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- E7501},
- {PCI_VEND_DEV(INTEL, 7505_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- E7505},
- {0,} /* 0 terminated list. */
+ {
+ PCI_VEND_DEV(INTEL, 7205_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ E7205
+ },
+ {
+ PCI_VEND_DEV(INTEL, 7500_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ E7500
+ },
+ {
+ PCI_VEND_DEV(INTEL, 7501_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ E7501
+ },
+ {
+ PCI_VEND_DEV(INTEL, 7505_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ E7505
+ },
+ {
+ 0,
+ } /* 0 terminated list. */
};
MODULE_DEVICE_TABLE(pci, e7xxx_pci_tbl);
-
static struct pci_driver e7xxx_driver = {
- .name = BS_MOD_STR,
+ .name = EDAC_MOD_STR,
.probe = e7xxx_init_one,
.remove = __devexit_p(e7xxx_remove_one),
.id_table = e7xxx_pci_tbl,
};
-
static int __init e7xxx_init(void)
{
return pci_register_driver(&e7xxx_driver);
}
-
static void __exit e7xxx_exit(void)
{
pci_unregister_driver(&e7xxx_driver);
@@ -551,8 +550,7 @@ static void __exit e7xxx_exit(void)
module_init(e7xxx_init);
module_exit(e7xxx_exit);
-
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Linux Networx (http://lnxi.com) Thayne Harbaugh et al\n"
- "Based on.work by Dan Hollis et al");
+	"Based on work by Dan Hollis et al");
MODULE_DESCRIPTION("MC support for Intel e7xxx memory controllers");
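Both chipset drivers now route their messages through per-driver wrappers instead of bare printk(). The wrappers are plain variadic macros, so a call site expands mechanically; with the e7xxx_printk() definition added above:

/* Sketch only: preprocessor expansion of the wrapper introduced above. */
#define e7xxx_printk(level, fmt, arg...) \
	edac_printk(level, "e7xxx", fmt, ##arg)

/* a call such as */
e7xxx_printk(KERN_ERR, "Invalid page %lx - out of range\n", page);

/* expands to */
edac_printk(KERN_ERR, "e7xxx", "Invalid page %lx - out of range\n", page);

/* edac_printk() itself is provided by the EDAC core and is not shown in
 * this patch; only the expansion above is implied by the macro definition.
 */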
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index 9c205274c1c..905f58ba8e1 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -12,7 +12,6 @@
*
*/
-
#include <linux/config.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
@@ -29,14 +28,13 @@
#include <linux/list.h>
#include <linux/sysdev.h>
#include <linux/ctype.h>
-
+#include <linux/kthread.h>
#include <asm/uaccess.h>
#include <asm/page.h>
#include <asm/edac.h>
-
#include "edac_mc.h"
-#define EDAC_MC_VERSION "edac_mc Ver: 2.0.0 " __DATE__
+#define EDAC_MC_VERSION "Ver: 2.0.0 " __DATE__
/* For now, disable the EDAC sysfs code. The sysfs interface that EDAC
* presents to user space needs more thought, and is likely to change
@@ -47,7 +45,7 @@
#ifdef CONFIG_EDAC_DEBUG
/* Values of 0 to 4 will generate output */
int edac_debug_level = 1;
-EXPORT_SYMBOL(edac_debug_level);
+EXPORT_SYMBOL_GPL(edac_debug_level);
#endif
 /* EDAC Controls, settable by module parameter, and sysfs */
@@ -64,13 +62,14 @@ static atomic_t pci_parity_count = ATOMIC_INIT(0);
static DECLARE_MUTEX(mem_ctls_mutex);
static struct list_head mc_devices = LIST_HEAD_INIT(mc_devices);
+static struct task_struct *edac_thread;
+
/* Structure of the whitelist and blacklist arrays */
struct edac_pci_device_list {
unsigned int vendor; /* Vendor ID */
 	unsigned int device;	/* Device ID */
};
-
#define MAX_LISTED_PCI_DEVICES 32
/* List of PCI devices (vendor-id:device-id) that should be skipped */
@@ -123,7 +122,6 @@ static const char *edac_caps[] = {
[EDAC_S16ECD16ED] = "S16ECD16ED"
};
-
/* sysfs object: /sys/devices/system/edac */
static struct sysdev_class edac_class = {
set_kset_name("edac"),
@@ -136,9 +134,15 @@ static struct sysdev_class edac_class = {
static struct kobject edac_memctrl_kobj;
static struct kobject edac_pci_kobj;
+/* We use these to wait for the reference counts on edac_memctrl_kobj and
+ * edac_pci_kobj to reach 0.
+ */
+static struct completion edac_memctrl_kobj_complete;
+static struct completion edac_pci_kobj_complete;
+
/*
* /sys/devices/system/edac/mc;
- * data structures and methods
+ * data structures and methods
*/
#if 0
static ssize_t memctrl_string_show(void *ptr, char *buffer)
@@ -165,33 +169,34 @@ static ssize_t memctrl_int_store(void *ptr, const char *buffer, size_t count)
}
struct memctrl_dev_attribute {
- struct attribute attr;
- void *value;
+ struct attribute attr;
+ void *value;
ssize_t (*show)(void *,char *);
ssize_t (*store)(void *, const char *, size_t);
};
/* Set of show/store abstract level functions for memory control object */
-static ssize_t
-memctrl_dev_show(struct kobject *kobj, struct attribute *attr, char *buffer)
+static ssize_t memctrl_dev_show(struct kobject *kobj,
+ struct attribute *attr, char *buffer)
{
struct memctrl_dev_attribute *memctrl_dev;
memctrl_dev = (struct memctrl_dev_attribute*)attr;
if (memctrl_dev->show)
return memctrl_dev->show(memctrl_dev->value, buffer);
+
return -EIO;
}
-static ssize_t
-memctrl_dev_store(struct kobject *kobj, struct attribute *attr,
- const char *buffer, size_t count)
+static ssize_t memctrl_dev_store(struct kobject *kobj, struct attribute *attr,
+ const char *buffer, size_t count)
{
struct memctrl_dev_attribute *memctrl_dev;
memctrl_dev = (struct memctrl_dev_attribute*)attr;
if (memctrl_dev->store)
return memctrl_dev->store(memctrl_dev->value, buffer, count);
+
return -EIO;
}
@@ -227,7 +232,6 @@ MEMCTRL_ATTR(log_ue,S_IRUGO|S_IWUSR,memctrl_int_show,memctrl_int_store);
MEMCTRL_ATTR(log_ce,S_IRUGO|S_IWUSR,memctrl_int_show,memctrl_int_store);
MEMCTRL_ATTR(poll_msec,S_IRUGO|S_IWUSR,memctrl_int_show,memctrl_int_store);
-
/* Base Attributes of the memory ECC object */
static struct memctrl_dev_attribute *memctrl_attr[] = {
&attr_panic_on_ue,
@@ -240,13 +244,14 @@ static struct memctrl_dev_attribute *memctrl_attr[] = {
/* Main MC kobject release() function */
static void edac_memctrl_master_release(struct kobject *kobj)
{
- debugf1("EDAC MC: " __FILE__ ": %s()\n", __func__);
+ debugf1("%s()\n", __func__);
+ complete(&edac_memctrl_kobj_complete);
}
static struct kobj_type ktype_memctrl = {
- .release = edac_memctrl_master_release,
- .sysfs_ops = &memctrlfs_ops,
- .default_attrs = (struct attribute **) memctrl_attr,
+ .release = edac_memctrl_master_release,
+ .sysfs_ops = &memctrlfs_ops,
+ .default_attrs = (struct attribute **) memctrl_attr,
};
#endif /* DISABLE_EDAC_SYSFS */
@@ -268,32 +273,31 @@ static int edac_sysfs_memctrl_setup(void)
{
int err=0;
- debugf1("MC: " __FILE__ ": %s()\n", __func__);
+ debugf1("%s()\n", __func__);
/* create the /sys/devices/system/edac directory */
err = sysdev_class_register(&edac_class);
+
if (!err) {
/* Init the MC's kobject */
memset(&edac_memctrl_kobj, 0, sizeof (edac_memctrl_kobj));
- kobject_init(&edac_memctrl_kobj);
-
edac_memctrl_kobj.parent = &edac_class.kset.kobj;
edac_memctrl_kobj.ktype = &ktype_memctrl;
/* generate sysfs "..../edac/mc" */
err = kobject_set_name(&edac_memctrl_kobj,"mc");
+
if (!err) {
/* FIXME: maybe new sysdev_create_subdir() */
err = kobject_register(&edac_memctrl_kobj);
- if (err) {
+
+ if (err)
debugf1("Failed to register '.../edac/mc'\n");
- } else {
+ else
debugf1("Registered '.../edac/mc' kobject\n");
- }
}
- } else {
- debugf1(KERN_WARNING "__FILE__ %s() error=%d\n", __func__,err);
- }
+ } else
+ debugf1("%s() error=%d\n", __func__, err);
return err;
}
@@ -308,11 +312,12 @@ static void edac_sysfs_memctrl_teardown(void)
#ifndef DISABLE_EDAC_SYSFS
debugf0("MC: " __FILE__ ": %s()\n", __func__);
- /* Unregister the MC's kobject */
+ /* Unregister the MC's kobject and wait for reference count to reach
+ * 0.
+ */
+ init_completion(&edac_memctrl_kobj_complete);
kobject_unregister(&edac_memctrl_kobj);
-
- /* release the master edac mc kobject */
- kobject_put(&edac_memctrl_kobj);
+ wait_for_completion(&edac_memctrl_kobj_complete);
/* Unregister the 'edac' object */
sysdev_class_unregister(&edac_class);
@@ -331,7 +336,6 @@ struct list_control {
int *count;
};
-
#if 0
 /* Output the list as: vendor_id:device_id<,vendor_id:device_id> */
static ssize_t edac_pci_list_string_show(void *ptr, char *buffer)
@@ -356,7 +360,6 @@ static ssize_t edac_pci_list_string_show(void *ptr, char *buffer)
}
len += snprintf(p + len,(PAGE_SIZE-len), "\n");
-
return (ssize_t) len;
}
@@ -378,7 +381,7 @@ static int parse_one_device(const char **s,const char **e,
/* if null byte, we are done */
if (!**s) {
- (*s)++; /* keep *s moving */
+ (*s)++; /* keep *s moving */
return 0;
}
@@ -395,6 +398,7 @@ static int parse_one_device(const char **s,const char **e,
/* parse vendor_id */
runner = *s;
+
while (runner < *e) {
/* scan for vendor:device delimiter */
if (*runner == ':') {
@@ -402,6 +406,7 @@ static int parse_one_device(const char **s,const char **e,
runner = p + 1;
break;
}
+
runner++;
}
@@ -417,12 +422,11 @@ static int parse_one_device(const char **s,const char **e,
}
*s = runner;
-
return 1;
}
static ssize_t edac_pci_list_string_store(void *ptr, const char *buffer,
- size_t count)
+ size_t count)
{
struct list_control *listctl;
struct edac_pci_device_list *list;
@@ -432,14 +436,12 @@ static ssize_t edac_pci_list_string_store(void *ptr, const char *buffer,
s = (char*)buffer;
e = s + count;
-
listctl = ptr;
list = listctl->list;
index = listctl->count;
-
*index = 0;
- while (*index < MAX_LISTED_PCI_DEVICES) {
+ while (*index < MAX_LISTED_PCI_DEVICES) {
if (parse_one_device(&s,&e,&vendor_id,&device_id)) {
list[ *index ].vendor = vendor_id;
list[ *index ].device = device_id;
@@ -472,15 +474,15 @@ static ssize_t edac_pci_int_store(void *ptr, const char *buffer, size_t count)
}
struct edac_pci_dev_attribute {
- struct attribute attr;
- void *value;
+ struct attribute attr;
+ void *value;
ssize_t (*show)(void *,char *);
ssize_t (*store)(void *, const char *,size_t);
};
/* Set of show/store abstract level functions for PCI Parity object */
static ssize_t edac_pci_dev_show(struct kobject *kobj, struct attribute *attr,
- char *buffer)
+ char *buffer)
{
struct edac_pci_dev_attribute *edac_pci_dev;
edac_pci_dev= (struct edac_pci_dev_attribute*)attr;
@@ -490,8 +492,8 @@ static ssize_t edac_pci_dev_show(struct kobject *kobj, struct attribute *attr,
return -EIO;
}
-static ssize_t edac_pci_dev_store(struct kobject *kobj, struct attribute *attr,
- const char *buffer, size_t count)
+static ssize_t edac_pci_dev_store(struct kobject *kobj,
+ struct attribute *attr, const char *buffer, size_t count)
{
struct edac_pci_dev_attribute *edac_pci_dev;
edac_pci_dev= (struct edac_pci_dev_attribute*)attr;
@@ -506,7 +508,6 @@ static struct sysfs_ops edac_pci_sysfs_ops = {
.store = edac_pci_dev_store
};
-
#define EDAC_PCI_ATTR(_name,_mode,_show,_store) \
struct edac_pci_dev_attribute edac_pci_attr_##_name = { \
.attr = {.name = __stringify(_name), .mode = _mode }, \
@@ -549,9 +550,11 @@ EDAC_PCI_STRING_ATTR(pci_parity_blacklist,
#endif
/* PCI Parity control files */
-EDAC_PCI_ATTR(check_pci_parity,S_IRUGO|S_IWUSR,edac_pci_int_show,edac_pci_int_store);
-EDAC_PCI_ATTR(panic_on_pci_parity,S_IRUGO|S_IWUSR,edac_pci_int_show,edac_pci_int_store);
-EDAC_PCI_ATTR(pci_parity_count,S_IRUGO,edac_pci_int_show,NULL);
+EDAC_PCI_ATTR(check_pci_parity, S_IRUGO|S_IWUSR, edac_pci_int_show,
+ edac_pci_int_store);
+EDAC_PCI_ATTR(panic_on_pci_parity, S_IRUGO|S_IWUSR, edac_pci_int_show,
+ edac_pci_int_store);
+EDAC_PCI_ATTR(pci_parity_count, S_IRUGO, edac_pci_int_show, NULL);
/* Base Attributes of the memory ECC object */
static struct edac_pci_dev_attribute *edac_pci_attr[] = {
@@ -564,13 +567,14 @@ static struct edac_pci_dev_attribute *edac_pci_attr[] = {
/* No memory to release */
static void edac_pci_release(struct kobject *kobj)
{
- debugf1("EDAC PCI: " __FILE__ ": %s()\n", __func__);
+ debugf1("%s()\n", __func__);
+ complete(&edac_pci_kobj_complete);
}
static struct kobj_type ktype_edac_pci = {
- .release = edac_pci_release,
- .sysfs_ops = &edac_pci_sysfs_ops,
- .default_attrs = (struct attribute **) edac_pci_attr,
+ .release = edac_pci_release,
+ .sysfs_ops = &edac_pci_sysfs_ops,
+ .default_attrs = (struct attribute **) edac_pci_attr,
};
#endif /* DISABLE_EDAC_SYSFS */
@@ -588,24 +592,24 @@ static int edac_sysfs_pci_setup(void)
{
int err;
- debugf1("MC: " __FILE__ ": %s()\n", __func__);
+ debugf1("%s()\n", __func__);
memset(&edac_pci_kobj, 0, sizeof(edac_pci_kobj));
-
- kobject_init(&edac_pci_kobj);
edac_pci_kobj.parent = &edac_class.kset.kobj;
edac_pci_kobj.ktype = &ktype_edac_pci;
-
err = kobject_set_name(&edac_pci_kobj, "pci");
+
if (!err) {
 		/* Instantiate the csrow object */
/* FIXME: maybe new sysdev_create_subdir() */
err = kobject_register(&edac_pci_kobj);
+
if (err)
debugf1("Failed to register '.../edac/pci'\n");
else
debugf1("Registered '.../edac/pci' kobject\n");
}
+
return err;
}
#endif /* DISABLE_EDAC_SYSFS */
@@ -613,10 +617,10 @@ static int edac_sysfs_pci_setup(void)
static void edac_sysfs_pci_teardown(void)
{
#ifndef DISABLE_EDAC_SYSFS
- debugf0("MC: " __FILE__ ": %s()\n", __func__);
-
+ debugf0("%s()\n", __func__);
+ init_completion(&edac_pci_kobj_complete);
kobject_unregister(&edac_pci_kobj);
- kobject_put(&edac_pci_kobj);
+ wait_for_completion(&edac_pci_kobj_complete);
#endif
}
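The extra kobject_put() calls are dropped here and below in favor of a completion handshake: each kobject's release() method now signals a completion, and the teardown path blocks on it after kobject_unregister(), so teardown cannot proceed while sysfs still holds a reference. A minimal sketch of the pattern (the names are invented; only the init_completion()/complete()/wait_for_completion() sequencing mirrors the code above):

/* Sketch only: the release/complete handshake used for the EDAC kobjects. */
#include <linux/kobject.h>
#include <linux/completion.h>

static struct completion example_kobj_complete;

static void example_release(struct kobject *kobj)
{
	/* called by the kobject core when the last reference is dropped */
	complete(&example_kobj_complete);
}

static struct kobj_type example_ktype = {
	.release	= example_release,	/* wired up before kobject_register() */
};

static void example_teardown(struct kobject *kobj)
{
	init_completion(&example_kobj_complete);
	kobject_unregister(kobj);			/* drops this code's reference */
	wait_for_completion(&example_kobj_complete);	/* wait for example_release() */
}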
@@ -633,6 +637,7 @@ static ssize_t csrow_ch0_dimm_label_show(struct csrow_info *csrow, char *data)
size = snprintf(data, EDAC_MC_LABEL_LEN,"%s\n",
csrow->channels[0].label);
}
+
return size;
}
@@ -644,11 +649,12 @@ static ssize_t csrow_ch1_dimm_label_show(struct csrow_info *csrow, char *data)
size = snprintf(data, EDAC_MC_LABEL_LEN, "%s\n",
csrow->channels[1].label);
}
+
return size;
}
static ssize_t csrow_ch0_dimm_label_store(struct csrow_info *csrow,
- const char *data, size_t size)
+ const char *data, size_t size)
{
ssize_t max_size = 0;
@@ -657,11 +663,12 @@ static ssize_t csrow_ch0_dimm_label_store(struct csrow_info *csrow,
strncpy(csrow->channels[0].label, data, max_size);
csrow->channels[0].label[max_size] = '\0';
}
+
return size;
}
static ssize_t csrow_ch1_dimm_label_store(struct csrow_info *csrow,
- const char *data, size_t size)
+ const char *data, size_t size)
{
ssize_t max_size = 0;
@@ -670,6 +677,7 @@ static ssize_t csrow_ch1_dimm_label_store(struct csrow_info *csrow,
strncpy(csrow->channels[1].label, data, max_size);
csrow->channels[1].label[max_size] = '\0';
}
+
return max_size;
}
@@ -690,6 +698,7 @@ static ssize_t csrow_ch0_ce_count_show(struct csrow_info *csrow, char *data)
if (csrow->nr_channels > 0) {
size = sprintf(data,"%u\n", csrow->channels[0].ce_count);
}
+
return size;
}
@@ -700,6 +709,7 @@ static ssize_t csrow_ch1_ce_count_show(struct csrow_info *csrow, char *data)
if (csrow->nr_channels > 1) {
size = sprintf(data,"%u\n", csrow->channels[1].ce_count);
}
+
return size;
}
@@ -724,7 +734,7 @@ static ssize_t csrow_edac_mode_show(struct csrow_info *csrow, char *data)
}
struct csrowdev_attribute {
- struct attribute attr;
+ struct attribute attr;
ssize_t (*show)(struct csrow_info *,char *);
ssize_t (*store)(struct csrow_info *, const char *,size_t);
};
@@ -734,24 +744,26 @@ struct csrowdev_attribute {
/* Set of show/store higher level functions for csrow objects */
static ssize_t csrowdev_show(struct kobject *kobj, struct attribute *attr,
- char *buffer)
+ char *buffer)
{
struct csrow_info *csrow = to_csrow(kobj);
struct csrowdev_attribute *csrowdev_attr = to_csrowdev_attr(attr);
if (csrowdev_attr->show)
return csrowdev_attr->show(csrow, buffer);
+
return -EIO;
}
static ssize_t csrowdev_store(struct kobject *kobj, struct attribute *attr,
- const char *buffer, size_t count)
+ const char *buffer, size_t count)
{
struct csrow_info *csrow = to_csrow(kobj);
struct csrowdev_attribute * csrowdev_attr = to_csrowdev_attr(attr);
if (csrowdev_attr->store)
return csrowdev_attr->store(csrow, buffer, count);
+
return -EIO;
}
@@ -785,7 +797,6 @@ CSROWDEV_ATTR(ch1_dimm_label,S_IRUGO|S_IWUSR,
csrow_ch1_dimm_label_show,
csrow_ch1_dimm_label_store);
-
/* Attributes of the CSROW<id> object */
static struct csrowdev_attribute *csrow_attr[] = {
&attr_dev_type,
@@ -801,40 +812,43 @@ static struct csrowdev_attribute *csrow_attr[] = {
NULL,
};
-
/* No memory to release */
static void edac_csrow_instance_release(struct kobject *kobj)
{
- debugf1("EDAC MC: " __FILE__ ": %s()\n", __func__);
+ struct csrow_info *cs;
+
+ debugf1("%s()\n", __func__);
+ cs = container_of(kobj, struct csrow_info, kobj);
+ complete(&cs->kobj_complete);
}
static struct kobj_type ktype_csrow = {
- .release = edac_csrow_instance_release,
- .sysfs_ops = &csrowfs_ops,
- .default_attrs = (struct attribute **) csrow_attr,
+ .release = edac_csrow_instance_release,
+ .sysfs_ops = &csrowfs_ops,
+ .default_attrs = (struct attribute **) csrow_attr,
};
 /* Create a CSROW object under specified edac_mc_device */
static int edac_create_csrow_object(struct kobject *edac_mci_kobj,
- struct csrow_info *csrow, int index )
+ struct csrow_info *csrow, int index)
{
int err = 0;
- debugf0("MC: " __FILE__ ": %s()\n", __func__);
-
+ debugf0("%s()\n", __func__);
memset(&csrow->kobj, 0, sizeof(csrow->kobj));
/* generate ..../edac/mc/mc<id>/csrow<index> */
- kobject_init(&csrow->kobj);
csrow->kobj.parent = edac_mci_kobj;
csrow->kobj.ktype = &ktype_csrow;
/* name this instance of csrow<id> */
err = kobject_set_name(&csrow->kobj,"csrow%d",index);
+
if (!err) {
 		/* Instantiate the csrow object */
err = kobject_register(&csrow->kobj);
+
if (err)
debugf0("Failed to register CSROW%d\n",index);
else
@@ -846,8 +860,8 @@ static int edac_create_csrow_object(struct kobject *edac_mci_kobj,
/* sysfs data structures and methods for the MCI kobjects */
-static ssize_t mci_reset_counters_store(struct mem_ctl_info *mci,
- const char *data, size_t count )
+static ssize_t mci_reset_counters_store(struct mem_ctl_info *mci,
+ const char *data, size_t count)
{
int row, chan;
@@ -855,16 +869,18 @@ static ssize_t mci_reset_counters_store(struct mem_ctl_info *mci,
mci->ce_noinfo_count = 0;
mci->ue_count = 0;
mci->ce_count = 0;
+
for (row = 0; row < mci->nr_csrows; row++) {
struct csrow_info *ri = &mci->csrows[row];
ri->ue_count = 0;
ri->ce_count = 0;
+
for (chan = 0; chan < ri->nr_channels; chan++)
ri->channels[chan].ce_count = 0;
}
- mci->start_time = jiffies;
+ mci->start_time = jiffies;
return count;
}
@@ -922,18 +938,16 @@ static ssize_t mci_edac_capability_show(struct mem_ctl_info *mci, char *data)
p += mci_output_edac_cap(p,mci->edac_ctl_cap);
p += sprintf(p, "\n");
-
return p - data;
}
static ssize_t mci_edac_current_capability_show(struct mem_ctl_info *mci,
- char *data)
+ char *data)
{
char *p = data;
p += mci_output_edac_cap(p,mci->edac_cap);
p += sprintf(p, "\n");
-
return p - data;
}
@@ -950,13 +964,13 @@ static int mci_output_mtype_cap(char *buf, unsigned long mtype_cap)
return p - buf;
}
-static ssize_t mci_supported_mem_type_show(struct mem_ctl_info *mci, char *data)
+static ssize_t mci_supported_mem_type_show(struct mem_ctl_info *mci,
+ char *data)
{
char *p = data;
p += mci_output_mtype_cap(p,mci->mtype_cap);
p += sprintf(p, "\n");
-
return p - data;
}
@@ -970,6 +984,7 @@ static ssize_t mci_size_mb_show(struct mem_ctl_info *mci, char *data)
if (!csrow->nr_pages)
continue;
+
total_pages += csrow->nr_pages;
}
@@ -977,7 +992,7 @@ static ssize_t mci_size_mb_show(struct mem_ctl_info *mci, char *data)
}
struct mcidev_attribute {
- struct attribute attr;
+ struct attribute attr;
ssize_t (*show)(struct mem_ctl_info *,char *);
ssize_t (*store)(struct mem_ctl_info *, const char *,size_t);
};
@@ -986,30 +1001,32 @@ struct mcidev_attribute {
#define to_mcidev_attr(a) container_of(a, struct mcidev_attribute, attr)
static ssize_t mcidev_show(struct kobject *kobj, struct attribute *attr,
- char *buffer)
+ char *buffer)
{
struct mem_ctl_info *mem_ctl_info = to_mci(kobj);
struct mcidev_attribute * mcidev_attr = to_mcidev_attr(attr);
if (mcidev_attr->show)
return mcidev_attr->show(mem_ctl_info, buffer);
+
return -EIO;
}
static ssize_t mcidev_store(struct kobject *kobj, struct attribute *attr,
- const char *buffer, size_t count)
+ const char *buffer, size_t count)
{
struct mem_ctl_info *mem_ctl_info = to_mci(kobj);
struct mcidev_attribute * mcidev_attr = to_mcidev_attr(attr);
if (mcidev_attr->store)
return mcidev_attr->store(mem_ctl_info, buffer, count);
+
return -EIO;
}
static struct sysfs_ops mci_ops = {
- .show = mcidev_show,
- .store = mcidev_store
+ .show = mcidev_show,
+ .store = mcidev_store
};
#define MCIDEV_ATTR(_name,_mode,_show,_store) \
@@ -1037,7 +1054,6 @@ MCIDEV_ATTR(edac_current_capability,S_IRUGO,
MCIDEV_ATTR(supported_mem_type,S_IRUGO,
mci_supported_mem_type_show,NULL);
-
static struct mcidev_attribute *mci_attr[] = {
&mci_attr_reset_counters,
&mci_attr_module_name,
@@ -1054,25 +1070,22 @@ static struct mcidev_attribute *mci_attr[] = {
NULL
};
-
/*
* Release of a MC controlling instance
*/
static void edac_mci_instance_release(struct kobject *kobj)
{
struct mem_ctl_info *mci;
- mci = container_of(kobj,struct mem_ctl_info,edac_mci_kobj);
- debugf0("MC: " __FILE__ ": %s() idx=%d calling kfree\n",
- __func__, mci->mc_idx);
-
- kfree(mci);
+ mci = to_mci(kobj);
+ debugf0("%s() idx=%d\n", __func__, mci->mc_idx);
+ complete(&mci->kobj_complete);
}
static struct kobj_type ktype_mci = {
- .release = edac_mci_instance_release,
- .sysfs_ops = &mci_ops,
- .default_attrs = (struct attribute **) mci_attr,
+ .release = edac_mci_instance_release,
+ .sysfs_ops = &mci_ops,
+ .default_attrs = (struct attribute **) mci_attr,
};
#endif /* DISABLE_EDAC_SYSFS */
@@ -1099,13 +1112,12 @@ static int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
struct csrow_info *csrow;
struct kobject *edac_mci_kobj=&mci->edac_mci_kobj;
- debugf0("MC: " __FILE__ ": %s() idx=%d\n", __func__, mci->mc_idx);
-
+ debugf0("%s() idx=%d\n", __func__, mci->mc_idx);
memset(edac_mci_kobj, 0, sizeof(*edac_mci_kobj));
- kobject_init(edac_mci_kobj);
/* set the name of the mc<id> object */
err = kobject_set_name(edac_mci_kobj,"mc%d",mci->mc_idx);
+
if (err)
return err;
@@ -1115,50 +1127,48 @@ static int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
/* register the mc<id> kobject */
err = kobject_register(edac_mci_kobj);
+
if (err)
return err;
/* create a symlink for the device */
err = sysfs_create_link(edac_mci_kobj, &mci->pdev->dev.kobj,
EDAC_DEVICE_SYMLINK);
- if (err) {
- kobject_unregister(edac_mci_kobj);
- return err;
- }
+
+ if (err)
+ goto fail0;
/* Make directories for each CSROW object
* under the mc<id> kobject
*/
for (i = 0; i < mci->nr_csrows; i++) {
-
csrow = &mci->csrows[i];
/* Only expose populated CSROWs */
if (csrow->nr_pages > 0) {
err = edac_create_csrow_object(edac_mci_kobj,csrow,i);
+
if (err)
- goto fail;
+ goto fail1;
}
}
- /* Mark this MCI instance as having sysfs entries */
- mci->sysfs_active = MCI_SYSFS_ACTIVE;
-
return 0;
-
/* CSROW error: backout what has already been registered, */
-fail:
+fail1:
for ( i--; i >= 0; i--) {
if (csrow->nr_pages > 0) {
+ init_completion(&csrow->kobj_complete);
kobject_unregister(&mci->csrows[i].kobj);
- kobject_put(&mci->csrows[i].kobj);
+ wait_for_completion(&csrow->kobj_complete);
}
}
+fail0:
+ init_completion(&mci->kobj_complete);
kobject_unregister(edac_mci_kobj);
- kobject_put(edac_mci_kobj);
-
+ wait_for_completion(&mci->kobj_complete);
return err;
}
#endif /* DISABLE_EDAC_SYSFS */
@@ -1171,20 +1181,21 @@ static void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci)
#ifndef DISABLE_EDAC_SYSFS
int i;
- debugf0("MC: " __FILE__ ": %s()\n", __func__);
+ debugf0("%s()\n", __func__);
/* remove all csrow kobjects */
for (i = 0; i < mci->nr_csrows; i++) {
- if (mci->csrows[i].nr_pages > 0) {
+ if (mci->csrows[i].nr_pages > 0) {
+ init_completion(&mci->csrows[i].kobj_complete);
kobject_unregister(&mci->csrows[i].kobj);
- kobject_put(&mci->csrows[i].kobj);
+ wait_for_completion(&mci->csrows[i].kobj_complete);
}
}
sysfs_remove_link(&mci->edac_mci_kobj, EDAC_DEVICE_SYMLINK);
-
+ init_completion(&mci->kobj_complete);
kobject_unregister(&mci->edac_mci_kobj);
- kobject_put(&mci->edac_mci_kobj);
+ wait_for_completion(&mci->kobj_complete);
#endif /* DISABLE_EDAC_SYSFS */
}
@@ -1192,8 +1203,6 @@ static void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci)
#ifdef CONFIG_EDAC_DEBUG
-EXPORT_SYMBOL(edac_mc_dump_channel);
-
void edac_mc_dump_channel(struct channel_info *chan)
{
debugf4("\tchannel = %p\n", chan);
@@ -1202,9 +1211,7 @@ void edac_mc_dump_channel(struct channel_info *chan)
debugf4("\tchannel->label = '%s'\n", chan->label);
debugf4("\tchannel->csrow = %p\n\n", chan->csrow);
}
-
-
-EXPORT_SYMBOL(edac_mc_dump_csrow);
+EXPORT_SYMBOL_GPL(edac_mc_dump_channel);
void edac_mc_dump_csrow(struct csrow_info *csrow)
{
@@ -1220,9 +1227,7 @@ void edac_mc_dump_csrow(struct csrow_info *csrow)
debugf4("\tcsrow->channels = %p\n", csrow->channels);
debugf4("\tcsrow->mci = %p\n\n", csrow->mci);
}
-
-
-EXPORT_SYMBOL(edac_mc_dump_mci);
+EXPORT_SYMBOL_GPL(edac_mc_dump_csrow);
void edac_mc_dump_mci(struct mem_ctl_info *mci)
{
@@ -1238,9 +1243,9 @@ void edac_mc_dump_mci(struct mem_ctl_info *mci)
mci->mod_name, mci->ctl_name);
debugf3("\tpvt_info = %p\n\n", mci->pvt_info);
}
+EXPORT_SYMBOL_GPL(edac_mc_dump_mci);
-
-#endif /* CONFIG_EDAC_DEBUG */
+#endif /* CONFIG_EDAC_DEBUG */
/* 'ptr' points to a possibly unaligned item X such that sizeof(X) is 'size'.
* Adjust 'ptr' so that its alignment is at least as stringent as what the
@@ -1249,7 +1254,7 @@ void edac_mc_dump_mci(struct mem_ctl_info *mci)
* If 'size' is a constant, the compiler will optimize this whole function
* down to either a no-op or the addition of a constant to the value of 'ptr'.
*/
-static inline char * align_ptr (void *ptr, unsigned size)
+static inline char * align_ptr(void *ptr, unsigned size)
{
unsigned align, r;
@@ -1276,9 +1281,6 @@ static inline char * align_ptr (void *ptr, unsigned size)
return (char *) (((unsigned long) ptr) + align - r);
}
-
-EXPORT_SYMBOL(edac_mc_alloc);
-
/**
* edac_mc_alloc: Allocate a struct mem_ctl_info structure
* @size_pvt: size of private storage needed
@@ -1296,7 +1298,7 @@ EXPORT_SYMBOL(edac_mc_alloc);
* struct mem_ctl_info pointer
*/
struct mem_ctl_info *edac_mc_alloc(unsigned sz_pvt, unsigned nr_csrows,
- unsigned nr_chans)
+ unsigned nr_chans)
{
struct mem_ctl_info *mci;
struct csrow_info *csi, *csrow;
@@ -1327,8 +1329,7 @@ struct mem_ctl_info *edac_mc_alloc(unsigned sz_pvt, unsigned nr_csrows,
chi = (struct channel_info *) (((char *) mci) + ((unsigned long) chi));
pvt = sz_pvt ? (((char *) mci) + ((unsigned long) pvt)) : NULL;
- memset(mci, 0, size); /* clear all fields */
-
+ memset(mci, 0, size); /* clear all fields */
mci->csrows = csi;
mci->pvt_info = pvt;
mci->nr_csrows = nr_csrows;
@@ -1350,50 +1351,24 @@ struct mem_ctl_info *edac_mc_alloc(unsigned sz_pvt, unsigned nr_csrows,
return mci;
}
-
-
-EXPORT_SYMBOL(edac_mc_free);
+EXPORT_SYMBOL_GPL(edac_mc_alloc);
/**
* edac_mc_free: Free a previously allocated 'mci' structure
* @mci: pointer to a struct mem_ctl_info structure
- *
- * Free up a previously allocated mci structure
- * A MCI structure can be in 2 states after being allocated
- * by edac_mc_alloc().
- * 1) Allocated in a MC driver's probe, but not yet committed
- * 2) Allocated and committed, by a call to edac_mc_add_mc()
- * edac_mc_add_mc() is the function that adds the sysfs entries
- * thus, this free function must determine which state the 'mci'
- * structure is in, then either free it directly or
- * perform kobject cleanup by calling edac_remove_sysfs_mci_device().
- *
- * VOID Return
*/
void edac_mc_free(struct mem_ctl_info *mci)
{
- /* only if sysfs entries for this mci instance exist
- * do we remove them and defer the actual kfree via
- * the kobject 'release()' callback.
- *
- * Otherwise, do a straight kfree now.
- */
- if (mci->sysfs_active == MCI_SYSFS_ACTIVE)
- edac_remove_sysfs_mci_device(mci);
- else
- kfree(mci);
+ kfree(mci);
}
+EXPORT_SYMBOL_GPL(edac_mc_free);
-
-
-EXPORT_SYMBOL(edac_mc_find_mci_by_pdev);
-
-struct mem_ctl_info *edac_mc_find_mci_by_pdev(struct pci_dev *pdev)
+static struct mem_ctl_info *find_mci_by_pdev(struct pci_dev *pdev)
{
struct mem_ctl_info *mci;
struct list_head *item;
- debugf3("MC: " __FILE__ ": %s()\n", __func__);
+ debugf3("%s()\n", __func__);
list_for_each(item, &mc_devices) {
mci = list_entry(item, struct mem_ctl_info, link);
@@ -1405,7 +1380,7 @@ struct mem_ctl_info *edac_mc_find_mci_by_pdev(struct pci_dev *pdev)
return NULL;
}
-static int add_mc_to_global_list (struct mem_ctl_info *mci)
+static int add_mc_to_global_list(struct mem_ctl_info *mci)
{
struct list_head *item, *insert_before;
struct mem_ctl_info *p;
@@ -1415,11 +1390,12 @@ static int add_mc_to_global_list (struct mem_ctl_info *mci)
mci->mc_idx = 0;
insert_before = &mc_devices;
} else {
- if (edac_mc_find_mci_by_pdev(mci->pdev)) {
- printk(KERN_WARNING
- "EDAC MC: %s (%s) %s %s already assigned %d\n",
- mci->pdev->dev.bus_id, pci_name(mci->pdev),
- mci->mod_name, mci->ctl_name, mci->mc_idx);
+ if (find_mci_by_pdev(mci->pdev)) {
+ edac_printk(KERN_WARNING, EDAC_MC,
+ "%s (%s) %s %s already assigned %d\n",
+ mci->pdev->dev.bus_id,
+ pci_name(mci->pdev), mci->mod_name,
+ mci->ctl_name, mci->mc_idx);
return 1;
}
@@ -1447,12 +1423,26 @@ static int add_mc_to_global_list (struct mem_ctl_info *mci)
return 0;
}
+static void complete_mc_list_del(struct rcu_head *head)
+{
+ struct mem_ctl_info *mci;
+ mci = container_of(head, struct mem_ctl_info, rcu);
+ INIT_LIST_HEAD(&mci->link);
+ complete(&mci->complete);
+}
-EXPORT_SYMBOL(edac_mc_add_mc);
+static void del_mc_from_global_list(struct mem_ctl_info *mci)
+{
+ list_del_rcu(&mci->link);
+ init_completion(&mci->complete);
+ call_rcu(&mci->rcu, complete_mc_list_del);
+ wait_for_completion(&mci->complete);
+}
/**
- * edac_mc_add_mc: Insert the 'mci' structure into the mci global list
+ * edac_mc_add_mc: Insert the 'mci' structure into the mci global list and
+ * create sysfs entries associated with mci structure
* @mci: pointer to the mci structure to be added to the list
*
* Return:
@@ -1463,111 +1453,90 @@ EXPORT_SYMBOL(edac_mc_add_mc);
/* FIXME - should a warning be printed if no error detection? correction? */
int edac_mc_add_mc(struct mem_ctl_info *mci)
{
- int rc = 1;
-
- debugf0("MC: " __FILE__ ": %s()\n", __func__);
+ debugf0("%s()\n", __func__);
#ifdef CONFIG_EDAC_DEBUG
if (edac_debug_level >= 3)
edac_mc_dump_mci(mci);
+
if (edac_debug_level >= 4) {
int i;
for (i = 0; i < mci->nr_csrows; i++) {
int j;
+
edac_mc_dump_csrow(&mci->csrows[i]);
for (j = 0; j < mci->csrows[i].nr_channels; j++)
- edac_mc_dump_channel(&mci->csrows[i].
- channels[j]);
+ edac_mc_dump_channel(
+ &mci->csrows[i].channels[j]);
}
}
#endif
down(&mem_ctls_mutex);
if (add_mc_to_global_list(mci))
- goto finish;
+ goto fail0;
/* set load time so that error rate can be tracked */
mci->start_time = jiffies;
if (edac_create_sysfs_mci_device(mci)) {
- printk(KERN_WARNING
- "EDAC MC%d: failed to create sysfs device\n",
- mci->mc_idx);
- /* FIXME - should there be an error code and unwind? */
- goto finish;
+ edac_mc_printk(mci, KERN_WARNING,
+ "failed to create sysfs device\n");
+ goto fail1;
}
/* Report action taken */
- printk(KERN_INFO
- "EDAC MC%d: Giving out device to %s %s: PCI %s\n",
- mci->mc_idx, mci->mod_name, mci->ctl_name,
- pci_name(mci->pdev));
+ edac_mc_printk(mci, KERN_INFO, "Giving out device to %s %s: PCI %s\n",
+ mci->mod_name, mci->ctl_name, pci_name(mci->pdev));
-
- rc = 0;
-
-finish:
up(&mem_ctls_mutex);
- return rc;
-}
-
-
-
-static void complete_mc_list_del (struct rcu_head *head)
-{
- struct mem_ctl_info *mci;
+ return 0;
- mci = container_of(head, struct mem_ctl_info, rcu);
- INIT_LIST_HEAD(&mci->link);
- complete(&mci->complete);
-}
+fail1:
+ del_mc_from_global_list(mci);
-static void del_mc_from_global_list (struct mem_ctl_info *mci)
-{
- list_del_rcu(&mci->link);
- init_completion(&mci->complete);
- call_rcu(&mci->rcu, complete_mc_list_del);
- wait_for_completion(&mci->complete);
+fail0:
+ up(&mem_ctls_mutex);
+ return 1;
}
-
-EXPORT_SYMBOL(edac_mc_del_mc);
+EXPORT_SYMBOL_GPL(edac_mc_add_mc);
/**
- * edac_mc_del_mc: Remove the specified mci structure from global list
- * @mci: Pointer to struct mem_ctl_info structure
+ * edac_mc_del_mc: Remove sysfs entries for specified mci structure and
+ * remove mci structure from global list
+ * @pdev: Pointer to 'struct pci_dev' representing mci structure to remove.
*
- * Returns:
- * 0 Success
- * 1 Failure
+ * Return pointer to removed mci structure, or NULL if device not found.
*/
-int edac_mc_del_mc(struct mem_ctl_info *mci)
+struct mem_ctl_info * edac_mc_del_mc(struct pci_dev *pdev)
{
- int rc = 1;
+ struct mem_ctl_info *mci;
- debugf0("MC%d: " __FILE__ ": %s()\n", mci->mc_idx, __func__);
+ debugf0("MC: %s()\n", __func__);
down(&mem_ctls_mutex);
+
+ if ((mci = find_mci_by_pdev(pdev)) == NULL) {
+ up(&mem_ctls_mutex);
+ return NULL;
+ }
+
+ edac_remove_sysfs_mci_device(mci);
del_mc_from_global_list(mci);
- printk(KERN_INFO
- "EDAC MC%d: Removed device %d for %s %s: PCI %s\n",
- mci->mc_idx, mci->mc_idx, mci->mod_name, mci->ctl_name,
- pci_name(mci->pdev));
- rc = 0;
up(&mem_ctls_mutex);
-
- return rc;
+ edac_printk(KERN_INFO, EDAC_MC,
+ "Removed device %d for %s %s: PCI %s\n", mci->mc_idx,
+ mci->mod_name, mci->ctl_name, pci_name(mci->pdev));
+ return mci;
}
+EXPORT_SYMBOL_GPL(edac_mc_del_mc);
-
-EXPORT_SYMBOL(edac_mc_scrub_block);
-
-void edac_mc_scrub_block(unsigned long page, unsigned long offset,
- u32 size)
+void edac_mc_scrub_block(unsigned long page, unsigned long offset, u32 size)
{
struct page *pg;
void *virt_addr;
unsigned long flags = 0;
- debugf3("MC: " __FILE__ ": %s()\n", __func__);
+ debugf3("%s()\n", __func__);
/* ECC error page was not in our memory. Ignore it. */
if(!pfn_valid(page))
@@ -1590,19 +1559,15 @@ void edac_mc_scrub_block(unsigned long page, unsigned long offset,
if (PageHighMem(pg))
local_irq_restore(flags);
}
-
+EXPORT_SYMBOL_GPL(edac_mc_scrub_block);
/* FIXME - should return -1 */
-EXPORT_SYMBOL(edac_mc_find_csrow_by_page);
-
-int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci,
- unsigned long page)
+int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci, unsigned long page)
{
struct csrow_info *csrows = mci->csrows;
int row, i;
- debugf1("MC%d: " __FILE__ ": %s(): 0x%lx\n", mci->mc_idx, __func__,
- page);
+ debugf1("MC%d: %s(): 0x%lx\n", mci->mc_idx, __func__, page);
row = -1;
for (i = 0; i < mci->nr_csrows; i++) {
@@ -1611,11 +1576,10 @@ int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci,
if (csrow->nr_pages == 0)
continue;
- debugf3("MC%d: " __FILE__
- ": %s(): first(0x%lx) page(0x%lx)"
- " last(0x%lx) mask(0x%lx)\n", mci->mc_idx,
- __func__, csrow->first_page, page,
- csrow->last_page, csrow->page_mask);
+ debugf3("MC%d: %s(): first(0x%lx) page(0x%lx) last(0x%lx) "
+ "mask(0x%lx)\n", mci->mc_idx, __func__,
+ csrow->first_page, page, csrow->last_page,
+ csrow->page_mask);
if ((page >= csrow->first_page) &&
(page <= csrow->last_page) &&
@@ -1627,56 +1591,52 @@ int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci,
}
if (row == -1)
- printk(KERN_ERR
- "EDAC MC%d: could not look up page error address %lx\n",
- mci->mc_idx, (unsigned long) page);
+ edac_mc_printk(mci, KERN_ERR,
+ "could not look up page error address %lx\n",
+ (unsigned long) page);
return row;
}
-
-
-EXPORT_SYMBOL(edac_mc_handle_ce);
+EXPORT_SYMBOL_GPL(edac_mc_find_csrow_by_page);
/* FIXME - settable log (warning/emerg) levels */
/* FIXME - integrate with evlog: http://evlog.sourceforge.net/ */
void edac_mc_handle_ce(struct mem_ctl_info *mci,
- unsigned long page_frame_number,
- unsigned long offset_in_page,
- unsigned long syndrome, int row, int channel,
- const char *msg)
+ unsigned long page_frame_number, unsigned long offset_in_page,
+ unsigned long syndrome, int row, int channel, const char *msg)
{
unsigned long remapped_page;
- debugf3("MC%d: " __FILE__ ": %s()\n", mci->mc_idx, __func__);
+ debugf3("MC%d: %s()\n", mci->mc_idx, __func__);
/* FIXME - maybe make panic on INTERNAL ERROR an option */
if (row >= mci->nr_csrows || row < 0) {
/* something is wrong */
- printk(KERN_ERR
- "EDAC MC%d: INTERNAL ERROR: row out of range (%d >= %d)\n",
- mci->mc_idx, row, mci->nr_csrows);
+ edac_mc_printk(mci, KERN_ERR,
+ "INTERNAL ERROR: row out of range "
+ "(%d >= %d)\n", row, mci->nr_csrows);
edac_mc_handle_ce_no_info(mci, "INTERNAL ERROR");
return;
}
+
if (channel >= mci->csrows[row].nr_channels || channel < 0) {
/* something is wrong */
- printk(KERN_ERR
- "EDAC MC%d: INTERNAL ERROR: channel out of range "
- "(%d >= %d)\n",
- mci->mc_idx, channel, mci->csrows[row].nr_channels);
+ edac_mc_printk(mci, KERN_ERR,
+ "INTERNAL ERROR: channel out of range "
+ "(%d >= %d)\n", channel,
+ mci->csrows[row].nr_channels);
edac_mc_handle_ce_no_info(mci, "INTERNAL ERROR");
return;
}
if (log_ce)
/* FIXME - put in DIMM location */
- printk(KERN_WARNING
- "EDAC MC%d: CE page 0x%lx, offset 0x%lx,"
- " grain %d, syndrome 0x%lx, row %d, channel %d,"
- " label \"%s\": %s\n", mci->mc_idx,
- page_frame_number, offset_in_page,
- mci->csrows[row].grain, syndrome, row, channel,
- mci->csrows[row].channels[channel].label, msg);
+ edac_mc_printk(mci, KERN_WARNING,
+ "CE page 0x%lx, offset 0x%lx, grain %d, syndrome "
+ "0x%lx, row %d, channel %d, label \"%s\": %s\n",
+ page_frame_number, offset_in_page,
+ mci->csrows[row].grain, syndrome, row, channel,
+ mci->csrows[row].channels[channel].label, msg);
mci->ce_count++;
mci->csrows[row].ce_count++;
@@ -1697,31 +1657,25 @@ void edac_mc_handle_ce(struct mem_ctl_info *mci,
page_frame_number;
edac_mc_scrub_block(remapped_page, offset_in_page,
- mci->csrows[row].grain);
+ mci->csrows[row].grain);
}
}
+EXPORT_SYMBOL_GPL(edac_mc_handle_ce);
-
-EXPORT_SYMBOL(edac_mc_handle_ce_no_info);
-
-void edac_mc_handle_ce_no_info(struct mem_ctl_info *mci,
- const char *msg)
+void edac_mc_handle_ce_no_info(struct mem_ctl_info *mci, const char *msg)
{
if (log_ce)
- printk(KERN_WARNING
- "EDAC MC%d: CE - no information available: %s\n",
- mci->mc_idx, msg);
+ edac_mc_printk(mci, KERN_WARNING,
+ "CE - no information available: %s\n", msg);
+
mci->ce_noinfo_count++;
mci->ce_count++;
}
-
-
-EXPORT_SYMBOL(edac_mc_handle_ue);
+EXPORT_SYMBOL_GPL(edac_mc_handle_ce_no_info);
void edac_mc_handle_ue(struct mem_ctl_info *mci,
- unsigned long page_frame_number,
- unsigned long offset_in_page, int row,
- const char *msg)
+ unsigned long page_frame_number, unsigned long offset_in_page,
+ int row, const char *msg)
{
int len = EDAC_MC_LABEL_LEN * 4;
char labels[len + 1];
@@ -1729,65 +1683,61 @@ void edac_mc_handle_ue(struct mem_ctl_info *mci,
int chan;
int chars;
- debugf3("MC%d: " __FILE__ ": %s()\n", mci->mc_idx, __func__);
+ debugf3("MC%d: %s()\n", mci->mc_idx, __func__);
/* FIXME - maybe make panic on INTERNAL ERROR an option */
if (row >= mci->nr_csrows || row < 0) {
/* something is wrong */
- printk(KERN_ERR
- "EDAC MC%d: INTERNAL ERROR: row out of range (%d >= %d)\n",
- mci->mc_idx, row, mci->nr_csrows);
+ edac_mc_printk(mci, KERN_ERR,
+ "INTERNAL ERROR: row out of range "
+ "(%d >= %d)\n", row, mci->nr_csrows);
edac_mc_handle_ue_no_info(mci, "INTERNAL ERROR");
return;
}
chars = snprintf(pos, len + 1, "%s",
- mci->csrows[row].channels[0].label);
+ mci->csrows[row].channels[0].label);
len -= chars;
pos += chars;
+
for (chan = 1; (chan < mci->csrows[row].nr_channels) && (len > 0);
chan++) {
chars = snprintf(pos, len + 1, ":%s",
- mci->csrows[row].channels[chan].label);
+ mci->csrows[row].channels[chan].label);
len -= chars;
pos += chars;
}
if (log_ue)
- printk(KERN_EMERG
- "EDAC MC%d: UE page 0x%lx, offset 0x%lx, grain %d, row %d,"
- " labels \"%s\": %s\n", mci->mc_idx,
- page_frame_number, offset_in_page,
- mci->csrows[row].grain, row, labels, msg);
+ edac_mc_printk(mci, KERN_EMERG,
+ "UE page 0x%lx, offset 0x%lx, grain %d, row %d, "
+ "labels \"%s\": %s\n", page_frame_number,
+ offset_in_page, mci->csrows[row].grain, row, labels,
+ msg);
if (panic_on_ue)
- panic
- ("EDAC MC%d: UE page 0x%lx, offset 0x%lx, grain %d, row %d,"
- " labels \"%s\": %s\n", mci->mc_idx,
- page_frame_number, offset_in_page,
- mci->csrows[row].grain, row, labels, msg);
+ panic("EDAC MC%d: UE page 0x%lx, offset 0x%lx, grain %d, "
+ "row %d, labels \"%s\": %s\n", mci->mc_idx,
+ page_frame_number, offset_in_page,
+ mci->csrows[row].grain, row, labels, msg);
mci->ue_count++;
mci->csrows[row].ue_count++;
}
+EXPORT_SYMBOL_GPL(edac_mc_handle_ue);
-
-EXPORT_SYMBOL(edac_mc_handle_ue_no_info);
-
-void edac_mc_handle_ue_no_info(struct mem_ctl_info *mci,
- const char *msg)
+void edac_mc_handle_ue_no_info(struct mem_ctl_info *mci, const char *msg)
{
if (panic_on_ue)
panic("EDAC MC%d: Uncorrected Error", mci->mc_idx);
if (log_ue)
- printk(KERN_WARNING
- "EDAC MC%d: UE - no information available: %s\n",
- mci->mc_idx, msg);
+ edac_mc_printk(mci, KERN_WARNING,
+ "UE - no information available: %s\n", msg);
mci->ue_noinfo_count++;
mci->ue_count++;
}
-
+EXPORT_SYMBOL_GPL(edac_mc_handle_ue_no_info);
#ifdef CONFIG_PCI
@@ -1799,18 +1749,22 @@ static u16 get_pci_parity_status(struct pci_dev *dev, int secondary)
where = secondary ? PCI_SEC_STATUS : PCI_STATUS;
pci_read_config_word(dev, where, &status);
- /* If we get back 0xFFFF then we must suspect that the card has been pulled but
- the Linux PCI layer has not yet finished cleaning up. We don't want to report
- on such devices */
+ /* If we get back 0xFFFF then we must suspect that the card has been
+ * pulled but the Linux PCI layer has not yet finished cleaning up.
+ * We don't want to report on such devices
+ */
if (status == 0xFFFF) {
u32 sanity;
+
pci_read_config_dword(dev, 0, &sanity);
+
if (sanity == 0xFFFFFFFF)
return 0;
}
+
status &= PCI_STATUS_DETECTED_PARITY | PCI_STATUS_SIG_SYSTEM_ERROR |
- PCI_STATUS_PARITY;
+ PCI_STATUS_PARITY;
if (status)
/* reset only the bits we are interested in */
@@ -1822,7 +1776,7 @@ static u16 get_pci_parity_status(struct pci_dev *dev, int secondary)
typedef void (*pci_parity_check_fn_t) (struct pci_dev *dev);
/* Clear any PCI parity errors logged by this device. */
-static void edac_pci_dev_parity_clear( struct pci_dev *dev )
+static void edac_pci_dev_parity_clear(struct pci_dev *dev)
{
u8 header_type;
@@ -1853,25 +1807,22 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
/* check the status reg for errors */
if (status) {
if (status & (PCI_STATUS_SIG_SYSTEM_ERROR))
- printk(KERN_CRIT
- "EDAC PCI- "
+ edac_printk(KERN_CRIT, EDAC_PCI,
"Signaled System Error on %s\n",
- pci_name (dev));
+ pci_name(dev));
if (status & (PCI_STATUS_PARITY)) {
- printk(KERN_CRIT
- "EDAC PCI- "
+ edac_printk(KERN_CRIT, EDAC_PCI,
"Master Data Parity Error on %s\n",
- pci_name (dev));
+ pci_name(dev));
atomic_inc(&pci_parity_count);
}
if (status & (PCI_STATUS_DETECTED_PARITY)) {
- printk(KERN_CRIT
- "EDAC PCI- "
+ edac_printk(KERN_CRIT, EDAC_PCI,
"Detected Parity Error on %s\n",
- pci_name (dev));
+ pci_name(dev));
atomic_inc(&pci_parity_count);
}
@@ -1892,25 +1843,22 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
/* check the secondary status reg for errors */
if (status) {
if (status & (PCI_STATUS_SIG_SYSTEM_ERROR))
- printk(KERN_CRIT
- "EDAC PCI-Bridge- "
+ edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
"Signaled System Error on %s\n",
- pci_name (dev));
+ pci_name(dev));
if (status & (PCI_STATUS_PARITY)) {
- printk(KERN_CRIT
- "EDAC PCI-Bridge- "
- "Master Data Parity Error on %s\n",
- pci_name (dev));
+ edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
+ "Master Data Parity Error on "
+ "%s\n", pci_name(dev));
atomic_inc(&pci_parity_count);
}
if (status & (PCI_STATUS_DETECTED_PARITY)) {
- printk(KERN_CRIT
- "EDAC PCI-Bridge- "
+ edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
"Detected Parity Error on %s\n",
- pci_name (dev));
+ pci_name(dev));
atomic_inc(&pci_parity_count);
}
@@ -1929,58 +1877,55 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
* Returns: 0 not found
* 1 found on list
*/
-static int check_dev_on_list(struct edac_pci_device_list *list, int free_index,
- struct pci_dev *dev)
-{
- int i;
- int rc = 0; /* Assume not found */
- unsigned short vendor=dev->vendor;
- unsigned short device=dev->device;
-
- /* Scan the list, looking for a vendor/device match
- */
- for (i = 0; i < free_index; i++, list++ ) {
- if ( (list->vendor == vendor ) &&
- (list->device == device )) {
- rc = 1;
- break;
- }
- }
+static int check_dev_on_list(struct edac_pci_device_list *list,
+ int free_index, struct pci_dev *dev)
+{
+ int i;
+ int rc = 0; /* Assume not found */
+ unsigned short vendor=dev->vendor;
+ unsigned short device=dev->device;
+
+ /* Scan the list, looking for a vendor/device match */
+ for (i = 0; i < free_index; i++, list++ ) {
+ if ((list->vendor == vendor ) && (list->device == device )) {
+ rc = 1;
+ break;
+ }
+ }
- return rc;
+ return rc;
}
/*
* pci_dev parity list iterator
- * Scan the PCI device list for one iteration, looking for SERRORs
+ * Scan the PCI device list for one iteration, looking for SERRORs
* Master Parity ERRORS or Parity ERRORs on primary or secondary devices
*/
static inline void edac_pci_dev_parity_iterator(pci_parity_check_fn_t fn)
{
- struct pci_dev *dev=NULL;
+ struct pci_dev *dev = NULL;
/* request for kernel access to the next PCI device, if any,
* and while we are looking at it have its reference count
* bumped until we are done with it
*/
while((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
-
- /* if whitelist exists then it has priority, so only scan those
- * devices on the whitelist
- */
- if (pci_whitelist_count > 0 ) {
- if (check_dev_on_list(pci_whitelist,
+ /* if whitelist exists then it has priority, so only scan
+ * those devices on the whitelist
+ */
+ if (pci_whitelist_count > 0 ) {
+ if (check_dev_on_list(pci_whitelist,
pci_whitelist_count, dev))
fn(dev);
- } else {
+ } else {
/*
* if no whitelist, then check if this devices is
* blacklisted
*/
- if (!check_dev_on_list(pci_blacklist,
+ if (!check_dev_on_list(pci_blacklist,
pci_blacklist_count, dev))
fn(dev);
- }
+ }
}
}
@@ -1989,7 +1934,7 @@ static void do_pci_parity_check(void)
unsigned long flags;
int before_count;
- debugf3("MC: " __FILE__ ": %s()\n", __func__);
+ debugf3("%s()\n", __func__);
if (!check_pci_parity)
return;
@@ -2011,7 +1956,6 @@ static void do_pci_parity_check(void)
}
}
-
static inline void clear_pci_parity_errors(void)
{
/* Clear any PCI bus parity errors that devices initially have logged
@@ -2020,37 +1964,30 @@ static inline void clear_pci_parity_errors(void)
edac_pci_dev_parity_iterator(edac_pci_dev_parity_clear);
}
-
#else /* CONFIG_PCI */
-
static inline void do_pci_parity_check(void)
{
/* no-op */
}
-
static inline void clear_pci_parity_errors(void)
{
/* no-op */
}
-
#endif /* CONFIG_PCI */
/*
* Iterate over all MC instances and check for ECC, et al, errors
*/
-static inline void check_mc_devices (void)
+static inline void check_mc_devices(void)
{
- unsigned long flags;
struct list_head *item;
struct mem_ctl_info *mci;
- debugf3("MC: " __FILE__ ": %s()\n", __func__);
-
- /* during poll, have interrupts off */
- local_irq_save(flags);
+ debugf3("%s()\n", __func__);
+ down(&mem_ctls_mutex);
list_for_each(item, &mc_devices) {
mci = list_entry(item, struct mem_ctl_info, link);
@@ -2059,10 +1996,9 @@ static inline void check_mc_devices (void)
mci->edac_check(mci);
}
- local_irq_restore(flags);
+ up(&mem_ctls_mutex);
}
-
/*
* Check MC status every poll_msec.
* Check PCI status every poll_msec as well.
@@ -2073,70 +2009,21 @@ static inline void check_mc_devices (void)
*/
static void do_edac_check(void)
{
-
- debugf3("MC: " __FILE__ ": %s()\n", __func__);
-
+ debugf3("%s()\n", __func__);
check_mc_devices();
-
do_pci_parity_check();
}
-
-/*
- * EDAC thread state information
- */
-struct bs_thread_info
-{
- struct task_struct *task;
- struct completion *event;
- char *name;
- void (*run)(void);
-};
-
-static struct bs_thread_info bs_thread;
-
-/*
- * edac_kernel_thread
- * This the kernel thread that processes edac operations
- * in a normal thread environment
- */
static int edac_kernel_thread(void *arg)
{
- struct bs_thread_info *thread = (struct bs_thread_info *) arg;
-
- /* detach thread */
- daemonize(thread->name);
-
- current->exit_signal = SIGCHLD;
- allow_signal(SIGKILL);
- thread->task = current;
-
- /* indicate to starting task we have started */
- complete(thread->event);
-
- /* loop forever, until we are told to stop */
- while(thread->run != NULL) {
- void (*run)(void);
-
- /* call the function to check the memory controllers */
- run = thread->run;
- if (run)
- run();
-
- if (signal_pending(current))
- flush_signals(current);
-
- /* ensure we are interruptable */
- set_current_state(TASK_INTERRUPTIBLE);
+ while (!kthread_should_stop()) {
+ do_edac_check();
/* goto sleep for the interval */
- schedule_timeout((HZ * poll_msec) / 1000);
+ schedule_timeout_interruptible((HZ * poll_msec) / 1000);
try_to_freeze();
}
- /* notify waiter that we are exiting */
- complete(thread->event);
-
return 0;
}
@@ -2146,10 +2033,7 @@ static int edac_kernel_thread(void *arg)
*/
static int __init edac_mc_init(void)
{
- int ret;
- struct completion event;
-
- printk(KERN_INFO "MC: " __FILE__ " version " EDAC_MC_VERSION "\n");
+ edac_printk(KERN_INFO, EDAC_MC, EDAC_MC_VERSION "\n");
/*
* Harvest and clear any boot/initialization PCI parity errors
@@ -2160,80 +2044,54 @@ static int __init edac_mc_init(void)
*/
clear_pci_parity_errors();
- /* perform check for first time to harvest boot leftovers */
- do_edac_check();
-
/* Create the MC sysfs entries */
if (edac_sysfs_memctrl_setup()) {
- printk(KERN_ERR "EDAC MC: Error initializing sysfs code\n");
+ edac_printk(KERN_ERR, EDAC_MC,
+ "Error initializing sysfs code\n");
return -ENODEV;
}
/* Create the PCI parity sysfs entries */
if (edac_sysfs_pci_setup()) {
edac_sysfs_memctrl_teardown();
- printk(KERN_ERR "EDAC PCI: Error initializing sysfs code\n");
+ edac_printk(KERN_ERR, EDAC_MC,
+ "EDAC PCI: Error initializing sysfs code\n");
return -ENODEV;
}
- /* Create our kernel thread */
- init_completion(&event);
- bs_thread.event = &event;
- bs_thread.name = "kedac";
- bs_thread.run = do_edac_check;
-
/* create our kernel thread */
- ret = kernel_thread(edac_kernel_thread, &bs_thread, CLONE_KERNEL);
- if (ret < 0) {
+ edac_thread = kthread_run(edac_kernel_thread, NULL, "kedac");
+
+ if (IS_ERR(edac_thread)) {
/* remove the sysfs entries */
edac_sysfs_memctrl_teardown();
edac_sysfs_pci_teardown();
- return -ENOMEM;
+ return PTR_ERR(edac_thread);
}
- /* wait for our kernel theard ack that it is up and running */
- wait_for_completion(&event);
-
return 0;
}
-
/*
* edac_mc_exit()
 * module exit/termination function
*/
static void __exit edac_mc_exit(void)
{
- struct completion event;
-
- debugf0("MC: " __FILE__ ": %s()\n", __func__);
-
- init_completion(&event);
- bs_thread.event = &event;
-
- /* As soon as ->run is set to NULL, the task could disappear,
- * so we need to hold tasklist_lock until we have sent the signal
- */
- read_lock(&tasklist_lock);
- bs_thread.run = NULL;
- send_sig(SIGKILL, bs_thread.task, 1);
- read_unlock(&tasklist_lock);
- wait_for_completion(&event);
+ debugf0("%s()\n", __func__);
+ kthread_stop(edac_thread);
/* tear down the sysfs device */
edac_sysfs_memctrl_teardown();
edac_sysfs_pci_teardown();
}
-
-
-
module_init(edac_mc_init);
module_exit(edac_mc_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Linux Networx (http://lnxi.com) Thayne Harbaugh et al\n"
- "Based on.work by Dan Hollis et al");
+ "Based on work by Dan Hollis et al");
MODULE_DESCRIPTION("Core library routines for MC reporting");
module_param(panic_on_ue, int, 0644);
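
The edac_mc.c hunks above retire the hand-rolled daemonize()/signal poller in favour of the kthread API (kthread_run(), kthread_should_stop(), kthread_stop()). A minimal sketch of that pattern follows; my_check(), my_poll_msec, my_thread and the "kmypoll" name are illustrative placeholders, not symbols from the patch.

	#include <linux/module.h>
	#include <linux/init.h>
	#include <linux/kthread.h>
	#include <linux/sched.h>
	#include <linux/err.h>

	static struct task_struct *my_thread;
	static int my_poll_msec = 1000;

	static void my_check(void)
	{
		/* periodic work would go here */
	}

	static int my_thread_fn(void *arg)
	{
		while (!kthread_should_stop()) {
			my_check();
			/* sleep for the poll interval; kthread_stop() wakes us early */
			schedule_timeout_interruptible((HZ * my_poll_msec) / 1000);
		}

		return 0;
	}

	static int __init my_init(void)
	{
		my_thread = kthread_run(my_thread_fn, NULL, "kmypoll");
		return IS_ERR(my_thread) ? PTR_ERR(my_thread) : 0;
	}

	static void __exit my_exit(void)
	{
		kthread_stop(my_thread);
	}

	module_init(my_init);
	module_exit(my_exit);
	MODULE_LICENSE("GPL");

Because kthread_stop() both sets the stop flag and wakes the task, the sleep above ends promptly at module unload without any of the old signal/tasklist_lock handshaking.
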
diff --git a/drivers/edac/edac_mc.h b/drivers/edac/edac_mc.h
index 75ecf484a43..8d9e83909b9 100644
--- a/drivers/edac/edac_mc.h
+++ b/drivers/edac/edac_mc.h
@@ -15,11 +15,9 @@
*
*/
-
#ifndef _EDAC_MC_H_
#define _EDAC_MC_H_
-
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/types.h>
@@ -33,7 +31,6 @@
#include <linux/completion.h>
#include <linux/kobject.h>
-
#define EDAC_MC_LABEL_LEN 31
#define MC_PROC_NAME_MAX_LEN 7
@@ -43,31 +40,53 @@
#define PAGES_TO_MiB( pages ) ( ( pages ) << ( PAGE_SHIFT - 20 ) )
#endif
+#define edac_printk(level, prefix, fmt, arg...) \
+ printk(level "EDAC " prefix ": " fmt, ##arg)
+
+#define edac_mc_printk(mci, level, fmt, arg...) \
+ printk(level "EDAC MC%d: " fmt, mci->mc_idx, ##arg)
+
+#define edac_mc_chipset_printk(mci, level, prefix, fmt, arg...) \
+ printk(level "EDAC " prefix " MC%d: " fmt, mci->mc_idx, ##arg)
+
+/* prefixes for edac_printk() and edac_mc_printk() */
+#define EDAC_MC "MC"
+#define EDAC_PCI "PCI"
+#define EDAC_DEBUG "DEBUG"
+
#ifdef CONFIG_EDAC_DEBUG
extern int edac_debug_level;
-#define edac_debug_printk(level, fmt, args...) \
-do { if (level <= edac_debug_level) printk(KERN_DEBUG fmt, ##args); } while(0)
+
+#define edac_debug_printk(level, fmt, arg...) \
+ do { \
+ if (level <= edac_debug_level) \
+ edac_printk(KERN_DEBUG, EDAC_DEBUG, fmt, ##arg); \
+ } while(0)
+
#define debugf0( ... ) edac_debug_printk(0, __VA_ARGS__ )
#define debugf1( ... ) edac_debug_printk(1, __VA_ARGS__ )
#define debugf2( ... ) edac_debug_printk(2, __VA_ARGS__ )
#define debugf3( ... ) edac_debug_printk(3, __VA_ARGS__ )
#define debugf4( ... ) edac_debug_printk(4, __VA_ARGS__ )
-#else /* !CONFIG_EDAC_DEBUG */
+
+#else /* !CONFIG_EDAC_DEBUG */
+
#define debugf0( ... )
#define debugf1( ... )
#define debugf2( ... )
#define debugf3( ... )
#define debugf4( ... )
-#endif /* !CONFIG_EDAC_DEBUG */
+#endif /* !CONFIG_EDAC_DEBUG */
-#define bs_xstr(s) bs_str(s)
-#define bs_str(s) #s
-#define BS_MOD_STR bs_xstr(KBUILD_BASENAME)
+#define edac_xstr(s) edac_str(s)
+#define edac_str(s) #s
+#define EDAC_MOD_STR edac_xstr(KBUILD_BASENAME)
#define BIT(x) (1 << (x))
-#define PCI_VEND_DEV(vend, dev) PCI_VENDOR_ID_ ## vend, PCI_DEVICE_ID_ ## vend ## _ ## dev
+#define PCI_VEND_DEV(vend, dev) PCI_VENDOR_ID_ ## vend, \
+ PCI_DEVICE_ID_ ## vend ## _ ## dev
/* memory devices */
enum dev_type {
@@ -117,7 +136,6 @@ enum mem_type {
#define MEM_FLAG_RDDR BIT(MEM_RDDR)
#define MEM_FLAG_RMBS BIT(MEM_RMBS)
-
/* chipset Error Detection and Correction capabilities and mode */
enum edac_type {
EDAC_UNKNOWN = 0, /* Unknown if ECC is available */
@@ -142,7 +160,6 @@ enum edac_type {
#define EDAC_FLAG_S8ECD8ED BIT(EDAC_S8ECD8ED)
#define EDAC_FLAG_S16ECD16ED BIT(EDAC_S16ECD16ED)
-
/* scrubbing capabilities */
enum scrub_type {
SCRUB_UNKNOWN = 0, /* Unknown if scrubber is available */
@@ -166,11 +183,6 @@ enum scrub_type {
#define SCRUB_FLAG_HW_PROG_SRC BIT(SCRUB_HW_PROG_SRC_CORR)
#define SCRUB_FLAG_HW_TUN BIT(SCRUB_HW_TUNABLE)
-enum mci_sysfs_status {
- MCI_SYSFS_INACTIVE = 0, /* sysfs entries NOT registered */
- MCI_SYSFS_ACTIVE /* sysfs entries ARE registered */
-};
-
/* FIXME - should have notify capabilities: NMI, LOG, PROC, etc */
/*
@@ -255,20 +267,19 @@ enum mci_sysfs_status {
* PS - I enjoyed writing all that about as much as you enjoyed reading it.
*/
-
struct channel_info {
int chan_idx; /* channel index */
u32 ce_count; /* Correctable Errors for this CHANNEL */
- char label[EDAC_MC_LABEL_LEN + 1]; /* DIMM label on motherboard */
+ char label[EDAC_MC_LABEL_LEN + 1]; /* DIMM label on motherboard */
struct csrow_info *csrow; /* the parent */
};
-
struct csrow_info {
unsigned long first_page; /* first page number in dimm */
unsigned long last_page; /* last page number in dimm */
unsigned long page_mask; /* used for interleaving -
- 0UL for non intlv */
+ * 0UL for non intlv
+ */
u32 nr_pages; /* number of pages in csrow */
u32 grain; /* granularity of reported error in bytes */
int csrow_idx; /* the chip-select row */
@@ -280,29 +291,28 @@ struct csrow_info {
struct mem_ctl_info *mci; /* the parent */
struct kobject kobj; /* sysfs kobject for this csrow */
+ struct completion kobj_complete;
/* FIXME the number of CHANNELs might need to become dynamic */
u32 nr_channels;
struct channel_info *channels;
};
-
struct mem_ctl_info {
struct list_head link; /* for global list of mem_ctl_info structs */
unsigned long mtype_cap; /* memory types supported by mc */
unsigned long edac_ctl_cap; /* Mem controller EDAC capabilities */
unsigned long edac_cap; /* configuration capabilities - this is
- closely related to edac_ctl_cap. The
- difference is that the controller
- may be capable of s4ecd4ed which would
- be listed in edac_ctl_cap, but if
- channels aren't capable of s4ecd4ed then the
- edac_cap would not have that capability. */
+ * closely related to edac_ctl_cap. The
+ * difference is that the controller may be
+ * capable of s4ecd4ed which would be listed
+ * in edac_ctl_cap, but if channels aren't
+ * capable of s4ecd4ed then the edac_cap would
+ * not have that capability.
+ */
unsigned long scrub_cap; /* chipset scrub capabilities */
enum scrub_type scrub_mode; /* current scrub mode */
- enum mci_sysfs_status sysfs_active; /* status of sysfs */
-
/* pointer to edac checking routine */
void (*edac_check) (struct mem_ctl_info * mci);
/*
@@ -311,7 +321,7 @@ struct mem_ctl_info {
*/
/* FIXME - why not send the phys page to begin with? */
unsigned long (*ctl_page_to_phys) (struct mem_ctl_info * mci,
- unsigned long page);
+ unsigned long page);
int mc_idx;
int nr_csrows;
struct csrow_info *csrows;
@@ -340,72 +350,69 @@ struct mem_ctl_info {
/* edac sysfs device control */
struct kobject edac_mci_kobj;
+ struct completion kobj_complete;
};
-
-
/* write all or some bits in a byte-register*/
-static inline void pci_write_bits8(struct pci_dev *pdev, int offset,
- u8 value, u8 mask)
+static inline void pci_write_bits8(struct pci_dev *pdev, int offset, u8 value,
+ u8 mask)
{
if (mask != 0xff) {
u8 buf;
+
pci_read_config_byte(pdev, offset, &buf);
value &= mask;
buf &= ~mask;
value |= buf;
}
+
pci_write_config_byte(pdev, offset, value);
}
-
/* write all or some bits in a word-register*/
static inline void pci_write_bits16(struct pci_dev *pdev, int offset,
- u16 value, u16 mask)
+ u16 value, u16 mask)
{
if (mask != 0xffff) {
u16 buf;
+
pci_read_config_word(pdev, offset, &buf);
value &= mask;
buf &= ~mask;
value |= buf;
}
+
pci_write_config_word(pdev, offset, value);
}
-
/* write all or some bits in a dword-register*/
static inline void pci_write_bits32(struct pci_dev *pdev, int offset,
- u32 value, u32 mask)
+ u32 value, u32 mask)
{
if (mask != 0xffff) {
u32 buf;
+
pci_read_config_dword(pdev, offset, &buf);
value &= mask;
buf &= ~mask;
value |= buf;
}
+
pci_write_config_dword(pdev, offset, value);
}
-
#ifdef CONFIG_EDAC_DEBUG
void edac_mc_dump_channel(struct channel_info *chan);
void edac_mc_dump_mci(struct mem_ctl_info *mci);
void edac_mc_dump_csrow(struct csrow_info *csrow);
-#endif /* CONFIG_EDAC_DEBUG */
+#endif /* CONFIG_EDAC_DEBUG */
extern int edac_mc_add_mc(struct mem_ctl_info *mci);
-extern int edac_mc_del_mc(struct mem_ctl_info *mci);
-
+extern struct mem_ctl_info * edac_mc_del_mc(struct pci_dev *pdev);
extern int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci,
- unsigned long page);
-
-extern struct mem_ctl_info *edac_mc_find_mci_by_pdev(struct pci_dev
- *pdev);
-
-extern void edac_mc_scrub_block(unsigned long page,
- unsigned long offset, u32 size);
+ unsigned long page);
+extern void edac_mc_scrub_block(unsigned long page, unsigned long offset,
+ u32 size);
/*
* The no info errors are used when error overflows are reported.
@@ -418,31 +425,25 @@ extern void edac_mc_scrub_block(unsigned long page,
* statement clutter and extra function arguments.
*/
extern void edac_mc_handle_ce(struct mem_ctl_info *mci,
- unsigned long page_frame_number,
- unsigned long offset_in_page,
- unsigned long syndrome,
- int row, int channel, const char *msg);
-
+ unsigned long page_frame_number, unsigned long offset_in_page,
+ unsigned long syndrome, int row, int channel,
+ const char *msg);
extern void edac_mc_handle_ce_no_info(struct mem_ctl_info *mci,
- const char *msg);
-
+ const char *msg);
extern void edac_mc_handle_ue(struct mem_ctl_info *mci,
- unsigned long page_frame_number,
- unsigned long offset_in_page,
- int row, const char *msg);
-
+ unsigned long page_frame_number, unsigned long offset_in_page,
+ int row, const char *msg);
extern void edac_mc_handle_ue_no_info(struct mem_ctl_info *mci,
- const char *msg);
+ const char *msg);
/*
* This kmalloc's and initializes all the structures.
* Can't be used if all structures don't have the same lifetime.
*/
-extern struct mem_ctl_info *edac_mc_alloc(unsigned sz_pvt,
- unsigned nr_csrows, unsigned nr_chans);
+extern struct mem_ctl_info *edac_mc_alloc(unsigned sz_pvt, unsigned nr_csrows,
+ unsigned nr_chans);
/* Free an mc previously allocated by edac_mc_alloc() */
extern void edac_mc_free(struct mem_ctl_info *mci);
-
#endif /* _EDAC_MC_H_ */
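
The header now pairs each kobject with a struct completion (the new kobj_complete members of csrow_info and mem_ctl_info), and edac_mc.c completes it from the kobject release callback so teardown can wait for the final reference to drop before freeing memory. A minimal sketch of that handshake, assuming a hypothetical struct my_obj and the kobject_unregister() interface used throughout this patch:

	#include <linux/kernel.h>
	#include <linux/kobject.h>
	#include <linux/completion.h>
	#include <linux/slab.h>

	/* Hypothetical container mirroring the kobject/completion pairing above. */
	struct my_obj {
		struct kobject kobj;
		struct completion kobj_complete;
	};

	/* Invoked by the kobject core once the last reference is dropped. */
	static void my_release(struct kobject *kobj)
	{
		struct my_obj *p = container_of(kobj, struct my_obj, kobj);

		complete(&p->kobj_complete);
	}

	static struct kobj_type my_ktype = {
		.release = my_release,
	};

	/* Teardown: do not free until my_release() has actually run.
	 * (Registration of p->kobj with my_ktype is omitted for brevity.)
	 */
	static void my_teardown(struct my_obj *p)
	{
		init_completion(&p->kobj_complete);
		kobject_unregister(&p->kobj);
		wait_for_completion(&p->kobj_complete);
		kfree(p);
	}
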
diff --git a/drivers/edac/i82860_edac.c b/drivers/edac/i82860_edac.c
index 52596e75f9c..fd342163cf9 100644
--- a/drivers/edac/i82860_edac.c
+++ b/drivers/edac/i82860_edac.c
@@ -9,7 +9,6 @@
* by Thayne Harbaugh of Linux Networx. (http://lnxi.com)
*/
-
#include <linux/config.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -18,6 +17,11 @@
#include <linux/slab.h>
#include "edac_mc.h"
+#define i82860_printk(level, fmt, arg...) \
+ edac_printk(level, "i82860", fmt, ##arg)
+
+#define i82860_mc_printk(mci, level, fmt, arg...) \
+ edac_mc_chipset_printk(mci, level, "i82860", fmt, ##arg)
#ifndef PCI_DEVICE_ID_INTEL_82860_0
#define PCI_DEVICE_ID_INTEL_82860_0 0x2531
@@ -48,15 +52,15 @@ struct i82860_error_info {
static const struct i82860_dev_info i82860_devs[] = {
[I82860] = {
- .ctl_name = "i82860"},
+ .ctl_name = "i82860"
+ },
};
static struct pci_dev *mci_pdev = NULL; /* init dev: in case that AGP code
- has already registered driver */
+ * has already registered driver
+ */
-static int i82860_registered = 1;
-
-static void i82860_get_error_info (struct mem_ctl_info *mci,
+static void i82860_get_error_info(struct mem_ctl_info *mci,
struct i82860_error_info *info)
{
/*
@@ -78,14 +82,15 @@ static void i82860_get_error_info (struct mem_ctl_info *mci,
*/
if (!(info->errsts2 & 0x0003))
return;
+
if ((info->errsts ^ info->errsts2) & 0x0003) {
pci_read_config_dword(mci->pdev, I82860_EAP, &info->eap);
pci_read_config_word(mci->pdev, I82860_DERRCTL_STS,
- &info->derrsyn);
+ &info->derrsyn);
}
}
-static int i82860_process_error_info (struct mem_ctl_info *mci,
+static int i82860_process_error_info(struct mem_ctl_info *mci,
struct i82860_error_info *info, int handle_errors)
{
int row;
@@ -107,8 +112,8 @@ static int i82860_process_error_info (struct mem_ctl_info *mci,
if (info->errsts & 0x0002)
edac_mc_handle_ue(mci, info->eap, 0, row, "i82860 UE");
else
- edac_mc_handle_ce(mci, info->eap, 0, info->derrsyn, row,
- 0, "i82860 UE");
+ edac_mc_handle_ce(mci, info->eap, 0, info->derrsyn, row, 0,
+ "i82860 CE");
return 1;
}
@@ -117,7 +122,7 @@ static void i82860_check(struct mem_ctl_info *mci)
{
struct i82860_error_info info;
- debugf1("MC%d: " __FILE__ ": %s()\n", mci->mc_idx, __func__);
+ debugf1("MC%d: %s()\n", mci->mc_idx, __func__);
i82860_get_error_info(mci, &info);
i82860_process_error_info(mci, &info, 1);
}
@@ -128,6 +133,7 @@ static int i82860_probe1(struct pci_dev *pdev, int dev_idx)
int index;
struct mem_ctl_info *mci = NULL;
unsigned long last_cumul_size;
+ struct i82860_error_info discard;
u16 mchcfg_ddim; /* DRAM Data Integrity Mode 0=none,2=edac */
@@ -140,21 +146,20 @@ static int i82860_probe1(struct pci_dev *pdev, int dev_idx)
going to make 1 channel for group.
*/
mci = edac_mc_alloc(0, 16, 1);
+
if (!mci)
return -ENOMEM;
- debugf3("MC: " __FILE__ ": %s(): init mci\n", __func__);
-
+ debugf3("%s(): init mci\n", __func__);
mci->pdev = pdev;
mci->mtype_cap = MEM_FLAG_DDR;
-
mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
/* I'm not sure about this but I think that all RDRAM is SECDED */
mci->edac_cap = EDAC_FLAG_SECDED;
/* adjust FLAGS */
- mci->mod_name = BS_MOD_STR;
+ mci->mod_name = EDAC_MOD_STR;
mci->mod_ver = "$Revision: 1.1.2.6 $";
mci->ctl_name = i82860_devs[dev_idx].ctl_name;
mci->edac_check = i82860_check;
@@ -175,12 +180,13 @@ static int i82860_probe1(struct pci_dev *pdev, int dev_idx)
struct csrow_info *csrow = &mci->csrows[index];
pci_read_config_word(mci->pdev, I82860_GBA + index * 2,
- &value);
+ &value);
cumul_size = (value & I82860_GBA_MASK) <<
(I82860_GBA_SHIFT - PAGE_SHIFT);
- debugf3("MC: " __FILE__ ": %s(): (%d) cumul_size 0x%x\n",
- __func__, index, cumul_size);
+ debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index,
+ cumul_size);
+
if (cumul_size == last_cumul_size)
continue; /* not populated */
@@ -188,42 +194,43 @@ static int i82860_probe1(struct pci_dev *pdev, int dev_idx)
csrow->last_page = cumul_size - 1;
csrow->nr_pages = cumul_size - last_cumul_size;
last_cumul_size = cumul_size;
- csrow->grain = 1 << 12; /* I82860_EAP has 4KiB reolution */
+ csrow->grain = 1 << 12; /* I82860_EAP has 4KiB resolution */
csrow->mtype = MEM_RMBS;
csrow->dtype = DEV_UNKNOWN;
csrow->edac_mode = mchcfg_ddim ? EDAC_SECDED : EDAC_NONE;
}
- /* clear counters */
- pci_write_bits16(mci->pdev, I82860_ERRSTS, 0x0003, 0x0003);
+ i82860_get_error_info(mci, &discard); /* clear counters */
if (edac_mc_add_mc(mci)) {
- debugf3("MC: " __FILE__
- ": %s(): failed edac_mc_add_mc()\n",
- __func__);
+ debugf3("%s(): failed edac_mc_add_mc()\n", __func__);
edac_mc_free(mci);
} else {
/* get this far and it's successful */
- debugf3("MC: " __FILE__ ": %s(): success\n", __func__);
+ debugf3("%s(): success\n", __func__);
rc = 0;
}
+
return rc;
}
/* returns count (>= 0), or negative on error */
static int __devinit i82860_init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+ const struct pci_device_id *ent)
{
int rc;
- debugf0("MC: " __FILE__ ": %s()\n", __func__);
+ debugf0("%s()\n", __func__);
+ i82860_printk(KERN_INFO, "i82860 init one\n");
- printk(KERN_INFO "i82860 init one\n");
- if(pci_enable_device(pdev) < 0)
+ if (pci_enable_device(pdev) < 0)
return -EIO;
+
rc = i82860_probe1(pdev, ent->driver_data);
- if(rc == 0)
+
+ if (rc == 0)
mci_pdev = pci_dev_get(pdev);
+
return rc;
}
@@ -231,23 +238,28 @@ static void __devexit i82860_remove_one(struct pci_dev *pdev)
{
struct mem_ctl_info *mci;
- debugf0(__FILE__ ": %s()\n", __func__);
+ debugf0("%s()\n", __func__);
- mci = edac_mc_find_mci_by_pdev(pdev);
- if ((mci != NULL) && (edac_mc_del_mc(mci) == 0))
- edac_mc_free(mci);
+ if ((mci = edac_mc_del_mc(pdev)) == NULL)
+ return;
+
+ edac_mc_free(mci);
}
static const struct pci_device_id i82860_pci_tbl[] __devinitdata = {
- {PCI_VEND_DEV(INTEL, 82860_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- I82860},
- {0,} /* 0 terminated list. */
+ {
+ PCI_VEND_DEV(INTEL, 82860_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ I82860
+ },
+ {
+ 0,
+ } /* 0 terminated list. */
};
MODULE_DEVICE_TABLE(pci, i82860_pci_tbl);
static struct pci_driver i82860_driver = {
- .name = BS_MOD_STR,
+ .name = EDAC_MOD_STR,
.probe = i82860_init_one,
.remove = __devexit_p(i82860_remove_one),
.id_table = i82860_pci_tbl,
@@ -257,43 +269,56 @@ static int __init i82860_init(void)
{
int pci_rc;
- debugf3("MC: " __FILE__ ": %s()\n", __func__);
+ debugf3("%s()\n", __func__);
+
if ((pci_rc = pci_register_driver(&i82860_driver)) < 0)
- return pci_rc;
+ goto fail0;
if (!mci_pdev) {
- i82860_registered = 0;
mci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
- PCI_DEVICE_ID_INTEL_82860_0, NULL);
+ PCI_DEVICE_ID_INTEL_82860_0, NULL);
+
if (mci_pdev == NULL) {
debugf0("860 pci_get_device fail\n");
- return -ENODEV;
+ pci_rc = -ENODEV;
+ goto fail1;
}
+
pci_rc = i82860_init_one(mci_pdev, i82860_pci_tbl);
+
if (pci_rc < 0) {
debugf0("860 init fail\n");
- pci_dev_put(mci_pdev);
- return -ENODEV;
+ pci_rc = -ENODEV;
+ goto fail1;
}
}
+
return 0;
+
+fail1:
+ pci_unregister_driver(&i82860_driver);
+
+fail0:
+ if (mci_pdev != NULL)
+ pci_dev_put(mci_pdev);
+
+ return pci_rc;
}
static void __exit i82860_exit(void)
{
- debugf3("MC: " __FILE__ ": %s()\n", __func__);
+ debugf3("%s()\n", __func__);
pci_unregister_driver(&i82860_driver);
- if (!i82860_registered) {
- i82860_remove_one(mci_pdev);
+
+ if (mci_pdev != NULL)
pci_dev_put(mci_pdev);
- }
}
module_init(i82860_init);
module_exit(i82860_exit);
MODULE_LICENSE("GPL");
-MODULE_AUTHOR
- ("Red Hat Inc. (http://www.redhat.com.com) Ben Woodard <woodard@redhat.com>");
+MODULE_AUTHOR("Red Hat Inc. (http://www.redhat.com) "
+ "Ben Woodard <woodard@redhat.com>");
MODULE_DESCRIPTION("ECC support for Intel 82860 memory hub controllers");
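
With edac_mc_del_mc() now keyed on the pci_dev and returning the removed mem_ctl_info (or NULL), a driver's remove path reduces to the two steps used by i82860_remove_one() above. A minimal standalone sketch of that flow; example_remove_one() is an illustrative name:

	#include <linux/pci.h>
	#include "edac_mc.h"

	static void example_remove_one(struct pci_dev *pdev)
	{
		struct mem_ctl_info *mci;

		/* Tears down the sysfs entries and unlinks mci from the global list. */
		mci = edac_mc_del_mc(pdev);
		if (mci == NULL)
			return;

		/* Nothing references the structure any more; edac_mc_free() is now a
		 * plain kfree() of the allocation from edac_mc_alloc().
		 */
		edac_mc_free(mci);
	}
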
diff --git a/drivers/edac/i82875p_edac.c b/drivers/edac/i82875p_edac.c
index 1991f94af75..0aec92698f1 100644
--- a/drivers/edac/i82875p_edac.c
+++ b/drivers/edac/i82875p_edac.c
@@ -13,18 +13,19 @@
* Note: E7210 appears same as D82875P - zhenyu.z.wang at intel.com
*/
-
#include <linux/config.h>
#include <linux/module.h>
#include <linux/init.h>
-
#include <linux/pci.h>
#include <linux/pci_ids.h>
-
#include <linux/slab.h>
-
#include "edac_mc.h"
+#define i82875p_printk(level, fmt, arg...) \
+ edac_printk(level, "i82875p", fmt, ##arg)
+
+#define i82875p_mc_printk(mci, level, fmt, arg...) \
+ edac_mc_chipset_printk(mci, level, "i82875p", fmt, ##arg)
#ifndef PCI_DEVICE_ID_INTEL_82875_0
#define PCI_DEVICE_ID_INTEL_82875_0 0x2578
@@ -34,11 +35,9 @@
#define PCI_DEVICE_ID_INTEL_82875_6 0x257e
#endif /* PCI_DEVICE_ID_INTEL_82875_6 */
-
/* four csrows in dual channel, eight in single channel */
#define I82875P_NR_CSROWS(nr_chans) (8/(nr_chans))
-
/* Intel 82875p register addresses - device 0 function 0 - DRAM Controller */
#define I82875P_EAP 0x58 /* Error Address Pointer (32b)
*
@@ -87,7 +86,6 @@
* 0 reserved
*/
-
/* Intel 82875p register addresses - device 6 function 0 - DRAM Controller */
#define I82875P_PCICMD6 0x04 /* PCI Command Register (16b)
*
@@ -151,23 +149,19 @@
* 1:0 DRAM type 01=DDR
*/
-
enum i82875p_chips {
I82875P = 0,
};
-
struct i82875p_pvt {
struct pci_dev *ovrfl_pdev;
void __iomem *ovrfl_window;
};
-
struct i82875p_dev_info {
const char *ctl_name;
};
-
struct i82875p_error_info {
u16 errsts;
u32 eap;
@@ -176,17 +170,19 @@ struct i82875p_error_info {
u16 errsts2;
};
-
static const struct i82875p_dev_info i82875p_devs[] = {
[I82875P] = {
- .ctl_name = "i82875p"},
+ .ctl_name = "i82875p"
+ },
};
-static struct pci_dev *mci_pdev = NULL; /* init dev: in case that AGP code
- has already registered driver */
+static struct pci_dev *mci_pdev = NULL; /* init dev: in case that AGP code has
+ * already registered driver
+ */
+
static int i82875p_registered = 1;
-static void i82875p_get_error_info (struct mem_ctl_info *mci,
+static void i82875p_get_error_info(struct mem_ctl_info *mci,
struct i82875p_error_info *info)
{
/*
@@ -210,15 +206,16 @@ static void i82875p_get_error_info (struct mem_ctl_info *mci,
*/
if (!(info->errsts2 & 0x0081))
return;
+
if ((info->errsts ^ info->errsts2) & 0x0081) {
pci_read_config_dword(mci->pdev, I82875P_EAP, &info->eap);
pci_read_config_byte(mci->pdev, I82875P_DES, &info->des);
pci_read_config_byte(mci->pdev, I82875P_DERRSYN,
- &info->derrsyn);
+ &info->derrsyn);
}
}
-static int i82875p_process_error_info (struct mem_ctl_info *mci,
+static int i82875p_process_error_info(struct mem_ctl_info *mci,
struct i82875p_error_info *info, int handle_errors)
{
int row, multi_chan;
@@ -243,23 +240,21 @@ static int i82875p_process_error_info (struct mem_ctl_info *mci,
edac_mc_handle_ue(mci, info->eap, 0, row, "i82875p UE");
else
edac_mc_handle_ce(mci, info->eap, 0, info->derrsyn, row,
- multi_chan ? (info->des & 0x1) : 0,
- "i82875p CE");
+ multi_chan ? (info->des & 0x1) : 0,
+ "i82875p CE");
return 1;
}
-
static void i82875p_check(struct mem_ctl_info *mci)
{
struct i82875p_error_info info;
- debugf1("MC%d: " __FILE__ ": %s()\n", mci->mc_idx, __func__);
+ debugf1("MC%d: %s()\n", mci->mc_idx, __func__);
i82875p_get_error_info(mci, &info);
i82875p_process_error_info(mci, &info, 1);
}
-
#ifdef CONFIG_PROC_FS
extern int pci_proc_attach_device(struct pci_dev *);
#endif
@@ -273,15 +268,14 @@ static int i82875p_probe1(struct pci_dev *pdev, int dev_idx)
unsigned long last_cumul_size;
struct pci_dev *ovrfl_pdev;
void __iomem *ovrfl_window = NULL;
-
u32 drc;
u32 drc_chan; /* Number of channels 0=1chan,1=2chan */
u32 nr_chans;
u32 drc_ddim; /* DRAM Data Integrity Mode 0=none,2=edac */
+ struct i82875p_error_info discard;
- debugf0("MC: " __FILE__ ": %s()\n", __func__);
-
- ovrfl_pdev = pci_find_device(PCI_VEND_DEV(INTEL, 82875_6), NULL);
+ debugf0("%s()\n", __func__);
+ ovrfl_pdev = pci_get_device(PCI_VEND_DEV(INTEL, 82875_6), NULL);
if (!ovrfl_pdev) {
/*
@@ -292,71 +286,69 @@ static int i82875p_probe1(struct pci_dev *pdev, int dev_idx)
*/
pci_write_bits8(pdev, 0xf4, 0x2, 0x2);
ovrfl_pdev =
- pci_scan_single_device(pdev->bus, PCI_DEVFN(6, 0));
+ pci_scan_single_device(pdev->bus, PCI_DEVFN(6, 0));
+
if (!ovrfl_pdev)
- goto fail;
+ return -ENODEV;
}
+
#ifdef CONFIG_PROC_FS
if (!ovrfl_pdev->procent && pci_proc_attach_device(ovrfl_pdev)) {
- printk(KERN_ERR "MC: " __FILE__
- ": %s(): Failed to attach overflow device\n",
- __func__);
- goto fail;
+ i82875p_printk(KERN_ERR,
+ "%s(): Failed to attach overflow device\n", __func__);
+ return -ENODEV;
}
-#endif /* CONFIG_PROC_FS */
+#endif
+ /* CONFIG_PROC_FS */
if (pci_enable_device(ovrfl_pdev)) {
- printk(KERN_ERR "MC: " __FILE__
- ": %s(): Failed to enable overflow device\n",
- __func__);
- goto fail;
+ i82875p_printk(KERN_ERR,
+ "%s(): Failed to enable overflow device\n", __func__);
+ return -ENODEV;
}
if (pci_request_regions(ovrfl_pdev, pci_name(ovrfl_pdev))) {
#ifdef CORRECT_BIOS
- goto fail;
+ goto fail0;
#endif
}
+
/* cache is irrelevant for PCI bus reads/writes */
ovrfl_window = ioremap_nocache(pci_resource_start(ovrfl_pdev, 0),
- pci_resource_len(ovrfl_pdev, 0));
+ pci_resource_len(ovrfl_pdev, 0));
if (!ovrfl_window) {
- printk(KERN_ERR "MC: " __FILE__
- ": %s(): Failed to ioremap bar6\n", __func__);
- goto fail;
+ i82875p_printk(KERN_ERR, "%s(): Failed to ioremap bar6\n",
+ __func__);
+ goto fail1;
}
/* need to find out the number of channels */
drc = readl(ovrfl_window + I82875P_DRC);
drc_chan = ((drc >> 21) & 0x1);
nr_chans = drc_chan + 1;
- drc_ddim = (drc >> 18) & 0x1;
+ drc_ddim = (drc >> 18) & 0x1;
mci = edac_mc_alloc(sizeof(*pvt), I82875P_NR_CSROWS(nr_chans),
- nr_chans);
+ nr_chans);
if (!mci) {
rc = -ENOMEM;
- goto fail;
+ goto fail2;
}
- debugf3("MC: " __FILE__ ": %s(): init mci\n", __func__);
-
+ debugf3("%s(): init mci\n", __func__);
mci->pdev = pdev;
mci->mtype_cap = MEM_FLAG_DDR;
-
mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
mci->edac_cap = EDAC_FLAG_UNKNOWN;
/* adjust FLAGS */
- mci->mod_name = BS_MOD_STR;
+ mci->mod_name = EDAC_MOD_STR;
mci->mod_ver = "$Revision: 1.5.2.11 $";
mci->ctl_name = i82875p_devs[dev_idx].ctl_name;
mci->edac_check = i82875p_check;
mci->ctl_page_to_phys = NULL;
-
- debugf3("MC: " __FILE__ ": %s(): init pvt\n", __func__);
-
+ debugf3("%s(): init pvt\n", __func__);
pvt = (struct i82875p_pvt *) mci->pvt_info;
pvt->ovrfl_pdev = ovrfl_pdev;
pvt->ovrfl_window = ovrfl_window;
@@ -374,8 +366,9 @@ static int i82875p_probe1(struct pci_dev *pdev, int dev_idx)
value = readb(ovrfl_window + I82875P_DRB + index);
cumul_size = value << (I82875P_DRB_SHIFT - PAGE_SHIFT);
- debugf3("MC: " __FILE__ ": %s(): (%d) cumul_size 0x%x\n",
- __func__, index, cumul_size);
+ debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index,
+ cumul_size);
+
if (cumul_size == last_cumul_size)
continue; /* not populated */
@@ -383,71 +376,72 @@ static int i82875p_probe1(struct pci_dev *pdev, int dev_idx)
csrow->last_page = cumul_size - 1;
csrow->nr_pages = cumul_size - last_cumul_size;
last_cumul_size = cumul_size;
- csrow->grain = 1 << 12; /* I82875P_EAP has 4KiB reolution */
+ csrow->grain = 1 << 12; /* I82875P_EAP has 4KiB resolution */
csrow->mtype = MEM_DDR;
csrow->dtype = DEV_UNKNOWN;
csrow->edac_mode = drc_ddim ? EDAC_SECDED : EDAC_NONE;
}
- /* clear counters */
- pci_write_bits16(mci->pdev, I82875P_ERRSTS, 0x0081, 0x0081);
+ i82875p_get_error_info(mci, &discard); /* clear counters */
if (edac_mc_add_mc(mci)) {
- debugf3("MC: " __FILE__
- ": %s(): failed edac_mc_add_mc()\n", __func__);
- goto fail;
+ debugf3("%s(): failed edac_mc_add_mc()\n", __func__);
+ goto fail3;
}
/* get this far and it's successful */
- debugf3("MC: " __FILE__ ": %s(): success\n", __func__);
+ debugf3("%s(): success\n", __func__);
return 0;
- fail:
- if (mci)
- edac_mc_free(mci);
+fail3:
+ edac_mc_free(mci);
- if (ovrfl_window)
- iounmap(ovrfl_window);
+fail2:
+ iounmap(ovrfl_window);
- if (ovrfl_pdev) {
- pci_release_regions(ovrfl_pdev);
- pci_disable_device(ovrfl_pdev);
- }
+fail1:
+ pci_release_regions(ovrfl_pdev);
+#ifdef CORRECT_BIOS
+fail0:
+#endif
+ pci_disable_device(ovrfl_pdev);
/* NOTE: the ovrfl proc entry and pci_dev are intentionally left */
return rc;
}
-
/* returns count (>= 0), or negative on error */
static int __devinit i82875p_init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+ const struct pci_device_id *ent)
{
int rc;
- debugf0("MC: " __FILE__ ": %s()\n", __func__);
+ debugf0("%s()\n", __func__);
+ i82875p_printk(KERN_INFO, "i82875p init one\n");
- printk(KERN_INFO "i82875p init one\n");
- if(pci_enable_device(pdev) < 0)
+ if (pci_enable_device(pdev) < 0)
return -EIO;
+
rc = i82875p_probe1(pdev, ent->driver_data);
+
if (mci_pdev == NULL)
mci_pdev = pci_dev_get(pdev);
+
return rc;
}
-
static void __devexit i82875p_remove_one(struct pci_dev *pdev)
{
struct mem_ctl_info *mci;
struct i82875p_pvt *pvt = NULL;
- debugf0(__FILE__ ": %s()\n", __func__);
+ debugf0("%s()\n", __func__);
- if ((mci = edac_mc_find_mci_by_pdev(pdev)) == NULL)
+ if ((mci = edac_mc_del_mc(pdev)) == NULL)
return;
pvt = (struct i82875p_pvt *) mci->pvt_info;
+
if (pvt->ovrfl_window)
iounmap(pvt->ovrfl_window);
@@ -459,74 +453,84 @@ static void __devexit i82875p_remove_one(struct pci_dev *pdev)
pci_dev_put(pvt->ovrfl_pdev);
}
- if (edac_mc_del_mc(mci))
- return;
-
edac_mc_free(mci);
}
-
static const struct pci_device_id i82875p_pci_tbl[] __devinitdata = {
- {PCI_VEND_DEV(INTEL, 82875_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- I82875P},
- {0,} /* 0 terminated list. */
+ {
+ PCI_VEND_DEV(INTEL, 82875_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ I82875P
+ },
+ {
+ 0,
+ } /* 0 terminated list. */
};
MODULE_DEVICE_TABLE(pci, i82875p_pci_tbl);
-
static struct pci_driver i82875p_driver = {
- .name = BS_MOD_STR,
+ .name = EDAC_MOD_STR,
.probe = i82875p_init_one,
.remove = __devexit_p(i82875p_remove_one),
.id_table = i82875p_pci_tbl,
};
-
static int __init i82875p_init(void)
{
int pci_rc;
- debugf3("MC: " __FILE__ ": %s()\n", __func__);
+ debugf3("%s()\n", __func__);
pci_rc = pci_register_driver(&i82875p_driver);
+
if (pci_rc < 0)
- return pci_rc;
+ goto fail0;
+
if (mci_pdev == NULL) {
- i82875p_registered = 0;
- mci_pdev =
- pci_get_device(PCI_VENDOR_ID_INTEL,
- PCI_DEVICE_ID_INTEL_82875_0, NULL);
+ mci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
+ PCI_DEVICE_ID_INTEL_82875_0, NULL);
+
if (!mci_pdev) {
debugf0("875p pci_get_device fail\n");
- return -ENODEV;
+ pci_rc = -ENODEV;
+ goto fail1;
}
+
pci_rc = i82875p_init_one(mci_pdev, i82875p_pci_tbl);
+
if (pci_rc < 0) {
debugf0("875p init fail\n");
- pci_dev_put(mci_pdev);
- return -ENODEV;
+ pci_rc = -ENODEV;
+ goto fail1;
}
}
+
return 0;
-}
+fail1:
+ pci_unregister_driver(&i82875p_driver);
+
+fail0:
+ if (mci_pdev != NULL)
+ pci_dev_put(mci_pdev);
+
+ return pci_rc;
+}
static void __exit i82875p_exit(void)
{
- debugf3("MC: " __FILE__ ": %s()\n", __func__);
+ debugf3("%s()\n", __func__);
pci_unregister_driver(&i82875p_driver);
+
if (!i82875p_registered) {
i82875p_remove_one(mci_pdev);
pci_dev_put(mci_pdev);
}
}
-
module_init(i82875p_init);
module_exit(i82875p_exit);
-
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Linux Networx (http://lnxi.com) Thayne Harbaugh");
MODULE_DESCRIPTION("MC support for Intel 82875 memory hub controllers");
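
Each chipset driver in this series layers a local prefix over the shared macros (i82860_printk above, i82875p_printk here, and r82600_printk in the next file all take the same shape). A minimal sketch of defining and using such a wrapper for a hypothetical "mychip" driver:

	#include "edac_mc.h"

	#define mychip_printk(level, fmt, arg...) \
		edac_printk(level, "mychip", fmt, ##arg)

	#define mychip_mc_printk(mci, level, fmt, arg...) \
		edac_mc_chipset_printk(mci, level, "mychip", fmt, ##arg)

	/* Messages come out as "EDAC mychip: ..." and "EDAC mychip MC<n>: ...". */
	static void mychip_report(struct mem_ctl_info *mci)
	{
		mychip_printk(KERN_INFO, "probe succeeded\n");
		mychip_mc_printk(mci, KERN_WARNING, "correctable error seen\n");
	}
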
diff --git a/drivers/edac/r82600_edac.c b/drivers/edac/r82600_edac.c
index e90892831b9..2c29fafe67c 100644
--- a/drivers/edac/r82600_edac.c
+++ b/drivers/edac/r82600_edac.c
@@ -18,14 +18,17 @@
#include <linux/config.h>
#include <linux/module.h>
#include <linux/init.h>
-
#include <linux/pci.h>
#include <linux/pci_ids.h>
-
#include <linux/slab.h>
-
#include "edac_mc.h"
+#define r82600_printk(level, fmt, arg...) \
+ edac_printk(level, "r82600", fmt, ##arg)
+
+#define r82600_mc_printk(mci, level, fmt, arg...) \
+ edac_mc_chipset_printk(mci, level, "r82600", fmt, ##arg)
+
/* Radisys say "The 82600 integrates a main memory SDRAM controller that
* supports up to four banks of memory. The four banks can support a mix of
* sizes of 64 bit wide (72 bits with ECC) Synchronous DRAM (SDRAM) DIMMs,
@@ -126,10 +129,8 @@ struct r82600_error_info {
u32 eapr;
};
-
static unsigned int disable_hardware_scrub = 0;
-
static void r82600_get_error_info (struct mem_ctl_info *mci,
struct r82600_error_info *info)
{
@@ -138,17 +139,16 @@ static void r82600_get_error_info (struct mem_ctl_info *mci,
if (info->eapr & BIT(0))
/* Clear error to allow next error to be reported [p.62] */
pci_write_bits32(mci->pdev, R82600_EAP,
- ((u32) BIT(0) & (u32) BIT(1)),
- ((u32) BIT(0) & (u32) BIT(1)));
+ ((u32) BIT(0) & (u32) BIT(1)),
+ ((u32) BIT(0) & (u32) BIT(1)));
if (info->eapr & BIT(1))
/* Clear error to allow next error to be reported [p.62] */
pci_write_bits32(mci->pdev, R82600_EAP,
- ((u32) BIT(0) & (u32) BIT(1)),
- ((u32) BIT(0) & (u32) BIT(1)));
+ ((u32) BIT(0) & (u32) BIT(1)),
+ ((u32) BIT(0) & (u32) BIT(1)));
}
-
static int r82600_process_error_info (struct mem_ctl_info *mci,
struct r82600_error_info *info, int handle_errors)
{
@@ -167,26 +167,25 @@ static int r82600_process_error_info (struct mem_ctl_info *mci,
* granularity (upper 19 bits only) */
page = eapaddr >> PAGE_SHIFT;
- if (info->eapr & BIT(0)) { /* CE? */
+ if (info->eapr & BIT(0)) { /* CE? */
error_found = 1;
if (handle_errors)
- edac_mc_handle_ce(
- mci, page, 0, /* not avail */
- syndrome,
- edac_mc_find_csrow_by_page(mci, page),
- 0, /* channel */
- mci->ctl_name);
+ edac_mc_handle_ce(mci, page, 0, /* not avail */
+ syndrome,
+ edac_mc_find_csrow_by_page(mci, page),
+ 0, /* channel */
+ mci->ctl_name);
}
- if (info->eapr & BIT(1)) { /* UE? */
+ if (info->eapr & BIT(1)) { /* UE? */
error_found = 1;
if (handle_errors)
/* 82600 doesn't give enough info */
edac_mc_handle_ue(mci, page, 0,
- edac_mc_find_csrow_by_page(mci, page),
- mci->ctl_name);
+ edac_mc_find_csrow_by_page(mci, page),
+ mci->ctl_name);
}
return error_found;
@@ -196,7 +195,7 @@ static void r82600_check(struct mem_ctl_info *mci)
{
struct r82600_error_info info;
- debugf1("MC%d: " __FILE__ ": %s()\n", mci->mc_idx, __func__);
+ debugf1("MC%d: %s()\n", mci->mc_idx, __func__);
r82600_get_error_info(mci, &info);
r82600_process_error_info(mci, &info, 1);
}
@@ -213,25 +212,18 @@ static int r82600_probe1(struct pci_dev *pdev, int dev_idx)
u32 scrub_disabled;
u32 sdram_refresh_rate;
u32 row_high_limit_last = 0;
- u32 eap_init_bits;
-
- debugf0("MC: " __FILE__ ": %s()\n", __func__);
-
+ struct r82600_error_info discard;
+ debugf0("%s()\n", __func__);
pci_read_config_byte(pdev, R82600_DRAMC, &dramcr);
pci_read_config_dword(pdev, R82600_EAP, &eapr);
-
ecc_on = dramcr & BIT(5);
reg_sdram = dramcr & BIT(4);
scrub_disabled = eapr & BIT(31);
sdram_refresh_rate = dramcr & (BIT(0) | BIT(1));
-
- debugf2("MC: " __FILE__ ": %s(): sdram refresh rate = %#0x\n",
- __func__, sdram_refresh_rate);
-
- debugf2("MC: " __FILE__ ": %s(): DRAMC register = %#0x\n", __func__,
- dramcr);
-
+ debugf2("%s(): sdram refresh rate = %#0x\n", __func__,
+ sdram_refresh_rate);
+ debugf2("%s(): DRAMC register = %#0x\n", __func__, dramcr);
mci = edac_mc_alloc(0, R82600_NR_CSROWS, R82600_NR_CHANS);
if (mci == NULL) {
@@ -239,29 +231,28 @@ static int r82600_probe1(struct pci_dev *pdev, int dev_idx)
goto fail;
}
- debugf0("MC: " __FILE__ ": %s(): mci = %p\n", __func__, mci);
-
+ debugf0("%s(): mci = %p\n", __func__, mci);
mci->pdev = pdev;
mci->mtype_cap = MEM_FLAG_RDDR | MEM_FLAG_DDR;
-
mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED;
- /* FIXME try to work out if the chip leads have been *
- * used for COM2 instead on this board? [MA6?] MAYBE: */
+ /* FIXME try to work out if the chip leads have been used for COM2
+ * instead on this board? [MA6?] MAYBE:
+ */
/* On the R82600, the pins for memory bits 72:65 - i.e. the *
* EC bits are shared with the pins for COM2 (!), so if COM2 *
* is enabled, we assume COM2 is wired up, and thus no EDAC *
* is possible. */
mci->edac_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED;
+
if (ecc_on) {
if (scrub_disabled)
- debugf3("MC: " __FILE__ ": %s(): mci = %p - "
- "Scrubbing disabled! EAP: %#0x\n", __func__,
- mci, eapr);
+ debugf3("%s(): mci = %p - Scrubbing disabled! EAP: "
+ "%#0x\n", __func__, mci, eapr);
} else
mci->edac_cap = EDAC_FLAG_NONE;
- mci->mod_name = BS_MOD_STR;
+ mci->mod_name = EDAC_MOD_STR;
mci->mod_ver = "$Revision: 1.1.2.6 $";
mci->ctl_name = "R82600";
mci->edac_check = r82600_check;
@@ -276,23 +267,21 @@ static int r82600_probe1(struct pci_dev *pdev, int dev_idx)
/* find the DRAM Chip Select Base address and mask */
pci_read_config_byte(mci->pdev, R82600_DRBA + index, &drbar);
- debugf1("MC%d: " __FILE__ ": %s() Row=%d DRBA = %#0x\n",
- mci->mc_idx, __func__, index, drbar);
+ debugf1("MC%d: %s() Row=%d DRBA = %#0x\n", mci->mc_idx,
+ __func__, index, drbar);
row_high_limit = ((u32) drbar << 24);
/* row_high_limit = ((u32)drbar << 24) | 0xffffffUL; */
- debugf1("MC%d: " __FILE__ ": %s() Row=%d, "
- "Boundry Address=%#0x, Last = %#0x \n",
- mci->mc_idx, __func__, index, row_high_limit,
- row_high_limit_last);
+		debugf1("MC%d: %s() Row=%d, Boundary Address=%#0x, Last = "
+ "%#0x \n", mci->mc_idx, __func__, index,
+ row_high_limit, row_high_limit_last);
/* Empty row [p.57] */
if (row_high_limit == row_high_limit_last)
continue;
row_base = row_high_limit_last;
-
csrow->first_page = row_base >> PAGE_SHIFT;
csrow->last_page = (row_high_limit >> PAGE_SHIFT) - 1;
csrow->nr_pages = csrow->last_page - csrow->first_page + 1;
@@ -308,31 +297,22 @@ static int r82600_probe1(struct pci_dev *pdev, int dev_idx)
row_high_limit_last = row_high_limit;
}
- /* clear counters */
- /* FIXME should we? */
+ r82600_get_error_info(mci, &discard); /* clear counters */
if (edac_mc_add_mc(mci)) {
- debugf3("MC: " __FILE__
- ": %s(): failed edac_mc_add_mc()\n", __func__);
+ debugf3("%s(): failed edac_mc_add_mc()\n", __func__);
goto fail;
}
/* get this far and it's successful */
- /* Clear error flags to allow next error to be reported [p.62] */
- /* Test systems seem to always have the UE flag raised on boot */
-
- eap_init_bits = BIT(0) & BIT(1);
if (disable_hardware_scrub) {
- eap_init_bits |= BIT(31);
- debugf3("MC: " __FILE__ ": %s(): Disabling Hardware Scrub "
- "(scrub on error)\n", __func__);
+ debugf3("%s(): Disabling Hardware Scrub (scrub on error)\n",
+ __func__);
+ pci_write_bits32(mci->pdev, R82600_EAP, BIT(31), BIT(31));
}
- pci_write_bits32(mci->pdev, R82600_EAP, eap_init_bits,
- eap_init_bits);
-
- debugf3("MC: " __FILE__ ": %s(): success\n", __func__);
+ debugf3("%s(): success\n", __func__);
return 0;
fail:
@@ -344,62 +324,60 @@ fail:
/* returns count (>= 0), or negative on error */
static int __devinit r82600_init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+ const struct pci_device_id *ent)
{
- debugf0("MC: " __FILE__ ": %s()\n", __func__);
+ debugf0("%s()\n", __func__);
/* don't need to call pci_device_enable() */
return r82600_probe1(pdev, ent->driver_data);
}
-
static void __devexit r82600_remove_one(struct pci_dev *pdev)
{
struct mem_ctl_info *mci;
- debugf0(__FILE__ ": %s()\n", __func__);
+ debugf0("%s()\n", __func__);
- if (((mci = edac_mc_find_mci_by_pdev(pdev)) != NULL) &&
- !edac_mc_del_mc(mci))
- edac_mc_free(mci);
-}
+ if ((mci = edac_mc_del_mc(pdev)) == NULL)
+ return;
+ edac_mc_free(mci);
+}
static const struct pci_device_id r82600_pci_tbl[] __devinitdata = {
- {PCI_DEVICE(PCI_VENDOR_ID_RADISYS, R82600_BRIDGE_ID)},
- {0,} /* 0 terminated list. */
+ {
+ PCI_DEVICE(PCI_VENDOR_ID_RADISYS, R82600_BRIDGE_ID)
+ },
+ {
+ 0,
+ } /* 0 terminated list. */
};
MODULE_DEVICE_TABLE(pci, r82600_pci_tbl);
-
static struct pci_driver r82600_driver = {
- .name = BS_MOD_STR,
+ .name = EDAC_MOD_STR,
.probe = r82600_init_one,
.remove = __devexit_p(r82600_remove_one),
.id_table = r82600_pci_tbl,
};
-
static int __init r82600_init(void)
{
return pci_register_driver(&r82600_driver);
}
-
static void __exit r82600_exit(void)
{
pci_unregister_driver(&r82600_driver);
}
-
module_init(r82600_init);
module_exit(r82600_exit);
-
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Tim Small <tim@buttersideup.com> - WPAD Ltd. "
- "on behalf of EADS Astrium");
+ "on behalf of EADS Astrium");
MODULE_DESCRIPTION("MC support for Radisys 82600 memory controllers");
module_param(disable_hardware_scrub, bool, 0644);
diff --git a/drivers/firmware/efivars.c b/drivers/firmware/efivars.c
index 343379f23a5..9b7e4d52ffd 100644
--- a/drivers/firmware/efivars.c
+++ b/drivers/firmware/efivars.c
@@ -568,20 +568,20 @@ systab_read(struct subsystem *entry, char *buf)
if (!entry || !buf)
return -EINVAL;
- if (efi.mps)
- str += sprintf(str, "MPS=0x%lx\n", __pa(efi.mps));
- if (efi.acpi20)
- str += sprintf(str, "ACPI20=0x%lx\n", __pa(efi.acpi20));
- if (efi.acpi)
- str += sprintf(str, "ACPI=0x%lx\n", __pa(efi.acpi));
- if (efi.smbios)
- str += sprintf(str, "SMBIOS=0x%lx\n", __pa(efi.smbios));
- if (efi.hcdp)
- str += sprintf(str, "HCDP=0x%lx\n", __pa(efi.hcdp));
- if (efi.boot_info)
- str += sprintf(str, "BOOTINFO=0x%lx\n", __pa(efi.boot_info));
- if (efi.uga)
- str += sprintf(str, "UGA=0x%lx\n", __pa(efi.uga));
+ if (efi.mps != EFI_INVALID_TABLE_ADDR)
+ str += sprintf(str, "MPS=0x%lx\n", efi.mps);
+ if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
+ str += sprintf(str, "ACPI20=0x%lx\n", efi.acpi20);
+ if (efi.acpi != EFI_INVALID_TABLE_ADDR)
+ str += sprintf(str, "ACPI=0x%lx\n", efi.acpi);
+ if (efi.smbios != EFI_INVALID_TABLE_ADDR)
+ str += sprintf(str, "SMBIOS=0x%lx\n", efi.smbios);
+ if (efi.hcdp != EFI_INVALID_TABLE_ADDR)
+ str += sprintf(str, "HCDP=0x%lx\n", efi.hcdp);
+ if (efi.boot_info != EFI_INVALID_TABLE_ADDR)
+ str += sprintf(str, "BOOTINFO=0x%lx\n", efi.boot_info);
+ if (efi.uga != EFI_INVALID_TABLE_ADDR)
+ str += sprintf(str, "UGA=0x%lx\n", efi.uga);
return str - buf;
}
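The systab_read() hunk above stops treating the EFI table fields as kernel virtual pointers (NULL-tested and run through __pa()) and instead compares each field against the EFI_INVALID_TABLE_ADDR sentinel, printing the stored physical address directly. Any further table field follows the same two-line pattern; a minimal sketch, where efi.example is a hypothetical unsigned long field used only for illustration:

	if (efi.example != EFI_INVALID_TABLE_ADDR)
		str += sprintf(str, "EXAMPLE=0x%lx\n", efi.example);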
diff --git a/drivers/firmware/pcdp.c b/drivers/firmware/pcdp.c
index ae1fb45dbb4..c37baf9448b 100644
--- a/drivers/firmware/pcdp.c
+++ b/drivers/firmware/pcdp.c
@@ -89,19 +89,20 @@ efi_setup_pcdp_console(char *cmdline)
struct pcdp_uart *uart;
struct pcdp_device *dev, *end;
int i, serial = 0;
+ int rc = -ENODEV;
- pcdp = efi.hcdp;
- if (!pcdp)
+ if (efi.hcdp == EFI_INVALID_TABLE_ADDR)
return -ENODEV;
- printk(KERN_INFO "PCDP: v%d at 0x%lx\n", pcdp->rev, __pa(pcdp));
+ pcdp = ioremap(efi.hcdp, 4096);
+ printk(KERN_INFO "PCDP: v%d at 0x%lx\n", pcdp->rev, efi.hcdp);
if (strstr(cmdline, "console=hcdp")) {
if (pcdp->rev < 3)
serial = 1;
} else if (strstr(cmdline, "console=")) {
printk(KERN_INFO "Explicit \"console=\"; ignoring PCDP\n");
- return -ENODEV;
+ goto out;
}
if (pcdp->rev < 3 && efi_uart_console_only())
@@ -110,7 +111,8 @@ efi_setup_pcdp_console(char *cmdline)
for (i = 0, uart = pcdp->uart; i < pcdp->num_uarts; i++, uart++) {
if (uart->flags & PCDP_UART_PRIMARY_CONSOLE || serial) {
if (uart->type == PCDP_CONSOLE_UART) {
- return setup_serial_console(uart);
+ rc = setup_serial_console(uart);
+ goto out;
}
}
}
@@ -121,10 +123,13 @@ efi_setup_pcdp_console(char *cmdline)
dev = (struct pcdp_device *) ((u8 *) dev + dev->length)) {
if (dev->flags & PCDP_PRIMARY_CONSOLE) {
if (dev->type == PCDP_CONSOLE_VGA) {
- return setup_vga_console(dev);
+ rc = setup_vga_console(dev);
+ goto out;
}
}
}
- return -ENODEV;
+out:
+ iounmap(pcdp);
+ return rc;
}
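efi_setup_pcdp_console() now ioremap()s the HCDP/PCDP table instead of dereferencing efi.hcdp as a kernel pointer, and routes every exit taken after the mapping through the single out: label so the table is always unmapped. A minimal sketch of that map/use/unmap shape, where phys_addr, table_looks_valid() and parse_table() are hypothetical placeholders:

	void __iomem *tbl;
	int rc = -ENODEV;

	tbl = ioremap(phys_addr, 4096);
	if (!tbl)
		return -ENOMEM;
	if (!table_looks_valid(tbl))	/* hypothetical sanity check */
		goto out;
	rc = parse_table(tbl);		/* hypothetical parser */
	out:
	iounmap(tbl);
	return rc;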
diff --git a/drivers/ieee1394/highlevel.c b/drivers/ieee1394/highlevel.c
index 734b121a055..491e6032bde 100644
--- a/drivers/ieee1394/highlevel.c
+++ b/drivers/ieee1394/highlevel.c
@@ -306,8 +306,7 @@ u64 hpsb_allocate_and_register_addrspace(struct hpsb_highlevel *hl,
u64 align_mask = ~(alignment - 1);
if ((alignment & 3) || (alignment > 0x800000000000ULL) ||
- ((hweight32(alignment >> 32) +
- hweight32(alignment & 0xffffffff) != 1))) {
+ (hweight64(alignment) != 1)) {
HPSB_ERR("%s called with invalid alignment: 0x%048llx",
__FUNCTION__, (unsigned long long)alignment);
return retval;
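The hweight64() change above folds the two 32-bit popcounts into one: hweight64(alignment) == 1 holds exactly when alignment is a nonzero power of two, which is what the address-space allocator requires. The classic bit trick is equivalent and shown here only for comparison:

	/* true for any nonzero power of two */
	static inline int is_pow2(u64 x)
	{
		return x && (x & (x - 1)) == 0;
	}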
diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c
index a81f987978c..46d1fec2cfd 100644
--- a/drivers/input/touchscreen/ads7846.c
+++ b/drivers/input/touchscreen/ads7846.c
@@ -23,7 +23,7 @@
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/spi/ads7846.h>
-#include <linux/interrupt.h>
+#include <asm/irq.h>
#ifdef CONFIG_ARM
#include <asm/mach-types.h>
diff --git a/drivers/isdn/Makefile b/drivers/isdn/Makefile
index 03d8ccd5195..988142c30a6 100644
--- a/drivers/isdn/Makefile
+++ b/drivers/isdn/Makefile
@@ -13,3 +13,4 @@ obj-$(CONFIG_ISDN_DRV_SC) += sc/
obj-$(CONFIG_ISDN_DRV_LOOP) += isdnloop/
obj-$(CONFIG_ISDN_DRV_ACT2000) += act2000/
obj-$(CONFIG_HYSDN) += hysdn/
+obj-$(CONFIG_ISDN_DRV_GIGASET) += gigaset/
diff --git a/drivers/isdn/gigaset/Kconfig b/drivers/isdn/gigaset/Kconfig
new file mode 100644
index 00000000000..53c4fb62ed8
--- /dev/null
+++ b/drivers/isdn/gigaset/Kconfig
@@ -0,0 +1,42 @@
+menu "Siemens Gigaset"
+ depends on ISDN_I4L
+
+config ISDN_DRV_GIGASET
+ tristate "Siemens Gigaset support (isdn)"
+ depends on ISDN_I4L && m
+# depends on ISDN_I4L && MODULES
+ help
+ Say m here if you have a Gigaset or Sinus isdn device.
+
+if ISDN_DRV_GIGASET!=n
+
+config GIGASET_BASE
+ tristate "Gigaset base station support"
+ depends on ISDN_DRV_GIGASET && USB
+ help
+ Say m here if you need to communicate with the base
+ directly via USB.
+
+config GIGASET_M105
+ tristate "Gigaset M105 support"
+ depends on ISDN_DRV_GIGASET && USB
+ help
+ Say m here if you need the driver for the Gigaset M105 device.
+
+config GIGASET_DEBUG
+ bool "Gigaset debugging"
+ help
+ This enables debugging code in the Gigaset drivers.
+ If in doubt, say yes.
+
+config GIGASET_UNDOCREQ
+ bool "Support for undocumented USB requests"
+ help
+ This enables support for USB requests we only know from
+ reverse engineering (currently M105 only). If you need
+ features like configuration mode of M105, say yes. If you
+ care about your device, say no.
+
+endif
+
+endmenu
diff --git a/drivers/isdn/gigaset/Makefile b/drivers/isdn/gigaset/Makefile
new file mode 100644
index 00000000000..9b9acf1a21a
--- /dev/null
+++ b/drivers/isdn/gigaset/Makefile
@@ -0,0 +1,6 @@
+gigaset-y := common.o interface.o proc.o ev-layer.o i4l.o
+usb_gigaset-y := usb-gigaset.o asyncdata.o
+bas_gigaset-y := bas-gigaset.o isocdata.o
+
+obj-$(CONFIG_GIGASET_M105) += usb_gigaset.o gigaset.o
+obj-$(CONFIG_GIGASET_BASE) += bas_gigaset.o gigaset.o
diff --git a/drivers/isdn/gigaset/asyncdata.c b/drivers/isdn/gigaset/asyncdata.c
new file mode 100644
index 00000000000..171f8b703d6
--- /dev/null
+++ b/drivers/isdn/gigaset/asyncdata.c
@@ -0,0 +1,597 @@
+/*
+ * Common data handling layer for ser_gigaset and usb_gigaset
+ *
+ * Copyright (c) 2005 by Tilman Schmidt <tilman@imap.cc>,
+ * Hansjoerg Lipp <hjlipp@web.de>,
+ * Stefan Eilers <Eilers.Stefan@epost.de>.
+ *
+ * =====================================================================
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ * =====================================================================
+ * ToDo: ...
+ * =====================================================================
+ * Version: $Id: asyncdata.c,v 1.2.2.7 2005/11/13 23:05:18 hjlipp Exp $
+ * =====================================================================
+ */
+
+#include "gigaset.h"
+#include <linux/crc-ccitt.h>
+
+//#define GIG_M10x_STUFF_VOICE_DATA
+
+/* check if byte must be stuffed/escaped
+ * I'm not sure exactly which bytes need to be escaped.
+ * Therefore I take the safe route and escape every value
+ * less than 0x20, the flag sequence and the control escape char.
+ */
+static inline int muststuff(unsigned char c)
+{
+ if (c < PPP_TRANS) return 1;
+ if (c == PPP_FLAG) return 1;
+ if (c == PPP_ESCAPE) return 1;
+ /* other possible candidates: */
+ /* 0x91: XON with parity set */
+ /* 0x93: XOFF with parity set */
+ return 0;
+}
+
+/* == data input =========================================================== */
+
+/* process a block of received bytes in command mode (modem response)
+ * Return value:
+ * number of processed bytes
+ */
+static inline int cmd_loop(unsigned char c, unsigned char *src, int numbytes,
+ struct inbuf_t *inbuf)
+{
+ struct cardstate *cs = inbuf->cs;
+ unsigned cbytes = cs->cbytes;
+ int inputstate = inbuf->inputstate;
+ int startbytes = numbytes;
+
+ for (;;) {
+ cs->respdata[cbytes] = c;
+ if (c == 10 || c == 13) {
+ dbg(DEBUG_TRANSCMD, "%s: End of Command (%d Bytes)",
+ __func__, cbytes);
+ cs->cbytes = cbytes;
+ gigaset_handle_modem_response(cs); /* can change cs->dle */
+ cbytes = 0;
+
+ if (cs->dle &&
+ !(inputstate & INS_DLE_command)) {
+ inputstate &= ~INS_command;
+ break;
+ }
+ } else {
+ /* advance in line buffer, checking for overflow */
+ if (cbytes < MAX_RESP_SIZE - 1)
+ cbytes++;
+ else
+ warn("response too large");
+ }
+
+ if (!numbytes)
+ break;
+ c = *src++;
+ --numbytes;
+ if (c == DLE_FLAG &&
+ (cs->dle || inputstate & INS_DLE_command)) {
+ inputstate |= INS_DLE_char;
+ break;
+ }
+ }
+
+ cs->cbytes = cbytes;
+ inbuf->inputstate = inputstate;
+
+ return startbytes - numbytes;
+}
+
+/* process a block of received bytes in lock mode (tty i/f)
+ * Return value:
+ * number of processed bytes
+ */
+static inline int lock_loop(unsigned char *src, int numbytes,
+ struct inbuf_t *inbuf)
+{
+ struct cardstate *cs = inbuf->cs;
+
+ gigaset_dbg_buffer(DEBUG_LOCKCMD, "received response", numbytes, src, 0);
+ gigaset_if_receive(cs, src, numbytes);
+
+ return numbytes;
+}
+
+/* process a block of received bytes in HDLC data mode
+ * Collect HDLC frames, undoing byte stuffing and watching for DLE escapes.
+ * When a frame is complete, check the FCS and pass valid frames to the LL.
+ * If DLE is encountered, return immediately to let the caller handle it.
+ * Return value:
+ * number of processed bytes
+ * numbytes (all bytes processed) on error --FIXME
+ */
+static inline int hdlc_loop(unsigned char c, unsigned char *src, int numbytes,
+ struct inbuf_t *inbuf)
+{
+ struct cardstate *cs = inbuf->cs;
+ struct bc_state *bcs = inbuf->bcs;
+ int inputstate;
+ __u16 fcs;
+ struct sk_buff *skb;
+ unsigned char error;
+ struct sk_buff *compskb;
+ int startbytes = numbytes;
+ int l;
+
+ IFNULLRETVAL(bcs, numbytes);
+ inputstate = bcs->inputstate;
+ fcs = bcs->fcs;
+ skb = bcs->skb;
+ IFNULLRETVAL(skb, numbytes);
+
+ if (unlikely(inputstate & INS_byte_stuff)) {
+ inputstate &= ~INS_byte_stuff;
+ goto byte_stuff;
+ }
+ for (;;) {
+ if (unlikely(c == PPP_ESCAPE)) {
+ if (unlikely(!numbytes)) {
+ inputstate |= INS_byte_stuff;
+ break;
+ }
+ c = *src++;
+ --numbytes;
+ if (unlikely(c == DLE_FLAG &&
+ (cs->dle ||
+ inbuf->inputstate & INS_DLE_command))) {
+ inbuf->inputstate |= INS_DLE_char;
+ inputstate |= INS_byte_stuff;
+ break;
+ }
+byte_stuff:
+ c ^= PPP_TRANS;
+#ifdef CONFIG_GIGASET_DEBUG
+ if (unlikely(!muststuff(c)))
+ dbg(DEBUG_HDLC,
+ "byte stuffed: 0x%02x", c);
+#endif
+ } else if (unlikely(c == PPP_FLAG)) {
+ if (unlikely(inputstate & INS_skip_frame)) {
+ if (!(inputstate & INS_have_data)) { /* 7E 7E */
+ //dbg(DEBUG_HDLC, "(7e)7e------------------------");
+#ifdef CONFIG_GIGASET_DEBUG
+ ++bcs->emptycount;
+#endif
+ } else
+ dbg(DEBUG_HDLC,
+ "7e----------------------------");
+
+ /* end of frame */
+ error = 1;
+ gigaset_rcv_error(NULL, cs, bcs);
+ } else if (!(inputstate & INS_have_data)) { /* 7E 7E */
+ //dbg(DEBUG_HDLC, "(7e)7e------------------------");
+#ifdef CONFIG_GIGASET_DEBUG
+ ++bcs->emptycount;
+#endif
+ break;
+ } else {
+ dbg(DEBUG_HDLC,
+ "7e----------------------------");
+
+ /* end of frame */
+ error = 0;
+
+ if (unlikely(fcs != PPP_GOODFCS)) {
+ err("Packet checksum at %lu failed, "
+ "packet is corrupted (%u bytes)!",
+ bcs->rcvbytes, skb->len);
+ compskb = NULL;
+ gigaset_rcv_error(compskb, cs, bcs);
+ error = 1;
+ } else {
+ if (likely((l = skb->len) > 2)) {
+ skb->tail -= 2;
+ skb->len -= 2;
+ } else {
+ dev_kfree_skb(skb);
+ skb = NULL;
+ inputstate |= INS_skip_frame;
+ if (l == 1) {
+ err("invalid packet size (1)!");
+ error = 1;
+ gigaset_rcv_error(NULL, cs, bcs);
+ }
+ }
+ if (likely(!(error ||
+ (inputstate &
+ INS_skip_frame)))) {
+ gigaset_rcv_skb(skb, cs, bcs);
+ }
+ }
+ }
+
+ if (unlikely(error))
+ if (skb)
+ dev_kfree_skb(skb);
+
+ fcs = PPP_INITFCS;
+ inputstate &= ~(INS_have_data | INS_skip_frame);
+ if (unlikely(bcs->ignore)) {
+ inputstate |= INS_skip_frame;
+ skb = NULL;
+ } else if (likely((skb = dev_alloc_skb(SBUFSIZE + HW_HDR_LEN)) != NULL)) {
+ skb_reserve(skb, HW_HDR_LEN);
+ } else {
+ warn("could not allocate new skb");
+ inputstate |= INS_skip_frame;
+ }
+
+ break;
+#ifdef CONFIG_GIGASET_DEBUG
+ } else if (unlikely(muststuff(c))) {
+ /* Should not happen. Possible after ZDLE=1<CR><LF>. */
+ dbg(DEBUG_HDLC, "not byte stuffed: 0x%02x", c);
+#endif
+ }
+
+ /* add character */
+
+#ifdef CONFIG_GIGASET_DEBUG
+ if (unlikely(!(inputstate & INS_have_data))) {
+ dbg(DEBUG_HDLC,
+ "7e (%d x) ================", bcs->emptycount);
+ bcs->emptycount = 0;
+ }
+#endif
+
+ inputstate |= INS_have_data;
+
+ if (likely(!(inputstate & INS_skip_frame))) {
+ if (unlikely(skb->len == SBUFSIZE)) {
+ warn("received packet too long");
+ dev_kfree_skb_any(skb);
+ skb = NULL;
+ inputstate |= INS_skip_frame;
+ break;
+ }
+ *gigaset_skb_put_quick(skb, 1) = c;
+ /* *__skb_put (skb, 1) = c; */
+ fcs = crc_ccitt_byte(fcs, c);
+ }
+
+ if (unlikely(!numbytes))
+ break;
+ c = *src++;
+ --numbytes;
+ if (unlikely(c == DLE_FLAG &&
+ (cs->dle ||
+ inbuf->inputstate & INS_DLE_command))) {
+ inbuf->inputstate |= INS_DLE_char;
+ break;
+ }
+ }
+ bcs->inputstate = inputstate;
+ bcs->fcs = fcs;
+ bcs->skb = skb;
+ return startbytes - numbytes;
+}
+
+/* process a block of received bytes in transparent data mode
+ * Invert bytes, undoing byte stuffing and watching for DLE escapes.
+ * If DLE is encountered, return immediately to let the caller handle it.
+ * Return value:
+ * number of processed bytes
+ * numbytes (all bytes processed) on error --FIXME
+ */
+static inline int iraw_loop(unsigned char c, unsigned char *src, int numbytes,
+ struct inbuf_t *inbuf)
+{
+ struct cardstate *cs = inbuf->cs;
+ struct bc_state *bcs = inbuf->bcs;
+ int inputstate;
+ struct sk_buff *skb;
+ int startbytes = numbytes;
+
+ IFNULLRETVAL(bcs, numbytes);
+ inputstate = bcs->inputstate;
+ skb = bcs->skb;
+ IFNULLRETVAL(skb, numbytes);
+
+ for (;;) {
+ /* add character */
+ inputstate |= INS_have_data;
+
+ if (likely(!(inputstate & INS_skip_frame))) {
+ if (unlikely(skb->len == SBUFSIZE)) {
+ //FIXME just pass skb up and allocate a new one
+ warn("received packet too long");
+ dev_kfree_skb_any(skb);
+ skb = NULL;
+ inputstate |= INS_skip_frame;
+ break;
+ }
+ *gigaset_skb_put_quick(skb, 1) = gigaset_invtab[c];
+ }
+
+ if (unlikely(!numbytes))
+ break;
+ c = *src++;
+ --numbytes;
+ if (unlikely(c == DLE_FLAG &&
+ (cs->dle ||
+ inbuf->inputstate & INS_DLE_command))) {
+ inbuf->inputstate |= INS_DLE_char;
+ break;
+ }
+ }
+
+ /* pass data up */
+ if (likely(inputstate & INS_have_data)) {
+ if (likely(!(inputstate & INS_skip_frame))) {
+ gigaset_rcv_skb(skb, cs, bcs);
+ }
+ inputstate &= ~(INS_have_data | INS_skip_frame);
+ if (unlikely(bcs->ignore)) {
+ inputstate |= INS_skip_frame;
+ skb = NULL;
+ } else if (likely((skb = dev_alloc_skb(SBUFSIZE + HW_HDR_LEN))
+ != NULL)) {
+ skb_reserve(skb, HW_HDR_LEN);
+ } else {
+ warn("could not allocate new skb");
+ inputstate |= INS_skip_frame;
+ }
+ }
+
+ bcs->inputstate = inputstate;
+ bcs->skb = skb;
+ return startbytes - numbytes;
+}
+
+/* process a block of data received from the device
+ */
+void gigaset_m10x_input(struct inbuf_t *inbuf)
+{
+ struct cardstate *cs;
+ unsigned tail, head, numbytes;
+ unsigned char *src, c;
+ int procbytes;
+
+ head = atomic_read(&inbuf->head);
+ tail = atomic_read(&inbuf->tail);
+ dbg(DEBUG_INTR, "buffer state: %u -> %u", head, tail);
+
+ if (head != tail) {
+ cs = inbuf->cs;
+ src = inbuf->data + head;
+ numbytes = (head > tail ? RBUFSIZE : tail) - head;
+ dbg(DEBUG_INTR, "processing %u bytes", numbytes);
+
+ while (numbytes) {
+ if (atomic_read(&cs->mstate) == MS_LOCKED) {
+ procbytes = lock_loop(src, numbytes, inbuf);
+ src += procbytes;
+ numbytes -= procbytes;
+ } else {
+ c = *src++;
+ --numbytes;
+ if (c == DLE_FLAG && (cs->dle ||
+ inbuf->inputstate & INS_DLE_command)) {
+ if (!(inbuf->inputstate & INS_DLE_char)) {
+ inbuf->inputstate |= INS_DLE_char;
+ goto nextbyte;
+ }
+ /* <DLE> <DLE> => <DLE> in data stream */
+ inbuf->inputstate &= ~INS_DLE_char;
+ }
+
+ if (!(inbuf->inputstate & INS_DLE_char)) {
+
+					/* FIXME simply set a function pointer in cs depending on the mode [here + hdlc_loop]? */
+					/* FIXME would save the following "if" and allow other protocols */
+ if (inbuf->inputstate & INS_command)
+ procbytes = cmd_loop(c, src, numbytes, inbuf);
+ else if (inbuf->bcs->proto2 == ISDN_PROTO_L2_HDLC)
+ procbytes = hdlc_loop(c, src, numbytes, inbuf);
+ else
+ procbytes = iraw_loop(c, src, numbytes, inbuf);
+
+ src += procbytes;
+ numbytes -= procbytes;
+ } else { /* DLE-char */
+ inbuf->inputstate &= ~INS_DLE_char;
+ switch (c) {
+					case 'X': /* start of command */
+#ifdef CONFIG_GIGASET_DEBUG
+ if (inbuf->inputstate & INS_command)
+ err("received <DLE> 'X' in command mode");
+#endif
+ inbuf->inputstate |=
+ INS_command | INS_DLE_command;
+ break;
+ case '.': /*end of command*/
+#ifdef CONFIG_GIGASET_DEBUG
+ if (!(inbuf->inputstate & INS_command))
+ err("received <DLE> '.' in hdlc mode");
+#endif
+ inbuf->inputstate &= cs->dle ?
+ ~(INS_DLE_command|INS_command)
+ : ~INS_DLE_command;
+ break;
+					//case DLE_FLAG: /*DLE_FLAG in data stream*/ /* already handled above! */
+ default:
+ err("received 0x10 0x%02x!", (int) c);
+ /* FIXME: reset driver?? */
+ }
+ }
+ }
+nextbyte:
+ if (!numbytes) {
+ /* end of buffer, check for wrap */
+ if (head > tail) {
+ head = 0;
+ src = inbuf->data;
+ numbytes = tail;
+ } else {
+ head = tail;
+ break;
+ }
+ }
+ }
+
+ dbg(DEBUG_INTR, "setting head to %u", head);
+ atomic_set(&inbuf->head, head);
+ }
+}
+
+
+/* == data output ========================================================== */
+
+/* Encoding of a PPP packet into an octet stuffed HDLC frame
+ * with FCS, opening and closing flags.
+ * parameters:
+ * skb skb containing original packet (freed upon return)
+ * head number of headroom bytes to allocate in result skb
+ * tail number of tailroom bytes to allocate in result skb
+ * Return value:
+ * pointer to newly allocated skb containing the result frame
+ */
+static struct sk_buff *HDLC_Encode(struct sk_buff *skb, int head, int tail)
+{
+ struct sk_buff *hdlc_skb;
+ __u16 fcs;
+ unsigned char c;
+ unsigned char *cp;
+ int len;
+ unsigned int stuf_cnt;
+
+ stuf_cnt = 0;
+ fcs = PPP_INITFCS;
+ cp = skb->data;
+ len = skb->len;
+ while (len--) {
+ if (muststuff(*cp))
+ stuf_cnt++;
+ fcs = crc_ccitt_byte(fcs, *cp++);
+ }
+ fcs ^= 0xffff; /* complement */
+
+ /* size of new buffer: original size + number of stuffing bytes
+ * + 2 bytes FCS + 2 stuffing bytes for FCS (if needed) + 2 flag bytes
+ */
+ hdlc_skb = dev_alloc_skb(skb->len + stuf_cnt + 6 + tail + head);
+ if (!hdlc_skb) {
+ err("unable to allocate memory for HDLC encoding!");
+ dev_kfree_skb(skb);
+ return NULL;
+ }
+ skb_reserve(hdlc_skb, head);
+
+ /* Copy acknowledge request into new skb */
+ memcpy(hdlc_skb->head, skb->head, 2);
+
+ /* Add flag sequence in front of everything.. */
+ *(skb_put(hdlc_skb, 1)) = PPP_FLAG;
+
+ /* Perform byte stuffing while copying data. */
+ while (skb->len--) {
+ if (muststuff(*skb->data)) {
+ *(skb_put(hdlc_skb, 1)) = PPP_ESCAPE;
+ *(skb_put(hdlc_skb, 1)) = (*skb->data++) ^ PPP_TRANS;
+ } else
+ *(skb_put(hdlc_skb, 1)) = *skb->data++;
+ }
+
+ /* Finally add FCS (byte stuffed) and flag sequence */
+ c = (fcs & 0x00ff); /* least significant byte first */
+ if (muststuff(c)) {
+ *(skb_put(hdlc_skb, 1)) = PPP_ESCAPE;
+ c ^= PPP_TRANS;
+ }
+ *(skb_put(hdlc_skb, 1)) = c;
+
+ c = ((fcs >> 8) & 0x00ff);
+ if (muststuff(c)) {
+ *(skb_put(hdlc_skb, 1)) = PPP_ESCAPE;
+ c ^= PPP_TRANS;
+ }
+ *(skb_put(hdlc_skb, 1)) = c;
+
+ *(skb_put(hdlc_skb, 1)) = PPP_FLAG;
+
+ dev_kfree_skb(skb);
+ return hdlc_skb;
+}
+
+/* Encoding of a raw packet into an octet stuffed bit inverted frame
+ * parameters:
+ * skb skb containing original packet (freed upon return)
+ * head number of headroom bytes to allocate in result skb
+ * tail number of tailroom bytes to allocate in result skb
+ * Return value:
+ * pointer to newly allocated skb containing the result frame
+ */
+static struct sk_buff *iraw_encode(struct sk_buff *skb, int head, int tail)
+{
+ struct sk_buff *iraw_skb;
+ unsigned char c;
+ unsigned char *cp;
+ int len;
+
+ /* worst case: every byte must be stuffed */
+ iraw_skb = dev_alloc_skb(2*skb->len + tail + head);
+ if (!iraw_skb) {
+		err("unable to allocate memory for raw encoding!");
+ dev_kfree_skb(skb);
+ return NULL;
+ }
+ skb_reserve(iraw_skb, head);
+
+ cp = skb->data;
+ len = skb->len;
+ while (len--) {
+ c = gigaset_invtab[*cp++];
+ if (c == DLE_FLAG)
+ *(skb_put(iraw_skb, 1)) = c;
+ *(skb_put(iraw_skb, 1)) = c;
+ }
+ dev_kfree_skb(skb);
+ return iraw_skb;
+}
+
+/* gigaset_send_skb
+ * called by common.c to queue an skb for sending
+ * and start transmission if necessary
+ * parameters:
+ * B Channel control structure
+ * skb
+ * Return value:
+ * number of bytes accepted for sending
+ * (skb->len if ok, 0 if out of buffer space)
+ * or error code (< 0, eg. -EINVAL)
+ */
+int gigaset_m10x_send_skb(struct bc_state *bcs, struct sk_buff *skb)
+{
+ unsigned len;
+
+ IFNULLRETVAL(bcs, -EFAULT);
+ IFNULLRETVAL(skb, -EFAULT);
+ len = skb->len;
+
+ if (bcs->proto2 == ISDN_PROTO_L2_HDLC)
+ skb = HDLC_Encode(skb, HW_HDR_LEN, 0);
+ else
+ skb = iraw_encode(skb, HW_HDR_LEN, 0);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_queue_tail(&bcs->squeue, skb);
+ tasklet_schedule(&bcs->cs->write_tasklet);
+
+ return len; /* ok so far */
+}
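asyncdata.c's HDLC_Encode() above escapes every octet below 0x20 as well as the flag and escape octets themselves, frames the result between PPP_FLAG bytes and appends the complemented CRC-CCITT FCS. A minimal user-space sketch of the same stuffing rule, with the PPP constants written out locally since the kernel headers are not assumed here:

	#include <stdint.h>
	#include <stddef.h>

	#define MY_PPP_FLAG	0x7e	/* frame delimiter */
	#define MY_PPP_ESCAPE	0x7d	/* escape octet */
	#define MY_PPP_TRANS	0x20	/* XOR mask for escaped octets */

	/* write one (possibly escaped) octet into dst; returns bytes written */
	static size_t stuff_byte(uint8_t c, uint8_t *dst)
	{
		if (c < MY_PPP_TRANS || c == MY_PPP_FLAG || c == MY_PPP_ESCAPE) {
			dst[0] = MY_PPP_ESCAPE;
			dst[1] = c ^ MY_PPP_TRANS;
			return 2;
		}
		dst[0] = c;
		return 1;
	}

This mirrors the driver's muststuff() test; the receive path in hdlc_loop() undoes it by XOR-ing the octet following PPP_ESCAPE with PPP_TRANS again.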
diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c
new file mode 100644
index 00000000000..31f0f07832b
--- /dev/null
+++ b/drivers/isdn/gigaset/bas-gigaset.c
@@ -0,0 +1,2365 @@
+/*
+ * USB driver for Gigaset 307x base via direct USB connection.
+ *
+ * Copyright (c) 2001 by Hansjoerg Lipp <hjlipp@web.de>,
+ * Tilman Schmidt <tilman@imap.cc>,
+ * Stefan Eilers <Eilers.Stefan@epost.de>.
+ *
+ * Based on usb-gigaset.c.
+ *
+ * =====================================================================
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ * =====================================================================
+ * ToDo: ...
+ * =====================================================================
+ * Version: $Id: bas-gigaset.c,v 1.52.4.19 2006/02/04 18:28:16 hjlipp Exp $
+ * =====================================================================
+ */
+
+#include "gigaset.h"
+
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/timer.h>
+#include <linux/usb.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+
+/* Version Information */
+#define DRIVER_AUTHOR "Tilman Schmidt <tilman@imap.cc>, Hansjoerg Lipp <hjlipp@web.de>, Stefan Eilers <Eilers.Stefan@epost.de>"
+#define DRIVER_DESC "USB Driver for Gigaset 307x"
+
+
+/* Module parameters */
+
+static int startmode = SM_ISDN;
+static int cidmode = 1;
+
+module_param(startmode, int, S_IRUGO);
+module_param(cidmode, int, S_IRUGO);
+MODULE_PARM_DESC(startmode, "start in isdn4linux mode");
+MODULE_PARM_DESC(cidmode, "Call-ID mode");
+
+#define GIGASET_MINORS 1
+#define GIGASET_MINOR 16
+#define GIGASET_MODULENAME "bas_gigaset"
+#define GIGASET_DEVFSNAME "gig/bas/"
+#define GIGASET_DEVNAME "ttyGB"
+
+#define IF_WRITEBUF 256 //FIXME
+
+/* Values for the Gigaset 307x */
+#define USB_GIGA_VENDOR_ID 0x0681
+#define USB_GIGA_PRODUCT_ID 0x0001
+#define USB_4175_PRODUCT_ID 0x0002
+#define USB_SX303_PRODUCT_ID 0x0021
+#define USB_SX353_PRODUCT_ID 0x0022
+
+/* table of devices that work with this driver */
+static struct usb_device_id gigaset_table [] = {
+ { USB_DEVICE(USB_GIGA_VENDOR_ID, USB_GIGA_PRODUCT_ID) },
+ { USB_DEVICE(USB_GIGA_VENDOR_ID, USB_4175_PRODUCT_ID) },
+ { USB_DEVICE(USB_GIGA_VENDOR_ID, USB_SX303_PRODUCT_ID) },
+ { USB_DEVICE(USB_GIGA_VENDOR_ID, USB_SX353_PRODUCT_ID) },
+ { } /* Terminating entry */
+};
+
+MODULE_DEVICE_TABLE(usb, gigaset_table);
+
+/* Get a minor range for your devices from the usb maintainer */
+#define USB_SKEL_MINOR_BASE 200
+
+/*======================= local function prototypes =============================*/
+
+/* This function is called if a new device is connected to the USB port. It
+ * checks whether this new device belongs to this driver.
+ */
+static int gigaset_probe(struct usb_interface *interface,
+ const struct usb_device_id *id);
+
+/* Function will be called if the device is unplugged */
+static void gigaset_disconnect(struct usb_interface *interface);
+
+
+/*==============================================================================*/
+
+struct bas_cardstate {
+ struct usb_device *udev; /* USB device pointer */
+ struct usb_interface *interface; /* interface for this device */
+ unsigned char minor; /* starting minor number */
+
+ struct urb *urb_ctrl; /* control pipe default URB */
+ struct usb_ctrlrequest dr_ctrl;
+ struct timer_list timer_ctrl; /* control request timeout */
+
+ struct timer_list timer_atrdy; /* AT command ready timeout */
+ struct urb *urb_cmd_out; /* for sending AT commands */
+ struct usb_ctrlrequest dr_cmd_out;
+ int retry_cmd_out;
+
+ struct urb *urb_cmd_in; /* for receiving AT replies */
+ struct usb_ctrlrequest dr_cmd_in;
+ struct timer_list timer_cmd_in; /* receive request timeout */
+ unsigned char *rcvbuf; /* AT reply receive buffer */
+
+ struct urb *urb_int_in; /* URB for interrupt pipe */
+ unsigned char int_in_buf[3];
+
+ spinlock_t lock; /* locks all following */
+ atomic_t basstate; /* bitmap (BS_*) */
+ int pending; /* uncompleted base request */
+ int rcvbuf_size; /* size of AT receive buffer */
+ /* 0: no receive in progress */
+ int retry_cmd_in; /* receive req retry count */
+};
+
+/* status of direct USB connection to 307x base (bits in basstate) */
+#define BS_ATOPEN 0x001
+#define BS_B1OPEN 0x002
+#define BS_B2OPEN 0x004
+#define BS_ATREADY 0x008
+#define BS_INIT 0x010
+#define BS_ATTIMER 0x020
+
+
+static struct gigaset_driver *driver = NULL;
+static struct cardstate *cardstate = NULL;
+
+/* usb specific object needed to register this driver with the usb subsystem */
+static struct usb_driver gigaset_usb_driver = {
+ .name = GIGASET_MODULENAME,
+ .probe = gigaset_probe,
+ .disconnect = gigaset_disconnect,
+ .id_table = gigaset_table,
+};
+
+/* get message text for USB status code
+ */
+static char *get_usb_statmsg(int status)
+{
+ static char unkmsg[28];
+
+ switch (status) {
+ case 0:
+ return "success";
+ case -ENOENT:
+ return "canceled";
+ case -ECONNRESET:
+ return "canceled (async)";
+ case -EINPROGRESS:
+ return "pending";
+ case -EPROTO:
+ return "bit stuffing or unknown USB error";
+ case -EILSEQ:
+ return "Illegal byte sequence (CRC mismatch)";
+ case -EPIPE:
+ return "babble detect or endpoint stalled";
+ case -ENOSR:
+ return "buffer error";
+ case -ETIMEDOUT:
+ return "timed out";
+ case -ENODEV:
+ return "device not present";
+ case -EREMOTEIO:
+ return "short packet detected";
+ case -EXDEV:
+ return "partial isochronous transfer";
+ case -EINVAL:
+ return "invalid argument";
+ case -ENXIO:
+ return "URB already queued";
+ case -EAGAIN:
+ return "isochronous start frame too early or too much scheduled";
+ case -EFBIG:
+ return "too many isochronous frames requested";
+ case -EMSGSIZE:
+ return "endpoint message size zero";
+ case -ESHUTDOWN:
+ return "endpoint shutdown";
+ case -EBUSY:
+ return "another request pending";
+ default:
+ snprintf(unkmsg, sizeof(unkmsg), "unknown error %d", status);
+ return unkmsg;
+ }
+}
+
+/* usb_pipetype_str
+ * retrieve string representation of USB pipe type
+ */
+static inline char *usb_pipetype_str(int pipe)
+{
+ if (usb_pipeisoc(pipe))
+ return "Isoc";
+ if (usb_pipeint(pipe))
+ return "Int";
+ if (usb_pipecontrol(pipe))
+ return "Ctrl";
+ if (usb_pipebulk(pipe))
+ return "Bulk";
+ return "?";
+}
+
+/* dump_urb
+ * write content of URB to syslog for debugging
+ */
+static inline void dump_urb(enum debuglevel level, const char *tag,
+ struct urb *urb)
+{
+#ifdef CONFIG_GIGASET_DEBUG
+ int i;
+ IFNULLRET(tag);
+ dbg(level, "%s urb(0x%08lx)->{", tag, (unsigned long) urb);
+ if (urb) {
+ dbg(level,
+ " dev=0x%08lx, pipe=%s:EP%d/DV%d:%s, "
+ "status=%d, hcpriv=0x%08lx, transfer_flags=0x%x,",
+ (unsigned long) urb->dev,
+ usb_pipetype_str(urb->pipe),
+ usb_pipeendpoint(urb->pipe), usb_pipedevice(urb->pipe),
+ usb_pipein(urb->pipe) ? "in" : "out",
+ urb->status, (unsigned long) urb->hcpriv,
+ urb->transfer_flags);
+ dbg(level,
+ " transfer_buffer=0x%08lx[%d], actual_length=%d, "
+ "bandwidth=%d, setup_packet=0x%08lx,",
+ (unsigned long) urb->transfer_buffer,
+ urb->transfer_buffer_length, urb->actual_length,
+ urb->bandwidth, (unsigned long) urb->setup_packet);
+ dbg(level,
+ " start_frame=%d, number_of_packets=%d, interval=%d, "
+ "error_count=%d,",
+ urb->start_frame, urb->number_of_packets, urb->interval,
+ urb->error_count);
+ dbg(level,
+ " context=0x%08lx, complete=0x%08lx, iso_frame_desc[]={",
+ (unsigned long) urb->context,
+ (unsigned long) urb->complete);
+ for (i = 0; i < urb->number_of_packets; i++) {
+ struct usb_iso_packet_descriptor *pifd = &urb->iso_frame_desc[i];
+ dbg(level,
+ " {offset=%u, length=%u, actual_length=%u, "
+ "status=%u}",
+ pifd->offset, pifd->length, pifd->actual_length,
+ pifd->status);
+ }
+ }
+ dbg(level, "}}");
+#endif
+}
+
+/* read/set modem control bits etc. (m10x only) */
+static int gigaset_set_modem_ctrl(struct cardstate *cs, unsigned old_state,
+ unsigned new_state)
+{
+ return -EINVAL;
+}
+
+static int gigaset_baud_rate(struct cardstate *cs, unsigned cflag)
+{
+ return -EINVAL;
+}
+
+static int gigaset_set_line_ctrl(struct cardstate *cs, unsigned cflag)
+{
+ return -EINVAL;
+}
+
+/* error_hangup
+ * hang up any existing connection because of an unrecoverable error
+ * This function may be called from any context and takes care of scheduling
+ * the necessary actions for execution outside of interrupt context.
+ * argument:
+ * B channel control structure
+ */
+static inline void error_hangup(struct bc_state *bcs)
+{
+ struct cardstate *cs = bcs->cs;
+
+ dbg(DEBUG_ANY,
+ "%s: scheduling HUP for channel %d", __func__, bcs->channel);
+
+ if (!gigaset_add_event(cs, &bcs->at_state, EV_HUP, NULL, 0, NULL)) {
+ //FIXME what should we do?
+ return;
+ }
+
+ gigaset_schedule_event(cs);
+}
+
+/* error_reset
+ * reset Gigaset device because of an unrecoverable error
+ * This function may be called from any context and takes care of scheduling
+ * the necessary actions for execution outside of interrupt context.
+ * argument:
+ * controller state structure
+ */
+static inline void error_reset(struct cardstate *cs)
+{
+ //FIXME try to recover without bothering the user
+ err("unrecoverable error - please disconnect the Gigaset base to reset");
+}
+
+/* check_pending
+ * check for completion of pending control request
+ * parameter:
+ * urb USB request block of completed request
+ * urb->context = hardware specific controller state structure
+ */
+static void check_pending(struct bas_cardstate *ucs)
+{
+ unsigned long flags;
+
+ IFNULLRET(ucs);
+ IFNULLRET(cardstate);
+
+ spin_lock_irqsave(&ucs->lock, flags);
+ switch (ucs->pending) {
+ case 0:
+ break;
+ case HD_OPEN_ATCHANNEL:
+ if (atomic_read(&ucs->basstate) & BS_ATOPEN)
+ ucs->pending = 0;
+ break;
+ case HD_OPEN_B1CHANNEL:
+ if (atomic_read(&ucs->basstate) & BS_B1OPEN)
+ ucs->pending = 0;
+ break;
+ case HD_OPEN_B2CHANNEL:
+ if (atomic_read(&ucs->basstate) & BS_B2OPEN)
+ ucs->pending = 0;
+ break;
+ case HD_CLOSE_ATCHANNEL:
+ if (!(atomic_read(&ucs->basstate) & BS_ATOPEN))
+ ucs->pending = 0;
+ //wake_up_interruptible(cs->initwait);
+ //FIXME need own wait queue?
+ break;
+ case HD_CLOSE_B1CHANNEL:
+ if (!(atomic_read(&ucs->basstate) & BS_B1OPEN))
+ ucs->pending = 0;
+ break;
+ case HD_CLOSE_B2CHANNEL:
+ if (!(atomic_read(&ucs->basstate) & BS_B2OPEN))
+ ucs->pending = 0;
+ break;
+ case HD_DEVICE_INIT_ACK: /* no reply expected */
+ ucs->pending = 0;
+ break;
+ /* HD_READ_ATMESSAGE, HD_WRITE_ATMESSAGE, HD_RESET_INTERRUPTPIPE
+ * are handled separately and should never end up here
+ */
+ default:
+ warn("unknown pending request 0x%02x cleared", ucs->pending);
+ ucs->pending = 0;
+ }
+
+ if (!ucs->pending)
+ del_timer(&ucs->timer_ctrl);
+
+ spin_unlock_irqrestore(&ucs->lock, flags);
+}
+
+/* cmd_in_timeout
+ * timeout routine for command input request
+ * argument:
+ * controller state structure
+ */
+static void cmd_in_timeout(unsigned long data)
+{
+ struct cardstate *cs = (struct cardstate *) data;
+ struct bas_cardstate *ucs;
+ unsigned long flags;
+
+ IFNULLRET(cs);
+ ucs = cs->hw.bas;
+ IFNULLRET(ucs);
+
+ spin_lock_irqsave(&cs->lock, flags);
+ if (!atomic_read(&cs->connected)) {
+ dbg(DEBUG_USBREQ, "%s: disconnected", __func__);
+ spin_unlock_irqrestore(&cs->lock, flags);
+ return;
+ }
+ if (!ucs->rcvbuf_size) {
+ dbg(DEBUG_USBREQ, "%s: no receive in progress", __func__);
+ spin_unlock_irqrestore(&cs->lock, flags);
+ return;
+ }
+ spin_unlock_irqrestore(&cs->lock, flags);
+
+ err("timeout reading AT response");
+ error_reset(cs); //FIXME retry?
+}
+
+
+static void read_ctrl_callback(struct urb *urb, struct pt_regs *regs);
+
+/* atread_submit
+ * submit an HD_READ_ATMESSAGE command URB
+ * parameters:
+ * cs controller state structure
+ * timeout timeout in 1/10 sec., 0: none
+ * return value:
+ * 0 on success
+ * -EINVAL if a NULL pointer is encountered somewhere
+ * -EBUSY if another request is pending
+ * any URB submission error code
+ */
+static int atread_submit(struct cardstate *cs, int timeout)
+{
+ struct bas_cardstate *ucs;
+ int ret;
+
+ IFNULLRETVAL(cs, -EINVAL);
+ ucs = cs->hw.bas;
+ IFNULLRETVAL(ucs, -EINVAL);
+ IFNULLRETVAL(ucs->urb_cmd_in, -EINVAL);
+
+ dbg(DEBUG_USBREQ, "-------> HD_READ_ATMESSAGE (%d)", ucs->rcvbuf_size);
+
+ if (ucs->urb_cmd_in->status == -EINPROGRESS) {
+ err("could not submit HD_READ_ATMESSAGE: URB busy");
+ return -EBUSY;
+ }
+
+ ucs->dr_cmd_in.bRequestType = IN_VENDOR_REQ;
+ ucs->dr_cmd_in.bRequest = HD_READ_ATMESSAGE;
+ ucs->dr_cmd_in.wValue = 0;
+ ucs->dr_cmd_in.wIndex = 0;
+ ucs->dr_cmd_in.wLength = cpu_to_le16(ucs->rcvbuf_size);
+ usb_fill_control_urb(ucs->urb_cmd_in, ucs->udev,
+ usb_rcvctrlpipe(ucs->udev, 0),
+ (unsigned char*) & ucs->dr_cmd_in,
+ ucs->rcvbuf, ucs->rcvbuf_size,
+ read_ctrl_callback, cs->inbuf);
+
+ if ((ret = usb_submit_urb(ucs->urb_cmd_in, SLAB_ATOMIC)) != 0) {
+ err("could not submit HD_READ_ATMESSAGE: %s",
+ get_usb_statmsg(ret));
+ return ret;
+ }
+
+ if (timeout > 0) {
+ dbg(DEBUG_USBREQ, "setting timeout of %d/10 secs", timeout);
+ ucs->timer_cmd_in.expires = jiffies + timeout * HZ / 10;
+ ucs->timer_cmd_in.data = (unsigned long) cs;
+ ucs->timer_cmd_in.function = cmd_in_timeout;
+ add_timer(&ucs->timer_cmd_in);
+ }
+ return 0;
+}
+
+static void stopurbs(struct bas_bc_state *);
+static int start_cbsend(struct cardstate *);
+
+/* set/clear bits in base connection state
+ */
+static inline void update_basstate(struct bas_cardstate *ucs,
+ int set, int clear)
+{
+ unsigned long flags;
+ int state;
+
+ spin_lock_irqsave(&ucs->lock, flags);
+ state = atomic_read(&ucs->basstate);
+ state &= ~clear;
+ state |= set;
+ atomic_set(&ucs->basstate, state);
+ spin_unlock_irqrestore(&ucs->lock, flags);
+}
+
+
+/* read_int_callback
+ * USB completion handler for interrupt pipe input
+ * called by the USB subsystem in interrupt context
+ * parameter:
+ * urb USB request block
+ * urb->context = controller state structure
+ */
+static void read_int_callback(struct urb *urb, struct pt_regs *regs)
+{
+ struct cardstate *cs;
+ struct bas_cardstate *ucs;
+ struct bc_state *bcs;
+ unsigned long flags;
+ int status;
+ unsigned l;
+ int channel;
+
+ IFNULLRET(urb);
+ cs = (struct cardstate *) urb->context;
+ IFNULLRET(cs);
+ ucs = cs->hw.bas;
+ IFNULLRET(ucs);
+
+ if (unlikely(!atomic_read(&cs->connected))) {
+ warn("%s: disconnected", __func__);
+ return;
+ }
+
+ switch (urb->status) {
+ case 0: /* success */
+ break;
+ case -ENOENT: /* canceled */
+ case -ECONNRESET: /* canceled (async) */
+ case -EINPROGRESS: /* pending */
+ /* ignore silently */
+ dbg(DEBUG_USBREQ,
+ "%s: %s", __func__, get_usb_statmsg(urb->status));
+ return;
+ default: /* severe trouble */
+ warn("interrupt read: %s", get_usb_statmsg(urb->status));
+ //FIXME corrective action? resubmission always ok?
+ goto resubmit;
+ }
+
+ l = (unsigned) ucs->int_in_buf[1] +
+ (((unsigned) ucs->int_in_buf[2]) << 8);
+
+ dbg(DEBUG_USBREQ,
+ "<-------%d: 0x%02x (%u [0x%02x 0x%02x])", urb->actual_length,
+ (int)ucs->int_in_buf[0], l,
+ (int)ucs->int_in_buf[1], (int)ucs->int_in_buf[2]);
+
+ channel = 0;
+
+ switch (ucs->int_in_buf[0]) {
+ case HD_DEVICE_INIT_OK:
+ update_basstate(ucs, BS_INIT, 0);
+ break;
+
+ case HD_READY_SEND_ATDATA:
+ del_timer(&ucs->timer_atrdy);
+ update_basstate(ucs, BS_ATREADY, BS_ATTIMER);
+ start_cbsend(cs);
+ break;
+
+ case HD_OPEN_B2CHANNEL_ACK:
+ ++channel;
+ case HD_OPEN_B1CHANNEL_ACK:
+ bcs = cs->bcs + channel;
+ update_basstate(ucs, BS_B1OPEN << channel, 0);
+ gigaset_bchannel_up(bcs);
+ break;
+
+ case HD_OPEN_ATCHANNEL_ACK:
+ update_basstate(ucs, BS_ATOPEN, 0);
+ start_cbsend(cs);
+ break;
+
+ case HD_CLOSE_B2CHANNEL_ACK:
+ ++channel;
+ case HD_CLOSE_B1CHANNEL_ACK:
+ bcs = cs->bcs + channel;
+ update_basstate(ucs, 0, BS_B1OPEN << channel);
+ stopurbs(bcs->hw.bas);
+ gigaset_bchannel_down(bcs);
+ break;
+
+ case HD_CLOSE_ATCHANNEL_ACK:
+ update_basstate(ucs, 0, BS_ATOPEN);
+ break;
+
+ case HD_B2_FLOW_CONTROL:
+ ++channel;
+ case HD_B1_FLOW_CONTROL:
+ bcs = cs->bcs + channel;
+ atomic_add((l - BAS_NORMFRAME) * BAS_CORRFRAMES,
+ &bcs->hw.bas->corrbytes);
+ dbg(DEBUG_ISO,
+ "Flow control (channel %d, sub %d): 0x%02x => %d",
+ channel, bcs->hw.bas->numsub, l,
+ atomic_read(&bcs->hw.bas->corrbytes));
+ break;
+
+ case HD_RECEIVEATDATA_ACK: /* AT response ready to be received */
+ if (!l) {
+ warn("HD_RECEIVEATDATA_ACK with length 0 ignored");
+ break;
+ }
+ spin_lock_irqsave(&cs->lock, flags);
+ if (ucs->rcvbuf_size) {
+ spin_unlock_irqrestore(&cs->lock, flags);
+ err("receive AT data overrun, %d bytes lost", l);
+ error_reset(cs); //FIXME reschedule
+ break;
+ }
+ if ((ucs->rcvbuf = kmalloc(l, GFP_ATOMIC)) == NULL) {
+ spin_unlock_irqrestore(&cs->lock, flags);
+ err("%s: out of memory, %d bytes lost", __func__, l);
+ error_reset(cs); //FIXME reschedule
+ break;
+ }
+ ucs->rcvbuf_size = l;
+ ucs->retry_cmd_in = 0;
+ if ((status = atread_submit(cs, BAS_TIMEOUT)) < 0) {
+ kfree(ucs->rcvbuf);
+ ucs->rcvbuf = NULL;
+ ucs->rcvbuf_size = 0;
+ error_reset(cs); //FIXME reschedule
+ }
+ spin_unlock_irqrestore(&cs->lock, flags);
+ break;
+
+ case HD_RESET_INTERRUPT_PIPE_ACK:
+ dbg(DEBUG_USBREQ, "HD_RESET_INTERRUPT_PIPE_ACK");
+ break;
+
+ case HD_SUSPEND_END:
+ dbg(DEBUG_USBREQ, "HD_SUSPEND_END");
+ break;
+
+ default:
+ warn("unknown Gigaset signal 0x%02x (%u) ignored",
+ (int) ucs->int_in_buf[0], l);
+ }
+
+ check_pending(ucs);
+
+resubmit:
+ status = usb_submit_urb(urb, SLAB_ATOMIC);
+ if (unlikely(status)) {
+ err("could not resubmit interrupt URB: %s",
+ get_usb_statmsg(status));
+ error_reset(cs);
+ }
+}
+
+/* read_ctrl_callback
+ * USB completion handler for control pipe input
+ * called by the USB subsystem in interrupt context
+ * parameter:
+ * urb USB request block
+ * urb->context = inbuf structure for controller state
+ */
+static void read_ctrl_callback(struct urb *urb, struct pt_regs *regs)
+{
+ struct cardstate *cs;
+ struct bas_cardstate *ucs;
+ unsigned numbytes;
+ unsigned long flags;
+ struct inbuf_t *inbuf;
+ int have_data = 0;
+
+ IFNULLRET(urb);
+ inbuf = (struct inbuf_t *) urb->context;
+ IFNULLRET(inbuf);
+ cs = inbuf->cs;
+ IFNULLRET(cs);
+ ucs = cs->hw.bas;
+ IFNULLRET(ucs);
+
+ spin_lock_irqsave(&cs->lock, flags);
+ if (!atomic_read(&cs->connected)) {
+ warn("%s: disconnected", __func__);
+ spin_unlock_irqrestore(&cs->lock, flags);
+ return;
+ }
+
+ if (!ucs->rcvbuf_size) {
+ warn("%s: no receive in progress", __func__);
+ spin_unlock_irqrestore(&cs->lock, flags);
+ return;
+ }
+
+ del_timer(&ucs->timer_cmd_in);
+
+ switch (urb->status) {
+ case 0: /* normal completion */
+ numbytes = urb->actual_length;
+ if (unlikely(numbytes == 0)) {
+ warn("control read: empty block received");
+ goto retry;
+ }
+ if (unlikely(numbytes != ucs->rcvbuf_size)) {
+ warn("control read: received %d chars, expected %d",
+ numbytes, ucs->rcvbuf_size);
+ if (numbytes > ucs->rcvbuf_size)
+ numbytes = ucs->rcvbuf_size;
+ }
+
+ /* copy received bytes to inbuf */
+ have_data = gigaset_fill_inbuf(inbuf, ucs->rcvbuf, numbytes);
+
+ if (unlikely(numbytes < ucs->rcvbuf_size)) {
+ /* incomplete - resubmit for remaining bytes */
+ ucs->rcvbuf_size -= numbytes;
+ ucs->retry_cmd_in = 0;
+ goto retry;
+ }
+ break;
+
+ case -ENOENT: /* canceled */
+ case -ECONNRESET: /* canceled (async) */
+ case -EINPROGRESS: /* pending */
+ /* no action necessary */
+ dbg(DEBUG_USBREQ,
+ "%s: %s", __func__, get_usb_statmsg(urb->status));
+ break;
+
+ default: /* severe trouble */
+ warn("control read: %s", get_usb_statmsg(urb->status));
+ retry:
+ if (ucs->retry_cmd_in++ < BAS_RETRY) {
+ notice("control read: retry %d", ucs->retry_cmd_in);
+ if (atread_submit(cs, BAS_TIMEOUT) >= 0) {
+ /* resubmitted - bypass regular exit block */
+ spin_unlock_irqrestore(&cs->lock, flags);
+ return;
+ }
+ } else {
+ err("control read: giving up after %d tries",
+ ucs->retry_cmd_in);
+ }
+ error_reset(cs);
+ }
+
+ kfree(ucs->rcvbuf);
+ ucs->rcvbuf = NULL;
+ ucs->rcvbuf_size = 0;
+ spin_unlock_irqrestore(&cs->lock, flags);
+ if (have_data) {
+ dbg(DEBUG_INTR, "%s-->BH", __func__);
+ gigaset_schedule_event(cs);
+ }
+}
+
+/* read_iso_callback
+ * USB completion handler for B channel isochronous input
+ * called by the USB subsystem in interrupt context
+ * parameter:
+ * urb USB request block of completed request
+ * urb->context = bc_state structure
+ */
+static void read_iso_callback(struct urb *urb, struct pt_regs *regs)
+{
+ struct bc_state *bcs;
+ struct bas_bc_state *ubc;
+ unsigned long flags;
+ int i, rc;
+
+ IFNULLRET(urb);
+ IFNULLRET(urb->context);
+ IFNULLRET(cardstate);
+
+ /* status codes not worth bothering the tasklet with */
+ if (unlikely(urb->status == -ENOENT || urb->status == -ECONNRESET ||
+ urb->status == -EINPROGRESS)) {
+ dbg(DEBUG_ISO,
+ "%s: %s", __func__, get_usb_statmsg(urb->status));
+ return;
+ }
+
+ bcs = (struct bc_state *) urb->context;
+ ubc = bcs->hw.bas;
+ IFNULLRET(ubc);
+
+ spin_lock_irqsave(&ubc->isoinlock, flags);
+ if (likely(ubc->isoindone == NULL)) {
+ /* pass URB to tasklet */
+ ubc->isoindone = urb;
+ tasklet_schedule(&ubc->rcvd_tasklet);
+ } else {
+ /* tasklet still busy, drop data and resubmit URB */
+ ubc->loststatus = urb->status;
+ for (i = 0; i < BAS_NUMFRAMES; i++) {
+ ubc->isoinlost += urb->iso_frame_desc[i].actual_length;
+ if (unlikely(urb->iso_frame_desc[i].status != 0 &&
+ urb->iso_frame_desc[i].status != -EINPROGRESS)) {
+ ubc->loststatus = urb->iso_frame_desc[i].status;
+ }
+ urb->iso_frame_desc[i].status = 0;
+ urb->iso_frame_desc[i].actual_length = 0;
+ }
+ if (likely(atomic_read(&ubc->running))) {
+ urb->dev = bcs->cs->hw.bas->udev; /* clobbered by USB subsystem */
+ urb->transfer_flags = URB_ISO_ASAP;
+ urb->number_of_packets = BAS_NUMFRAMES;
+ dbg(DEBUG_ISO, "%s: isoc read overrun/resubmit", __func__);
+ rc = usb_submit_urb(urb, SLAB_ATOMIC);
+ if (unlikely(rc != 0)) {
+ err("could not resubmit isochronous read URB: %s",
+ get_usb_statmsg(rc));
+ dump_urb(DEBUG_ISO, "isoc read", urb);
+ error_hangup(bcs);
+ }
+ }
+ }
+ spin_unlock_irqrestore(&ubc->isoinlock, flags);
+}
+
+/* write_iso_callback
+ * USB completion handler for B channel isochronous output
+ * called by the USB subsystem in interrupt context
+ * parameter:
+ * urb USB request block of completed request
+ * urb->context = isow_urbctx_t structure
+ */
+static void write_iso_callback(struct urb *urb, struct pt_regs *regs)
+{
+ struct isow_urbctx_t *ucx;
+ struct bas_bc_state *ubc;
+ unsigned long flags;
+
+ IFNULLRET(urb);
+ IFNULLRET(urb->context);
+ IFNULLRET(cardstate);
+
+ /* status codes not worth bothering the tasklet with */
+ if (unlikely(urb->status == -ENOENT || urb->status == -ECONNRESET ||
+ urb->status == -EINPROGRESS)) {
+ dbg(DEBUG_ISO,
+ "%s: %s", __func__, get_usb_statmsg(urb->status));
+ return;
+ }
+
+ /* pass URB context to tasklet */
+ ucx = (struct isow_urbctx_t *) urb->context;
+ IFNULLRET(ucx->bcs);
+ ubc = ucx->bcs->hw.bas;
+ IFNULLRET(ubc);
+
+ spin_lock_irqsave(&ubc->isooutlock, flags);
+ ubc->isooutovfl = ubc->isooutdone;
+ ubc->isooutdone = ucx;
+ spin_unlock_irqrestore(&ubc->isooutlock, flags);
+ tasklet_schedule(&ubc->sent_tasklet);
+}
+
+/* starturbs
+ * prepare and submit USB request blocks for isochronous input and output
+ * argument:
+ * B channel control structure
+ * return value:
+ * 0 on success
+ * < 0 on error (no URBs submitted)
+ */
+static int starturbs(struct bc_state *bcs)
+{
+ struct urb *urb;
+ struct bas_bc_state *ubc;
+ int j, k;
+ int rc;
+
+ IFNULLRETVAL(bcs, -EFAULT);
+ ubc = bcs->hw.bas;
+ IFNULLRETVAL(ubc, -EFAULT);
+
+ /* initialize L2 reception */
+ if (bcs->proto2 == ISDN_PROTO_L2_HDLC)
+ bcs->inputstate |= INS_flag_hunt;
+
+ /* submit all isochronous input URBs */
+ atomic_set(&ubc->running, 1);
+ for (k = 0; k < BAS_INURBS; k++) {
+ urb = ubc->isoinurbs[k];
+ if (!urb) {
+ err("isoinurbs[%d]==NULL", k);
+ rc = -EFAULT;
+ goto error;
+ }
+
+ urb->dev = bcs->cs->hw.bas->udev;
+ urb->pipe = usb_rcvisocpipe(urb->dev, 3 + 2 * bcs->channel);
+ urb->transfer_flags = URB_ISO_ASAP;
+ urb->transfer_buffer = ubc->isoinbuf + k * BAS_INBUFSIZE;
+ urb->transfer_buffer_length = BAS_INBUFSIZE;
+ urb->number_of_packets = BAS_NUMFRAMES;
+ urb->interval = BAS_FRAMETIME;
+ urb->complete = read_iso_callback;
+ urb->context = bcs;
+ for (j = 0; j < BAS_NUMFRAMES; j++) {
+ urb->iso_frame_desc[j].offset = j * BAS_MAXFRAME;
+ urb->iso_frame_desc[j].length = BAS_MAXFRAME;
+ urb->iso_frame_desc[j].status = 0;
+ urb->iso_frame_desc[j].actual_length = 0;
+ }
+
+ dump_urb(DEBUG_ISO, "Initial isoc read", urb);
+ if ((rc = usb_submit_urb(urb, SLAB_ATOMIC)) != 0) {
+ err("could not submit isochronous read URB %d: %s",
+ k, get_usb_statmsg(rc));
+ goto error;
+ }
+ }
+
+ /* initialize L2 transmission */
+ gigaset_isowbuf_init(ubc->isooutbuf, PPP_FLAG);
+
+ /* set up isochronous output URBs for flag idling */
+ for (k = 0; k < BAS_OUTURBS; ++k) {
+ urb = ubc->isoouturbs[k].urb;
+ if (!urb) {
+ err("isoouturbs[%d].urb==NULL", k);
+ rc = -EFAULT;
+ goto error;
+ }
+ urb->dev = bcs->cs->hw.bas->udev;
+ urb->pipe = usb_sndisocpipe(urb->dev, 4 + 2 * bcs->channel);
+ urb->transfer_flags = URB_ISO_ASAP;
+ urb->transfer_buffer = ubc->isooutbuf->data;
+ urb->transfer_buffer_length = sizeof(ubc->isooutbuf->data);
+ urb->number_of_packets = BAS_NUMFRAMES;
+ urb->interval = BAS_FRAMETIME;
+ urb->complete = write_iso_callback;
+ urb->context = &ubc->isoouturbs[k];
+ for (j = 0; j < BAS_NUMFRAMES; ++j) {
+ urb->iso_frame_desc[j].offset = BAS_OUTBUFSIZE;
+ urb->iso_frame_desc[j].length = BAS_NORMFRAME;
+ urb->iso_frame_desc[j].status = 0;
+ urb->iso_frame_desc[j].actual_length = 0;
+ }
+ ubc->isoouturbs[k].limit = -1;
+ }
+
+ /* submit two URBs, keep third one */
+ for (k = 0; k < 2; ++k) {
+ dump_urb(DEBUG_ISO, "Initial isoc write", urb);
+ rc = usb_submit_urb(ubc->isoouturbs[k].urb, SLAB_ATOMIC);
+ if (rc != 0) {
+ err("could not submit isochronous write URB %d: %s",
+ k, get_usb_statmsg(rc));
+ goto error;
+ }
+ }
+ dump_urb(DEBUG_ISO, "Initial isoc write (free)", urb);
+ ubc->isooutfree = &ubc->isoouturbs[2];
+ ubc->isooutdone = ubc->isooutovfl = NULL;
+ return 0;
+ error:
+ stopurbs(ubc);
+ return rc;
+}
+
+/* stopurbs
+ * cancel the USB request blocks for isochronous input and output
+ * errors are silently ignored
+ * argument:
+ * B channel control structure
+ */
+static void stopurbs(struct bas_bc_state *ubc)
+{
+ int k, rc;
+
+ IFNULLRET(ubc);
+
+ atomic_set(&ubc->running, 0);
+
+ for (k = 0; k < BAS_INURBS; ++k) {
+ rc = usb_unlink_urb(ubc->isoinurbs[k]);
+ dbg(DEBUG_ISO, "%s: isoc input URB %d unlinked, result = %d",
+ __func__, k, rc);
+ }
+
+ for (k = 0; k < BAS_OUTURBS; ++k) {
+ rc = usb_unlink_urb(ubc->isoouturbs[k].urb);
+ dbg(DEBUG_ISO, "%s: isoc output URB %d unlinked, result = %d",
+ __func__, k, rc);
+ }
+}
+
+/* Isochronous Write - Bottom Half */
+/* =============================== */
+
+/* submit_iso_write_urb
+ * fill and submit the next isochronous write URB
+ * parameters:
+ * bcs B channel state structure
+ * return value:
+ * number of frames submitted in URB
+ * 0 if URB not submitted because no data available (isooutbuf busy)
+ * error code < 0 on error
+ */
+static int submit_iso_write_urb(struct isow_urbctx_t *ucx)
+{
+ struct urb *urb;
+ struct bas_bc_state *ubc;
+ struct usb_iso_packet_descriptor *ifd;
+ int corrbytes, nframe, rc;
+
+ IFNULLRETVAL(ucx, -EFAULT);
+ urb = ucx->urb;
+ IFNULLRETVAL(urb, -EFAULT);
+ IFNULLRETVAL(ucx->bcs, -EFAULT);
+ ubc = ucx->bcs->hw.bas;
+ IFNULLRETVAL(ubc, -EFAULT);
+
+ urb->dev = ucx->bcs->cs->hw.bas->udev; /* clobbered by USB subsystem */
+ urb->transfer_flags = URB_ISO_ASAP;
+ urb->transfer_buffer = ubc->isooutbuf->data;
+ urb->transfer_buffer_length = sizeof(ubc->isooutbuf->data);
+
+ for (nframe = 0; nframe < BAS_NUMFRAMES; nframe++) {
+ ifd = &urb->iso_frame_desc[nframe];
+
+ /* compute frame length according to flow control */
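+ /* (the accumulated correction in corrbytes is applied a little at a time,
+  * keeping each frame length within [BAS_LOWFRAME, BAS_HIGHFRAME]) */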
+ ifd->length = BAS_NORMFRAME;
+ if ((corrbytes = atomic_read(&ubc->corrbytes)) != 0) {
+ dbg(DEBUG_ISO, "%s: corrbytes=%d", __func__, corrbytes);
+ if (corrbytes > BAS_HIGHFRAME - BAS_NORMFRAME)
+ corrbytes = BAS_HIGHFRAME - BAS_NORMFRAME;
+ else if (corrbytes < BAS_LOWFRAME - BAS_NORMFRAME)
+ corrbytes = BAS_LOWFRAME - BAS_NORMFRAME;
+ ifd->length += corrbytes;
+ atomic_add(-corrbytes, &ubc->corrbytes);
+ }
+ //dbg(DEBUG_ISO, "%s: frame %d length=%d", __func__, nframe, ifd->length);
+
+ /* retrieve block of data to send */
+ ifd->offset = gigaset_isowbuf_getbytes(ubc->isooutbuf, ifd->length);
+ if (ifd->offset < 0) {
+ if (ifd->offset == -EBUSY) {
+ dbg(DEBUG_ISO, "%s: buffer busy at frame %d",
+ __func__, nframe);
+ /* tasklet will be restarted from gigaset_send_skb() */
+ } else {
+ err("%s: buffer error %d at frame %d",
+ __func__, ifd->offset, nframe);
+ return ifd->offset;
+ }
+ break;
+ }
+ ucx->limit = atomic_read(&ubc->isooutbuf->nextread);
+ ifd->status = 0;
+ ifd->actual_length = 0;
+ }
+ if ((urb->number_of_packets = nframe) > 0) {
+ if ((rc = usb_submit_urb(urb, SLAB_ATOMIC)) != 0) {
+ err("could not submit isochronous write URB: %s",
+ get_usb_statmsg(rc));
+ dump_urb(DEBUG_ISO, "isoc write", urb);
+ return rc;
+ }
+ ++ubc->numsub;
+ }
+ return nframe;
+}
+
+/* write_iso_tasklet
+ * tasklet scheduled when an isochronous output URB from the Gigaset device
+ * has completed
+ * parameter:
+ * data B channel state structure
+ */
+static void write_iso_tasklet(unsigned long data)
+{
+ struct bc_state *bcs;
+ struct bas_bc_state *ubc;
+ struct cardstate *cs;
+ struct isow_urbctx_t *done, *next, *ovfl;
+ struct urb *urb;
+ struct usb_iso_packet_descriptor *ifd;
+ int offset;
+ unsigned long flags;
+ int i;
+ struct sk_buff *skb;
+ int len;
+
+ bcs = (struct bc_state *) data;
+ IFNULLRET(bcs);
+ ubc = bcs->hw.bas;
+ IFNULLRET(ubc);
+ cs = bcs->cs;
+ IFNULLRET(cs);
+
+ /* loop while completed URBs arrive in time */
+ for (;;) {
+ if (unlikely(!atomic_read(&cs->connected))) {
+ warn("%s: disconnected", __func__);
+ return;
+ }
+
+ if (unlikely(!(atomic_read(&ubc->running)))) {
+ dbg(DEBUG_ISO, "%s: not running", __func__);
+ return;
+ }
+
+ /* retrieve completed URBs */
+ spin_lock_irqsave(&ubc->isooutlock, flags);
+ done = ubc->isooutdone;
+ ubc->isooutdone = NULL;
+ ovfl = ubc->isooutovfl;
+ ubc->isooutovfl = NULL;
+ spin_unlock_irqrestore(&ubc->isooutlock, flags);
+ if (ovfl) {
+ err("isochronous write buffer underrun - buy a faster machine :-)");
+ error_hangup(bcs);
+ break;
+ }
+ if (!done)
+ break;
+
+ /* submit free URB if available */
+ spin_lock_irqsave(&ubc->isooutlock, flags);
+ next = ubc->isooutfree;
+ ubc->isooutfree = NULL;
+ spin_unlock_irqrestore(&ubc->isooutlock, flags);
+ if (next) {
+ if (submit_iso_write_urb(next) <= 0) {
+ /* could not submit URB, put it back */
+ spin_lock_irqsave(&ubc->isooutlock, flags);
+ if (ubc->isooutfree == NULL) {
+ ubc->isooutfree = next;
+ next = NULL;
+ }
+ spin_unlock_irqrestore(&ubc->isooutlock, flags);
+ if (next) {
+ /* couldn't put it back */
+ err("losing isochronous write URB");
+ error_hangup(bcs);
+ }
+ }
+ }
+
+ /* process completed URB */
+ urb = done->urb;
+ switch (urb->status) {
+ case 0: /* normal completion */
+ break;
+ case -EXDEV: /* inspect individual frames */
+ /* assumptions (for lack of documentation):
+ * - actual_length bytes of the frame in error are successfully sent
+ * - all following frames are not sent at all
+ */
+ dbg(DEBUG_ISO, "%s: URB partially completed", __func__);
+ offset = done->limit; /* just in case */
+ for (i = 0; i < BAS_NUMFRAMES; i++) {
+ ifd = &urb->iso_frame_desc[i];
+ if (ifd->status ||
+ ifd->actual_length != ifd->length) {
+ warn("isochronous write: frame %d: %s, "
+ "only %d of %d bytes sent",
+ i, get_usb_statmsg(ifd->status),
+ ifd->actual_length, ifd->length);
+ offset = (ifd->offset +
+ ifd->actual_length)
+ % BAS_OUTBUFSIZE;
+ break;
+ }
+ }
+#ifdef CONFIG_GIGASET_DEBUG
+ /* check assumption on remaining frames */
+ for (; i < BAS_NUMFRAMES; i++) {
+ ifd = &urb->iso_frame_desc[i];
+ if (ifd->status != -EINPROGRESS
+ || ifd->actual_length != 0) {
+ warn("isochronous write: frame %d: %s, "
+ "%d of %d bytes sent",
+ i, get_usb_statmsg(ifd->status),
+ ifd->actual_length, ifd->length);
+ offset = (ifd->offset +
+ ifd->actual_length)
+ % BAS_OUTBUFSIZE;
+ break;
+ }
+ }
+#endif
+ break;
+ case -EPIPE: //FIXME is this the code for "underrun"?
+ err("isochronous write stalled");
+ error_hangup(bcs);
+ break;
+ default: /* severe trouble */
+ warn("isochronous write: %s",
+ get_usb_statmsg(urb->status));
+ }
+
+ /* mark the write buffer area covered by this URB as free */
+ if (done->limit >= 0)
+ atomic_set(&ubc->isooutbuf->read, done->limit);
+
+ /* mark URB as free */
+ spin_lock_irqsave(&ubc->isooutlock, flags);
+ next = ubc->isooutfree;
+ ubc->isooutfree = done;
+ spin_unlock_irqrestore(&ubc->isooutlock, flags);
+ if (next) {
+ /* only one URB still active - resubmit one */
+ if (submit_iso_write_urb(next) <= 0) {
+ /* couldn't submit */
+ error_hangup(bcs);
+ }
+ }
+ }
+
+ /* process queued SKBs */
+ while ((skb = skb_dequeue(&bcs->squeue))) {
+ /* copy to output buffer, doing L2 encapsulation */
+ len = skb->len;
+ if (gigaset_isoc_buildframe(bcs, skb->data, len) == -EAGAIN) {
+ /* insufficient buffer space, push back onto queue */
+ skb_queue_head(&bcs->squeue, skb);
+ dbg(DEBUG_ISO, "%s: skb requeued, qlen=%d",
+ __func__, skb_queue_len(&bcs->squeue));
+ break;
+ }
+ skb_pull(skb, len);
+ gigaset_skb_sent(bcs, skb);
+ dev_kfree_skb_any(skb);
+ }
+}
+
+/* Isochronous Read - Bottom Half */
+/* ============================== */
+
+/* read_iso_tasklet
+ * tasklet scheduled when an isochronous input URB from the Gigaset device
+ * has completed
+ * parameter:
+ * data B channel state structure
+ */
+static void read_iso_tasklet(unsigned long data)
+{
+ struct bc_state *bcs;
+ struct bas_bc_state *ubc;
+ struct cardstate *cs;
+ struct urb *urb;
+ char *rcvbuf;
+ unsigned long flags;
+ int totleft, numbytes, offset, frame, rc;
+
+ bcs = (struct bc_state *) data;
+ IFNULLRET(bcs);
+ ubc = bcs->hw.bas;
+ IFNULLRET(ubc);
+ cs = bcs->cs;
+ IFNULLRET(cs);
+
+ /* loop while more completed URBs arrive in the meantime */
+ for (;;) {
+ if (!atomic_read(&cs->connected)) {
+ warn("%s: disconnected", __func__);
+ return;
+ }
+
+ /* retrieve URB */
+ spin_lock_irqsave(&ubc->isoinlock, flags);
+ if (!(urb = ubc->isoindone)) {
+ spin_unlock_irqrestore(&ubc->isoinlock, flags);
+ return;
+ }
+ ubc->isoindone = NULL;
+ if (unlikely(ubc->loststatus != -EINPROGRESS)) {
+ warn("isochronous read overrun, dropped URB with status: %s, %d bytes lost",
+ get_usb_statmsg(ubc->loststatus), ubc->isoinlost);
+ ubc->loststatus = -EINPROGRESS;
+ }
+ spin_unlock_irqrestore(&ubc->isoinlock, flags);
+
+ if (unlikely(!(atomic_read(&ubc->running)))) {
+ dbg(DEBUG_ISO, "%s: channel not running, dropped URB with status: %s",
+ __func__, get_usb_statmsg(urb->status));
+ return;
+ }
+
+ switch (urb->status) {
+ case 0: /* normal completion */
+ break;
+ case -EXDEV: /* inspect individual frames (we do that anyway) */
+ dbg(DEBUG_ISO, "%s: URB partially completed", __func__);
+ break;
+ case -ENOENT:
+ case -ECONNRESET:
+ dbg(DEBUG_ISO, "%s: URB canceled", __func__);
+ continue; /* -> skip */
+ case -EINPROGRESS: /* huh? */
+ dbg(DEBUG_ISO, "%s: URB still pending", __func__);
+ continue; /* -> skip */
+ case -EPIPE:
+ err("isochronous read stalled");
+ error_hangup(bcs);
+ continue; /* -> skip */
+ default: /* severe trouble */
+ warn("isochronous read: %s",
+ get_usb_statmsg(urb->status));
+ goto error;
+ }
+
+ rcvbuf = urb->transfer_buffer;
+ totleft = urb->actual_length;
+ for (frame = 0; totleft > 0 && frame < BAS_NUMFRAMES; frame++) {
+ if (unlikely(urb->iso_frame_desc[frame].status)) {
+ warn("isochronous read: frame %d: %s",
+ frame, get_usb_statmsg(urb->iso_frame_desc[frame].status));
+ break;
+ }
+ numbytes = urb->iso_frame_desc[frame].actual_length;
+ if (unlikely(numbytes > BAS_MAXFRAME)) {
+ warn("isochronous read: frame %d: numbytes (%d) > BAS_MAXFRAME",
+ frame, numbytes);
+ break;
+ }
+ if (unlikely(numbytes > totleft)) {
+ warn("isochronous read: frame %d: numbytes (%d) > totleft (%d)",
+ frame, numbytes, totleft);
+ break;
+ }
+ offset = urb->iso_frame_desc[frame].offset;
+ if (unlikely(offset + numbytes > BAS_INBUFSIZE)) {
+ warn("isochronous read: frame %d: offset (%d) + numbytes (%d) > BAS_INBUFSIZE",
+ frame, offset, numbytes);
+ break;
+ }
+ gigaset_isoc_receive(rcvbuf + offset, numbytes, bcs);
+ totleft -= numbytes;
+ }
+ if (unlikely(totleft > 0))
+ warn("isochronous read: %d data bytes missing",
+ totleft);
+
+ error:
+ /* URB processed, resubmit */
+ for (frame = 0; frame < BAS_NUMFRAMES; frame++) {
+ urb->iso_frame_desc[frame].status = 0;
+ urb->iso_frame_desc[frame].actual_length = 0;
+ }
+ urb->dev = bcs->cs->hw.bas->udev; /* clobbered by USB subsystem */
+ urb->transfer_flags = URB_ISO_ASAP;
+ urb->number_of_packets = BAS_NUMFRAMES;
+ if ((rc = usb_submit_urb(urb, SLAB_ATOMIC)) != 0) {
+ err("could not resubmit isochronous read URB: %s",
+ get_usb_statmsg(rc));
+ dump_urb(DEBUG_ISO, "resubmit iso read", urb);
+ error_hangup(bcs);
+ }
+ }
+}
+
+/* Channel Operations */
+/* ================== */
+
+/* req_timeout
+ * timeout routine for control output request
+ * argument:
+ * B channel control structure
+ */
+static void req_timeout(unsigned long data)
+{
+ struct bc_state *bcs = (struct bc_state *) data;
+ struct bas_cardstate *ucs;
+ int pending;
+ unsigned long flags;
+
+ IFNULLRET(bcs);
+ IFNULLRET(bcs->cs);
+ ucs = bcs->cs->hw.bas;
+ IFNULLRET(ucs);
+
+ check_pending(ucs);
+
+ spin_lock_irqsave(&ucs->lock, flags);
+ pending = ucs->pending;
+ ucs->pending = 0;
+ spin_unlock_irqrestore(&ucs->lock, flags);
+
+ switch (pending) {
+ case 0: /* no pending request */
+ dbg(DEBUG_USBREQ, "%s: no request pending", __func__);
+ break;
+
+ case HD_OPEN_ATCHANNEL:
+ err("timeout opening AT channel");
+ error_reset(bcs->cs);
+ break;
+
+ case HD_OPEN_B2CHANNEL:
+ case HD_OPEN_B1CHANNEL:
+ err("timeout opening channel %d", bcs->channel + 1);
+ error_hangup(bcs);
+ break;
+
+ case HD_CLOSE_ATCHANNEL:
+ err("timeout closing AT channel");
+ //wake_up_interruptible(cs->initwait);
+ //FIXME need own wait queue?
+ break;
+
+ case HD_CLOSE_B2CHANNEL:
+ case HD_CLOSE_B1CHANNEL:
+ err("timeout closing channel %d", bcs->channel + 1);
+ break;
+
+ default:
+ warn("request 0x%02x timed out, clearing", pending);
+ }
+}
+
+/* write_ctrl_callback
+ * USB completion handler for control pipe output
+ * called by the USB subsystem in interrupt context
+ * parameter:
+ * urb USB request block of completed request
+ * urb->context = hardware specific controller state structure
+ */
+static void write_ctrl_callback(struct urb *urb, struct pt_regs *regs)
+{
+ struct bas_cardstate *ucs;
+ unsigned long flags;
+
+ IFNULLRET(urb);
+ IFNULLRET(urb->context);
+ IFNULLRET(cardstate);
+
+ ucs = (struct bas_cardstate *) urb->context;
+ spin_lock_irqsave(&ucs->lock, flags);
+ if (urb->status && ucs->pending) {
+ err("control request 0x%02x failed: %s",
+ ucs->pending, get_usb_statmsg(urb->status));
+ del_timer(&ucs->timer_ctrl);
+ ucs->pending = 0;
+ }
+ /* individual handling of specific request types */
+ switch (ucs->pending) {
+ case HD_DEVICE_INIT_ACK: /* no reply expected */
+ ucs->pending = 0;
+ break;
+ }
+ spin_unlock_irqrestore(&ucs->lock, flags);
+}
+
+/* req_submit
+ * submit a control output request without message buffer to the Gigaset base
+ * and optionally start a timeout
+ * parameters:
+ * bcs B channel control structure
+ * req control request code (HD_*)
+ * val control request parameter value (set to 0 if unused)
+ * timeout timeout in tenths of a second (0: no timeout)
+ * return value:
+ * 0 on success
+ * -EINVAL if a NULL pointer is encountered somewhere
+ * -EBUSY if another request is pending
+ * any URB submission error code
+ */
+static int req_submit(struct bc_state *bcs, int req, int val, int timeout)
+{
+ struct bas_cardstate *ucs;
+ int ret;
+ unsigned long flags;
+
+ IFNULLRETVAL(bcs, -EINVAL);
+ IFNULLRETVAL(bcs->cs, -EINVAL);
+ ucs = bcs->cs->hw.bas;
+ IFNULLRETVAL(ucs, -EINVAL);
+ IFNULLRETVAL(ucs->urb_ctrl, -EINVAL);
+
+ dbg(DEBUG_USBREQ, "-------> 0x%02x (%d)", req, val);
+
+ spin_lock_irqsave(&ucs->lock, flags);
+ if (ucs->pending) {
+ spin_unlock_irqrestore(&ucs->lock, flags);
+ err("submission of request 0x%02x failed: request 0x%02x still pending",
+ req, ucs->pending);
+ return -EBUSY;
+ }
+ if (ucs->urb_ctrl->status == -EINPROGRESS) {
+ spin_unlock_irqrestore(&ucs->lock, flags);
+ err("could not submit request 0x%02x: URB busy", req);
+ return -EBUSY;
+ }
+
+ ucs->dr_ctrl.bRequestType = OUT_VENDOR_REQ;
+ ucs->dr_ctrl.bRequest = req;
+ ucs->dr_ctrl.wValue = cpu_to_le16(val);
+ ucs->dr_ctrl.wIndex = 0;
+ ucs->dr_ctrl.wLength = 0;
+ usb_fill_control_urb(ucs->urb_ctrl, ucs->udev,
+ usb_sndctrlpipe(ucs->udev, 0),
+ (unsigned char*) &ucs->dr_ctrl, NULL, 0,
+ write_ctrl_callback, ucs);
+ if ((ret = usb_submit_urb(ucs->urb_ctrl, SLAB_ATOMIC)) != 0) {
+ err("could not submit request 0x%02x: %s",
+ req, get_usb_statmsg(ret));
+ spin_unlock_irqrestore(&ucs->lock, flags);
+ return ret;
+ }
+ ucs->pending = req;
+
+ if (timeout > 0) {
+ dbg(DEBUG_USBREQ, "setting timeout of %d/10 secs", timeout);
+ ucs->timer_ctrl.expires = jiffies + timeout * HZ / 10;
+ ucs->timer_ctrl.data = (unsigned long) bcs;
+ ucs->timer_ctrl.function = req_timeout;
+ add_timer(&ucs->timer_ctrl);
+ }
+
+ spin_unlock_irqrestore(&ucs->lock, flags);
+ return 0;
+}
+
+/* gigaset_init_bchannel
+ * called by common.c to connect a B channel
+ * initialize isochronous I/O and tell the Gigaset base to open the channel
+ * argument:
+ * B channel control structure
+ * return value:
+ * 0 on success, error code < 0 on error
+ */
+static int gigaset_init_bchannel(struct bc_state *bcs)
+{
+ int req, ret;
+
+ IFNULLRETVAL(bcs, -EINVAL);
+
+ if ((ret = starturbs(bcs)) < 0) {
+ err("could not start isochronous I/O for channel %d",
+ bcs->channel + 1);
+ error_hangup(bcs);
+ return ret;
+ }
+
+ req = bcs->channel ? HD_OPEN_B2CHANNEL : HD_OPEN_B1CHANNEL;
+ if ((ret = req_submit(bcs, req, 0, BAS_TIMEOUT)) < 0) {
+ err("could not open channel %d: %s",
+ bcs->channel + 1, get_usb_statmsg(ret));
+ stopurbs(bcs->hw.bas);
+ error_hangup(bcs);
+ }
+ return ret;
+}
+
+/* gigaset_close_bchannel
+ * called by common.c to disconnect a B channel
+ * tell the Gigaset base to close the channel
+ * stopping isochronous I/O and LL notification will be done when the
+ * acknowledgement for the close arrives
+ * argument:
+ * B channel control structure
+ * return value:
+ * 0 on success, error code < 0 on error
+ */
+static int gigaset_close_bchannel(struct bc_state *bcs)
+{
+ int req, ret;
+
+ IFNULLRETVAL(bcs, -EINVAL);
+
+ if (!(atomic_read(&bcs->cs->hw.bas->basstate) &
+ (bcs->channel ? BS_B2OPEN : BS_B1OPEN))) {
+ /* channel not running: just signal common.c */
+ gigaset_bchannel_down(bcs);
+ return 0;
+ }
+
+ req = bcs->channel ? HD_CLOSE_B2CHANNEL : HD_CLOSE_B1CHANNEL;
+ if ((ret = req_submit(bcs, req, 0, BAS_TIMEOUT)) < 0)
+ err("could not submit HD_CLOSE_BxCHANNEL request: %s",
+ get_usb_statmsg(ret));
+ return ret;
+}
+
+/* Device Operations */
+/* ================= */
+
+/* complete_cb
+ * unqueue first command buffer from queue, waking any sleepers
+ * must be called with cs->cmdlock held
+ * parameter:
+ * cs controller state structure
+ */
+static void complete_cb(struct cardstate *cs)
+{
+ struct cmdbuf_t *cb;
+
+ IFNULLRET(cs);
+ cb = cs->cmdbuf;
+ IFNULLRET(cb);
+
+ /* unqueue completed buffer */
+ cs->cmdbytes -= cs->curlen;
+ dbg(DEBUG_TRANSCMD | DEBUG_LOCKCMD,
+ "write_command: sent %u bytes, %u left",
+ cs->curlen, cs->cmdbytes);
+ if ((cs->cmdbuf = cb->next) != NULL) {
+ cs->cmdbuf->prev = NULL;
+ cs->curlen = cs->cmdbuf->len;
+ } else {
+ cs->lastcmdbuf = NULL;
+ cs->curlen = 0;
+ }
+
+ if (cb->wake_tasklet)
+ tasklet_schedule(cb->wake_tasklet);
+
+ kfree(cb);
+}
+
+static int atwrite_submit(struct cardstate *cs, unsigned char *buf, int len);
+
+/* write_command_callback
+ * USB completion handler for AT command transmission
+ * called by the USB subsystem in interrupt context
+ * parameter:
+ * urb USB request block of completed request
+ * urb->context = controller state structure
+ */
+static void write_command_callback(struct urb *urb, struct pt_regs *regs)
+{
+ struct cardstate *cs;
+ unsigned long flags;
+ struct bas_cardstate *ucs;
+
+ IFNULLRET(urb);
+ cs = (struct cardstate *) urb->context;
+ IFNULLRET(cs);
+ ucs = cs->hw.bas;
+ IFNULLRET(ucs);
+
+ /* check status */
+ switch (urb->status) {
+ case 0: /* normal completion */
+ break;
+ case -ENOENT: /* canceled */
+ case -ECONNRESET: /* canceled (async) */
+ case -EINPROGRESS: /* pending */
+ /* ignore silently */
+ dbg(DEBUG_USBREQ,
+ "%s: %s", __func__, get_usb_statmsg(urb->status));
+ return;
+ default: /* any failure */
+ if (++ucs->retry_cmd_out > BAS_RETRY) {
+ warn("command write: %s, giving up after %d retries",
+ get_usb_statmsg(urb->status), ucs->retry_cmd_out);
+ break;
+ }
+ if (cs->cmdbuf == NULL) {
+ warn("command write: %s, cannot retry - cmdbuf gone",
+ get_usb_statmsg(urb->status));
+ break;
+ }
+ notice("command write: %s, retry %d",
+ get_usb_statmsg(urb->status), ucs->retry_cmd_out);
+ if (atwrite_submit(cs, cs->cmdbuf->buf, cs->cmdbuf->len) >= 0)
+ /* resubmitted - bypass regular exit block */
+ return;
+ /* command send failed, assume base still waiting */
+ update_basstate(ucs, BS_ATREADY, 0);
+ }
+
+ spin_lock_irqsave(&cs->cmdlock, flags);
+ if (cs->cmdbuf != NULL)
+ complete_cb(cs);
+ spin_unlock_irqrestore(&cs->cmdlock, flags);
+}
+
+/* atrdy_timeout
+ * timeout routine for AT command transmission
+ * argument:
+ * controller state structure
+ */
+static void atrdy_timeout(unsigned long data)
+{
+ struct cardstate *cs = (struct cardstate *) data;
+ struct bas_cardstate *ucs;
+
+ IFNULLRET(cs);
+ ucs = cs->hw.bas;
+ IFNULLRET(ucs);
+
+ warn("timeout waiting for HD_READY_SEND_ATDATA");
+
+ /* fake the missing signal - what else can I do? */
+ update_basstate(ucs, BS_ATREADY, BS_ATTIMER);
+ start_cbsend(cs);
+}
+
+/* atwrite_submit
+ * submit an HD_WRITE_ATMESSAGE command URB
+ * parameters:
+ * cs controller state structure
+ * buf buffer containing command to send
+ * len length of command to send
+ * return value:
+ * 0 on success
+ * -EFAULT if a NULL pointer is encountered somewhere
+ * -EBUSY if another request is pending
+ * any URB submission error code
+ */
+static int atwrite_submit(struct cardstate *cs, unsigned char *buf, int len)
+{
+ struct bas_cardstate *ucs;
+ int ret;
+
+ IFNULLRETVAL(cs, -EFAULT);
+ ucs = cs->hw.bas;
+ IFNULLRETVAL(ucs, -EFAULT);
+ IFNULLRETVAL(ucs->urb_cmd_out, -EFAULT);
+
+ dbg(DEBUG_USBREQ, "-------> HD_WRITE_ATMESSAGE (%d)", len);
+
+ if (ucs->urb_cmd_out->status == -EINPROGRESS) {
+ err("could not submit HD_WRITE_ATMESSAGE: URB busy");
+ return -EBUSY;
+ }
+
+ ucs->dr_cmd_out.bRequestType = OUT_VENDOR_REQ;
+ ucs->dr_cmd_out.bRequest = HD_WRITE_ATMESSAGE;
+ ucs->dr_cmd_out.wValue = 0;
+ ucs->dr_cmd_out.wIndex = 0;
+ ucs->dr_cmd_out.wLength = cpu_to_le16(len);
+ usb_fill_control_urb(ucs->urb_cmd_out, ucs->udev,
+ usb_sndctrlpipe(ucs->udev, 0),
+ (unsigned char*) &ucs->dr_cmd_out, buf, len,
+ write_command_callback, cs);
+
+ if ((ret = usb_submit_urb(ucs->urb_cmd_out, SLAB_ATOMIC)) != 0) {
+ err("could not submit HD_WRITE_ATMESSAGE: %s",
+ get_usb_statmsg(ret));
+ return ret;
+ }
+
+ /* submitted successfully */
+ update_basstate(ucs, 0, BS_ATREADY);
+
+ /* start timeout if necessary */
+ if (!(atomic_read(&ucs->basstate) & BS_ATTIMER)) {
+ dbg(DEBUG_OUTPUT,
+ "setting ATREADY timeout of %d/10 secs", ATRDY_TIMEOUT);
+ ucs->timer_atrdy.expires = jiffies + ATRDY_TIMEOUT * HZ / 10;
+ ucs->timer_atrdy.data = (unsigned long) cs;
+ ucs->timer_atrdy.function = atrdy_timeout;
+ add_timer(&ucs->timer_atrdy);
+ update_basstate(ucs, BS_ATTIMER, 0);
+ }
+ return 0;
+}
+
+/* start_cbsend
+ * start transmission of AT command queue if necessary
+ * parameter:
+ * cs controller state structure
+ * return value:
+ * 0 on success
+ * error code < 0 on error
+ */
+static int start_cbsend(struct cardstate *cs)
+{
+ struct cmdbuf_t *cb;
+ struct bas_cardstate *ucs;
+ unsigned long flags;
+ int rc;
+ int retval = 0;
+
+ IFNULLRETVAL(cs, -EFAULT);
+ ucs = cs->hw.bas;
+ IFNULLRETVAL(ucs, -EFAULT);
+
+ /* check if AT channel is open */
+ if (!(atomic_read(&ucs->basstate) & BS_ATOPEN)) {
+ dbg(DEBUG_TRANSCMD | DEBUG_LOCKCMD, "AT channel not open");
+ rc = req_submit(cs->bcs, HD_OPEN_ATCHANNEL, 0, BAS_TIMEOUT);
+ if (rc < 0) {
+ err("could not open AT channel");
+ /* flush command queue */
+ spin_lock_irqsave(&cs->cmdlock, flags);
+ while (cs->cmdbuf != NULL)
+ complete_cb(cs);
+ spin_unlock_irqrestore(&cs->cmdlock, flags);
+ }
+ return rc;
+ }
+
+ /* try to send first command in queue */
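+ /* (a successful atwrite_submit() clears BS_ATREADY, so normally at most one
+  * command is sent per call; the next send is triggered when the base reports
+  * readiness again, cf. the HD_READY_SEND_ATDATA timeout handling above) */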
+ spin_lock_irqsave(&cs->cmdlock, flags);
+
+ while ((cb = cs->cmdbuf) != NULL &&
+ atomic_read(&ucs->basstate) & BS_ATREADY) {
+ ucs->retry_cmd_out = 0;
+ rc = atwrite_submit(cs, cb->buf, cb->len);
+ if (unlikely(rc)) {
+ retval = rc;
+ complete_cb(cs);
+ }
+ }
+
+ spin_unlock_irqrestore(&cs->cmdlock, flags);
+ return retval;
+}
+
+/* gigaset_write_cmd
+ * This function is called by the device independent part of the driver
+ * to transmit an AT command string to the Gigaset device.
+ * It encapsulates the device specific method for transmission over the
+ * direct USB connection to the base.
+ * The command string is added to the queue of commands to send, and
+ * USB transmission is started if necessary.
+ * parameters:
+ * cs controller state structure
+ * buf command string to send
+ * len number of bytes to send (max. IF_WRITEBUF)
+ * wake_tasklet tasklet to run when transmission is completed (NULL if none)
+ * return value:
+ * number of bytes queued on success
+ * error code < 0 on error
+ */
+static int gigaset_write_cmd(struct cardstate *cs,
+ const unsigned char *buf, int len,
+ struct tasklet_struct *wake_tasklet)
+{
+ struct cmdbuf_t *cb;
+ unsigned long flags;
+ int status;
+
+ gigaset_dbg_buffer(atomic_read(&cs->mstate) != MS_LOCKED ?
+ DEBUG_TRANSCMD : DEBUG_LOCKCMD,
+ "CMD Transmit", len, buf, 0);
+
+ if (!atomic_read(&cs->connected)) {
+ err("%s: not connected", __func__);
+ return -ENODEV;
+ }
+
+ if (len <= 0)
+ return 0; /* nothing to do */
+
+ if (len > IF_WRITEBUF)
+ len = IF_WRITEBUF;
+ if (!(cb = kmalloc(sizeof(struct cmdbuf_t) + len, GFP_ATOMIC))) {
+ err("%s: out of memory", __func__);
+ return -ENOMEM;
+ }
+
+ memcpy(cb->buf, buf, len);
+ cb->len = len;
+ cb->offset = 0;
+ cb->next = NULL;
+ cb->wake_tasklet = wake_tasklet;
+
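+ /* append to the doubly linked command queue; cs->curlen tracks the length
+  * of the command buffer at the head of the queue */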
+ spin_lock_irqsave(&cs->cmdlock, flags);
+ cb->prev = cs->lastcmdbuf;
+ if (cs->lastcmdbuf)
+ cs->lastcmdbuf->next = cb;
+ else {
+ cs->cmdbuf = cb;
+ cs->curlen = len;
+ }
+ cs->cmdbytes += len;
+ cs->lastcmdbuf = cb;
+ spin_unlock_irqrestore(&cs->cmdlock, flags);
+
+ status = start_cbsend(cs);
+
+ return status < 0 ? status : len;
+}
+
+/* gigaset_write_room
+ * tty_driver.write_room interface routine
+ * return number of characters the driver will accept to be written via gigaset_write_cmd
+ * parameter:
+ * controller state structure
+ * return value:
+ * number of characters
+ */
+static int gigaset_write_room(struct cardstate *cs)
+{
+ return IF_WRITEBUF;
+}
+
+/* gigaset_chars_in_buffer
+ * tty_driver.chars_in_buffer interface routine
+ * return number of characters waiting to be sent
+ * parameter:
+ * controller state structure
+ * return value:
+ * number of characters
+ */
+static int gigaset_chars_in_buffer(struct cardstate *cs)
+{
+ unsigned long flags;
+ unsigned bytes;
+
+ spin_lock_irqsave(&cs->cmdlock, flags);
+ bytes = cs->cmdbytes;
+ spin_unlock_irqrestore(&cs->cmdlock, flags);
+
+ return bytes;
+}
+
+/* gigaset_brkchars
+ * implementation of ioctl(GIGASET_BRKCHARS)
+ * parameter:
+ * controller state structure
+ * return value:
+ * -EINVAL (unimplemented function)
+ */
+static int gigaset_brkchars(struct cardstate *cs, const unsigned char buf[6])
+{
+ return -EINVAL;
+}
+
+
+/* Device Initialization/Shutdown */
+/* ============================== */
+
+/* Free hardware dependent part of the B channel structure
+ * parameter:
+ * bcs B channel structure
+ * return value:
+ * !=0 on success
+ */
+static int gigaset_freebcshw(struct bc_state *bcs)
+{
+ if (!bcs->hw.bas)
+ return 0;
+
+ kfree(bcs->hw.bas->isooutbuf);
+ kfree(bcs->hw.bas);
+ bcs->hw.bas = NULL;
+ return 1;
+}
+
+/* Initialize hardware dependent part of the B channel structure
+ * parameter:
+ * bcs B channel structure
+ * return value:
+ * !=0 on success
+ */
+static int gigaset_initbcshw(struct bc_state *bcs)
+{
+ int i;
+ struct bas_bc_state *ubc;
+
+ bcs->hw.bas = ubc = kmalloc(sizeof(struct bas_bc_state), GFP_KERNEL);
+ if (!ubc) {
+ err("could not allocate bas_bc_state");
+ return 0;
+ }
+
+ atomic_set(&ubc->running, 0);
+ atomic_set(&ubc->corrbytes, 0);
+ spin_lock_init(&ubc->isooutlock);
+ for (i = 0; i < BAS_OUTURBS; ++i) {
+ ubc->isoouturbs[i].urb = NULL;
+ ubc->isoouturbs[i].bcs = bcs;
+ }
+ ubc->isooutdone = ubc->isooutfree = ubc->isooutovfl = NULL;
+ ubc->numsub = 0;
+ if (!(ubc->isooutbuf = kmalloc(sizeof(struct isowbuf_t), GFP_KERNEL))) {
+ err("could not allocate isochronous output buffer");
+ kfree(ubc);
+ bcs->hw.bas = NULL;
+ return 0;
+ }
+ tasklet_init(&ubc->sent_tasklet,
+ &write_iso_tasklet, (unsigned long) bcs);
+
+ spin_lock_init(&ubc->isoinlock);
+ for (i = 0; i < BAS_INURBS; ++i)
+ ubc->isoinurbs[i] = NULL;
+ ubc->isoindone = NULL;
+ ubc->loststatus = -EINPROGRESS;
+ ubc->isoinlost = 0;
+ ubc->seqlen = 0;
+ ubc->inbyte = 0;
+ ubc->inbits = 0;
+ ubc->goodbytes = 0;
+ ubc->alignerrs = 0;
+ ubc->fcserrs = 0;
+ ubc->frameerrs = 0;
+ ubc->giants = 0;
+ ubc->runts = 0;
+ ubc->aborts = 0;
+ ubc->shared0s = 0;
+ ubc->stolen0s = 0;
+ tasklet_init(&ubc->rcvd_tasklet,
+ &read_iso_tasklet, (unsigned long) bcs);
+ return 1;
+}
+
+static void gigaset_reinitbcshw(struct bc_state *bcs)
+{
+ struct bas_bc_state *ubc = bcs->hw.bas;
+
+ atomic_set(&ubc->running, 0);
+ atomic_set(&ubc->corrbytes, 0);
+ ubc->numsub = 0;
+ spin_lock_init(&ubc->isooutlock);
+ spin_lock_init(&ubc->isoinlock);
+ ubc->loststatus = -EINPROGRESS;
+}
+
+static void gigaset_freecshw(struct cardstate *cs)
+{
+ struct bas_cardstate *ucs = cs->hw.bas;
+
+ del_timer(&ucs->timer_ctrl);
+ del_timer(&ucs->timer_atrdy);
+ del_timer(&ucs->timer_cmd_in);
+
+ kfree(cs->hw.bas);
+}
+
+static int gigaset_initcshw(struct cardstate *cs)
+{
+ struct bas_cardstate *ucs;
+
+ cs->hw.bas = ucs = kmalloc(sizeof *ucs, GFP_KERNEL);
+ if (!ucs)
+ return 0;
+
+ ucs->urb_cmd_in = NULL;
+ ucs->urb_cmd_out = NULL;
+ ucs->rcvbuf = NULL;
+ ucs->rcvbuf_size = 0;
+
+ spin_lock_init(&ucs->lock);
+ ucs->pending = 0;
+
+ atomic_set(&ucs->basstate, 0);
+ init_timer(&ucs->timer_ctrl);
+ init_timer(&ucs->timer_atrdy);
+ init_timer(&ucs->timer_cmd_in);
+
+ return 1;
+}
+
+/* freeurbs
+ * unlink and deallocate all URBs unconditionally
+ * caller must make sure that no commands are still in progress
+ * parameter:
+ * cs controller state structure
+ */
+static void freeurbs(struct cardstate *cs)
+{
+ struct bas_cardstate *ucs;
+ struct bas_bc_state *ubc;
+ int i, j;
+
+ IFNULLRET(cs);
+ ucs = cs->hw.bas;
+ IFNULLRET(ucs);
+
+ for (j = 0; j < 2; ++j) {
+ ubc = cs->bcs[j].hw.bas;
+ IFNULLCONT(ubc);
+ for (i = 0; i < BAS_OUTURBS; ++i)
+ if (ubc->isoouturbs[i].urb) {
+ usb_kill_urb(ubc->isoouturbs[i].urb);
+ dbg(DEBUG_INIT,
+ "%s: isoc output URB %d/%d unlinked",
+ __func__, j, i);
+ usb_free_urb(ubc->isoouturbs[i].urb);
+ ubc->isoouturbs[i].urb = NULL;
+ }
+ for (i = 0; i < BAS_INURBS; ++i)
+ if (ubc->isoinurbs[i]) {
+ usb_kill_urb(ubc->isoinurbs[i]);
+ dbg(DEBUG_INIT,
+ "%s: isoc input URB %d/%d unlinked",
+ __func__, j, i);
+ usb_free_urb(ubc->isoinurbs[i]);
+ ubc->isoinurbs[i] = NULL;
+ }
+ }
+ if (ucs->urb_int_in) {
+ usb_kill_urb(ucs->urb_int_in);
+ dbg(DEBUG_INIT, "%s: interrupt input URB unlinked", __func__);
+ usb_free_urb(ucs->urb_int_in);
+ ucs->urb_int_in = NULL;
+ }
+ if (ucs->urb_cmd_out) {
+ usb_kill_urb(ucs->urb_cmd_out);
+ dbg(DEBUG_INIT, "%s: command output URB unlinked", __func__);
+ usb_free_urb(ucs->urb_cmd_out);
+ ucs->urb_cmd_out = NULL;
+ }
+ if (ucs->urb_cmd_in) {
+ usb_kill_urb(ucs->urb_cmd_in);
+ dbg(DEBUG_INIT, "%s: command input URB unlinked", __func__);
+ usb_free_urb(ucs->urb_cmd_in);
+ ucs->urb_cmd_in = NULL;
+ }
+ if (ucs->urb_ctrl) {
+ usb_kill_urb(ucs->urb_ctrl);
+ dbg(DEBUG_INIT, "%s: control output URB unlinked", __func__);
+ usb_free_urb(ucs->urb_ctrl);
+ ucs->urb_ctrl = NULL;
+ }
+}
+
+/* gigaset_probe
+ * This function is called when a new USB device is connected.
+ * It checks whether the new device is handled by this driver.
+ */
+static int gigaset_probe(struct usb_interface *interface,
+ const struct usb_device_id *id)
+{
+ struct usb_host_interface *hostif;
+ struct usb_device *udev = interface_to_usbdev(interface);
+ struct cardstate *cs = NULL;
+ struct bas_cardstate *ucs = NULL;
+ struct bas_bc_state *ubc;
+ struct usb_endpoint_descriptor *endpoint;
+ int i, j;
+ int ret;
+
+ IFNULLRETVAL(udev, -ENODEV);
+
+ dbg(DEBUG_ANY,
+ "%s: Check if device matches .. (Vendor: 0x%x, Product: 0x%x)",
+ __func__, le16_to_cpu(udev->descriptor.idVendor),
+ le16_to_cpu(udev->descriptor.idProduct));
+
+ /* See if the device offered us matches what we can accept */
+ if ((le16_to_cpu(udev->descriptor.idVendor) != USB_GIGA_VENDOR_ID) ||
+ (le16_to_cpu(udev->descriptor.idProduct) != USB_GIGA_PRODUCT_ID &&
+ le16_to_cpu(udev->descriptor.idProduct) != USB_4175_PRODUCT_ID &&
+ le16_to_cpu(udev->descriptor.idProduct) != USB_SX303_PRODUCT_ID &&
+ le16_to_cpu(udev->descriptor.idProduct) != USB_SX353_PRODUCT_ID)) {
+ dbg(DEBUG_ANY, "%s: unmatched ID - exiting", __func__);
+ return -ENODEV;
+ }
+
+ /* set required alternate setting */
+ hostif = interface->cur_altsetting;
+ if (hostif->desc.bAlternateSetting != 3) {
+ dbg(DEBUG_ANY,
+ "%s: wrong alternate setting %d - trying to switch",
+ __func__, hostif->desc.bAlternateSetting);
+ if (usb_set_interface(udev, hostif->desc.bInterfaceNumber, 3) < 0) {
+ warn("usb_set_interface failed, device %d interface %d altsetting %d",
+ udev->devnum, hostif->desc.bInterfaceNumber,
+ hostif->desc.bAlternateSetting);
+ return -ENODEV;
+ }
+ hostif = interface->cur_altsetting;
+ }
+
+ /* Reject application specific interfaces */
+ if (hostif->desc.bInterfaceClass != 255) {
+ warn("%s: bInterfaceClass == %d",
+ __func__, hostif->desc.bInterfaceClass);
+ return -ENODEV;
+ }
+
+ info("%s: Device matched (Vendor: 0x%x, Product: 0x%x)",
+ __func__, le16_to_cpu(udev->descriptor.idVendor),
+ le16_to_cpu(udev->descriptor.idProduct));
+
+ cs = gigaset_getunassignedcs(driver);
+ if (!cs) {
+ err("%s: no free cardstate", __func__);
+ return -ENODEV;
+ }
+ ucs = cs->hw.bas;
+ ucs->udev = udev;
+ ucs->interface = interface;
+
+ /* allocate URBs:
+ * - one for the interrupt pipe
+ * - three for the different uses of the default control pipe
+ * - three for each isochronous pipe
+ */
+ ucs->urb_int_in = usb_alloc_urb(0, SLAB_KERNEL);
+ if (!ucs->urb_int_in) {
+ err("No free urbs available");
+ goto error;
+ }
+ ucs->urb_cmd_in = usb_alloc_urb(0, SLAB_KERNEL);
+ if (!ucs->urb_cmd_in) {
+ err("No free urbs available");
+ goto error;
+ }
+ ucs->urb_cmd_out = usb_alloc_urb(0, SLAB_KERNEL);
+ if (!ucs->urb_cmd_out) {
+ err("No free urbs available");
+ goto error;
+ }
+ ucs->urb_ctrl = usb_alloc_urb(0, SLAB_KERNEL);
+ if (!ucs->urb_ctrl) {
+ err("No free urbs available");
+ goto error;
+ }
+
+ for (j = 0; j < 2; ++j) {
+ ubc = cs->bcs[j].hw.bas;
+ for (i = 0; i < BAS_OUTURBS; ++i) {
+ ubc->isoouturbs[i].urb =
+ usb_alloc_urb(BAS_NUMFRAMES, SLAB_KERNEL);
+ if (!ubc->isoouturbs[i].urb) {
+ err("No free urbs available");
+ goto error;
+ }
+ }
+ for (i = 0; i < BAS_INURBS; ++i) {
+ ubc->isoinurbs[i] =
+ usb_alloc_urb(BAS_NUMFRAMES, SLAB_KERNEL);
+ if (!ubc->isoinurbs[i]) {
+ err("No free urbs available");
+ goto error;
+ }
+ }
+ }
+
+ ucs->rcvbuf = NULL;
+ ucs->rcvbuf_size = 0;
+
+ /* Fill the interrupt urb and send it to the core */
+ endpoint = &hostif->endpoint[0].desc;
+ usb_fill_int_urb(ucs->urb_int_in, udev,
+ usb_rcvintpipe(udev,
+ (endpoint->bEndpointAddress) & 0x0f),
+ ucs->int_in_buf, 3, read_int_callback, cs,
+ endpoint->bInterval);
+ ret = usb_submit_urb(ucs->urb_int_in, SLAB_KERNEL);
+ if (ret) {
+ err("could not submit interrupt URB: %s", get_usb_statmsg(ret));
+ goto error;
+ }
+
+ /* tell the device that the driver is ready */
+ if ((ret = req_submit(cs->bcs, HD_DEVICE_INIT_ACK, 0, 0)) != 0)
+ goto error;
+
+ /* tell common part that the device is ready */
+ if (startmode == SM_LOCKED)
+ atomic_set(&cs->mstate, MS_LOCKED);
+ if (!gigaset_start(cs))
+ goto error;
+
+ /* save address of controller structure */
+ usb_set_intfdata(interface, cs);
+
+ /* set up device sysfs */
+ gigaset_init_dev_sysfs(interface);
+ return 0;
+
+error:
+ freeurbs(cs);
+ gigaset_unassign(cs);
+ return -ENODEV;
+}
+
+/* gigaset_disconnect
+ * This function is called when the Gigaset base is unplugged.
+ */
+static void gigaset_disconnect(struct usb_interface *interface)
+{
+ struct cardstate *cs;
+ struct bas_cardstate *ucs;
+
+ /* clear device sysfs */
+ gigaset_free_dev_sysfs(interface);
+
+ cs = usb_get_intfdata(interface);
+ usb_set_intfdata(interface, NULL);
+
+ IFNULLRET(cs);
+ ucs = cs->hw.bas;
+ IFNULLRET(ucs);
+
+ info("disconnecting GigaSet base");
+ gigaset_stop(cs);
+ freeurbs(cs);
+ kfree(ucs->rcvbuf);
+ ucs->rcvbuf = NULL;
+ ucs->rcvbuf_size = 0;
+ atomic_set(&ucs->basstate, 0);
+ gigaset_unassign(cs);
+}
+
+static struct gigaset_ops gigops = {
+ gigaset_write_cmd,
+ gigaset_write_room,
+ gigaset_chars_in_buffer,
+ gigaset_brkchars,
+ gigaset_init_bchannel,
+ gigaset_close_bchannel,
+ gigaset_initbcshw,
+ gigaset_freebcshw,
+ gigaset_reinitbcshw,
+ gigaset_initcshw,
+ gigaset_freecshw,
+ gigaset_set_modem_ctrl,
+ gigaset_baud_rate,
+ gigaset_set_line_ctrl,
+ gigaset_isoc_send_skb,
+ gigaset_isoc_input,
+};
+
+/* bas_gigaset_init
+ * This function is called after the kernel module is loaded.
+ */
+static int __init bas_gigaset_init(void)
+{
+ int result;
+
+ /* allocate memory for our driver state and initialize it */
+ if ((driver = gigaset_initdriver(GIGASET_MINOR, GIGASET_MINORS,
+ GIGASET_MODULENAME, GIGASET_DEVNAME,
+ GIGASET_DEVFSNAME, &gigops,
+ THIS_MODULE)) == NULL)
+ goto error;
+
+ /* allocate memory for our device state and initialize it */
+ cardstate = gigaset_initcs(driver, 2, 0, 0, cidmode, GIGASET_MODULENAME);
+ if (!cardstate)
+ goto error;
+
+ /* register this driver with the USB subsystem */
+ result = usb_register(&gigaset_usb_driver);
+ if (result < 0) {
+ err("usb_register failed (error %d)", -result);
+ goto error;
+ }
+
+ info(DRIVER_AUTHOR);
+ info(DRIVER_DESC);
+ return 0;
+
+error: if (cardstate)
+ gigaset_freecs(cardstate);
+ cardstate = NULL;
+ if (driver)
+ gigaset_freedriver(driver);
+ driver = NULL;
+ return -1;
+}
+
+/* bas_gigaset_exit
+ * This function is called before the kernel module is unloaded.
+ */
+static void __exit bas_gigaset_exit(void)
+{
+ gigaset_blockdriver(driver); /* => probe will fail
+ * => no gigaset_start any more
+ */
+
+ gigaset_shutdown(cardstate);
+ /* from now on, no isdn callback should be possible */
+
+ if (atomic_read(&cardstate->hw.bas->basstate) & BS_ATOPEN) {
+ dbg(DEBUG_ANY, "closing AT channel");
+ if (req_submit(cardstate->bcs,
+ HD_CLOSE_ATCHANNEL, 0, BAS_TIMEOUT) >= 0) {
+ /* successfully submitted - wait for completion */
+ //wait_event_interruptible(cs->initwait, !cs->hw.bas->pending);
+ //FIXME need own wait queue? wakeup?
+ }
+ }
+
+ /* deregister this driver with the USB subsystem */
+ usb_deregister(&gigaset_usb_driver);
+ /* this will call the disconnect-callback */
+ /* from now on, no disconnect/probe callback should be running */
+
+ gigaset_freecs(cardstate);
+ cardstate = NULL;
+ gigaset_freedriver(driver);
+ driver = NULL;
+}
+
+
+module_init(bas_gigaset_init);
+module_exit(bas_gigaset_exit);
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
diff --git a/drivers/isdn/gigaset/common.c b/drivers/isdn/gigaset/common.c
new file mode 100644
index 00000000000..64371995c1a
--- /dev/null
+++ b/drivers/isdn/gigaset/common.c
@@ -0,0 +1,1203 @@
+/*
+ * Stuff used by all variants of the driver
+ *
+ * Copyright (c) 2001 by Stefan Eilers <Eilers.Stefan@epost.de>,
+ * Hansjoerg Lipp <hjlipp@web.de>,
+ * Tilman Schmidt <tilman@imap.cc>.
+ *
+ * =====================================================================
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ * =====================================================================
+ * ToDo: ...
+ * =====================================================================
+ * Version: $Id: common.c,v 1.104.4.22 2006/02/04 18:28:16 hjlipp Exp $
+ * =====================================================================
+ */
+
+#include "gigaset.h"
+#include <linux/ctype.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+
+/* Version Information */
+#define DRIVER_AUTHOR "Hansjoerg Lipp <hjlipp@web.de>, Tilman Schmidt <tilman@imap.cc>, Stefan Eilers <Eilers.Stefan@epost.de>"
+#define DRIVER_DESC "Driver for Gigaset 307x"
+
+/* Module parameters */
+int gigaset_debuglevel = DEBUG_DEFAULT;
+EXPORT_SYMBOL_GPL(gigaset_debuglevel);
+module_param_named(debug, gigaset_debuglevel, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(debug, "debug level");
+
+/*======================================================================
+ Prototypes of internal functions
+ */
+
+//static void gigaset_process_response(int resp_code, int parameter,
+// struct at_state_t *at_state,
+// unsigned char ** pstring);
+static struct cardstate *alloc_cs(struct gigaset_driver *drv);
+static void free_cs(struct cardstate *cs);
+static void make_valid(struct cardstate *cs, unsigned mask);
+static void make_invalid(struct cardstate *cs, unsigned mask);
+
+#define VALID_MINOR 0x01
+#define VALID_ID 0x02
+#define ASSIGNED 0x04
+
+/* bitwise byte inversion table */
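+/* (maps each byte to its bit-reversed value, e.g. gigaset_invtab[0x01] == 0x80
+ * and gigaset_invtab[0xe0] == 0x07) */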
+__u8 gigaset_invtab[256] = {
+ 0x00, 0x80, 0x40, 0xc0, 0x20, 0xa0, 0x60, 0xe0,
+ 0x10, 0x90, 0x50, 0xd0, 0x30, 0xb0, 0x70, 0xf0,
+ 0x08, 0x88, 0x48, 0xc8, 0x28, 0xa8, 0x68, 0xe8,
+ 0x18, 0x98, 0x58, 0xd8, 0x38, 0xb8, 0x78, 0xf8,
+ 0x04, 0x84, 0x44, 0xc4, 0x24, 0xa4, 0x64, 0xe4,
+ 0x14, 0x94, 0x54, 0xd4, 0x34, 0xb4, 0x74, 0xf4,
+ 0x0c, 0x8c, 0x4c, 0xcc, 0x2c, 0xac, 0x6c, 0xec,
+ 0x1c, 0x9c, 0x5c, 0xdc, 0x3c, 0xbc, 0x7c, 0xfc,
+ 0x02, 0x82, 0x42, 0xc2, 0x22, 0xa2, 0x62, 0xe2,
+ 0x12, 0x92, 0x52, 0xd2, 0x32, 0xb2, 0x72, 0xf2,
+ 0x0a, 0x8a, 0x4a, 0xca, 0x2a, 0xaa, 0x6a, 0xea,
+ 0x1a, 0x9a, 0x5a, 0xda, 0x3a, 0xba, 0x7a, 0xfa,
+ 0x06, 0x86, 0x46, 0xc6, 0x26, 0xa6, 0x66, 0xe6,
+ 0x16, 0x96, 0x56, 0xd6, 0x36, 0xb6, 0x76, 0xf6,
+ 0x0e, 0x8e, 0x4e, 0xce, 0x2e, 0xae, 0x6e, 0xee,
+ 0x1e, 0x9e, 0x5e, 0xde, 0x3e, 0xbe, 0x7e, 0xfe,
+ 0x01, 0x81, 0x41, 0xc1, 0x21, 0xa1, 0x61, 0xe1,
+ 0x11, 0x91, 0x51, 0xd1, 0x31, 0xb1, 0x71, 0xf1,
+ 0x09, 0x89, 0x49, 0xc9, 0x29, 0xa9, 0x69, 0xe9,
+ 0x19, 0x99, 0x59, 0xd9, 0x39, 0xb9, 0x79, 0xf9,
+ 0x05, 0x85, 0x45, 0xc5, 0x25, 0xa5, 0x65, 0xe5,
+ 0x15, 0x95, 0x55, 0xd5, 0x35, 0xb5, 0x75, 0xf5,
+ 0x0d, 0x8d, 0x4d, 0xcd, 0x2d, 0xad, 0x6d, 0xed,
+ 0x1d, 0x9d, 0x5d, 0xdd, 0x3d, 0xbd, 0x7d, 0xfd,
+ 0x03, 0x83, 0x43, 0xc3, 0x23, 0xa3, 0x63, 0xe3,
+ 0x13, 0x93, 0x53, 0xd3, 0x33, 0xb3, 0x73, 0xf3,
+ 0x0b, 0x8b, 0x4b, 0xcb, 0x2b, 0xab, 0x6b, 0xeb,
+ 0x1b, 0x9b, 0x5b, 0xdb, 0x3b, 0xbb, 0x7b, 0xfb,
+ 0x07, 0x87, 0x47, 0xc7, 0x27, 0xa7, 0x67, 0xe7,
+ 0x17, 0x97, 0x57, 0xd7, 0x37, 0xb7, 0x77, 0xf7,
+ 0x0f, 0x8f, 0x4f, 0xcf, 0x2f, 0xaf, 0x6f, 0xef,
+ 0x1f, 0x9f, 0x5f, 0xdf, 0x3f, 0xbf, 0x7f, 0xff
+};
+EXPORT_SYMBOL_GPL(gigaset_invtab);
+
+void gigaset_dbg_buffer(enum debuglevel level, const unsigned char *msg,
+ size_t len, const unsigned char *buf, int from_user)
+{
+ unsigned char outbuf[80];
+ unsigned char inbuf[80 - 1];
+ size_t numin;
+ const unsigned char *in;
+ size_t space = sizeof outbuf - 1;
+ unsigned char *out = outbuf;
+
+ if (!from_user) {
+ in = buf;
+ numin = len;
+ } else {
+ numin = len < sizeof inbuf ? len : sizeof inbuf;
+ in = inbuf;
+ if (copy_from_user(inbuf, (const unsigned char __user *) buf, numin)) {
+ strncpy(inbuf, "<FAULT>", sizeof inbuf);
+ numin = sizeof "<FAULT>" - 1;
+ }
+ }
+
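+ /* copy printable characters verbatim; render control characters in caret
+  * notation (^@ for 0x00, ^A for 0x01, ...) */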
+ for (; numin && space; --numin, ++in) {
+ --space;
+ if (*in >= 32)
+ *out++ = *in;
+ else {
+ *out++ = '^';
+ if (space) {
+ *out++ = '@' + *in;
+ --space;
+ }
+ }
+ }
+ *out = 0;
+
+ dbg(level, "%s (%u bytes): %s", msg, (unsigned) len, outbuf);
+}
+EXPORT_SYMBOL_GPL(gigaset_dbg_buffer);
+
+static int setflags(struct cardstate *cs, unsigned flags, unsigned delay)
+{
+ int r;
+
+ r = cs->ops->set_modem_ctrl(cs, cs->control_state, flags);
+ cs->control_state = flags;
+ if (r < 0)
+ return r;
+
+ if (delay) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(delay * HZ / 1000);
+ }
+
+ return 0;
+}
+
+int gigaset_enterconfigmode(struct cardstate *cs)
+{
+ int i, r;
+
+ if (!atomic_read(&cs->connected)) {
+ err("not connected!");
+ return -1;
+ }
+
+ cs->control_state = TIOCM_RTS; //FIXME
+
+ r = setflags(cs, TIOCM_DTR, 200);
+ if (r < 0)
+ goto error;
+ r = setflags(cs, 0, 200);
+ if (r < 0)
+ goto error;
+ for (i = 0; i < 5; ++i) {
+ r = setflags(cs, TIOCM_RTS, 100);
+ if (r < 0)
+ goto error;
+ r = setflags(cs, 0, 100);
+ if (r < 0)
+ goto error;
+ }
+ r = setflags(cs, TIOCM_RTS|TIOCM_DTR, 800);
+ if (r < 0)
+ goto error;
+
+ return 0;
+
+error:
+ err("error %d on setuartbits!\n", -r);
+ cs->control_state = TIOCM_RTS|TIOCM_DTR; // FIXME is this a good value?
+ cs->ops->set_modem_ctrl(cs, 0, TIOCM_RTS|TIOCM_DTR);
+
+ return -1; //r
+}
+
+static int test_timeout(struct at_state_t *at_state)
+{
+ if (!at_state->timer_expires)
+ return 0;
+
+ if (--at_state->timer_expires) {
+ dbg(DEBUG_MCMD, "decreased timer of %p to %lu",
+ at_state, at_state->timer_expires);
+ return 0;
+ }
+
+ if (!gigaset_add_event(at_state->cs, at_state, EV_TIMEOUT, NULL,
+ atomic_read(&at_state->timer_index), NULL)) {
+ //FIXME what should we do?
+ }
+
+ return 1;
+}
+
+static void timer_tick(unsigned long data)
+{
+ struct cardstate *cs = (struct cardstate *) data;
+ unsigned long flags;
+ unsigned channel;
+ struct at_state_t *at_state;
+ int timeout = 0;
+
+ spin_lock_irqsave(&cs->lock, flags);
+
+ for (channel = 0; channel < cs->channels; ++channel)
+ if (test_timeout(&cs->bcs[channel].at_state))
+ timeout = 1;
+
+ if (test_timeout(&cs->at_state))
+ timeout = 1;
+
+ list_for_each_entry(at_state, &cs->temp_at_states, list)
+ if (test_timeout(at_state))
+ timeout = 1;
+
+ if (atomic_read(&cs->running)) {
+ mod_timer(&cs->timer, jiffies + GIG_TICK);
+ if (timeout) {
+ dbg(DEBUG_CMD, "scheduling timeout");
+ tasklet_schedule(&cs->event_tasklet);
+ }
+ }
+
+ spin_unlock_irqrestore(&cs->lock, flags);
+}
+
+int gigaset_get_channel(struct bc_state *bcs)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&bcs->cs->lock, flags);
+ if (bcs->use_count) {
+ dbg(DEBUG_ANY, "could not allocate channel %d", bcs->channel);
+ spin_unlock_irqrestore(&bcs->cs->lock, flags);
+ return 0;
+ }
+ ++bcs->use_count;
+ bcs->busy = 1;
+ dbg(DEBUG_ANY, "allocated channel %d", bcs->channel);
+ spin_unlock_irqrestore(&bcs->cs->lock, flags);
+ return 1;
+}
+
+void gigaset_free_channel(struct bc_state *bcs)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&bcs->cs->lock, flags);
+ if (!bcs->busy) {
+ dbg(DEBUG_ANY, "could not free channel %d", bcs->channel);
+ spin_unlock_irqrestore(&bcs->cs->lock, flags);
+ return;
+ }
+ --bcs->use_count;
+ bcs->busy = 0;
+ dbg(DEBUG_ANY, "freed channel %d", bcs->channel);
+ spin_unlock_irqrestore(&bcs->cs->lock, flags);
+}
+
+int gigaset_get_channels(struct cardstate *cs)
+{
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&cs->lock, flags);
+ for (i = 0; i < cs->channels; ++i)
+ if (cs->bcs[i].use_count) {
+ spin_unlock_irqrestore(&cs->lock, flags);
+ dbg(DEBUG_ANY, "could not allocate all channels");
+ return 0;
+ }
+ for (i = 0; i < cs->channels; ++i)
+ ++cs->bcs[i].use_count;
+ spin_unlock_irqrestore(&cs->lock, flags);
+
+ dbg(DEBUG_ANY, "allocated all channels");
+
+ return 1;
+}
+
+void gigaset_free_channels(struct cardstate *cs)
+{
+ unsigned long flags;
+ int i;
+
+ dbg(DEBUG_ANY, "unblocking all channels");
+ spin_lock_irqsave(&cs->lock, flags);
+ for (i = 0; i < cs->channels; ++i)
+ --cs->bcs[i].use_count;
+ spin_unlock_irqrestore(&cs->lock, flags);
+}
+
+void gigaset_block_channels(struct cardstate *cs)
+{
+ unsigned long flags;
+ int i;
+
+ dbg(DEBUG_ANY, "blocking all channels");
+ spin_lock_irqsave(&cs->lock, flags);
+ for (i = 0; i < cs->channels; ++i)
+ ++cs->bcs[i].use_count;
+ spin_unlock_irqrestore(&cs->lock, flags);
+}
+
+static void clear_events(struct cardstate *cs)
+{
+ struct event_t *ev;
+ unsigned head, tail;
+
+ /* no locking needed (no reader/writer allowed) */
+
+ head = atomic_read(&cs->ev_head);
+ tail = atomic_read(&cs->ev_tail);
+
+ while (tail != head) {
+ ev = cs->events + head;
+ kfree(ev->ptr);
+
+ head = (head + 1) % MAX_EVENTS;
+ }
+
+ atomic_set(&cs->ev_head, tail);
+}
+
+struct event_t *gigaset_add_event(struct cardstate *cs,
+ struct at_state_t *at_state, int type,
+ void *ptr, int parameter, void *arg)
+{
+ unsigned long flags;
+ unsigned next, tail;
+ struct event_t *event = NULL;
+
+ spin_lock_irqsave(&cs->ev_lock, flags);
+
+ tail = atomic_read(&cs->ev_tail);
+ next = (tail + 1) % MAX_EVENTS;
+ if (unlikely(next == atomic_read(&cs->ev_head)))
+ err("event queue full");
+ else {
+ event = cs->events + tail;
+ event->type = type;
+ event->at_state = at_state;
+ event->cid = -1;
+ event->ptr = ptr;
+ event->arg = arg;
+ event->parameter = parameter;
+ atomic_set(&cs->ev_tail, next);
+ }
+
+ spin_unlock_irqrestore(&cs->ev_lock, flags);
+
+ return event;
+}
+EXPORT_SYMBOL_GPL(gigaset_add_event);
+
+static void free_strings(struct at_state_t *at_state)
+{
+ int i;
+
+ for (i = 0; i < STR_NUM; ++i) {
+ kfree(at_state->str_var[i]);
+ at_state->str_var[i] = NULL;
+ }
+}
+
+static void clear_at_state(struct at_state_t *at_state)
+{
+ free_strings(at_state);
+}
+
+static void dealloc_at_states(struct cardstate *cs)
+{
+ struct at_state_t *cur, *next;
+
+ list_for_each_entry_safe(cur, next, &cs->temp_at_states, list) {
+ list_del(&cur->list);
+ free_strings(cur);
+ kfree(cur);
+ }
+}
+
+static void gigaset_freebcs(struct bc_state *bcs)
+{
+ int i;
+
+ dbg(DEBUG_INIT, "freeing bcs[%d]->hw", bcs->channel);
+ if (!bcs->cs->ops->freebcshw(bcs)) {
+ dbg(DEBUG_INIT, "failed");
+ }
+
+ dbg(DEBUG_INIT, "clearing bcs[%d]->at_state", bcs->channel);
+ clear_at_state(&bcs->at_state);
+ dbg(DEBUG_INIT, "freeing bcs[%d]->skb", bcs->channel);
+
+ if (bcs->skb)
+ dev_kfree_skb(bcs->skb);
+ for (i = 0; i < AT_NUM; ++i) {
+ kfree(bcs->commands[i]);
+ bcs->commands[i] = NULL;
+ }
+}
+
+void gigaset_freecs(struct cardstate *cs)
+{
+ int i;
+ unsigned long flags;
+
+ if (!cs)
+ return;
+
+ down(&cs->sem);
+
+ if (!cs->bcs)
+ goto f_cs;
+ if (!cs->inbuf)
+ goto f_bcs;
+
+ spin_lock_irqsave(&cs->lock, flags);
+ atomic_set(&cs->running, 0);
+ spin_unlock_irqrestore(&cs->lock, flags); /* event handler and timer are not rescheduled below */
+
+ tasklet_kill(&cs->event_tasklet);
+ del_timer_sync(&cs->timer);
+
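+ /* tear down in reverse order of initialization: cs->cs_init records how far
+  * gigaset_initcs() got, and each case falls through to the one below */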
+ switch (cs->cs_init) {
+ default:
+ gigaset_if_free(cs);
+
+ dbg(DEBUG_INIT, "clearing hw");
+ cs->ops->freecshw(cs);
+
+ //FIXME cmdbuf
+
+ /* fall through */
+ case 2: /* error in initcshw */
+ /* Deregister from LL */
+ make_invalid(cs, VALID_ID);
+ dbg(DEBUG_INIT, "clearing iif");
+ gigaset_i4l_cmd(cs, ISDN_STAT_UNLOAD);
+
+ /* fall through */
+ case 1: /* error when registering with the LL */
+ dbg(DEBUG_INIT, "clearing at_state");
+ clear_at_state(&cs->at_state);
+ dealloc_at_states(cs);
+
+ /* fall through */
+ case 0: /* error in one call to initbcs */
+ for (i = 0; i < cs->channels; ++i) {
+ dbg(DEBUG_INIT, "clearing bcs[%d]", i);
+ gigaset_freebcs(cs->bcs + i);
+ }
+
+ clear_events(cs);
+ dbg(DEBUG_INIT, "freeing inbuf");
+ kfree(cs->inbuf);
+ }
+f_bcs: dbg(DEBUG_INIT, "freeing bcs[]");
+ kfree(cs->bcs);
+f_cs: dbg(DEBUG_INIT, "freeing cs");
+ up(&cs->sem);
+ free_cs(cs);
+}
+EXPORT_SYMBOL_GPL(gigaset_freecs);
+
+void gigaset_at_init(struct at_state_t *at_state, struct bc_state *bcs,
+ struct cardstate *cs, int cid)
+{
+ int i;
+
+ INIT_LIST_HEAD(&at_state->list);
+ at_state->waiting = 0;
+ at_state->getstring = 0;
+ at_state->pending_commands = 0;
+ at_state->timer_expires = 0;
+ at_state->timer_active = 0;
+ atomic_set(&at_state->timer_index, 0);
+ atomic_set(&at_state->seq_index, 0);
+ at_state->ConState = 0;
+ for (i = 0; i < STR_NUM; ++i)
+ at_state->str_var[i] = NULL;
+ at_state->int_var[VAR_ZDLE] = 0;
+ at_state->int_var[VAR_ZCTP] = -1;
+ at_state->int_var[VAR_ZSAU] = ZSAU_NULL;
+ at_state->cs = cs;
+ at_state->bcs = bcs;
+ at_state->cid = cid;
+ if (!cid)
+ at_state->replystruct = cs->tabnocid;
+ else
+ at_state->replystruct = cs->tabcid;
+}
+
+
+static void gigaset_inbuf_init(struct inbuf_t *inbuf, struct bc_state *bcs,
+ struct cardstate *cs, int inputstate)
+/* inbuf->read must be allocated before! */
+{
+ atomic_set(&inbuf->head, 0);
+ atomic_set(&inbuf->tail, 0);
+ inbuf->cs = cs;
+ inbuf->bcs = bcs; /*base driver: NULL*/
+ inbuf->rcvbuf = NULL; //FIXME
+ inbuf->inputstate = inputstate;
+}
+
+/* Initialize the b-channel structure */
+static struct bc_state *gigaset_initbcs(struct bc_state *bcs,
+ struct cardstate *cs, int channel)
+{
+ int i;
+
+ bcs->tx_skb = NULL; //FIXME -> hw part
+
+ skb_queue_head_init(&bcs->squeue);
+
+ bcs->corrupted = 0;
+ bcs->trans_down = 0;
+ bcs->trans_up = 0;
+
+ dbg(DEBUG_INIT, "setting up bcs[%d]->at_state", channel);
+ gigaset_at_init(&bcs->at_state, bcs, cs, -1);
+
+ bcs->rcvbytes = 0;
+
+#ifdef CONFIG_GIGASET_DEBUG
+ bcs->emptycount = 0;
+#endif
+
+ dbg(DEBUG_INIT, "allocating bcs[%d]->skb", channel);
+ bcs->fcs = PPP_INITFCS;
+ bcs->inputstate = 0;
+ if (cs->ignoreframes) {
+ bcs->inputstate |= INS_skip_frame;
+ bcs->skb = NULL;
+ } else if ((bcs->skb = dev_alloc_skb(SBUFSIZE + HW_HDR_LEN)) != NULL)
+ skb_reserve(bcs->skb, HW_HDR_LEN);
+ else {
+ warn("could not allocate skb");
+ bcs->inputstate |= INS_skip_frame;
+ }
+
+ bcs->channel = channel;
+ bcs->cs = cs;
+
+ bcs->chstate = 0;
+ bcs->use_count = 1;
+ bcs->busy = 0;
+ bcs->ignore = cs->ignoreframes;
+
+ for (i = 0; i < AT_NUM; ++i)
+ bcs->commands[i] = NULL;
+
+ dbg(DEBUG_INIT, " setting up bcs[%d]->hw", channel);
+ if (cs->ops->initbcshw(bcs))
+ return bcs;
+
+//error:
+ dbg(DEBUG_INIT, " failed");
+
+ dbg(DEBUG_INIT, " freeing bcs[%d]->skb", channel);
+ if (bcs->skb)
+ dev_kfree_skb(bcs->skb);
+
+ return NULL;
+}
+
+/* gigaset_initcs
+ * Allocate and initialize cardstate structure for Gigaset driver
+ * Calls hardware dependent gigaset_initcshw() function
+ * Calls B channel initialization function gigaset_initbcs() for each B channel
+ * parameters:
+ * drv hardware driver the device belongs to
+ * channels number of B channels supported by device
+ * onechannel !=0: B channel data and AT commands share one communication channel
+ * ==0: B channels have separate communication channels
+ * ignoreframes number of frames to ignore after setting up B channel
+ * cidmode !=0: start in CallID mode
+ * modulename name of driver module (used for I4L registration)
+ * return value:
+ * pointer to cardstate structure, NULL on error
+ */
+struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels,
+ int onechannel, int ignoreframes,
+ int cidmode, const char *modulename)
+{
+ struct cardstate *cs = NULL;
+ int i;
+
+ dbg(DEBUG_INIT, "allocating cs");
+ cs = alloc_cs(drv);
+ if (!cs)
+ goto error;
+ dbg(DEBUG_INIT, "allocating bcs[0..%d]", channels - 1);
+ cs->bcs = kmalloc(channels * sizeof(struct bc_state), GFP_KERNEL);
+ if (!cs->bcs)
+ goto error;
+ dbg(DEBUG_INIT, "allocating inbuf");
+ cs->inbuf = kmalloc(sizeof(struct inbuf_t), GFP_KERNEL);
+ if (!cs->inbuf)
+ goto error;
+
+ cs->cs_init = 0;
+ cs->channels = channels;
+ cs->onechannel = onechannel;
+ cs->ignoreframes = ignoreframes;
+ INIT_LIST_HEAD(&cs->temp_at_states);
+ atomic_set(&cs->running, 0);
+ init_timer(&cs->timer); /* clear next & prev */
+ spin_lock_init(&cs->ev_lock);
+ atomic_set(&cs->ev_tail, 0);
+ atomic_set(&cs->ev_head, 0);
+ init_MUTEX_LOCKED(&cs->sem);
+ tasklet_init(&cs->event_tasklet, &gigaset_handle_event, (unsigned long) cs);
+ atomic_set(&cs->commands_pending, 0);
+ cs->cur_at_seq = 0;
+ cs->gotfwver = -1;
+ cs->open_count = 0;
+ cs->tty = NULL;
+ atomic_set(&cs->cidmode, cidmode != 0);
+
+ //if(onechannel) { //FIXME
+ cs->tabnocid = gigaset_tab_nocid_m10x;
+ cs->tabcid = gigaset_tab_cid_m10x;
+ //} else {
+ // cs->tabnocid = gigaset_tab_nocid;
+ // cs->tabcid = gigaset_tab_cid;
+ //}
+
+ init_waitqueue_head(&cs->waitqueue);
+ cs->waiting = 0;
+
+ atomic_set(&cs->mode, M_UNKNOWN);
+ atomic_set(&cs->mstate, MS_UNINITIALIZED);
+
+ for (i = 0; i < channels; ++i) {
+ dbg(DEBUG_INIT, "setting up bcs[%d].read", i);
+ if (!gigaset_initbcs(cs->bcs + i, cs, i))
+ goto error;
+ }
+
+ ++cs->cs_init;
+
+ dbg(DEBUG_INIT, "setting up at_state");
+ spin_lock_init(&cs->lock);
+ gigaset_at_init(&cs->at_state, NULL, cs, 0);
+ cs->dle = 0;
+ cs->cbytes = 0;
+
+ dbg(DEBUG_INIT, "setting up inbuf");
+ if (onechannel) { //FIXME distinction necessary?
+ gigaset_inbuf_init(cs->inbuf, cs->bcs, cs, INS_command);
+ } else
+ gigaset_inbuf_init(cs->inbuf, NULL, cs, INS_command);
+
+ atomic_set(&cs->connected, 0);
+
+ dbg(DEBUG_INIT, "setting up cmdbuf");
+ cs->cmdbuf = cs->lastcmdbuf = NULL;
+ spin_lock_init(&cs->cmdlock);
+ cs->curlen = 0;
+ cs->cmdbytes = 0;
+
+ /*
+ * Tell the ISDN4Linux subsystem (the LL) that a driver for a USB device
+ * is available. Once this is done, "isdnctrl" can bind a device to this
+ * driver even if no physical USB device is currently connected.
+ * The device only becomes accessible, however, once a physical USB device
+ * has been connected (via "gigaset_probe").
+ */
+ dbg(DEBUG_INIT, "setting up iif");
+ if (!gigaset_register_to_LL(cs, modulename)) {
+ err("register_isdn=>error");
+ goto error;
+ }
+
+ make_valid(cs, VALID_ID);
+ ++cs->cs_init;
+ dbg(DEBUG_INIT, "setting up hw");
+ if (!cs->ops->initcshw(cs))
+ goto error;
+
+ ++cs->cs_init;
+
+ gigaset_if_init(cs);
+
+ atomic_set(&cs->running, 1);
+ cs->timer.data = (unsigned long) cs;
+ cs->timer.function = timer_tick;
+ cs->timer.expires = jiffies + GIG_TICK;
+ /* FIXME: can jiffies increase too much until the timer is added?
+ * Same problem(?) with mod_timer() in timer_tick(). */
+ add_timer(&cs->timer);
+
+ dbg(DEBUG_INIT, "cs initialized!");
+ up(&cs->sem);
+ return cs;
+
+error: if (cs)
+ up(&cs->sem);
+ dbg(DEBUG_INIT, "failed");
+ gigaset_freecs(cs);
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(gigaset_initcs);
+
+/* Reinitialize the B channel structure (e.g. called on hangup or disconnect) */
+void gigaset_bcs_reinit(struct bc_state *bcs)
+{
+ struct sk_buff *skb;
+ struct cardstate *cs = bcs->cs;
+ unsigned long flags;
+
+ while ((skb = skb_dequeue(&bcs->squeue)) != NULL)
+ dev_kfree_skb(skb);
+
+ spin_lock_irqsave(&cs->lock, flags); //FIXME
+ clear_at_state(&bcs->at_state);
+ bcs->at_state.ConState = 0;
+ bcs->at_state.timer_active = 0;
+ bcs->at_state.timer_expires = 0;
+ bcs->at_state.cid = -1; /* No CID defined */
+ spin_unlock_irqrestore(&cs->lock, flags);
+
+ bcs->inputstate = 0;
+
+#ifdef CONFIG_GIGASET_DEBUG
+ bcs->emptycount = 0;
+#endif
+
+ bcs->fcs = PPP_INITFCS;
+ bcs->chstate = 0;
+
+ bcs->ignore = cs->ignoreframes;
+ if (bcs->ignore)
+ bcs->inputstate |= INS_skip_frame;
+
+
+ cs->ops->reinitbcshw(bcs);
+}
+
+static void cleanup_cs(struct cardstate *cs)
+{
+ struct cmdbuf_t *cb, *tcb;
+ int i;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cs->lock, flags);
+
+ atomic_set(&cs->mode, M_UNKNOWN);
+ atomic_set(&cs->mstate, MS_UNINITIALIZED);
+
+ clear_at_state(&cs->at_state);
+ dealloc_at_states(cs);
+ free_strings(&cs->at_state);
+ gigaset_at_init(&cs->at_state, NULL, cs, 0);
+
+ kfree(cs->inbuf->rcvbuf);
+ cs->inbuf->rcvbuf = NULL;
+ cs->inbuf->inputstate = INS_command;
+ atomic_set(&cs->inbuf->head, 0);
+ atomic_set(&cs->inbuf->tail, 0);
+
+ cb = cs->cmdbuf;
+ while (cb) {
+ tcb = cb;
+ cb = cb->next;
+ kfree(tcb);
+ }
+ cs->cmdbuf = cs->lastcmdbuf = NULL;
+ cs->curlen = 0;
+ cs->cmdbytes = 0;
+ cs->gotfwver = -1;
+ cs->dle = 0;
+ cs->cur_at_seq = 0;
+ atomic_set(&cs->commands_pending, 0);
+ cs->cbytes = 0;
+
+ spin_unlock_irqrestore(&cs->lock, flags);
+
+ for (i = 0; i < cs->channels; ++i) {
+ gigaset_freebcs(cs->bcs + i);
+ if (!gigaset_initbcs(cs->bcs + i, cs, i))
+ break; //FIXME error handling
+ }
+
+ if (cs->waiting) {
+ cs->cmd_result = -ENODEV;
+ cs->waiting = 0;
+ wake_up_interruptible(&cs->waitqueue);
+ }
+}
+
+
+int gigaset_start(struct cardstate *cs)
+{
+ if (down_interruptible(&cs->sem))
+ return 0;
+ //info("USB device for Gigaset 307x now attached to Dev %d", ucs->minor);
+
+ atomic_set(&cs->connected, 1);
+
+ if (atomic_read(&cs->mstate) != MS_LOCKED) {
+ cs->ops->set_modem_ctrl(cs, 0, TIOCM_DTR|TIOCM_RTS);
+ cs->ops->baud_rate(cs, B115200);
+ cs->ops->set_line_ctrl(cs, CS8);
+ cs->control_state = TIOCM_DTR|TIOCM_RTS;
+ } else {
+ //FIXME use some saved values?
+ }
+
+ cs->waiting = 1;
+
+ if (!gigaset_add_event(cs, &cs->at_state, EV_START, NULL, 0, NULL)) {
+ cs->waiting = 0;
+ //FIXME what should we do?
+ goto error;
+ }
+
+ dbg(DEBUG_CMD, "scheduling START");
+ gigaset_schedule_event(cs);
+
+ wait_event(cs->waitqueue, !cs->waiting);
+
+ up(&cs->sem);
+ return 1;
+
+error:
+ up(&cs->sem);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(gigaset_start);
+
+void gigaset_shutdown(struct cardstate *cs)
+{
+ down(&cs->sem);
+
+ cs->waiting = 1;
+
+ if (!gigaset_add_event(cs, &cs->at_state, EV_SHUTDOWN, NULL, 0, NULL)) {
+ //FIXME what should we do?
+ goto exit;
+ }
+
+ dbg(DEBUG_CMD, "scheduling SHUTDOWN");
+ gigaset_schedule_event(cs);
+
+ if (wait_event_interruptible(cs->waitqueue, !cs->waiting)) {
+ warn("aborted");
+ //FIXME
+ }
+
+ if (atomic_read(&cs->mstate) != MS_LOCKED) {
+ //FIXME?
+ //gigaset_baud_rate(cs, B115200);
+ //gigaset_set_line_ctrl(cs, CS8);
+ //gigaset_set_modem_ctrl(cs, TIOCM_DTR|TIOCM_RTS, 0);
+ //cs->control_state = 0;
+ } else {
+ //FIXME use some saved values?
+ }
+
+ cleanup_cs(cs);
+
+exit:
+ up(&cs->sem);
+}
+EXPORT_SYMBOL_GPL(gigaset_shutdown);
+
+void gigaset_stop(struct cardstate *cs)
+{
+ down(&cs->sem);
+
+ atomic_set(&cs->connected, 0);
+
+ cs->waiting = 1;
+
+ if (!gigaset_add_event(cs, &cs->at_state, EV_STOP, NULL, 0, NULL)) {
+ //FIXME what should we do?
+ goto exit;
+ }
+
+ dbg(DEBUG_CMD, "scheduling STOP");
+ gigaset_schedule_event(cs);
+
+ if (wait_event_interruptible(cs->waitqueue, !cs->waiting)) {
+ warn("aborted");
+ //FIXME
+ }
+
+	/* Tell the LL that the device is no longer available. */
+ gigaset_i4l_cmd(cs, ISDN_STAT_STOP); // FIXME move to event layer?
+
+ cleanup_cs(cs);
+
+exit:
+ up(&cs->sem);
+}
+EXPORT_SYMBOL_GPL(gigaset_stop);
+
+static LIST_HEAD(drivers);
+static DEFINE_SPINLOCK(driver_lock);
+
+struct cardstate *gigaset_get_cs_by_id(int id)
+{
+ unsigned long flags;
+	struct cardstate *ret = NULL;
+	struct cardstate *cs;
+ struct gigaset_driver *drv;
+ unsigned i;
+
+ spin_lock_irqsave(&driver_lock, flags);
+ list_for_each_entry(drv, &drivers, list) {
+ spin_lock(&drv->lock);
+ for (i = 0; i < drv->minors; ++i) {
+ if (drv->flags[i] & VALID_ID) {
+ cs = drv->cs + i;
+ if (cs->myid == id)
+ ret = cs;
+ }
+ if (ret)
+ break;
+ }
+ spin_unlock(&drv->lock);
+ if (ret)
+ break;
+ }
+ spin_unlock_irqrestore(&driver_lock, flags);
+ return ret;
+}
+
+void gigaset_debugdrivers(void)
+{
+ unsigned long flags;
+	struct cardstate *cs;
+ struct gigaset_driver *drv;
+ unsigned i;
+
+ spin_lock_irqsave(&driver_lock, flags);
+ list_for_each_entry(drv, &drivers, list) {
+ dbg(DEBUG_DRIVER, "driver %p", drv);
+ spin_lock(&drv->lock);
+ for (i = 0; i < drv->minors; ++i) {
+ dbg(DEBUG_DRIVER, " index %u", i);
+ dbg(DEBUG_DRIVER, " flags 0x%02x", drv->flags[i]);
+ cs = drv->cs + i;
+ dbg(DEBUG_DRIVER, " cardstate %p", cs);
+ dbg(DEBUG_DRIVER, " minor_index %u", cs->minor_index);
+ dbg(DEBUG_DRIVER, " driver %p", cs->driver);
+ dbg(DEBUG_DRIVER, " i4l id %d", cs->myid);
+ }
+ spin_unlock(&drv->lock);
+ }
+ spin_unlock_irqrestore(&driver_lock, flags);
+}
+EXPORT_SYMBOL_GPL(gigaset_debugdrivers);
+
+struct cardstate *gigaset_get_cs_by_tty(struct tty_struct *tty)
+{
+ if (tty->index < 0 || tty->index >= tty->driver->num)
+ return NULL;
+ return gigaset_get_cs_by_minor(tty->index + tty->driver->minor_start);
+}
+
+struct cardstate *gigaset_get_cs_by_minor(unsigned minor)
+{
+ unsigned long flags;
+	struct cardstate *ret = NULL;
+ struct gigaset_driver *drv;
+ unsigned index;
+
+ spin_lock_irqsave(&driver_lock, flags);
+ list_for_each_entry(drv, &drivers, list) {
+ if (minor < drv->minor || minor >= drv->minor + drv->minors)
+ continue;
+ index = minor - drv->minor;
+ spin_lock(&drv->lock);
+ if (drv->flags[index] & VALID_MINOR)
+ ret = drv->cs + index;
+ spin_unlock(&drv->lock);
+ if (ret)
+ break;
+ }
+ spin_unlock_irqrestore(&driver_lock, flags);
+ return ret;
+}
+
+void gigaset_freedriver(struct gigaset_driver *drv)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&driver_lock, flags);
+ list_del(&drv->list);
+ spin_unlock_irqrestore(&driver_lock, flags);
+
+ gigaset_if_freedriver(drv);
+ module_put(drv->owner);
+
+ kfree(drv->cs);
+ kfree(drv->flags);
+ kfree(drv);
+}
+EXPORT_SYMBOL_GPL(gigaset_freedriver);
+
+/* gigaset_initdriver
+ * Allocate and initialize gigaset_driver structure. Initialize interface.
+ * parameters:
+ * minor First minor number
+ * minors Number of minors this driver can handle
+ * procname Name of the driver (e.g. for /proc/tty/drivers, path in /proc/driver)
+ * devname Name of the device files (prefix without minor number)
+ * devfsname Devfs name of the device files without %d
+ * return value:
+ * Pointer to the gigaset_driver structure on success, NULL on failure.
+ */
+struct gigaset_driver *gigaset_initdriver(unsigned minor, unsigned minors,
+ const char *procname,
+ const char *devname,
+ const char *devfsname,
+ const struct gigaset_ops *ops,
+ struct module *owner)
+{
+ struct gigaset_driver *drv;
+ unsigned long flags;
+ unsigned i;
+
+	drv = kmalloc(sizeof *drv, GFP_KERNEL);
+	if (!drv)
+		return NULL;
+	if (!try_module_get(owner)) {
+		kfree(drv);
+		return NULL;
+	}
+
+ drv->cs = NULL;
+ drv->have_tty = 0;
+ drv->minor = minor;
+ drv->minors = minors;
+ spin_lock_init(&drv->lock);
+ drv->blocked = 0;
+ drv->ops = ops;
+ drv->owner = owner;
+ INIT_LIST_HEAD(&drv->list);
+
+ drv->cs = kmalloc(minors * sizeof *drv->cs, GFP_KERNEL);
+ if (!drv->cs)
+ goto out1;
+ drv->flags = kmalloc(minors * sizeof *drv->flags, GFP_KERNEL);
+ if (!drv->flags)
+ goto out2;
+
+ for (i = 0; i < minors; ++i) {
+ drv->flags[i] = 0;
+ drv->cs[i].driver = drv;
+ drv->cs[i].ops = drv->ops;
+ drv->cs[i].minor_index = i;
+ }
+
+ gigaset_if_initdriver(drv, procname, devname, devfsname);
+
+ spin_lock_irqsave(&driver_lock, flags);
+ list_add(&drv->list, &drivers);
+ spin_unlock_irqrestore(&driver_lock, flags);
+
+ return drv;
+
+out2:
+ kfree(drv->cs);
+out1:
+ kfree(drv);
+ module_put(owner);
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(gigaset_initdriver);
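+/*
+ * Usage sketch (illustrative only; GIGASET_MINOR, GIGASET_MINORS and
+ * my_gigaset_ops are placeholder names, not defined in this patch):
+ *
+ *	drv = gigaset_initdriver(GIGASET_MINOR, GIGASET_MINORS,
+ *				 "gigaset", "ttyG", "ttyG%d",
+ *				 &my_gigaset_ops, THIS_MODULE);
+ *	if (!drv)
+ *		return -ENOMEM;
+ */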
+
+static struct cardstate *alloc_cs(struct gigaset_driver *drv)
+{
+ unsigned long flags;
+ unsigned i;
+	struct cardstate *ret = NULL;
+
+ spin_lock_irqsave(&drv->lock, flags);
+ for (i = 0; i < drv->minors; ++i) {
+ if (!(drv->flags[i] & VALID_MINOR)) {
+ drv->flags[i] = VALID_MINOR;
+ ret = drv->cs + i;
+ }
+ if (ret)
+ break;
+ }
+ spin_unlock_irqrestore(&drv->lock, flags);
+ return ret;
+}
+
+static void free_cs(struct cardstate *cs)
+{
+ unsigned long flags;
+ struct gigaset_driver *drv = cs->driver;
+ spin_lock_irqsave(&drv->lock, flags);
+ drv->flags[cs->minor_index] = 0;
+ spin_unlock_irqrestore(&drv->lock, flags);
+}
+
+static void make_valid(struct cardstate *cs, unsigned mask)
+{
+ unsigned long flags;
+ struct gigaset_driver *drv = cs->driver;
+ spin_lock_irqsave(&drv->lock, flags);
+ drv->flags[cs->minor_index] |= mask;
+ spin_unlock_irqrestore(&drv->lock, flags);
+}
+
+static void make_invalid(struct cardstate *cs, unsigned mask)
+{
+ unsigned long flags;
+ struct gigaset_driver *drv = cs->driver;
+ spin_lock_irqsave(&drv->lock, flags);
+ drv->flags[cs->minor_index] &= ~mask;
+ spin_unlock_irqrestore(&drv->lock, flags);
+}
+
+/* For drivers without fixed assignment device<->cardstate (usb) */
+struct cardstate *gigaset_getunassignedcs(struct gigaset_driver *drv)
+{
+ unsigned long flags;
+ struct cardstate *cs = NULL;
+ unsigned i;
+
+ spin_lock_irqsave(&drv->lock, flags);
+ if (drv->blocked)
+ goto exit;
+ for (i = 0; i < drv->minors; ++i) {
+ if ((drv->flags[i] & VALID_MINOR) &&
+ !(drv->flags[i] & ASSIGNED)) {
+ drv->flags[i] |= ASSIGNED;
+ cs = drv->cs + i;
+ break;
+ }
+ }
+exit:
+ spin_unlock_irqrestore(&drv->lock, flags);
+ return cs;
+}
+EXPORT_SYMBOL_GPL(gigaset_getunassignedcs);
+
+void gigaset_unassign(struct cardstate *cs)
+{
+ unsigned long flags;
+ unsigned *minor_flags;
+ struct gigaset_driver *drv;
+
+ if (!cs)
+ return;
+ drv = cs->driver;
+ spin_lock_irqsave(&drv->lock, flags);
+ minor_flags = drv->flags + cs->minor_index;
+ if (*minor_flags & VALID_MINOR)
+ *minor_flags &= ~ASSIGNED;
+ spin_unlock_irqrestore(&drv->lock, flags);
+}
+EXPORT_SYMBOL_GPL(gigaset_unassign);
+
+void gigaset_blockdriver(struct gigaset_driver *drv)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&drv->lock, flags);
+ drv->blocked = 1;
+ spin_unlock_irqrestore(&drv->lock, flags);
+}
+EXPORT_SYMBOL_GPL(gigaset_blockdriver);
+
+static int __init gigaset_init_module(void)
+{
+ /* in accordance with the principle of least astonishment,
+ * setting the 'debug' parameter to 1 activates a sensible
+ * set of default debug levels
+ */
+ if (gigaset_debuglevel == 1)
+ gigaset_debuglevel = DEBUG_DEFAULT;
+
+ info(DRIVER_AUTHOR);
+ info(DRIVER_DESC);
+ return 0;
+}
+
+static void __exit gigaset_exit_module(void)
+{
+}
+
+module_init(gigaset_init_module);
+module_exit(gigaset_exit_module);
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/isdn/gigaset/ev-layer.c b/drivers/isdn/gigaset/ev-layer.c
new file mode 100644
index 00000000000..fdcb80bb21c
--- /dev/null
+++ b/drivers/isdn/gigaset/ev-layer.c
@@ -0,0 +1,1983 @@
+/*
+ * Stuff used by all variants of the driver
+ *
+ * Copyright (c) 2001 by Stefan Eilers <Eilers.Stefan@epost.de>,
+ * Hansjoerg Lipp <hjlipp@web.de>,
+ * Tilman Schmidt <tilman@imap.cc>.
+ *
+ * =====================================================================
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ * =====================================================================
+ * ToDo: ...
+ * =====================================================================
+ * Version: $Id: ev-layer.c,v 1.4.2.18 2006/02/04 18:28:16 hjlipp Exp $
+ * =====================================================================
+ */
+
+#include "gigaset.h"
+
+/* ========================================================== */
+/* bit masks for pending commands */
+#define PC_INIT 0x004
+#define PC_DLE0 0x008
+#define PC_DLE1 0x010
+#define PC_CID 0x080
+#define PC_NOCID 0x100
+#define PC_HUP 0x002
+#define PC_DIAL 0x001
+#define PC_ACCEPT 0x040
+#define PC_SHUTDOWN 0x020
+#define PC_CIDMODE 0x200
+#define PC_UMMODE 0x400
+
+/* types of modem responses */
+#define RT_NOTHING 0
+#define RT_ZSAU 1
+#define RT_RING 2
+#define RT_NUMBER 3
+#define RT_STRING 4
+#define RT_HEX 5
+#define RT_ZCAU 6
+
+/* Possible ASCII responses */
+#define RSP_OK 0
+//#define RSP_BUSY 1
+//#define RSP_CONNECT 2
+#define RSP_ZGCI 3
+#define RSP_RING 4
+#define RSP_ZAOC 5
+#define RSP_ZCSTR 6
+#define RSP_ZCFGT 7
+#define RSP_ZCFG 8
+#define RSP_ZCCR 9
+#define RSP_EMPTY 10
+#define RSP_ZLOG 11
+#define RSP_ZCAU 12
+#define RSP_ZMWI 13
+#define RSP_ZABINFO 14
+#define RSP_ZSMLSTCHG 15
+#define RSP_VAR 100
+#define RSP_ZSAU (RSP_VAR + VAR_ZSAU)
+#define RSP_ZDLE (RSP_VAR + VAR_ZDLE)
+#define RSP_ZVLS (RSP_VAR + VAR_ZVLS)
+#define RSP_ZCTP (RSP_VAR + VAR_ZCTP)
+#define RSP_STR (RSP_VAR + VAR_NUM)
+#define RSP_NMBR (RSP_STR + STR_NMBR)
+#define RSP_ZCPN (RSP_STR + STR_ZCPN)
+#define RSP_ZCON (RSP_STR + STR_ZCON)
+#define RSP_ZBC (RSP_STR + STR_ZBC)
+#define RSP_ZHLC (RSP_STR + STR_ZHLC)
+#define RSP_ERROR -1 /* ERROR */
+#define RSP_WRONG_CID -2 /* unknown cid in cmd */
+//#define RSP_EMPTY -3
+#define RSP_UNKNOWN -4 /* unknown response */
+#define RSP_FAIL -5 /* internal error */
+#define RSP_INVAL -6 /* invalid response */
+
+#define RSP_NONE -19
+#define RSP_STRING -20
+#define RSP_NULL -21
+//#define RSP_RETRYFAIL -22
+//#define RSP_RETRY -23
+//#define RSP_SKIP -24
+#define RSP_INIT -27
+#define RSP_ANY -26
+#define RSP_LAST -28
+#define RSP_NODEV -9
+
+/* actions for process_response */
+#define ACT_NOTHING 0
+#define ACT_SETDLE1 1
+#define ACT_SETDLE0 2
+#define ACT_FAILINIT 3
+#define ACT_HUPMODEM 4
+#define ACT_CONFIGMODE 5
+#define ACT_INIT 6
+#define ACT_DLE0 7
+#define ACT_DLE1 8
+#define ACT_FAILDLE0 9
+#define ACT_FAILDLE1 10
+#define ACT_RING 11
+#define ACT_CID 12
+#define ACT_FAILCID 13
+#define ACT_SDOWN 14
+#define ACT_FAILSDOWN 15
+#define ACT_DEBUG 16
+#define ACT_WARN 17
+#define ACT_DIALING 18
+#define ACT_ABORTDIAL 19
+#define ACT_DISCONNECT 20
+#define ACT_CONNECT 21
+#define ACT_REMOTEREJECT 22
+#define ACT_CONNTIMEOUT 23
+#define ACT_REMOTEHUP 24
+#define ACT_ABORTHUP 25
+#define ACT_ICALL 26
+#define ACT_ACCEPTED 27
+#define ACT_ABORTACCEPT 28
+#define ACT_TIMEOUT 29
+#define ACT_GETSTRING 30
+#define ACT_SETVER 31
+#define ACT_FAILVER 32
+#define ACT_GOTVER 33
+#define ACT_TEST 34
+#define ACT_ERROR 35
+#define ACT_ABORTCID 36
+#define ACT_ZCAU 37
+#define ACT_NOTIFY_BC_DOWN 38
+#define ACT_NOTIFY_BC_UP 39
+#define ACT_DIAL 40
+#define ACT_ACCEPT 41
+#define ACT_PROTO_L2 42
+#define ACT_HUP 43
+#define ACT_IF_LOCK 44
+#define ACT_START 45
+#define ACT_STOP 46
+#define ACT_FAKEDLE0 47
+#define ACT_FAKEHUP 48
+#define ACT_FAKESDOWN 49
+#define ACT_SHUTDOWN 50
+#define ACT_PROC_CIDMODE 51
+#define ACT_UMODESET 52
+#define ACT_FAILUMODE 53
+#define ACT_CMODESET 54
+#define ACT_FAILCMODE 55
+#define ACT_IF_VER 56
+#define ACT_CMD 100
+
+/* at command sequences */
+#define SEQ_NONE 0
+#define SEQ_INIT 100
+#define SEQ_DLE0 200
+#define SEQ_DLE1 250
+#define SEQ_CID 300
+#define SEQ_NOCID 350
+#define SEQ_HUP 400
+#define SEQ_DIAL 600
+#define SEQ_ACCEPT 720
+#define SEQ_SHUTDOWN 500
+#define SEQ_CIDMODE 10
+#define SEQ_UMMODE 11
+
+// 100: init, 200: dle0, 250: dle1, 300: get cid (dial), 350: "hup" (no cid), 400: hup, 500: reset, 600: dial, 700: ring
+struct reply_t gigaset_tab_nocid_m10x[]= /* with dle mode */
+{
+ /* resp_code, min_ConState, max_ConState, parameter, new_ConState, timeout, action, command */
+
+ /* initialize device, set cid mode if possible */
+ //{RSP_INIT, -1, -1,100, 900, 0, {ACT_TEST}},
+ //{RSP_ERROR, 900,900, -1, 0, 0, {ACT_FAILINIT}},
+ //{RSP_OK, 900,900, -1, 100, INIT_TIMEOUT,
+ // {ACT_TIMEOUT}},
+
+ {RSP_INIT, -1, -1,SEQ_INIT, 100, INIT_TIMEOUT,
+ {ACT_TIMEOUT}}, /* wait until device is ready */
+
+ {EV_TIMEOUT, 100,100, -1, 101, 3, {0}, "Z\r"}, /* device in transparent mode? try to initialize it. */
+ {RSP_OK, 101,103, -1, 120, 5, {ACT_GETSTRING}, "+GMR\r"}, /* get version */
+
+ {EV_TIMEOUT, 101,101, -1, 102, 5, {0}, "Z\r"}, /* timeout => try once again. */
+ {RSP_ERROR, 101,101, -1, 102, 5, {0}, "Z\r"}, /* error => try once again. */
+
+ {EV_TIMEOUT, 102,102, -1, 108, 5, {ACT_SETDLE1}, "^SDLE=0\r"}, /* timeout => try again in DLE mode. */
+ {RSP_OK, 108,108, -1, 104,-1},
+ {RSP_ZDLE, 104,104, 0, 103, 5, {0}, "Z\r"},
+ {EV_TIMEOUT, 104,104, -1, 0, 0, {ACT_FAILINIT}},
+ {RSP_ERROR, 108,108, -1, 0, 0, {ACT_FAILINIT}},
+
+ {EV_TIMEOUT, 108,108, -1, 105, 2, {ACT_SETDLE0,
+ ACT_HUPMODEM,
+ ACT_TIMEOUT}}, /* still timeout => connection in unimodem mode? */
+ {EV_TIMEOUT, 105,105, -1, 103, 5, {0}, "Z\r"},
+
+ {RSP_ERROR, 102,102, -1, 107, 5, {0}, "^GETPRE\r"}, /* ERROR on ATZ => maybe in config mode? */
+ {RSP_OK, 107,107, -1, 0, 0, {ACT_CONFIGMODE}},
+ {RSP_ERROR, 107,107, -1, 0, 0, {ACT_FAILINIT}},
+ {EV_TIMEOUT, 107,107, -1, 0, 0, {ACT_FAILINIT}},
+
+ {RSP_ERROR, 103,103, -1, 0, 0, {ACT_FAILINIT}},
+ {EV_TIMEOUT, 103,103, -1, 0, 0, {ACT_FAILINIT}},
+
+ {RSP_STRING, 120,120, -1, 121,-1, {ACT_SETVER}},
+
+ {EV_TIMEOUT, 120,121, -1, 0, 0, {ACT_FAILVER, ACT_INIT}},
+ {RSP_ERROR, 120,121, -1, 0, 0, {ACT_FAILVER, ACT_INIT}},
+ {RSP_OK, 121,121, -1, 0, 0, {ACT_GOTVER, ACT_INIT}},
+#if 0
+ {EV_TIMEOUT, 120,121, -1, 130, 5, {ACT_FAILVER}, "^SGCI=1\r"},
+ {RSP_ERROR, 120,121, -1, 130, 5, {ACT_FAILVER}, "^SGCI=1\r"},
+ {RSP_OK, 121,121, -1, 130, 5, {ACT_GOTVER}, "^SGCI=1\r"},
+
+ {RSP_OK, 130,130, -1, 0, 0, {ACT_INIT}},
+ {RSP_ERROR, 130,130, -1, 0, 0, {ACT_FAILINIT}},
+ {EV_TIMEOUT, 130,130, -1, 0, 0, {ACT_FAILINIT}},
+#endif
+
+ /* leave dle mode */
+ {RSP_INIT, 0, 0,SEQ_DLE0, 201, 5, {0}, "^SDLE=0\r"},
+ {RSP_OK, 201,201, -1, 202,-1},
+ //{RSP_ZDLE, 202,202, 0, 202, 0, {ACT_ERROR}},//DELETE
+ {RSP_ZDLE, 202,202, 0, 0, 0, {ACT_DLE0}},
+ {RSP_NODEV, 200,249, -1, 0, 0, {ACT_FAKEDLE0}},
+ {RSP_ERROR, 200,249, -1, 0, 0, {ACT_FAILDLE0}},
+ {EV_TIMEOUT, 200,249, -1, 0, 0, {ACT_FAILDLE0}},
+
+ /* enter dle mode */
+ {RSP_INIT, 0, 0,SEQ_DLE1, 251, 5, {0}, "^SDLE=1\r"},
+ {RSP_OK, 251,251, -1, 252,-1},
+ {RSP_ZDLE, 252,252, 1, 0, 0, {ACT_DLE1}},
+ {RSP_ERROR, 250,299, -1, 0, 0, {ACT_FAILDLE1}},
+ {EV_TIMEOUT, 250,299, -1, 0, 0, {ACT_FAILDLE1}},
+
+ /* incoming call */
+ {RSP_RING, -1, -1, -1, -1,-1, {ACT_RING}},
+
+ /* get cid */
+ //{RSP_INIT, 0, 0,300, 901, 0, {ACT_TEST}},
+ //{RSP_ERROR, 901,901, -1, 0, 0, {ACT_FAILCID}},
+ //{RSP_OK, 901,901, -1, 301, 5, {0}, "^SGCI?\r"},
+
+ {RSP_INIT, 0, 0,SEQ_CID, 301, 5, {0}, "^SGCI?\r"},
+ {RSP_OK, 301,301, -1, 302,-1},
+ {RSP_ZGCI, 302,302, -1, 0, 0, {ACT_CID}},
+ {RSP_ERROR, 301,349, -1, 0, 0, {ACT_FAILCID}},
+ {EV_TIMEOUT, 301,349, -1, 0, 0, {ACT_FAILCID}},
+
+ /* enter cid mode */
+ {RSP_INIT, 0, 0,SEQ_CIDMODE, 150, 5, {0}, "^SGCI=1\r"},
+ {RSP_OK, 150,150, -1, 0, 0, {ACT_CMODESET}},
+ {RSP_ERROR, 150,150, -1, 0, 0, {ACT_FAILCMODE}},
+ {EV_TIMEOUT, 150,150, -1, 0, 0, {ACT_FAILCMODE}},
+
+ /* leave cid mode */
+ //{RSP_INIT, 0, 0,SEQ_UMMODE, 160, 5, {0}, "^SGCI=0\r"},
+ {RSP_INIT, 0, 0,SEQ_UMMODE, 160, 5, {0}, "Z\r"},
+ {RSP_OK, 160,160, -1, 0, 0, {ACT_UMODESET}},
+ {RSP_ERROR, 160,160, -1, 0, 0, {ACT_FAILUMODE}},
+ {EV_TIMEOUT, 160,160, -1, 0, 0, {ACT_FAILUMODE}},
+
+ /* abort getting cid */
+ {RSP_INIT, 0, 0,SEQ_NOCID, 0, 0, {ACT_ABORTCID}},
+
+ /* reset */
+#if 0
+ {RSP_INIT, 0, 0,SEQ_SHUTDOWN, 503, 5, {0}, "^SGCI=0\r"},
+ {RSP_OK, 503,503, -1, 504, 5, {0}, "Z\r"},
+#endif
+ {RSP_INIT, 0, 0,SEQ_SHUTDOWN, 504, 5, {0}, "Z\r"},
+ {RSP_OK, 504,504, -1, 0, 0, {ACT_SDOWN}},
+ {RSP_ERROR, 501,599, -1, 0, 0, {ACT_FAILSDOWN}},
+ {EV_TIMEOUT, 501,599, -1, 0, 0, {ACT_FAILSDOWN}},
+ {RSP_NODEV, 501,599, -1, 0, 0, {ACT_FAKESDOWN}},
+
+ {EV_PROC_CIDMODE,-1, -1, -1, -1,-1, {ACT_PROC_CIDMODE}}, //FIXME
+ {EV_IF_LOCK, -1, -1, -1, -1,-1, {ACT_IF_LOCK}}, //FIXME
+ {EV_IF_VER, -1, -1, -1, -1,-1, {ACT_IF_VER}}, //FIXME
+ {EV_START, -1, -1, -1, -1,-1, {ACT_START}}, //FIXME
+ {EV_STOP, -1, -1, -1, -1,-1, {ACT_STOP}}, //FIXME
+ {EV_SHUTDOWN, -1, -1, -1, -1,-1, {ACT_SHUTDOWN}}, //FIXME
+
+ /* misc. */
+ {RSP_EMPTY, -1, -1, -1, -1,-1, {ACT_DEBUG}}, //FIXME
+ {RSP_ZCFGT, -1, -1, -1, -1,-1, {ACT_DEBUG}}, //FIXME
+ {RSP_ZCFG, -1, -1, -1, -1,-1, {ACT_DEBUG}}, //FIXME
+ {RSP_ZLOG, -1, -1, -1, -1,-1, {ACT_DEBUG}}, //FIXME
+ {RSP_ZMWI, -1, -1, -1, -1,-1, {ACT_DEBUG}}, //FIXME
+ {RSP_ZABINFO, -1, -1, -1, -1,-1, {ACT_DEBUG}}, //FIXME
+ {RSP_ZSMLSTCHG,-1, -1, -1, -1,-1, {ACT_DEBUG}}, //FIXME
+
+ {RSP_ZCAU, -1, -1, -1, -1,-1, {ACT_ZCAU}},
+ {RSP_NONE, -1, -1, -1, -1,-1, {ACT_DEBUG}},
+ {RSP_ANY, -1, -1, -1, -1,-1, {ACT_WARN}},
+ {RSP_LAST}
+};
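+/*
+ * How to read a row of the table above, e.g.
+ *	{RSP_OK, 101,103, -1, 120, 5, {ACT_GETSTRING}, "+GMR\r"}
+ * On an OK response while the connection state is 101..103, with any
+ * parameter, switch to ConState 120, start a timeout of 5 units, perform
+ * ACT_GETSTRING and send the command "+GMR\r" to the device.
+ */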
+
+// 600: start dialing, 650: dial in progress, 800: connection is up, 700: ring, 400: hup, 750: accepted icall
+struct reply_t gigaset_tab_cid_m10x[] = /* for M10x */
+{
+ /* resp_code, min_ConState, max_ConState, parameter, new_ConState, timeout, action, command */
+
+ /* dial */
+ {EV_DIAL, -1, -1, -1, -1,-1, {ACT_DIAL}}, //FIXME
+ {RSP_INIT, 0, 0,SEQ_DIAL, 601, 5, {ACT_CMD+AT_BC}},
+ {RSP_OK, 601,601, -1, 602, 5, {ACT_CMD+AT_HLC}},
+ {RSP_NULL, 602,602, -1, 603, 5, {ACT_CMD+AT_PROTO}},
+ {RSP_OK, 602,602, -1, 603, 5, {ACT_CMD+AT_PROTO}},
+ {RSP_OK, 603,603, -1, 604, 5, {ACT_CMD+AT_TYPE}},
+ {RSP_OK, 604,604, -1, 605, 5, {ACT_CMD+AT_MSN}},
+ {RSP_OK, 605,605, -1, 606, 5, {ACT_CMD+AT_ISO}},
+ {RSP_NULL, 605,605, -1, 606, 5, {ACT_CMD+AT_ISO}},
+ {RSP_OK, 606,606, -1, 607, 5, {0}, "+VLS=17\r"}, /* set "Endgeraetemodus" */
+ {RSP_OK, 607,607, -1, 608,-1},
+ //{RSP_ZSAU, 608,608,ZSAU_PROCEEDING, 608, 0, {ACT_ERROR}},//DELETE
+ {RSP_ZSAU, 608,608,ZSAU_PROCEEDING, 609, 5, {ACT_CMD+AT_DIAL}},
+ {RSP_OK, 609,609, -1, 650, 0, {ACT_DIALING}},
+
+ {RSP_ZVLS, 608,608, 17, -1,-1, {ACT_DEBUG}},
+ {RSP_ZCTP, 609,609, -1, -1,-1, {ACT_DEBUG}},
+ {RSP_ZCPN, 609,609, -1, -1,-1, {ACT_DEBUG}},
+ {RSP_ERROR, 601,609, -1, 0, 0, {ACT_ABORTDIAL}},
+ {EV_TIMEOUT, 601,609, -1, 0, 0, {ACT_ABORTDIAL}},
+
+ /* dialing */
+ {RSP_ZCTP, 650,650, -1, -1,-1, {ACT_DEBUG}},
+ {RSP_ZCPN, 650,650, -1, -1,-1, {ACT_DEBUG}},
+ {RSP_ZSAU, 650,650,ZSAU_CALL_DELIVERED, -1,-1, {ACT_DEBUG}}, /* some devices don't send this */
+
+ /* connection established */
+ {RSP_ZSAU, 650,650,ZSAU_ACTIVE, 800,-1, {ACT_CONNECT}}, //FIXME -> DLE1
+ {RSP_ZSAU, 750,750,ZSAU_ACTIVE, 800,-1, {ACT_CONNECT}}, //FIXME -> DLE1
+
+ {EV_BC_OPEN, 800,800, -1, 800,-1, {ACT_NOTIFY_BC_UP}}, //FIXME new constate + timeout
+
+ /* remote hangup */
+ {RSP_ZSAU, 650,650,ZSAU_DISCONNECT_IND, 0, 0, {ACT_REMOTEREJECT}},
+ {RSP_ZSAU, 750,750,ZSAU_DISCONNECT_IND, 0, 0, {ACT_REMOTEHUP}},
+ {RSP_ZSAU, 800,800,ZSAU_DISCONNECT_IND, 0, 0, {ACT_REMOTEHUP}},
+
+ /* hangup */
+ {EV_HUP, -1, -1, -1, -1,-1, {ACT_HUP}}, //FIXME
+ {RSP_INIT, -1, -1,SEQ_HUP, 401, 5, {0}, "+VLS=0\r"}, /* hang up */ //-1,-1?
+ {RSP_OK, 401,401, -1, 402, 5},
+ {RSP_ZVLS, 402,402, 0, 403, 5},
+ {RSP_ZSAU, 403,403,ZSAU_DISCONNECT_REQ, -1,-1, {ACT_DEBUG}}, /* if not remote hup */
+ //{RSP_ZSAU, 403,403,ZSAU_NULL, 401, 0, {ACT_ERROR}}, //DELETE//FIXME -> DLE0 // should we do this _before_ hanging up for base driver?
+ {RSP_ZSAU, 403,403,ZSAU_NULL, 0, 0, {ACT_DISCONNECT}}, //FIXME -> DLE0 // should we do this _before_ hanging up for base driver?
+ {RSP_NODEV, 401,403, -1, 0, 0, {ACT_FAKEHUP}}, //FIXME -> DLE0 // should we do this _before_ hanging up for base driver?
+ {RSP_ERROR, 401,401, -1, 0, 0, {ACT_ABORTHUP}},
+ {EV_TIMEOUT, 401,403, -1, 0, 0, {ACT_ABORTHUP}},
+
+ {EV_BC_CLOSED, 0, 0, -1, 0,-1, {ACT_NOTIFY_BC_DOWN}}, //FIXME new constate + timeout
+
+ /* ring */
+ {RSP_ZBC, 700,700, -1, -1,-1, {0}},
+ {RSP_ZHLC, 700,700, -1, -1,-1, {0}},
+ {RSP_NMBR, 700,700, -1, -1,-1, {0}},
+ {RSP_ZCPN, 700,700, -1, -1,-1, {0}},
+ {RSP_ZCTP, 700,700, -1, -1,-1, {0}},
+ {EV_TIMEOUT, 700,700, -1, 720,720, {ACT_ICALL}},
+ {EV_BC_CLOSED,720,720, -1, 0,-1, {ACT_NOTIFY_BC_DOWN}},
+
+ /*accept icall*/
+ {EV_ACCEPT, -1, -1, -1, -1,-1, {ACT_ACCEPT}}, //FIXME
+ {RSP_INIT, 720,720,SEQ_ACCEPT, 721, 5, {ACT_CMD+AT_PROTO}},
+ {RSP_OK, 721,721, -1, 722, 5, {ACT_CMD+AT_ISO}},
+ {RSP_OK, 722,722, -1, 723, 5, {0}, "+VLS=17\r"}, /* set "Endgeraetemodus" */
+ {RSP_OK, 723,723, -1, 724, 5, {0}},
+ {RSP_ZVLS, 724,724, 17, 750,50, {ACT_ACCEPTED}},
+ {RSP_ERROR, 721,729, -1, 0, 0, {ACT_ABORTACCEPT}},
+ {EV_TIMEOUT, 721,729, -1, 0, 0, {ACT_ABORTACCEPT}},
+ {RSP_ZSAU, 700,729,ZSAU_NULL, 0, 0, {ACT_ABORTACCEPT}},
+ {RSP_ZSAU, 700,729,ZSAU_ACTIVE, 0, 0, {ACT_ABORTACCEPT}},
+ {RSP_ZSAU, 700,729,ZSAU_DISCONNECT_IND, 0, 0, {ACT_ABORTACCEPT}},
+
+ {EV_TIMEOUT, 750,750, -1, 0, 0, {ACT_CONNTIMEOUT}},
+
+ /* misc. */
+ {EV_PROTO_L2, -1, -1, -1, -1,-1, {ACT_PROTO_L2}}, //FIXME
+
+ {RSP_ZCON, -1, -1, -1, -1,-1, {ACT_DEBUG}}, //FIXME
+ {RSP_ZCCR, -1, -1, -1, -1,-1, {ACT_DEBUG}}, //FIXME
+ {RSP_ZAOC, -1, -1, -1, -1,-1, {ACT_DEBUG}}, //FIXME
+ {RSP_ZCSTR, -1, -1, -1, -1,-1, {ACT_DEBUG}}, //FIXME
+
+ {RSP_ZCAU, -1, -1, -1, -1,-1, {ACT_ZCAU}},
+ {RSP_NONE, -1, -1, -1, -1,-1, {ACT_DEBUG}},
+ {RSP_ANY, -1, -1, -1, -1,-1, {ACT_WARN}},
+ {RSP_LAST}
+};
+
+#if 0
+static struct reply_t tab_nocid[]= /* no dle mode */ //FIXME carry over the changes from above
+{
+ /* resp_code, min_ConState, max_ConState, parameter, new_ConState, timeout, action, command */
+
+ {RSP_ANY, -1, -1, -1, -1,-1, ACT_WARN, NULL},
+ {RSP_LAST,0,0,0,0,0,0}
+};
+
+static struct reply_t tab_cid[] = /* no dle mode */ //FIXME carry over the changes from above
+{
+ /* resp_code, min_ConState, max_ConState, parameter, new_ConState, timeout, action, command */
+
+ {RSP_ANY, -1, -1, -1, -1,-1, ACT_WARN, NULL},
+ {RSP_LAST,0,0,0,0,0,0}
+};
+#endif
+
+static struct resp_type_t resp_type[]=
+{
+ /*{"", RSP_EMPTY, RT_NOTHING},*/
+ {"OK", RSP_OK, RT_NOTHING},
+ {"ERROR", RSP_ERROR, RT_NOTHING},
+ {"ZSAU", RSP_ZSAU, RT_ZSAU},
+ {"ZCAU", RSP_ZCAU, RT_ZCAU},
+ {"RING", RSP_RING, RT_RING},
+ {"ZGCI", RSP_ZGCI, RT_NUMBER},
+ {"ZVLS", RSP_ZVLS, RT_NUMBER},
+ {"ZCTP", RSP_ZCTP, RT_NUMBER},
+ {"ZDLE", RSP_ZDLE, RT_NUMBER},
+ {"ZCFGT", RSP_ZCFGT, RT_NUMBER},
+ {"ZCCR", RSP_ZCCR, RT_NUMBER},
+ {"ZMWI", RSP_ZMWI, RT_NUMBER},
+ {"ZHLC", RSP_ZHLC, RT_STRING},
+ {"ZBC", RSP_ZBC, RT_STRING},
+ {"NMBR", RSP_NMBR, RT_STRING},
+ {"ZCPN", RSP_ZCPN, RT_STRING},
+ {"ZCON", RSP_ZCON, RT_STRING},
+ {"ZAOC", RSP_ZAOC, RT_STRING},
+ {"ZCSTR", RSP_ZCSTR, RT_STRING},
+ {"ZCFG", RSP_ZCFG, RT_HEX},
+ {"ZLOG", RSP_ZLOG, RT_NOTHING},
+ {"ZABINFO", RSP_ZABINFO, RT_NOTHING},
+ {"ZSMLSTCHG", RSP_ZSMLSTCHG, RT_NOTHING},
+ {NULL,0,0}
+};
+
+/*
+ * Get integer from char-pointer
+ */
+static int isdn_getnum(char *p)
+{
+ int v = -1;
+
+ IFNULLRETVAL(p, -1);
+
+ dbg(DEBUG_TRANSCMD, "string: %s", p);
+
+ while (*p >= '0' && *p <= '9')
+ v = ((v < 0) ? 0 : (v * 10)) + (int) ((*p++) - '0');
+ if (*p)
+		v = -1; /* invalid character */
+ return v;
+}
+
+/*
+ * Get hexadecimal integer from char-pointer
+ */
+static int isdn_gethex(char *p)
+{
+ int v = 0;
+ int c;
+
+ IFNULLRETVAL(p, -1);
+
+ dbg(DEBUG_TRANSCMD, "string: %s", p);
+
+ if (!*p)
+ return -1;
+
+ do {
+ if (v > (INT_MAX - 15) / 16)
+ return -1;
+ c = *p;
+ if (c >= '0' && c <= '9')
+ c -= '0';
+ else if (c >= 'a' && c <= 'f')
+ c -= 'a' - 10;
+ else if (c >= 'A' && c <= 'F')
+ c -= 'A' - 10;
+ else
+ return -1;
+ v = v * 16 + c;
+ } while (*++p);
+
+ return v;
+}
+
+static inline void new_index(atomic_t *index, int max)
+{
+ if (atomic_read(index) == max) //FIXME race?
+ atomic_set(index, 0);
+ else
+ atomic_inc(index);
+}
+
+/* retrieve CID from parsed response
+ * returns 0 if no CID, -1 if invalid CID, or CID value 1..65535
+ */
+static int cid_of_response(char *s)
+{
+ int cid;
+
+ if (s[-1] != ';')
+ return 0; /* no CID separator */
+ cid = isdn_getnum(s);
+ if (cid < 0)
+ return 0; /* CID not numeric */
+ if (cid < 1 || cid > 65535)
+ return -1; /* CID out of range */
+ return cid;
+ //FIXME is ;<digit>+ at end of non-CID response really impossible?
+}
+
+/* This function is called via the task queue from the callback handler.
+ * It parses a received modem response and queues the resulting events.
+ */
+void gigaset_handle_modem_response(struct cardstate *cs)
+{
+ unsigned char *argv[MAX_REC_PARAMS + 1];
+ int params;
+ int i, j;
+ struct resp_type_t *rt;
+ int curarg;
+ unsigned long flags;
+ unsigned next, tail, head;
+ struct event_t *event;
+ int resp_code;
+ int param_type;
+ int abort;
+ size_t len;
+ int cid;
+ int rawstring;
+
+ IFNULLRET(cs);
+
+ len = cs->cbytes;
+ if (!len) {
+ /* ignore additional LFs/CRs (M10x config mode or cx100) */
+ dbg(DEBUG_MCMD, "skipped EOL [%02X]", cs->respdata[len]);
+ return;
+ }
+ cs->respdata[len] = 0;
+ dbg(DEBUG_TRANSCMD, "raw string: '%s'", cs->respdata);
+ argv[0] = cs->respdata;
+ params = 1;
+ if (cs->at_state.getstring) {
+ /* getstring only allowed without cid at the moment */
+ cs->at_state.getstring = 0;
+ rawstring = 1;
+ cid = 0;
+ } else {
+ /* parse line */
+ for (i = 0; i < len; i++)
+ switch (cs->respdata[i]) {
+ case ';':
+ case ',':
+ case '=':
+ if (params > MAX_REC_PARAMS) {
+ warn("too many parameters in response");
+ /* need last parameter (might be CID) */
+ params--;
+ }
+ argv[params++] = cs->respdata + i + 1;
+ }
+
+ rawstring = 0;
+ cid = params > 1 ? cid_of_response(argv[params-1]) : 0;
+ if (cid < 0) {
+ gigaset_add_event(cs, &cs->at_state, RSP_INVAL,
+ NULL, 0, NULL);
+ return;
+ }
+
+ for (j = 1; j < params; ++j)
+ argv[j][-1] = 0;
+
+ dbg(DEBUG_TRANSCMD, "CMD received: %s", argv[0]);
+ if (cid) {
+ --params;
+ dbg(DEBUG_TRANSCMD, "CID: %s", argv[params]);
+ }
+ dbg(DEBUG_TRANSCMD, "available params: %d", params - 1);
+ for (j = 1; j < params; j++)
+ dbg(DEBUG_TRANSCMD, "param %d: %s", j, argv[j]);
+ }
+
+ spin_lock_irqsave(&cs->ev_lock, flags);
+ head = atomic_read(&cs->ev_head);
+ tail = atomic_read(&cs->ev_tail);
+
+ abort = 1;
+ curarg = 0;
+ while (curarg < params) {
+ next = (tail + 1) % MAX_EVENTS;
+ if (unlikely(next == head)) {
+ err("event queue full");
+ break;
+ }
+
+ event = cs->events + tail;
+ event->at_state = NULL;
+ event->cid = cid;
+ event->ptr = NULL;
+ event->arg = NULL;
+ tail = next;
+
+ if (rawstring) {
+ resp_code = RSP_STRING;
+ param_type = RT_STRING;
+ } else {
+ for (rt = resp_type; rt->response; ++rt)
+ if (!strcmp(argv[curarg], rt->response))
+ break;
+
+ if (!rt->response) {
+ event->type = RSP_UNKNOWN;
+ warn("unknown modem response: %s",
+ argv[curarg]);
+ break;
+ }
+
+ resp_code = rt->resp_code;
+ param_type = rt->type;
+ ++curarg;
+ }
+
+ event->type = resp_code;
+
+ switch (param_type) {
+ case RT_NOTHING:
+ break;
+ case RT_RING:
+ if (!cid) {
+ err("received RING without CID!");
+ event->type = RSP_INVAL;
+ abort = 1;
+ } else {
+ event->cid = 0;
+ event->parameter = cid;
+ abort = 0;
+ }
+ break;
+ case RT_ZSAU:
+ if (curarg >= params) {
+ event->parameter = ZSAU_NONE;
+ break;
+ }
+ if (!strcmp(argv[curarg], "OUTGOING_CALL_PROCEEDING"))
+ event->parameter = ZSAU_OUTGOING_CALL_PROCEEDING;
+ else if (!strcmp(argv[curarg], "CALL_DELIVERED"))
+ event->parameter = ZSAU_CALL_DELIVERED;
+ else if (!strcmp(argv[curarg], "ACTIVE"))
+ event->parameter = ZSAU_ACTIVE;
+ else if (!strcmp(argv[curarg], "DISCONNECT_IND"))
+ event->parameter = ZSAU_DISCONNECT_IND;
+ else if (!strcmp(argv[curarg], "NULL"))
+ event->parameter = ZSAU_NULL;
+ else if (!strcmp(argv[curarg], "DISCONNECT_REQ"))
+ event->parameter = ZSAU_DISCONNECT_REQ;
+ else {
+ event->parameter = ZSAU_UNKNOWN;
+ warn("%s: unknown parameter %s after ZSAU",
+ __func__, argv[curarg]);
+ }
+ ++curarg;
+ break;
+ case RT_STRING:
+ if (curarg < params) {
+ len = strlen(argv[curarg]) + 1;
+ event->ptr = kmalloc(len, GFP_ATOMIC);
+ if (event->ptr)
+ memcpy(event->ptr, argv[curarg], len);
+ else
+ err("no memory for string!");
+ ++curarg;
+ }
+#ifdef CONFIG_GIGASET_DEBUG
+ if (!event->ptr)
+ dbg(DEBUG_CMD, "string==NULL");
+ else
+ dbg(DEBUG_CMD,
+ "string==%s", (char *) event->ptr);
+#endif
+ break;
+ case RT_ZCAU:
+ event->parameter = -1;
+ if (curarg + 1 < params) {
+ i = isdn_gethex(argv[curarg]);
+ j = isdn_gethex(argv[curarg + 1]);
+ if (i >= 0 && i < 256 && j >= 0 && j < 256)
+ event->parameter = (unsigned) i << 8
+ | j;
+ curarg += 2;
+ } else
+ curarg = params - 1;
+ break;
+ case RT_NUMBER:
+ case RT_HEX:
+ if (curarg < params) {
+ if (param_type == RT_HEX)
+ event->parameter =
+ isdn_gethex(argv[curarg]);
+ else
+ event->parameter =
+ isdn_getnum(argv[curarg]);
+ ++curarg;
+ } else
+ event->parameter = -1;
+#ifdef CONFIG_GIGASET_DEBUG
+ dbg(DEBUG_CMD, "parameter==%d", event->parameter);
+#endif
+ break;
+ }
+
+ if (resp_code == RSP_ZDLE)
+ cs->dle = event->parameter;
+
+ if (abort)
+ break;
+ }
+
+ atomic_set(&cs->ev_tail, tail);
+ spin_unlock_irqrestore(&cs->ev_lock, flags);
+
+ if (curarg != params)
+ dbg(DEBUG_ANY, "invalid number of processed parameters: %d/%d",
+ curarg, params);
+}
+EXPORT_SYMBOL_GPL(gigaset_handle_modem_response);
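+/*
+ * Example: a raw response line "ZSAU=ACTIVE;2" is split at '=' and ';'
+ * into "ZSAU", "ACTIVE" and "2"; the trailing ";2" is taken as the CID,
+ * "ZSAU" matches resp_type[] as RSP_ZSAU/RT_ZSAU, and a single event
+ * with type RSP_ZSAU, parameter ZSAU_ACTIVE and cid 2 is queued.
+ */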
+
+/* disconnect
+ * process closing of connection associated with given AT state structure
+ */
+static void disconnect(struct at_state_t **at_state_p)
+{
+ unsigned long flags;
+ struct bc_state *bcs;
+ struct cardstate *cs;
+
+ IFNULLRET(at_state_p);
+ IFNULLRET(*at_state_p);
+ bcs = (*at_state_p)->bcs;
+ cs = (*at_state_p)->cs;
+ IFNULLRET(cs);
+
+ new_index(&(*at_state_p)->seq_index, MAX_SEQ_INDEX);
+
+ /* revert to selected idle mode */
+ if (!atomic_read(&cs->cidmode)) {
+ cs->at_state.pending_commands |= PC_UMMODE;
+ atomic_set(&cs->commands_pending, 1); //FIXME
+ dbg(DEBUG_CMD, "Scheduling PC_UMMODE");
+ }
+
+ if (bcs) {
+ /* B channel assigned: invoke hardware specific handler */
+ cs->ops->close_bchannel(bcs);
+ } else {
+ /* no B channel assigned: just deallocate */
+ spin_lock_irqsave(&cs->lock, flags);
+ list_del(&(*at_state_p)->list);
+ kfree(*at_state_p);
+ *at_state_p = NULL;
+ spin_unlock_irqrestore(&cs->lock, flags);
+ }
+}
+
+/* get_free_channel
+ * get a free AT state structure: either one of those associated with the
+ * B channels of the Gigaset device, or if none of those is available,
+ * a newly allocated one with bcs=NULL
+ * The structure should be freed by calling disconnect() after use.
+ */
+static inline struct at_state_t *get_free_channel(struct cardstate *cs,
+ int cid)
+/* cids: >0: siemens-cid
+ 0: without cid
+ -1: no cid assigned yet
+*/
+{
+ unsigned long flags;
+ int i;
+ struct at_state_t *ret;
+
+ for (i = 0; i < cs->channels; ++i)
+ if (gigaset_get_channel(cs->bcs + i)) {
+ ret = &cs->bcs[i].at_state;
+ ret->cid = cid;
+ return ret;
+ }
+
+ spin_lock_irqsave(&cs->lock, flags);
+ ret = kmalloc(sizeof(struct at_state_t), GFP_ATOMIC);
+ if (ret) {
+ gigaset_at_init(ret, NULL, cs, cid);
+ list_add(&ret->list, &cs->temp_at_states);
+ }
+ spin_unlock_irqrestore(&cs->lock, flags);
+ return ret;
+}
+
+static void init_failed(struct cardstate *cs, int mode)
+{
+ int i;
+ struct at_state_t *at_state;
+
+ cs->at_state.pending_commands &= ~PC_INIT;
+ atomic_set(&cs->mode, mode);
+ atomic_set(&cs->mstate, MS_UNINITIALIZED);
+ gigaset_free_channels(cs);
+ for (i = 0; i < cs->channels; ++i) {
+ at_state = &cs->bcs[i].at_state;
+ if (at_state->pending_commands & PC_CID) {
+ at_state->pending_commands &= ~PC_CID;
+ at_state->pending_commands |= PC_NOCID;
+ atomic_set(&cs->commands_pending, 1);
+ }
+ }
+}
+
+static void schedule_init(struct cardstate *cs, int state)
+{
+ if (cs->at_state.pending_commands & PC_INIT) {
+ dbg(DEBUG_CMD, "not scheduling PC_INIT again");
+ return;
+ }
+ atomic_set(&cs->mstate, state);
+ atomic_set(&cs->mode, M_UNKNOWN);
+ gigaset_block_channels(cs);
+ cs->at_state.pending_commands |= PC_INIT;
+ atomic_set(&cs->commands_pending, 1);
+ dbg(DEBUG_CMD, "Scheduling PC_INIT");
+}
+
+/* Add "AT" to a command, add the cid, dle encode it, send the result to the hardware. */
+static void send_command(struct cardstate *cs, const char *cmd, int cid,
+ int dle, gfp_t kmallocflags)
+{
+ size_t cmdlen, buflen;
+ char *cmdpos, *cmdbuf, *cmdtail;
+
+ cmdlen = strlen(cmd);
+ buflen = 11 + cmdlen;
+
+ if (likely(buflen > cmdlen)) {
+ cmdbuf = kmalloc(buflen, kmallocflags);
+ if (likely(cmdbuf != NULL)) {
+ cmdpos = cmdbuf + 9;
+ cmdtail = cmdpos + cmdlen;
+ memcpy(cmdpos, cmd, cmdlen);
+
+ if (cid > 0 && cid <= 65535) {
+ do {
+ *--cmdpos = '0' + cid % 10;
+ cid /= 10;
+ ++cmdlen;
+ } while (cid);
+ }
+
+ cmdlen += 2;
+ *--cmdpos = 'T';
+ *--cmdpos = 'A';
+
+ if (dle) {
+ cmdlen += 4;
+ *--cmdpos = '(';
+ *--cmdpos = 0x10;
+ *cmdtail++ = 0x10;
+ *cmdtail++ = ')';
+ }
+
+ cs->ops->write_cmd(cs, cmdpos, cmdlen, NULL);
+ kfree(cmdbuf);
+ } else
+ err("no memory for command buffer");
+ } else
+ err("overflow in buflen");
+}
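+/*
+ * Example: send_command(cs, "D*123#\r", 2, 1, GFP_ATOMIC) builds and sends
+ * the byte sequence  <DLE> '(' "AT" '2' "D*123#\r" <DLE> ')'  i.e. the
+ * command is prefixed with "AT" and the CID and, since dle != 0, framed
+ * with DLE markers. (The command string is illustrative.)
+ */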
+
+static struct at_state_t *at_state_from_cid(struct cardstate *cs, int cid)
+{
+ struct at_state_t *at_state;
+ int i;
+ unsigned long flags;
+
+ if (cid == 0)
+ return &cs->at_state;
+
+ for (i = 0; i < cs->channels; ++i)
+ if (cid == cs->bcs[i].at_state.cid)
+ return &cs->bcs[i].at_state;
+
+ spin_lock_irqsave(&cs->lock, flags);
+
+ list_for_each_entry(at_state, &cs->temp_at_states, list)
+ if (cid == at_state->cid) {
+ spin_unlock_irqrestore(&cs->lock, flags);
+ return at_state;
+ }
+
+ spin_unlock_irqrestore(&cs->lock, flags);
+
+ return NULL;
+}
+
+static void bchannel_down(struct bc_state *bcs)
+{
+ IFNULLRET(bcs);
+ IFNULLRET(bcs->cs);
+
+ if (bcs->chstate & CHS_B_UP) {
+ bcs->chstate &= ~CHS_B_UP;
+ gigaset_i4l_channel_cmd(bcs, ISDN_STAT_BHUP);
+ }
+
+ if (bcs->chstate & (CHS_D_UP | CHS_NOTIFY_LL)) {
+ bcs->chstate &= ~(CHS_D_UP | CHS_NOTIFY_LL);
+ gigaset_i4l_channel_cmd(bcs, ISDN_STAT_DHUP);
+ }
+
+ gigaset_free_channel(bcs);
+
+ gigaset_bcs_reinit(bcs);
+}
+
+static void bchannel_up(struct bc_state *bcs)
+{
+ IFNULLRET(bcs);
+
+ if (!(bcs->chstate & CHS_D_UP)) {
+ notice("%s: D channel not up", __func__);
+ bcs->chstate |= CHS_D_UP;
+ gigaset_i4l_channel_cmd(bcs, ISDN_STAT_DCONN);
+ }
+
+ if (bcs->chstate & CHS_B_UP) {
+ notice("%s: B channel already up", __func__);
+ return;
+ }
+
+ bcs->chstate |= CHS_B_UP;
+ gigaset_i4l_channel_cmd(bcs, ISDN_STAT_BCONN);
+}
+
+static void start_dial(struct at_state_t *at_state, void *data, int seq_index)
+{
+ struct bc_state *bcs = at_state->bcs;
+ struct cardstate *cs = at_state->cs;
+ int retval;
+
+ bcs->chstate |= CHS_NOTIFY_LL;
+ //atomic_set(&bcs->status, BCS_INIT);
+
+ if (atomic_read(&at_state->seq_index) != seq_index)
+ goto error;
+
+ retval = gigaset_isdn_setup_dial(at_state, data);
+ if (retval != 0)
+ goto error;
+
+ at_state->pending_commands |= PC_CID;
+ dbg(DEBUG_CMD, "Scheduling PC_CID");
+//#ifdef GIG_MAYINITONDIAL
+// if (atomic_read(&cs->MState) == MS_UNKNOWN) {
+// cs->at_state.pending_commands |= PC_INIT;
+// dbg(DEBUG_CMD, "Scheduling PC_INIT");
+// }
+//#endif
+ atomic_set(&cs->commands_pending, 1); //FIXME
+ return;
+
+error:
+ at_state->pending_commands |= PC_NOCID;
+ dbg(DEBUG_CMD, "Scheduling PC_NOCID");
+ atomic_set(&cs->commands_pending, 1); //FIXME
+ return;
+}
+
+static void start_accept(struct at_state_t *at_state)
+{
+ struct cardstate *cs = at_state->cs;
+ int retval;
+
+ retval = gigaset_isdn_setup_accept(at_state);
+
+ if (retval == 0) {
+ at_state->pending_commands |= PC_ACCEPT;
+ dbg(DEBUG_CMD, "Scheduling PC_ACCEPT");
+ atomic_set(&cs->commands_pending, 1); //FIXME
+ } else {
+ //FIXME
+ at_state->pending_commands |= PC_HUP;
+ dbg(DEBUG_CMD, "Scheduling PC_HUP");
+ atomic_set(&cs->commands_pending, 1); //FIXME
+ }
+}
+
+static void do_start(struct cardstate *cs)
+{
+ gigaset_free_channels(cs);
+
+ if (atomic_read(&cs->mstate) != MS_LOCKED)
+ schedule_init(cs, MS_INIT);
+
+ gigaset_i4l_cmd(cs, ISDN_STAT_RUN);
+ // FIXME: not in locked mode
+ // FIXME 2: only after init sequence
+
+ cs->waiting = 0;
+ wake_up(&cs->waitqueue);
+}
+
+static void finish_shutdown(struct cardstate *cs)
+{
+ if (atomic_read(&cs->mstate) != MS_LOCKED) {
+ atomic_set(&cs->mstate, MS_UNINITIALIZED);
+ atomic_set(&cs->mode, M_UNKNOWN);
+ }
+
+	/* The rest is done by cleanup_cs() in process context. */
+
+ cs->cmd_result = -ENODEV;
+ cs->waiting = 0;
+ wake_up_interruptible(&cs->waitqueue);
+}
+
+static void do_shutdown(struct cardstate *cs)
+{
+ gigaset_block_channels(cs);
+
+ if (atomic_read(&cs->mstate) == MS_READY) {
+ atomic_set(&cs->mstate, MS_SHUTDOWN);
+ cs->at_state.pending_commands |= PC_SHUTDOWN;
+ atomic_set(&cs->commands_pending, 1); //FIXME
+ dbg(DEBUG_CMD, "Scheduling PC_SHUTDOWN"); //FIXME
+ //gigaset_schedule_event(cs); //FIXME
+ } else
+ finish_shutdown(cs);
+}
+
+static void do_stop(struct cardstate *cs)
+{
+ do_shutdown(cs);
+}
+
+/* Entering cid mode or getting a cid failed:
+ * try to initialize the device and try again.
+ *
+ * channel >= 0: getting cid for the channel failed
+ * channel < 0: entering cid mode failed
+ *
+ * returns 0 on failure
+ */
+static int reinit_and_retry(struct cardstate *cs, int channel)
+{
+ int i;
+
+ if (--cs->retry_count <= 0)
+ return 0;
+
+ for (i = 0; i < cs->channels; ++i)
+ if (cs->bcs[i].at_state.cid > 0)
+ return 0;
+
+	if (channel < 0) {
+		warn("Could not enter cid mode. Reinit device and try again.");
+	} else {
+ warn("Could not get a call id. Reinit device and try again.");
+ cs->bcs[channel].at_state.pending_commands |= PC_CID;
+ }
+ schedule_init(cs, MS_INIT);
+ return 1;
+}
+
+static int at_state_invalid(struct cardstate *cs,
+ struct at_state_t *test_ptr)
+{
+ unsigned long flags;
+ unsigned channel;
+ struct at_state_t *at_state;
+ int retval = 0;
+
+ spin_lock_irqsave(&cs->lock, flags);
+
+ if (test_ptr == &cs->at_state)
+ goto exit;
+
+ list_for_each_entry(at_state, &cs->temp_at_states, list)
+ if (at_state == test_ptr)
+ goto exit;
+
+ for (channel = 0; channel < cs->channels; ++channel)
+ if (&cs->bcs[channel].at_state == test_ptr)
+ goto exit;
+
+ retval = 1;
+exit:
+ spin_unlock_irqrestore(&cs->lock, flags);
+ return retval;
+}
+
+static void handle_icall(struct cardstate *cs, struct bc_state *bcs,
+ struct at_state_t **p_at_state)
+{
+ int retval;
+ struct at_state_t *at_state = *p_at_state;
+
+ retval = gigaset_isdn_icall(at_state);
+ switch (retval) {
+ case ICALL_ACCEPT:
+ break;
+ default:
+ err("internal error: disposition=%d", retval);
+ /* --v-- fall through --v-- */
+ case ICALL_IGNORE:
+ case ICALL_REJECT:
+ /* hang up actively
+ * Device doc says that would reject the call.
+ * In fact it doesn't.
+ */
+ at_state->pending_commands |= PC_HUP;
+ atomic_set(&cs->commands_pending, 1);
+ break;
+ }
+}
+
+static int do_lock(struct cardstate *cs)
+{
+ int mode;
+ int i;
+
+ switch (atomic_read(&cs->mstate)) {
+ case MS_UNINITIALIZED:
+ case MS_READY:
+ if (cs->cur_at_seq || !list_empty(&cs->temp_at_states) ||
+ cs->at_state.pending_commands)
+ return -EBUSY;
+
+ for (i = 0; i < cs->channels; ++i)
+ if (cs->bcs[i].at_state.pending_commands)
+ return -EBUSY;
+
+ if (!gigaset_get_channels(cs))
+ return -EBUSY;
+
+ break;
+ case MS_LOCKED:
+ //retval = -EACCES;
+ break;
+ default:
+ return -EBUSY;
+ }
+
+ mode = atomic_read(&cs->mode);
+ atomic_set(&cs->mstate, MS_LOCKED);
+ atomic_set(&cs->mode, M_UNKNOWN);
+ //FIXME reset card state / at states / bcs states
+
+ return mode;
+}
+
+static int do_unlock(struct cardstate *cs)
+{
+ if (atomic_read(&cs->mstate) != MS_LOCKED)
+ return -EINVAL;
+
+ atomic_set(&cs->mstate, MS_UNINITIALIZED);
+ atomic_set(&cs->mode, M_UNKNOWN);
+ gigaset_free_channels(cs);
+ //FIXME reset card state / at states / bcs states
+ if (atomic_read(&cs->connected))
+ schedule_init(cs, MS_INIT);
+
+ return 0;
+}
+
+static void do_action(int action, struct cardstate *cs,
+ struct bc_state *bcs,
+ struct at_state_t **p_at_state, char **pp_command,
+ int *p_genresp, int *p_resp_code,
+ struct event_t *ev)
+{
+ struct at_state_t *at_state = *p_at_state;
+ struct at_state_t *at_state2;
+ unsigned long flags;
+
+ int channel;
+
+ unsigned char *s, *e;
+ int i;
+ unsigned long val;
+
+ switch (action) {
+ case ACT_NOTHING:
+ break;
+ case ACT_TIMEOUT:
+ at_state->waiting = 1;
+ break;
+ case ACT_INIT:
+ //FIXME setup everything
+ cs->at_state.pending_commands &= ~PC_INIT;
+ cs->cur_at_seq = SEQ_NONE;
+ atomic_set(&cs->mode, M_UNIMODEM);
+ if (!atomic_read(&cs->cidmode)) {
+ gigaset_free_channels(cs);
+ atomic_set(&cs->mstate, MS_READY);
+ break;
+ }
+ cs->at_state.pending_commands |= PC_CIDMODE;
+ atomic_set(&cs->commands_pending, 1); //FIXME
+ dbg(DEBUG_CMD, "Scheduling PC_CIDMODE");
+ break;
+ case ACT_FAILINIT:
+ warn("Could not initialize the device.");
+ cs->dle = 0;
+ init_failed(cs, M_UNKNOWN);
+ cs->cur_at_seq = SEQ_NONE;
+ break;
+ case ACT_CONFIGMODE:
+ init_failed(cs, M_CONFIG);
+ cs->cur_at_seq = SEQ_NONE;
+ break;
+ case ACT_SETDLE1:
+ cs->dle = 1;
+ /* cs->inbuf[0].inputstate |= INS_command | INS_DLE_command; */
+ cs->inbuf[0].inputstate &=
+ ~(INS_command | INS_DLE_command);
+ break;
+ case ACT_SETDLE0:
+ cs->dle = 0;
+ cs->inbuf[0].inputstate =
+ (cs->inbuf[0].inputstate & ~INS_DLE_command)
+ | INS_command;
+ break;
+ case ACT_CMODESET:
+ if (atomic_read(&cs->mstate) == MS_INIT ||
+ atomic_read(&cs->mstate) == MS_RECOVER) {
+ gigaset_free_channels(cs);
+ atomic_set(&cs->mstate, MS_READY);
+ }
+ atomic_set(&cs->mode, M_CID);
+ cs->cur_at_seq = SEQ_NONE;
+ break;
+ case ACT_UMODESET:
+ atomic_set(&cs->mode, M_UNIMODEM);
+ cs->cur_at_seq = SEQ_NONE;
+ break;
+ case ACT_FAILCMODE:
+ cs->cur_at_seq = SEQ_NONE;
+ if (atomic_read(&cs->mstate) == MS_INIT ||
+ atomic_read(&cs->mstate) == MS_RECOVER) {
+ init_failed(cs, M_UNKNOWN);
+ break;
+ }
+ if (!reinit_and_retry(cs, -1))
+ schedule_init(cs, MS_RECOVER);
+ break;
+ case ACT_FAILUMODE:
+ cs->cur_at_seq = SEQ_NONE;
+ schedule_init(cs, MS_RECOVER);
+ break;
+ case ACT_HUPMODEM:
+ /* send "+++" (hangup in unimodem mode) */
+ cs->ops->write_cmd(cs, "+++", 3, NULL);
+ break;
+ case ACT_RING:
+ /* get fresh AT state structure for new CID */
+ at_state2 = get_free_channel(cs, ev->parameter);
+ if (!at_state2) {
+ warn("RING ignored: "
+ "could not allocate channel structure");
+ break;
+ }
+
+ /* initialize AT state structure
+ * note that bcs may be NULL if no B channel is free
+ */
+ at_state2->ConState = 700;
+ kfree(at_state2->str_var[STR_NMBR]);
+ at_state2->str_var[STR_NMBR] = NULL;
+ kfree(at_state2->str_var[STR_ZCPN]);
+ at_state2->str_var[STR_ZCPN] = NULL;
+ kfree(at_state2->str_var[STR_ZBC]);
+ at_state2->str_var[STR_ZBC] = NULL;
+ kfree(at_state2->str_var[STR_ZHLC]);
+ at_state2->str_var[STR_ZHLC] = NULL;
+ at_state2->int_var[VAR_ZCTP] = -1;
+
+ spin_lock_irqsave(&cs->lock, flags);
+ at_state2->timer_expires = RING_TIMEOUT;
+ at_state2->timer_active = 1;
+ spin_unlock_irqrestore(&cs->lock, flags);
+ break;
+ case ACT_ICALL:
+ handle_icall(cs, bcs, p_at_state);
+ at_state = *p_at_state;
+ break;
+ case ACT_FAILSDOWN:
+ warn("Could not shut down the device.");
+ /* fall through */
+ case ACT_FAKESDOWN:
+ case ACT_SDOWN:
+ cs->cur_at_seq = SEQ_NONE;
+ finish_shutdown(cs);
+ break;
+ case ACT_CONNECT:
+ if (cs->onechannel) {
+ at_state->pending_commands |= PC_DLE1;
+ atomic_set(&cs->commands_pending, 1);
+ break;
+ }
+ bcs->chstate |= CHS_D_UP;
+ gigaset_i4l_channel_cmd(bcs, ISDN_STAT_DCONN);
+ cs->ops->init_bchannel(bcs);
+ break;
+ case ACT_DLE1:
+ cs->cur_at_seq = SEQ_NONE;
+ bcs = cs->bcs + cs->curchannel;
+
+ bcs->chstate |= CHS_D_UP;
+ gigaset_i4l_channel_cmd(bcs, ISDN_STAT_DCONN);
+ cs->ops->init_bchannel(bcs);
+ break;
+ case ACT_FAKEHUP:
+ at_state->int_var[VAR_ZSAU] = ZSAU_NULL;
+ /* fall through */
+ case ACT_DISCONNECT:
+ cs->cur_at_seq = SEQ_NONE;
+ at_state->cid = -1;
+ if (bcs && cs->onechannel && cs->dle) {
+ /* Check for other open channels not needed:
+ * DLE only used for M10x with one B channel.
+ */
+ at_state->pending_commands |= PC_DLE0;
+ atomic_set(&cs->commands_pending, 1);
+ } else {
+ disconnect(p_at_state);
+ at_state = *p_at_state;
+ }
+ break;
+ case ACT_FAKEDLE0:
+ at_state->int_var[VAR_ZDLE] = 0;
+ cs->dle = 0;
+ /* fall through */
+ case ACT_DLE0:
+ cs->cur_at_seq = SEQ_NONE;
+ at_state2 = &cs->bcs[cs->curchannel].at_state;
+ disconnect(&at_state2);
+ break;
+ case ACT_ABORTHUP:
+ cs->cur_at_seq = SEQ_NONE;
+ warn("Could not hang up.");
+ at_state->cid = -1;
+ if (bcs && cs->onechannel)
+ at_state->pending_commands |= PC_DLE0;
+ else {
+ disconnect(p_at_state);
+ at_state = *p_at_state;
+ }
+ schedule_init(cs, MS_RECOVER);
+ break;
+ case ACT_FAILDLE0:
+ cs->cur_at_seq = SEQ_NONE;
+ warn("Could not leave DLE mode.");
+ at_state2 = &cs->bcs[cs->curchannel].at_state;
+ disconnect(&at_state2);
+ schedule_init(cs, MS_RECOVER);
+ break;
+ case ACT_FAILDLE1:
+ cs->cur_at_seq = SEQ_NONE;
+ warn("Could not enter DLE mode. Try to hang up.");
+ channel = cs->curchannel;
+ cs->bcs[channel].at_state.pending_commands |= PC_HUP;
+ atomic_set(&cs->commands_pending, 1);
+ break;
+
+ case ACT_CID: /* got cid; start dialing */
+ cs->cur_at_seq = SEQ_NONE;
+ channel = cs->curchannel;
+ if (ev->parameter > 0 && ev->parameter <= 65535) {
+ cs->bcs[channel].at_state.cid = ev->parameter;
+ cs->bcs[channel].at_state.pending_commands |=
+ PC_DIAL;
+ atomic_set(&cs->commands_pending, 1);
+ break;
+ }
+ /* fall through */
+ case ACT_FAILCID:
+ cs->cur_at_seq = SEQ_NONE;
+ channel = cs->curchannel;
+ if (!reinit_and_retry(cs, channel)) {
+ warn("Could not get a call id. Dialing not possible");
+ at_state2 = &cs->bcs[channel].at_state;
+ disconnect(&at_state2);
+ }
+ break;
+ case ACT_ABORTCID:
+ cs->cur_at_seq = SEQ_NONE;
+ at_state2 = &cs->bcs[cs->curchannel].at_state;
+ disconnect(&at_state2);
+ break;
+
+ case ACT_DIALING:
+ case ACT_ACCEPTED:
+ cs->cur_at_seq = SEQ_NONE;
+ break;
+
+ case ACT_ABORTACCEPT: /* hangup/error/timeout during ICALL processing */
+ disconnect(p_at_state);
+ at_state = *p_at_state;
+ break;
+
+ case ACT_ABORTDIAL: /* error/timeout during dial preparation */
+ cs->cur_at_seq = SEQ_NONE;
+ at_state->pending_commands |= PC_HUP;
+ atomic_set(&cs->commands_pending, 1);
+ break;
+
+ case ACT_REMOTEREJECT: /* DISCONNECT_IND after dialling */
+ case ACT_CONNTIMEOUT: /* timeout waiting for ZSAU=ACTIVE */
+ case ACT_REMOTEHUP: /* DISCONNECT_IND with established connection */
+ at_state->pending_commands |= PC_HUP;
+ atomic_set(&cs->commands_pending, 1);
+ break;
+ case ACT_GETSTRING: /* warning: RING, ZDLE, ... are not handled properly any more */
+ at_state->getstring = 1;
+ break;
+ case ACT_SETVER:
+ if (!ev->ptr) {
+ *p_genresp = 1;
+ *p_resp_code = RSP_ERROR;
+ break;
+ }
+ s = ev->ptr;
+
+ if (!strcmp(s, "OK")) {
+ *p_genresp = 1;
+ *p_resp_code = RSP_ERROR;
+ break;
+ }
+
+ for (i = 0; i < 4; ++i) {
+ val = simple_strtoul(s, (char **) &e, 10);
+ if (val > INT_MAX || e == s)
+ break;
+ if (i == 3) {
+ if (*e)
+ break;
+ } else if (*e != '.')
+ break;
+ else
+ s = e + 1;
+ cs->fwver[i] = val;
+ }
+ if (i != 4) {
+ *p_genresp = 1;
+ *p_resp_code = RSP_ERROR;
+ break;
+ }
+ /*at_state->getstring = 1;*/
+ cs->gotfwver = 0;
+ break;
+ case ACT_GOTVER:
+ if (cs->gotfwver == 0) {
+ cs->gotfwver = 1;
+ dbg(DEBUG_ANY,
+ "firmware version %02d.%03d.%02d.%02d",
+ cs->fwver[0], cs->fwver[1],
+ cs->fwver[2], cs->fwver[3]);
+ break;
+ }
+ /* fall through */
+ case ACT_FAILVER:
+ cs->gotfwver = -1;
+ err("could not read firmware version.");
+ break;
+#ifdef CONFIG_GIGASET_DEBUG
+ case ACT_ERROR:
+ *p_genresp = 1;
+ *p_resp_code = RSP_ERROR;
+ break;
+ case ACT_TEST:
+ {
+ static int count = 3; //2; //1;
+ *p_genresp = 1;
+ *p_resp_code = count ? RSP_ERROR : RSP_OK;
+ if (count > 0)
+ --count;
+ }
+ break;
+#endif
+ case ACT_DEBUG:
+ dbg(DEBUG_ANY, "%s: resp_code %d in ConState %d",
+ __func__, ev->type, at_state->ConState);
+ break;
+ case ACT_WARN:
+ warn("%s: resp_code %d in ConState %d!",
+ __func__, ev->type, at_state->ConState);
+ break;
+ case ACT_ZCAU:
+ warn("cause code %04x in connection state %d.",
+ ev->parameter, at_state->ConState);
+ break;
+
+ /* events from the LL */
+ case ACT_DIAL:
+ start_dial(at_state, ev->ptr, ev->parameter);
+ break;
+ case ACT_ACCEPT:
+ start_accept(at_state);
+ break;
+ case ACT_PROTO_L2:
+ dbg(DEBUG_CMD,
+ "set protocol to %u", (unsigned) ev->parameter);
+ at_state->bcs->proto2 = ev->parameter;
+ break;
+ case ACT_HUP:
+ at_state->pending_commands |= PC_HUP;
+ atomic_set(&cs->commands_pending, 1); //FIXME
+ dbg(DEBUG_CMD, "Scheduling PC_HUP");
+ break;
+
+ /* hotplug events */
+ case ACT_STOP:
+ do_stop(cs);
+ break;
+ case ACT_START:
+ do_start(cs);
+ break;
+
+ /* events from the interface */ // FIXME without ACT_xxxx?
+ case ACT_IF_LOCK:
+ cs->cmd_result = ev->parameter ? do_lock(cs) : do_unlock(cs);
+ cs->waiting = 0;
+ wake_up(&cs->waitqueue);
+ break;
+ case ACT_IF_VER:
+ if (ev->parameter != 0)
+ cs->cmd_result = -EINVAL;
+ else if (cs->gotfwver != 1) {
+ cs->cmd_result = -ENOENT;
+ } else {
+ memcpy(ev->arg, cs->fwver, sizeof cs->fwver);
+ cs->cmd_result = 0;
+ }
+ cs->waiting = 0;
+ wake_up(&cs->waitqueue);
+ break;
+
+ /* events from the proc file system */ // FIXME without ACT_xxxx?
+ case ACT_PROC_CIDMODE:
+ if (ev->parameter != atomic_read(&cs->cidmode)) {
+ atomic_set(&cs->cidmode, ev->parameter);
+ if (ev->parameter) {
+ cs->at_state.pending_commands |= PC_CIDMODE;
+ dbg(DEBUG_CMD, "Scheduling PC_CIDMODE");
+ } else {
+ cs->at_state.pending_commands |= PC_UMMODE;
+ dbg(DEBUG_CMD, "Scheduling PC_UMMODE");
+ }
+ atomic_set(&cs->commands_pending, 1);
+ }
+ cs->waiting = 0;
+ wake_up(&cs->waitqueue);
+ break;
+
+ /* events from the hardware drivers */
+ case ACT_NOTIFY_BC_DOWN:
+ bchannel_down(bcs);
+ break;
+ case ACT_NOTIFY_BC_UP:
+ bchannel_up(bcs);
+ break;
+ case ACT_SHUTDOWN:
+ do_shutdown(cs);
+ break;
+
+ default:
+ if (action >= ACT_CMD && action < ACT_CMD + AT_NUM) {
+ *pp_command = at_state->bcs->commands[action - ACT_CMD];
+ if (!*pp_command) {
+ *p_genresp = 1;
+ *p_resp_code = RSP_NULL;
+ }
+ } else
+ err("%s: action==%d!", __func__, action);
+ }
+}
+
+/* State machine implementing the dialing and hangup procedures */
+static void process_event(struct cardstate *cs, struct event_t *ev)
+{
+ struct bc_state *bcs;
+ char *p_command = NULL;
+ struct reply_t *rep;
+ int rcode;
+ int genresp = 0;
+ int resp_code = RSP_ERROR;
+ int sendcid;
+ struct at_state_t *at_state;
+ int index;
+ int curact;
+ unsigned long flags;
+
+ IFNULLRET(cs);
+ IFNULLRET(ev);
+
+ if (ev->cid >= 0) {
+ at_state = at_state_from_cid(cs, ev->cid);
+ if (!at_state) {
+ gigaset_add_event(cs, &cs->at_state, RSP_WRONG_CID,
+ NULL, 0, NULL);
+ return;
+ }
+ } else {
+ at_state = ev->at_state;
+ if (at_state_invalid(cs, at_state)) {
+ dbg(DEBUG_ANY,
+ "event for invalid at_state %p", at_state);
+ return;
+ }
+ }
+
+ dbg(DEBUG_CMD,
+ "connection state %d, event %d", at_state->ConState, ev->type);
+
+ bcs = at_state->bcs;
+ sendcid = at_state->cid;
+
+	/* Set the pointer to the reply table */
+ rep = at_state->replystruct;
+ IFNULLRET(rep);
+
+ if (ev->type == EV_TIMEOUT) {
+ if (ev->parameter != atomic_read(&at_state->timer_index)
+ || !at_state->timer_active) {
+ ev->type = RSP_NONE; /* old timeout */
+ dbg(DEBUG_ANY, "old timeout");
+ } else if (!at_state->waiting)
+			dbg(DEBUG_ANY, "timeout occurred");
+ else
+ dbg(DEBUG_ANY, "stopped waiting");
+ }
+
+ /* if the response belongs to a variable in at_state->int_var[VAR_XXXX] or at_state->str_var[STR_XXXX], set it */
+ if (ev->type >= RSP_VAR && ev->type < RSP_VAR + VAR_NUM) {
+ index = ev->type - RSP_VAR;
+ at_state->int_var[index] = ev->parameter;
+ } else if (ev->type >= RSP_STR && ev->type < RSP_STR + STR_NUM) {
+ index = ev->type - RSP_STR;
+ kfree(at_state->str_var[index]);
+ at_state->str_var[index] = ev->ptr;
+ ev->ptr = NULL; /* prevent process_events() from deallocating ptr */
+ }
+
+ if (ev->type == EV_TIMEOUT || ev->type == RSP_STRING)
+ at_state->getstring = 0;
+
+	/* Search for a row in the reply table matching the modem response and the current ConState */
+ for (;; rep++) {
+ rcode = rep->resp_code;
+ /* dbg (DEBUG_ANY, "rcode %d", rcode); */
+ if (rcode == RSP_LAST) {
+ /* found nothing...*/
+ warn("%s: rcode=RSP_LAST: resp_code %d in ConState %d!",
+ __func__, ev->type, at_state->ConState);
+ return;
+ }
+ if ((rcode == RSP_ANY || rcode == ev->type)
+ && ((int) at_state->ConState >= rep->min_ConState)
+ && (rep->max_ConState < 0
+ || (int) at_state->ConState <= rep->max_ConState)
+ && (rep->parameter < 0 || rep->parameter == ev->parameter))
+ break;
+ }
+
+ p_command = rep->command;
+
+ at_state->waiting = 0;
+ for (curact = 0; curact < MAXACT; ++curact) {
+		/* The row tells us what to do. */
+ do_action(rep->action[curact], cs, bcs, &at_state, &p_command, &genresp, &resp_code, ev);
+ if (!at_state)
+ break; /* may be freed after disconnect */
+ }
+
+ if (at_state) {
+		/* Advance to the next ConState as given by the matched row */
+ if (rep->new_ConState >= 0)
+ at_state->ConState = rep->new_ConState;
+
+ if (genresp) {
+ spin_lock_irqsave(&cs->lock, flags);
+ at_state->timer_expires = 0; //FIXME
+ at_state->timer_active = 0; //FIXME
+ spin_unlock_irqrestore(&cs->lock, flags);
+ gigaset_add_event(cs, at_state, resp_code, NULL, 0, NULL);
+ } else {
+ /* Send command to modem if not NULL... */
+ if (p_command/*rep->command*/) {
+ if (atomic_read(&cs->connected))
+ send_command(cs, p_command,
+ sendcid, cs->dle,
+ GFP_ATOMIC);
+ else
+ gigaset_add_event(cs, at_state,
+ RSP_NODEV,
+ NULL, 0, NULL);
+ }
+
+ spin_lock_irqsave(&cs->lock, flags);
+ if (!rep->timeout) {
+ at_state->timer_expires = 0;
+ at_state->timer_active = 0;
+ } else if (rep->timeout > 0) { /* new timeout */
+ at_state->timer_expires = rep->timeout * 10;
+ at_state->timer_active = 1;
+ new_index(&at_state->timer_index,
+ MAX_TIMER_INDEX);
+ }
+ spin_unlock_irqrestore(&cs->lock, flags);
+ }
+ }
+}
+
+static void schedule_sequence(struct cardstate *cs,
+ struct at_state_t *at_state, int sequence)
+{
+ cs->cur_at_seq = sequence;
+ gigaset_add_event(cs, at_state, RSP_INIT, NULL, sequence, NULL);
+}
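+/*
+ * For instance, schedule_sequence(cs, &cs->at_state, SEQ_INIT) marks the
+ * device busy with SEQ_INIT and queues an RSP_INIT event whose parameter
+ * selects the matching {RSP_INIT, ..., SEQ_INIT, ...} row of the reply
+ * table, which then drives the initialization command sequence.
+ */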
+
+static void process_command_flags(struct cardstate *cs)
+{
+ struct at_state_t *at_state = NULL;
+ struct bc_state *bcs;
+ int i;
+ int sequence;
+
+ IFNULLRET(cs);
+
+ atomic_set(&cs->commands_pending, 0);
+
+ if (cs->cur_at_seq) {
+ dbg(DEBUG_CMD, "not searching scheduled commands: busy");
+ return;
+ }
+
+ dbg(DEBUG_CMD, "searching scheduled commands");
+
+ sequence = SEQ_NONE;
+
+ /* clear pending_commands and hangup channels on shutdown */
+ if (cs->at_state.pending_commands & PC_SHUTDOWN) {
+ cs->at_state.pending_commands &= ~PC_CIDMODE;
+ for (i = 0; i < cs->channels; ++i) {
+ bcs = cs->bcs + i;
+ at_state = &bcs->at_state;
+ at_state->pending_commands &=
+ ~(PC_DLE1 | PC_ACCEPT | PC_DIAL);
+ if (at_state->cid > 0)
+ at_state->pending_commands |= PC_HUP;
+ if (at_state->pending_commands & PC_CID) {
+ at_state->pending_commands |= PC_NOCID;
+ at_state->pending_commands &= ~PC_CID;
+ }
+ }
+ }
+
+ /* clear pending_commands and hangup channels on reset */
+ if (cs->at_state.pending_commands & PC_INIT) {
+ cs->at_state.pending_commands &= ~PC_CIDMODE;
+ for (i = 0; i < cs->channels; ++i) {
+ bcs = cs->bcs + i;
+ at_state = &bcs->at_state;
+ at_state->pending_commands &=
+ ~(PC_DLE1 | PC_ACCEPT | PC_DIAL);
+ if (at_state->cid > 0)
+ at_state->pending_commands |= PC_HUP;
+ if (atomic_read(&cs->mstate) == MS_RECOVER) {
+ if (at_state->pending_commands & PC_CID) {
+ at_state->pending_commands |= PC_NOCID;
+ at_state->pending_commands &= ~PC_CID;
+ }
+ }
+ }
+ }
+
+	/* only switch back to unimodem mode if no commands are pending
+	 * and no channels are up */
+ if (cs->at_state.pending_commands == PC_UMMODE
+ && !atomic_read(&cs->cidmode)
+ && list_empty(&cs->temp_at_states)
+ && atomic_read(&cs->mode) == M_CID) {
+ sequence = SEQ_UMMODE;
+ at_state = &cs->at_state;
+ for (i = 0; i < cs->channels; ++i) {
+ bcs = cs->bcs + i;
+ if (bcs->at_state.pending_commands ||
+ bcs->at_state.cid > 0) {
+ sequence = SEQ_NONE;
+ break;
+ }
+ }
+ }
+ cs->at_state.pending_commands &= ~PC_UMMODE;
+ if (sequence != SEQ_NONE) {
+ schedule_sequence(cs, at_state, sequence);
+ return;
+ }
+
+ for (i = 0; i < cs->channels; ++i) {
+ bcs = cs->bcs + i;
+ if (bcs->at_state.pending_commands & PC_HUP) {
+ bcs->at_state.pending_commands &= ~PC_HUP;
+ if (bcs->at_state.pending_commands & PC_CID) {
+ /* not yet dialing: PC_NOCID is sufficient */
+ bcs->at_state.pending_commands |= PC_NOCID;
+ bcs->at_state.pending_commands &= ~PC_CID;
+ } else {
+ schedule_sequence(cs, &bcs->at_state, SEQ_HUP);
+ return;
+ }
+ }
+ if (bcs->at_state.pending_commands & PC_NOCID) {
+ bcs->at_state.pending_commands &= ~PC_NOCID;
+ cs->curchannel = bcs->channel;
+ schedule_sequence(cs, &cs->at_state, SEQ_NOCID);
+ return;
+ } else if (bcs->at_state.pending_commands & PC_DLE0) {
+ bcs->at_state.pending_commands &= ~PC_DLE0;
+ cs->curchannel = bcs->channel;
+ schedule_sequence(cs, &cs->at_state, SEQ_DLE0);
+ return;
+ }
+ }
+
+ list_for_each_entry(at_state, &cs->temp_at_states, list)
+ if (at_state->pending_commands & PC_HUP) {
+ at_state->pending_commands &= ~PC_HUP;
+ schedule_sequence(cs, at_state, SEQ_HUP);
+ return;
+ }
+
+ if (cs->at_state.pending_commands & PC_INIT) {
+ cs->at_state.pending_commands &= ~PC_INIT;
+ cs->dle = 0; //FIXME
+ cs->inbuf->inputstate = INS_command;
+ //FIXME reset card state (or -> LOCK0)?
+ schedule_sequence(cs, &cs->at_state, SEQ_INIT);
+ return;
+ }
+ if (cs->at_state.pending_commands & PC_SHUTDOWN) {
+ cs->at_state.pending_commands &= ~PC_SHUTDOWN;
+ schedule_sequence(cs, &cs->at_state, SEQ_SHUTDOWN);
+ return;
+ }
+ if (cs->at_state.pending_commands & PC_CIDMODE) {
+ cs->at_state.pending_commands &= ~PC_CIDMODE;
+ if (atomic_read(&cs->mode) == M_UNIMODEM) {
+#if 0
+ cs->retry_count = 2;
+#else
+ cs->retry_count = 1;
+#endif
+ schedule_sequence(cs, &cs->at_state, SEQ_CIDMODE);
+ return;
+ }
+ }
+
+ for (i = 0; i < cs->channels; ++i) {
+ bcs = cs->bcs + i;
+ if (bcs->at_state.pending_commands & PC_DLE1) {
+ bcs->at_state.pending_commands &= ~PC_DLE1;
+ cs->curchannel = bcs->channel;
+ schedule_sequence(cs, &cs->at_state, SEQ_DLE1);
+ return;
+ }
+ if (bcs->at_state.pending_commands & PC_ACCEPT) {
+ bcs->at_state.pending_commands &= ~PC_ACCEPT;
+ schedule_sequence(cs, &bcs->at_state, SEQ_ACCEPT);
+ return;
+ }
+ if (bcs->at_state.pending_commands & PC_DIAL) {
+ bcs->at_state.pending_commands &= ~PC_DIAL;
+ schedule_sequence(cs, &bcs->at_state, SEQ_DIAL);
+ return;
+ }
+ if (bcs->at_state.pending_commands & PC_CID) {
+ switch (atomic_read(&cs->mode)) {
+ case M_UNIMODEM:
+ cs->at_state.pending_commands |= PC_CIDMODE;
+ dbg(DEBUG_CMD, "Scheduling PC_CIDMODE");
+ atomic_set(&cs->commands_pending, 1);
+ return;
+#ifdef GIG_MAYINITONDIAL
+ case M_UNKNOWN:
+ schedule_init(cs, MS_INIT);
+ return;
+#endif
+ }
+ bcs->at_state.pending_commands &= ~PC_CID;
+ cs->curchannel = bcs->channel;
+#ifdef GIG_RETRYCID
+ cs->retry_count = 2;
+#else
+ cs->retry_count = 1;
+#endif
+ schedule_sequence(cs, &cs->at_state, SEQ_CID);
+ return;
+ }
+ }
+}
+
+static void process_events(struct cardstate *cs)
+{
+ struct event_t *ev;
+ unsigned head, tail;
+ int i;
+ int check_flags = 0;
+ int was_busy;
+
+ /* no locking needed (only one reader) */
+ head = atomic_read(&cs->ev_head);
+
+ for (i = 0; i < 2 * MAX_EVENTS; ++i) {
+ tail = atomic_read(&cs->ev_tail);
+ if (tail == head) {
+ if (!check_flags && !atomic_read(&cs->commands_pending))
+ break;
+ check_flags = 0;
+ process_command_flags(cs);
+ tail = atomic_read(&cs->ev_tail);
+ if (tail == head) {
+ if (!atomic_read(&cs->commands_pending))
+ break;
+ continue;
+ }
+ }
+
+ ev = cs->events + head;
+ was_busy = cs->cur_at_seq != SEQ_NONE;
+ process_event(cs, ev);
+ kfree(ev->ptr);
+ ev->ptr = NULL;
+ if (was_busy && cs->cur_at_seq == SEQ_NONE)
+ check_flags = 1;
+
+ head = (head + 1) % MAX_EVENTS;
+ atomic_set(&cs->ev_head, head);
+ }
+
+ if (i == 2 * MAX_EVENTS) {
+ err("infinite loop in process_events; aborting.");
+ }
+}
+
+/* tasklet scheduled on any event received from the Gigaset device
+ * parameter:
+ * data ISDN controller state structure
+ */
+void gigaset_handle_event(unsigned long data)
+{
+ struct cardstate *cs = (struct cardstate *) data;
+
+ IFNULLRET(cs);
+ IFNULLRET(cs->inbuf);
+
+ /* handle incoming data on control/common channel */
+ if (atomic_read(&cs->inbuf->head) != atomic_read(&cs->inbuf->tail)) {
+ dbg(DEBUG_INTR, "processing new data");
+ cs->ops->handle_input(cs->inbuf);
+ }
+
+ process_events(cs);
+}
diff --git a/drivers/isdn/gigaset/gigaset.h b/drivers/isdn/gigaset/gigaset.h
new file mode 100644
index 00000000000..729edcdb6da
--- /dev/null
+++ b/drivers/isdn/gigaset/gigaset.h
@@ -0,0 +1,938 @@
+/* Siemens Gigaset 307x driver
+ * Common header file for all connection variants
+ *
+ * Written by Stefan Eilers <Eilers.Stefan@epost.de>
+ * and Hansjoerg Lipp <hjlipp@web.de>
+ *
+ * Version: $Id: gigaset.h,v 1.97.4.26 2006/02/04 18:28:16 hjlipp Exp $
+ * ===========================================================================
+ */
+
+#ifndef GIGASET_H
+#define GIGASET_H
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/compiler.h>
+#include <linux/types.h>
+#include <asm/atomic.h>
+#include <linux/spinlock.h>
+#include <linux/isdnif.h>
+#include <linux/usb.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/ppp_defs.h>
+#include <linux/timer.h>
+#include <linux/interrupt.h>
+#include <linux/tty.h>
+#include <linux/tty_driver.h>
+#include <linux/list.h>
+
+#define GIG_VERSION {0,5,0,0}
+#define GIG_COMPAT {0,4,0,0}
+
+#define MAX_REC_PARAMS 10 /* Max. number of params in response string */
+#define MAX_RESP_SIZE 512 /* Max. size of a response string */
+#define HW_HDR_LEN 2 /* Header size used to store ack info */
+
+#define MAX_EVENTS 64 /* size of event queue */
+
+#define RBUFSIZE 8192
+#define SBUFSIZE 4096 /* sk_buff payload size */
+
+#define MAX_BUF_SIZE (SBUFSIZE - 2) /* Max. size of a data packet from LL */
+#define TRANSBUFSIZE 768 /* bytes per skb for transparent receive */
+
+/* compile time options */
+#define GIG_MAJOR 0
+
+#define GIG_MAYINITONDIAL
+#define GIG_RETRYCID
+#define GIG_X75
+
+#define MAX_TIMER_INDEX 1000
+#define MAX_SEQ_INDEX 1000
+
+#define GIG_TICK (HZ / 10)
+
+/* timeout values (unit: 1 sec) */
+#define INIT_TIMEOUT 1
+
+/* timeout values (unit: 0.1 sec) */
+#define RING_TIMEOUT 3 /* for additional parameters to RING */
+#define BAS_TIMEOUT 20 /* for response to Base USB ops */
+#define ATRDY_TIMEOUT 3 /* for HD_READY_SEND_ATDATA */
+
+#define BAS_RETRY 3 /* max. retries for base USB ops */
+
+#define MAXACT 3
+
+#define IFNULL(a) if (unlikely(!(a)))
+#define IFNULLRET(a) if (unlikely(!(a))) {err("%s==NULL at %s:%d!", #a, __FILE__, __LINE__); return; }
+#define IFNULLRETVAL(a,b) if (unlikely(!(a))) {err("%s==NULL at %s:%d!", #a, __FILE__, __LINE__); return (b); }
+#define IFNULLCONT(a) if (unlikely(!(a))) {err("%s==NULL at %s:%d!", #a, __FILE__, __LINE__); continue; }
+#define IFNULLGOTO(a,b) if (unlikely(!(a))) {err("%s==NULL at %s:%d!", #a, __FILE__, __LINE__); goto b; }
+
+extern int gigaset_debuglevel; /* "needs" cast to (enum debuglevel) */
+
+/* any combination of these can be given with the 'debug=' parameter to insmod, e.g.
+ * 'insmod usb_gigaset.o debug=0x2c' will set DEBUG_OPEN, DEBUG_CMD and DEBUG_INTR. */
+enum debuglevel { /* up to 24 bits (atomic_t) */
+ DEBUG_REG = 0x0002, /* serial port I/O register operations */
+ DEBUG_OPEN = 0x0004, /* open/close serial port */
+ DEBUG_INTR = 0x0008, /* interrupt processing */
+ DEBUG_INTR_DUMP = 0x0010, /* Activating hexdump debug output on interrupt
+ requests, not available as run-time option */
+ DEBUG_CMD = 0x00020, /* sent/received LL commands */
+ DEBUG_STREAM = 0x00040, /* application data stream I/O events */
+ DEBUG_STREAM_DUMP = 0x00080, /* application data stream content */
+ DEBUG_LLDATA = 0x00100, /* sent/received LL data */
+ DEBUG_INTR_0 = 0x00200, /* serial port output interrupt processing */
+ DEBUG_DRIVER = 0x00400, /* driver structure */
+ DEBUG_HDLC = 0x00800, /* M10x HDLC processing */
+ DEBUG_WRITE = 0x01000, /* M105 data write */
+	DEBUG_TRANSCMD    = 0x02000, /* AT commands and responses */
+	DEBUG_MCMD        = 0x04000, /* commands that are sent very often */
+ DEBUG_INIT = 0x08000, /* (de)allocation+initialization of data structures */
+ DEBUG_LOCK = 0x10000, /* semaphore operations */
+ DEBUG_OUTPUT = 0x20000, /* output to device */
+ DEBUG_ISO = 0x40000, /* isochronous transfers */
+ DEBUG_IF = 0x80000, /* character device operations */
+ DEBUG_USBREQ = 0x100000, /* USB communication (except payload data) */
+ DEBUG_LOCKCMD = 0x200000, /* AT commands and responses when MS_LOCKED */
+
+ DEBUG_ANY = 0x3fffff, /* print message if any of the others is activated */
+};
+
+#ifdef CONFIG_GIGASET_DEBUG
+#define DEBUG_DEFAULT (DEBUG_INIT | DEBUG_TRANSCMD | DEBUG_CMD | DEBUG_USBREQ)
+//#define DEBUG_DEFAULT (DEBUG_LOCK | DEBUG_INIT | DEBUG_TRANSCMD | DEBUG_CMD | DEBUG_IF | DEBUG_DRIVER | DEBUG_OUTPUT | DEBUG_INTR)
+#else
+#define DEBUG_DEFAULT 0
+#endif
+
+/* redefine syslog macros to prepend module name instead of entire source path */
+/* The space before the comma in ", ##" is needed by gcc 2.95 */
+#undef info
+#define info(format, arg...) printk(KERN_INFO "%s: " format "\n", THIS_MODULE ? THIS_MODULE->name : "gigaset_hw" , ## arg)
+
+#undef notice
+#define notice(format, arg...) printk(KERN_NOTICE "%s: " format "\n", THIS_MODULE ? THIS_MODULE->name : "gigaset_hw" , ## arg)
+
+#undef warn
+#define warn(format, arg...) printk(KERN_WARNING "%s: " format "\n", THIS_MODULE ? THIS_MODULE->name : "gigaset_hw" , ## arg)
+
+#undef err
+#define err(format, arg...) printk(KERN_ERR "%s: " format "\n", THIS_MODULE ? THIS_MODULE->name : "gigaset_hw" , ## arg)
+
+#undef dbg
+#ifdef CONFIG_GIGASET_DEBUG
+#define dbg(level, format, arg...) do { if (unlikely(((enum debuglevel)gigaset_debuglevel) & (level))) \
+ printk(KERN_DEBUG "%s: " format "\n", THIS_MODULE ? THIS_MODULE->name : "gigaset_hw" , ## arg); } while (0)
+#else
+#define dbg(level, format, arg...) do {} while (0)
+#endif
+
+void gigaset_dbg_buffer(enum debuglevel level, const unsigned char *msg,
+ size_t len, const unsigned char *buf, int from_user);
+
+/* connection state */
+#define ZSAU_NONE 0
+#define ZSAU_DISCONNECT_IND 4
+#define ZSAU_OUTGOING_CALL_PROCEEDING 1
+#define ZSAU_PROCEEDING 1
+#define ZSAU_CALL_DELIVERED 2
+#define ZSAU_ACTIVE 3
+#define ZSAU_NULL 5
+#define ZSAU_DISCONNECT_REQ 6
+#define ZSAU_UNKNOWN -1
+
+/* USB control transfer requests */
+#define OUT_VENDOR_REQ (USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_ENDPOINT)
+#define IN_VENDOR_REQ (USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_ENDPOINT)
+
+/* int-in-events 3070 */
+#define HD_B1_FLOW_CONTROL 0x80
+#define HD_B2_FLOW_CONTROL 0x81
+#define HD_RECEIVEATDATA_ACK (0x35) // 3070 // att: HD_RECEIVE>>AT<<DATA_ACK
+#define HD_READY_SEND_ATDATA (0x36) // 3070
+#define HD_OPEN_ATCHANNEL_ACK (0x37) // 3070
+#define HD_CLOSE_ATCHANNEL_ACK (0x38) // 3070
+#define HD_DEVICE_INIT_OK (0x11) // ISurf USB + 3070
+#define HD_OPEN_B1CHANNEL_ACK (0x51) // ISurf USB + 3070
+#define HD_OPEN_B2CHANNEL_ACK (0x52) // ISurf USB + 3070
+#define HD_CLOSE_B1CHANNEL_ACK (0x53) // ISurf USB + 3070
+#define HD_CLOSE_B2CHANNEL_ACK (0x54) // ISurf USB + 3070
+// Power management
+#define HD_SUSPEND_END (0x61) // ISurf USB
+// Configuration
+#define HD_RESET_INTERRUPT_PIPE_ACK (0xFF) // ISurf USB + 3070
+
+/* control requests 3070 */
+#define HD_OPEN_B1CHANNEL (0x23) // ISurf USB + 3070
+#define HD_CLOSE_B1CHANNEL (0x24) // ISurf USB + 3070
+#define HD_OPEN_B2CHANNEL (0x25) // ISurf USB + 3070
+#define HD_CLOSE_B2CHANNEL (0x26) // ISurf USB + 3070
+#define HD_RESET_INTERRUPT_PIPE (0x27) // ISurf USB + 3070
+#define HD_DEVICE_INIT_ACK (0x34) // ISurf USB + 3070
+#define HD_WRITE_ATMESSAGE (0x12) // 3070
+#define HD_READ_ATMESSAGE (0x13) // 3070
+#define HD_OPEN_ATCHANNEL (0x28) // 3070
+#define HD_CLOSE_ATCHANNEL (0x29) // 3070
+
+/* USB frames for isochronous transfer */
+#define BAS_FRAMETIME 1 /* number of milliseconds between frames */
+#define BAS_NUMFRAMES 8 /* number of frames per URB */
+#define BAS_MAXFRAME 16 /* allocated bytes per frame */
+#define BAS_NORMFRAME 8 /* send size without flow control */
+#define BAS_HIGHFRAME 10 /* " " with positive flow control */
+#define BAS_LOWFRAME 5 /* " " with negative flow control */
+#define BAS_CORRFRAMES 4 /* flow control multiplicator */
+
+#define BAS_INBUFSIZE (BAS_MAXFRAME * BAS_NUMFRAMES) /* size of isochronous input buffer per URB */
+#define BAS_OUTBUFSIZE 4096 /* size of common isochronous output buffer */
+#define BAS_OUTBUFPAD BAS_MAXFRAME /* size of pad area for isochronous output buffer */
+
+#define BAS_INURBS 3
+#define BAS_OUTURBS 3
+
+/* variable commands in struct bc_state */
+#define AT_ISO 0
+#define AT_DIAL 1
+#define AT_MSN 2
+#define AT_BC 3
+#define AT_PROTO 4
+#define AT_TYPE 5
+#define AT_HLC 6
+#define AT_NUM 7
+
+/* variables in struct at_state_t */
+#define VAR_ZSAU 0
+#define VAR_ZDLE 1
+#define VAR_ZVLS 2
+#define VAR_ZCTP 3
+#define VAR_NUM 4
+
+#define STR_NMBR 0
+#define STR_ZCPN 1
+#define STR_ZCON 2
+#define STR_ZBC 3
+#define STR_ZHLC 4
+#define STR_NUM 5
+
+#define EV_TIMEOUT -105
+#define EV_IF_VER -106
+#define EV_PROC_CIDMODE -107
+#define EV_SHUTDOWN -108
+#define EV_START -110
+#define EV_STOP -111
+#define EV_IF_LOCK -112
+#define EV_PROTO_L2 -113
+#define EV_ACCEPT -114
+#define EV_DIAL -115
+#define EV_HUP -116
+#define EV_BC_OPEN -117
+#define EV_BC_CLOSED -118
+
+/* input state */
+#define INS_command 0x0001
+#define INS_DLE_char 0x0002
+#define INS_byte_stuff 0x0004
+#define INS_have_data 0x0008
+#define INS_skip_frame 0x0010
+#define INS_DLE_command 0x0020
+#define INS_flag_hunt 0x0040
+
+/* channel state */
+#define CHS_D_UP 0x01
+#define CHS_B_UP 0x02
+#define CHS_NOTIFY_LL 0x04
+
+#define ICALL_REJECT 0
+#define ICALL_ACCEPT 1
+#define ICALL_IGNORE 2
+
+/* device state */
+#define MS_UNINITIALIZED 0
+#define MS_INIT 1
+#define MS_LOCKED 2
+#define MS_SHUTDOWN 3
+#define MS_RECOVER 4
+#define MS_READY 5
+
+/* mode */
+#define M_UNKNOWN 0
+#define M_CONFIG 1
+#define M_UNIMODEM 2
+#define M_CID 3
+
+/* start mode */
+#define SM_LOCKED 0
+#define SM_ISDN 1 /* default */
+
+struct gigaset_ops;
+struct gigaset_driver;
+
+struct usb_cardstate;
+struct ser_cardstate;
+struct bas_cardstate;
+
+struct bc_state;
+struct usb_bc_state;
+struct ser_bc_state;
+struct bas_bc_state;
+
+struct reply_t {
+ int resp_code; /* RSP_XXXX */
+ int min_ConState; /* <0 => ignore */
+ int max_ConState; /* <0 => ignore */
+ int parameter; /* e.g. ZSAU_XXXX <0: ignore*/
+ int new_ConState; /* <0 => ignore */
+ int timeout; /* >0 => *HZ; <=0 => TOUT_XXXX*/
+ int action[MAXACT]; /* ACT_XXXX */
+ char *command; /* NULL==none */
+};
+
+extern struct reply_t gigaset_tab_cid_m10x[];
+extern struct reply_t gigaset_tab_nocid_m10x[];
+
+struct inbuf_t {
+ unsigned char *rcvbuf; /* usb-gigaset receive buffer */
+ struct bc_state *bcs;
+ struct cardstate *cs;
+ int inputstate;
+
+ atomic_t head, tail;
+ unsigned char data[RBUFSIZE];
+};
+
+/* isochronous write buffer structure
+ * circular buffer with pad area for extraction of complete USB frames
+ * - data[read..nextread-1] is valid data already submitted to the USB subsystem
+ * - data[nextread..write-1] is valid data yet to be sent
+ * - data[write] is the next byte to write to
+ *   - in byte-oriented L2 protocols, it is completely free
+ *   - in bit-oriented L2 protocols, it may contain a partial byte of valid data
+ * - data[write+1..read-1] is free
+ * - wbits is the number of valid data bits in data[write], starting at the LSB
+ * - writesem is the semaphore for writing to the buffer:
+ * if writesem <= 0, data[write..read-1] is currently being written to
+ * - idle contains the byte value to repeat when the end of valid data is
+ * reached; if nextread==write (buffer contains no data to send), either the
+ * BAS_OUTBUFPAD bytes immediately before data[write] (if write>=BAS_OUTBUFPAD)
+ * or those of the pad area (if write<BAS_OUTBUFPAD) are also filled with that
+ * value
+ * - optionally, the following statistics on the buffer's usage can be collected:
+ * maxfill: maximum number of bytes occupied
+ * idlefills: number of times a frame of idle bytes is prepared
+ * emptygets: number of times the buffer was empty when a data frame was requested
+ * backtoback: number of times two data packets were entered into the buffer
+ * without intervening idle flags
+ * nakedback: set if no idle flags have been inserted since the last data packet
+ */
+struct isowbuf_t {
+ atomic_t read;
+ atomic_t nextread;
+ atomic_t write;
+ atomic_t writesem;
+ int wbits;
+ unsigned char data[BAS_OUTBUFSIZE + BAS_OUTBUFPAD];
+ unsigned char idle;
+};
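A minimal sketch of the free-space computation implied by the invariants
documented above (data[write+1..read-1] is free, modulo the buffer size).
The helper name is hypothetical, and the sketch ignores any additional
reservation of the pad area that the driver itself may apply:

	/* illustrative only: bytes that could still be written to iwb */
	static inline int isowbuf_freebytes_sketch(struct isowbuf_t *iwb)
	{
		int read  = atomic_read(&iwb->read);
		int write = atomic_read(&iwb->write);
		int size  = BAS_OUTBUFSIZE + BAS_OUTBUFPAD;

		if (read > write)
			return read - write - 1;
		return size - (write - read) - 1;
	}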
+
+/* isochronous write URB context structure
+ * data to be stored along with the URB and retrieved when it is returned
+ * as completed by the USB subsystem
+ * - urb: pointer to the URB itself
+ * - bcs: pointer to the B Channel control structure
+ * - limit: end of write buffer area covered by this URB
+ */
+struct isow_urbctx_t {
+ struct urb *urb;
+ struct bc_state *bcs;
+ int limit;
+};
+
+/* AT state structure
+ * data associated with the state of an ISDN connection, whether or not
+ * it is currently assigned a B channel
+ */
+struct at_state_t {
+ struct list_head list;
+ int waiting;
+ int getstring;
+ atomic_t timer_index;
+ unsigned long timer_expires;
+ int timer_active;
+ unsigned int ConState; /* State of connection */
+ struct reply_t *replystruct;
+ int cid;
+ int int_var[VAR_NUM]; /* see VAR_XXXX */
+ char *str_var[STR_NUM]; /* see STR_XXXX */
+ unsigned pending_commands; /* see PC_XXXX */
+ atomic_t seq_index;
+
+ struct cardstate *cs;
+ struct bc_state *bcs;
+};
+
+struct resp_type_t {
+ unsigned char *response;
+ int resp_code; /* RSP_XXXX */
+ int type; /* RT_XXXX */
+};
+
+struct prot_skb {
+ atomic_t empty;
+ struct semaphore *sem;
+ struct sk_buff *skb;
+};
+
+struct event_t {
+ int type;
+ void *ptr, *arg;
+ int parameter;
+ int cid;
+ struct at_state_t *at_state;
+};
+
+/* This buffer holds all information about the used B-Channel */
+struct bc_state {
+ struct sk_buff *tx_skb; /* Current transfer buffer to modem */
+ struct sk_buff_head squeue; /* B-Channel send Queue */
+
+ /* Variables for debugging .. */
+	int corrupted;			/* counter for corrupted packets */
+	int trans_down;			/* counter of packets (downstream) */
+	int trans_up;			/* counter of packets (upstream) */
+
+ struct at_state_t at_state;
+ unsigned long rcvbytes;
+
+ __u16 fcs;
+ struct sk_buff *skb;
+ int inputstate; /* see INS_XXXX */
+
+ int channel;
+
+ struct cardstate *cs;
+
+ unsigned chstate; /* bitmap (CHS_*) */
+ int ignore;
+ unsigned proto2; /* Layer 2 protocol (ISDN_PROTO_L2_*) */
+ char *commands[AT_NUM]; /* see AT_XXXX */
+
+#ifdef CONFIG_GIGASET_DEBUG
+ int emptycount;
+#endif
+ int busy;
+ int use_count;
+
+ /* hardware drivers */
+ union {
+ struct ser_bc_state *ser; /* private data of serial hardware driver */
+ struct usb_bc_state *usb; /* private data of usb hardware driver */
+ struct bas_bc_state *bas;
+ } hw;
+};
+
+struct cardstate {
+ struct gigaset_driver *driver;
+ unsigned minor_index;
+
+ const struct gigaset_ops *ops;
+
+ /* Stuff to handle communication */
+ //wait_queue_head_t initwait;
+ wait_queue_head_t waitqueue;
+ int waiting;
+ atomic_t mode; /* see M_XXXX */
+ atomic_t mstate; /* Modem state: see MS_XXXX */
+ /* only changed by the event layer */
+ int cmd_result;
+
+ int channels;
+ struct bc_state *bcs; /* Array of struct bc_state */
+
+ int onechannel; /* data and commands transmitted in one stream (M10x) */
+
+ spinlock_t lock;
+ struct at_state_t at_state; /* at_state_t for cid == 0 */
+ struct list_head temp_at_states; /* list of temporary "struct at_state_t"s without B channel */
+
+ struct inbuf_t *inbuf;
+
+ struct cmdbuf_t *cmdbuf, *lastcmdbuf;
+ spinlock_t cmdlock;
+ unsigned curlen, cmdbytes;
+
+ unsigned open_count;
+ struct tty_struct *tty;
+ struct tasklet_struct if_wake_tasklet;
+ unsigned control_state;
+
+ unsigned fwver[4];
+ int gotfwver;
+
+ atomic_t running; /* !=0 if events are handled */
+ atomic_t connected; /* !=0 if hardware is connected */
+
+ atomic_t cidmode;
+
+ int myid; /* id for communication with LL */
+ isdn_if iif;
+
+ struct reply_t *tabnocid;
+ struct reply_t *tabcid;
+ int cs_init;
+ int ignoreframes; /* frames to ignore after setting up the B channel */
+ struct semaphore sem; /* locks this structure: */
+ /* connected is not changed, */
+ /* hardware_up is not changed, */
+ /* MState is not changed to or from MS_LOCKED */
+
+ struct timer_list timer;
+ int retry_count;
+ int dle; /* !=0 if modem commands/responses are dle encoded */
+ int cur_at_seq; /* sequence of AT commands being processed */
+ int curchannel; /* channel, those commands are meant for */
+ atomic_t commands_pending; /* flag(s) in xxx.commands_pending have been set */
+ struct tasklet_struct event_tasklet; /* tasklet for serializing AT commands. Scheduled
+						 * -> for modem responses (and incoming data for M10x)
+						 * -> on timeout
+						 * -> after setting bits in xxx.at_state.pending_commands
+ * (e.g. command from LL) */
+ struct tasklet_struct write_tasklet; /* tasklet for serial output
+ * (not used in base driver) */
+
+ /* event queue */
+ struct event_t events[MAX_EVENTS];
+ atomic_t ev_tail, ev_head;
+ spinlock_t ev_lock;
+
+ /* current modem response */
+ unsigned char respdata[MAX_RESP_SIZE];
+ unsigned cbytes;
+
+ /* hardware drivers */
+ union {
+ struct usb_cardstate *usb; /* private data of USB hardware driver */
+ struct ser_cardstate *ser; /* private data of serial hardware driver */
+ struct bas_cardstate *bas; /* private data of base hardware driver */
+ } hw;
+};
+
+struct gigaset_driver {
+ struct list_head list;
+ spinlock_t lock; /* locks minor tables and blocked */
+ //struct semaphore sem; /* locks this structure */
+ struct tty_driver *tty;
+ unsigned have_tty;
+ unsigned minor;
+ unsigned minors;
+ struct cardstate *cs;
+ unsigned *flags;
+ int blocked;
+
+ const struct gigaset_ops *ops;
+ struct module *owner;
+};
+
+struct cmdbuf_t {
+ struct cmdbuf_t *next, *prev;
+ int len, offset;
+ struct tasklet_struct *wake_tasklet;
+ unsigned char buf[0];
+};
+
+struct bas_bc_state {
+ /* isochronous output state */
+ atomic_t running;
+ atomic_t corrbytes;
+ spinlock_t isooutlock;
+ struct isow_urbctx_t isoouturbs[BAS_OUTURBS];
+ struct isow_urbctx_t *isooutdone, *isooutfree, *isooutovfl;
+ struct isowbuf_t *isooutbuf;
+ unsigned numsub; /* submitted URB counter (for diagnostic messages only) */
+ struct tasklet_struct sent_tasklet;
+
+ /* isochronous input state */
+ spinlock_t isoinlock;
+ struct urb *isoinurbs[BAS_INURBS];
+ unsigned char isoinbuf[BAS_INBUFSIZE * BAS_INURBS];
+ struct urb *isoindone; /* completed isoc read URB */
+ int loststatus; /* status of dropped URB */
+ unsigned isoinlost; /* number of bytes lost */
+ /* state of bit unstuffing algorithm (in addition to BC_state.inputstate) */
+ unsigned seqlen; /* number of '1' bits not yet unstuffed */
+ unsigned inbyte, inbits; /* collected bits for next byte */
+ /* statistics */
+ unsigned goodbytes; /* bytes correctly received */
+ unsigned alignerrs; /* frames with incomplete byte at end */
+ unsigned fcserrs; /* FCS errors */
+ unsigned frameerrs; /* framing errors */
+ unsigned giants; /* long frames */
+ unsigned runts; /* short frames */
+ unsigned aborts; /* HDLC aborts */
+ unsigned shared0s; /* '0' bits shared between flags */
+ unsigned stolen0s; /* '0' stuff bits also serving as leading flag bits */
+ struct tasklet_struct rcvd_tasklet;
+};
+
+struct gigaset_ops {
+ /* Called from ev-layer.c/interface.c for sending AT commands to the device */
+ int (*write_cmd)(struct cardstate *cs,
+ const unsigned char *buf, int len,
+ struct tasklet_struct *wake_tasklet);
+
+ /* Called from interface.c for additional device control */
+ int (*write_room)(struct cardstate *cs);
+ int (*chars_in_buffer)(struct cardstate *cs);
+ int (*brkchars)(struct cardstate *cs, const unsigned char buf[6]);
+
+ /* Called from ev-layer.c after setting up connection
+ * Should call gigaset_bchannel_up(), when finished. */
+ int (*init_bchannel)(struct bc_state *bcs);
+
+ /* Called from ev-layer.c after hanging up
+ * Should call gigaset_bchannel_down(), when finished. */
+ int (*close_bchannel)(struct bc_state *bcs);
+
+ /* Called by gigaset_initcs() for setting up bcs->hw.xxx */
+ int (*initbcshw)(struct bc_state *bcs);
+
+ /* Called by gigaset_freecs() for freeing bcs->hw.xxx */
+ int (*freebcshw)(struct bc_state *bcs);
+
+ /* Called by gigaset_stop() or gigaset_bchannel_down() for resetting bcs->hw.xxx */
+ void (*reinitbcshw)(struct bc_state *bcs);
+
+ /* Called by gigaset_initcs() for setting up cs->hw.xxx */
+ int (*initcshw)(struct cardstate *cs);
+
+ /* Called by gigaset_freecs() for freeing cs->hw.xxx */
+ void (*freecshw)(struct cardstate *cs);
+
+ ///* Called by gigaset_stop() for killing URBs, shutting down the device, ...
+ // hardwareup: ==0: don't try to shut down the device, hardware is really not accessible
+ // !=0: hardware still up */
+ //void (*stophw)(struct cardstate *cs, int hardwareup);
+
+ /* Called from common.c/interface.c for additional serial port control */
+ int (*set_modem_ctrl)(struct cardstate *cs, unsigned old_state, unsigned new_state);
+ int (*baud_rate)(struct cardstate *cs, unsigned cflag);
+ int (*set_line_ctrl)(struct cardstate *cs, unsigned cflag);
+
+ /* Called from i4l.c to put an skb into the send-queue. */
+ int (*send_skb)(struct bc_state *bcs, struct sk_buff *skb);
+
+ /* Called from ev-layer.c to process a block of data
+ * received through the common/control channel. */
+ void (*handle_input)(struct inbuf_t *inbuf);
+
+};
+
+/* = Common structures and definitions ======================================= */
+
+/* Parser states for DLE-Event:
+ * <DLE-EVENT>: <DLE_FLAG> "X" <EVENT> <DLE_FLAG> "."
+ * <DLE_FLAG>: 0x10
+ * <EVENT>: ((a-z)* | (A-Z)* | (0-10)*)+
+ */
+#define DLE_FLAG 0x10
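As an illustration of the grammar above, a hypothetical in-band event token
"RING" would appear on the data stream as the byte sequence sketched below
(the event token is assumed for illustration, not taken from this patch):

	/* illustrative only: <DLE_FLAG> 'X' <EVENT> <DLE_FLAG> '.' */
	static const unsigned char dle_event_example[] = {
		DLE_FLAG, 'X', 'R', 'I', 'N', 'G', DLE_FLAG, '.'
	};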
+
+/* ===========================================================================
+ * Functions implemented in asyncdata.c
+ */
+
+/* Called from i4l.c to put an skb into the send-queue.
+ * After sending gigaset_skb_sent() should be called. */
+int gigaset_m10x_send_skb(struct bc_state *bcs, struct sk_buff *skb);
+
+/* Called from ev-layer.c to process a block of data
+ * received through the common/control channel. */
+void gigaset_m10x_input(struct inbuf_t *inbuf);
+
+/* ===========================================================================
+ * Functions implemented in isocdata.c
+ */
+
+/* Called from i4l.c to put an skb into the send-queue.
+ * After sending gigaset_skb_sent() should be called. */
+int gigaset_isoc_send_skb(struct bc_state *bcs, struct sk_buff *skb);
+
+/* Called from ev-layer.c to process a block of data
+ * received through the common/control channel. */
+void gigaset_isoc_input(struct inbuf_t *inbuf);
+
+/* Called from bas-gigaset.c to process a block of data
+ * received through the isochronous channel */
+void gigaset_isoc_receive(unsigned char *src, unsigned count, struct bc_state *bcs);
+
+/* Called from bas-gigaset.c to put a block of data
+ * into the isochronous output buffer */
+int gigaset_isoc_buildframe(struct bc_state *bcs, unsigned char *in, int len);
+
+/* Called from bas-gigaset.c to initialize the isochronous output buffer */
+void gigaset_isowbuf_init(struct isowbuf_t *iwb, unsigned char idle);
+
+/* Called from bas-gigaset.c to retrieve a block of bytes for sending */
+int gigaset_isowbuf_getbytes(struct isowbuf_t *iwb, int size);
+
+/* ===========================================================================
+ * Functions implemented in i4l.c/gigaset.h
+ */
+
+/* Called by gigaset_initcs() for setting up with the isdn4linux subsystem */
+int gigaset_register_to_LL(struct cardstate *cs, const char *isdnid);
+
+/* Called from xxx-gigaset.c to indicate completion of sending an skb */
+void gigaset_skb_sent(struct bc_state *bcs, struct sk_buff *skb);
+
+/* Called from common.c/ev-layer.c to indicate events relevant to the LL */
+int gigaset_isdn_icall(struct at_state_t *at_state);
+int gigaset_isdn_setup_accept(struct at_state_t *at_state);
+int gigaset_isdn_setup_dial(struct at_state_t *at_state, void *data);
+
+void gigaset_i4l_cmd(struct cardstate *cs, int cmd);
+void gigaset_i4l_channel_cmd(struct bc_state *bcs, int cmd);
+
+
+static inline void gigaset_isdn_rcv_err(struct bc_state *bcs)
+{
+ isdn_ctrl response;
+
+ /* error -> LL */
+ dbg(DEBUG_CMD, "sending L1ERR");
+ response.driver = bcs->cs->myid;
+ response.command = ISDN_STAT_L1ERR;
+ response.arg = bcs->channel;
+ response.parm.errcode = ISDN_STAT_L1ERR_RECV;
+ bcs->cs->iif.statcallb(&response);
+}
+
+/* ===========================================================================
+ * Functions implemented in ev-layer.c
+ */
+
+/* tasklet called from common.c to process queued events */
+void gigaset_handle_event(unsigned long data);
+
+/* called from isocdata.c / asyncdata.c
+ * when a complete modem response line has been received */
+void gigaset_handle_modem_response(struct cardstate *cs);
+
+/* ===========================================================================
+ * Functions implemented in proc.c
+ */
+
+/* initialize sysfs for device */
+void gigaset_init_dev_sysfs(struct usb_interface *interface);
+void gigaset_free_dev_sysfs(struct usb_interface *interface);
+
+/* ===========================================================================
+ * Functions implemented in common.c/gigaset.h
+ */
+
+void gigaset_bcs_reinit(struct bc_state *bcs);
+void gigaset_at_init(struct at_state_t *at_state, struct bc_state *bcs,
+ struct cardstate *cs, int cid);
+int gigaset_get_channel(struct bc_state *bcs);
+void gigaset_free_channel(struct bc_state *bcs);
+int gigaset_get_channels(struct cardstate *cs);
+void gigaset_free_channels(struct cardstate *cs);
+void gigaset_block_channels(struct cardstate *cs);
+
+/* Allocate and initialize driver structure. */
+struct gigaset_driver *gigaset_initdriver(unsigned minor, unsigned minors,
+ const char *procname,
+ const char *devname,
+ const char *devfsname,
+ const struct gigaset_ops *ops,
+ struct module *owner);
+
+/* Deallocate driver structure. */
+void gigaset_freedriver(struct gigaset_driver *drv);
+void gigaset_debugdrivers(void);
+struct cardstate *gigaset_get_cs_by_minor(unsigned minor);
+struct cardstate *gigaset_get_cs_by_tty(struct tty_struct *tty);
+struct cardstate *gigaset_get_cs_by_id(int id);
+
+/* For drivers without fixed assignment device<->cardstate (usb) */
+struct cardstate *gigaset_getunassignedcs(struct gigaset_driver *drv);
+void gigaset_unassign(struct cardstate *cs);
+void gigaset_blockdriver(struct gigaset_driver *drv);
+
+/* Allocate and initialize card state. Calls hardware dependent gigaset_init[b]cs(). */
+struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels,
+ int onechannel, int ignoreframes,
+ int cidmode, const char *modulename);
+
+/* Free card state. Calls hardware dependent gigaset_free[b]cs(). */
+void gigaset_freecs(struct cardstate *cs);
+
+/* Tell common.c that hardware and driver are ready. */
+int gigaset_start(struct cardstate *cs);
+
+/* Tell common.c that the device is not present any more. */
+void gigaset_stop(struct cardstate *cs);
+
+/* Tell common.c that the driver is being unloaded. */
+void gigaset_shutdown(struct cardstate *cs);
+
+/* Tell common.c that an skb has been sent. */
+void gigaset_skb_sent(struct bc_state *bcs, struct sk_buff *skb);
+
+/* Append event to the queue.
+ * Returns NULL on failure or a pointer to the event on success.
+ * ptr must be kmalloc()ed (and not be freed by the caller).
+ */
+struct event_t *gigaset_add_event(struct cardstate *cs,
+ struct at_state_t *at_state, int type,
+ void *ptr, int parameter, void *arg);
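A minimal sketch of the caller pattern (mirroring the ISDN_CMD_DIAL handler
in i4l.c below): ptr must be kmalloc()ed by the caller and is owned by the
event queue on success; on failure the caller frees it itself. The helper
name is hypothetical:

	static int queue_dial_sketch(struct cardstate *cs, struct bc_state *bcs,
				     struct setup_parm *sp /* kmalloc()ed */)
	{
		if (!gigaset_add_event(cs, &bcs->at_state, EV_DIAL, sp,
				       atomic_read(&bcs->at_state.seq_index),
				       NULL)) {
			kfree(sp);	/* still owned by the caller */
			return -ENOMEM;
		}
		gigaset_schedule_event(cs);	/* wake the event tasklet */
		return 0;
	}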
+
+/* Called on CONFIG1 command from frontend. */
+int gigaset_enterconfigmode(struct cardstate *cs); //0: success <0: errorcode
+
+/* cs->lock must not be locked */
+static inline void gigaset_schedule_event(struct cardstate *cs)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&cs->lock, flags);
+ if (atomic_read(&cs->running))
+ tasklet_schedule(&cs->event_tasklet);
+ spin_unlock_irqrestore(&cs->lock, flags);
+}
+
+/* Tell common.c that B channel has been closed. */
+/* cs->lock must not be locked */
+static inline void gigaset_bchannel_down(struct bc_state *bcs)
+{
+ gigaset_add_event(bcs->cs, &bcs->at_state, EV_BC_CLOSED, NULL, 0, NULL);
+
+ dbg(DEBUG_CMD, "scheduling BC_CLOSED");
+ gigaset_schedule_event(bcs->cs);
+}
+
+/* Tell common.c that B channel has been opened. */
+/* cs->lock must not be locked */
+static inline void gigaset_bchannel_up(struct bc_state *bcs)
+{
+ gigaset_add_event(bcs->cs, &bcs->at_state, EV_BC_OPEN, NULL, 0, NULL);
+
+ dbg(DEBUG_CMD, "scheduling BC_OPEN");
+ gigaset_schedule_event(bcs->cs);
+}
+
+/* handling routines for sk_buff */
+/* ============================= */
+
+/* private version of __skb_put()
+ * append 'len' bytes to the content of 'skb', already knowing that the
+ * existing buffer can accommodate them
+ * returns a pointer to the location where the new bytes should be copied to
+ * This function does not take any locks so it must be called with the
+ * appropriate locks held only.
+ */
+static inline unsigned char *gigaset_skb_put_quick(struct sk_buff *skb,
+ unsigned int len)
+{
+ unsigned char *tmp = skb->tail;
+ /*SKB_LINEAR_ASSERT(skb);*/ /* not needed here */
+ skb->tail += len;
+ skb->len += len;
+ return tmp;
+}
+
+/* pass received skb to LL
+ * Warning: skb must not be accessed anymore!
+ */
+static inline void gigaset_rcv_skb(struct sk_buff *skb,
+ struct cardstate *cs,
+ struct bc_state *bcs)
+{
+ cs->iif.rcvcallb_skb(cs->myid, bcs->channel, skb);
+ bcs->trans_down++;
+}
+
+/* handle reception of corrupted skb
+ * Warning: skb must not be accessed anymore!
+ */
+static inline void gigaset_rcv_error(struct sk_buff *procskb,
+ struct cardstate *cs,
+ struct bc_state *bcs)
+{
+ if (procskb)
+ dev_kfree_skb(procskb);
+
+ if (bcs->ignore)
+ --bcs->ignore;
+ else {
+ ++bcs->corrupted;
+ gigaset_isdn_rcv_err(bcs);
+ }
+}
+
+
+/* bitwise byte inversion table */
+extern __u8 gigaset_invtab[]; /* in common.c */
+
+
+/* append received bytes to inbuf */
+static inline int gigaset_fill_inbuf(struct inbuf_t *inbuf,
+ const unsigned char *src,
+ unsigned numbytes)
+{
+ unsigned n, head, tail, bytesleft;
+
+ dbg(DEBUG_INTR, "received %u bytes", numbytes);
+
+ if (!numbytes)
+ return 0;
+
+ bytesleft = numbytes;
+ tail = atomic_read(&inbuf->tail);
+ head = atomic_read(&inbuf->head);
+ dbg(DEBUG_INTR, "buffer state: %u -> %u", head, tail);
+
+ while (bytesleft) {
+ if (head > tail)
+ n = head - 1 - tail;
+ else if (head == 0)
+ n = (RBUFSIZE-1) - tail;
+ else
+ n = RBUFSIZE - tail;
+ if (!n) {
+ err("buffer overflow (%u bytes lost)", bytesleft);
+ break;
+ }
+ if (n > bytesleft)
+ n = bytesleft;
+ memcpy(inbuf->data + tail, src, n);
+ bytesleft -= n;
+ tail = (tail + n) % RBUFSIZE;
+ src += n;
+ }
+ dbg(DEBUG_INTR, "setting tail to %u", tail);
+ atomic_set(&inbuf->tail, tail);
+ return numbytes != bytesleft;
+}
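A minimal sketch of how a hardware driver might feed received bytes into this
buffer and have them processed; the function name and calling context (e.g. a
URB or interrupt completion handler) are assumed:

	static void rx_complete_sketch(struct cardstate *cs,
				       const unsigned char *buf, unsigned count)
	{
		/* nonzero return means at least one byte was stored */
		if (gigaset_fill_inbuf(cs->inbuf, buf, count))
			gigaset_schedule_event(cs);	/* run gigaset_handle_event() */
	}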
+
+/* ===========================================================================
+ * Functions implemented in interface.c
+ */
+
+/* initialize interface */
+void gigaset_if_initdriver(struct gigaset_driver *drv, const char *procname,
+ const char *devname, const char *devfsname);
+/* release interface */
+void gigaset_if_freedriver(struct gigaset_driver *drv);
+/* add minor */
+void gigaset_if_init(struct cardstate *cs);
+/* remove minor */
+void gigaset_if_free(struct cardstate *cs);
+/* device received data */
+void gigaset_if_receive(struct cardstate *cs,
+ unsigned char *buffer, size_t len);
+
+#endif
diff --git a/drivers/isdn/gigaset/i4l.c b/drivers/isdn/gigaset/i4l.c
new file mode 100644
index 00000000000..731a675f21b
--- /dev/null
+++ b/drivers/isdn/gigaset/i4l.c
@@ -0,0 +1,567 @@
+/*
+ * Stuff used by all variants of the driver
+ *
+ * Copyright (c) 2001 by Stefan Eilers (Eilers.Stefan@epost.de),
+ * Hansjoerg Lipp (hjlipp@web.de),
+ * Tilman Schmidt (tilman@imap.cc).
+ *
+ * =====================================================================
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ * =====================================================================
+ * ToDo: ...
+ * =====================================================================
+ * Version: $Id: i4l.c,v 1.3.2.9 2006/02/04 18:28:16 hjlipp Exp $
+ * =====================================================================
+ */
+
+#include "gigaset.h"
+
+/* == Handling of I4L IO ============================================================================*/
+
+/* writebuf_from_LL
+ * called by LL to transmit data on an open channel
+ * inserts the buffer data into the send queue and starts the transmission
+ * Note that this operation must not sleep!
+ * When the buffer is processed completely, gigaset_skb_sent() should be called.
+ * parameters:
+ * driverID driver ID as assigned by LL
+ * channel channel number
+ * ack if != 0 LL wants to be notified on completion via statcallb(ISDN_STAT_BSENT)
+ * skb skb containing data to send
+ * return value:
+ * number of accepted bytes
+ * 0 if temporarily unable to accept data (out of buffer space)
+ *	<0 on error (e.g. -EINVAL)
+ */
+static int writebuf_from_LL(int driverID, int channel, int ack, struct sk_buff *skb)
+{
+ struct cardstate *cs;
+ struct bc_state *bcs;
+ unsigned len;
+ unsigned skblen;
+
+ if (!(cs = gigaset_get_cs_by_id(driverID))) {
+ err("%s: invalid driver ID (%d)", __func__, driverID);
+ return -ENODEV;
+ }
+ if (channel < 0 || channel >= cs->channels) {
+ err("%s: invalid channel ID (%d)", __func__, channel);
+ return -ENODEV;
+ }
+ bcs = &cs->bcs[channel];
+ len = skb->len;
+
+ dbg(DEBUG_LLDATA,
+ "Receiving data from LL (id: %d, channel: %d, ack: %d, size: %d)",
+ driverID, channel, ack, len);
+
+ if (!len) {
+ if (ack)
+ warn("not ACKing empty packet from LL");
+ return 0;
+ }
+ if (len > MAX_BUF_SIZE) {
+		err("%s: packet too large (%u bytes)", __func__, len);
+ return -EINVAL;
+ }
+
+ if (!atomic_read(&cs->connected))
+ return -ENODEV;
+
+ skblen = ack ? len : 0;
+ skb->head[0] = skblen & 0xff;
+ skb->head[1] = skblen >> 8;
+ dbg(DEBUG_MCMD, "skb: len=%u, skblen=%u: %02x %02x", len, skblen,
+ (unsigned) skb->head[0], (unsigned) skb->head[1]);
+
+ /* pass to device-specific module */
+ return cs->ops->send_skb(bcs, skb);
+}
+
+void gigaset_skb_sent(struct bc_state *bcs, struct sk_buff *skb)
+{
+ unsigned len;
+ isdn_ctrl response;
+
+ ++bcs->trans_up;
+
+ if (skb->len)
+ warn("%s: skb->len==%d", __func__, skb->len);
+
+ len = (unsigned char) skb->head[0] |
+ (unsigned) (unsigned char) skb->head[1] << 8;
+ if (len) {
+ dbg(DEBUG_MCMD,
+ "Acknowledge sending to LL (id: %d, channel: %d size: %u)",
+ bcs->cs->myid, bcs->channel, len);
+
+ response.driver = bcs->cs->myid;
+ response.command = ISDN_STAT_BSENT;
+ response.arg = bcs->channel;
+ response.parm.length = len;
+ bcs->cs->iif.statcallb(&response);
+ }
+}
+EXPORT_SYMBOL_GPL(gigaset_skb_sent);
+
+/* This function will be called by LL to send commands
+ * NOTE: LL ignores the returned value, for commands other than ISDN_CMD_IOCTL,
+ * so don't put too much effort into it.
+ */
+static int command_from_LL(isdn_ctrl *cntrl)
+{
+ struct cardstate *cs = gigaset_get_cs_by_id(cntrl->driver);
+ //isdn_ctrl response;
+ //unsigned long flags;
+ struct bc_state *bcs;
+ int retval = 0;
+ struct setup_parm *sp;
+
+ //dbg(DEBUG_ANY, "Gigaset_HW: Receiving command");
+ gigaset_debugdrivers();
+
+	/* Terminate this call if no device is present. But if the command is
+	 * "ISDN_CMD_LOCK" or "ISDN_CMD_UNLOCK", execute it anyway, since these
+	 * commands are device independent!
+ */
+ //FIXME "remove test for &connected"
+ if ((!cs || !atomic_read(&cs->connected))) {
+ warn("LL tried to access unknown device with nr. %d",
+ cntrl->driver);
+ return -ENODEV;
+ }
+
+ switch (cntrl->command) {
+ case ISDN_CMD_IOCTL:
+
+ dbg(DEBUG_ANY, "ISDN_CMD_IOCTL (driver:%d,arg: %ld)",
+ cntrl->driver, cntrl->arg);
+
+ warn("ISDN_CMD_IOCTL is not supported.");
+ return -EINVAL;
+
+ case ISDN_CMD_DIAL:
+ dbg(DEBUG_ANY, "ISDN_CMD_DIAL (driver: %d, channel: %ld, "
+ "phone: %s,ownmsn: %s, si1: %d, si2: %d)",
+ cntrl->driver, cntrl->arg,
+ cntrl->parm.setup.phone, cntrl->parm.setup.eazmsn,
+ cntrl->parm.setup.si1, cntrl->parm.setup.si2);
+
+ if (cntrl->arg >= cs->channels) {
+ err("invalid channel (%d)", (int) cntrl->arg);
+ return -EINVAL;
+ }
+
+ bcs = cs->bcs + cntrl->arg;
+
+ if (!gigaset_get_channel(bcs)) {
+ err("channel not free");
+ return -EBUSY;
+ }
+
+ sp = kmalloc(sizeof *sp, GFP_ATOMIC);
+ if (!sp) {
+ gigaset_free_channel(bcs);
+ err("ISDN_CMD_DIAL: out of memory");
+ return -ENOMEM;
+ }
+ *sp = cntrl->parm.setup;
+
+ if (!gigaset_add_event(cs, &bcs->at_state, EV_DIAL, sp,
+ atomic_read(&bcs->at_state.seq_index),
+ NULL)) {
+ //FIXME what should we do?
+ kfree(sp);
+ gigaset_free_channel(bcs);
+ return -ENOMEM;
+ }
+
+ dbg(DEBUG_CMD, "scheduling DIAL");
+ gigaset_schedule_event(cs);
+ break;
+ case ISDN_CMD_ACCEPTD: //FIXME
+ dbg(DEBUG_ANY, "ISDN_CMD_ACCEPTD");
+
+ if (cntrl->arg >= cs->channels) {
+ err("invalid channel (%d)", (int) cntrl->arg);
+ return -EINVAL;
+ }
+
+ if (!gigaset_add_event(cs, &cs->bcs[cntrl->arg].at_state,
+ EV_ACCEPT, NULL, 0, NULL)) {
+ //FIXME what should we do?
+ return -ENOMEM;
+ }
+
+ dbg(DEBUG_CMD, "scheduling ACCEPT");
+ gigaset_schedule_event(cs);
+
+ break;
+ case ISDN_CMD_ACCEPTB:
+ dbg(DEBUG_ANY, "ISDN_CMD_ACCEPTB");
+ break;
+ case ISDN_CMD_HANGUP:
+ dbg(DEBUG_ANY,
+ "ISDN_CMD_HANGUP (channel: %d)", (int) cntrl->arg);
+
+ if (cntrl->arg >= cs->channels) {
+ err("ISDN_CMD_HANGUP: invalid channel (%u)",
+ (unsigned) cntrl->arg);
+ return -EINVAL;
+ }
+
+ if (!gigaset_add_event(cs, &cs->bcs[cntrl->arg].at_state,
+ EV_HUP, NULL, 0, NULL)) {
+ //FIXME what should we do?
+ return -ENOMEM;
+ }
+
+ dbg(DEBUG_CMD, "scheduling HUP");
+ gigaset_schedule_event(cs);
+
+ break;
+ case ISDN_CMD_CLREAZ: /* Do not signal incoming signals */ //FIXME
+ dbg(DEBUG_ANY, "ISDN_CMD_CLREAZ");
+ break;
+ case ISDN_CMD_SETEAZ: /* Signal incoming calls for given MSN */ //FIXME
+ dbg(DEBUG_ANY,
+ "ISDN_CMD_SETEAZ (id:%d, channel: %ld, number: %s)",
+ cntrl->driver, cntrl->arg, cntrl->parm.num);
+ break;
+ case ISDN_CMD_SETL2: /* Set L2 to given protocol */
+ dbg(DEBUG_ANY, "ISDN_CMD_SETL2 (Channel: %ld, Proto: %lx)",
+ cntrl->arg & 0xff, (cntrl->arg >> 8));
+
+ if ((cntrl->arg & 0xff) >= cs->channels) {
+ err("invalid channel (%u)",
+ (unsigned) cntrl->arg & 0xff);
+ return -EINVAL;
+ }
+
+ if (!gigaset_add_event(cs, &cs->bcs[cntrl->arg & 0xff].at_state,
+ EV_PROTO_L2, NULL, cntrl->arg >> 8,
+ NULL)) {
+ //FIXME what should we do?
+ return -ENOMEM;
+ }
+
+ dbg(DEBUG_CMD, "scheduling PROTO_L2");
+ gigaset_schedule_event(cs);
+ break;
+ case ISDN_CMD_SETL3: /* Set L3 to given protocol */
+ dbg(DEBUG_ANY, "ISDN_CMD_SETL3 (Channel: %ld, Proto: %lx)",
+ cntrl->arg & 0xff, (cntrl->arg >> 8));
+
+ if ((cntrl->arg & 0xff) >= cs->channels) {
+ err("invalid channel (%u)",
+ (unsigned) cntrl->arg & 0xff);
+ return -EINVAL;
+ }
+
+ if (cntrl->arg >> 8 != ISDN_PROTO_L3_TRANS) {
+ err("invalid protocol %lu", cntrl->arg >> 8);
+ return -EINVAL;
+ }
+
+ break;
+ case ISDN_CMD_PROCEED:
+ dbg(DEBUG_ANY, "ISDN_CMD_PROCEED"); //FIXME
+ break;
+ case ISDN_CMD_ALERT:
+ dbg(DEBUG_ANY, "ISDN_CMD_ALERT"); //FIXME
+ if (cntrl->arg >= cs->channels) {
+ err("invalid channel (%d)", (int) cntrl->arg);
+ return -EINVAL;
+ }
+ //bcs = cs->bcs + cntrl->arg;
+ //bcs->proto2 = -1;
+ // FIXME
+ break;
+ case ISDN_CMD_REDIR:
+ dbg(DEBUG_ANY, "ISDN_CMD_REDIR"); //FIXME
+ break;
+ case ISDN_CMD_PROT_IO:
+ dbg(DEBUG_ANY, "ISDN_CMD_PROT_IO");
+ break;
+ case ISDN_CMD_FAXCMD:
+ dbg(DEBUG_ANY, "ISDN_CMD_FAXCMD");
+ break;
+ case ISDN_CMD_GETL2:
+ dbg(DEBUG_ANY, "ISDN_CMD_GETL2");
+ break;
+ case ISDN_CMD_GETL3:
+ dbg(DEBUG_ANY, "ISDN_CMD_GETL3");
+ break;
+ case ISDN_CMD_GETEAZ:
+ dbg(DEBUG_ANY, "ISDN_CMD_GETEAZ");
+ break;
+ case ISDN_CMD_SETSIL:
+ dbg(DEBUG_ANY, "ISDN_CMD_SETSIL");
+ break;
+ case ISDN_CMD_GETSIL:
+ dbg(DEBUG_ANY, "ISDN_CMD_GETSIL");
+ break;
+ default:
+ err("unknown command %d from LL",
+ cntrl->command);
+ return -EINVAL;
+ }
+
+ return retval;
+}
+
+void gigaset_i4l_cmd(struct cardstate *cs, int cmd)
+{
+ isdn_ctrl command;
+
+ command.driver = cs->myid;
+ command.command = cmd;
+ command.arg = 0;
+ cs->iif.statcallb(&command);
+}
+
+void gigaset_i4l_channel_cmd(struct bc_state *bcs, int cmd)
+{
+ isdn_ctrl command;
+
+ command.driver = bcs->cs->myid;
+ command.command = cmd;
+ command.arg = bcs->channel;
+ bcs->cs->iif.statcallb(&command);
+}
+
+int gigaset_isdn_setup_dial(struct at_state_t *at_state, void *data)
+{
+ struct bc_state *bcs = at_state->bcs;
+ unsigned proto;
+ const char *bc;
+ size_t length[AT_NUM];
+ size_t l;
+ int i;
+ struct setup_parm *sp = data;
+
+ switch (bcs->proto2) {
+ case ISDN_PROTO_L2_HDLC:
+		proto = 1; /* 0: bit-synchronous, 1: HDLC, 2: voice */
+		break;
+	case ISDN_PROTO_L2_TRANS:
+		proto = 2; /* 0: bit-synchronous, 1: HDLC, 2: voice */
+ break;
+ default:
+ err("invalid protocol: %u", bcs->proto2);
+ return -EINVAL;
+ }
+
+ switch (sp->si1) {
+ case 1: /* audio */
+ bc = "9090A3"; /* 3.1 kHz audio, A-law */
+ break;
+ case 7: /* data */
+ default: /* hope the app knows what it is doing */
+ bc = "8890"; /* unrestricted digital information */
+ }
+ //FIXME add missing si1 values from 1TR6, inspect si2, set HLC/LLC
+
+ length[AT_DIAL ] = 1 + strlen(sp->phone) + 1 + 1;
+ l = strlen(sp->eazmsn);
+ length[AT_MSN ] = l ? 6 + l + 1 + 1 : 0;
+ length[AT_BC ] = 5 + strlen(bc) + 1 + 1;
+ length[AT_PROTO] = 6 + 1 + 1 + 1; /* proto: 1 character */
+ length[AT_ISO ] = 6 + 1 + 1 + 1; /* channel: 1 character */
+ length[AT_TYPE ] = 6 + 1 + 1 + 1; /* call type: 1 character */
+ length[AT_HLC ] = 0;
+
+ for (i = 0; i < AT_NUM; ++i) {
+ kfree(bcs->commands[i]);
+ bcs->commands[i] = NULL;
+ if (length[i] &&
+ !(bcs->commands[i] = kmalloc(length[i], GFP_ATOMIC))) {
+ err("out of memory");
+ return -ENOMEM;
+ }
+ }
+
+	/* type = 1: external, 0: internal, 2: recall, 3: door, 4: centrex */
+ if (sp->phone[0] == '*' && sp->phone[1] == '*') {
+ /* internal call: translate ** prefix to CTP value */
+ snprintf(bcs->commands[AT_DIAL], length[AT_DIAL],
+ "D%s\r", sp->phone+2);
+ strncpy(bcs->commands[AT_TYPE], "^SCTP=0\r", length[AT_TYPE]);
+ } else {
+ snprintf(bcs->commands[AT_DIAL], length[AT_DIAL],
+ "D%s\r", sp->phone);
+ strncpy(bcs->commands[AT_TYPE], "^SCTP=1\r", length[AT_TYPE]);
+ }
+
+ if (bcs->commands[AT_MSN])
+ snprintf(bcs->commands[AT_MSN], length[AT_MSN], "^SMSN=%s\r", sp->eazmsn);
+ snprintf(bcs->commands[AT_BC ], length[AT_BC ], "^SBC=%s\r", bc);
+ snprintf(bcs->commands[AT_PROTO], length[AT_PROTO], "^SBPR=%u\r", proto);
+ snprintf(bcs->commands[AT_ISO ], length[AT_ISO ], "^SISO=%u\r", (unsigned)bcs->channel + 1);
+
+ return 0;
+}
+
+int gigaset_isdn_setup_accept(struct at_state_t *at_state)
+{
+ unsigned proto;
+ size_t length[AT_NUM];
+ int i;
+ struct bc_state *bcs = at_state->bcs;
+
+ switch (bcs->proto2) {
+ case ISDN_PROTO_L2_HDLC:
+		proto = 1; /* 0: bit-synchronous, 1: HDLC, 2: voice */
+		break;
+	case ISDN_PROTO_L2_TRANS:
+		proto = 2; /* 0: bit-synchronous, 1: HDLC, 2: voice */
+ break;
+ default:
+ err("invalid protocol: %u", bcs->proto2);
+ return -EINVAL;
+ }
+
+ length[AT_DIAL ] = 0;
+ length[AT_MSN ] = 0;
+ length[AT_BC ] = 0;
+ length[AT_PROTO] = 6 + 1 + 1 + 1; /* proto: 1 character */
+ length[AT_ISO ] = 6 + 1 + 1 + 1; /* channel: 1 character */
+ length[AT_TYPE ] = 0;
+ length[AT_HLC ] = 0;
+
+ for (i = 0; i < AT_NUM; ++i) {
+ kfree(bcs->commands[i]);
+ bcs->commands[i] = NULL;
+ if (length[i] &&
+ !(bcs->commands[i] = kmalloc(length[i], GFP_ATOMIC))) {
+ err("out of memory");
+ return -ENOMEM;
+ }
+ }
+
+ snprintf(bcs->commands[AT_PROTO], length[AT_PROTO], "^SBPR=%u\r", proto);
+ snprintf(bcs->commands[AT_ISO ], length[AT_ISO ], "^SISO=%u\r", (unsigned) bcs->channel + 1);
+
+ return 0;
+}
+
+int gigaset_isdn_icall(struct at_state_t *at_state)
+{
+ struct cardstate *cs = at_state->cs;
+ struct bc_state *bcs = at_state->bcs;
+ isdn_ctrl response;
+ int retval;
+
+ /* fill ICALL structure */
+ response.parm.setup.si1 = 0; /* default: unknown */
+ response.parm.setup.si2 = 0;
+ response.parm.setup.screen = 0; //FIXME how to set these?
+ response.parm.setup.plan = 0;
+ if (!at_state->str_var[STR_ZBC]) {
+ /* no BC (internal call): assume speech, A-law */
+ response.parm.setup.si1 = 1;
+ } else if (!strcmp(at_state->str_var[STR_ZBC], "8890")) {
+ /* unrestricted digital information */
+ response.parm.setup.si1 = 7;
+ } else if (!strcmp(at_state->str_var[STR_ZBC], "8090A3")) {
+ /* speech, A-law */
+ response.parm.setup.si1 = 1;
+ } else if (!strcmp(at_state->str_var[STR_ZBC], "9090A3")) {
+		/* 3.1 kHz audio, A-law */
+ response.parm.setup.si1 = 1;
+ response.parm.setup.si2 = 2;
+ } else {
+ warn("RING ignored - unsupported BC %s",
+ at_state->str_var[STR_ZBC]);
+ return ICALL_IGNORE;
+ }
+ if (at_state->str_var[STR_NMBR]) {
+ strncpy(response.parm.setup.phone, at_state->str_var[STR_NMBR],
+ sizeof response.parm.setup.phone - 1);
+ response.parm.setup.phone[sizeof response.parm.setup.phone - 1] = 0;
+ } else
+ response.parm.setup.phone[0] = 0;
+ if (at_state->str_var[STR_ZCPN]) {
+ strncpy(response.parm.setup.eazmsn, at_state->str_var[STR_ZCPN],
+ sizeof response.parm.setup.eazmsn - 1);
+ response.parm.setup.eazmsn[sizeof response.parm.setup.eazmsn - 1] = 0;
+ } else
+ response.parm.setup.eazmsn[0] = 0;
+
+ if (!bcs) {
+ notice("no channel for incoming call");
+ dbg(DEBUG_CMD, "Sending ICALLW");
+ response.command = ISDN_STAT_ICALLW;
+ response.arg = 0; //FIXME
+ } else {
+ dbg(DEBUG_CMD, "Sending ICALL");
+ response.command = ISDN_STAT_ICALL;
+ response.arg = bcs->channel; //FIXME
+ }
+ response.driver = cs->myid;
+ retval = cs->iif.statcallb(&response);
+ dbg(DEBUG_CMD, "Response: %d", retval);
+ switch (retval) {
+ case 0: /* no takers */
+ return ICALL_IGNORE;
+ case 1: /* alerting */
+ bcs->chstate |= CHS_NOTIFY_LL;
+ return ICALL_ACCEPT;
+ case 2: /* reject */
+ return ICALL_REJECT;
+ case 3: /* incomplete */
+ warn("LL requested unsupported feature: Incomplete Number");
+ return ICALL_IGNORE;
+ case 4: /* proceeding */
+ /* Gigaset will send ALERTING anyway.
+ * There doesn't seem to be a way to avoid this.
+ */
+ return ICALL_ACCEPT;
+ case 5: /* deflect */
+ warn("LL requested unsupported feature: Call Deflection");
+ return ICALL_IGNORE;
+ default:
+ err("LL error %d on ICALL", retval);
+ return ICALL_IGNORE;
+ }
+}
+
+/* Set Callback function pointer */
+int gigaset_register_to_LL(struct cardstate *cs, const char *isdnid)
+{
+ isdn_if *iif = &cs->iif;
+
+ dbg(DEBUG_ANY, "Register driver capabilities to LL");
+
+ //iif->id[sizeof(iif->id) - 1]=0;
+ //strncpy(iif->id, isdnid, sizeof(iif->id) - 1);
+ if (snprintf(iif->id, sizeof iif->id, "%s_%u", isdnid, cs->minor_index)
+ >= sizeof iif->id)
+ return -ENOMEM; //FIXME EINVAL/...??
+
+ iif->owner = THIS_MODULE;
+	iif->channels = cs->channels;		/* number of supported B channels */
+ iif->maxbufsize = MAX_BUF_SIZE;
+ iif->features = ISDN_FEATURE_L2_TRANS | /* Our device is very advanced, therefore */
+ ISDN_FEATURE_L2_HDLC |
+#ifdef GIG_X75
+ ISDN_FEATURE_L2_X75I |
+#endif
+ ISDN_FEATURE_L3_TRANS |
+ ISDN_FEATURE_P_EURO;
+ iif->hl_hdrlen = HW_HDR_LEN; /* Area for storing ack */
+ iif->command = command_from_LL;
+ iif->writebuf_skb = writebuf_from_LL;
+ iif->writecmd = NULL; /* Don't support isdnctrl */
+ iif->readstat = NULL; /* Don't support isdnctrl */
+ iif->rcvcallb_skb = NULL; /* Will be set by LL */
+ iif->statcallb = NULL; /* Will be set by LL */
+
+ if (!register_isdn(iif))
+ return 0;
+
+ cs->myid = iif->channels; /* Set my device id */
+ return 1;
+}
diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
new file mode 100644
index 00000000000..3a81d9c6514
--- /dev/null
+++ b/drivers/isdn/gigaset/interface.c
@@ -0,0 +1,718 @@
+/*
+ * interface to user space for the gigaset driver
+ *
+ * Copyright (c) 2004 by Hansjoerg Lipp <hjlipp@web.de>
+ *
+ * =====================================================================
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ * =====================================================================
+ * Version: $Id: interface.c,v 1.14.4.15 2006/02/04 18:28:16 hjlipp Exp $
+ * =====================================================================
+ */
+
+#include "gigaset.h"
+#include <linux/gigaset_dev.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+
+/*** our ioctls ***/
+
+static int if_lock(struct cardstate *cs, int *arg)
+{
+ int cmd = *arg;
+
+ dbg(DEBUG_IF, "%u: if_lock (%d)", cs->minor_index, cmd);
+
+ if (cmd > 1)
+ return -EINVAL;
+
+ if (cmd < 0) {
+ *arg = atomic_read(&cs->mstate) == MS_LOCKED; //FIXME remove?
+ return 0;
+ }
+
+ if (!cmd && atomic_read(&cs->mstate) == MS_LOCKED
+ && atomic_read(&cs->connected)) {
+ cs->ops->set_modem_ctrl(cs, 0, TIOCM_DTR|TIOCM_RTS);
+ cs->ops->baud_rate(cs, B115200);
+ cs->ops->set_line_ctrl(cs, CS8);
+ cs->control_state = TIOCM_DTR|TIOCM_RTS;
+ }
+
+ cs->waiting = 1;
+ if (!gigaset_add_event(cs, &cs->at_state, EV_IF_LOCK,
+ NULL, cmd, NULL)) {
+ cs->waiting = 0;
+ return -ENOMEM;
+ }
+
+ dbg(DEBUG_CMD, "scheduling IF_LOCK");
+ gigaset_schedule_event(cs);
+
+ wait_event(cs->waitqueue, !cs->waiting);
+
+ if (cs->cmd_result >= 0) {
+ *arg = cs->cmd_result;
+ return 0;
+ }
+
+ return cs->cmd_result;
+}
+
+static int if_version(struct cardstate *cs, unsigned arg[4])
+{
+ static const unsigned version[4] = GIG_VERSION;
+ static const unsigned compat[4] = GIG_COMPAT;
+ unsigned cmd = arg[0];
+
+ dbg(DEBUG_IF, "%u: if_version (%d)", cs->minor_index, cmd);
+
+ switch (cmd) {
+ case GIGVER_DRIVER:
+ memcpy(arg, version, sizeof version);
+ return 0;
+ case GIGVER_COMPAT:
+ memcpy(arg, compat, sizeof compat);
+ return 0;
+ case GIGVER_FWBASE:
+ cs->waiting = 1;
+ if (!gigaset_add_event(cs, &cs->at_state, EV_IF_VER,
+ NULL, 0, arg)) {
+ cs->waiting = 0;
+ return -ENOMEM;
+ }
+
+ dbg(DEBUG_CMD, "scheduling IF_VER");
+ gigaset_schedule_event(cs);
+
+ wait_event(cs->waitqueue, !cs->waiting);
+
+ if (cs->cmd_result >= 0)
+ return 0;
+
+ return cs->cmd_result;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int if_config(struct cardstate *cs, int *arg)
+{
+ dbg(DEBUG_IF, "%u: if_config (%d)", cs->minor_index, *arg);
+
+ if (*arg != 1)
+ return -EINVAL;
+
+ if (atomic_read(&cs->mstate) != MS_LOCKED)
+ return -EBUSY;
+
+ *arg = 0;
+ return gigaset_enterconfigmode(cs);
+}
+
+/*** the terminal driver ***/
+/* stolen from usbserial and some other tty drivers */
+
+static int if_open(struct tty_struct *tty, struct file *filp);
+static void if_close(struct tty_struct *tty, struct file *filp);
+static int if_ioctl(struct tty_struct *tty, struct file *file,
+ unsigned int cmd, unsigned long arg);
+static int if_write_room(struct tty_struct *tty);
+static int if_chars_in_buffer(struct tty_struct *tty);
+static void if_throttle(struct tty_struct *tty);
+static void if_unthrottle(struct tty_struct *tty);
+static void if_set_termios(struct tty_struct *tty, struct termios *old);
+static int if_tiocmget(struct tty_struct *tty, struct file *file);
+static int if_tiocmset(struct tty_struct *tty, struct file *file,
+ unsigned int set, unsigned int clear);
+static int if_write(struct tty_struct *tty,
+ const unsigned char *buf, int count);
+
+static struct tty_operations if_ops = {
+ .open = if_open,
+ .close = if_close,
+ .ioctl = if_ioctl,
+ .write = if_write,
+ .write_room = if_write_room,
+ .chars_in_buffer = if_chars_in_buffer,
+ .set_termios = if_set_termios,
+ .throttle = if_throttle,
+ .unthrottle = if_unthrottle,
+#if 0
+ .break_ctl = serial_break,
+#endif
+ .tiocmget = if_tiocmget,
+ .tiocmset = if_tiocmset,
+};
+
+static int if_open(struct tty_struct *tty, struct file *filp)
+{
+ struct cardstate *cs;
+ unsigned long flags;
+
+ dbg(DEBUG_IF, "%d+%d: %s()", tty->driver->minor_start, tty->index,
+ __FUNCTION__);
+
+ tty->driver_data = NULL;
+
+ cs = gigaset_get_cs_by_tty(tty);
+ if (!cs)
+ return -ENODEV;
+
+ if (down_interruptible(&cs->sem))
+ return -ERESTARTSYS; // FIXME -EINTR?
+ tty->driver_data = cs;
+
+ ++cs->open_count;
+
+ if (cs->open_count == 1) {
+ spin_lock_irqsave(&cs->lock, flags);
+ cs->tty = tty;
+ spin_unlock_irqrestore(&cs->lock, flags);
+ tty->low_latency = 1; //FIXME test
+ //FIXME
+ }
+
+ up(&cs->sem);
+ return 0;
+}
+
+static void if_close(struct tty_struct *tty, struct file *filp)
+{
+ struct cardstate *cs;
+ unsigned long flags;
+
+ cs = (struct cardstate *) tty->driver_data;
+ if (!cs) {
+ err("cs==NULL in %s", __FUNCTION__);
+ return;
+ }
+
+ dbg(DEBUG_IF, "%u: %s()", cs->minor_index, __FUNCTION__);
+
+ down(&cs->sem);
+
+ if (!cs->open_count)
+ warn("%s: device not opened", __FUNCTION__);
+ else {
+ if (!--cs->open_count) {
+ spin_lock_irqsave(&cs->lock, flags);
+ cs->tty = NULL;
+ spin_unlock_irqrestore(&cs->lock, flags);
+ //FIXME
+ }
+ }
+
+ up(&cs->sem);
+}
+
+static int if_ioctl(struct tty_struct *tty, struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ struct cardstate *cs;
+ int retval = -ENODEV;
+ int int_arg;
+ unsigned char buf[6];
+ unsigned version[4];
+
+ cs = (struct cardstate *) tty->driver_data;
+ if (!cs) {
+ err("cs==NULL in %s", __FUNCTION__);
+ return -ENODEV;
+ }
+
+ dbg(DEBUG_IF, "%u: %s(0x%x)", cs->minor_index, __FUNCTION__, cmd);
+
+ if (down_interruptible(&cs->sem))
+ return -ERESTARTSYS; // FIXME -EINTR?
+
+ if (!cs->open_count)
+ warn("%s: device not opened", __FUNCTION__);
+ else {
+ retval = 0;
+ switch (cmd) {
+ case GIGASET_REDIR:
+ retval = get_user(int_arg, (int __user *) arg);
+ if (retval >= 0)
+ retval = if_lock(cs, &int_arg);
+ if (retval >= 0)
+ retval = put_user(int_arg, (int __user *) arg);
+ break;
+ case GIGASET_CONFIG:
+ retval = get_user(int_arg, (int __user *) arg);
+ if (retval >= 0)
+ retval = if_config(cs, &int_arg);
+ if (retval >= 0)
+ retval = put_user(int_arg, (int __user *) arg);
+ break;
+ case GIGASET_BRKCHARS:
+ //FIXME test if MS_LOCKED
+ gigaset_dbg_buffer(DEBUG_IF, "GIGASET_BRKCHARS",
+ 6, (const unsigned char *) arg, 1);
+ if (!atomic_read(&cs->connected)) {
+ dbg(DEBUG_ANY, "can't communicate with unplugged device");
+ retval = -ENODEV;
+ break;
+ }
+ retval = copy_from_user(&buf,
+ (const unsigned char __user *) arg, 6)
+ ? -EFAULT : 0;
+ if (retval >= 0)
+ retval = cs->ops->brkchars(cs, buf);
+ break;
+ case GIGASET_VERSION:
+ retval = copy_from_user(version, (unsigned __user *) arg,
+ sizeof version) ? -EFAULT : 0;
+ if (retval >= 0)
+ retval = if_version(cs, version);
+ if (retval >= 0)
+ retval = copy_to_user((unsigned __user *) arg, version,
+ sizeof version)
+ ? -EFAULT : 0;
+ break;
+ default:
+ dbg(DEBUG_ANY, "%s: arg not supported - 0x%04x",
+ __FUNCTION__, cmd);
+ retval = -ENOIOCTLCMD;
+ }
+ }
+
+ up(&cs->sem);
+
+ return retval;
+}
+
+static int if_tiocmget(struct tty_struct *tty, struct file *file)
+{
+ struct cardstate *cs;
+ int retval;
+
+ cs = (struct cardstate *) tty->driver_data;
+ if (!cs) {
+ err("cs==NULL in %s", __FUNCTION__);
+ return -ENODEV;
+ }
+
+ dbg(DEBUG_IF, "%u: %s()", cs->minor_index, __FUNCTION__);
+
+ if (down_interruptible(&cs->sem))
+ return -ERESTARTSYS; // FIXME -EINTR?
+
+ // FIXME read from device?
+ retval = cs->control_state & (TIOCM_RTS|TIOCM_DTR);
+
+ up(&cs->sem);
+
+ return retval;
+}
+
+static int if_tiocmset(struct tty_struct *tty, struct file *file,
+ unsigned int set, unsigned int clear)
+{
+ struct cardstate *cs;
+ int retval;
+ unsigned mc;
+
+ cs = (struct cardstate *) tty->driver_data;
+ if (!cs) {
+ err("cs==NULL in %s", __FUNCTION__);
+ return -ENODEV;
+ }
+
+ dbg(DEBUG_IF,
+ "%u: %s(0x%x, 0x%x)", cs->minor_index, __FUNCTION__, set, clear);
+
+ if (down_interruptible(&cs->sem))
+ return -ERESTARTSYS; // FIXME -EINTR?
+
+ if (!atomic_read(&cs->connected)) {
+ dbg(DEBUG_ANY, "can't communicate with unplugged device");
+ retval = -ENODEV;
+ } else {
+ mc = (cs->control_state | set) & ~clear & (TIOCM_RTS|TIOCM_DTR);
+ retval = cs->ops->set_modem_ctrl(cs, cs->control_state, mc);
+ cs->control_state = mc;
+ }
+
+ up(&cs->sem);
+
+ return retval;
+}
+
+static int if_write(struct tty_struct *tty, const unsigned char *buf, int count)
+{
+ struct cardstate *cs;
+ int retval = -ENODEV;
+
+ cs = (struct cardstate *) tty->driver_data;
+ if (!cs) {
+ err("cs==NULL in %s", __FUNCTION__);
+ return -ENODEV;
+ }
+
+ dbg(DEBUG_IF, "%u: %s()", cs->minor_index, __FUNCTION__);
+
+ if (down_interruptible(&cs->sem))
+ return -ERESTARTSYS; // FIXME -EINTR?
+
+ if (!cs->open_count)
+ warn("%s: device not opened", __FUNCTION__);
+ else if (atomic_read(&cs->mstate) != MS_LOCKED) {
+ warn("can't write to unlocked device");
+ retval = -EBUSY;
+ } else if (!atomic_read(&cs->connected)) {
+ dbg(DEBUG_ANY, "can't write to unplugged device");
+ retval = -EBUSY; //FIXME
+ } else {
+ retval = cs->ops->write_cmd(cs, buf, count,
+ &cs->if_wake_tasklet);
+ }
+
+ up(&cs->sem);
+
+ return retval;
+}
+
+static int if_write_room(struct tty_struct *tty)
+{
+ struct cardstate *cs;
+ int retval = -ENODEV;
+
+ cs = (struct cardstate *) tty->driver_data;
+ if (!cs) {
+ err("cs==NULL in %s", __FUNCTION__);
+ return -ENODEV;
+ }
+
+ dbg(DEBUG_IF, "%u: %s()", cs->minor_index, __FUNCTION__);
+
+ if (down_interruptible(&cs->sem))
+ return -ERESTARTSYS; // FIXME -EINTR?
+
+ if (!cs->open_count)
+ warn("%s: device not opened", __FUNCTION__);
+ else if (atomic_read(&cs->mstate) != MS_LOCKED) {
+ warn("can't write to unlocked device");
+ retval = -EBUSY; //FIXME
+ } else if (!atomic_read(&cs->connected)) {
+ dbg(DEBUG_ANY, "can't write to unplugged device");
+ retval = -EBUSY; //FIXME
+ } else
+ retval = cs->ops->write_room(cs);
+
+ up(&cs->sem);
+
+ return retval;
+}
+
+static int if_chars_in_buffer(struct tty_struct *tty)
+{
+ struct cardstate *cs;
+ int retval = -ENODEV;
+
+ cs = (struct cardstate *) tty->driver_data;
+ if (!cs) {
+ err("cs==NULL in %s", __FUNCTION__);
+ return -ENODEV;
+ }
+
+ dbg(DEBUG_IF, "%u: %s()", cs->minor_index, __FUNCTION__);
+
+ if (down_interruptible(&cs->sem))
+ return -ERESTARTSYS; // FIXME -EINTR?
+
+ if (!cs->open_count)
+ warn("%s: device not opened", __FUNCTION__);
+ else if (atomic_read(&cs->mstate) != MS_LOCKED) {
+ warn("can't write to unlocked device");
+ retval = -EBUSY;
+ } else if (!atomic_read(&cs->connected)) {
+ dbg(DEBUG_ANY, "can't write to unplugged device");
+ retval = -EBUSY; //FIXME
+ } else
+ retval = cs->ops->chars_in_buffer(cs);
+
+ up(&cs->sem);
+
+ return retval;
+}
+
+static void if_throttle(struct tty_struct *tty)
+{
+ struct cardstate *cs;
+
+ cs = (struct cardstate *) tty->driver_data;
+ if (!cs) {
+ err("cs==NULL in %s", __FUNCTION__);
+ return;
+ }
+
+ dbg(DEBUG_IF, "%u: %s()", cs->minor_index, __FUNCTION__);
+
+ down(&cs->sem);
+
+ if (!cs->open_count)
+ warn("%s: device not opened", __FUNCTION__);
+ else {
+ //FIXME
+ }
+
+ up(&cs->sem);
+}
+
+static void if_unthrottle(struct tty_struct *tty)
+{
+ struct cardstate *cs;
+
+ cs = (struct cardstate *) tty->driver_data;
+ if (!cs) {
+ err("cs==NULL in %s", __FUNCTION__);
+ return;
+ }
+
+ dbg(DEBUG_IF, "%u: %s()", cs->minor_index, __FUNCTION__);
+
+ down(&cs->sem);
+
+ if (!cs->open_count)
+ warn("%s: device not opened", __FUNCTION__);
+ else {
+ //FIXME
+ }
+
+ up(&cs->sem);
+}
+
+static void if_set_termios(struct tty_struct *tty, struct termios *old)
+{
+ struct cardstate *cs;
+ unsigned int iflag;
+ unsigned int cflag;
+ unsigned int old_cflag;
+ unsigned int control_state, new_state;
+
+ cs = (struct cardstate *) tty->driver_data;
+ if (!cs) {
+ err("cs==NULL in %s", __FUNCTION__);
+ return;
+ }
+
+ dbg(DEBUG_IF, "%u: %s()", cs->minor_index, __FUNCTION__);
+
+ down(&cs->sem);
+
+ if (!cs->open_count) {
+ warn("%s: device not opened", __FUNCTION__);
+ goto out;
+ }
+
+ if (!atomic_read(&cs->connected)) {
+ dbg(DEBUG_ANY, "can't communicate with unplugged device");
+ goto out;
+ }
+
+ // stolen from mct_u232.c
+ iflag = tty->termios->c_iflag;
+ cflag = tty->termios->c_cflag;
+ old_cflag = old ? old->c_cflag : cflag; //FIXME?
+ dbg(DEBUG_IF, "%u: iflag %x cflag %x old %x", cs->minor_index,
+ iflag, cflag, old_cflag);
+
+ /* get a local copy of the current port settings */
+ control_state = cs->control_state;
+
+ /*
+ * Update baud rate.
+ * Do not attempt to cache old rates and skip settings,
+ * disconnects screw such tricks up completely.
+ * Premature optimization is the root of all evil.
+ */
+
+ /* reassert DTR and (maybe) RTS on transition from B0 */
+ if ((old_cflag & CBAUD) == B0) {
+ new_state = control_state | TIOCM_DTR;
+ /* don't set RTS if using hardware flow control */
+ if (!(old_cflag & CRTSCTS))
+ new_state |= TIOCM_RTS;
+ dbg(DEBUG_IF, "%u: from B0 - set DTR%s", cs->minor_index,
+ (new_state & TIOCM_RTS) ? " only" : "/RTS");
+ cs->ops->set_modem_ctrl(cs, control_state, new_state);
+ control_state = new_state;
+ }
+
+ cs->ops->baud_rate(cs, cflag & CBAUD);
+
+ if ((cflag & CBAUD) == B0) {
+ /* Drop RTS and DTR */
+ dbg(DEBUG_IF, "%u: to B0 - drop DTR/RTS", cs->minor_index);
+ new_state = control_state & ~(TIOCM_DTR | TIOCM_RTS);
+ cs->ops->set_modem_ctrl(cs, control_state, new_state);
+ control_state = new_state;
+ }
+
+ /*
+ * Update line control register (LCR)
+ */
+
+ cs->ops->set_line_ctrl(cs, cflag);
+
+#if 0
+ //FIXME this hangs M101 [ts 2005-03-09]
+ //FIXME do we need this?
+ /*
+ * Set flow control: well, I do not really know how to handle DTR/RTS.
+ * Just do what we have seen with SniffUSB on Win98.
+ */
+ /* Drop DTR/RTS if no flow control otherwise assert */
+ dbg(DEBUG_IF, "%u: control_state %x", cs->minor_index, control_state);
+ new_state = control_state;
+ if ((iflag & IXOFF) || (iflag & IXON) || (cflag & CRTSCTS))
+ new_state |= TIOCM_DTR | TIOCM_RTS;
+ else
+ new_state &= ~(TIOCM_DTR | TIOCM_RTS);
+ if (new_state != control_state) {
+ dbg(DEBUG_IF, "%u: new_state %x", cs->minor_index, new_state);
+ gigaset_set_modem_ctrl(cs, control_state, new_state); // FIXME: mct_u232.c sets the old state here. is this a bug?
+ control_state = new_state;
+ }
+#endif
+
+ /* save off the modified port settings */
+ cs->control_state = control_state;
+
+out:
+ up(&cs->sem);
+}
+
+
+/* wakeup tasklet for the write operation */
+static void if_wake(unsigned long data)
+{
+ struct cardstate *cs = (struct cardstate *) data;
+ struct tty_struct *tty;
+
+ tty = cs->tty;
+ if (!tty)
+ return;
+
+ if ((tty->flags & (1 << TTY_DO_WRITE_WAKEUP)) &&
+ tty->ldisc.write_wakeup) {
+ dbg(DEBUG_IF, "write wakeup call");
+ tty->ldisc.write_wakeup(tty);
+ }
+
+ wake_up_interruptible(&tty->write_wait);
+}
+
+/*** interface to common ***/
+
+void gigaset_if_init(struct cardstate *cs)
+{
+ struct gigaset_driver *drv;
+
+ drv = cs->driver;
+ if (!drv->have_tty)
+ return;
+
+ tasklet_init(&cs->if_wake_tasklet, &if_wake, (unsigned long) cs);
+ tty_register_device(drv->tty, cs->minor_index, NULL);
+}
+
+void gigaset_if_free(struct cardstate *cs)
+{
+ struct gigaset_driver *drv;
+
+ drv = cs->driver;
+ if (!drv->have_tty)
+ return;
+
+ tasklet_disable(&cs->if_wake_tasklet);
+ tasklet_kill(&cs->if_wake_tasklet);
+ tty_unregister_device(drv->tty, cs->minor_index);
+}
+
+void gigaset_if_receive(struct cardstate *cs,
+ unsigned char *buffer, size_t len)
+{
+ unsigned long flags;
+ struct tty_struct *tty;
+
+ spin_lock_irqsave(&cs->lock, flags);
+ if ((tty = cs->tty) == NULL)
+ dbg(DEBUG_ANY, "receive on closed device");
+ else {
+ tty_buffer_request_room(tty, len);
+ tty_insert_flip_string(tty, buffer, len);
+ tty_flip_buffer_push(tty);
+ }
+ spin_unlock_irqrestore(&cs->lock, flags);
+}
+EXPORT_SYMBOL_GPL(gigaset_if_receive);
+
+/* gigaset_if_initdriver
+ * Initialize tty interface.
+ * parameters:
+ * drv Driver
+ * procname Name of the driver (e.g. for /proc/tty/drivers)
+ * devname Name of the device files (prefix without minor number)
+ * devfsname Devfs name of the device files without %d
+ */
+void gigaset_if_initdriver(struct gigaset_driver *drv, const char *procname,
+ const char *devname, const char *devfsname)
+{
+ unsigned minors = drv->minors;
+ int ret;
+ struct tty_driver *tty;
+
+ drv->have_tty = 0;
+
+ if ((drv->tty = alloc_tty_driver(minors)) == NULL)
+ goto enomem;
+ tty = drv->tty;
+
+ tty->magic = TTY_DRIVER_MAGIC;
+ tty->major = GIG_MAJOR;
+ tty->type = TTY_DRIVER_TYPE_SERIAL;
+ tty->subtype = SERIAL_TYPE_NORMAL;
+ tty->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_NO_DEVFS;
+
+ tty->driver_name = procname;
+ tty->name = devname;
+ tty->minor_start = drv->minor;
+ tty->num = drv->minors;
+
+ tty->owner = THIS_MODULE;
+ tty->devfs_name = devfsname;
+
+ tty->init_termios = tty_std_termios; //FIXME
+ tty->init_termios.c_cflag = B9600 | CS8 | CREAD | HUPCL | CLOCAL; //FIXME
+ tty_set_operations(tty, &if_ops);
+
+ ret = tty_register_driver(tty);
+ if (ret < 0) {
+ warn("failed to register tty driver (error %d)", ret);
+ goto error;
+ }
+ dbg(DEBUG_IF, "tty driver initialized");
+ drv->have_tty = 1;
+ return;
+
+enomem:
+ warn("could not allocate tty structures");
+error:
+ if (drv->tty)
+ put_tty_driver(drv->tty);
+}
+
+void gigaset_if_freedriver(struct gigaset_driver *drv)
+{
+ if (!drv->have_tty)
+ return;
+
+ drv->have_tty = 0;
+ tty_unregister_driver(drv->tty);
+ put_tty_driver(drv->tty);
+}
diff --git a/drivers/isdn/gigaset/isocdata.c b/drivers/isdn/gigaset/isocdata.c
new file mode 100644
index 00000000000..5744eb91b31
--- /dev/null
+++ b/drivers/isdn/gigaset/isocdata.c
@@ -0,0 +1,1009 @@
+/*
+ * Common data handling layer for bas_gigaset
+ *
+ * Copyright (c) 2005 by Tilman Schmidt <tilman@imap.cc>,
+ * Hansjoerg Lipp <hjlipp@web.de>.
+ *
+ * =====================================================================
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ * =====================================================================
+ * ToDo: ...
+ * =====================================================================
+ * Version: $Id: isocdata.c,v 1.2.2.5 2005/11/13 23:05:19 hjlipp Exp $
+ * =====================================================================
+ */
+
+#include "gigaset.h"
+#include <linux/crc-ccitt.h>
+
+/* access methods for isowbuf_t */
+/* ============================ */
+
+/* initialize buffer structure
+ */
+void gigaset_isowbuf_init(struct isowbuf_t *iwb, unsigned char idle)
+{
+ atomic_set(&iwb->read, 0);
+ atomic_set(&iwb->nextread, 0);
+ atomic_set(&iwb->write, 0);
+ atomic_set(&iwb->writesem, 1);
+ iwb->wbits = 0;
+ iwb->idle = idle;
+ memset(iwb->data + BAS_OUTBUFSIZE, idle, BAS_OUTBUFPAD);
+}
+
+/* compute number of bytes which can be appended to buffer
+ * so that there is still room to append a maximum frame of flags
+ */
+static inline int isowbuf_freebytes(struct isowbuf_t *iwb)
+{
+ int read, write, freebytes;
+
+ read = atomic_read(&iwb->read);
+ write = atomic_read(&iwb->write);
+ if ((freebytes = read - write) > 0) {
+ /* no wraparound: need padding space within regular area */
+ return freebytes - BAS_OUTBUFPAD;
+ } else if (read < BAS_OUTBUFPAD) {
+ /* wraparound: can use space up to end of regular area */
+ return BAS_OUTBUFSIZE - write;
+ } else {
+ /* following the wraparound yields more space */
+ return freebytes + BAS_OUTBUFSIZE - BAS_OUTBUFPAD;
+ }
+}
+
+/* compare two offsets within the buffer
+ * The buffer is seen as circular, with the read position as start
+ * returns -1/0/1 if position a </=/> position b without crossing 'read'
+ */
+static inline int isowbuf_poscmp(struct isowbuf_t *iwb, int a, int b)
+{
+ int read;
+ if (a == b)
+ return 0;
+ read = atomic_read(&iwb->read);
+ if (a < b) {
+ if (a < read && read <= b)
+ return +1;
+ else
+ return -1;
+ } else {
+ if (b < read && read <= a)
+ return -1;
+ else
+ return +1;
+ }
+}
+
+/* start writing
+ * acquire the write semaphore
+ * return true if acquired, false if busy
+ */
+static inline int isowbuf_startwrite(struct isowbuf_t *iwb)
+{
+ if (!atomic_dec_and_test(&iwb->writesem)) {
+ atomic_inc(&iwb->writesem);
+ dbg(DEBUG_ISO,
+ "%s: couldn't acquire iso write semaphore", __func__);
+ return 0;
+ }
+#ifdef CONFIG_GIGASET_DEBUG
+ dbg(DEBUG_ISO,
+ "%s: acquired iso write semaphore, data[write]=%02x, nbits=%d",
+ __func__, iwb->data[atomic_read(&iwb->write)], iwb->wbits);
+#endif
+ return 1;
+}
+
+/* finish writing
+ * release the write semaphore
+ * returns the current write position
+ */
+static inline int isowbuf_donewrite(struct isowbuf_t *iwb)
+{
+ int write = atomic_read(&iwb->write);
+ atomic_inc(&iwb->writesem);
+ return write;
+}
+
+/* append bits to buffer without any checks
+ * - data contains bits to append, starting at LSB
+ * - nbits is number of bits to append (0..24)
+ * must be called with the write semaphore held
+ * If more than nbits bits are set in data, the extraneous bits are set in the
+ * buffer too, but the write position is only advanced by nbits.
+ */
+static inline void isowbuf_putbits(struct isowbuf_t *iwb, u32 data, int nbits)
+{
+ int write = atomic_read(&iwb->write);
+ data <<= iwb->wbits;
+ data |= iwb->data[write];
+ nbits += iwb->wbits;
+ while (nbits >= 8) {
+ iwb->data[write++] = data & 0xff;
+ write %= BAS_OUTBUFSIZE;
+ data >>= 8;
+ nbits -= 8;
+ }
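+ /* store the remaining partial byte; the write position stays on it
+ * so that subsequent calls can complete it */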
+ iwb->wbits = nbits;
+ iwb->data[write] = data & 0xff;
+ atomic_set(&iwb->write, write);
+}
+
+/* put final flag on HDLC bitstream
+ * also sets the idle fill byte to the correspondingly shifted flag pattern
+ * must be called with the write semaphore held
+ */
+static inline void isowbuf_putflag(struct isowbuf_t *iwb)
+{
+ int write;
+
+ /* add two flags, thus reliably covering one byte */
+ isowbuf_putbits(iwb, 0x7e7e, 8);
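+ /* only 8 bits are accounted for, but isowbuf_putbits() also writes the
+ * surplus flag bits into the trailing partial byte, so data[write] now
+ * holds the correspondingly shifted flag pattern */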
+ /* recover the idle flag byte */
+ write = atomic_read(&iwb->write);
+ iwb->idle = iwb->data[write];
+ dbg(DEBUG_ISO, "idle fill byte %02x", iwb->idle);
+ /* mask extraneous bits in buffer */
+ iwb->data[write] &= (1 << iwb->wbits) - 1;
+}
+
+/* retrieve a block of bytes for sending
+ * The requested number of bytes is provided as a contiguous block.
+ * If necessary, the frame is filled to the requested number of bytes
+ * with the idle value.
+ * returns offset to frame, < 0 on busy or error
+ */
+int gigaset_isowbuf_getbytes(struct isowbuf_t *iwb, int size)
+{
+ int read, write, limit, src, dst;
+ unsigned char pbyte;
+
+ read = atomic_read(&iwb->nextread);
+ write = atomic_read(&iwb->write);
+ if (likely(read == write)) {
+ //dbg(DEBUG_STREAM, "%s: send buffer empty", __func__);
+ /* return idle frame */
+ return read < BAS_OUTBUFPAD ?
+ BAS_OUTBUFSIZE : read - BAS_OUTBUFPAD;
+ }
+
+ limit = read + size;
+ dbg(DEBUG_STREAM,
+ "%s: read=%d write=%d limit=%d", __func__, read, write, limit);
+#ifdef CONFIG_GIGASET_DEBUG
+ if (unlikely(size < 0 || size > BAS_OUTBUFPAD)) {
+ err("invalid size %d", size);
+ return -EINVAL;
+ }
+ src = atomic_read(&iwb->read);
+ if (unlikely(limit > BAS_OUTBUFSIZE + BAS_OUTBUFPAD ||
+ (read < src && limit >= src))) {
+ err("isoc write buffer frame reservation violated");
+ return -EFAULT;
+ }
+#endif
+
+ if (read < write) {
+ /* no wraparound in valid data */
+ if (limit >= write) {
+ /* append idle frame */
+ if (!isowbuf_startwrite(iwb))
+ return -EBUSY;
+ /* write position could have changed */
+ if (limit >= (write = atomic_read(&iwb->write))) {
+ pbyte = iwb->data[write]; /* save partial byte */
+ limit = write + BAS_OUTBUFPAD;
+ dbg(DEBUG_STREAM,
+ "%s: filling %d->%d with %02x",
+ __func__, write, limit, iwb->idle);
+ if (write + BAS_OUTBUFPAD < BAS_OUTBUFSIZE)
+ memset(iwb->data + write, iwb->idle,
+ BAS_OUTBUFPAD);
+ else {
+ /* wraparound, fill entire pad area */
+ memset(iwb->data + write, iwb->idle,
+ BAS_OUTBUFSIZE + BAS_OUTBUFPAD
+ - write);
+ limit = 0;
+ }
+ dbg(DEBUG_STREAM, "%s: restoring %02x at %d",
+ __func__, pbyte, limit);
+ iwb->data[limit] = pbyte; /* restore partial byte */
+ atomic_set(&iwb->write, limit);
+ }
+ isowbuf_donewrite(iwb);
+ }
+ } else {
+ /* valid data wraparound */
+ if (limit >= BAS_OUTBUFSIZE) {
+ /* copy wrapped part into pad area */
+ src = 0;
+ dst = BAS_OUTBUFSIZE;
+ while (dst < limit && src < write)
+ iwb->data[dst++] = iwb->data[src++];
+ if (dst <= limit) {
+ /* fill pad area with idle byte */
+ memset(iwb->data + dst, iwb->idle,
+ BAS_OUTBUFSIZE + BAS_OUTBUFPAD - dst);
+ }
+ limit = src;
+ }
+ }
+ atomic_set(&iwb->nextread, limit);
+ return read;
+}
+
+/* dump_bytes
+ * write hex bytes to syslog for debugging
+ */
+static inline void dump_bytes(enum debuglevel level, const char *tag,
+ unsigned char *bytes, int count)
+{
+#ifdef CONFIG_GIGASET_DEBUG
+ unsigned char c;
+ static char dbgline[3 * 32 + 1];
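+ /* note: dbgline is static and unsynchronized, so concurrent callers
+ * may interleave their output */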
+ static const char hexdigit[] = "0123456789abcdef";
+ int i = 0;
+ IFNULLRET(tag);
+ IFNULLRET(bytes);
+ while (count-- > 0) {
+ if (i > sizeof(dbgline) - 4) {
+ dbgline[i] = '\0';
+ dbg(level, "%s:%s", tag, dbgline);
+ i = 0;
+ }
+ c = *bytes++;
+ dbgline[i] = (i && !(i % 12)) ? '-' : ' ';
+ i++;
+ dbgline[i++] = hexdigit[(c >> 4) & 0x0f];
+ dbgline[i++] = hexdigit[c & 0x0f];
+ }
+ dbgline[i] = '\0';
+ dbg(level, "%s:%s", tag, dbgline);
+#endif
+}
+
+/*============================================================================*/
+
+/* bytewise HDLC bitstuffing via table lookup
+ * lookup table: 5 subtables for 0..4 preceding consecutive '1' bits
+ * index: 256*(number of preceding '1' bits) + (next byte to stuff)
+ * value: bit 9.. 0 = result bits
+ * bit 12..10 = number of trailing '1' bits in result
+ * bit 14..13 = number of bits added by stuffing
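+ * example: stufftab[0x1f] = 0x201f: input 0x1f (five '1' bits from the LSB)
+ * becomes the 9-bit output 0x01f (a '0' bit stuffed after the five '1's),
+ * with 0 trailing '1' bits and 1 stuff bit added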
+ */
+static u16 stufftab[5 * 256] = {
+// previous 1s = 0:
+ 0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007, 0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x000f,
+ 0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x0017, 0x0018, 0x0019, 0x001a, 0x001b, 0x001c, 0x001d, 0x001e, 0x201f,
+ 0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x0027, 0x0028, 0x0029, 0x002a, 0x002b, 0x002c, 0x002d, 0x002e, 0x002f,
+ 0x0030, 0x0031, 0x0032, 0x0033, 0x0034, 0x0035, 0x0036, 0x0037, 0x0038, 0x0039, 0x003a, 0x003b, 0x003c, 0x003d, 0x203e, 0x205f,
+ 0x0040, 0x0041, 0x0042, 0x0043, 0x0044, 0x0045, 0x0046, 0x0047, 0x0048, 0x0049, 0x004a, 0x004b, 0x004c, 0x004d, 0x004e, 0x004f,
+ 0x0050, 0x0051, 0x0052, 0x0053, 0x0054, 0x0055, 0x0056, 0x0057, 0x0058, 0x0059, 0x005a, 0x005b, 0x005c, 0x005d, 0x005e, 0x209f,
+ 0x0060, 0x0061, 0x0062, 0x0063, 0x0064, 0x0065, 0x0066, 0x0067, 0x0068, 0x0069, 0x006a, 0x006b, 0x006c, 0x006d, 0x006e, 0x006f,
+ 0x0070, 0x0071, 0x0072, 0x0073, 0x0074, 0x0075, 0x0076, 0x0077, 0x0078, 0x0079, 0x007a, 0x007b, 0x207c, 0x207d, 0x20be, 0x20df,
+ 0x0480, 0x0481, 0x0482, 0x0483, 0x0484, 0x0485, 0x0486, 0x0487, 0x0488, 0x0489, 0x048a, 0x048b, 0x048c, 0x048d, 0x048e, 0x048f,
+ 0x0490, 0x0491, 0x0492, 0x0493, 0x0494, 0x0495, 0x0496, 0x0497, 0x0498, 0x0499, 0x049a, 0x049b, 0x049c, 0x049d, 0x049e, 0x251f,
+ 0x04a0, 0x04a1, 0x04a2, 0x04a3, 0x04a4, 0x04a5, 0x04a6, 0x04a7, 0x04a8, 0x04a9, 0x04aa, 0x04ab, 0x04ac, 0x04ad, 0x04ae, 0x04af,
+ 0x04b0, 0x04b1, 0x04b2, 0x04b3, 0x04b4, 0x04b5, 0x04b6, 0x04b7, 0x04b8, 0x04b9, 0x04ba, 0x04bb, 0x04bc, 0x04bd, 0x253e, 0x255f,
+ 0x08c0, 0x08c1, 0x08c2, 0x08c3, 0x08c4, 0x08c5, 0x08c6, 0x08c7, 0x08c8, 0x08c9, 0x08ca, 0x08cb, 0x08cc, 0x08cd, 0x08ce, 0x08cf,
+ 0x08d0, 0x08d1, 0x08d2, 0x08d3, 0x08d4, 0x08d5, 0x08d6, 0x08d7, 0x08d8, 0x08d9, 0x08da, 0x08db, 0x08dc, 0x08dd, 0x08de, 0x299f,
+ 0x0ce0, 0x0ce1, 0x0ce2, 0x0ce3, 0x0ce4, 0x0ce5, 0x0ce6, 0x0ce7, 0x0ce8, 0x0ce9, 0x0cea, 0x0ceb, 0x0cec, 0x0ced, 0x0cee, 0x0cef,
+ 0x10f0, 0x10f1, 0x10f2, 0x10f3, 0x10f4, 0x10f5, 0x10f6, 0x10f7, 0x20f8, 0x20f9, 0x20fa, 0x20fb, 0x257c, 0x257d, 0x29be, 0x2ddf,
+
+// previous 1s = 1:
+ 0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007, 0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x200f,
+ 0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x0017, 0x0018, 0x0019, 0x001a, 0x001b, 0x001c, 0x001d, 0x001e, 0x202f,
+ 0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x0027, 0x0028, 0x0029, 0x002a, 0x002b, 0x002c, 0x002d, 0x002e, 0x204f,
+ 0x0030, 0x0031, 0x0032, 0x0033, 0x0034, 0x0035, 0x0036, 0x0037, 0x0038, 0x0039, 0x003a, 0x003b, 0x003c, 0x003d, 0x203e, 0x206f,
+ 0x0040, 0x0041, 0x0042, 0x0043, 0x0044, 0x0045, 0x0046, 0x0047, 0x0048, 0x0049, 0x004a, 0x004b, 0x004c, 0x004d, 0x004e, 0x208f,
+ 0x0050, 0x0051, 0x0052, 0x0053, 0x0054, 0x0055, 0x0056, 0x0057, 0x0058, 0x0059, 0x005a, 0x005b, 0x005c, 0x005d, 0x005e, 0x20af,
+ 0x0060, 0x0061, 0x0062, 0x0063, 0x0064, 0x0065, 0x0066, 0x0067, 0x0068, 0x0069, 0x006a, 0x006b, 0x006c, 0x006d, 0x006e, 0x20cf,
+ 0x0070, 0x0071, 0x0072, 0x0073, 0x0074, 0x0075, 0x0076, 0x0077, 0x0078, 0x0079, 0x007a, 0x007b, 0x207c, 0x207d, 0x20be, 0x20ef,
+ 0x0480, 0x0481, 0x0482, 0x0483, 0x0484, 0x0485, 0x0486, 0x0487, 0x0488, 0x0489, 0x048a, 0x048b, 0x048c, 0x048d, 0x048e, 0x250f,
+ 0x0490, 0x0491, 0x0492, 0x0493, 0x0494, 0x0495, 0x0496, 0x0497, 0x0498, 0x0499, 0x049a, 0x049b, 0x049c, 0x049d, 0x049e, 0x252f,
+ 0x04a0, 0x04a1, 0x04a2, 0x04a3, 0x04a4, 0x04a5, 0x04a6, 0x04a7, 0x04a8, 0x04a9, 0x04aa, 0x04ab, 0x04ac, 0x04ad, 0x04ae, 0x254f,
+ 0x04b0, 0x04b1, 0x04b2, 0x04b3, 0x04b4, 0x04b5, 0x04b6, 0x04b7, 0x04b8, 0x04b9, 0x04ba, 0x04bb, 0x04bc, 0x04bd, 0x253e, 0x256f,
+ 0x08c0, 0x08c1, 0x08c2, 0x08c3, 0x08c4, 0x08c5, 0x08c6, 0x08c7, 0x08c8, 0x08c9, 0x08ca, 0x08cb, 0x08cc, 0x08cd, 0x08ce, 0x298f,
+ 0x08d0, 0x08d1, 0x08d2, 0x08d3, 0x08d4, 0x08d5, 0x08d6, 0x08d7, 0x08d8, 0x08d9, 0x08da, 0x08db, 0x08dc, 0x08dd, 0x08de, 0x29af,
+ 0x0ce0, 0x0ce1, 0x0ce2, 0x0ce3, 0x0ce4, 0x0ce5, 0x0ce6, 0x0ce7, 0x0ce8, 0x0ce9, 0x0cea, 0x0ceb, 0x0cec, 0x0ced, 0x0cee, 0x2dcf,
+ 0x10f0, 0x10f1, 0x10f2, 0x10f3, 0x10f4, 0x10f5, 0x10f6, 0x10f7, 0x20f8, 0x20f9, 0x20fa, 0x20fb, 0x257c, 0x257d, 0x29be, 0x31ef,
+
+// previous 1s = 2:
+ 0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x2007, 0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x2017,
+ 0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x2027, 0x0018, 0x0019, 0x001a, 0x001b, 0x001c, 0x001d, 0x001e, 0x2037,
+ 0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x2047, 0x0028, 0x0029, 0x002a, 0x002b, 0x002c, 0x002d, 0x002e, 0x2057,
+ 0x0030, 0x0031, 0x0032, 0x0033, 0x0034, 0x0035, 0x0036, 0x2067, 0x0038, 0x0039, 0x003a, 0x003b, 0x003c, 0x003d, 0x203e, 0x2077,
+ 0x0040, 0x0041, 0x0042, 0x0043, 0x0044, 0x0045, 0x0046, 0x2087, 0x0048, 0x0049, 0x004a, 0x004b, 0x004c, 0x004d, 0x004e, 0x2097,
+ 0x0050, 0x0051, 0x0052, 0x0053, 0x0054, 0x0055, 0x0056, 0x20a7, 0x0058, 0x0059, 0x005a, 0x005b, 0x005c, 0x005d, 0x005e, 0x20b7,
+ 0x0060, 0x0061, 0x0062, 0x0063, 0x0064, 0x0065, 0x0066, 0x20c7, 0x0068, 0x0069, 0x006a, 0x006b, 0x006c, 0x006d, 0x006e, 0x20d7,
+ 0x0070, 0x0071, 0x0072, 0x0073, 0x0074, 0x0075, 0x0076, 0x20e7, 0x0078, 0x0079, 0x007a, 0x007b, 0x207c, 0x207d, 0x20be, 0x20f7,
+ 0x0480, 0x0481, 0x0482, 0x0483, 0x0484, 0x0485, 0x0486, 0x2507, 0x0488, 0x0489, 0x048a, 0x048b, 0x048c, 0x048d, 0x048e, 0x2517,
+ 0x0490, 0x0491, 0x0492, 0x0493, 0x0494, 0x0495, 0x0496, 0x2527, 0x0498, 0x0499, 0x049a, 0x049b, 0x049c, 0x049d, 0x049e, 0x2537,
+ 0x04a0, 0x04a1, 0x04a2, 0x04a3, 0x04a4, 0x04a5, 0x04a6, 0x2547, 0x04a8, 0x04a9, 0x04aa, 0x04ab, 0x04ac, 0x04ad, 0x04ae, 0x2557,
+ 0x04b0, 0x04b1, 0x04b2, 0x04b3, 0x04b4, 0x04b5, 0x04b6, 0x2567, 0x04b8, 0x04b9, 0x04ba, 0x04bb, 0x04bc, 0x04bd, 0x253e, 0x2577,
+ 0x08c0, 0x08c1, 0x08c2, 0x08c3, 0x08c4, 0x08c5, 0x08c6, 0x2987, 0x08c8, 0x08c9, 0x08ca, 0x08cb, 0x08cc, 0x08cd, 0x08ce, 0x2997,
+ 0x08d0, 0x08d1, 0x08d2, 0x08d3, 0x08d4, 0x08d5, 0x08d6, 0x29a7, 0x08d8, 0x08d9, 0x08da, 0x08db, 0x08dc, 0x08dd, 0x08de, 0x29b7,
+ 0x0ce0, 0x0ce1, 0x0ce2, 0x0ce3, 0x0ce4, 0x0ce5, 0x0ce6, 0x2dc7, 0x0ce8, 0x0ce9, 0x0cea, 0x0ceb, 0x0cec, 0x0ced, 0x0cee, 0x2dd7,
+ 0x10f0, 0x10f1, 0x10f2, 0x10f3, 0x10f4, 0x10f5, 0x10f6, 0x31e7, 0x20f8, 0x20f9, 0x20fa, 0x20fb, 0x257c, 0x257d, 0x29be, 0x41f7,
+
+// previous 1s = 3:
+ 0x0000, 0x0001, 0x0002, 0x2003, 0x0004, 0x0005, 0x0006, 0x200b, 0x0008, 0x0009, 0x000a, 0x2013, 0x000c, 0x000d, 0x000e, 0x201b,
+ 0x0010, 0x0011, 0x0012, 0x2023, 0x0014, 0x0015, 0x0016, 0x202b, 0x0018, 0x0019, 0x001a, 0x2033, 0x001c, 0x001d, 0x001e, 0x203b,
+ 0x0020, 0x0021, 0x0022, 0x2043, 0x0024, 0x0025, 0x0026, 0x204b, 0x0028, 0x0029, 0x002a, 0x2053, 0x002c, 0x002d, 0x002e, 0x205b,
+ 0x0030, 0x0031, 0x0032, 0x2063, 0x0034, 0x0035, 0x0036, 0x206b, 0x0038, 0x0039, 0x003a, 0x2073, 0x003c, 0x003d, 0x203e, 0x207b,
+ 0x0040, 0x0041, 0x0042, 0x2083, 0x0044, 0x0045, 0x0046, 0x208b, 0x0048, 0x0049, 0x004a, 0x2093, 0x004c, 0x004d, 0x004e, 0x209b,
+ 0x0050, 0x0051, 0x0052, 0x20a3, 0x0054, 0x0055, 0x0056, 0x20ab, 0x0058, 0x0059, 0x005a, 0x20b3, 0x005c, 0x005d, 0x005e, 0x20bb,
+ 0x0060, 0x0061, 0x0062, 0x20c3, 0x0064, 0x0065, 0x0066, 0x20cb, 0x0068, 0x0069, 0x006a, 0x20d3, 0x006c, 0x006d, 0x006e, 0x20db,
+ 0x0070, 0x0071, 0x0072, 0x20e3, 0x0074, 0x0075, 0x0076, 0x20eb, 0x0078, 0x0079, 0x007a, 0x20f3, 0x207c, 0x207d, 0x20be, 0x40fb,
+ 0x0480, 0x0481, 0x0482, 0x2503, 0x0484, 0x0485, 0x0486, 0x250b, 0x0488, 0x0489, 0x048a, 0x2513, 0x048c, 0x048d, 0x048e, 0x251b,
+ 0x0490, 0x0491, 0x0492, 0x2523, 0x0494, 0x0495, 0x0496, 0x252b, 0x0498, 0x0499, 0x049a, 0x2533, 0x049c, 0x049d, 0x049e, 0x253b,
+ 0x04a0, 0x04a1, 0x04a2, 0x2543, 0x04a4, 0x04a5, 0x04a6, 0x254b, 0x04a8, 0x04a9, 0x04aa, 0x2553, 0x04ac, 0x04ad, 0x04ae, 0x255b,
+ 0x04b0, 0x04b1, 0x04b2, 0x2563, 0x04b4, 0x04b5, 0x04b6, 0x256b, 0x04b8, 0x04b9, 0x04ba, 0x2573, 0x04bc, 0x04bd, 0x253e, 0x257b,
+ 0x08c0, 0x08c1, 0x08c2, 0x2983, 0x08c4, 0x08c5, 0x08c6, 0x298b, 0x08c8, 0x08c9, 0x08ca, 0x2993, 0x08cc, 0x08cd, 0x08ce, 0x299b,
+ 0x08d0, 0x08d1, 0x08d2, 0x29a3, 0x08d4, 0x08d5, 0x08d6, 0x29ab, 0x08d8, 0x08d9, 0x08da, 0x29b3, 0x08dc, 0x08dd, 0x08de, 0x29bb,
+ 0x0ce0, 0x0ce1, 0x0ce2, 0x2dc3, 0x0ce4, 0x0ce5, 0x0ce6, 0x2dcb, 0x0ce8, 0x0ce9, 0x0cea, 0x2dd3, 0x0cec, 0x0ced, 0x0cee, 0x2ddb,
+ 0x10f0, 0x10f1, 0x10f2, 0x31e3, 0x10f4, 0x10f5, 0x10f6, 0x31eb, 0x20f8, 0x20f9, 0x20fa, 0x41f3, 0x257c, 0x257d, 0x29be, 0x46fb,
+
+// previous 1s = 4:
+ 0x0000, 0x2001, 0x0002, 0x2005, 0x0004, 0x2009, 0x0006, 0x200d, 0x0008, 0x2011, 0x000a, 0x2015, 0x000c, 0x2019, 0x000e, 0x201d,
+ 0x0010, 0x2021, 0x0012, 0x2025, 0x0014, 0x2029, 0x0016, 0x202d, 0x0018, 0x2031, 0x001a, 0x2035, 0x001c, 0x2039, 0x001e, 0x203d,
+ 0x0020, 0x2041, 0x0022, 0x2045, 0x0024, 0x2049, 0x0026, 0x204d, 0x0028, 0x2051, 0x002a, 0x2055, 0x002c, 0x2059, 0x002e, 0x205d,
+ 0x0030, 0x2061, 0x0032, 0x2065, 0x0034, 0x2069, 0x0036, 0x206d, 0x0038, 0x2071, 0x003a, 0x2075, 0x003c, 0x2079, 0x203e, 0x407d,
+ 0x0040, 0x2081, 0x0042, 0x2085, 0x0044, 0x2089, 0x0046, 0x208d, 0x0048, 0x2091, 0x004a, 0x2095, 0x004c, 0x2099, 0x004e, 0x209d,
+ 0x0050, 0x20a1, 0x0052, 0x20a5, 0x0054, 0x20a9, 0x0056, 0x20ad, 0x0058, 0x20b1, 0x005a, 0x20b5, 0x005c, 0x20b9, 0x005e, 0x20bd,
+ 0x0060, 0x20c1, 0x0062, 0x20c5, 0x0064, 0x20c9, 0x0066, 0x20cd, 0x0068, 0x20d1, 0x006a, 0x20d5, 0x006c, 0x20d9, 0x006e, 0x20dd,
+ 0x0070, 0x20e1, 0x0072, 0x20e5, 0x0074, 0x20e9, 0x0076, 0x20ed, 0x0078, 0x20f1, 0x007a, 0x20f5, 0x207c, 0x40f9, 0x20be, 0x417d,
+ 0x0480, 0x2501, 0x0482, 0x2505, 0x0484, 0x2509, 0x0486, 0x250d, 0x0488, 0x2511, 0x048a, 0x2515, 0x048c, 0x2519, 0x048e, 0x251d,
+ 0x0490, 0x2521, 0x0492, 0x2525, 0x0494, 0x2529, 0x0496, 0x252d, 0x0498, 0x2531, 0x049a, 0x2535, 0x049c, 0x2539, 0x049e, 0x253d,
+ 0x04a0, 0x2541, 0x04a2, 0x2545, 0x04a4, 0x2549, 0x04a6, 0x254d, 0x04a8, 0x2551, 0x04aa, 0x2555, 0x04ac, 0x2559, 0x04ae, 0x255d,
+ 0x04b0, 0x2561, 0x04b2, 0x2565, 0x04b4, 0x2569, 0x04b6, 0x256d, 0x04b8, 0x2571, 0x04ba, 0x2575, 0x04bc, 0x2579, 0x253e, 0x467d,
+ 0x08c0, 0x2981, 0x08c2, 0x2985, 0x08c4, 0x2989, 0x08c6, 0x298d, 0x08c8, 0x2991, 0x08ca, 0x2995, 0x08cc, 0x2999, 0x08ce, 0x299d,
+ 0x08d0, 0x29a1, 0x08d2, 0x29a5, 0x08d4, 0x29a9, 0x08d6, 0x29ad, 0x08d8, 0x29b1, 0x08da, 0x29b5, 0x08dc, 0x29b9, 0x08de, 0x29bd,
+ 0x0ce0, 0x2dc1, 0x0ce2, 0x2dc5, 0x0ce4, 0x2dc9, 0x0ce6, 0x2dcd, 0x0ce8, 0x2dd1, 0x0cea, 0x2dd5, 0x0cec, 0x2dd9, 0x0cee, 0x2ddd,
+ 0x10f0, 0x31e1, 0x10f2, 0x31e5, 0x10f4, 0x31e9, 0x10f6, 0x31ed, 0x20f8, 0x41f1, 0x20fa, 0x41f5, 0x257c, 0x46f9, 0x29be, 0x4b7d
+};
+
+/* hdlc_bitstuff_byte
+ * perform HDLC bitstuffing for one input byte (8 bits, LSB first)
+ * parameters:
+ * cin input byte
+ * ones number of trailing '1' bits in result before this step
+ * iwb pointer to output buffer structure (write semaphore must be held)
+ * return value:
+ * number of trailing '1' bits in result after this step
+ */
+
+static inline int hdlc_bitstuff_byte(struct isowbuf_t *iwb, unsigned char cin,
+ int ones)
+{
+ u16 stuff;
+ int shiftinc, newones;
+
+ /* get stuffing information for input byte
+ * value: bit 9.. 0 = result bits
+ * bit 12..10 = number of trailing '1' bits in result
+ * bit 14..13 = number of bits added by stuffing
+ */
+ stuff = stufftab[256 * ones + cin];
+ shiftinc = (stuff >> 13) & 3;
+ newones = (stuff >> 10) & 7;
+ stuff &= 0x3ff;
+
+ /* append stuffed byte to output stream */
+ isowbuf_putbits(iwb, stuff, 8 + shiftinc);
+ return newones;
+}
+
+/* hdlc_buildframe
+ * Perform HDLC framing with bitstuffing on a byte buffer
+ * The input buffer is regarded as a sequence of bits, starting with the least
+ * significant bit of the first byte and ending with the most significant bit
+ * of the last byte. A 16 bit FCS is appended as defined by RFC 1662.
+ * Whenever five consecutive '1' bits appear in the resulting bit sequence, a
+ * '0' bit is inserted after them.
+ * The resulting bit string and a closing flag pattern (PPP_FLAG, '01111110')
+ * are appended to the output buffer starting at the given bit position, which
+ * is assumed to already contain a leading flag.
+ * The output buffer must have room for count + count/5 + 6 additional bytes;
+ * this is verified before any data is written.
+ * parameters:
+ * in input buffer
+ * count number of bytes in input buffer
+ * iwb pointer to output buffer structure (write semaphore must be held)
+ * return value:
+ * position of end of packet in output buffer on success,
+ * -EAGAIN if write semaphore busy or buffer full
+ */
+
+static inline int hdlc_buildframe(struct isowbuf_t *iwb,
+ unsigned char *in, int count)
+{
+ int ones;
+ u16 fcs;
+ int end;
+ unsigned char c;
+
+ if (isowbuf_freebytes(iwb) < count + count / 5 + 6 ||
+ !isowbuf_startwrite(iwb)) {
+ dbg(DEBUG_ISO, "%s: %d bytes free -> -EAGAIN",
+ __func__, isowbuf_freebytes(iwb));
+ return -EAGAIN;
+ }
+
+ dump_bytes(DEBUG_STREAM, "snd data", in, count);
+
+ /* bitstuff and checksum input data */
+ fcs = PPP_INITFCS;
+ ones = 0;
+ while (count-- > 0) {
+ c = *in++;
+ ones = hdlc_bitstuff_byte(iwb, c, ones);
+ fcs = crc_ccitt_byte(fcs, c);
+ }
+
+ /* bitstuff and append FCS (complemented, least significant byte first) */
+ fcs ^= 0xffff;
+ ones = hdlc_bitstuff_byte(iwb, fcs & 0x00ff, ones);
+ ones = hdlc_bitstuff_byte(iwb, (fcs >> 8) & 0x00ff, ones);
+
+ /* put closing flag and repeat byte for flag idle */
+ isowbuf_putflag(iwb);
+ end = isowbuf_donewrite(iwb);
+ dump_bytes(DEBUG_STREAM_DUMP, "isowbuf", iwb->data, end + 1);
+ return end;
+}
+
+/* trans_buildframe
+ * Append a block of 'transparent' data to the output buffer,
+ * inverting the bytes.
+ * The output buffer must have room for count additional bytes;
+ * this is verified before any data is written.
+ * parameters:
+ * in input buffer
+ * count number of bytes in input buffer
+ * iwb pointer to output buffer structure (write semaphore must be held)
+ * return value:
+ * position of end of packet in output buffer on success,
+ * -EAGAIN if write semaphore busy or buffer full
+ */
+
+static inline int trans_buildframe(struct isowbuf_t *iwb,
+ unsigned char *in, int count)
+{
+ int write;
+ unsigned char c;
+
+ if (unlikely(count <= 0))
+ return atomic_read(&iwb->write); /* better ideas? */
+
+ if (isowbuf_freebytes(iwb) < count ||
+ !isowbuf_startwrite(iwb)) {
+ dbg(DEBUG_ISO, "can't put %d bytes", count);
+ return -EAGAIN;
+ }
+
+ dbg(DEBUG_STREAM, "put %d bytes", count);
+ write = atomic_read(&iwb->write);
+ do {
+ c = gigaset_invtab[*in++];
+ iwb->data[write++] = c;
+ write %= BAS_OUTBUFSIZE;
+ } while (--count > 0);
+ atomic_set(&iwb->write, write);
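+ /* in transparent mode, idle fill repeats the last (inverted) data byte */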
+ iwb->idle = c;
+
+ return isowbuf_donewrite(iwb);
+}
+
+int gigaset_isoc_buildframe(struct bc_state *bcs, unsigned char *in, int len)
+{
+ int result;
+
+ switch (bcs->proto2) {
+ case ISDN_PROTO_L2_HDLC:
+ result = hdlc_buildframe(bcs->hw.bas->isooutbuf, in, len);
+ dbg(DEBUG_ISO, "%s: %d bytes HDLC -> %d", __func__, len, result);
+ break;
+ default: /* assume transparent */
+ result = trans_buildframe(bcs->hw.bas->isooutbuf, in, len);
+ dbg(DEBUG_ISO, "%s: %d bytes trans -> %d", __func__, len, result);
+ }
+ return result;
+}
+
+/* hdlc_putbyte
+ * append byte c to current skb of B channel structure *bcs, updating fcs
+ */
+static inline void hdlc_putbyte(unsigned char c, struct bc_state *bcs)
+{
+ bcs->fcs = crc_ccitt_byte(bcs->fcs, c);
+ if (unlikely(bcs->skb == NULL)) {
+ /* skipping */
+ return;
+ }
+ if (unlikely(bcs->skb->len == SBUFSIZE)) {
+ warn("received oversized packet discarded");
+ bcs->hw.bas->giants++;
+ dev_kfree_skb_any(bcs->skb);
+ bcs->skb = NULL;
+ return;
+ }
+ *gigaset_skb_put_quick(bcs->skb, 1) = c;
+}
+
+/* hdlc_flush
+ * drop partial HDLC data packet
+ */
+static inline void hdlc_flush(struct bc_state *bcs)
+{
+ /* clear skb or allocate new if not skipping */
+ if (likely(bcs->skb != NULL))
+ skb_trim(bcs->skb, 0);
+ else if (!bcs->ignore) {
+ if ((bcs->skb = dev_alloc_skb(SBUFSIZE + HW_HDR_LEN)) != NULL)
+ skb_reserve(bcs->skb, HW_HDR_LEN);
+ else
+ err("could not allocate skb");
+ }
+
+ /* reset packet state */
+ bcs->fcs = PPP_INITFCS;
+}
+
+/* hdlc_done
+ * process completed HDLC data packet
+ */
+static inline void hdlc_done(struct bc_state *bcs)
+{
+ struct sk_buff *procskb;
+
+ if (unlikely(bcs->ignore)) {
+ bcs->ignore--;
+ hdlc_flush(bcs);
+ return;
+ }
+
+ if ((procskb = bcs->skb) == NULL) {
+ /* previous error */
+ dbg(DEBUG_ISO, "%s: skb=NULL", __func__);
+ gigaset_rcv_error(NULL, bcs->cs, bcs);
+ } else if (procskb->len < 2) {
+ notice("received short frame (%d octets)", procskb->len);
+ bcs->hw.bas->runts++;
+ gigaset_rcv_error(procskb, bcs->cs, bcs);
+ } else if (bcs->fcs != PPP_GOODFCS) {
+ notice("frame check error (0x%04x)", bcs->fcs);
+ bcs->hw.bas->fcserrs++;
+ gigaset_rcv_error(procskb, bcs->cs, bcs);
+ } else {
+ procskb->len -= 2; /* subtract FCS */
+ procskb->tail -= 2;
+ dbg(DEBUG_ISO,
+ "%s: good frame (%d octets)", __func__, procskb->len);
+ dump_bytes(DEBUG_STREAM,
+ "rcv data", procskb->data, procskb->len);
+ bcs->hw.bas->goodbytes += procskb->len;
+ gigaset_rcv_skb(procskb, bcs->cs, bcs);
+ }
+
+ if ((bcs->skb = dev_alloc_skb(SBUFSIZE + HW_HDR_LEN)) != NULL)
+ skb_reserve(bcs->skb, HW_HDR_LEN);
+ else
+ err("could not allocate skb");
+ bcs->fcs = PPP_INITFCS;
+}
+
+/* hdlc_frag
+ * drop HDLC data packet with non-integral last byte
+ */
+static inline void hdlc_frag(struct bc_state *bcs, unsigned inbits)
+{
+ if (unlikely(bcs->ignore)) {
+ bcs->ignore--;
+ hdlc_flush(bcs);
+ return;
+ }
+
+ notice("received partial byte (%d bits)", inbits);
+ bcs->hw.bas->alignerrs++;
+ gigaset_rcv_error(bcs->skb, bcs->cs, bcs);
+
+ if ((bcs->skb = dev_alloc_skb(SBUFSIZE + HW_HDR_LEN)) != NULL)
+ skb_reserve(bcs->skb, HW_HDR_LEN);
+ else
+ err("could not allocate skb");
+ bcs->fcs = PPP_INITFCS;
+}
+
+/* bit counts lookup table for HDLC bit unstuffing
+ * index: input byte
+ * value: bit 0..3 = number of consecutive '1' bits starting from LSB
+ * bit 4..6 = number of consecutive '1' bits starting from MSB
+ * (replacing 8 by 7 to make it fit; the algorithm won't care)
+ * bit 7 set if there are 5 or more "interior" consecutive '1' bits
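+ * example: bitcounts[0x1f] = 0x05: five consecutive '1' bits from the LSB,
+ * none from the MSB, no interior run of five or more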
+ */
+static unsigned char bitcounts[256] = {
+ 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x04,
+ 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x05,
+ 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x04,
+ 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x80, 0x06,
+ 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x04,
+ 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x05,
+ 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x04,
+ 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x03, 0x00, 0x01, 0x00, 0x02, 0x80, 0x81, 0x80, 0x07,
+ 0x10, 0x11, 0x10, 0x12, 0x10, 0x11, 0x10, 0x13, 0x10, 0x11, 0x10, 0x12, 0x10, 0x11, 0x10, 0x14,
+ 0x10, 0x11, 0x10, 0x12, 0x10, 0x11, 0x10, 0x13, 0x10, 0x11, 0x10, 0x12, 0x10, 0x11, 0x10, 0x15,
+ 0x10, 0x11, 0x10, 0x12, 0x10, 0x11, 0x10, 0x13, 0x10, 0x11, 0x10, 0x12, 0x10, 0x11, 0x10, 0x14,
+ 0x10, 0x11, 0x10, 0x12, 0x10, 0x11, 0x10, 0x13, 0x10, 0x11, 0x10, 0x12, 0x10, 0x11, 0x90, 0x16,
+ 0x20, 0x21, 0x20, 0x22, 0x20, 0x21, 0x20, 0x23, 0x20, 0x21, 0x20, 0x22, 0x20, 0x21, 0x20, 0x24,
+ 0x20, 0x21, 0x20, 0x22, 0x20, 0x21, 0x20, 0x23, 0x20, 0x21, 0x20, 0x22, 0x20, 0x21, 0x20, 0x25,
+ 0x30, 0x31, 0x30, 0x32, 0x30, 0x31, 0x30, 0x33, 0x30, 0x31, 0x30, 0x32, 0x30, 0x31, 0x30, 0x34,
+ 0x40, 0x41, 0x40, 0x42, 0x40, 0x41, 0x40, 0x43, 0x50, 0x51, 0x50, 0x52, 0x60, 0x61, 0x70, 0x78
+};
+
+/* hdlc_unpack
+ * perform HDLC frame processing (bit unstuffing, flag detection, FCS calculation)
+ * on a sequence of received data bytes (8 bits each, LSB first)
+ * pass on successfully received, complete frames as SKBs via gigaset_rcv_skb
+ * notify of errors via gigaset_rcv_error
+ * tally frames, errors etc. in BC structure counters
+ * parameters:
+ * src received data
+ * count number of received bytes
+ * bcs receiving B channel structure
+ */
+static inline void hdlc_unpack(unsigned char *src, unsigned count,
+ struct bc_state *bcs)
+{
+ struct bas_bc_state *ubc;
+ int inputstate;
+ unsigned seqlen, inbyte, inbits;
+
+ IFNULLRET(bcs);
+ ubc = bcs->hw.bas;
+ IFNULLRET(ubc);
+
+ /* load previous state:
+ * inputstate = set of flag bits:
+ * - INS_flag_hunt: no complete opening flag received since connection setup or last abort
+ * - INS_have_data: at least one complete data byte received since last flag
+ * seqlen = number of consecutive '1' bits in last 7 input stream bits (0..7)
+ * inbyte = accumulated partial data byte (if !INS_flag_hunt)
+ * inbits = number of valid bits in inbyte, starting at LSB (0..6)
+ */
+ inputstate = bcs->inputstate;
+ seqlen = ubc->seqlen;
+ inbyte = ubc->inbyte;
+ inbits = ubc->inbits;
+
+ /* bit unstuffing a byte at a time
+ * Take your time to understand this; it's straightforward but tedious.
+ * The "bitcounts" lookup table is used to speed up the counting of
+ * leading and trailing '1' bits.
+ */
+ while (count--) {
+ unsigned char c = *src++;
+ unsigned char tabentry = bitcounts[c];
+ unsigned lead1 = tabentry & 0x0f;
+ unsigned trail1 = (tabentry >> 4) & 0x0f;
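+ /* lead1: consecutive '1' bits at the LSB end of c (received first);
+ * trail1: consecutive '1' bits at the MSB end (received last) */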
+
+ seqlen += lead1;
+
+ if (unlikely(inputstate & INS_flag_hunt)) {
+ if (c == PPP_FLAG) {
+ /* flag-in-one */
+ inputstate &= ~(INS_flag_hunt | INS_have_data);
+ inbyte = 0;
+ inbits = 0;
+ } else if (seqlen == 6 && trail1 != 7) {
+ /* flag completed & not followed by abort */
+ inputstate &= ~(INS_flag_hunt | INS_have_data);
+ inbyte = c >> (lead1 + 1);
+ inbits = 7 - lead1;
+ if (trail1 >= 8) {
+ /* interior stuffing: omitting the MSB handles most cases */
+ inbits--;
+ /* correct the incorrectly handled cases individually */
+ switch (c) {
+ case 0xbe:
+ inbyte = 0x3f;
+ break;
+ }
+ }
+ }
+ /* else: continue flag-hunting */
+ } else if (likely(seqlen < 5 && trail1 < 7)) {
+ /* streamlined case: 8 data bits, no stuffing */
+ inbyte |= c << inbits;
+ hdlc_putbyte(inbyte & 0xff, bcs);
+ inputstate |= INS_have_data;
+ inbyte >>= 8;
+ /* inbits unchanged */
+ } else if (likely(seqlen == 6 && inbits == 7 - lead1 &&
+ trail1 + 1 == inbits &&
+ !(inputstate & INS_have_data))) {
+ /* streamlined case: flag idle - state unchanged */
+ } else if (unlikely(seqlen > 6)) {
+ /* abort sequence */
+ ubc->aborts++;
+ hdlc_flush(bcs);
+ inputstate |= INS_flag_hunt;
+ } else if (seqlen == 6) {
+ /* closing flag, including (6 - lead1) '1's and one '0' from inbits */
+ if (inbits > 7 - lead1) {
+ hdlc_frag(bcs, inbits + lead1 - 7);
+ inputstate &= ~INS_have_data;
+ } else {
+ if (inbits < 7 - lead1)
+ ubc->stolen0s ++;
+ if (inputstate & INS_have_data) {
+ hdlc_done(bcs);
+ inputstate &= ~INS_have_data;
+ }
+ }
+
+ if (c == PPP_FLAG) {
+ /* complete flag, LSB overlaps preceding flag */
+ ubc->shared0s ++;
+ inbits = 0;
+ inbyte = 0;
+ } else if (trail1 != 7) {
+ /* remaining bits */
+ inbyte = c >> (lead1 + 1);
+ inbits = 7 - lead1;
+ if (trail1 >= 8) {
+ /* interior stuffing: omitting the MSB handles most cases */
+ inbits--;
+ /* correct the incorrectly handled cases individually */
+ switch (c) {
+ case 0xbe:
+ inbyte = 0x3f;
+ break;
+ }
+ }
+ } else {
+ /* abort sequence follows, skb already empty anyway */
+ ubc->aborts++;
+ inputstate |= INS_flag_hunt;
+ }
+ } else { /* (seqlen < 6) && (seqlen == 5 || trail1 >= 7) */
+
+ if (c == PPP_FLAG) {
+ /* complete flag */
+ if (seqlen == 5)
+ ubc->stolen0s++;
+ if (inbits) {
+ hdlc_frag(bcs, inbits);
+ inbits = 0;
+ inbyte = 0;
+ } else if (inputstate & INS_have_data)
+ hdlc_done(bcs);
+ inputstate &= ~INS_have_data;
+ } else if (trail1 == 7) {
+ /* abort sequence */
+ ubc->aborts++;
+ hdlc_flush(bcs);
+ inputstate |= INS_flag_hunt;
+ } else {
+ /* stuffed data */
+ if (trail1 < 7) { /* => seqlen == 5 */
+ /* stuff bit at position lead1, no interior stuffing */
+ unsigned char mask = (1 << lead1) - 1;
+ c = (c & mask) | ((c & ~mask) >> 1);
+ inbyte |= c << inbits;
+ inbits += 7;
+ } else if (seqlen < 5) { /* trail1 >= 8 */
+ /* interior stuffing: omitting the MSB handles most cases */
+ /* correct the incorrectly handled cases individually */
+ switch (c) {
+ case 0xbe:
+ c = 0x7e;
+ break;
+ }
+ inbyte |= c << inbits;
+ inbits += 7;
+ } else { /* seqlen == 5 && trail1 >= 8 */
+
+ /* stuff bit at lead1 *and* interior stuffing */
+ switch (c) { /* unstuff individually */
+ case 0x7d:
+ c = 0x3f;
+ break;
+ case 0xbe:
+ c = 0x3f;
+ break;
+ case 0x3e:
+ c = 0x1f;
+ break;
+ case 0x7c:
+ c = 0x3e;
+ break;
+ }
+ inbyte |= c << inbits;
+ inbits += 6;
+ }
+ if (inbits >= 8) {
+ inbits -= 8;
+ hdlc_putbyte(inbyte & 0xff, bcs);
+ inputstate |= INS_have_data;
+ inbyte >>= 8;
+ }
+ }
+ }
+ seqlen = trail1 & 7;
+ }
+
+ /* save new state */
+ bcs->inputstate = inputstate;
+ ubc->seqlen = seqlen;
+ ubc->inbyte = inbyte;
+ ubc->inbits = inbits;
+}
+
+/* trans_receive
+ * pass on received USB frame transparently as SKB via gigaset_rcv_skb
+ * invert bytes
+ * tally frames, errors etc. in BC structure counters
+ * parameters:
+ * src received data
+ * count number of received bytes
+ * bcs receiving B channel structure
+ */
+static inline void trans_receive(unsigned char *src, unsigned count,
+ struct bc_state *bcs)
+{
+ struct sk_buff *skb;
+ int dobytes;
+ unsigned char *dst;
+
+ if (unlikely(bcs->ignore)) {
+ bcs->ignore--;
+ hdlc_flush(bcs);
+ return;
+ }
+ if (unlikely((skb = bcs->skb) == NULL)) {
+ bcs->skb = skb = dev_alloc_skb(SBUFSIZE + HW_HDR_LEN);
+ if (!skb) {
+ err("could not allocate skb");
+ return;
+ }
+ skb_reserve(skb, HW_HDR_LEN);
+ }
+ bcs->hw.bas->goodbytes += skb->len;
+ dobytes = TRANSBUFSIZE - skb->len;
+ while (count > 0) {
+ dst = skb_put(skb, count < dobytes ? count : dobytes);
+ while (count > 0 && dobytes > 0) {
+ *dst++ = gigaset_invtab[*src++];
+ count--;
+ dobytes--;
+ }
+ if (dobytes == 0) {
+ gigaset_rcv_skb(skb, bcs->cs, bcs);
+ bcs->skb = skb = dev_alloc_skb(SBUFSIZE + HW_HDR_LEN);
+ if (!skb) {
+ err("could not allocate skb");
+ return;
+ }
+ skb_reserve(bcs->skb, HW_HDR_LEN);
+ dobytes = TRANSBUFSIZE;
+ }
+ }
+}
+
+void gigaset_isoc_receive(unsigned char *src, unsigned count, struct bc_state *bcs)
+{
+ switch (bcs->proto2) {
+ case ISDN_PROTO_L2_HDLC:
+ hdlc_unpack(src, count, bcs);
+ break;
+ default: /* assume transparent */
+ trans_receive(src, count, bcs);
+ }
+}
+
+/* == data input =========================================================== */
+
+static void cmd_loop(unsigned char *src, int numbytes, struct inbuf_t *inbuf)
+{
+ struct cardstate *cs = inbuf->cs;
+ unsigned cbytes = cs->cbytes;
+
+ while (numbytes--) {
+ /* copy next character, check for end of line */
+ switch (cs->respdata[cbytes] = *src++) {
+ case '\r':
+ case '\n':
+ /* end of line */
+ dbg(DEBUG_TRANSCMD, "%s: End of Command (%d Bytes)",
+ __func__, cbytes);
+ cs->cbytes = cbytes;
+ gigaset_handle_modem_response(cs);
+ cbytes = 0;
+ break;
+ default:
+ /* advance in line buffer, checking for overflow */
+ if (cbytes < MAX_RESP_SIZE - 1)
+ cbytes++;
+ else
+ warn("response too large");
+ }
+ }
+
+ /* save state */
+ cs->cbytes = cbytes;
+}
+
+
+/* process a block of data received through the control channel
+ */
+void gigaset_isoc_input(struct inbuf_t *inbuf)
+{
+ struct cardstate *cs = inbuf->cs;
+ unsigned tail, head, numbytes;
+ unsigned char *src;
+
+ head = atomic_read(&inbuf->head);
+ while (head != (tail = atomic_read(&inbuf->tail))) {
+ dbg(DEBUG_INTR, "buffer state: %u -> %u", head, tail);
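+ /* if the valid data wraps past the end of the ring buffer,
+ * process the part up to the buffer end first; the remainder
+ * follows in the next loop iteration */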
+ if (head > tail)
+ tail = RBUFSIZE;
+ src = inbuf->data + head;
+ numbytes = tail - head;
+ dbg(DEBUG_INTR, "processing %u bytes", numbytes);
+
+ if (atomic_read(&cs->mstate) == MS_LOCKED) {
+ gigaset_dbg_buffer(DEBUG_LOCKCMD, "received response",
+ numbytes, src, 0);
+ gigaset_if_receive(inbuf->cs, src, numbytes);
+ } else {
+ gigaset_dbg_buffer(DEBUG_CMD, "received response",
+ numbytes, src, 0);
+ cmd_loop(src, numbytes, inbuf);
+ }
+
+ head += numbytes;
+ if (head == RBUFSIZE)
+ head = 0;
+ dbg(DEBUG_INTR, "setting head to %u", head);
+ atomic_set(&inbuf->head, head);
+ }
+}
+
+
+/* == data output ========================================================== */
+
+/* gigaset_send_skb
+ * called by common.c to queue an skb for sending
+ * and start transmission if necessary
+ * parameters:
+ * B Channel control structure
+ * skb
+ * return value:
+ * number of bytes accepted for sending
+ * (skb->len if ok, 0 if out of buffer space)
+ * or error code (< 0, eg. -EINVAL)
+ */
+int gigaset_isoc_send_skb(struct bc_state *bcs, struct sk_buff *skb)
+{
+ int len;
+
+ IFNULLRETVAL(bcs, -EFAULT);
+ IFNULLRETVAL(skb, -EFAULT);
+ len = skb->len;
+
+ skb_queue_tail(&bcs->squeue, skb);
+ dbg(DEBUG_ISO,
+ "%s: skb queued, qlen=%d", __func__, skb_queue_len(&bcs->squeue));
+
+ /* tasklet submits URB if necessary */
+ tasklet_schedule(&bcs->hw.bas->sent_tasklet);
+
+ return len; /* ok so far */
+}
diff --git a/drivers/isdn/gigaset/proc.c b/drivers/isdn/gigaset/proc.c
new file mode 100644
index 00000000000..c6915fa2be6
--- /dev/null
+++ b/drivers/isdn/gigaset/proc.c
@@ -0,0 +1,81 @@
+/*
+ * Stuff used by all variants of the driver
+ *
+ * Copyright (c) 2001 by Stefan Eilers <Eilers.Stefan@epost.de>,
+ * Hansjoerg Lipp <hjlipp@web.de>,
+ * Tilman Schmidt <tilman@imap.cc>.
+ *
+ * =====================================================================
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ * =====================================================================
+ * ToDo: ...
+ * =====================================================================
+ * Version: $Id: proc.c,v 1.5.2.13 2006/02/04 18:28:16 hjlipp Exp $
+ * =====================================================================
+ */
+
+#include "gigaset.h"
+#include <linux/ctype.h>
+
+static ssize_t show_cidmode(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct usb_interface *intf = to_usb_interface(dev);
+ struct cardstate *cs = usb_get_intfdata(intf);
+ return sprintf(buf, "%d\n", atomic_read(&cs->cidmode)); // FIXME use scnprintf for 13607 bit architectures (if PAGE_SIZE==4096)
+}
+
+static ssize_t set_cidmode(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct usb_interface *intf = to_usb_interface(dev);
+ struct cardstate *cs = usb_get_intfdata(intf);
+ long int value;
+ char *end;
+
+ value = simple_strtol(buf, &end, 0);
+ while (*end)
+ if (!isspace(*end++))
+ return -EINVAL;
+ if (value < 0 || value > 1)
+ return -EINVAL;
+
+ if (down_interruptible(&cs->sem))
+ return -ERESTARTSYS; // FIXME -EINTR?
+
+ cs->waiting = 1;
+ if (!gigaset_add_event(cs, &cs->at_state, EV_PROC_CIDMODE,
+ NULL, value, NULL)) {
+ cs->waiting = 0;
+ up(&cs->sem);
+ return -ENOMEM;
+ }
+
+ dbg(DEBUG_CMD, "scheduling PROC_CIDMODE");
+ gigaset_schedule_event(cs);
+
+ wait_event(cs->waitqueue, !cs->waiting);
+
+ up(&cs->sem);
+
+ return count;
+}
+
+static DEVICE_ATTR(cidmode, S_IRUGO|S_IWUSR, show_cidmode, set_cidmode);
+
+/* free sysfs for device */
+void gigaset_free_dev_sysfs(struct usb_interface *interface)
+{
+ dbg(DEBUG_INIT, "removing sysfs entries");
+ device_remove_file(&interface->dev, &dev_attr_cidmode);
+}
+EXPORT_SYMBOL_GPL(gigaset_free_dev_sysfs);
+
+/* initialize sysfs for device */
+void gigaset_init_dev_sysfs(struct usb_interface *interface)
+{
+ dbg(DEBUG_INIT, "setting up sysfs");
+ device_create_file(&interface->dev, &dev_attr_cidmode);
+}
+EXPORT_SYMBOL_GPL(gigaset_init_dev_sysfs);
diff --git a/drivers/isdn/gigaset/usb-gigaset.c b/drivers/isdn/gigaset/usb-gigaset.c
new file mode 100644
index 00000000000..323fc7349de
--- /dev/null
+++ b/drivers/isdn/gigaset/usb-gigaset.c
@@ -0,0 +1,1008 @@
+/*
+ * USB driver for Gigaset 307x directly or using M105 Data.
+ *
+ * Copyright (c) 2001 by Stefan Eilers <Eilers.Stefan@epost.de>
+ * and Hansjoerg Lipp <hjlipp@web.de>.
+ *
+ * This driver was derived from the USB skeleton driver by
+ * Greg Kroah-Hartman <greg@kroah.com>
+ *
+ * =====================================================================
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ * =====================================================================
+ * ToDo: ...
+ * =====================================================================
+ * Version: $Id: usb-gigaset.c,v 1.85.4.18 2006/02/04 18:28:16 hjlipp Exp $
+ * =====================================================================
+ */
+
+#include "gigaset.h"
+
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/usb.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+
+/* Version Information */
+#define DRIVER_AUTHOR "Hansjoerg Lipp <hjlipp@web.de>, Stefan Eilers <Eilers.Stefan@epost.de>"
+#define DRIVER_DESC "USB Driver for Gigaset 307x using M105"
+
+/* Module parameters */
+
+static int startmode = SM_ISDN;
+static int cidmode = 1;
+
+module_param(startmode, int, S_IRUGO);
+module_param(cidmode, int, S_IRUGO);
+MODULE_PARM_DESC(startmode, "start in isdn4linux mode");
+MODULE_PARM_DESC(cidmode, "Call-ID mode");
+
+#define GIGASET_MINORS 1
+#define GIGASET_MINOR 8
+#define GIGASET_MODULENAME "usb_gigaset"
+#define GIGASET_DEVFSNAME "gig/usb/"
+#define GIGASET_DEVNAME "ttyGU"
+
+#define IF_WRITEBUF 2000 //FIXME // WAKEUP_CHARS: 256
+
+/* Values for the Gigaset M105 Data */
+#define USB_M105_VENDOR_ID 0x0681
+#define USB_M105_PRODUCT_ID 0x0009
+
+/* table of devices that work with this driver */
+static struct usb_device_id gigaset_table [] = {
+ { USB_DEVICE(USB_M105_VENDOR_ID, USB_M105_PRODUCT_ID) },
+ { } /* Terminating entry */
+};
+
+MODULE_DEVICE_TABLE(usb, gigaset_table);
+
+/* Get a minor range for your devices from the usb maintainer */
+#define USB_SKEL_MINOR_BASE 200
+
+
+/*
+ * Control requests (empty fields: 00)
+ *
+ * RT|RQ|VALUE|INDEX|LEN |DATA
+ * In:
+ * C1 08 01
+ * Get flags (1 byte). Bits: 0=dtr,1=rts,3-7:?
+ * C1 0F ll ll
+ * Get device information/status (llll: 0x200 and 0x40 seen).
+ * Real size: I only saw MIN(llll,0x64).
+ * Contents: seems to be always the same...
+ * offset 0x00: Length of this structure (0x64) (len: 1,2,3 bytes)
+ * offset 0x3c: String (16 bit chars): "MCCI USB Serial V2.0"
+ * rest: ?
+ * Out:
+ * 41 11
+ * Initialize/reset device ?
+ * 41 00 xx 00
+ * ? (xx=00 or 01; 01 on start, 00 on close)
+ * 41 07 vv mm
+ * Set/clear flags vv=value, mm=mask (see RQ 08)
+ * 41 12 xx
+ * Used before the following configuration requests are issued
+ * (with xx=0x0f). I've seen other values<0xf, though.
+ * 41 01 xx xx
+ * Set baud rate. xxxx=ceil(0x384000/rate)=trunc(0x383fff/rate)+1.
+ * 41 03 ps bb
+ * Set byte size and parity. p: 0x20=even,0x10=odd,0x00=no parity
+ * [ 0x30: m, 0x40: s ]
+ * [s: 0: 1 stop bit; 1: 1.5; 2: 2]
+ * bb: bits/byte (seen 7 and 8)
+ * 41 13 -- -- -- -- 10 00 ww 00 00 00 xx 00 00 00 yy 00 00 00 zz 00 00 00
+ * ??
+ * Initialization: 01, 40, 00, 00
+ * Open device: 00 40, 00, 00
+ * yy and zz seem to be equal, either 0x00 or 0x0a
+ * (ww,xx) pairs seen: (00,00), (00,40), (01,40), (09,80), (19,80)
+ * 41 19 -- -- -- -- 06 00 00 00 00 xx 11 13
+ * Used after every "configuration sequence" (RQ 12, RQs 01/03/13).
+ * xx is usually 0x00 but was 0x7e before starting data transfer
+ * in unimodem mode. So, this might be an array of characters that need
+ * special treatment ("commit all buffered data"?), 11=^Q, 13=^S.
+ *
+ * Unimodem mode: use "modprobe ppp_async flag_time=0" as the device _needs_ two
+ * flags per packet.
+ */
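+
+/*
+ * Worked example for the baud rate request (0x01) documented above,
+ * assuming the ceil(0x384000/rate) relation holds:
+ *   115200 baud -> 0x384000/115200 = 0x20  => VALUE 0x0020
+ *     9600 baud -> 0x384000/9600   = 0x180 => VALUE 0x0180
+ * gigaset_baud_rate() below computes the same divisor as 0x383fff/rate + 1.
+ */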
+
+static int gigaset_probe(struct usb_interface *interface,
+ const struct usb_device_id *id);
+static void gigaset_disconnect(struct usb_interface *interface);
+
+static struct gigaset_driver *driver = NULL;
+static struct cardstate *cardstate = NULL;
+
+/* usb specific object needed to register this driver with the usb subsystem */
+static struct usb_driver gigaset_usb_driver = {
+ .name = GIGASET_MODULENAME,
+ .probe = gigaset_probe,
+ .disconnect = gigaset_disconnect,
+ .id_table = gigaset_table,
+};
+
+struct usb_cardstate {
+ struct usb_device *udev; /* save off the usb device pointer */
+ struct usb_interface *interface; /* the interface for this device */
+ atomic_t busy; /* bulk output in progress */
+
+ /* Output buffer for commands (M105: and data)*/
+ unsigned char *bulk_out_buffer; /* the buffer to send data */
+ int bulk_out_size; /* the size of the send buffer */
+ __u8 bulk_out_endpointAddr; /* the address of the bulk out endpoint */
+ struct urb *bulk_out_urb; /* the urb used to transmit data */
+
+ /* Input buffer for command responses (M105: and data)*/
+ int rcvbuf_size; /* the size of the receive buffer */
+ struct urb *read_urb; /* the urb used to receive data */
+ __u8 int_in_endpointAddr; /* the address of the interrupt in endpoint */
+
+ char bchars[6]; /* req. 0x19 */
+};
+
+struct usb_bc_state {};
+
+static inline unsigned tiocm_to_gigaset(unsigned state)
+{
+ return ((state & TIOCM_DTR) ? 1 : 0) | ((state & TIOCM_RTS) ? 2 : 0);
+}
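+/* e.g. TIOCM_DTR|TIOCM_RTS maps to 0x03, matching the flag bits of request 0x07 above */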
+
+#ifdef CONFIG_GIGASET_UNDOCREQ
+/* WARNING: EXPERIMENTAL! */
+static int gigaset_set_modem_ctrl(struct cardstate *cs, unsigned old_state,
+ unsigned new_state)
+{
+ unsigned mask, val;
+ int r;
+
+ mask = tiocm_to_gigaset(old_state ^ new_state);
+ val = tiocm_to_gigaset(new_state);
+
+ dbg(DEBUG_USBREQ, "set flags 0x%02x with mask 0x%02x", val, mask);
+ r = usb_control_msg(cs->hw.usb->udev,
+ usb_sndctrlpipe(cs->hw.usb->udev, 0), 7, 0x41,
+ (val & 0xff) | ((mask & 0xff) << 8), 0,
+ NULL, 0, 2000 /*timeout??*/); // don't use this in an interrupt/BH
+ if (r < 0)
+ return r;
+ //..
+ return 0;
+}
+
+static int set_value(struct cardstate *cs, u8 req, u16 val)
+{
+ int r, r2;
+
+ dbg(DEBUG_USBREQ, "request %02x (%04x)", (unsigned)req, (unsigned)val);
+ r = usb_control_msg(cs->hw.usb->udev,
+ usb_sndctrlpipe(cs->hw.usb->udev, 0), 0x12, 0x41,
+ 0xf /*?*/, 0,
+ NULL, 0, 2000 /*?*/); /* no idea what this does */
+ if (r < 0) {
+ err("error %d on request 0x12", -r);
+ return r;
+ }
+
+ r = usb_control_msg(cs->hw.usb->udev,
+ usb_sndctrlpipe(cs->hw.usb->udev, 0), req, 0x41,
+ val, 0,
+ NULL, 0, 2000 /*?*/);
+ if (r < 0)
+ err("error %d on request 0x%02x", -r, (unsigned)req);
+
+ r2 = usb_control_msg(cs->hw.usb->udev,
+ usb_sndctrlpipe(cs->hw.usb->udev, 0), 0x19, 0x41,
+ 0, 0, cs->hw.usb->bchars, 6, 2000 /*?*/);
+ if (r2 < 0)
+ err("error %d on request 0x19", -r2);
+
+ return r < 0 ? r : (r2 < 0 ? r2 : 0);
+}
+
+/* WARNING: HIGHLY EXPERIMENTAL! */
+// don't use this in an interrupt/BH
+static int gigaset_baud_rate(struct cardstate *cs, unsigned cflag)
+{
+ u16 val;
+ u32 rate;
+
+ cflag &= CBAUD;
+
+ switch (cflag) {
+ //FIXME more values?
+ case B300: rate = 300; break;
+ case B600: rate = 600; break;
+ case B1200: rate = 1200; break;
+ case B2400: rate = 2400; break;
+ case B4800: rate = 4800; break;
+ case B9600: rate = 9600; break;
+ case B19200: rate = 19200; break;
+ case B38400: rate = 38400; break;
+ case B57600: rate = 57600; break;
+ case B115200: rate = 115200; break;
+ default:
+ rate = 9600;
+ err("unsupported baudrate request 0x%x,"
+ " using default of B9600", cflag);
+ }
+
+ val = 0x383fff / rate + 1;
+
+ return set_value(cs, 1, val);
+}
+
+/* WARNING: HIGHLY EXPERIMENTAL! */
+// don't use this in an interrupt/BH
+static int gigaset_set_line_ctrl(struct cardstate *cs, unsigned cflag)
+{
+ u16 val = 0;
+
+ /* set the parity */
+ if (cflag & PARENB)
+ val |= (cflag & PARODD) ? 0x10 : 0x20;
+
+ /* set the number of data bits */
+ switch (cflag & CSIZE) {
+ case CS5:
+ val |= 5 << 8; break;
+ case CS6:
+ val |= 6 << 8; break;
+ case CS7:
+ val |= 7 << 8; break;
+ case CS8:
+ val |= 8 << 8; break;
+ default:
+ err("CSIZE was not CS5-CS8, using default of 8");
+ val |= 8 << 8;
+ break;
+ }
+
+ /* set the number of stop bits */
+ if (cflag & CSTOPB) {
+ if ((cflag & CSIZE) == CS5)
+ val |= 1; /* 1.5 stop bits */ //FIXME is this okay?
+ else
+ val |= 2; /* 2 stop bits */
+ }
+
+ return set_value(cs, 3, val);
+}
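+
+/*
+ * Example: a plain 8N1 line (CS8, no parity, one stop bit) yields
+ * val = 8 << 8 = 0x0800 for request 0x03 above.
+ */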
+
+#else
+static int gigaset_set_modem_ctrl(struct cardstate *cs, unsigned old_state,
+ unsigned new_state)
+{
+ return -EINVAL;
+}
+
+static int gigaset_set_line_ctrl(struct cardstate *cs, unsigned cflag)
+{
+ return -EINVAL;
+}
+
+static int gigaset_baud_rate(struct cardstate *cs, unsigned cflag)
+{
+ return -EINVAL;
+}
+#endif
+
+
+ /*================================================================================================================*/
+static int gigaset_init_bchannel(struct bc_state *bcs)
+{
+ /* nothing to do for M10x */
+ gigaset_bchannel_up(bcs);
+ return 0;
+}
+
+static int gigaset_close_bchannel(struct bc_state *bcs)
+{
+ /* nothing to do for M10x */
+ gigaset_bchannel_down(bcs);
+ return 0;
+}
+
+//void send_ack_to_LL(void *data);
+static int write_modem(struct cardstate *cs);
+static int send_cb(struct cardstate *cs, struct cmdbuf_t *cb);
+
+
+/* Handling of the send queue. If an skb is already open, its data is written to
+ * the transfer buffer by calling "write_modem"; otherwise a new skb is taken from the queue.
+ * This function is called by the ISR via "transmit_chars" (USB: the B-channel bulk callback handler
+ * via the write tasklet) or by writebuf_from_LL if the LL wants to transmit data.
+ */
+static void gigaset_modem_fill(unsigned long data)
+{
+ struct cardstate *cs = (struct cardstate *) data;
+ struct bc_state *bcs = &cs->bcs[0]; /* only one channel */
+ struct cmdbuf_t *cb;
+ unsigned long flags;
+ int again;
+
+ dbg(DEBUG_OUTPUT, "modem_fill");
+
+ if (atomic_read(&cs->hw.usb->busy)) {
+ dbg(DEBUG_OUTPUT, "modem_fill: busy");
+ return;
+ }
+
+ do {
+ again = 0;
+ if (!bcs->tx_skb) { /* no skb is being sent */
+ spin_lock_irqsave(&cs->cmdlock, flags);
+ cb = cs->cmdbuf;
+ spin_unlock_irqrestore(&cs->cmdlock, flags);
+ if (cb) { /* commands to send? */
+ dbg(DEBUG_OUTPUT, "modem_fill: cb");
+ if (send_cb(cs, cb) < 0) {
+ dbg(DEBUG_OUTPUT,
+ "modem_fill: send_cb failed");
+ again = 1; /* no callback will be called! */
+ }
+ } else { /* skbs to send? */
+ bcs->tx_skb = skb_dequeue(&bcs->squeue);
+ if (bcs->tx_skb)
+ dbg(DEBUG_INTR,
+ "Dequeued skb (Adr: %lx)!",
+ (unsigned long) bcs->tx_skb);
+ }
+ }
+
+ if (bcs->tx_skb) {
+ dbg(DEBUG_OUTPUT, "modem_fill: tx_skb");
+ if (write_modem(cs) < 0) {
+ dbg(DEBUG_OUTPUT,
+ "modem_fill: write_modem failed");
+ // FIXME should we tell the LL?
+ again = 1; /* no callback will be called! */
+ }
+ }
+ } while (again);
+}
+
+/**
+ * gigaset_read_int_callback
+ *
+ * Called when data has been received from the device. This is similar to
+ * the interrupt service routine in the serial driver.
+ */
+static void gigaset_read_int_callback(struct urb *urb, struct pt_regs *regs)
+{
+ int resubmit = 0;
+ int r;
+ struct cardstate *cs;
+ unsigned numbytes;
+ unsigned char *src;
+ //unsigned long flags;
+ struct inbuf_t *inbuf;
+
+ IFNULLRET(urb);
+ inbuf = (struct inbuf_t *) urb->context;
+ IFNULLRET(inbuf);
+ //spin_lock_irqsave(&inbuf->lock, flags);
+ cs = inbuf->cs;
+ IFNULLGOTO(cs, exit);
+ IFNULLGOTO(cardstate, exit);
+
+ if (!atomic_read(&cs->connected)) {
+ err("%s: disconnected", __func__);
+ goto exit;
+ }
+
+ if (!urb->status) {
+ numbytes = urb->actual_length;
+
+ if (numbytes) {
+ src = inbuf->rcvbuf;
+ if (unlikely(*src))
+ warn("%s: There was no leading 0, but 0x%02x!",
+ __func__, (unsigned) *src);
+ ++src; /* skip leading 0x00 */
+ --numbytes;
+ if (gigaset_fill_inbuf(inbuf, src, numbytes)) {
+ dbg(DEBUG_INTR, "%s-->BH", __func__);
+ gigaset_schedule_event(inbuf->cs);
+ }
+ } else
+ dbg(DEBUG_INTR, "Received zero block length");
+ resubmit = 1;
+ } else {
+ /* The urb might have been killed. */
+ dbg(DEBUG_ANY, "%s - nonzero read bulk status received: %d",
+ __func__, urb->status);
+ if (urb->status != -ENOENT) /* not killed */
+ resubmit = 1;
+ }
+exit:
+ //spin_unlock_irqrestore(&inbuf->lock, flags);
+ if (resubmit) {
+ r = usb_submit_urb(urb, SLAB_ATOMIC);
+ if (r)
+ err("error %d when resubmitting urb.", -r);
+ }
+}
+
+
+/* This callback routine is called when data has been transmitted to a B-channel.
+ * It therefore has to check whether there is still data to transmit, which
+ * is done by scheduling modem_fill via the write tasklet.
+ *
+ */
+static void gigaset_write_bulk_callback(struct urb *urb, struct pt_regs *regs)
+{
+ struct cardstate *cs = (struct cardstate *) urb->context;
+
+ IFNULLRET(cs);
+#ifdef CONFIG_GIGASET_DEBUG
+ if (!atomic_read(&cs->connected)) {
+ err("%s:not connected", __func__);
+ return;
+ }
+#endif
+ if (urb->status)
+ err("bulk transfer failed (status %d)", -urb->status); /* That's all we can do. Communication problems
+ are handled by timeouts or network protocols */
+
+ atomic_set(&cs->hw.usb->busy, 0);
+ tasklet_schedule(&cs->write_tasklet);
+}
+
+static int send_cb(struct cardstate *cs, struct cmdbuf_t *cb)
+{
+ struct cmdbuf_t *tcb;
+ unsigned long flags;
+ int count;
+ int status = -ENOENT; // FIXME
+ struct usb_cardstate *ucs = cs->hw.usb;
+
+ do {
+ if (!cb->len) {
+ tcb = cb;
+
+ spin_lock_irqsave(&cs->cmdlock, flags);
+ cs->cmdbytes -= cs->curlen;
+ dbg(DEBUG_OUTPUT, "send_cb: sent %u bytes, %u left",
+ cs->curlen, cs->cmdbytes);
+ cs->cmdbuf = cb = cb->next;
+ if (cb) {
+ cb->prev = NULL;
+ cs->curlen = cb->len;
+ } else {
+ cs->lastcmdbuf = NULL;
+ cs->curlen = 0;
+ }
+ spin_unlock_irqrestore(&cs->cmdlock, flags);
+
+ if (tcb->wake_tasklet)
+ tasklet_schedule(tcb->wake_tasklet);
+ kfree(tcb);
+ }
+ if (cb) {
+ count = min(cb->len, ucs->bulk_out_size);
+ usb_fill_bulk_urb(ucs->bulk_out_urb, ucs->udev,
+ usb_sndbulkpipe(ucs->udev,
+ ucs->bulk_out_endpointAddr & 0x0f),
+ cb->buf + cb->offset, count,
+ gigaset_write_bulk_callback, cs);
+
+ cb->offset += count;
+ cb->len -= count;
+ atomic_set(&ucs->busy, 1);
+ dbg(DEBUG_OUTPUT, "send_cb: send %d bytes", count);
+
+ status = usb_submit_urb(ucs->bulk_out_urb, SLAB_ATOMIC);
+ if (status) {
+ atomic_set(&ucs->busy, 0);
+ err("could not submit urb (error %d).",
+ -status);
+ cb->len = 0; /* skip urb => remove cb+wakeup in next loop cycle */
+ }
+ }
+ } while (cb && status); /* on error, try the next command //FIXME: is that OK? */
+
+ return status;
+}
+
+/* Write string into transbuf and send it to modem.
+ */
+static int gigaset_write_cmd(struct cardstate *cs, const unsigned char *buf,
+ int len, struct tasklet_struct *wake_tasklet)
+{
+ struct cmdbuf_t *cb;
+ unsigned long flags;
+
+ gigaset_dbg_buffer(atomic_read(&cs->mstate) != MS_LOCKED ?
+ DEBUG_TRANSCMD : DEBUG_LOCKCMD,
+ "CMD Transmit", len, buf, 0);
+
+ if (!atomic_read(&cs->connected)) {
+ err("%s: not connected", __func__);
+ return -ENODEV;
+ }
+
+ if (len <= 0)
+ return 0;
+
+ if (!(cb = kmalloc(sizeof(struct cmdbuf_t) + len, GFP_ATOMIC))) {
+ err("%s: out of memory", __func__);
+ return -ENOMEM;
+ }
+
+ memcpy(cb->buf, buf, len);
+ cb->len = len;
+ cb->offset = 0;
+ cb->next = NULL;
+ cb->wake_tasklet = wake_tasklet;
+
+ spin_lock_irqsave(&cs->cmdlock, flags);
+ cb->prev = cs->lastcmdbuf;
+ if (cs->lastcmdbuf)
+ cs->lastcmdbuf->next = cb;
+ else {
+ cs->cmdbuf = cb;
+ cs->curlen = len;
+ }
+ cs->cmdbytes += len;
+ cs->lastcmdbuf = cb;
+ spin_unlock_irqrestore(&cs->cmdlock, flags);
+
+ tasklet_schedule(&cs->write_tasklet);
+ return len;
+}
+
+static int gigaset_write_room(struct cardstate *cs)
+{
+ unsigned long flags;
+ unsigned bytes;
+
+ spin_lock_irqsave(&cs->cmdlock, flags);
+ bytes = cs->cmdbytes;
+ spin_unlock_irqrestore(&cs->cmdlock, flags);
+
+ return bytes < IF_WRITEBUF ? IF_WRITEBUF - bytes : 0;
+}
+
+static int gigaset_chars_in_buffer(struct cardstate *cs)
+{
+ return cs->cmdbytes;
+}
+
+static int gigaset_brkchars(struct cardstate *cs, const unsigned char buf[6])
+{
+#ifdef CONFIG_GIGASET_UNDOCREQ
+ gigaset_dbg_buffer(DEBUG_USBREQ, "brkchars", 6, buf, 0);
+ memcpy(cs->hw.usb->bchars, buf, 6);
+ return usb_control_msg(cs->hw.usb->udev,
+ usb_sndctrlpipe(cs->hw.usb->udev, 0), 0x19, 0x41,
+ 0, 0, cs->hw.usb->bchars, 6, 2000);
+#else
+ return -EINVAL;
+#endif
+}
+
+static int gigaset_freebcshw(struct bc_state *bcs)
+{
+ if (!bcs->hw.usb)
+ return 0;
+ //FIXME
+ kfree(bcs->hw.usb);
+ return 1;
+}
+
+/* Initialize the b-channel structure */
+static int gigaset_initbcshw(struct bc_state *bcs)
+{
+ bcs->hw.usb = kmalloc(sizeof(struct usb_bc_state), GFP_KERNEL);
+ if (!bcs->hw.usb)
+ return 0;
+
+ //bcs->hw.usb->trans_flg = READY_TO_TRNSMIT; /* B-Channel ready to transmit */
+ return 1;
+}
+
+static void gigaset_reinitbcshw(struct bc_state *bcs)
+{
+}
+
+static void gigaset_freecshw(struct cardstate *cs)
+{
+ //FIXME
+ tasklet_kill(&cs->write_tasklet);
+ kfree(cs->hw.usb);
+}
+
+static int gigaset_initcshw(struct cardstate *cs)
+{
+ struct usb_cardstate *ucs;
+
+ cs->hw.usb = ucs =
+ kmalloc(sizeof(struct usb_cardstate), GFP_KERNEL);
+ if (!ucs)
+ return 0;
+
+ ucs->bchars[0] = 0;
+ ucs->bchars[1] = 0;
+ ucs->bchars[2] = 0;
+ ucs->bchars[3] = 0;
+ ucs->bchars[4] = 0x11;
+ ucs->bchars[5] = 0x13;
+ ucs->bulk_out_buffer = NULL;
+ ucs->bulk_out_urb = NULL;
+ //ucs->urb_cmd_out = NULL;
+ ucs->read_urb = NULL;
+ tasklet_init(&cs->write_tasklet,
+ &gigaset_modem_fill, (unsigned long) cs);
+
+ return 1;
+}
+
+/* Write the data of the currently open skb to the modem.
+ * We have to protect against multiple calls until the
+ * completion callback has run, because only one transfer may
+ * be outstanding on the endpoint at a time. The "busy" flag
+ * is used to synchronize this.
+ */
+static int write_modem(struct cardstate *cs)
+{
+ int ret;
+ int count;
+ struct bc_state *bcs = &cs->bcs[0]; /* only one channel */
+ struct usb_cardstate *ucs = cs->hw.usb;
+ //unsigned long flags;
+
+ IFNULLRETVAL(bcs->tx_skb, -EINVAL);
+
+ dbg(DEBUG_WRITE, "len: %d...", bcs->tx_skb->len);
+
+ ret = -ENODEV;
+ IFNULLGOTO(ucs->bulk_out_buffer, error);
+ IFNULLGOTO(ucs->bulk_out_urb, error);
+ ret = 0;
+
+ if (!bcs->tx_skb->len) {
+ dev_kfree_skb_any(bcs->tx_skb);
+ bcs->tx_skb = NULL;
+ return -EINVAL;
+ }
+
+ /* Copy data to bulk out buffer and // FIXME copying not necessary
+ * transmit data
+ */
+ count = min(bcs->tx_skb->len, (unsigned) ucs->bulk_out_size);
+ memcpy(ucs->bulk_out_buffer, bcs->tx_skb->data, count);
+ skb_pull(bcs->tx_skb, count);
+
+ usb_fill_bulk_urb(ucs->bulk_out_urb, ucs->udev,
+ usb_sndbulkpipe(ucs->udev,
+ ucs->bulk_out_endpointAddr & 0x0f),
+ ucs->bulk_out_buffer, count,
+ gigaset_write_bulk_callback, cs);
+ atomic_set(&ucs->busy, 1);
+ dbg(DEBUG_OUTPUT, "write_modem: send %d bytes", count);
+
+ ret = usb_submit_urb(ucs->bulk_out_urb, SLAB_ATOMIC);
+ if (ret) {
+ err("could not submit urb (error %d).", -ret);
+ atomic_set(&ucs->busy, 0);
+ }
+ if (!bcs->tx_skb->len) {
+ /* skb sent completely */
+ gigaset_skb_sent(bcs, bcs->tx_skb); //FIXME also, when ret<0?
+
+ dbg(DEBUG_INTR,
+ "kfree skb (Adr: %lx)!", (unsigned long) bcs->tx_skb);
+ dev_kfree_skb_any(bcs->tx_skb);
+ bcs->tx_skb = NULL;
+ }
+
+ return ret;
+error:
+ dev_kfree_skb_any(bcs->tx_skb);
+ bcs->tx_skb = NULL;
+ return ret;
+
+}
+
+static int gigaset_probe(struct usb_interface *interface,
+ const struct usb_device_id *id)
+{
+ int retval;
+ struct usb_device *udev = interface_to_usbdev(interface);
+ unsigned int ifnum;
+ struct usb_host_interface *hostif;
+ struct cardstate *cs = NULL;
+ struct usb_cardstate *ucs = NULL;
+ //struct usb_interface_descriptor *iface_desc;
+ struct usb_endpoint_descriptor *endpoint;
+ //isdn_ctrl command;
+ int buffer_size;
+ int alt;
+ //unsigned long flags;
+
+ info("%s: Check if device matches .. (Vendor: 0x%x, Product: 0x%x)",
+ __func__, le16_to_cpu(udev->descriptor.idVendor),
+ le16_to_cpu(udev->descriptor.idProduct));
+
+ retval = -ENODEV; //FIXME
+
+ /* See if the device offered us matches what we can accept */
+ if ((le16_to_cpu(udev->descriptor.idVendor) != USB_M105_VENDOR_ID) ||
+ (le16_to_cpu(udev->descriptor.idProduct) != USB_M105_PRODUCT_ID))
+ return -ENODEV;
+
+ /* this starts to become ascii art... */
+ hostif = interface->cur_altsetting;
+ alt = hostif->desc.bAlternateSetting;
+ ifnum = hostif->desc.bInterfaceNumber; // FIXME ?
+
+ if (alt != 0 || ifnum != 0) {
+ warn("ifnum %d, alt %d", ifnum, alt);
+ return -ENODEV;
+ }
+
+ /* Reject application specific interfaces
+ *
+ */
+ if (hostif->desc.bInterfaceClass != 255) {
+ info("%s: Device matched, but iface_desc[%d]->bInterfaceClass==%d !",
+ __func__, ifnum, hostif->desc.bInterfaceClass);
+ return -ENODEV;
+ }
+
+ info("%s: Device matched ... !", __func__);
+
+ cs = gigaset_getunassignedcs(driver);
+ if (!cs) {
+ warn("No free cardstate!");
+ return -ENODEV;
+ }
+ ucs = cs->hw.usb;
+
+#if 0
+ if (usb_set_configuration(udev, udev->config[0].desc.bConfigurationValue) < 0) {
+ warn("set_configuration failed");
+ goto error;
+ }
+
+
+ if (usb_set_interface(udev, ifnum/*==0*/, alt/*==0*/) < 0) {
+ warn("usb_set_interface failed, device %d interface %d altsetting %d",
+ udev->devnum, ifnum, alt);
+ goto error;
+ }
+#endif
+
+ /* set up the endpoint information */
+ /* check out the endpoints */
+ /* We will get 2 endpoints: one for sending commands to the device (bulk out) and one for
+ * polling messages from the device (interrupt in).
+ * This is a situation similar to that of our serial port handler.
+ * If a connection is established, we will have to create data in/out pipes
+ * dynamically...
+ */
+
+ endpoint = &hostif->endpoint[0].desc;
+
+ buffer_size = le16_to_cpu(endpoint->wMaxPacketSize);
+ ucs->bulk_out_size = buffer_size;
+ ucs->bulk_out_endpointAddr = endpoint->bEndpointAddress;
+ ucs->bulk_out_buffer = kmalloc(buffer_size, GFP_KERNEL);
+ if (!ucs->bulk_out_buffer) {
+ err("Couldn't allocate bulk_out_buffer");
+ retval = -ENOMEM;
+ goto error;
+ }
+
+ ucs->bulk_out_urb = usb_alloc_urb(0, SLAB_KERNEL);
+ if (!ucs->bulk_out_urb) {
+ err("Couldn't allocate bulk_out_buffer");
+ retval = -ENOMEM;
+ goto error;
+ }
+
+ endpoint = &hostif->endpoint[1].desc;
+
+ atomic_set(&ucs->busy, 0);
+ ucs->udev = udev;
+ ucs->interface = interface;
+
+ ucs->read_urb = usb_alloc_urb(0, SLAB_KERNEL);
+ if (!ucs->read_urb) {
+ err("No free urbs available");
+ retval = -ENOMEM;
+ goto error;
+ }
+ buffer_size = le16_to_cpu(endpoint->wMaxPacketSize);
+ ucs->rcvbuf_size = buffer_size;
+ ucs->int_in_endpointAddr = endpoint->bEndpointAddress;
+ cs->inbuf[0].rcvbuf = kmalloc(buffer_size, GFP_KERNEL);
+ if (!cs->inbuf[0].rcvbuf) {
+ err("Couldn't allocate rcvbuf");
+ retval = -ENOMEM;
+ goto error;
+ }
+ /* Fill the interrupt urb and send it to the core */
+ usb_fill_int_urb(ucs->read_urb, udev,
+ usb_rcvintpipe(udev,
+ endpoint->bEndpointAddress & 0x0f),
+ cs->inbuf[0].rcvbuf, buffer_size,
+ gigaset_read_int_callback,
+ cs->inbuf + 0, endpoint->bInterval);
+
+ retval = usb_submit_urb(ucs->read_urb, SLAB_KERNEL);
+ if (retval) {
+ err("Could not submit URB!");
+ goto error;
+ }
+
+ /* tell common part that the device is ready */
+ if (startmode == SM_LOCKED)
+ atomic_set(&cs->mstate, MS_LOCKED);
+ if (!gigaset_start(cs)) {
+ tasklet_kill(&cs->write_tasklet);
+ retval = -ENODEV; //FIXME
+ goto error;
+ }
+
+ /* save address of controller structure */
+ usb_set_intfdata(interface, cs);
+
+ /* set up device sysfs */
+ gigaset_init_dev_sysfs(interface);
+ return 0;
+
+error:
+ if (ucs->read_urb)
+ usb_kill_urb(ucs->read_urb);
+ kfree(ucs->bulk_out_buffer);
+ if (ucs->bulk_out_urb != NULL)
+ usb_free_urb(ucs->bulk_out_urb);
+ kfree(cs->inbuf[0].rcvbuf);
+ if (ucs->read_urb != NULL)
+ usb_free_urb(ucs->read_urb);
+ ucs->read_urb = ucs->bulk_out_urb = NULL;
+ cs->inbuf[0].rcvbuf = ucs->bulk_out_buffer = NULL;
+ gigaset_unassign(cs);
+ return retval;
+}
+
+/**
+ * gigaset_disconnect
+ */
+static void gigaset_disconnect(struct usb_interface *interface)
+{
+ struct cardstate *cs;
+ struct usb_cardstate *ucs;
+
+ cs = usb_get_intfdata(interface);
+
+ /* clear device sysfs */
+ gigaset_free_dev_sysfs(interface);
+
+ usb_set_intfdata(interface, NULL);
+ ucs = cs->hw.usb;
+ usb_kill_urb(ucs->read_urb);
+ //info("GigaSet USB device #%d will be disconnected", minor);
+
+ gigaset_stop(cs);
+
+ tasklet_kill(&cs->write_tasklet);
+
+ usb_kill_urb(ucs->bulk_out_urb); /* FIXME: only if necessary */
+ //usb_kill_urb(ucs->urb_cmd_out); /* FIXME: only if necessary */
+
+ kfree(ucs->bulk_out_buffer);
+ if (ucs->bulk_out_urb != NULL)
+ usb_free_urb(ucs->bulk_out_urb);
+ //if(ucs->urb_cmd_out != NULL)
+ // usb_free_urb(ucs->urb_cmd_out);
+ kfree(cs->inbuf[0].rcvbuf);
+ if (ucs->read_urb != NULL)
+ usb_free_urb(ucs->read_urb);
+ ucs->read_urb = ucs->bulk_out_urb/*=ucs->urb_cmd_out*/=NULL;
+ cs->inbuf[0].rcvbuf = ucs->bulk_out_buffer = NULL;
+
+ gigaset_unassign(cs);
+}
+
+static struct gigaset_ops ops = {
+ gigaset_write_cmd,
+ gigaset_write_room,
+ gigaset_chars_in_buffer,
+ gigaset_brkchars,
+ gigaset_init_bchannel,
+ gigaset_close_bchannel,
+ gigaset_initbcshw,
+ gigaset_freebcshw,
+ gigaset_reinitbcshw,
+ gigaset_initcshw,
+ gigaset_freecshw,
+ gigaset_set_modem_ctrl,
+ gigaset_baud_rate,
+ gigaset_set_line_ctrl,
+ gigaset_m10x_send_skb,
+ gigaset_m10x_input,
+};
+
+/**
+ * usb_gigaset_init
+ * This function is called when the kernel module is loaded
+ */
+static int __init usb_gigaset_init(void)
+{
+ int result;
+
+ /* allocate memory for our driver state and initialize it */
+ if ((driver = gigaset_initdriver(GIGASET_MINOR, GIGASET_MINORS,
+ GIGASET_MODULENAME, GIGASET_DEVNAME,
+ GIGASET_DEVFSNAME, &ops,
+ THIS_MODULE)) == NULL)
+ goto error;
+
+ /* allocate memory for our device state and initialize it */
+ cardstate = gigaset_initcs(driver, 1, 1, 0, cidmode, GIGASET_MODULENAME);
+ if (!cardstate)
+ goto error;
+
+ /* register this driver with the USB subsystem */
+ result = usb_register(&gigaset_usb_driver);
+ if (result < 0) {
+ err("usb_gigaset: usb_register failed (error %d)",
+ -result);
+ goto error;
+ }
+
+ info(DRIVER_AUTHOR);
+ info(DRIVER_DESC);
+ return 0;
+
+error: if (cardstate)
+ gigaset_freecs(cardstate);
+ cardstate = NULL;
+ if (driver)
+ gigaset_freedriver(driver);
+ driver = NULL;
+ return -1;
+}
+
+
+/**
+ * usb_gigaset_exit
+ * This function is called when the kernel module is unloaded
+ */
+static void __exit usb_gigaset_exit(void)
+{
+ gigaset_blockdriver(driver); /* => probe will fail
+ * => no gigaset_start any more
+ */
+
+ gigaset_shutdown(cardstate);
+ /* from now on, no isdn callback should be possible */
+
+ /* deregister this driver with the USB subsystem */
+ usb_deregister(&gigaset_usb_driver);
+ /* this will call the disconnect-callback */
+ /* from now on, no disconnect/probe callback should be running */
+
+ gigaset_freecs(cardstate);
+ cardstate = NULL;
+ gigaset_freedriver(driver);
+ driver = NULL;
+}
+
+
+module_init(usb_gigaset_init);
+module_exit(usb_gigaset_exit);
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/isdn/hardware/avm/avmcard.h b/drivers/isdn/hardware/avm/avmcard.h
index 296d6a6f749..3b431723c7c 100644
--- a/drivers/isdn/hardware/avm/avmcard.h
+++ b/drivers/isdn/hardware/avm/avmcard.h
@@ -437,9 +437,7 @@ static inline unsigned int t1_get_slice(unsigned int base,
#endif
dp += i;
i = 0;
- if (i == 0)
- break;
- /* fall through */
+ break;
default:
*dp++ = b1_get_byte(base);
i--;
diff --git a/drivers/isdn/i4l/Kconfig b/drivers/isdn/i4l/Kconfig
index 1789b607f09..a4f7288a1fc 100644
--- a/drivers/isdn/i4l/Kconfig
+++ b/drivers/isdn/i4l/Kconfig
@@ -139,3 +139,4 @@ source "drivers/isdn/hysdn/Kconfig"
endmenu
+source "drivers/isdn/gigaset/Kconfig"
diff --git a/drivers/macintosh/smu.c b/drivers/macintosh/smu.c
index 4eb05d7143d..f4516ca7aa3 100644
--- a/drivers/macintosh/smu.c
+++ b/drivers/macintosh/smu.c
@@ -35,6 +35,7 @@
#include <linux/delay.h>
#include <linux/sysdev.h>
#include <linux/poll.h>
+#include <linux/mutex.h>
#include <asm/byteorder.h>
#include <asm/io.h>
@@ -92,7 +93,7 @@ struct smu_device {
* for now, just hard code that
*/
static struct smu_device *smu;
-static DECLARE_MUTEX(smu_part_access);
+static DEFINE_MUTEX(smu_part_access);
static void smu_i2c_retry(unsigned long data);
@@ -976,11 +977,11 @@ struct smu_sdbp_header *__smu_get_sdb_partition(int id, unsigned int *size,
if (interruptible) {
int rc;
- rc = down_interruptible(&smu_part_access);
+ rc = mutex_lock_interruptible(&smu_part_access);
if (rc)
return ERR_PTR(rc);
} else
- down(&smu_part_access);
+ mutex_lock(&smu_part_access);
part = (struct smu_sdbp_header *)get_property(smu->of_node,
pname, size);
@@ -990,7 +991,7 @@ struct smu_sdbp_header *__smu_get_sdb_partition(int id, unsigned int *size,
if (part != NULL && size)
*size = part->len << 2;
}
- up(&smu_part_access);
+ mutex_unlock(&smu_part_access);
return part;
}
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index e1c18aa1d71..f8ffaee20ff 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -89,16 +89,6 @@ int bitmap_active(struct bitmap *bitmap)
}
#define WRITE_POOL_SIZE 256
-/* mempool for queueing pending writes on the bitmap file */
-static void *write_pool_alloc(gfp_t gfp_flags, void *data)
-{
- return kmalloc(sizeof(struct page_list), gfp_flags);
-}
-
-static void write_pool_free(void *ptr, void *data)
-{
- kfree(ptr);
-}
/*
* just a placeholder - calls kmalloc for bitmap pages
@@ -1564,8 +1554,8 @@ int bitmap_create(mddev_t *mddev)
spin_lock_init(&bitmap->write_lock);
INIT_LIST_HEAD(&bitmap->complete_pages);
init_waitqueue_head(&bitmap->write_wait);
- bitmap->write_pool = mempool_create(WRITE_POOL_SIZE, write_pool_alloc,
- write_pool_free, NULL);
+ bitmap->write_pool = mempool_create_kmalloc_pool(WRITE_POOL_SIZE,
+ sizeof(struct page_list));
err = -ENOMEM;
if (!bitmap->write_pool)
goto error;
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index e7a650f9ca0..259e86f2654 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -94,20 +94,6 @@ struct crypt_config {
static kmem_cache_t *_crypt_io_pool;
/*
- * Mempool alloc and free functions for the page
- */
-static void *mempool_alloc_page(gfp_t gfp_mask, void *data)
-{
- return alloc_page(gfp_mask);
-}
-
-static void mempool_free_page(void *page, void *data)
-{
- __free_page(page);
-}
-
-
-/*
* Different IV generation algorithms:
*
* plain: the initial vector is the 32-bit low-endian version of the sector
@@ -630,15 +616,13 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
}
}
- cc->io_pool = mempool_create(MIN_IOS, mempool_alloc_slab,
- mempool_free_slab, _crypt_io_pool);
+ cc->io_pool = mempool_create_slab_pool(MIN_IOS, _crypt_io_pool);
if (!cc->io_pool) {
ti->error = PFX "Cannot allocate crypt io mempool";
goto bad3;
}
- cc->page_pool = mempool_create(MIN_POOL_PAGES, mempool_alloc_page,
- mempool_free_page, NULL);
+ cc->page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0);
if (!cc->page_pool) {
ti->error = PFX "Cannot allocate page mempool";
goto bad4;
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index 4809b209fbb..da663d2ff55 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -32,16 +32,6 @@ struct io {
static unsigned _num_ios;
static mempool_t *_io_pool;
-static void *alloc_io(gfp_t gfp_mask, void *pool_data)
-{
- return kmalloc(sizeof(struct io), gfp_mask);
-}
-
-static void free_io(void *element, void *pool_data)
-{
- kfree(element);
-}
-
static unsigned int pages_to_ios(unsigned int pages)
{
return 4 * pages; /* too many ? */
@@ -65,7 +55,8 @@ static int resize_pool(unsigned int new_ios)
} else {
/* create new pool */
- _io_pool = mempool_create(new_ios, alloc_io, free_io, NULL);
+ _io_pool = mempool_create_kmalloc_pool(new_ios,
+ sizeof(struct io));
if (!_io_pool)
return -ENOMEM;
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index f72a82fb943..1816f30678e 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -179,8 +179,7 @@ static struct multipath *alloc_multipath(void)
m->queue_io = 1;
INIT_WORK(&m->process_queued_ios, process_queued_ios, m);
INIT_WORK(&m->trigger_event, trigger_event, m);
- m->mpio_pool = mempool_create(MIN_IOS, mempool_alloc_slab,
- mempool_free_slab, _mpio_cache);
+ m->mpio_pool = mempool_create_slab_pool(MIN_IOS, _mpio_cache);
if (!m->mpio_pool) {
kfree(m);
return NULL;
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index 6cfa8d435d5..4e90f231fbf 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -122,16 +122,6 @@ static inline sector_t region_to_sector(struct region_hash *rh, region_t region)
/* FIXME move this */
static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw);
-static void *region_alloc(gfp_t gfp_mask, void *pool_data)
-{
- return kmalloc(sizeof(struct region), gfp_mask);
-}
-
-static void region_free(void *element, void *pool_data)
-{
- kfree(element);
-}
-
#define MIN_REGIONS 64
#define MAX_RECOVERY 1
static int rh_init(struct region_hash *rh, struct mirror_set *ms,
@@ -173,8 +163,8 @@ static int rh_init(struct region_hash *rh, struct mirror_set *ms,
INIT_LIST_HEAD(&rh->quiesced_regions);
INIT_LIST_HEAD(&rh->recovered_regions);
- rh->region_pool = mempool_create(MIN_REGIONS, region_alloc,
- region_free, NULL);
+ rh->region_pool = mempool_create_kmalloc_pool(MIN_REGIONS,
+ sizeof(struct region));
if (!rh->region_pool) {
vfree(rh->buckets);
rh->buckets = NULL;
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index f3759dd7828..7401540086d 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -1174,8 +1174,7 @@ static int __init dm_snapshot_init(void)
goto bad4;
}
- pending_pool = mempool_create(128, mempool_alloc_slab,
- mempool_free_slab, pending_cache);
+ pending_pool = mempool_create_slab_pool(128, pending_cache);
if (!pending_pool) {
DMERR("Couldn't create pending pool.");
r = -ENOMEM;
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 8c82373f7ff..a64798ef481 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -823,13 +823,11 @@ static struct mapped_device *alloc_dev(unsigned int minor, int persistent)
md->queue->unplug_fn = dm_unplug_all;
md->queue->issue_flush_fn = dm_flush_all;
- md->io_pool = mempool_create(MIN_IOS, mempool_alloc_slab,
- mempool_free_slab, _io_cache);
+ md->io_pool = mempool_create_slab_pool(MIN_IOS, _io_cache);
if (!md->io_pool)
goto bad2;
- md->tio_pool = mempool_create(MIN_IOS, mempool_alloc_slab,
- mempool_free_slab, _tio_cache);
+ md->tio_pool = mempool_create_slab_pool(MIN_IOS, _tio_cache);
if (!md->tio_pool)
goto bad3;
diff --git a/drivers/md/kcopyd.c b/drivers/md/kcopyd.c
index 8b3515f394a..9dcb2c8a385 100644
--- a/drivers/md/kcopyd.c
+++ b/drivers/md/kcopyd.c
@@ -227,8 +227,7 @@ static int jobs_init(void)
if (!_job_cache)
return -ENOMEM;
- _job_pool = mempool_create(MIN_JOBS, mempool_alloc_slab,
- mempool_free_slab, _job_cache);
+ _job_pool = mempool_create_slab_pool(MIN_JOBS, _job_cache);
if (!_job_pool) {
kmem_cache_destroy(_job_cache);
return -ENOMEM;
@@ -590,51 +589,51 @@ static void client_del(struct kcopyd_client *kc)
up(&_client_lock);
}
-static DECLARE_MUTEX(kcopyd_init_lock);
+static DEFINE_MUTEX(kcopyd_init_lock);
static int kcopyd_clients = 0;
static int kcopyd_init(void)
{
int r;
- down(&kcopyd_init_lock);
+ mutex_lock(&kcopyd_init_lock);
if (kcopyd_clients) {
/* Already initialized. */
kcopyd_clients++;
- up(&kcopyd_init_lock);
+ mutex_unlock(&kcopyd_init_lock);
return 0;
}
r = jobs_init();
if (r) {
- up(&kcopyd_init_lock);
+ mutex_unlock(&kcopyd_init_lock);
return r;
}
_kcopyd_wq = create_singlethread_workqueue("kcopyd");
if (!_kcopyd_wq) {
jobs_exit();
- up(&kcopyd_init_lock);
+ mutex_unlock(&kcopyd_init_lock);
return -ENOMEM;
}
kcopyd_clients++;
INIT_WORK(&_kcopyd_work, do_work, NULL);
- up(&kcopyd_init_lock);
+ mutex_unlock(&kcopyd_init_lock);
return 0;
}
static void kcopyd_exit(void)
{
- down(&kcopyd_init_lock);
+ mutex_lock(&kcopyd_init_lock);
kcopyd_clients--;
if (!kcopyd_clients) {
jobs_exit();
destroy_workqueue(_kcopyd_wq);
_kcopyd_wq = NULL;
}
- up(&kcopyd_init_lock);
+ mutex_unlock(&kcopyd_init_lock);
}
int kcopyd_client_create(unsigned int nr_pages, struct kcopyd_client **result)
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 96f7af4ae40..1cc9de44ce8 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -35,18 +35,6 @@
#define NR_RESERVED_BUFS 32
-static void *mp_pool_alloc(gfp_t gfp_flags, void *data)
-{
- struct multipath_bh *mpb;
- mpb = kzalloc(sizeof(*mpb), gfp_flags);
- return mpb;
-}
-
-static void mp_pool_free(void *mpb, void *data)
-{
- kfree(mpb);
-}
-
static int multipath_map (multipath_conf_t *conf)
{
int i, disks = conf->raid_disks;
@@ -494,9 +482,8 @@ static int multipath_run (mddev_t *mddev)
}
mddev->degraded = conf->raid_disks = conf->working_disks;
- conf->pool = mempool_create(NR_RESERVED_BUFS,
- mp_pool_alloc, mp_pool_free,
- NULL);
+ conf->pool = mempool_create_kzalloc_pool(NR_RESERVED_BUFS,
+ sizeof(struct multipath_bh));
if (conf->pool == NULL) {
printk(KERN_ERR
"multipath: couldn't allocate memory for %s\n",
diff --git a/drivers/message/i2o/i2o_block.c b/drivers/message/i2o/i2o_block.c
index b09fb630715..7d4c5497785 100644
--- a/drivers/message/i2o/i2o_block.c
+++ b/drivers/message/i2o/i2o_block.c
@@ -1179,10 +1179,9 @@ static int __init i2o_block_init(void)
goto exit;
}
- i2o_blk_req_pool.pool = mempool_create(I2O_BLOCK_REQ_MEMPOOL_SIZE,
- mempool_alloc_slab,
- mempool_free_slab,
- i2o_blk_req_pool.slab);
+ i2o_blk_req_pool.pool =
+ mempool_create_slab_pool(I2O_BLOCK_REQ_MEMPOOL_SIZE,
+ i2o_blk_req_pool.slab);
if (!i2o_blk_req_pool.pool) {
osm_err("can't init request mempool\n");
rc = -ENOMEM;
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c
index d339308539f..70f63891b19 100644
--- a/drivers/net/3c59x.c
+++ b/drivers/net/3c59x.c
@@ -196,8 +196,6 @@
#define DRV_NAME "3c59x"
-#define DRV_VERSION "LK1.1.19"
-#define DRV_RELDATE "10 Nov 2002"
@@ -275,10 +273,8 @@ static char version[] __devinitdata =
DRV_NAME ": Donald Becker and others. www.scyld.com/network/vortex.html\n";
MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
-MODULE_DESCRIPTION("3Com 3c59x/3c9xx ethernet driver "
- DRV_VERSION " " DRV_RELDATE);
+MODULE_DESCRIPTION("3Com 3c59x/3c9xx ethernet driver ");
MODULE_LICENSE("GPL");
-MODULE_VERSION(DRV_VERSION);
/* Operational parameter that usually are not changed. */
@@ -904,7 +900,6 @@ static void acpi_set_WOL(struct net_device *dev);
static struct ethtool_ops vortex_ethtool_ops;
static void set_8021q_mode(struct net_device *dev, int enable);
-
/* This driver uses 'options' to pass the media type, full-duplex flag, etc. */
/* Option count limit only -- unlimited interfaces are supported. */
#define MAX_UNITS 8
@@ -919,8 +914,6 @@ static int global_full_duplex = -1;
static int global_enable_wol = -1;
static int global_use_mmio = -1;
-/* #define dev_alloc_skb dev_alloc_skb_debug */
-
/* Variables to work-around the Compaq PCI BIOS32 problem. */
static int compaq_ioaddr, compaq_irq, compaq_device_id = 0x5900;
static struct net_device *compaq_net_device;
@@ -976,7 +969,7 @@ static void poll_vortex(struct net_device *dev)
#ifdef CONFIG_PM
-static int vortex_suspend (struct pci_dev *pdev, pm_message_t state)
+static int vortex_suspend(struct pci_dev *pdev, pm_message_t state)
{
struct net_device *dev = pci_get_drvdata(pdev);
@@ -994,7 +987,7 @@ static int vortex_suspend (struct pci_dev *pdev, pm_message_t state)
return 0;
}
-static int vortex_resume (struct pci_dev *pdev)
+static int vortex_resume(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
struct vortex_private *vp = netdev_priv(dev);
@@ -1027,8 +1020,8 @@ static struct eisa_device_id vortex_eisa_ids[] = {
{ "" }
};
-static int vortex_eisa_probe (struct device *device);
-static int vortex_eisa_remove (struct device *device);
+static int vortex_eisa_probe(struct device *device);
+static int vortex_eisa_remove(struct device *device);
static struct eisa_driver vortex_eisa_driver = {
.id_table = vortex_eisa_ids,
@@ -1039,12 +1032,12 @@ static struct eisa_driver vortex_eisa_driver = {
}
};
-static int vortex_eisa_probe (struct device *device)
+static int vortex_eisa_probe(struct device *device)
{
void __iomem *ioaddr;
struct eisa_device *edev;
- edev = to_eisa_device (device);
+ edev = to_eisa_device(device);
if (!request_region(edev->base_addr, VORTEX_TOTAL_SIZE, DRV_NAME))
return -EBUSY;
@@ -1053,7 +1046,7 @@ static int vortex_eisa_probe (struct device *device)
if (vortex_probe1(device, ioaddr, ioread16(ioaddr + 0xC88) >> 12,
edev->id.driver_data, vortex_cards_found)) {
- release_region (edev->base_addr, VORTEX_TOTAL_SIZE);
+ release_region(edev->base_addr, VORTEX_TOTAL_SIZE);
return -ENODEV;
}
@@ -1062,15 +1055,15 @@ static int vortex_eisa_probe (struct device *device)
return 0;
}
-static int vortex_eisa_remove (struct device *device)
+static int vortex_eisa_remove(struct device *device)
{
struct eisa_device *edev;
struct net_device *dev;
struct vortex_private *vp;
void __iomem *ioaddr;
- edev = to_eisa_device (device);
- dev = eisa_get_drvdata (edev);
+ edev = to_eisa_device(device);
+ dev = eisa_get_drvdata(edev);
if (!dev) {
printk("vortex_eisa_remove called for Compaq device!\n");
@@ -1080,17 +1073,17 @@ static int vortex_eisa_remove (struct device *device)
vp = netdev_priv(dev);
ioaddr = vp->ioaddr;
- unregister_netdev (dev);
- iowrite16 (TotalReset|0x14, ioaddr + EL3_CMD);
- release_region (dev->base_addr, VORTEX_TOTAL_SIZE);
+ unregister_netdev(dev);
+ iowrite16(TotalReset|0x14, ioaddr + EL3_CMD);
+ release_region(dev->base_addr, VORTEX_TOTAL_SIZE);
- free_netdev (dev);
+ free_netdev(dev);
return 0;
}
#endif
/* returns count found (>= 0), or negative on error */
-static int __init vortex_eisa_init (void)
+static int __init vortex_eisa_init(void)
{
int eisa_found = 0;
int orig_cards_found = vortex_cards_found;
@@ -1121,7 +1114,7 @@ static int __init vortex_eisa_init (void)
}
/* returns count (>= 0), or negative on error */
-static int __devinit vortex_init_one (struct pci_dev *pdev,
+static int __devinit vortex_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
int rc, unit, pci_bar;
@@ -1129,7 +1122,7 @@ static int __devinit vortex_init_one (struct pci_dev *pdev,
void __iomem *ioaddr;
/* wake up and enable device */
- rc = pci_enable_device (pdev);
+ rc = pci_enable_device(pdev);
if (rc < 0)
goto out;
@@ -1151,7 +1144,7 @@ static int __devinit vortex_init_one (struct pci_dev *pdev,
rc = vortex_probe1(&pdev->dev, ioaddr, pdev->irq,
ent->driver_data, unit);
if (rc < 0) {
- pci_disable_device (pdev);
+ pci_disable_device(pdev);
goto out;
}
@@ -1236,7 +1229,7 @@ static int __devinit vortex_probe1(struct device *gendev,
if (print_info)
printk (KERN_INFO "See Documentation/networking/vortex.txt\n");
- printk(KERN_INFO "%s: 3Com %s %s at %p. Vers " DRV_VERSION "\n",
+ printk(KERN_INFO "%s: 3Com %s %s at %p.\n",
print_name,
pdev ? "PCI" : "EISA",
vci->name,
@@ -1266,7 +1259,7 @@ static int __devinit vortex_probe1(struct device *gendev,
/* enable bus-mastering if necessary */
if (vci->flags & PCI_USES_MASTER)
- pci_set_master (pdev);
+ pci_set_master(pdev);
if (vci->drv_flags & IS_VORTEX) {
u8 pci_latency;
@@ -1310,7 +1303,7 @@ static int __devinit vortex_probe1(struct device *gendev,
if (pdev)
pci_set_drvdata(pdev, dev);
if (edev)
- eisa_set_drvdata (edev, dev);
+ eisa_set_drvdata(edev, dev);
vp->media_override = 7;
if (option >= 0) {
@@ -1335,7 +1328,7 @@ static int __devinit vortex_probe1(struct device *gendev,
vp->enable_wol = 1;
}
- vp->force_fd = vp->full_duplex;
+ vp->mii.force_media = vp->full_duplex;
vp->options = option;
/* Read the station address from the EEPROM. */
EL3WINDOW(0);
@@ -1625,6 +1618,46 @@ issue_and_wait(struct net_device *dev, int cmd)
}
static void
+vortex_set_duplex(struct net_device *dev)
+{
+ struct vortex_private *vp = netdev_priv(dev);
+ void __iomem *ioaddr = vp->ioaddr;
+
+ printk(KERN_INFO "%s: setting %s-duplex.\n",
+ dev->name, (vp->full_duplex) ? "full" : "half");
+
+ EL3WINDOW(3);
+ /* Set the full-duplex bit. */
+ iowrite16(((vp->info1 & 0x8000) || vp->full_duplex ? 0x20 : 0) |
+ (vp->large_frames ? 0x40 : 0) |
+ ((vp->full_duplex && vp->flow_ctrl && vp->partner_flow_ctrl) ?
+ 0x100 : 0),
+ ioaddr + Wn3_MAC_Ctrl);
+
+ issue_and_wait(dev, TxReset);
+ /*
+ * Don't reset the PHY - that upsets autonegotiation during DHCP operations.
+ */
+ issue_and_wait(dev, RxReset|0x04);
+}
+
+static void vortex_check_media(struct net_device *dev, unsigned int init)
+{
+ struct vortex_private *vp = netdev_priv(dev);
+ unsigned int ok_to_print = 0;
+
+ if (vortex_debug > 3)
+ ok_to_print = 1;
+
+ if (mii_check_media(&vp->mii, ok_to_print, init)) {
+ vp->full_duplex = vp->mii.full_duplex;
+ vortex_set_duplex(dev);
+ } else if (init) {
+ vortex_set_duplex(dev);
+ }
+}
+
+static void
vortex_up(struct net_device *dev)
{
struct vortex_private *vp = netdev_priv(dev);
@@ -1684,53 +1717,20 @@ vortex_up(struct net_device *dev)
printk(KERN_DEBUG "%s: Initial media type %s.\n",
dev->name, media_tbl[dev->if_port].name);
- vp->full_duplex = vp->force_fd;
+ vp->full_duplex = vp->mii.force_media;
config = BFINS(config, dev->if_port, 20, 4);
if (vortex_debug > 6)
printk(KERN_DEBUG "vortex_up(): writing 0x%x to InternalConfig\n", config);
iowrite32(config, ioaddr + Wn3_Config);
+ netif_carrier_off(dev);
if (dev->if_port == XCVR_MII || dev->if_port == XCVR_NWAY) {
- int mii_reg1, mii_reg5;
EL3WINDOW(4);
- /* Read BMSR (reg1) only to clear old status. */
- mii_reg1 = mdio_read(dev, vp->phys[0], MII_BMSR);
- mii_reg5 = mdio_read(dev, vp->phys[0], MII_LPA);
- if (mii_reg5 == 0xffff || mii_reg5 == 0x0000) {
- netif_carrier_off(dev); /* No MII device or no link partner report */
- } else {
- mii_reg5 &= vp->advertising;
- if ((mii_reg5 & 0x0100) != 0 /* 100baseTx-FD */
- || (mii_reg5 & 0x00C0) == 0x0040) /* 10T-FD, but not 100-HD */
- vp->full_duplex = 1;
- netif_carrier_on(dev);
- }
- vp->partner_flow_ctrl = ((mii_reg5 & 0x0400) != 0);
- if (vortex_debug > 1)
- printk(KERN_INFO "%s: MII #%d status %4.4x, link partner capability %4.4x,"
- " info1 %04x, setting %s-duplex.\n",
- dev->name, vp->phys[0],
- mii_reg1, mii_reg5,
- vp->info1, ((vp->info1 & 0x8000) || vp->full_duplex) ? "full" : "half");
- EL3WINDOW(3);
- }
-
- /* Set the full-duplex bit. */
- iowrite16( ((vp->info1 & 0x8000) || vp->full_duplex ? 0x20 : 0) |
- (vp->large_frames ? 0x40 : 0) |
- ((vp->full_duplex && vp->flow_ctrl && vp->partner_flow_ctrl) ? 0x100 : 0),
- ioaddr + Wn3_MAC_Ctrl);
-
- if (vortex_debug > 1) {
- printk(KERN_DEBUG "%s: vortex_up() InternalConfig %8.8x.\n",
- dev->name, config);
+ vortex_check_media(dev, 1);
}
+ else
+ vortex_set_duplex(dev);
- issue_and_wait(dev, TxReset);
- /*
- * Don't reset the PHY - that upsets autonegotiation during DHCP operations.
- */
- issue_and_wait(dev, RxReset|0x04);
iowrite16(SetStatusEnb | 0x00, ioaddr + EL3_CMD);
@@ -1805,7 +1805,6 @@ vortex_up(struct net_device *dev)
set_8021q_mode(dev, 1);
iowrite16(StatsEnable, ioaddr + EL3_CMD); /* Turn on statistics. */
-// issue_and_wait(dev, SetTxStart|0x07ff);
iowrite16(RxEnable, ioaddr + EL3_CMD); /* Enable the receiver. */
iowrite16(TxEnable, ioaddr + EL3_CMD); /* Enable transmitter. */
/* Allow status bits to be seen. */
@@ -1892,7 +1891,7 @@ vortex_timer(unsigned long data)
void __iomem *ioaddr = vp->ioaddr;
int next_tick = 60*HZ;
int ok = 0;
- int media_status, mii_status, old_window;
+ int media_status, old_window;
if (vortex_debug > 2) {
printk(KERN_DEBUG "%s: Media selection timer tick happened, %s.\n",
@@ -1900,8 +1899,6 @@ vortex_timer(unsigned long data)
printk(KERN_DEBUG "dev->watchdog_timeo=%d\n", dev->watchdog_timeo);
}
- if (vp->medialock)
- goto leave_media_alone;
disable_irq(dev->irq);
old_window = ioread16(ioaddr + EL3_CMD) >> 13;
EL3WINDOW(4);
@@ -1924,44 +1921,9 @@ vortex_timer(unsigned long data)
break;
case XCVR_MII: case XCVR_NWAY:
{
- spin_lock_bh(&vp->lock);
- mii_status = mdio_read(dev, vp->phys[0], MII_BMSR);
- if (!(mii_status & BMSR_LSTATUS)) {
- /* Re-read to get actual link status */
- mii_status = mdio_read(dev, vp->phys[0], MII_BMSR);
- }
ok = 1;
- if (vortex_debug > 2)
- printk(KERN_DEBUG "%s: MII transceiver has status %4.4x.\n",
- dev->name, mii_status);
- if (mii_status & BMSR_LSTATUS) {
- int mii_reg5 = mdio_read(dev, vp->phys[0], MII_LPA);
- if (! vp->force_fd && mii_reg5 != 0xffff) {
- int duplex;
-
- mii_reg5 &= vp->advertising;
- duplex = (mii_reg5&0x0100) || (mii_reg5 & 0x01C0) == 0x0040;
- if (vp->full_duplex != duplex) {
- vp->full_duplex = duplex;
- printk(KERN_INFO "%s: Setting %s-duplex based on MII "
- "#%d link partner capability of %4.4x.\n",
- dev->name, vp->full_duplex ? "full" : "half",
- vp->phys[0], mii_reg5);
- /* Set the full-duplex bit. */
- EL3WINDOW(3);
- iowrite16( (vp->full_duplex ? 0x20 : 0) |
- (vp->large_frames ? 0x40 : 0) |
- ((vp->full_duplex && vp->flow_ctrl && vp->partner_flow_ctrl) ? 0x100 : 0),
- ioaddr + Wn3_MAC_Ctrl);
- if (vortex_debug > 1)
- printk(KERN_DEBUG "Setting duplex in Wn3_MAC_Ctrl\n");
- /* AKPM: bug: should reset Tx and Rx after setting Duplex. Page 180 */
- }
- }
- netif_carrier_on(dev);
- } else {
- netif_carrier_off(dev);
- }
+ spin_lock_bh(&vp->lock);
+ vortex_check_media(dev, 0);
spin_unlock_bh(&vp->lock);
}
break;
@@ -1971,7 +1933,14 @@ vortex_timer(unsigned long data)
dev->name, media_tbl[dev->if_port].name, media_status);
ok = 1;
}
- if ( ! ok) {
+
+ if (!netif_carrier_ok(dev))
+ next_tick = 5*HZ;
+
+ if (vp->medialock)
+ goto leave_media_alone;
+
+ if (!ok) {
unsigned int config;
do {
@@ -2004,14 +1973,14 @@ vortex_timer(unsigned long data)
printk(KERN_DEBUG "wrote 0x%08x to Wn3_Config\n", config);
/* AKPM: FIXME: Should reset Rx & Tx here. P60 of 3c90xc.pdf */
}
- EL3WINDOW(old_window);
- enable_irq(dev->irq);
leave_media_alone:
if (vortex_debug > 2)
printk(KERN_DEBUG "%s: Media selection timer finished, %s.\n",
dev->name, media_tbl[dev->if_port].name);
+ EL3WINDOW(old_window);
+ enable_irq(dev->irq);
mod_timer(&vp->timer, RUN_AT(next_tick));
if (vp->deferred)
iowrite16(FakeIntr, ioaddr + EL3_CMD);
@@ -2206,7 +2175,7 @@ vortex_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (vp->bus_master) {
/* Set the bus-master controller to transfer the packet. */
int len = (skb->len + 3) & ~3;
- iowrite32( vp->tx_skb_dma = pci_map_single(VORTEX_PCI(vp), skb->data, len, PCI_DMA_TODEVICE),
+ iowrite32(vp->tx_skb_dma = pci_map_single(VORTEX_PCI(vp), skb->data, len, PCI_DMA_TODEVICE),
ioaddr + Wn7_MasterAddr);
iowrite16(len, ioaddr + Wn7_MasterLen);
vp->tx_skb = skb;
@@ -2983,20 +2952,6 @@ static int vortex_nway_reset(struct net_device *dev)
return rc;
}
-static u32 vortex_get_link(struct net_device *dev)
-{
- struct vortex_private *vp = netdev_priv(dev);
- void __iomem *ioaddr = vp->ioaddr;
- unsigned long flags;
- int rc;
-
- spin_lock_irqsave(&vp->lock, flags);
- EL3WINDOW(4);
- rc = mii_link_ok(&vp->mii);
- spin_unlock_irqrestore(&vp->lock, flags);
- return rc;
-}
-
static int vortex_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct vortex_private *vp = netdev_priv(dev);
@@ -3077,7 +3032,6 @@ static void vortex_get_drvinfo(struct net_device *dev,
struct vortex_private *vp = netdev_priv(dev);
strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
if (VORTEX_PCI(vp)) {
strcpy(info->bus_info, pci_name(VORTEX_PCI(vp)));
} else {
@@ -3098,9 +3052,9 @@ static struct ethtool_ops vortex_ethtool_ops = {
.get_stats_count = vortex_get_stats_count,
.get_settings = vortex_get_settings,
.set_settings = vortex_set_settings,
- .get_link = vortex_get_link,
+ .get_link = ethtool_op_get_link,
.nway_reset = vortex_nway_reset,
- .get_perm_addr = ethtool_op_get_perm_addr,
+ .get_perm_addr = ethtool_op_get_perm_addr,
};
#ifdef CONFIG_PCI
@@ -3301,7 +3255,7 @@ static void mdio_write(struct net_device *dev, int phy_id, int location, int val
}
return;
}
-
+
/* ACPI: Advanced Configuration and Power Interface. */
/* Set Wake-On-LAN mode and put the board into D3 (power-down) state. */
static void acpi_set_WOL(struct net_device *dev)
@@ -3325,7 +3279,7 @@ static void acpi_set_WOL(struct net_device *dev)
}
-static void __devexit vortex_remove_one (struct pci_dev *pdev)
+static void __devexit vortex_remove_one(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
struct vortex_private *vp;
@@ -3381,7 +3335,7 @@ static int vortex_have_pci;
static int vortex_have_eisa;
-static int __init vortex_init (void)
+static int __init vortex_init(void)
{
int pci_rc, eisa_rc;
@@ -3397,14 +3351,14 @@ static int __init vortex_init (void)
}
-static void __exit vortex_eisa_cleanup (void)
+static void __exit vortex_eisa_cleanup(void)
{
struct vortex_private *vp;
void __iomem *ioaddr;
#ifdef CONFIG_EISA
/* Take care of the EISA devices */
- eisa_driver_unregister (&vortex_eisa_driver);
+ eisa_driver_unregister(&vortex_eisa_driver);
#endif
if (compaq_net_device) {
@@ -3412,33 +3366,24 @@ static void __exit vortex_eisa_cleanup (void)
ioaddr = ioport_map(compaq_net_device->base_addr,
VORTEX_TOTAL_SIZE);
- unregister_netdev (compaq_net_device);
- iowrite16 (TotalReset, ioaddr + EL3_CMD);
+ unregister_netdev(compaq_net_device);
+ iowrite16(TotalReset, ioaddr + EL3_CMD);
release_region(compaq_net_device->base_addr,
VORTEX_TOTAL_SIZE);
- free_netdev (compaq_net_device);
+ free_netdev(compaq_net_device);
}
}
-static void __exit vortex_cleanup (void)
+static void __exit vortex_cleanup(void)
{
if (vortex_have_pci)
- pci_unregister_driver (&vortex_driver);
+ pci_unregister_driver(&vortex_driver);
if (vortex_have_eisa)
- vortex_eisa_cleanup ();
+ vortex_eisa_cleanup();
}
module_init(vortex_init);
module_exit(vortex_cleanup);
-
-
-/*
- * Local variables:
- * c-indent-level: 4
- * c-basic-offset: 4
- * tab-width: 4
- * End:
- */
diff --git a/drivers/net/sis900.c b/drivers/net/sis900.c
index 253440a9802..8429ceb0138 100644
--- a/drivers/net/sis900.c
+++ b/drivers/net/sis900.c
@@ -1693,7 +1693,7 @@ static irqreturn_t sis900_interrupt(int irq, void *dev_instance, struct pt_regs
*
* Process receive interrupt events,
* put buffer to higher layer and refill buffer pool
- * Note: This fucntion is called by interrupt handler,
+ * Note: This function is called by interrupt handler,
* don't do "too much" work here
*/
@@ -1840,7 +1840,7 @@ static int sis900_rx(struct net_device *net_dev)
*
* Check for error condition and free socket buffer etc
* schedule for more transmission as needed
- * Note: This fucntion is called by interrupt handler,
+ * Note: This function is called by interrupt handler,
* don't do "too much" work here
*/
diff --git a/drivers/net/wan/dscc4.c b/drivers/net/wan/dscc4.c
index 1ff5de076d2..4505540e3c5 100644
--- a/drivers/net/wan/dscc4.c
+++ b/drivers/net/wan/dscc4.c
@@ -105,6 +105,7 @@
#include <linux/delay.h>
#include <net/syncppp.h>
#include <linux/hdlc.h>
+#include <linux/mutex.h>
/* Version */
static const char version[] = "$Id: dscc4.c,v 1.173 2003/09/20 23:55:34 romieu Exp $ for Linux\n";
@@ -112,7 +113,7 @@ static int debug;
static int quartz;
#ifdef CONFIG_DSCC4_PCI_RST
-static DECLARE_MUTEX(dscc4_sem);
+static DEFINE_MUTEX(dscc4_mutex);
static u32 dscc4_pci_config_store[16];
#endif
@@ -1018,7 +1019,7 @@ static void dscc4_pci_reset(struct pci_dev *pdev, void __iomem *ioaddr)
{
int i;
- down(&dscc4_sem);
+ mutex_lock(&dscc4_mutex);
for (i = 0; i < 16; i++)
pci_read_config_dword(pdev, i << 2, dscc4_pci_config_store + i);
@@ -1039,7 +1040,7 @@ static void dscc4_pci_reset(struct pci_dev *pdev, void __iomem *ioaddr)
for (i = 0; i < 16; i++)
pci_write_config_dword(pdev, i << 2, dscc4_pci_config_store[i]);
- up(&dscc4_sem);
+ mutex_unlock(&dscc4_mutex);
}
#else
#define dscc4_pci_reset(pdev,ioaddr) do {} while (0)
diff --git a/drivers/parport/share.c b/drivers/parport/share.c
index ea62bed6bc8..bbbfd79adba 100644
--- a/drivers/parport/share.c
+++ b/drivers/parport/share.c
@@ -32,6 +32,7 @@
#include <linux/kmod.h>
#include <linux/spinlock.h>
+#include <linux/mutex.h>
#include <asm/irq.h>
#undef PARPORT_PARANOID
@@ -50,7 +51,7 @@ static DEFINE_SPINLOCK(full_list_lock);
static LIST_HEAD(drivers);
-static DECLARE_MUTEX(registration_lock);
+static DEFINE_MUTEX(registration_lock);
/* What you can do to a port that's gone away.. */
static void dead_write_lines (struct parport *p, unsigned char b){}
@@ -158,11 +159,11 @@ int parport_register_driver (struct parport_driver *drv)
if (list_empty(&portlist))
get_lowlevel_driver ();
- down(&registration_lock);
+ mutex_lock(&registration_lock);
list_for_each_entry(port, &portlist, list)
drv->attach(port);
list_add(&drv->list, &drivers);
- up(&registration_lock);
+ mutex_unlock(&registration_lock);
return 0;
}
@@ -188,11 +189,11 @@ void parport_unregister_driver (struct parport_driver *drv)
{
struct parport *port;
- down(&registration_lock);
+ mutex_lock(&registration_lock);
list_del_init(&drv->list);
list_for_each_entry(port, &portlist, list)
drv->detach(port);
- up(&registration_lock);
+ mutex_unlock(&registration_lock);
}
static void free_port (struct parport *port)
@@ -366,7 +367,7 @@ void parport_announce_port (struct parport *port)
#endif
parport_proc_register(port);
- down(&registration_lock);
+ mutex_lock(&registration_lock);
spin_lock_irq(&parportlist_lock);
list_add_tail(&port->list, &portlist);
for (i = 1; i < 3; i++) {
@@ -383,7 +384,7 @@ void parport_announce_port (struct parport *port)
if (slave)
attach_driver_chain(slave);
}
- up(&registration_lock);
+ mutex_unlock(&registration_lock);
}
/**
@@ -409,7 +410,7 @@ void parport_remove_port(struct parport *port)
{
int i;
- down(&registration_lock);
+ mutex_lock(&registration_lock);
/* Spread the word. */
detach_driver_chain (port);
@@ -436,7 +437,7 @@ void parport_remove_port(struct parport *port)
}
spin_unlock(&parportlist_lock);
- up(&registration_lock);
+ mutex_unlock(&registration_lock);
parport_proc_unregister(port);
diff --git a/drivers/pci/hotplug/rpadlpar_core.c b/drivers/pci/hotplug/rpadlpar_core.c
index 3eefe2cec72..46825fee3ae 100644
--- a/drivers/pci/hotplug/rpadlpar_core.c
+++ b/drivers/pci/hotplug/rpadlpar_core.c
@@ -19,7 +19,7 @@
#include <linux/string.h>
#include <asm/pci-bridge.h>
-#include <asm/semaphore.h>
+#include <linux/mutex.h>
#include <asm/rtas.h>
#include <asm/vio.h>
@@ -27,7 +27,7 @@
#include "rpaphp.h"
#include "rpadlpar.h"
-static DECLARE_MUTEX(rpadlpar_sem);
+static DEFINE_MUTEX(rpadlpar_mutex);
#define DLPAR_MODULE_NAME "rpadlpar_io"
@@ -300,7 +300,7 @@ int dlpar_add_slot(char *drc_name)
int node_type;
int rc = -EIO;
- if (down_interruptible(&rpadlpar_sem))
+ if (mutex_lock_interruptible(&rpadlpar_mutex))
return -ERESTARTSYS;
/* Find newly added node */
@@ -324,7 +324,7 @@ int dlpar_add_slot(char *drc_name)
printk(KERN_INFO "%s: slot %s added\n", DLPAR_MODULE_NAME, drc_name);
exit:
- up(&rpadlpar_sem);
+ mutex_unlock(&rpadlpar_mutex);
return rc;
}
@@ -417,7 +417,7 @@ int dlpar_remove_slot(char *drc_name)
int node_type;
int rc = 0;
- if (down_interruptible(&rpadlpar_sem))
+ if (mutex_lock_interruptible(&rpadlpar_mutex))
return -ERESTARTSYS;
dn = find_dlpar_node(drc_name, &node_type);
@@ -439,7 +439,7 @@ int dlpar_remove_slot(char *drc_name)
}
printk(KERN_INFO "%s: slot %s removed\n", DLPAR_MODULE_NAME, drc_name);
exit:
- up(&rpadlpar_sem);
+ mutex_unlock(&rpadlpar_mutex);
return rc;
}
diff --git a/drivers/pci/hotplug/sgi_hotplug.c b/drivers/pci/hotplug/sgi_hotplug.c
index c402da8e78a..8cb9abde736 100644
--- a/drivers/pci/hotplug/sgi_hotplug.c
+++ b/drivers/pci/hotplug/sgi_hotplug.c
@@ -15,6 +15,7 @@
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/types.h>
+#include <linux/mutex.h>
#include <asm/sn/addrs.h>
#include <asm/sn/l1.h>
@@ -81,7 +82,7 @@ static struct hotplug_slot_ops sn_hotplug_slot_ops = {
.get_power_status = get_power_status,
};
-static DECLARE_MUTEX(sn_hotplug_sem);
+static DEFINE_MUTEX(sn_hotplug_mutex);
static ssize_t path_show (struct hotplug_slot *bss_hotplug_slot,
char *buf)
@@ -346,7 +347,7 @@ static int enable_slot(struct hotplug_slot *bss_hotplug_slot)
int rc;
/* Serialize the Linux PCI infrastructure */
- down(&sn_hotplug_sem);
+ mutex_lock(&sn_hotplug_mutex);
/*
* Power-on and initialize the slot in the SN
@@ -354,7 +355,7 @@ static int enable_slot(struct hotplug_slot *bss_hotplug_slot)
*/
rc = sn_slot_enable(bss_hotplug_slot, slot->device_num);
if (rc) {
- up(&sn_hotplug_sem);
+ mutex_unlock(&sn_hotplug_mutex);
return rc;
}
@@ -362,7 +363,7 @@ static int enable_slot(struct hotplug_slot *bss_hotplug_slot)
PCI_DEVFN(slot->device_num + 1, 0));
if (!num_funcs) {
dev_dbg(slot->pci_bus->self, "no device in slot\n");
- up(&sn_hotplug_sem);
+ mutex_unlock(&sn_hotplug_mutex);
return -ENODEV;
}
@@ -402,7 +403,7 @@ static int enable_slot(struct hotplug_slot *bss_hotplug_slot)
if (new_ppb)
pci_bus_add_devices(new_bus);
- up(&sn_hotplug_sem);
+ mutex_unlock(&sn_hotplug_mutex);
if (rc == 0)
dev_dbg(slot->pci_bus->self,
@@ -422,7 +423,7 @@ static int disable_slot(struct hotplug_slot *bss_hotplug_slot)
int rc;
/* Acquire update access to the bus */
- down(&sn_hotplug_sem);
+ mutex_lock(&sn_hotplug_mutex);
/* is it okay to bring this slot down? */
rc = sn_slot_disable(bss_hotplug_slot, slot->device_num,
@@ -450,7 +451,7 @@ static int disable_slot(struct hotplug_slot *bss_hotplug_slot)
PCI_REQ_SLOT_DISABLE);
leaving:
/* Release the bus lock */
- up(&sn_hotplug_sem);
+ mutex_unlock(&sn_hotplug_mutex);
return rc;
}
@@ -462,9 +463,9 @@ static inline int get_power_status(struct hotplug_slot *bss_hotplug_slot,
struct pcibus_info *pcibus_info;
pcibus_info = SN_PCIBUS_BUSSOFT_INFO(slot->pci_bus);
- down(&sn_hotplug_sem);
+ mutex_lock(&sn_hotplug_mutex);
*value = pcibus_info->pbi_enabled_devices & (1 << slot->device_num);
- up(&sn_hotplug_sem);
+ mutex_unlock(&sn_hotplug_mutex);
return 0;
}
diff --git a/drivers/pnp/isapnp/core.c b/drivers/pnp/isapnp/core.c
index b1b4b683cbd..ac7c2bb6c69 100644
--- a/drivers/pnp/isapnp/core.c
+++ b/drivers/pnp/isapnp/core.c
@@ -42,6 +42,7 @@
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/isapnp.h>
+#include <linux/mutex.h>
#include <asm/io.h>
#if 0
@@ -92,7 +93,7 @@ MODULE_LICENSE("GPL");
#define _LTAG_FIXEDMEM32RANGE 0x86
static unsigned char isapnp_checksum_value;
-static DECLARE_MUTEX(isapnp_cfg_mutex);
+static DEFINE_MUTEX(isapnp_cfg_mutex);
static int isapnp_detected;
static int isapnp_csn_count;
@@ -903,7 +904,7 @@ int isapnp_cfg_begin(int csn, int logdev)
{
if (csn < 1 || csn > isapnp_csn_count || logdev > 10)
return -EINVAL;
- down(&isapnp_cfg_mutex);
+ mutex_lock(&isapnp_cfg_mutex);
isapnp_wait();
isapnp_key();
isapnp_wake(csn);
@@ -929,7 +930,7 @@ int isapnp_cfg_begin(int csn, int logdev)
int isapnp_cfg_end(void)
{
isapnp_wait();
- up(&isapnp_cfg_mutex);
+ mutex_unlock(&isapnp_cfg_mutex);
return 0;
}
diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c
index bd06607a5dc..eecb2afad5c 100644
--- a/drivers/s390/char/raw3270.c
+++ b/drivers/s390/char/raw3270.c
@@ -28,6 +28,7 @@
#include <linux/major.h>
#include <linux/kdev_t.h>
#include <linux/device.h>
+#include <linux/mutex.h>
struct class *class3270;
@@ -59,7 +60,7 @@ struct raw3270 {
#define RAW3270_FLAGS_CONSOLE 8 /* Device is the console. */
/* Semaphore to protect global data of raw3270 (devices, views, etc). */
-static DECLARE_MUTEX(raw3270_sem);
+static DEFINE_MUTEX(raw3270_mutex);
/* List of 3270 devices. */
static struct list_head raw3270_devices = LIST_HEAD_INIT(raw3270_devices);
@@ -815,7 +816,7 @@ raw3270_setup_device(struct ccw_device *cdev, struct raw3270 *rp, char *ascebc)
* number for it. Note: there is no device with minor 0,
* see special case for fs3270.c:fs3270_open().
*/
- down(&raw3270_sem);
+ mutex_lock(&raw3270_mutex);
/* Keep the list sorted. */
minor = RAW3270_FIRSTMINOR;
rp->minor = -1;
@@ -832,7 +833,7 @@ raw3270_setup_device(struct ccw_device *cdev, struct raw3270 *rp, char *ascebc)
rp->minor = minor;
list_add_tail(&rp->list, &raw3270_devices);
}
- up(&raw3270_sem);
+ mutex_unlock(&raw3270_mutex);
/* No free minor number? Then give up. */
if (rp->minor == -1)
return -EUSERS;
@@ -1003,7 +1004,7 @@ raw3270_add_view(struct raw3270_view *view, struct raw3270_fn *fn, int minor)
if (minor <= 0)
return -ENODEV;
- down(&raw3270_sem);
+ mutex_lock(&raw3270_mutex);
rc = -ENODEV;
list_for_each_entry(rp, &raw3270_devices, list) {
if (rp->minor != minor)
@@ -1024,7 +1025,7 @@ raw3270_add_view(struct raw3270_view *view, struct raw3270_fn *fn, int minor)
spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
break;
}
- up(&raw3270_sem);
+ mutex_unlock(&raw3270_mutex);
return rc;
}
@@ -1038,7 +1039,7 @@ raw3270_find_view(struct raw3270_fn *fn, int minor)
struct raw3270_view *view, *tmp;
unsigned long flags;
- down(&raw3270_sem);
+ mutex_lock(&raw3270_mutex);
view = ERR_PTR(-ENODEV);
list_for_each_entry(rp, &raw3270_devices, list) {
if (rp->minor != minor)
@@ -1057,7 +1058,7 @@ raw3270_find_view(struct raw3270_fn *fn, int minor)
spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
break;
}
- up(&raw3270_sem);
+ mutex_unlock(&raw3270_mutex);
return view;
}
@@ -1104,7 +1105,7 @@ raw3270_delete_device(struct raw3270 *rp)
struct ccw_device *cdev;
/* Remove from device chain. */
- down(&raw3270_sem);
+ mutex_lock(&raw3270_mutex);
if (rp->clttydev)
class_device_destroy(class3270,
MKDEV(IBM_TTY3270_MAJOR, rp->minor));
@@ -1112,7 +1113,7 @@ raw3270_delete_device(struct raw3270 *rp)
class_device_destroy(class3270,
MKDEV(IBM_FS3270_MAJOR, rp->minor));
list_del_init(&rp->list);
- up(&raw3270_sem);
+ mutex_unlock(&raw3270_mutex);
/* Disconnect from ccw_device. */
cdev = rp->cdev;
@@ -1208,13 +1209,13 @@ int raw3270_register_notifier(void (*notifier)(int, int))
if (!np)
return -ENOMEM;
np->notifier = notifier;
- down(&raw3270_sem);
+ mutex_lock(&raw3270_mutex);
list_add_tail(&np->list, &raw3270_notifier);
list_for_each_entry(rp, &raw3270_devices, list) {
get_device(&rp->cdev->dev);
notifier(rp->minor, 1);
}
- up(&raw3270_sem);
+ mutex_unlock(&raw3270_mutex);
return 0;
}
@@ -1222,14 +1223,14 @@ void raw3270_unregister_notifier(void (*notifier)(int, int))
{
struct raw3270_notifier *np;
- down(&raw3270_sem);
+ mutex_lock(&raw3270_mutex);
list_for_each_entry(np, &raw3270_notifier, list)
if (np->notifier == notifier) {
list_del(&np->list);
kfree(np);
break;
}
- up(&raw3270_sem);
+ mutex_unlock(&raw3270_mutex);
}
/*
@@ -1256,10 +1257,10 @@ raw3270_set_online (struct ccw_device *cdev)
goto failure;
raw3270_create_attributes(rp);
set_bit(RAW3270_FLAGS_READY, &rp->flags);
- down(&raw3270_sem);
+ mutex_lock(&raw3270_mutex);
list_for_each_entry(np, &raw3270_notifier, list)
np->notifier(rp->minor, 1);
- up(&raw3270_sem);
+ mutex_unlock(&raw3270_mutex);
return 0;
failure:
@@ -1307,10 +1308,10 @@ raw3270_remove (struct ccw_device *cdev)
}
spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
- down(&raw3270_sem);
+ mutex_lock(&raw3270_mutex);
list_for_each_entry(np, &raw3270_notifier, list)
np->notifier(rp->minor, 0);
- up(&raw3270_sem);
+ mutex_unlock(&raw3270_mutex);
/* Reset 3270 device. */
raw3270_reset_device(rp);
@@ -1370,13 +1371,13 @@ raw3270_init(void)
rc = ccw_driver_register(&raw3270_ccw_driver);
if (rc == 0) {
/* Create attributes for early (= console) device. */
- down(&raw3270_sem);
+ mutex_lock(&raw3270_mutex);
class3270 = class_create(THIS_MODULE, "3270");
list_for_each_entry(rp, &raw3270_devices, list) {
get_device(&rp->cdev->dev);
raw3270_create_attributes(rp);
}
- up(&raw3270_sem);
+ mutex_unlock(&raw3270_mutex);
}
return rc;
}
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index 95b92f317b6..395cfc6a344 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -829,18 +829,6 @@ zfcp_unit_dequeue(struct zfcp_unit *unit)
device_unregister(&unit->sysfs_device);
}
-static void *
-zfcp_mempool_alloc(gfp_t gfp_mask, void *size)
-{
- return kmalloc((size_t) size, gfp_mask);
-}
-
-static void
-zfcp_mempool_free(void *element, void *size)
-{
- kfree(element);
-}
-
/*
* Allocates a combined QTCB/fsf_req buffer for erp actions and fcp/SCSI
* commands.
@@ -853,51 +841,39 @@ static int
zfcp_allocate_low_mem_buffers(struct zfcp_adapter *adapter)
{
adapter->pool.fsf_req_erp =
- mempool_create(ZFCP_POOL_FSF_REQ_ERP_NR,
- zfcp_mempool_alloc, zfcp_mempool_free, (void *)
- sizeof(struct zfcp_fsf_req_pool_element));
-
- if (NULL == adapter->pool.fsf_req_erp)
+ mempool_create_kmalloc_pool(ZFCP_POOL_FSF_REQ_ERP_NR,
+ sizeof(struct zfcp_fsf_req_pool_element));
+ if (!adapter->pool.fsf_req_erp)
return -ENOMEM;
adapter->pool.fsf_req_scsi =
- mempool_create(ZFCP_POOL_FSF_REQ_SCSI_NR,
- zfcp_mempool_alloc, zfcp_mempool_free, (void *)
- sizeof(struct zfcp_fsf_req_pool_element));
-
- if (NULL == adapter->pool.fsf_req_scsi)
+ mempool_create_kmalloc_pool(ZFCP_POOL_FSF_REQ_SCSI_NR,
+ sizeof(struct zfcp_fsf_req_pool_element));
+ if (!adapter->pool.fsf_req_scsi)
return -ENOMEM;
adapter->pool.fsf_req_abort =
- mempool_create(ZFCP_POOL_FSF_REQ_ABORT_NR,
- zfcp_mempool_alloc, zfcp_mempool_free, (void *)
- sizeof(struct zfcp_fsf_req_pool_element));
-
- if (NULL == adapter->pool.fsf_req_abort)
+ mempool_create_kmalloc_pool(ZFCP_POOL_FSF_REQ_ABORT_NR,
+ sizeof(struct zfcp_fsf_req_pool_element));
+ if (!adapter->pool.fsf_req_abort)
return -ENOMEM;
adapter->pool.fsf_req_status_read =
- mempool_create(ZFCP_POOL_STATUS_READ_NR,
- zfcp_mempool_alloc, zfcp_mempool_free,
- (void *) sizeof(struct zfcp_fsf_req));
-
- if (NULL == adapter->pool.fsf_req_status_read)
+ mempool_create_kmalloc_pool(ZFCP_POOL_STATUS_READ_NR,
+ sizeof(struct zfcp_fsf_req));
+ if (!adapter->pool.fsf_req_status_read)
return -ENOMEM;
adapter->pool.data_status_read =
- mempool_create(ZFCP_POOL_STATUS_READ_NR,
- zfcp_mempool_alloc, zfcp_mempool_free,
- (void *) sizeof(struct fsf_status_read_buffer));
-
- if (NULL == adapter->pool.data_status_read)
+ mempool_create_kmalloc_pool(ZFCP_POOL_STATUS_READ_NR,
+ sizeof(struct fsf_status_read_buffer));
+ if (!adapter->pool.data_status_read)
return -ENOMEM;
adapter->pool.data_gid_pn =
- mempool_create(ZFCP_POOL_DATA_GID_PN_NR,
- zfcp_mempool_alloc, zfcp_mempool_free, (void *)
- sizeof(struct zfcp_gid_pn_data));
-
- if (NULL == adapter->pool.data_gid_pn)
+ mempool_create_kmalloc_pool(ZFCP_POOL_DATA_GID_PN_NR,
+ sizeof(struct zfcp_gid_pn_data));
+ if (!adapter->pool.data_gid_pn)
return -ENOMEM;
return 0;
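The mempool conversions above and below drop the open-coded alloc/free callback pairs in favour of the dedicated constructors: mempool_create_kmalloc_pool() for pools of fixed-size kmalloc'd buffers and mempool_create_slab_pool() for pools backed by an existing slab cache. A hedged sketch of both forms; the pool names, element struct, and minimum counts are invented for illustration:

	#include <linux/mempool.h>
	#include <linux/slab.h>

	struct example_req {			/* made-up element type */
		int tag;
	};

	static mempool_t *req_pool;	/* kmalloc-backed, element size fixed at create time */
	static mempool_t *obj_pool;	/* slab-backed, elements come from an existing cache */

	static int example_create_pools(kmem_cache_t *obj_cache)
	{
		/* was: mempool_create(4, alloc_fn, free_fn, (void *)sizeof(struct example_req)) */
		req_pool = mempool_create_kmalloc_pool(4, sizeof(struct example_req));
		if (!req_pool)
			return -ENOMEM;

		/* was: mempool_create(4, mempool_alloc_slab, mempool_free_slab, obj_cache) */
		obj_pool = mempool_create_slab_pool(4, obj_cache);
		if (!obj_pool) {
			mempool_destroy(req_pool);
			return -ENOMEM;
		}
		return 0;
	}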
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 7b82ff090d4..2068b66822b 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -3200,8 +3200,8 @@ iscsi_r2tpool_alloc(struct iscsi_session *session)
* Data-Out PDU's within R2T-sequence can be quite big;
* using mempool
*/
- ctask->datapool = mempool_create(ISCSI_DTASK_DEFAULT_MAX,
- mempool_alloc_slab, mempool_free_slab, taskcache);
+ ctask->datapool = mempool_create_slab_pool(ISCSI_DTASK_DEFAULT_MAX,
+ taskcache);
if (ctask->datapool == NULL) {
kfifo_free(ctask->r2tqueue);
iscsi_pool_free(&ctask->r2tpool, (void**)ctask->r2ts);
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
index 352df47bcac..07017658ac5 100644
--- a/drivers/scsi/lpfc/lpfc_mem.c
+++ b/drivers/scsi/lpfc/lpfc_mem.c
@@ -38,18 +38,6 @@
#define LPFC_MBUF_POOL_SIZE 64 /* max elements in MBUF safety pool */
#define LPFC_MEM_POOL_SIZE 64 /* max elem in non-DMA safety pool */
-static void *
-lpfc_pool_kmalloc(gfp_t gfp_flags, void *data)
-{
- return kmalloc((unsigned long)data, gfp_flags);
-}
-
-static void
-lpfc_pool_kfree(void *obj, void *data)
-{
- kfree(obj);
-}
-
int
lpfc_mem_alloc(struct lpfc_hba * phba)
{
@@ -79,15 +67,13 @@ lpfc_mem_alloc(struct lpfc_hba * phba)
pool->current_count++;
}
- phba->mbox_mem_pool = mempool_create(LPFC_MEM_POOL_SIZE,
- lpfc_pool_kmalloc, lpfc_pool_kfree,
- (void *)(unsigned long)sizeof(LPFC_MBOXQ_t));
+ phba->mbox_mem_pool = mempool_create_kmalloc_pool(LPFC_MEM_POOL_SIZE,
+ sizeof(LPFC_MBOXQ_t));
if (!phba->mbox_mem_pool)
goto fail_free_mbuf_pool;
- phba->nlp_mem_pool = mempool_create(LPFC_MEM_POOL_SIZE,
- lpfc_pool_kmalloc, lpfc_pool_kfree,
- (void *)(unsigned long)sizeof(struct lpfc_nodelist));
+ phba->nlp_mem_pool = mempool_create_kmalloc_pool(LPFC_MEM_POOL_SIZE,
+ sizeof(struct lpfc_nodelist));
if (!phba->nlp_mem_pool)
goto fail_free_mbox_pool;
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 029bbf461bb..017729c59a4 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -2154,8 +2154,7 @@ qla2x00_allocate_sp_pool(scsi_qla_host_t *ha)
int rval;
rval = QLA_SUCCESS;
- ha->srb_mempool = mempool_create(SRB_MIN_REQ, mempool_alloc_slab,
- mempool_free_slab, srb_cachep);
+ ha->srb_mempool = mempool_create_slab_pool(SRB_MIN_REQ, srb_cachep);
if (ha->srb_mempool == NULL) {
qla_printk(KERN_INFO, ha, "Unable to allocate SRB mempool.\n");
rval = QLA_FUNCTION_FAILED;
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index ede158d08d9..8f010a314a3 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1787,9 +1787,8 @@ int __init scsi_init_queue(void)
sgp->name);
}
- sgp->pool = mempool_create(SG_MEMPOOL_SIZE,
- mempool_alloc_slab, mempool_free_slab,
- sgp->slab);
+ sgp->pool = mempool_create_slab_pool(SG_MEMPOOL_SIZE,
+ sgp->slab);
if (!sgp->pool) {
printk(KERN_ERR "SCSI: can't init sg mempool %s\n",
sgp->name);
diff --git a/drivers/telephony/phonedev.c b/drivers/telephony/phonedev.c
index 3c987f49f6b..7a6db1c5c8c 100644
--- a/drivers/telephony/phonedev.c
+++ b/drivers/telephony/phonedev.c
@@ -29,6 +29,7 @@
#include <linux/kmod.h>
#include <linux/sem.h>
#include <linux/devfs_fs_kernel.h>
+#include <linux/mutex.h>
#define PHONE_NUM_DEVICES 256
@@ -37,7 +38,7 @@
*/
static struct phone_device *phone_device[PHONE_NUM_DEVICES];
-static DECLARE_MUTEX(phone_lock);
+static DEFINE_MUTEX(phone_lock);
/*
* Open a phone device.
@@ -53,14 +54,14 @@ static int phone_open(struct inode *inode, struct file *file)
if (minor >= PHONE_NUM_DEVICES)
return -ENODEV;
- down(&phone_lock);
+ mutex_lock(&phone_lock);
p = phone_device[minor];
if (p)
new_fops = fops_get(p->f_op);
if (!new_fops) {
- up(&phone_lock);
+ mutex_unlock(&phone_lock);
request_module("char-major-%d-%d", PHONE_MAJOR, minor);
- down(&phone_lock);
+ mutex_lock(&phone_lock);
p = phone_device[minor];
if (p == NULL || (new_fops = fops_get(p->f_op)) == NULL)
{
@@ -78,7 +79,7 @@ static int phone_open(struct inode *inode, struct file *file)
}
fops_put(old_fops);
end:
- up(&phone_lock);
+ mutex_unlock(&phone_lock);
return err;
}
@@ -100,18 +101,18 @@ int phone_register_device(struct phone_device *p, int unit)
end = unit + 1; /* enter the loop at least one time */
}
- down(&phone_lock);
+ mutex_lock(&phone_lock);
for (i = base; i < end; i++) {
if (phone_device[i] == NULL) {
phone_device[i] = p;
p->minor = i;
devfs_mk_cdev(MKDEV(PHONE_MAJOR,i),
S_IFCHR|S_IRUSR|S_IWUSR, "phone/%d", i);
- up(&phone_lock);
+ mutex_unlock(&phone_lock);
return 0;
}
}
- up(&phone_lock);
+ mutex_unlock(&phone_lock);
return -ENFILE;
}
@@ -121,12 +122,12 @@ int phone_register_device(struct phone_device *p, int unit)
void phone_unregister_device(struct phone_device *pfd)
{
- down(&phone_lock);
+ mutex_lock(&phone_lock);
if (phone_device[pfd->minor] != pfd)
panic("phone: bad unregister");
devfs_remove("phone/%d", pfd->minor);
phone_device[pfd->minor] = NULL;
- up(&phone_lock);
+ mutex_unlock(&phone_lock);
}
diff --git a/fs/afs/file.c b/fs/afs/file.c
index 150b1922792..7bb716887e2 100644
--- a/fs/afs/file.c
+++ b/fs/afs/file.c
@@ -28,7 +28,7 @@ static int afs_file_release(struct inode *inode, struct file *file);
#endif
static int afs_file_readpage(struct file *file, struct page *page);
-static int afs_file_invalidatepage(struct page *page, unsigned long offset);
+static void afs_file_invalidatepage(struct page *page, unsigned long offset);
static int afs_file_releasepage(struct page *page, gfp_t gfp_flags);
struct inode_operations afs_file_inode_operations = {
@@ -212,7 +212,7 @@ int afs_cache_get_page_cookie(struct page *page,
/*
* invalidate part or all of a page
*/
-static int afs_file_invalidatepage(struct page *page, unsigned long offset)
+static void afs_file_invalidatepage(struct page *page, unsigned long offset)
{
int ret = 1;
@@ -238,11 +238,11 @@ static int afs_file_invalidatepage(struct page *page, unsigned long offset)
if (!PageWriteback(page))
ret = page->mapping->a_ops->releasepage(page,
0);
+ /* possibly should BUG_ON(!ret); - neilb */
}
}
_leave(" = %d", ret);
- return ret;
} /* end afs_file_invalidatepage() */
/*****************************************************************************/
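The afs hunk above (and the fs/buffer.c changes further down) switch ->invalidatepage and block_sync_page from returning int to void, since no caller looked at the result. A hedged sketch of what an address_space_operations hook looks like under the new prototype; the function and aops names are illustrative and belong to no filesystem in this patch:

	static void example_invalidatepage(struct page *page, unsigned long offset)
	{
		/* nothing to return any more; a full-page invalidation may
		 * still try to drop the page's private data */
		if (offset == 0)
			try_to_release_page(page, 0);
	}

	static struct address_space_operations example_aops = {
		.invalidatepage	= example_invalidatepage,
		/* ... readpage, writepage, etc. ... */
	};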
diff --git a/fs/bio.c b/fs/bio.c
index 73e664c01d3..eb8fbc53f2c 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -30,7 +30,7 @@
#define BIO_POOL_SIZE 256
-static kmem_cache_t *bio_slab;
+static kmem_cache_t *bio_slab __read_mostly;
#define BIOVEC_NR_POOLS 6
@@ -39,7 +39,7 @@ static kmem_cache_t *bio_slab;
* basically we just need to survive
*/
#define BIO_SPLIT_ENTRIES 8
-mempool_t *bio_split_pool;
+mempool_t *bio_split_pool __read_mostly;
struct biovec_slab {
int nr_vecs;
@@ -1125,16 +1125,6 @@ struct bio_pair *bio_split(struct bio *bi, mempool_t *pool, int first_sectors)
return bp;
}
-static void *bio_pair_alloc(gfp_t gfp_flags, void *data)
-{
- return kmalloc(sizeof(struct bio_pair), gfp_flags);
-}
-
-static void bio_pair_free(void *bp, void *data)
-{
- kfree(bp);
-}
-
/*
* create memory pools for biovec's in a bio_set.
@@ -1151,8 +1141,7 @@ static int biovec_create_pools(struct bio_set *bs, int pool_entries, int scale)
if (i >= scale)
pool_entries >>= 1;
- *bvp = mempool_create(pool_entries, mempool_alloc_slab,
- mempool_free_slab, bp->slab);
+ *bvp = mempool_create_slab_pool(pool_entries, bp->slab);
if (!*bvp)
return -ENOMEM;
}
@@ -1189,9 +1178,7 @@ struct bio_set *bioset_create(int bio_pool_size, int bvec_pool_size, int scale)
if (!bs)
return NULL;
- bs->bio_pool = mempool_create(bio_pool_size, mempool_alloc_slab,
- mempool_free_slab, bio_slab);
-
+ bs->bio_pool = mempool_create_slab_pool(bio_pool_size, bio_slab);
if (!bs->bio_pool)
goto bad;
@@ -1254,8 +1241,8 @@ static int __init init_bio(void)
if (!fs_bio_set)
panic("bio: can't allocate bios\n");
- bio_split_pool = mempool_create(BIO_SPLIT_ENTRIES,
- bio_pair_alloc, bio_pair_free, NULL);
+ bio_split_pool = mempool_create_kmalloc_pool(BIO_SPLIT_ENTRIES,
+ sizeof(struct bio_pair));
if (!bio_split_pool)
panic("bio: can't create split pool\n");
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 573fc8e0b67..5983d42df01 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -131,9 +131,10 @@ blkdev_get_block(struct inode *inode, sector_t iblock,
static int
blkdev_get_blocks(struct inode *inode, sector_t iblock,
- unsigned long max_blocks, struct buffer_head *bh, int create)
+ struct buffer_head *bh, int create)
{
sector_t end_block = max_block(I_BDEV(inode));
+ unsigned long max_blocks = bh->b_size >> inode->i_blkbits;
if ((iblock + max_blocks) > end_block) {
max_blocks = end_block - iblock;
@@ -234,7 +235,7 @@ static int block_fsync(struct file *filp, struct dentry *dentry, int datasync)
*/
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(bdev_lock);
-static kmem_cache_t * bdev_cachep;
+static kmem_cache_t * bdev_cachep __read_mostly;
static struct inode *bdev_alloc_inode(struct super_block *sb)
{
@@ -308,7 +309,7 @@ static struct file_system_type bd_type = {
.kill_sb = kill_anon_super,
};
-static struct vfsmount *bd_mnt;
+static struct vfsmount *bd_mnt __read_mostly;
struct super_block *blockdev_superblock;
void __init bdev_cache_init(void)
diff --git a/fs/buffer.c b/fs/buffer.c
index 4342ab0ad99..d597758dd12 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -426,8 +426,10 @@ __find_get_block_slow(struct block_device *bdev, sector_t block)
if (all_mapped) {
printk("__find_get_block_slow() failed. "
"block=%llu, b_blocknr=%llu\n",
- (unsigned long long)block, (unsigned long long)bh->b_blocknr);
- printk("b_state=0x%08lx, b_size=%u\n", bh->b_state, bh->b_size);
+ (unsigned long long)block,
+ (unsigned long long)bh->b_blocknr);
+ printk("b_state=0x%08lx, b_size=%zu\n",
+ bh->b_state, bh->b_size);
printk("device blocksize: %d\n", 1 << bd_inode->i_blkbits);
}
out_unlock:
@@ -1590,11 +1592,10 @@ EXPORT_SYMBOL(try_to_release_page);
* point. Because the caller is about to free (and possibly reuse) those
* blocks on-disk.
*/
-int block_invalidatepage(struct page *page, unsigned long offset)
+void block_invalidatepage(struct page *page, unsigned long offset)
{
struct buffer_head *head, *bh, *next;
unsigned int curr_off = 0;
- int ret = 1;
BUG_ON(!PageLocked(page));
if (!page_has_buffers(page))
@@ -1621,19 +1622,18 @@ int block_invalidatepage(struct page *page, unsigned long offset)
* so real IO is not possible anymore.
*/
if (offset == 0)
- ret = try_to_release_page(page, 0);
+ try_to_release_page(page, 0);
out:
- return ret;
+ return;
}
EXPORT_SYMBOL(block_invalidatepage);
-int do_invalidatepage(struct page *page, unsigned long offset)
+void do_invalidatepage(struct page *page, unsigned long offset)
{
- int (*invalidatepage)(struct page *, unsigned long);
- invalidatepage = page->mapping->a_ops->invalidatepage;
- if (invalidatepage == NULL)
- invalidatepage = block_invalidatepage;
- return (*invalidatepage)(page, offset);
+ void (*invalidatepage)(struct page *, unsigned long);
+ invalidatepage = page->mapping->a_ops->invalidatepage ? :
+ block_invalidatepage;
+ (*invalidatepage)(page, offset);
}
/*
@@ -1735,6 +1735,7 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
sector_t block;
sector_t last_block;
struct buffer_head *bh, *head;
+ const unsigned blocksize = 1 << inode->i_blkbits;
int nr_underway = 0;
BUG_ON(!PageLocked(page));
@@ -1742,7 +1743,7 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
if (!page_has_buffers(page)) {
- create_empty_buffers(page, 1 << inode->i_blkbits,
+ create_empty_buffers(page, blocksize,
(1 << BH_Dirty)|(1 << BH_Uptodate));
}
@@ -1777,6 +1778,7 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
clear_buffer_dirty(bh);
set_buffer_uptodate(bh);
} else if (!buffer_mapped(bh) && buffer_dirty(bh)) {
+ WARN_ON(bh->b_size != blocksize);
err = get_block(inode, block, bh, 1);
if (err)
goto recover;
@@ -1930,6 +1932,7 @@ static int __block_prepare_write(struct inode *inode, struct page *page,
if (buffer_new(bh))
clear_buffer_new(bh);
if (!buffer_mapped(bh)) {
+ WARN_ON(bh->b_size != blocksize);
err = get_block(inode, block, bh, 1);
if (err)
break;
@@ -2085,6 +2088,7 @@ int block_read_full_page(struct page *page, get_block_t *get_block)
fully_mapped = 0;
if (iblock < lblock) {
+ WARN_ON(bh->b_size != blocksize);
err = get_block(inode, iblock, bh, 0);
if (err)
SetPageError(page);
@@ -2406,6 +2410,7 @@ int nobh_prepare_write(struct page *page, unsigned from, unsigned to,
create = 1;
if (block_start >= to)
create = 0;
+ map_bh.b_size = blocksize;
ret = get_block(inode, block_in_file + block_in_page,
&map_bh, create);
if (ret)
@@ -2666,6 +2671,7 @@ int block_truncate_page(struct address_space *mapping,
err = 0;
if (!buffer_mapped(bh)) {
+ WARN_ON(bh->b_size != blocksize);
err = get_block(inode, iblock, bh, 0);
if (err)
goto unlock;
@@ -2752,6 +2758,7 @@ sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
struct inode *inode = mapping->host;
tmp.b_state = 0;
tmp.b_blocknr = 0;
+ tmp.b_size = 1 << inode->i_blkbits;
get_block(inode, block, &tmp, 0);
return tmp.b_blocknr;
}
@@ -3004,7 +3011,7 @@ out:
}
EXPORT_SYMBOL(try_to_free_buffers);
-int block_sync_page(struct page *page)
+void block_sync_page(struct page *page)
{
struct address_space *mapping;
@@ -3012,7 +3019,6 @@ int block_sync_page(struct page *page)
mapping = page_mapping(page);
if (mapping)
blk_run_backing_dev(mapping->backing_dev_info, page);
- return 0;
}
/*
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 221b3334b73..6b99b51d669 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -738,10 +738,8 @@ cifs_init_request_bufs(void)
cERROR(1,("cifs_min_rcv set to maximum (64)"));
}
- cifs_req_poolp = mempool_create(cifs_min_rcv,
- mempool_alloc_slab,
- mempool_free_slab,
- cifs_req_cachep);
+ cifs_req_poolp = mempool_create_slab_pool(cifs_min_rcv,
+ cifs_req_cachep);
if(cifs_req_poolp == NULL) {
kmem_cache_destroy(cifs_req_cachep);
@@ -771,10 +769,8 @@ cifs_init_request_bufs(void)
cFYI(1,("cifs_min_small set to maximum (256)"));
}
- cifs_sm_req_poolp = mempool_create(cifs_min_small,
- mempool_alloc_slab,
- mempool_free_slab,
- cifs_sm_req_cachep);
+ cifs_sm_req_poolp = mempool_create_slab_pool(cifs_min_small,
+ cifs_sm_req_cachep);
if(cifs_sm_req_poolp == NULL) {
mempool_destroy(cifs_req_poolp);
@@ -808,10 +804,8 @@ cifs_init_mids(void)
if (cifs_mid_cachep == NULL)
return -ENOMEM;
- cifs_mid_poolp = mempool_create(3 /* a reasonable min simultan opers */,
- mempool_alloc_slab,
- mempool_free_slab,
- cifs_mid_cachep);
+ /* 3 is a reasonable minimum number of simultaneous operations */
+ cifs_mid_poolp = mempool_create_slab_pool(3, cifs_mid_cachep);
if(cifs_mid_poolp == NULL) {
kmem_cache_destroy(cifs_mid_cachep);
return -ENOMEM;
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 165d6742638..fb49aef1f2e 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -1339,7 +1339,7 @@ int cifs_fsync(struct file *file, struct dentry *dentry, int datasync)
return rc;
}
-/* static int cifs_sync_page(struct page *page)
+/* static void cifs_sync_page(struct page *page)
{
struct address_space *mapping;
struct inode *inode;
@@ -1353,16 +1353,18 @@ int cifs_fsync(struct file *file, struct dentry *dentry, int datasync)
return 0;
inode = mapping->host;
if (!inode)
- return 0; */
+ return; */
/* fill in rpages then
result = cifs_pagein_inode(inode, index, rpages); */ /* BB finish */
/* cFYI(1, ("rpages is %d for sync page of Index %ld ", rpages, index));
+#if 0
if (rc < 0)
return rc;
return 0;
+#endif
} */
/*
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index ff93a9f81d1..598eec9778f 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -163,9 +163,9 @@ int cifs_get_inode_info_unix(struct inode **pinode,
if (num_of_bytes < end_of_file)
cFYI(1, ("allocation size less than end of file"));
- cFYI(1,
- ("Size %ld and blocks %ld",
- (unsigned long) inode->i_size, inode->i_blocks));
+ cFYI(1, ("Size %ld and blocks %llu",
+ (unsigned long) inode->i_size,
+ (unsigned long long)inode->i_blocks));
if (S_ISREG(inode->i_mode)) {
cFYI(1, ("File inode"));
inode->i_op = &cifs_file_inode_ops;
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index edb3b6eb34b..488bd0d81dc 100644
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -197,10 +197,10 @@ static void fill_in_inode(struct inode *tmp_inode,
if (allocation_size < end_of_file)
cFYI(1, ("May be sparse file, allocation less than file size"));
- cFYI(1,
- ("File Size %ld and blocks %ld and blocksize %ld",
- (unsigned long)tmp_inode->i_size, tmp_inode->i_blocks,
- tmp_inode->i_blksize));
+ cFYI(1, ("File Size %ld and blocks %llu and blocksize %ld",
+ (unsigned long)tmp_inode->i_size,
+ (unsigned long long)tmp_inode->i_blocks,
+ tmp_inode->i_blksize));
if (S_ISREG(tmp_inode->i_mode)) {
cFYI(1, ("File inode"));
tmp_inode->i_op = &cifs_file_inode_ops;
diff --git a/fs/cramfs/inode.c b/fs/cramfs/inode.c
index 8ad52f5bf25..acc1b2c10a8 100644
--- a/fs/cramfs/inode.c
+++ b/fs/cramfs/inode.c
@@ -22,6 +22,7 @@
#include <linux/cramfs_fs_sb.h>
#include <linux/buffer_head.h>
#include <linux/vfs.h>
+#include <linux/mutex.h>
#include <asm/semaphore.h>
#include <asm/uaccess.h>
@@ -31,7 +32,7 @@ static struct inode_operations cramfs_dir_inode_operations;
static struct file_operations cramfs_directory_operations;
static struct address_space_operations cramfs_aops;
-static DECLARE_MUTEX(read_mutex);
+static DEFINE_MUTEX(read_mutex);
/* These two macros may change in future, to provide better st_ino
@@ -250,20 +251,20 @@ static int cramfs_fill_super(struct super_block *sb, void *data, int silent)
memset(sbi, 0, sizeof(struct cramfs_sb_info));
/* Invalidate the read buffers on mount: think disk change.. */
- down(&read_mutex);
+ mutex_lock(&read_mutex);
for (i = 0; i < READ_BUFFERS; i++)
buffer_blocknr[i] = -1;
/* Read the first block and get the superblock from it */
memcpy(&super, cramfs_read(sb, 0, sizeof(super)), sizeof(super));
- up(&read_mutex);
+ mutex_unlock(&read_mutex);
/* Do sanity checks on the superblock */
if (super.magic != CRAMFS_MAGIC) {
/* check at 512 byte offset */
- down(&read_mutex);
+ mutex_lock(&read_mutex);
memcpy(&super, cramfs_read(sb, 512, sizeof(super)), sizeof(super));
- up(&read_mutex);
+ mutex_unlock(&read_mutex);
if (super.magic != CRAMFS_MAGIC) {
if (!silent)
printk(KERN_ERR "cramfs: wrong magic\n");
@@ -366,7 +367,7 @@ static int cramfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
mode_t mode;
int namelen, error;
- down(&read_mutex);
+ mutex_lock(&read_mutex);
de = cramfs_read(sb, OFFSET(inode) + offset, sizeof(*de)+256);
name = (char *)(de+1);
@@ -379,7 +380,7 @@ static int cramfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
memcpy(buf, name, namelen);
ino = CRAMINO(de);
mode = de->mode;
- up(&read_mutex);
+ mutex_unlock(&read_mutex);
nextoffset = offset + sizeof(*de) + namelen;
for (;;) {
if (!namelen) {
@@ -410,7 +411,7 @@ static struct dentry * cramfs_lookup(struct inode *dir, struct dentry *dentry, s
unsigned int offset = 0;
int sorted;
- down(&read_mutex);
+ mutex_lock(&read_mutex);
sorted = CRAMFS_SB(dir->i_sb)->flags & CRAMFS_FLAG_SORTED_DIRS;
while (offset < dir->i_size) {
struct cramfs_inode *de;
@@ -433,7 +434,7 @@ static struct dentry * cramfs_lookup(struct inode *dir, struct dentry *dentry, s
for (;;) {
if (!namelen) {
- up(&read_mutex);
+ mutex_unlock(&read_mutex);
return ERR_PTR(-EIO);
}
if (name[namelen-1])
@@ -447,7 +448,7 @@ static struct dentry * cramfs_lookup(struct inode *dir, struct dentry *dentry, s
continue;
if (!retval) {
struct cramfs_inode entry = *de;
- up(&read_mutex);
+ mutex_unlock(&read_mutex);
d_add(dentry, get_cramfs_inode(dir->i_sb, &entry));
return NULL;
}
@@ -455,7 +456,7 @@ static struct dentry * cramfs_lookup(struct inode *dir, struct dentry *dentry, s
if (sorted)
break;
}
- up(&read_mutex);
+ mutex_unlock(&read_mutex);
d_add(dentry, NULL);
return NULL;
}
@@ -474,21 +475,21 @@ static int cramfs_readpage(struct file *file, struct page * page)
u32 start_offset, compr_len;
start_offset = OFFSET(inode) + maxblock*4;
- down(&read_mutex);
+ mutex_lock(&read_mutex);
if (page->index)
start_offset = *(u32 *) cramfs_read(sb, blkptr_offset-4, 4);
compr_len = (*(u32 *) cramfs_read(sb, blkptr_offset, 4) - start_offset);
- up(&read_mutex);
+ mutex_unlock(&read_mutex);
pgdata = kmap(page);
if (compr_len == 0)
; /* hole */
else {
- down(&read_mutex);
+ mutex_lock(&read_mutex);
bytes_filled = cramfs_uncompress_block(pgdata,
PAGE_CACHE_SIZE,
cramfs_read(sb, start_offset, compr_len),
compr_len);
- up(&read_mutex);
+ mutex_unlock(&read_mutex);
}
} else
pgdata = kmap(page);
diff --git a/fs/dcache.c b/fs/dcache.c
index 0778f49f993..19458d39950 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -35,7 +35,7 @@
#include <linux/bootmem.h>
-int sysctl_vfs_cache_pressure = 100;
+int sysctl_vfs_cache_pressure __read_mostly = 100;
EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure);
__cacheline_aligned_in_smp DEFINE_SPINLOCK(dcache_lock);
@@ -43,7 +43,7 @@ static seqlock_t rename_lock __cacheline_aligned_in_smp = SEQLOCK_UNLOCKED;
EXPORT_SYMBOL(dcache_lock);
-static kmem_cache_t *dentry_cache;
+static kmem_cache_t *dentry_cache __read_mostly;
#define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname))
@@ -58,9 +58,9 @@ static kmem_cache_t *dentry_cache;
#define D_HASHBITS d_hash_shift
#define D_HASHMASK d_hash_mask
-static unsigned int d_hash_mask;
-static unsigned int d_hash_shift;
-static struct hlist_head *dentry_hashtable;
+static unsigned int d_hash_mask __read_mostly;
+static unsigned int d_hash_shift __read_mostly;
+static struct hlist_head *dentry_hashtable __read_mostly;
static LIST_HEAD(dentry_unused);
/* Statistics gathering. */
@@ -1710,10 +1710,10 @@ static void __init dcache_init(unsigned long mempages)
}
/* SLAB cache for __getname() consumers */
-kmem_cache_t *names_cachep;
+kmem_cache_t *names_cachep __read_mostly;
/* SLAB cache for file structures */
-kmem_cache_t *filp_cachep;
+kmem_cache_t *filp_cachep __read_mostly;
EXPORT_SYMBOL(d_genocide);
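Several of the fs hunks only add the __read_mostly annotation to globals that are written once at boot and then read on hot paths (slab caches, hash tables, tunables); the annotation places them in a separate section so they do not share cache lines with frequently written data. A minimal illustration with invented names:

	#include <linux/cache.h>

	static kmem_cache_t *example_cachep __read_mostly;	/* set once in init, read everywhere */
	static unsigned int example_hash_shift __read_mostly;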
diff --git a/fs/dcookies.c b/fs/dcookies.c
index f8274a8f83b..8749339bf4f 100644
--- a/fs/dcookies.c
+++ b/fs/dcookies.c
@@ -23,6 +23,7 @@
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/dcookies.h>
+#include <linux/mutex.h>
#include <asm/uaccess.h>
/* The dcookies are allocated from a kmem_cache and
@@ -36,10 +37,10 @@ struct dcookie_struct {
};
static LIST_HEAD(dcookie_users);
-static DECLARE_MUTEX(dcookie_sem);
-static kmem_cache_t * dcookie_cache;
-static struct list_head * dcookie_hashtable;
-static size_t hash_size;
+static DEFINE_MUTEX(dcookie_mutex);
+static kmem_cache_t *dcookie_cache __read_mostly;
+static struct list_head *dcookie_hashtable __read_mostly;
+static size_t hash_size __read_mostly;
static inline int is_live(void)
{
@@ -114,7 +115,7 @@ int get_dcookie(struct dentry * dentry, struct vfsmount * vfsmnt,
int err = 0;
struct dcookie_struct * dcs;
- down(&dcookie_sem);
+ mutex_lock(&dcookie_mutex);
if (!is_live()) {
err = -EINVAL;
@@ -134,7 +135,7 @@ int get_dcookie(struct dentry * dentry, struct vfsmount * vfsmnt,
*cookie = dcookie_value(dcs);
out:
- up(&dcookie_sem);
+ mutex_unlock(&dcookie_mutex);
return err;
}
@@ -157,7 +158,7 @@ asmlinkage long sys_lookup_dcookie(u64 cookie64, char __user * buf, size_t len)
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- down(&dcookie_sem);
+ mutex_lock(&dcookie_mutex);
if (!is_live()) {
err = -EINVAL;
@@ -192,7 +193,7 @@ asmlinkage long sys_lookup_dcookie(u64 cookie64, char __user * buf, size_t len)
out_free:
kfree(kbuf);
out:
- up(&dcookie_sem);
+ mutex_unlock(&dcookie_mutex);
return err;
}
@@ -290,7 +291,7 @@ struct dcookie_user * dcookie_register(void)
{
struct dcookie_user * user;
- down(&dcookie_sem);
+ mutex_lock(&dcookie_mutex);
user = kmalloc(sizeof(struct dcookie_user), GFP_KERNEL);
if (!user)
@@ -302,7 +303,7 @@ struct dcookie_user * dcookie_register(void)
list_add(&user->next, &dcookie_users);
out:
- up(&dcookie_sem);
+ mutex_unlock(&dcookie_mutex);
return user;
out_free:
kfree(user);
@@ -313,7 +314,7 @@ out_free:
void dcookie_unregister(struct dcookie_user * user)
{
- down(&dcookie_sem);
+ mutex_lock(&dcookie_mutex);
list_del(&user->next);
kfree(user);
@@ -321,7 +322,7 @@ void dcookie_unregister(struct dcookie_user * user)
if (!is_live())
dcookie_exit();
- up(&dcookie_sem);
+ mutex_unlock(&dcookie_mutex);
}
EXPORT_SYMBOL_GPL(dcookie_register);
diff --git a/fs/direct-io.c b/fs/direct-io.c
index 235ed8d1f11..9d1d2aa73e4 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -86,12 +86,12 @@ struct dio {
unsigned first_block_in_page; /* doesn't change, Used only once */
int boundary; /* prev block is at a boundary */
int reap_counter; /* rate limit reaping */
- get_blocks_t *get_blocks; /* block mapping function */
+ get_block_t *get_block; /* block mapping function */
dio_iodone_t *end_io; /* IO completion function */
sector_t final_block_in_bio; /* current final block in bio + 1 */
sector_t next_block_for_io; /* next block to be put under IO,
in dio_blocks units */
- struct buffer_head map_bh; /* last get_blocks() result */
+ struct buffer_head map_bh; /* last get_block() result */
/*
* Deferred addition of a page to the dio. These variables are
@@ -211,9 +211,9 @@ static struct page *dio_get_page(struct dio *dio)
/*
* Called when all DIO BIO I/O has been completed - let the filesystem
- * know, if it registered an interest earlier via get_blocks. Pass the
+ * know, if it registered an interest earlier via get_block. Pass the
* private field of the map buffer_head so that filesystems can use it
- * to hold additional state between get_blocks calls and dio_complete.
+ * to hold additional state between get_block calls and dio_complete.
*/
static void dio_complete(struct dio *dio, loff_t offset, ssize_t bytes)
{
@@ -493,7 +493,7 @@ static int dio_bio_reap(struct dio *dio)
* The fs is allowed to map lots of blocks at once. If it wants to do that,
* it uses the passed inode-relative block number as the file offset, as usual.
*
- * get_blocks() is passed the number of i_blkbits-sized blocks which direct_io
+ * get_block() is passed the number of i_blkbits-sized blocks which direct_io
* has remaining to do. The fs should not map more than this number of blocks.
*
* If the fs has mapped a lot of blocks, it should populate bh->b_size to
@@ -506,7 +506,7 @@ static int dio_bio_reap(struct dio *dio)
* In the case of filesystem holes: the fs may return an arbitrarily-large
* hole by returning an appropriate value in b_size and by clearing
* buffer_mapped(). However the direct-io code will only process holes one
- * block at a time - it will repeatedly call get_blocks() as it walks the hole.
+ * block at a time - it will repeatedly call get_block() as it walks the hole.
*/
static int get_more_blocks(struct dio *dio)
{
@@ -548,7 +548,8 @@ static int get_more_blocks(struct dio *dio)
* at a higher level for inside-i_size block-instantiating
* writes.
*/
- ret = (*dio->get_blocks)(dio->inode, fs_startblk, fs_count,
+ map_bh->b_size = fs_count << dio->blkbits;
+ ret = (*dio->get_block)(dio->inode, fs_startblk,
map_bh, create);
}
return ret;
@@ -783,11 +784,11 @@ static void dio_zero_block(struct dio *dio, int end)
* happily perform page-sized but 512-byte aligned IOs. It is important that
* blockdev IO be able to have fine alignment and large sizes.
*
- * So what we do is to permit the ->get_blocks function to populate bh.b_size
+ * So what we do is to permit the ->get_block function to populate bh.b_size
* with the size of IO which is permitted at this offset and this i_blkbits.
*
* For best results, the blockdev should be set up with 512-byte i_blkbits and
- * it should set b_size to PAGE_SIZE or more inside get_blocks(). This gives
+ * it should set b_size to PAGE_SIZE or more inside get_block(). This gives
* fine alignment but still allows this function to work in PAGE_SIZE units.
*/
static int do_direct_IO(struct dio *dio)
@@ -947,7 +948,7 @@ out:
static ssize_t
direct_io_worker(int rw, struct kiocb *iocb, struct inode *inode,
const struct iovec *iov, loff_t offset, unsigned long nr_segs,
- unsigned blkbits, get_blocks_t get_blocks, dio_iodone_t end_io,
+ unsigned blkbits, get_block_t get_block, dio_iodone_t end_io,
struct dio *dio)
{
unsigned long user_addr;
@@ -969,7 +970,7 @@ direct_io_worker(int rw, struct kiocb *iocb, struct inode *inode,
dio->boundary = 0;
dio->reap_counter = 0;
- dio->get_blocks = get_blocks;
+ dio->get_block = get_block;
dio->end_io = end_io;
dio->map_bh.b_private = NULL;
dio->final_block_in_bio = -1;
@@ -1177,7 +1178,7 @@ direct_io_worker(int rw, struct kiocb *iocb, struct inode *inode,
ssize_t
__blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
struct block_device *bdev, const struct iovec *iov, loff_t offset,
- unsigned long nr_segs, get_blocks_t get_blocks, dio_iodone_t end_io,
+ unsigned long nr_segs, get_block_t get_block, dio_iodone_t end_io,
int dio_lock_type)
{
int seg;
@@ -1273,7 +1274,7 @@ __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
(end > i_size_read(inode)));
retval = direct_io_worker(rw, iocb, inode, iov, offset,
- nr_segs, blkbits, get_blocks, end_io, dio);
+ nr_segs, blkbits, get_block, end_io, dio);
if (rw == READ && dio_lock_type == DIO_LOCKING)
release_i_mutex = 0;
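With get_blocks_t gone, direct I/O communicates the size of the requested mapping through map_bh->b_size before calling get_block(), and the filesystem answers the same way by setting b_size to however much it actually mapped. A hedged sketch of a get_block() that follows this convention; example_map_extent() is a made-up helper standing in for a filesystem's real block-mapping logic:

	static int example_get_block(struct inode *inode, sector_t iblock,
				     struct buffer_head *bh_result, int create)
	{
		/* the caller passes the size it wants mapped in b_size */
		unsigned long max_blocks = bh_result->b_size >> inode->i_blkbits;
		unsigned long mapped;

		/* example_map_extent() is a stand-in for the fs's mapping code */
		mapped = example_map_extent(inode, iblock, max_blocks, bh_result, create);
		if (!mapped)
			return 0;	/* hole: buffer_mapped() stays clear */

		/* report how many contiguous blocks were actually mapped */
		bh_result->b_size = mapped << inode->i_blkbits;
		return 0;
	}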
diff --git a/fs/dnotify.c b/fs/dnotify.c
index f3b540dd5d1..f932591df5a 100644
--- a/fs/dnotify.c
+++ b/fs/dnotify.c
@@ -21,9 +21,9 @@
#include <linux/spinlock.h>
#include <linux/slab.h>
-int dir_notify_enable = 1;
+int dir_notify_enable __read_mostly = 1;
-static kmem_cache_t *dn_cache;
+static kmem_cache_t *dn_cache __read_mostly;
static void redo_inode_mask(struct inode *inode)
{
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index a0f682cdd03..e067a06c646 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -281,13 +281,13 @@ static struct mutex epmutex;
static struct poll_safewake psw;
/* Slab cache used to allocate "struct epitem" */
-static kmem_cache_t *epi_cache;
+static kmem_cache_t *epi_cache __read_mostly;
/* Slab cache used to allocate "struct eppoll_entry" */
-static kmem_cache_t *pwq_cache;
+static kmem_cache_t *pwq_cache __read_mostly;
/* Virtual fs used to allocate inodes for eventpoll files */
-static struct vfsmount *eventpoll_mnt;
+static struct vfsmount *eventpoll_mnt __read_mostly;
/* File callbacks that implement the eventpoll file behaviour */
static struct file_operations eventpoll_fops = {
diff --git a/fs/exec.c b/fs/exec.c
index 995cba3c62b..c7397c46ad6 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -632,7 +632,7 @@ static int de_thread(struct task_struct *tsk)
* synchronize with any firing (by calling del_timer_sync)
* before we can safely let the old group leader die.
*/
- sig->real_timer.data = current;
+ sig->tsk = current;
spin_unlock_irq(lock);
if (hrtimer_cancel(&sig->real_timer))
hrtimer_restart(&sig->real_timer);
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
index a717837f272..04af9c45dce 100644
--- a/fs/ext2/inode.c
+++ b/fs/ext2/inode.c
@@ -667,18 +667,6 @@ static sector_t ext2_bmap(struct address_space *mapping, sector_t block)
return generic_block_bmap(mapping,block,ext2_get_block);
}
-static int
-ext2_get_blocks(struct inode *inode, sector_t iblock, unsigned long max_blocks,
- struct buffer_head *bh_result, int create)
-{
- int ret;
-
- ret = ext2_get_block(inode, iblock, bh_result, create);
- if (ret == 0)
- bh_result->b_size = (1 << inode->i_blkbits);
- return ret;
-}
-
static ssize_t
ext2_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
loff_t offset, unsigned long nr_segs)
@@ -687,7 +675,7 @@ ext2_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
struct inode *inode = file->f_mapping->host;
return blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
- offset, nr_segs, ext2_get_blocks, NULL);
+ offset, nr_segs, ext2_get_block, NULL);
}
static int
diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
index 46623f77666..77927d6938f 100644
--- a/fs/ext3/balloc.c
+++ b/fs/ext3/balloc.c
@@ -653,9 +653,11 @@ claim_block(spinlock_t *lock, int block, struct buffer_head *bh)
*/
static int
ext3_try_to_allocate(struct super_block *sb, handle_t *handle, int group,
- struct buffer_head *bitmap_bh, int goal, struct ext3_reserve_window *my_rsv)
+ struct buffer_head *bitmap_bh, int goal,
+ unsigned long *count, struct ext3_reserve_window *my_rsv)
{
int group_first_block, start, end;
+ unsigned long num = 0;
/* we do allocation within the reservation window if we have a window */
if (my_rsv) {
@@ -713,8 +715,18 @@ repeat:
goto fail_access;
goto repeat;
}
- return goal;
+ num++;
+ goal++;
+ while (num < *count && goal < end
+ && ext3_test_allocatable(goal, bitmap_bh)
+ && claim_block(sb_bgl_lock(EXT3_SB(sb), group), goal, bitmap_bh)) {
+ num++;
+ goal++;
+ }
+ *count = num;
+ return goal - num;
fail_access:
+ *count = num;
return -1;
}
@@ -999,6 +1011,31 @@ retry:
goto retry;
}
+static void try_to_extend_reservation(struct ext3_reserve_window_node *my_rsv,
+ struct super_block *sb, int size)
+{
+ struct ext3_reserve_window_node *next_rsv;
+ struct rb_node *next;
+ spinlock_t *rsv_lock = &EXT3_SB(sb)->s_rsv_window_lock;
+
+ if (!spin_trylock(rsv_lock))
+ return;
+
+ next = rb_next(&my_rsv->rsv_node);
+
+ if (!next)
+ my_rsv->rsv_end += size;
+ else {
+ next_rsv = list_entry(next, struct ext3_reserve_window_node, rsv_node);
+
+ if ((next_rsv->rsv_start - my_rsv->rsv_end - 1) >= size)
+ my_rsv->rsv_end += size;
+ else
+ my_rsv->rsv_end = next_rsv->rsv_start - 1;
+ }
+ spin_unlock(rsv_lock);
+}
+
/*
* This is the main function used to allocate a new block and its reservation
* window.
@@ -1024,11 +1061,12 @@ static int
ext3_try_to_allocate_with_rsv(struct super_block *sb, handle_t *handle,
unsigned int group, struct buffer_head *bitmap_bh,
int goal, struct ext3_reserve_window_node * my_rsv,
- int *errp)
+ unsigned long *count, int *errp)
{
unsigned long group_first_block;
int ret = 0;
int fatal;
+ unsigned long num = *count;
*errp = 0;
@@ -1051,7 +1089,8 @@ ext3_try_to_allocate_with_rsv(struct super_block *sb, handle_t *handle,
* or last attempt to allocate a block with reservation turned on failed
*/
if (my_rsv == NULL ) {
- ret = ext3_try_to_allocate(sb, handle, group, bitmap_bh, goal, NULL);
+ ret = ext3_try_to_allocate(sb, handle, group, bitmap_bh,
+ goal, count, NULL);
goto out;
}
/*
@@ -1081,6 +1120,8 @@ ext3_try_to_allocate_with_rsv(struct super_block *sb, handle_t *handle,
while (1) {
if (rsv_is_empty(&my_rsv->rsv_window) || (ret < 0) ||
!goal_in_my_reservation(&my_rsv->rsv_window, goal, group, sb)) {
+ if (my_rsv->rsv_goal_size < *count)
+ my_rsv->rsv_goal_size = *count;
ret = alloc_new_reservation(my_rsv, goal, sb,
group, bitmap_bh);
if (ret < 0)
@@ -1088,16 +1129,21 @@ ext3_try_to_allocate_with_rsv(struct super_block *sb, handle_t *handle,
if (!goal_in_my_reservation(&my_rsv->rsv_window, goal, group, sb))
goal = -1;
- }
+ } else if (goal > 0 && (my_rsv->rsv_end-goal+1) < *count)
+ try_to_extend_reservation(my_rsv, sb,
+ *count-my_rsv->rsv_end + goal - 1);
+
if ((my_rsv->rsv_start >= group_first_block + EXT3_BLOCKS_PER_GROUP(sb))
|| (my_rsv->rsv_end < group_first_block))
BUG();
ret = ext3_try_to_allocate(sb, handle, group, bitmap_bh, goal,
- &my_rsv->rsv_window);
+ &num, &my_rsv->rsv_window);
if (ret >= 0) {
- my_rsv->rsv_alloc_hit++;
+ my_rsv->rsv_alloc_hit += num;
+ *count = num;
break; /* succeed */
}
+ num = *count;
}
out:
if (ret >= 0) {
@@ -1154,8 +1200,8 @@ int ext3_should_retry_alloc(struct super_block *sb, int *retries)
* bitmap, and then for any free bit if that fails.
* This function also updates quota and i_blocks field.
*/
-int ext3_new_block(handle_t *handle, struct inode *inode,
- unsigned long goal, int *errp)
+int ext3_new_blocks(handle_t *handle, struct inode *inode,
+ unsigned long goal, unsigned long *count, int *errp)
{
struct buffer_head *bitmap_bh = NULL;
struct buffer_head *gdp_bh;
@@ -1178,6 +1224,7 @@ int ext3_new_block(handle_t *handle, struct inode *inode,
static int goal_hits, goal_attempts;
#endif
unsigned long ngroups;
+ unsigned long num = *count;
*errp = -ENOSPC;
sb = inode->i_sb;
@@ -1189,7 +1236,7 @@ int ext3_new_block(handle_t *handle, struct inode *inode,
/*
* Check quota for allocation of this block.
*/
- if (DQUOT_ALLOC_BLOCK(inode, 1)) {
+ if (DQUOT_ALLOC_BLOCK(inode, num)) {
*errp = -EDQUOT;
return 0;
}
@@ -1244,7 +1291,7 @@ retry:
if (!bitmap_bh)
goto io_error;
ret_block = ext3_try_to_allocate_with_rsv(sb, handle, group_no,
- bitmap_bh, ret_block, my_rsv, &fatal);
+ bitmap_bh, ret_block, my_rsv, &num, &fatal);
if (fatal)
goto out;
if (ret_block >= 0)
@@ -1281,7 +1328,7 @@ retry:
if (!bitmap_bh)
goto io_error;
ret_block = ext3_try_to_allocate_with_rsv(sb, handle, group_no,
- bitmap_bh, -1, my_rsv, &fatal);
+ bitmap_bh, -1, my_rsv, &num, &fatal);
if (fatal)
goto out;
if (ret_block >= 0)
@@ -1316,13 +1363,15 @@ allocated:
target_block = ret_block + group_no * EXT3_BLOCKS_PER_GROUP(sb)
+ le32_to_cpu(es->s_first_data_block);
- if (target_block == le32_to_cpu(gdp->bg_block_bitmap) ||
- target_block == le32_to_cpu(gdp->bg_inode_bitmap) ||
+ if (in_range(le32_to_cpu(gdp->bg_block_bitmap), target_block, num) ||
+ in_range(le32_to_cpu(gdp->bg_inode_bitmap), target_block, num) ||
in_range(target_block, le32_to_cpu(gdp->bg_inode_table),
+ EXT3_SB(sb)->s_itb_per_group) ||
+ in_range(target_block + num - 1, le32_to_cpu(gdp->bg_inode_table),
EXT3_SB(sb)->s_itb_per_group))
ext3_error(sb, "ext3_new_block",
"Allocating block in system zone - "
- "block = %u", target_block);
+ "blocks from %u, length %lu", target_block, num);
performed_allocation = 1;
@@ -1341,10 +1390,14 @@ allocated:
jbd_lock_bh_state(bitmap_bh);
spin_lock(sb_bgl_lock(sbi, group_no));
if (buffer_jbd(bitmap_bh) && bh2jh(bitmap_bh)->b_committed_data) {
- if (ext3_test_bit(ret_block,
- bh2jh(bitmap_bh)->b_committed_data)) {
- printk("%s: block was unexpectedly set in "
- "b_committed_data\n", __FUNCTION__);
+ int i;
+
+ for (i = 0; i < num; i++) {
+ if (ext3_test_bit(ret_block + i,
+ bh2jh(bitmap_bh)->b_committed_data)) {
+ printk("%s: block was unexpectedly set in "
+ "b_committed_data\n", __FUNCTION__);
+ }
}
}
ext3_debug("found bit %d\n", ret_block);
@@ -1355,7 +1408,7 @@ allocated:
/* ret_block was blockgroup-relative. Now it becomes fs-relative */
ret_block = target_block;
- if (ret_block >= le32_to_cpu(es->s_blocks_count)) {
+ if (ret_block + num - 1 >= le32_to_cpu(es->s_blocks_count)) {
ext3_error(sb, "ext3_new_block",
"block(%d) >= blocks count(%d) - "
"block_group = %d, es == %p ", ret_block,
@@ -1373,9 +1426,9 @@ allocated:
spin_lock(sb_bgl_lock(sbi, group_no));
gdp->bg_free_blocks_count =
- cpu_to_le16(le16_to_cpu(gdp->bg_free_blocks_count) - 1);
+ cpu_to_le16(le16_to_cpu(gdp->bg_free_blocks_count) - num);
spin_unlock(sb_bgl_lock(sbi, group_no));
- percpu_counter_mod(&sbi->s_freeblocks_counter, -1);
+ percpu_counter_mod(&sbi->s_freeblocks_counter, -num);
BUFFER_TRACE(gdp_bh, "journal_dirty_metadata for group descriptor");
err = ext3_journal_dirty_metadata(handle, gdp_bh);
@@ -1388,6 +1441,8 @@ allocated:
*errp = 0;
brelse(bitmap_bh);
+ DQUOT_FREE_BLOCK(inode, *count-num);
+ *count = num;
return ret_block;
io_error:
@@ -1401,11 +1456,19 @@ out:
* Undo the block allocation
*/
if (!performed_allocation)
- DQUOT_FREE_BLOCK(inode, 1);
+ DQUOT_FREE_BLOCK(inode, *count);
brelse(bitmap_bh);
return 0;
}
+int ext3_new_block(handle_t *handle, struct inode *inode,
+ unsigned long goal, int *errp)
+{
+ unsigned long count = 1;
+
+ return ext3_new_blocks(handle, inode, goal, &count, errp);
+}
+
unsigned long ext3_count_free_blocks(struct super_block *sb)
{
unsigned long desc_count;
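ext3_new_block() is now a thin wrapper around ext3_new_blocks(), whose *count argument works both ways: the caller asks for up to *count contiguous blocks, the allocator writes back how many it actually reserved, and the return value is the first block of the run. A hedged usage sketch with placeholder handle/inode/goal values:

	static int example_alloc_run(handle_t *handle, struct inode *inode,
				     unsigned long goal)
	{
		unsigned long count = 8;	/* ask for up to 8 contiguous blocks */
		unsigned long first;
		int err;

		first = ext3_new_blocks(handle, inode, goal, &count, &err);
		if (err)
			return err;

		/* 'count' now holds how many blocks, starting at 'first', were allocated */
		printk(KERN_DEBUG "got %lu blocks starting at %lu\n", count, first);
		return 0;
	}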
diff --git a/fs/ext3/dir.c b/fs/ext3/dir.c
index 773459164bb..38bd3f6ec14 100644
--- a/fs/ext3/dir.c
+++ b/fs/ext3/dir.c
@@ -131,8 +131,9 @@ static int ext3_readdir(struct file * filp,
struct buffer_head *bh = NULL;
map_bh.b_state = 0;
- err = ext3_get_block_handle(NULL, inode, blk, &map_bh, 0, 0);
- if (!err) {
+ err = ext3_get_blocks_handle(NULL, inode, blk, 1,
+ &map_bh, 0, 0);
+ if (err > 0) {
page_cache_readahead(sb->s_bdev->bd_inode->i_mapping,
&filp->f_ra,
filp,
diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c
index 2c361377e0a..48ae0339af1 100644
--- a/fs/ext3/inode.c
+++ b/fs/ext3/inode.c
@@ -44,16 +44,16 @@ static int ext3_writepage_trans_blocks(struct inode *inode);
/*
* Test whether an inode is a fast symlink.
*/
-static inline int ext3_inode_is_fast_symlink(struct inode *inode)
+static int ext3_inode_is_fast_symlink(struct inode *inode)
{
int ea_blocks = EXT3_I(inode)->i_file_acl ?
(inode->i_sb->s_blocksize >> 9) : 0;
- return (S_ISLNK(inode->i_mode) &&
- inode->i_blocks - ea_blocks == 0);
+ return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0);
}
-/* The ext3 forget function must perform a revoke if we are freeing data
+/*
+ * The ext3 forget function must perform a revoke if we are freeing data
* which has been journaled. Metadata (eg. indirect blocks) must be
* revoked in all cases.
*
@@ -61,10 +61,8 @@ static inline int ext3_inode_is_fast_symlink(struct inode *inode)
* but there may still be a record of it in the journal, and that record
* still needs to be revoked.
*/
-
-int ext3_forget(handle_t *handle, int is_metadata,
- struct inode *inode, struct buffer_head *bh,
- int blocknr)
+int ext3_forget(handle_t *handle, int is_metadata, struct inode *inode,
+ struct buffer_head *bh, int blocknr)
{
int err;
@@ -104,10 +102,9 @@ int ext3_forget(handle_t *handle, int is_metadata,
}
/*
- * Work out how many blocks we need to progress with the next chunk of a
+ * Work out how many blocks we need to proceed with the next chunk of a
* truncate transaction.
*/
-
static unsigned long blocks_for_truncate(struct inode *inode)
{
unsigned long needed;
@@ -141,7 +138,6 @@ static unsigned long blocks_for_truncate(struct inode *inode)
* extend fails, we need to propagate the failure up and restart the
* transaction in the top-level truncate loop. --sct
*/
-
static handle_t *start_transaction(struct inode *inode)
{
handle_t *result;
@@ -194,9 +190,11 @@ void ext3_delete_inode (struct inode * inode)
handle = start_transaction(inode);
if (IS_ERR(handle)) {
- /* If we're going to skip the normal cleanup, we still
- * need to make sure that the in-core orphan linked list
- * is properly cleaned up. */
+ /*
+ * If we're going to skip the normal cleanup, we still need to
+ * make sure that the in-core orphan linked list is properly
+ * cleaned up.
+ */
ext3_orphan_del(NULL, inode);
goto no_delete;
}
@@ -235,16 +233,6 @@ no_delete:
clear_inode(inode); /* We must guarantee clearing of inode... */
}
-static int ext3_alloc_block (handle_t *handle,
- struct inode * inode, unsigned long goal, int *err)
-{
- unsigned long result;
-
- result = ext3_new_block(handle, inode, goal, err);
- return result;
-}
-
-
typedef struct {
__le32 *p;
__le32 key;
@@ -257,7 +245,7 @@ static inline void add_chain(Indirect *p, struct buffer_head *bh, __le32 *v)
p->bh = bh;
}
-static inline int verify_chain(Indirect *from, Indirect *to)
+static int verify_chain(Indirect *from, Indirect *to)
{
while (from <= to && from->key == *from->p)
from++;
@@ -327,10 +315,10 @@ static int ext3_block_to_path(struct inode *inode,
offsets[n++] = i_block & (ptrs - 1);
final = ptrs;
} else {
- ext3_warning (inode->i_sb, "ext3_block_to_path", "block > big");
+ ext3_warning(inode->i_sb, "ext3_block_to_path", "block > big");
}
if (boundary)
- *boundary = (i_block & (ptrs - 1)) == (final - 1);
+ *boundary = final - 1 - (i_block & (ptrs - 1));
return n;
}
@@ -419,7 +407,6 @@ no_block:
*
* Caller must make sure that @ind is valid and will stay that way.
*/
-
static unsigned long ext3_find_near(struct inode *inode, Indirect *ind)
{
struct ext3_inode_info *ei = EXT3_I(inode);
@@ -429,17 +416,18 @@ static unsigned long ext3_find_near(struct inode *inode, Indirect *ind)
unsigned long colour;
/* Try to find previous block */
- for (p = ind->p - 1; p >= start; p--)
+ for (p = ind->p - 1; p >= start; p--) {
if (*p)
return le32_to_cpu(*p);
+ }
/* No such thing, so let's try location of indirect block */
if (ind->bh)
return ind->bh->b_blocknr;
/*
- * It is going to be refered from inode itself? OK, just put it into
- * the same cylinder group then.
+ * It is going to be referred to from the inode itself? OK, just put it
+ * into the same cylinder group then.
*/
bg_start = (ei->i_block_group * EXT3_BLOCKS_PER_GROUP(inode->i_sb)) +
le32_to_cpu(EXT3_SB(inode->i_sb)->s_es->s_first_data_block);
@@ -463,7 +451,9 @@ static unsigned long ext3_find_near(struct inode *inode, Indirect *ind)
static unsigned long ext3_find_goal(struct inode *inode, long block,
Indirect chain[4], Indirect *partial)
{
- struct ext3_block_alloc_info *block_i = EXT3_I(inode)->i_block_alloc_info;
+ struct ext3_block_alloc_info *block_i;
+
+ block_i = EXT3_I(inode)->i_block_alloc_info;
/*
* try the heuristic for sequential allocation,
@@ -478,13 +468,113 @@ static unsigned long ext3_find_goal(struct inode *inode, long block,
}
/**
+ * ext3_blks_to_allocate: Look up the block map and count the number
+ * of direct blocks that need to be allocated for the given branch.
+ *
+ * @branch: chain of indirect blocks
+ * @k: number of blocks needed for indirect blocks
+ * @blks: number of data blocks to be mapped.
+ * @blocks_to_boundary: the offset in the indirect block
+ *
+ * return the total number of blocks to be allocated, including the
+ * direct and indirect blocks.
+ */
+static int ext3_blks_to_allocate(Indirect *branch, int k, unsigned long blks,
+ int blocks_to_boundary)
+{
+ unsigned long count = 0;
+
+ /*
+ * Simple case: the [t,d]indirect block(s) have not been allocated yet,
+ * so clearly the blocks on that path have not been allocated either
+ */
+ if (k > 0) {
+ /* right now we don't handle cross boundary allocation */
+ if (blks < blocks_to_boundary + 1)
+ count += blks;
+ else
+ count += blocks_to_boundary + 1;
+ return count;
+ }
+
+ count++;
+ while (count < blks && count <= blocks_to_boundary &&
+ le32_to_cpu(*(branch[0].p + count)) == 0) {
+ count++;
+ }
+ return count;
+}
+
+/**
+ * ext3_alloc_blocks: allocate the multiple blocks needed for a branch
+ * @indirect_blks: the number of blocks that need to be allocated for
+ * indirect blocks
+ *
+ * @new_blocks: on return it will store the new block numbers for
+ * the indirect blocks (if needed) and the first direct block,
+ * @blks: on return it will store the total number of allocated
+ * direct blocks
+ */
+static int ext3_alloc_blocks(handle_t *handle, struct inode *inode,
+ unsigned long goal, int indirect_blks, int blks,
+ unsigned long long new_blocks[4], int *err)
+{
+ int target, i;
+ unsigned long count = 0;
+ int index = 0;
+ unsigned long current_block = 0;
+ int ret = 0;
+
+ /*
+ * Here we try to allocate the requested multiple blocks at once,
+ * on a best-effort basis.
+ * To build a branch, we should allocate blocks for
+ * the indirect blocks (if not allocated yet), and at least
+ * the first direct block of this branch. That's the
+ * minimum number of blocks we need to allocate (required)
+ */
+ target = blks + indirect_blks;
+
+ while (1) {
+ count = target;
+ /* allocating blocks for indirect blocks and direct blocks */
+ current_block = ext3_new_blocks(handle,inode,goal,&count,err);
+ if (*err)
+ goto failed_out;
+
+ target -= count;
+ /* allocate blocks for indirect blocks */
+ while (index < indirect_blks && count) {
+ new_blocks[index++] = current_block++;
+ count--;
+ }
+
+ if (count > 0)
+ break;
+ }
+
+ /* save the new block number for the first direct block */
+ new_blocks[index] = current_block;
+
+ /* total number of blocks allocated for direct blocks */
+ ret = count;
+ *err = 0;
+ return ret;
+failed_out:
+ for (i = 0; i < index; i++)
+ ext3_free_blocks(handle, inode, new_blocks[i], 1);
+ return ret;
+}
+
+/**
* ext3_alloc_branch - allocate and set up a chain of blocks.
* @inode: owner
- * @num: depth of the chain (number of blocks to allocate)
+ * @indirect_blks: number of allocated indirect blocks
+ * @blks: number of allocated direct blocks
* @offsets: offsets (in the blocks) to store the pointers to next.
* @branch: place to store the chain in.
*
- * This function allocates @num blocks, zeroes out all but the last one,
+ * This function allocates blocks, zeroes out all but the last one,
* links them into chain and (if we are synchronous) writes them to disk.
* In other words, it prepares a branch that can be spliced onto the
* inode. It stores the information about that chain in the branch[], in
@@ -501,97 +591,106 @@ static unsigned long ext3_find_goal(struct inode *inode, long block,
* ext3_alloc_block() (normally -ENOSPC). Otherwise we set the chain
* as described above and return 0.
*/
-
static int ext3_alloc_branch(handle_t *handle, struct inode *inode,
- int num,
- unsigned long goal,
- int *offsets,
- Indirect *branch)
+ int indirect_blks, int *blks, unsigned long goal,
+ int *offsets, Indirect *branch)
{
int blocksize = inode->i_sb->s_blocksize;
- int n = 0, keys = 0;
+ int i, n = 0;
int err = 0;
- int i;
- int parent = ext3_alloc_block(handle, inode, goal, &err);
-
- branch[0].key = cpu_to_le32(parent);
- if (parent) {
- for (n = 1; n < num; n++) {
- struct buffer_head *bh;
- /* Allocate the next block */
- int nr = ext3_alloc_block(handle, inode, parent, &err);
- if (!nr)
- break;
- branch[n].key = cpu_to_le32(nr);
+ struct buffer_head *bh;
+ int num;
+ unsigned long long new_blocks[4];
+ unsigned long long current_block;
- /*
- * Get buffer_head for parent block, zero it out
- * and set the pointer to new one, then send
- * parent to disk.
- */
- bh = sb_getblk(inode->i_sb, parent);
- if (!bh)
- break;
- keys = n+1;
- branch[n].bh = bh;
- lock_buffer(bh);
- BUFFER_TRACE(bh, "call get_create_access");
- err = ext3_journal_get_create_access(handle, bh);
- if (err) {
- unlock_buffer(bh);
- brelse(bh);
- break;
- }
+ num = ext3_alloc_blocks(handle, inode, goal, indirect_blks,
+ *blks, new_blocks, &err);
+ if (err)
+ return err;
- memset(bh->b_data, 0, blocksize);
- branch[n].p = (__le32*) bh->b_data + offsets[n];
- *branch[n].p = branch[n].key;
- BUFFER_TRACE(bh, "marking uptodate");
- set_buffer_uptodate(bh);
+ branch[0].key = cpu_to_le32(new_blocks[0]);
+ /*
+ * metadata blocks and data blocks are allocated.
+ */
+ for (n = 1; n <= indirect_blks; n++) {
+ /*
+ * Get buffer_head for parent block, zero it out
+ * and set the pointer to new one, then send
+ * parent to disk.
+ */
+ bh = sb_getblk(inode->i_sb, new_blocks[n-1]);
+ branch[n].bh = bh;
+ lock_buffer(bh);
+ BUFFER_TRACE(bh, "call get_create_access");
+ err = ext3_journal_get_create_access(handle, bh);
+ if (err) {
unlock_buffer(bh);
+ brelse(bh);
+ goto failed;
+ }
- BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
- err = ext3_journal_dirty_metadata(handle, bh);
- if (err)
- break;
-
- parent = nr;
+ memset(bh->b_data, 0, blocksize);
+ branch[n].p = (__le32 *) bh->b_data + offsets[n];
+ branch[n].key = cpu_to_le32(new_blocks[n]);
+ *branch[n].p = branch[n].key;
+ if (n == indirect_blks) {
+ current_block = new_blocks[n];
+ /*
+ * End of chain, update the last new metablock of
+ * the chain to point to the new allocated
+ * data blocks numbers
+ */
+ for (i = 1; i < num; i++)
+ *(branch[n].p + i) = cpu_to_le32(++current_block);
}
- }
- if (n == num)
- return 0;
+ BUFFER_TRACE(bh, "marking uptodate");
+ set_buffer_uptodate(bh);
+ unlock_buffer(bh);
+ BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
+ err = ext3_journal_dirty_metadata(handle, bh);
+ if (err)
+ goto failed;
+ }
+ *blks = num;
+ return err;
+failed:
/* Allocation failed, free what we already allocated */
- for (i = 1; i < keys; i++) {
+ for (i = 1; i <= n; i++) {
BUFFER_TRACE(branch[i].bh, "call journal_forget");
ext3_journal_forget(handle, branch[i].bh);
}
- for (i = 0; i < keys; i++)
- ext3_free_blocks(handle, inode, le32_to_cpu(branch[i].key), 1);
+ for (i = 0; i < indirect_blks; i++)
+ ext3_free_blocks(handle, inode, new_blocks[i], 1);
+
+ ext3_free_blocks(handle, inode, new_blocks[i], num);
+
return err;
}
/**
- * ext3_splice_branch - splice the allocated branch onto inode.
- * @inode: owner
- * @block: (logical) number of block we are adding
- * @chain: chain of indirect blocks (with a missing link - see
- * ext3_alloc_branch)
- * @where: location of missing link
- * @num: number of blocks we are adding
- *
- * This function fills the missing link and does all housekeeping needed in
- * inode (->i_blocks, etc.). In case of success we end up with the full
- * chain to new block and return 0.
+ * ext3_splice_branch - splice the allocated branch onto inode.
+ * @inode: owner
+ * @block: (logical) number of block we are adding
+ * @chain: chain of indirect blocks (with a missing link - see
+ * ext3_alloc_branch)
+ * @where: location of missing link
+ * @num: number of indirect blocks we are adding
+ * @blks: number of direct blocks we are adding
+ *
+ * This function fills the missing link and does all housekeeping needed in
+ * inode (->i_blocks, etc.). In case of success we end up with the full
+ * chain to new block and return 0.
*/
-
-static int ext3_splice_branch(handle_t *handle, struct inode *inode, long block,
- Indirect chain[4], Indirect *where, int num)
+static int ext3_splice_branch(handle_t *handle, struct inode *inode,
+ long block, Indirect *where, int num, int blks)
{
int i;
int err = 0;
- struct ext3_block_alloc_info *block_i = EXT3_I(inode)->i_block_alloc_info;
+ struct ext3_block_alloc_info *block_i;
+ unsigned long current_block;
+ block_i = EXT3_I(inode)->i_block_alloc_info;
/*
* If we're splicing into a [td]indirect block (as opposed to the
* inode) then we need to get write access to the [td]indirect block
@@ -608,13 +707,24 @@ static int ext3_splice_branch(handle_t *handle, struct inode *inode, long block,
*where->p = where->key;
/*
+ * Update the host buffer_head or inode to point to the just-allocated
+ * direct blocks
+ */
+ if (num == 0 && blks > 1) {
+ current_block = le32_to_cpu(where->key) + 1;
+ for (i = 1; i < blks; i++)
+ *(where->p + i) = cpu_to_le32(current_block++);
+ }
+
+ /*
* update the most recently allocated logical & physical block
* in i_block_alloc_info, to assist find the proper goal block for next
* allocation
*/
if (block_i) {
- block_i->last_alloc_logical_block = block;
- block_i->last_alloc_physical_block = le32_to_cpu(where[num-1].key);
+ block_i->last_alloc_logical_block = block + blks - 1;
+ block_i->last_alloc_physical_block =
+ le32_to_cpu(where[num].key) + blks - 1;
}
/* We are done with atomic stuff, now do the rest of housekeeping */
@@ -625,7 +735,7 @@ static int ext3_splice_branch(handle_t *handle, struct inode *inode, long block,
/* had we spliced it onto indirect block? */
if (where->bh) {
/*
- * akpm: If we spliced it onto an indirect block, we haven't
+ * If we spliced it onto an indirect block, we haven't
* altered the inode. Note however that if it is being spliced
* onto an indirect block at the very end of the file (the
* file is growing) then we *will* alter the inode to reflect
@@ -647,10 +757,13 @@ static int ext3_splice_branch(handle_t *handle, struct inode *inode, long block,
return err;
err_out:
- for (i = 1; i < num; i++) {
+ for (i = 1; i <= num; i++) {
BUFFER_TRACE(where[i].bh, "call journal_forget");
ext3_journal_forget(handle, where[i].bh);
+ ext3_free_blocks(handle,inode,le32_to_cpu(where[i-1].key),1);
}
+ ext3_free_blocks(handle, inode, le32_to_cpu(where[num].key), blks);
+
return err;
}
@@ -666,26 +779,33 @@ err_out:
* allocations is needed - we simply release blocks and do not touch anything
* reachable from inode.
*
- * akpm: `handle' can be NULL if create == 0.
+ * `handle' can be NULL if create == 0.
*
* The BKL may not be held on entry here. Be sure to take it early.
+ * return > 0, # of blocks mapped or allocated.
+ * return = 0, if plain lookup failed.
+ * return < 0, error case.
*/
-
-int
-ext3_get_block_handle(handle_t *handle, struct inode *inode, sector_t iblock,
- struct buffer_head *bh_result, int create, int extend_disksize)
+int ext3_get_blocks_handle(handle_t *handle, struct inode *inode,
+ sector_t iblock, unsigned long maxblocks,
+ struct buffer_head *bh_result,
+ int create, int extend_disksize)
{
int err = -EIO;
int offsets[4];
Indirect chain[4];
Indirect *partial;
unsigned long goal;
- int left;
- int boundary = 0;
- const int depth = ext3_block_to_path(inode, iblock, offsets, &boundary);
+ int indirect_blks;
+ int blocks_to_boundary = 0;
+ int depth;
struct ext3_inode_info *ei = EXT3_I(inode);
+ int count = 0;
+ unsigned long first_block = 0;
+
J_ASSERT(handle != NULL || create == 0);
+ depth = ext3_block_to_path(inode,iblock,offsets,&blocks_to_boundary);
if (depth == 0)
goto out;
@@ -694,8 +814,31 @@ ext3_get_block_handle(handle_t *handle, struct inode *inode, sector_t iblock,
/* Simplest case - block found, no allocation needed */
if (!partial) {
+ first_block = chain[depth - 1].key;
clear_buffer_new(bh_result);
- goto got_it;
+ count++;
+ /* map more blocks */
+ while (count < maxblocks && count <= blocks_to_boundary) {
+ if (!verify_chain(chain, partial)) {
+ /*
+ * Indirect block might be removed by
+ * truncate while we were reading it.
+ * Handling of that case: forget what we've
+ * got now. Flag the err as EAGAIN, so it
+ * will reread.
+ */
+ err = -EAGAIN;
+ count = 0;
+ break;
+ }
+ if (le32_to_cpu(*(chain[depth-1].p + count)) ==
+ (first_block + count))
+ count++;
+ else
+ break;
+ }
+ if (err != -EAGAIN)
+ goto got_it;
}
/* Next simple case - plain lookup or failed read of indirect block */
@@ -723,6 +866,7 @@ ext3_get_block_handle(handle_t *handle, struct inode *inode, sector_t iblock,
}
partial = ext3_get_branch(inode, depth, offsets, chain, &err);
if (!partial) {
+ count++;
mutex_unlock(&ei->truncate_mutex);
if (err)
goto cleanup;
@@ -740,12 +884,19 @@ ext3_get_block_handle(handle_t *handle, struct inode *inode, sector_t iblock,
goal = ext3_find_goal(inode, iblock, chain, partial);
- left = (chain + depth) - partial;
+ /* the number of blocks needed to allocate for [d,t]indirect blocks */
+ indirect_blks = (chain + depth) - partial - 1;
/*
+ * Next look up the indirect map to count the total number of
+ * direct blocks to allocate for this branch.
+ */
+ count = ext3_blks_to_allocate(partial, indirect_blks,
+ maxblocks, blocks_to_boundary);
+ /*
* Block out ext3_truncate while we alter the tree
*/
- err = ext3_alloc_branch(handle, inode, left, goal,
+ err = ext3_alloc_branch(handle, inode, indirect_blks, &count, goal,
offsets + (partial - chain), partial);
/*
@@ -756,8 +907,8 @@ ext3_get_block_handle(handle_t *handle, struct inode *inode, sector_t iblock,
* may need to return -EAGAIN upwards in the worst case. --sct
*/
if (!err)
- err = ext3_splice_branch(handle, inode, iblock, chain,
- partial, left);
+ err = ext3_splice_branch(handle, inode, iblock,
+ partial, indirect_blks, count);
/*
* i_disksize growing is protected by truncate_mutex. Don't forget to
* protect it if you're about to implement concurrent
@@ -772,8 +923,9 @@ ext3_get_block_handle(handle_t *handle, struct inode *inode, sector_t iblock,
set_buffer_new(bh_result);
got_it:
map_bh(bh_result, inode->i_sb, le32_to_cpu(chain[depth-1].key));
- if (boundary)
+ if (blocks_to_boundary == 0)
set_buffer_boundary(bh_result);
+ err = count;
/* Clean up and exit */
partial = chain + depth - 1; /* the whole chain */
cleanup:
@@ -787,34 +939,21 @@ out:
return err;
}
-static int ext3_get_block(struct inode *inode, sector_t iblock,
- struct buffer_head *bh_result, int create)
-{
- handle_t *handle = NULL;
- int ret;
-
- if (create) {
- handle = ext3_journal_current_handle();
- J_ASSERT(handle != 0);
- }
- ret = ext3_get_block_handle(handle, inode, iblock,
- bh_result, create, 1);
- return ret;
-}
-
#define DIO_CREDITS (EXT3_RESERVE_TRANS_BLOCKS + 32)
-static int
-ext3_direct_io_get_blocks(struct inode *inode, sector_t iblock,
- unsigned long max_blocks, struct buffer_head *bh_result,
- int create)
+static int ext3_get_block(struct inode *inode, sector_t iblock,
+ struct buffer_head *bh_result, int create)
{
handle_t *handle = journal_current_handle();
int ret = 0;
+ unsigned max_blocks = bh_result->b_size >> inode->i_blkbits;
- if (!handle)
+ if (!create)
goto get_block; /* A read */
+ if (max_blocks == 1)
+ goto get_block; /* A single block get */
+
if (handle->h_transaction->t_state == T_LOCKED) {
/*
* Huge direct-io writes can hold off commits for long
@@ -841,18 +980,22 @@ ext3_direct_io_get_blocks(struct inode *inode, sector_t iblock,
}
get_block:
- if (ret == 0)
- ret = ext3_get_block_handle(handle, inode, iblock,
- bh_result, create, 0);
- bh_result->b_size = (1 << inode->i_blkbits);
+ if (ret == 0) {
+ ret = ext3_get_blocks_handle(handle, inode, iblock,
+ max_blocks, bh_result, create, 0);
+ if (ret > 0) {
+ bh_result->b_size = (ret << inode->i_blkbits);
+ ret = 0;
+ }
+ }
return ret;
}
/*
* `handle' can be NULL if create is zero
*/
-struct buffer_head *ext3_getblk(handle_t *handle, struct inode * inode,
- long block, int create, int * errp)
+struct buffer_head *ext3_getblk(handle_t *handle, struct inode *inode,
+ long block, int create, int *errp)
{
struct buffer_head dummy;
int fatal = 0, err;
@@ -862,8 +1005,16 @@ struct buffer_head *ext3_getblk(handle_t *handle, struct inode * inode,
dummy.b_state = 0;
dummy.b_blocknr = -1000;
buffer_trace_init(&dummy.b_history);
- *errp = ext3_get_block_handle(handle, inode, block, &dummy, create, 1);
- if (!*errp && buffer_mapped(&dummy)) {
+ err = ext3_get_blocks_handle(handle, inode, block, 1,
+ &dummy, create, 1);
+ if (err == 1) {
+ err = 0;
+ } else if (err >= 0) {
+ WARN_ON(1);
+ err = -EIO;
+ }
+ *errp = err;
+ if (!err && buffer_mapped(&dummy)) {
struct buffer_head *bh;
bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
if (!bh) {
@@ -874,17 +1025,18 @@ struct buffer_head *ext3_getblk(handle_t *handle, struct inode * inode,
J_ASSERT(create != 0);
J_ASSERT(handle != 0);
- /* Now that we do not always journal data, we
- should keep in mind whether this should
- always journal the new buffer as metadata.
- For now, regular file writes use
- ext3_get_block instead, so it's not a
- problem. */
+ /*
+ * Now that we do not always journal data, we should
+ * keep in mind whether this should always journal the
+ * new buffer as metadata. For now, regular file
+ * writes use ext3_get_block instead, so it's not a
+ * problem.
+ */
lock_buffer(bh);
BUFFER_TRACE(bh, "call get_create_access");
fatal = ext3_journal_get_create_access(handle, bh);
if (!fatal && !buffer_uptodate(bh)) {
- memset(bh->b_data, 0, inode->i_sb->s_blocksize);
+ memset(bh->b_data,0,inode->i_sb->s_blocksize);
set_buffer_uptodate(bh);
}
unlock_buffer(bh);
@@ -906,7 +1058,7 @@ err:
return NULL;
}
-struct buffer_head *ext3_bread(handle_t *handle, struct inode * inode,
+struct buffer_head *ext3_bread(handle_t *handle, struct inode *inode,
int block, int create, int *err)
{
struct buffer_head * bh;
@@ -982,9 +1134,8 @@ static int walk_page_buffers( handle_t *handle,
* is elevated. We'll still have enough credits for the tiny quotafile
* write.
*/
-
-static int do_journal_get_write_access(handle_t *handle,
- struct buffer_head *bh)
+static int do_journal_get_write_access(handle_t *handle,
+ struct buffer_head *bh)
{
if (!buffer_mapped(bh) || buffer_freed(bh))
return 0;
@@ -1025,8 +1176,7 @@ out:
return ret;
}
-int
-ext3_journal_dirty_data(handle_t *handle, struct buffer_head *bh)
+int ext3_journal_dirty_data(handle_t *handle, struct buffer_head *bh)
{
int err = journal_dirty_data(handle, bh);
if (err)
@@ -1051,7 +1201,6 @@ static int commit_write_fn(handle_t *handle, struct buffer_head *bh)
* ext3 never places buffers on inode->i_mapping->private_list. metadata
* buffers are managed internally.
*/
-
static int ext3_ordered_commit_write(struct file *file, struct page *page,
unsigned from, unsigned to)
{
@@ -1261,7 +1410,7 @@ static int journal_dirty_data_fn(handle_t *handle, struct buffer_head *bh)
* we don't need to open a transaction here.
*/
static int ext3_ordered_writepage(struct page *page,
- struct writeback_control *wbc)
+ struct writeback_control *wbc)
{
struct inode *inode = page->mapping->host;
struct buffer_head *page_bufs;
@@ -1430,7 +1579,7 @@ ext3_readpages(struct file *file, struct address_space *mapping,
return mpage_readpages(mapping, pages, nr_pages, ext3_get_block);
}
-static int ext3_invalidatepage(struct page *page, unsigned long offset)
+static void ext3_invalidatepage(struct page *page, unsigned long offset)
{
journal_t *journal = EXT3_JOURNAL(page->mapping->host);
@@ -1440,7 +1589,7 @@ static int ext3_invalidatepage(struct page *page, unsigned long offset)
if (offset == 0)
ClearPageChecked(page);
- return journal_invalidatepage(journal, page, offset);
+ journal_invalidatepage(journal, page, offset);
}
static int ext3_releasepage(struct page *page, gfp_t wait)
@@ -1492,11 +1641,10 @@ static ssize_t ext3_direct_IO(int rw, struct kiocb *iocb,
ret = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
offset, nr_segs,
- ext3_direct_io_get_blocks, NULL);
+ ext3_get_block, NULL);
/*
- * Reacquire the handle: ext3_direct_io_get_block() can restart the
- * transaction
+ * Reacquire the handle: ext3_get_block() can restart the transaction
*/
handle = journal_current_handle();
@@ -1752,11 +1900,8 @@ static inline int all_zeroes(__le32 *p, __le32 *q)
* c) free the subtrees growing from the inode past the @chain[0].
* (no partially truncated stuff there). */
-static Indirect *ext3_find_shared(struct inode *inode,
- int depth,
- int offsets[4],
- Indirect chain[4],
- __le32 *top)
+static Indirect *ext3_find_shared(struct inode *inode, int depth,
+ int offsets[4], Indirect chain[4], __le32 *top)
{
Indirect *partial, *p;
int k, err;
@@ -1795,8 +1940,7 @@ static Indirect *ext3_find_shared(struct inode *inode,
}
/* Writer: end */
- while(partial > p)
- {
+ while(partial > p) {
brelse(partial->bh);
partial--;
}
@@ -1812,10 +1956,9 @@ no_top:
* We release `count' blocks on disk, but (last - first) may be greater
* than `count' because there can be holes in there.
*/
-static void
-ext3_clear_blocks(handle_t *handle, struct inode *inode, struct buffer_head *bh,
- unsigned long block_to_free, unsigned long count,
- __le32 *first, __le32 *last)
+static void ext3_clear_blocks(handle_t *handle, struct inode *inode,
+ struct buffer_head *bh, unsigned long block_to_free,
+ unsigned long count, __le32 *first, __le32 *last)
{
__le32 *p;
if (try_to_extend_transaction(handle, inode)) {
@@ -2076,8 +2219,7 @@ static void ext3_free_branches(handle_t *handle, struct inode *inode,
* that's fine - as long as they are linked from the inode, the post-crash
* ext3_truncate() run will find them and release them.
*/
-
-void ext3_truncate(struct inode * inode)
+void ext3_truncate(struct inode *inode)
{
handle_t *handle;
struct ext3_inode_info *ei = EXT3_I(inode);
@@ -2201,29 +2343,26 @@ void ext3_truncate(struct inode * inode)
do_indirects:
/* Kill the remaining (whole) subtrees */
switch (offsets[0]) {
- default:
- nr = i_data[EXT3_IND_BLOCK];
- if (nr) {
- ext3_free_branches(handle, inode, NULL,
- &nr, &nr+1, 1);
- i_data[EXT3_IND_BLOCK] = 0;
- }
- case EXT3_IND_BLOCK:
- nr = i_data[EXT3_DIND_BLOCK];
- if (nr) {
- ext3_free_branches(handle, inode, NULL,
- &nr, &nr+1, 2);
- i_data[EXT3_DIND_BLOCK] = 0;
- }
- case EXT3_DIND_BLOCK:
- nr = i_data[EXT3_TIND_BLOCK];
- if (nr) {
- ext3_free_branches(handle, inode, NULL,
- &nr, &nr+1, 3);
- i_data[EXT3_TIND_BLOCK] = 0;
- }
- case EXT3_TIND_BLOCK:
- ;
+ default:
+ nr = i_data[EXT3_IND_BLOCK];
+ if (nr) {
+ ext3_free_branches(handle, inode, NULL, &nr, &nr+1, 1);
+ i_data[EXT3_IND_BLOCK] = 0;
+ }
+ case EXT3_IND_BLOCK:
+ nr = i_data[EXT3_DIND_BLOCK];
+ if (nr) {
+ ext3_free_branches(handle, inode, NULL, &nr, &nr+1, 2);
+ i_data[EXT3_DIND_BLOCK] = 0;
+ }
+ case EXT3_DIND_BLOCK:
+ nr = i_data[EXT3_TIND_BLOCK];
+ if (nr) {
+ ext3_free_branches(handle, inode, NULL, &nr, &nr+1, 3);
+ i_data[EXT3_TIND_BLOCK] = 0;
+ }
+ case EXT3_TIND_BLOCK:
+ ;
}
ext3_discard_reservation(inode);
@@ -2232,8 +2371,10 @@ do_indirects:
inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC;
ext3_mark_inode_dirty(handle, inode);
- /* In a multi-transaction truncate, we only make the final
- * transaction synchronous */
+ /*
+ * In a multi-transaction truncate, we only make the final transaction
+ * synchronous
+ */
if (IS_SYNC(inode))
handle->h_sync = 1;
out_stop:
@@ -2259,20 +2400,16 @@ static unsigned long ext3_get_inode_block(struct super_block *sb,
struct ext3_group_desc * gdp;
- if ((ino != EXT3_ROOT_INO &&
- ino != EXT3_JOURNAL_INO &&
- ino != EXT3_RESIZE_INO &&
- ino < EXT3_FIRST_INO(sb)) ||
- ino > le32_to_cpu(
- EXT3_SB(sb)->s_es->s_inodes_count)) {
- ext3_error (sb, "ext3_get_inode_block",
+ if ((ino != EXT3_ROOT_INO && ino != EXT3_JOURNAL_INO &&
+ ino != EXT3_RESIZE_INO && ino < EXT3_FIRST_INO(sb)) ||
+ ino > le32_to_cpu(EXT3_SB(sb)->s_es->s_inodes_count)) {
+ ext3_error(sb, "ext3_get_inode_block",
"bad inode number: %lu", ino);
return 0;
}
block_group = (ino - 1) / EXT3_INODES_PER_GROUP(sb);
if (block_group >= EXT3_SB(sb)->s_groups_count) {
- ext3_error (sb, "ext3_get_inode_block",
- "group >= groups count");
+ ext3_error(sb,"ext3_get_inode_block","group >= groups count");
return 0;
}
smp_rmb();
@@ -2285,7 +2422,7 @@ static unsigned long ext3_get_inode_block(struct super_block *sb,
return 0;
}
- gdp = (struct ext3_group_desc *) bh->b_data;
+ gdp = (struct ext3_group_desc *)bh->b_data;
/*
* Figure out the offset within the block group inode table
*/
@@ -2834,7 +2971,7 @@ err_out:
/*
- * akpm: how many blocks doth make a writepage()?
+ * How many blocks doth make a writepage()?
*
* With N blocks per page, it may be:
* N data blocks
@@ -2924,8 +3061,8 @@ ext3_reserve_inode_write(handle_t *handle, struct inode *inode,
}
/*
- * akpm: What we do here is to mark the in-core inode as clean
- * with respect to inode dirtiness (it may still be data-dirty).
+ * What we do here is to mark the in-core inode as clean with respect to inode
+ * dirtiness (it may still be data-dirty).
* This means that the in-core inode may be reaped by prune_icache
* without having to perform any I/O. This is a very good thing,
* because *any* task may call prune_icache - even ones which
@@ -2957,7 +3094,7 @@ int ext3_mark_inode_dirty(handle_t *handle, struct inode *inode)
}
/*
- * akpm: ext3_dirty_inode() is called from __mark_inode_dirty()
+ * ext3_dirty_inode() is called from __mark_inode_dirty()
*
* We're really interested in the case where a file is being extended.
* i_size has been changed by generic_commit_write() and we thus need
@@ -2993,7 +3130,7 @@ out:
return;
}
-#ifdef AKPM
+#if 0
/*
* Bind an inode's backing buffer_head into this transaction, to prevent
* it from being flushed to disk early. Unlike
@@ -3001,8 +3138,7 @@ out:
* returns no iloc structure, so the caller needs to repeat the iloc
* lookup to mark the inode dirty later.
*/
-static inline int
-ext3_pin_inode(handle_t *handle, struct inode *inode)
+static int ext3_pin_inode(handle_t *handle, struct inode *inode)
{
struct ext3_iloc iloc;
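
For reference, a minimal sketch of the b_size convention the reworked ext3 code above participates in: a caller of a multi-block-aware get_block_t (such as the new ext3_get_block()) passes the number of blocks it wants via bh_result->b_size and reads back how much was actually mapped. Everything here except get_block_t, struct buffer_head and the ext3 names mentioned is illustrative and not part of the patch.

#include <linux/fs.h>
#include <linux/buffer_head.h>

/* Illustrative helper, not in the patch: probe how many contiguous blocks
 * starting at iblock a multi-block-aware get_block_t can map, without
 * allocating anything (create == 0).
 */
static int example_probe_extent(struct inode *inode, get_block_t *get_block,
				sector_t iblock, unsigned long want)
{
	struct buffer_head bh;
	int err;

	bh.b_state = 0;
	bh.b_size = want << inode->i_blkbits;	/* how many blocks we want */
	err = get_block(inode, iblock, &bh, 0);	/* plain lookup, no create */
	if (err)
		return err;
	if (!buffer_mapped(&bh))
		return 0;			/* hole */
	return bh.b_size >> inode->i_blkbits;	/* contiguous blocks mapped */
}
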
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index 86e443182de..f8a5266ea1f 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -1678,12 +1678,6 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
}
if (test_opt(sb, NOBH)) {
- if (sb->s_blocksize_bits != PAGE_CACHE_SHIFT) {
- printk(KERN_WARNING "EXT3-fs: Ignoring nobh option "
- "since filesystem blocksize doesn't match "
- "pagesize\n");
- clear_opt(sbi->s_mount_opt, NOBH);
- }
if (!(test_opt(sb, DATA_FLAGS) == EXT3_MOUNT_WRITEBACK_DATA)) {
printk(KERN_WARNING "EXT3-fs: Ignoring nobh option - "
"its supported only with writeback mode\n");
diff --git a/fs/fat/inode.c b/fs/fat/inode.c
index 297300fe81c..404bfc9f738 100644
--- a/fs/fat/inode.c
+++ b/fs/fat/inode.c
@@ -101,11 +101,11 @@ static int __fat_get_blocks(struct inode *inode, sector_t iblock,
}
static int fat_get_blocks(struct inode *inode, sector_t iblock,
- unsigned long max_blocks,
struct buffer_head *bh_result, int create)
{
struct super_block *sb = inode->i_sb;
int err;
+ unsigned long max_blocks = bh_result->b_size >> inode->i_blkbits;
err = __fat_get_blocks(inode, iblock, &max_blocks, bh_result, create);
if (err)
diff --git a/fs/fcntl.c b/fs/fcntl.c
index 03c789560fb..2a2479196f9 100644
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -412,7 +412,7 @@ out:
/* Table to convert sigio signal codes into poll band bitmaps */
-static long band_table[NSIGPOLL] = {
+static const long band_table[NSIGPOLL] = {
POLLIN | POLLRDNORM, /* POLL_IN */
POLLOUT | POLLWRNORM | POLLWRBAND, /* POLL_OUT */
POLLIN | POLLRDNORM | POLLMSG, /* POLL_MSG */
@@ -531,7 +531,7 @@ int send_sigurg(struct fown_struct *fown)
}
static DEFINE_RWLOCK(fasync_lock);
-static kmem_cache_t *fasync_cache;
+static kmem_cache_t *fasync_cache __read_mostly;
/*
* fasync_helper() is used by some character device drivers (mainly mice)
diff --git a/fs/hfs/inode.c b/fs/hfs/inode.c
index 39fd85b9b91..2c564701724 100644
--- a/fs/hfs/inode.c
+++ b/fs/hfs/inode.c
@@ -98,17 +98,6 @@ static int hfs_releasepage(struct page *page, gfp_t mask)
return res ? try_to_free_buffers(page) : 0;
}
-static int hfs_get_blocks(struct inode *inode, sector_t iblock, unsigned long max_blocks,
- struct buffer_head *bh_result, int create)
-{
- int ret;
-
- ret = hfs_get_block(inode, iblock, bh_result, create);
- if (!ret)
- bh_result->b_size = (1 << inode->i_blkbits);
- return ret;
-}
-
static ssize_t hfs_direct_IO(int rw, struct kiocb *iocb,
const struct iovec *iov, loff_t offset, unsigned long nr_segs)
{
@@ -116,7 +105,7 @@ static ssize_t hfs_direct_IO(int rw, struct kiocb *iocb,
struct inode *inode = file->f_dentry->d_inode->i_mapping->host;
return blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
- offset, nr_segs, hfs_get_blocks, NULL);
+ offset, nr_segs, hfs_get_block, NULL);
}
static int hfs_writepages(struct address_space *mapping,
diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c
index 12ed2b7d046..9fbe4d2aeec 100644
--- a/fs/hfsplus/inode.c
+++ b/fs/hfsplus/inode.c
@@ -93,17 +93,6 @@ static int hfsplus_releasepage(struct page *page, gfp_t mask)
return res ? try_to_free_buffers(page) : 0;
}
-static int hfsplus_get_blocks(struct inode *inode, sector_t iblock, unsigned long max_blocks,
- struct buffer_head *bh_result, int create)
-{
- int ret;
-
- ret = hfsplus_get_block(inode, iblock, bh_result, create);
- if (!ret)
- bh_result->b_size = (1 << inode->i_blkbits);
- return ret;
-}
-
static ssize_t hfsplus_direct_IO(int rw, struct kiocb *iocb,
const struct iovec *iov, loff_t offset, unsigned long nr_segs)
{
@@ -111,7 +100,7 @@ static ssize_t hfsplus_direct_IO(int rw, struct kiocb *iocb,
struct inode *inode = file->f_dentry->d_inode->i_mapping->host;
return blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
- offset, nr_segs, hfsplus_get_blocks, NULL);
+ offset, nr_segs, hfsplus_get_block, NULL);
}
static int hfsplus_writepages(struct address_space *mapping,
diff --git a/fs/inode.c b/fs/inode.c
index 85da11044ad..1fddf2803af 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -56,8 +56,8 @@
#define I_HASHBITS i_hash_shift
#define I_HASHMASK i_hash_mask
-static unsigned int i_hash_mask;
-static unsigned int i_hash_shift;
+static unsigned int i_hash_mask __read_mostly;
+static unsigned int i_hash_shift __read_mostly;
/*
* Each inode can be on two separate lists. One is
@@ -73,7 +73,7 @@ static unsigned int i_hash_shift;
LIST_HEAD(inode_in_use);
LIST_HEAD(inode_unused);
-static struct hlist_head *inode_hashtable;
+static struct hlist_head *inode_hashtable __read_mostly;
/*
* A simple spinlock to protect the list manipulations.
@@ -98,7 +98,7 @@ static DEFINE_MUTEX(iprune_mutex);
*/
struct inodes_stat_t inodes_stat;
-static kmem_cache_t * inode_cachep;
+static kmem_cache_t * inode_cachep __read_mostly;
static struct inode *alloc_inode(struct super_block *sb)
{
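
The __read_mostly annotations added above, and in several files below, all follow one pattern; a minimal, hypothetical example (example_cachep is not a real symbol):

#include <linux/cache.h>
#include <linux/slab.h>

/* __read_mostly places the variable in the .data.read_mostly section so
 * that this rarely-written global does not share a cache line with hot,
 * frequently-written data on SMP systems.
 */
static kmem_cache_t *example_cachep __read_mostly;
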
diff --git a/fs/inotify.c b/fs/inotify.c
index a61e93e1785..f48a3dae071 100644
--- a/fs/inotify.c
+++ b/fs/inotify.c
@@ -39,15 +39,15 @@
static atomic_t inotify_cookie;
-static kmem_cache_t *watch_cachep;
-static kmem_cache_t *event_cachep;
+static kmem_cache_t *watch_cachep __read_mostly;
+static kmem_cache_t *event_cachep __read_mostly;
-static struct vfsmount *inotify_mnt;
+static struct vfsmount *inotify_mnt __read_mostly;
/* these are configurable via /proc/sys/fs/inotify/ */
-int inotify_max_user_instances;
-int inotify_max_user_watches;
-int inotify_max_queued_events;
+int inotify_max_user_instances __read_mostly;
+int inotify_max_user_watches __read_mostly;
+int inotify_max_queued_events __read_mostly;
/*
* Lock ordering:
diff --git a/fs/jbd/transaction.c b/fs/jbd/transaction.c
index ada31fa272e..c609f5034fc 100644
--- a/fs/jbd/transaction.c
+++ b/fs/jbd/transaction.c
@@ -1873,16 +1873,15 @@ zap_buffer_unlocked:
}
/**
- * int journal_invalidatepage()
+ * void journal_invalidatepage()
* @journal: journal to use for flush...
* @page: page to flush
* @offset: length of page to invalidate.
*
* Reap page buffers containing data after offset in page.
*
- * Return non-zero if the page's buffers were successfully reaped.
*/
-int journal_invalidatepage(journal_t *journal,
+void journal_invalidatepage(journal_t *journal,
struct page *page,
unsigned long offset)
{
@@ -1893,7 +1892,7 @@ int journal_invalidatepage(journal_t *journal,
if (!PageLocked(page))
BUG();
if (!page_has_buffers(page))
- return 1;
+ return;
/* We will potentially be playing with lists other than just the
* data lists (especially for journaled data mode), so be
@@ -1916,11 +1915,9 @@ int journal_invalidatepage(journal_t *journal,
} while (bh != head);
if (!offset) {
- if (!may_free || !try_to_free_buffers(page))
- return 0;
- J_ASSERT(!page_has_buffers(page));
+ if (may_free && try_to_free_buffers(page))
+ J_ASSERT(!page_has_buffers(page));
}
- return 1;
}
/*
diff --git a/fs/jffs2/compr_zlib.c b/fs/jffs2/compr_zlib.c
index 4db8be8e90c..5c63e0cdcf4 100644
--- a/fs/jffs2/compr_zlib.c
+++ b/fs/jffs2/compr_zlib.c
@@ -33,13 +33,14 @@
*/
#define STREAM_END_SPACE 12
-static DECLARE_MUTEX(deflate_sem);
-static DECLARE_MUTEX(inflate_sem);
+static DEFINE_MUTEX(deflate_mutex);
+static DEFINE_MUTEX(inflate_mutex);
static z_stream inf_strm, def_strm;
#ifdef __KERNEL__ /* Linux-only */
#include <linux/vmalloc.h>
#include <linux/init.h>
+#include <linux/mutex.h>
static int __init alloc_workspaces(void)
{
@@ -79,11 +80,11 @@ static int jffs2_zlib_compress(unsigned char *data_in,
if (*dstlen <= STREAM_END_SPACE)
return -1;
- down(&deflate_sem);
+ mutex_lock(&deflate_mutex);
if (Z_OK != zlib_deflateInit(&def_strm, 3)) {
printk(KERN_WARNING "deflateInit failed\n");
- up(&deflate_sem);
+ mutex_unlock(&deflate_mutex);
return -1;
}
@@ -104,7 +105,7 @@ static int jffs2_zlib_compress(unsigned char *data_in,
if (ret != Z_OK) {
D1(printk(KERN_DEBUG "deflate in loop returned %d\n", ret));
zlib_deflateEnd(&def_strm);
- up(&deflate_sem);
+ mutex_unlock(&deflate_mutex);
return -1;
}
}
@@ -133,7 +134,7 @@ static int jffs2_zlib_compress(unsigned char *data_in,
*sourcelen = def_strm.total_in;
ret = 0;
out:
- up(&deflate_sem);
+ mutex_unlock(&deflate_mutex);
return ret;
}
@@ -145,7 +146,7 @@ static int jffs2_zlib_decompress(unsigned char *data_in,
int ret;
int wbits = MAX_WBITS;
- down(&inflate_sem);
+ mutex_lock(&inflate_mutex);
inf_strm.next_in = data_in;
inf_strm.avail_in = srclen;
@@ -173,7 +174,7 @@ static int jffs2_zlib_decompress(unsigned char *data_in,
if (Z_OK != zlib_inflateInit2(&inf_strm, wbits)) {
printk(KERN_WARNING "inflateInit failed\n");
- up(&inflate_sem);
+ mutex_unlock(&inflate_mutex);
return 1;
}
@@ -183,7 +184,7 @@ static int jffs2_zlib_decompress(unsigned char *data_in,
printk(KERN_NOTICE "inflate returned %d\n", ret);
}
zlib_inflateEnd(&inf_strm);
- up(&inflate_sem);
+ mutex_unlock(&inflate_mutex);
return 0;
}
diff --git a/fs/jfs/inode.c b/fs/jfs/inode.c
index 51a5fed90cc..04eb78f1252 100644
--- a/fs/jfs/inode.c
+++ b/fs/jfs/inode.c
@@ -258,7 +258,8 @@ jfs_get_blocks(struct inode *ip, sector_t lblock, unsigned long max_blocks,
static int jfs_get_block(struct inode *ip, sector_t lblock,
struct buffer_head *bh_result, int create)
{
- return jfs_get_blocks(ip, lblock, 1, bh_result, create);
+ return jfs_get_blocks(ip, lblock, bh_result->b_size >> ip->i_blkbits,
+ bh_result, create);
}
static int jfs_writepage(struct page *page, struct writeback_control *wbc)
@@ -301,7 +302,7 @@ static ssize_t jfs_direct_IO(int rw, struct kiocb *iocb,
struct inode *inode = file->f_mapping->host;
return blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
- offset, nr_segs, jfs_get_blocks, NULL);
+ offset, nr_segs, jfs_get_block, NULL);
}
struct address_space_operations jfs_aops = {
diff --git a/fs/jfs/jfs_logmgr.c b/fs/jfs/jfs_logmgr.c
index 0b348b13b55..3315f0b1fbc 100644
--- a/fs/jfs/jfs_logmgr.c
+++ b/fs/jfs/jfs_logmgr.c
@@ -69,6 +69,7 @@
#include <linux/bio.h>
#include <linux/suspend.h>
#include <linux/delay.h>
+#include <linux/mutex.h>
#include "jfs_incore.h"
#include "jfs_filsys.h"
#include "jfs_metapage.h"
@@ -165,7 +166,7 @@ do { \
*/
static LIST_HEAD(jfs_external_logs);
static struct jfs_log *dummy_log = NULL;
-static DECLARE_MUTEX(jfs_log_sem);
+static DEFINE_MUTEX(jfs_log_mutex);
/*
* forward references
@@ -1085,20 +1086,20 @@ int lmLogOpen(struct super_block *sb)
if (sbi->mntflag & JFS_INLINELOG)
return open_inline_log(sb);
- down(&jfs_log_sem);
+ mutex_lock(&jfs_log_mutex);
list_for_each_entry(log, &jfs_external_logs, journal_list) {
if (log->bdev->bd_dev == sbi->logdev) {
if (memcmp(log->uuid, sbi->loguuid,
sizeof(log->uuid))) {
jfs_warn("wrong uuid on JFS journal\n");
- up(&jfs_log_sem);
+ mutex_unlock(&jfs_log_mutex);
return -EINVAL;
}
/*
* add file system to log active file system list
*/
if ((rc = lmLogFileSystem(log, sbi, 1))) {
- up(&jfs_log_sem);
+ mutex_unlock(&jfs_log_mutex);
return rc;
}
goto journal_found;
@@ -1106,7 +1107,7 @@ int lmLogOpen(struct super_block *sb)
}
if (!(log = kzalloc(sizeof(struct jfs_log), GFP_KERNEL))) {
- up(&jfs_log_sem);
+ mutex_unlock(&jfs_log_mutex);
return -ENOMEM;
}
INIT_LIST_HEAD(&log->sb_list);
@@ -1151,7 +1152,7 @@ journal_found:
sbi->log = log;
LOG_UNLOCK(log);
- up(&jfs_log_sem);
+ mutex_unlock(&jfs_log_mutex);
return 0;
/*
@@ -1168,7 +1169,7 @@ journal_found:
blkdev_put(bdev);
free: /* free log descriptor */
- up(&jfs_log_sem);
+ mutex_unlock(&jfs_log_mutex);
kfree(log);
jfs_warn("lmLogOpen: exit(%d)", rc);
@@ -1212,11 +1213,11 @@ static int open_dummy_log(struct super_block *sb)
{
int rc;
- down(&jfs_log_sem);
+ mutex_lock(&jfs_log_mutex);
if (!dummy_log) {
dummy_log = kzalloc(sizeof(struct jfs_log), GFP_KERNEL);
if (!dummy_log) {
- up(&jfs_log_sem);
+ mutex_unlock(&jfs_log_mutex);
return -ENOMEM;
}
INIT_LIST_HEAD(&dummy_log->sb_list);
@@ -1229,7 +1230,7 @@ static int open_dummy_log(struct super_block *sb)
if (rc) {
kfree(dummy_log);
dummy_log = NULL;
- up(&jfs_log_sem);
+ mutex_unlock(&jfs_log_mutex);
return rc;
}
}
@@ -1238,7 +1239,7 @@ static int open_dummy_log(struct super_block *sb)
list_add(&JFS_SBI(sb)->log_list, &dummy_log->sb_list);
JFS_SBI(sb)->log = dummy_log;
LOG_UNLOCK(dummy_log);
- up(&jfs_log_sem);
+ mutex_unlock(&jfs_log_mutex);
return 0;
}
@@ -1466,7 +1467,7 @@ int lmLogClose(struct super_block *sb)
jfs_info("lmLogClose: log:0x%p", log);
- down(&jfs_log_sem);
+ mutex_lock(&jfs_log_mutex);
LOG_LOCK(log);
list_del(&sbi->log_list);
LOG_UNLOCK(log);
@@ -1516,7 +1517,7 @@ int lmLogClose(struct super_block *sb)
kfree(log);
out:
- up(&jfs_log_sem);
+ mutex_unlock(&jfs_log_mutex);
jfs_info("lmLogClose: exit(%d)", rc);
return rc;
}
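
The locking changes in jffs2, jfs, lockd, nfs and nfsd in this patch all apply the same semaphore-to-mutex conversion; a sketch with hypothetical names (foo_mutex, example_critical_section):

#include <linux/mutex.h>

/* Before: static DECLARE_MUTEX(foo_sem); down(&foo_sem); ... up(&foo_sem); */
static DEFINE_MUTEX(foo_mutex);

static void example_critical_section(void)
{
	mutex_lock(&foo_mutex);
	/* ... touch state protected by foo_mutex ... */
	mutex_unlock(&foo_mutex);
}
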
diff --git a/fs/jfs/jfs_metapage.c b/fs/jfs/jfs_metapage.c
index 5fbaeaadccd..f28696f235c 100644
--- a/fs/jfs/jfs_metapage.c
+++ b/fs/jfs/jfs_metapage.c
@@ -220,8 +220,8 @@ int __init metapage_init(void)
if (metapage_cache == NULL)
return -ENOMEM;
- metapage_mempool = mempool_create(METAPOOL_MIN_PAGES, mempool_alloc_slab,
- mempool_free_slab, metapage_cache);
+ metapage_mempool = mempool_create_slab_pool(METAPOOL_MIN_PAGES,
+ metapage_cache);
if (metapage_mempool == NULL) {
kmem_cache_destroy(metapage_cache);
@@ -578,14 +578,13 @@ static int metapage_releasepage(struct page *page, gfp_t gfp_mask)
return 0;
}
-static int metapage_invalidatepage(struct page *page, unsigned long offset)
+static void metapage_invalidatepage(struct page *page, unsigned long offset)
{
BUG_ON(offset);
- if (PageWriteback(page))
- return 0;
+ BUG_ON(PageWriteback(page));
- return metapage_releasepage(page, 0);
+ metapage_releasepage(page, 0);
}
struct address_space_operations jfs_metapage_aops = {
diff --git a/fs/lockd/host.c b/fs/lockd/host.c
index 112ebf8b8df..729ac427d35 100644
--- a/fs/lockd/host.c
+++ b/fs/lockd/host.c
@@ -16,6 +16,7 @@
#include <linux/sunrpc/svc.h>
#include <linux/lockd/lockd.h>
#include <linux/lockd/sm_inter.h>
+#include <linux/mutex.h>
#define NLMDBG_FACILITY NLMDBG_HOSTCACHE
@@ -30,7 +31,7 @@
static struct nlm_host * nlm_hosts[NLM_HOST_NRHASH];
static unsigned long next_gc;
static int nrhosts;
-static DECLARE_MUTEX(nlm_host_sema);
+static DEFINE_MUTEX(nlm_host_mutex);
static void nlm_gc_hosts(void);
@@ -71,7 +72,7 @@ nlm_lookup_host(int server, struct sockaddr_in *sin,
hash = NLM_ADDRHASH(sin->sin_addr.s_addr);
/* Lock hash table */
- down(&nlm_host_sema);
+ mutex_lock(&nlm_host_mutex);
if (time_after_eq(jiffies, next_gc))
nlm_gc_hosts();
@@ -91,7 +92,7 @@ nlm_lookup_host(int server, struct sockaddr_in *sin,
nlm_hosts[hash] = host;
}
nlm_get_host(host);
- up(&nlm_host_sema);
+ mutex_unlock(&nlm_host_mutex);
return host;
}
}
@@ -130,7 +131,7 @@ nlm_lookup_host(int server, struct sockaddr_in *sin,
next_gc = 0;
nohost:
- up(&nlm_host_sema);
+ mutex_unlock(&nlm_host_mutex);
return host;
}
@@ -141,19 +142,19 @@ nlm_find_client(void)
* and return it
*/
int hash;
- down(&nlm_host_sema);
+ mutex_lock(&nlm_host_mutex);
for (hash = 0 ; hash < NLM_HOST_NRHASH; hash++) {
struct nlm_host *host, **hp;
for (hp = &nlm_hosts[hash]; (host = *hp) != 0; hp = &host->h_next) {
if (host->h_server &&
host->h_killed == 0) {
nlm_get_host(host);
- up(&nlm_host_sema);
+ mutex_unlock(&nlm_host_mutex);
return host;
}
}
}
- up(&nlm_host_sema);
+ mutex_unlock(&nlm_host_mutex);
return NULL;
}
@@ -265,7 +266,7 @@ nlm_shutdown_hosts(void)
int i;
dprintk("lockd: shutting down host module\n");
- down(&nlm_host_sema);
+ mutex_lock(&nlm_host_mutex);
/* First, make all hosts eligible for gc */
dprintk("lockd: nuking all hosts...\n");
@@ -276,7 +277,7 @@ nlm_shutdown_hosts(void)
/* Then, perform a garbage collection pass */
nlm_gc_hosts();
- up(&nlm_host_sema);
+ mutex_unlock(&nlm_host_mutex);
/* complain if any hosts are left */
if (nrhosts) {
diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
index 5e85bde6c12..fd56c8872f3 100644
--- a/fs/lockd/svc.c
+++ b/fs/lockd/svc.c
@@ -25,6 +25,7 @@
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
+#include <linux/mutex.h>
#include <linux/sunrpc/types.h>
#include <linux/sunrpc/stats.h>
@@ -43,13 +44,13 @@ static struct svc_program nlmsvc_program;
struct nlmsvc_binding * nlmsvc_ops;
EXPORT_SYMBOL(nlmsvc_ops);
-static DECLARE_MUTEX(nlmsvc_sema);
+static DEFINE_MUTEX(nlmsvc_mutex);
static unsigned int nlmsvc_users;
static pid_t nlmsvc_pid;
int nlmsvc_grace_period;
unsigned long nlmsvc_timeout;
-static DECLARE_MUTEX_LOCKED(lockd_start);
+static DECLARE_COMPLETION(lockd_start_done);
static DECLARE_WAIT_QUEUE_HEAD(lockd_exit);
/*
@@ -112,7 +113,7 @@ lockd(struct svc_rqst *rqstp)
* Let our maker know we're running.
*/
nlmsvc_pid = current->pid;
- up(&lockd_start);
+ complete(&lockd_start_done);
daemonize("lockd");
@@ -215,7 +216,7 @@ lockd_up(void)
struct svc_serv * serv;
int error = 0;
- down(&nlmsvc_sema);
+ mutex_lock(&nlmsvc_mutex);
/*
* Unconditionally increment the user count ... this is
* the number of clients who _want_ a lockd process.
@@ -263,7 +264,7 @@ lockd_up(void)
"lockd_up: create thread failed, error=%d\n", error);
goto destroy_and_out;
}
- down(&lockd_start);
+ wait_for_completion(&lockd_start_done);
/*
* Note: svc_serv structures have an initial use count of 1,
@@ -272,7 +273,7 @@ lockd_up(void)
destroy_and_out:
svc_destroy(serv);
out:
- up(&nlmsvc_sema);
+ mutex_unlock(&nlmsvc_mutex);
return error;
}
EXPORT_SYMBOL(lockd_up);
@@ -285,7 +286,7 @@ lockd_down(void)
{
static int warned;
- down(&nlmsvc_sema);
+ mutex_lock(&nlmsvc_mutex);
if (nlmsvc_users) {
if (--nlmsvc_users)
goto out;
@@ -315,7 +316,7 @@ lockd_down(void)
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
out:
- up(&nlmsvc_sema);
+ mutex_unlock(&nlmsvc_mutex);
}
EXPORT_SYMBOL(lockd_down);
diff --git a/fs/lockd/svcsubs.c b/fs/lockd/svcsubs.c
index c7a6e3ae44d..a570e5c8a93 100644
--- a/fs/lockd/svcsubs.c
+++ b/fs/lockd/svcsubs.c
@@ -11,6 +11,7 @@
#include <linux/string.h>
#include <linux/time.h>
#include <linux/in.h>
+#include <linux/mutex.h>
#include <linux/sunrpc/svc.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfsd/nfsfh.h>
@@ -28,7 +29,7 @@
#define FILE_HASH_BITS 5
#define FILE_NRHASH (1<<FILE_HASH_BITS)
static struct nlm_file * nlm_files[FILE_NRHASH];
-static DECLARE_MUTEX(nlm_file_sema);
+static DEFINE_MUTEX(nlm_file_mutex);
#ifdef NFSD_DEBUG
static inline void nlm_debug_print_fh(char *msg, struct nfs_fh *f)
@@ -91,7 +92,7 @@ nlm_lookup_file(struct svc_rqst *rqstp, struct nlm_file **result,
hash = file_hash(f);
/* Lock file table */
- down(&nlm_file_sema);
+ mutex_lock(&nlm_file_mutex);
for (file = nlm_files[hash]; file; file = file->f_next)
if (!nfs_compare_fh(&file->f_handle, f))
@@ -130,7 +131,7 @@ found:
nfserr = 0;
out_unlock:
- up(&nlm_file_sema);
+ mutex_unlock(&nlm_file_mutex);
return nfserr;
out_free:
@@ -239,14 +240,14 @@ nlm_traverse_files(struct nlm_host *host, int action)
struct nlm_file *file, **fp;
int i;
- down(&nlm_file_sema);
+ mutex_lock(&nlm_file_mutex);
for (i = 0; i < FILE_NRHASH; i++) {
fp = nlm_files + i;
while ((file = *fp) != NULL) {
/* Traverse locks, blocks and shares of this file
* and update file->f_locks count */
if (nlm_inspect_file(host, file, action)) {
- up(&nlm_file_sema);
+ mutex_unlock(&nlm_file_mutex);
return 1;
}
@@ -261,7 +262,7 @@ nlm_traverse_files(struct nlm_host *host, int action)
}
}
}
- up(&nlm_file_sema);
+ mutex_unlock(&nlm_file_mutex);
return 0;
}
@@ -281,7 +282,7 @@ nlm_release_file(struct nlm_file *file)
file, file->f_count);
/* Lock file table */
- down(&nlm_file_sema);
+ mutex_lock(&nlm_file_mutex);
/* If there are no more locks etc, delete the file */
if(--file->f_count == 0) {
@@ -289,7 +290,7 @@ nlm_release_file(struct nlm_file *file)
nlm_delete_file(file);
}
- up(&nlm_file_sema);
+ mutex_unlock(&nlm_file_mutex);
}
/*
diff --git a/fs/locks.c b/fs/locks.c
index 56f996e98bb..4d9e71d43e7 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -142,7 +142,7 @@ int lease_break_time = 45;
static LIST_HEAD(file_lock_list);
static LIST_HEAD(blocked_list);
-static kmem_cache_t *filelock_cache;
+static kmem_cache_t *filelock_cache __read_mostly;
/* Allocate an empty lock structure. */
static struct file_lock *locks_alloc_lock(void)
@@ -533,12 +533,7 @@ static void locks_delete_block(struct file_lock *waiter)
static void locks_insert_block(struct file_lock *blocker,
struct file_lock *waiter)
{
- if (!list_empty(&waiter->fl_block)) {
- printk(KERN_ERR "locks_insert_block: removing duplicated lock "
- "(pid=%d %Ld-%Ld type=%d)\n", waiter->fl_pid,
- waiter->fl_start, waiter->fl_end, waiter->fl_type);
- __locks_delete_block(waiter);
- }
+ BUG_ON(!list_empty(&waiter->fl_block));
list_add_tail(&waiter->fl_block, &blocker->fl_block);
waiter->fl_next = blocker;
if (IS_POSIX(blocker))
@@ -797,9 +792,7 @@ out:
return error;
}
-EXPORT_SYMBOL(posix_lock_file);
-
-static int __posix_lock_file(struct inode *inode, struct file_lock *request)
+static int __posix_lock_file_conf(struct inode *inode, struct file_lock *request, struct file_lock *conflock)
{
struct file_lock *fl;
struct file_lock *new_fl, *new_fl2;
@@ -823,6 +816,8 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request)
continue;
if (!posix_locks_conflict(request, fl))
continue;
+ if (conflock)
+ locks_copy_lock(conflock, fl);
error = -EAGAIN;
if (!(request->fl_flags & FL_SLEEP))
goto out;
@@ -992,8 +987,24 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request)
*/
int posix_lock_file(struct file *filp, struct file_lock *fl)
{
- return __posix_lock_file(filp->f_dentry->d_inode, fl);
+ return __posix_lock_file_conf(filp->f_dentry->d_inode, fl, NULL);
+}
+EXPORT_SYMBOL(posix_lock_file);
+
+/**
+ * posix_lock_file_conf - Apply a POSIX-style lock to a file
+ * @filp: The file to apply the lock to
+ * @fl: The lock to be applied
+ * @conflock: Place to return a copy of the conflicting lock, if found.
+ *
+ * Except for the conflock parameter, acts just like posix_lock_file.
+ */
+int posix_lock_file_conf(struct file *filp, struct file_lock *fl,
+ struct file_lock *conflock)
+{
+ return __posix_lock_file_conf(filp->f_dentry->d_inode, fl, conflock);
}
+EXPORT_SYMBOL(posix_lock_file_conf);
/**
* posix_lock_file_wait - Apply a POSIX-style lock to a file
@@ -1009,7 +1020,7 @@ int posix_lock_file_wait(struct file *filp, struct file_lock *fl)
int error;
might_sleep ();
for (;;) {
- error = __posix_lock_file(filp->f_dentry->d_inode, fl);
+ error = posix_lock_file(filp, fl);
if ((error != -EAGAIN) || !(fl->fl_flags & FL_SLEEP))
break;
error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
@@ -1081,7 +1092,7 @@ int locks_mandatory_area(int read_write, struct inode *inode,
fl.fl_end = offset + count - 1;
for (;;) {
- error = __posix_lock_file(inode, &fl);
+ error = __posix_lock_file_conf(inode, &fl, NULL);
if (error != -EAGAIN)
break;
if (!(fl.fl_flags & FL_SLEEP))
@@ -1694,7 +1705,7 @@ again:
error = filp->f_op->lock(filp, cmd, file_lock);
else {
for (;;) {
- error = __posix_lock_file(inode, file_lock);
+ error = posix_lock_file(filp, file_lock);
if ((error != -EAGAIN) || (cmd == F_SETLK))
break;
error = wait_event_interruptible(file_lock->fl_wait,
@@ -1837,7 +1848,7 @@ again:
error = filp->f_op->lock(filp, cmd, file_lock);
else {
for (;;) {
- error = __posix_lock_file(inode, file_lock);
+ error = posix_lock_file(filp, file_lock);
if ((error != -EAGAIN) || (cmd == F_SETLK64))
break;
error = wait_event_interruptible(file_lock->fl_wait,
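
A minimal sketch of how a caller can use the posix_lock_file_conf() interface introduced above to learn which lock blocked it; everything except posix_lock_file_conf() and struct file_lock is illustrative (compare nfsd4_lock() further below):

/* Illustrative only. */
static int example_try_lock(struct file *filp, struct file_lock *request)
{
	struct file_lock conflock;
	int status;

	/* As nfsd4_lock() does below: keep locks_copy_lock() from acting
	 * on uninitialised ops in the conflicting-lock copy. */
	conflock.fl_ops = NULL;
	conflock.fl_lmops = NULL;

	status = posix_lock_file_conf(filp, request, &conflock);
	if (status == -EAGAIN)
		printk(KERN_DEBUG "blocked by pid %d (%Ld-%Ld)\n",
			conflock.fl_pid, conflock.fl_start, conflock.fl_end);
	return status;
}
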
diff --git a/fs/mpage.c b/fs/mpage.c
index e431cb3878d..9bf2eb30e6f 100644
--- a/fs/mpage.c
+++ b/fs/mpage.c
@@ -163,9 +163,19 @@ map_buffer_to_page(struct page *page, struct buffer_head *bh, int page_block)
} while (page_bh != head);
}
+/*
+ * This is the worker routine which does all the work of mapping the disk
+ * blocks, constructs the largest possible bios, and submits them for IO
+ * if the blocks are not contiguous on the disk.
+ *
+ * We pass a buffer_head back and forth and use its buffer_mapped() flag to
+ * represent the validity of its disk mapping and to decide when to do the next
+ * get_block() call.
+ */
static struct bio *
do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
- sector_t *last_block_in_bio, get_block_t get_block)
+ sector_t *last_block_in_bio, struct buffer_head *map_bh,
+ unsigned long *first_logical_block, get_block_t get_block)
{
struct inode *inode = page->mapping->host;
const unsigned blkbits = inode->i_blkbits;
@@ -173,33 +183,72 @@ do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
const unsigned blocksize = 1 << blkbits;
sector_t block_in_file;
sector_t last_block;
+ sector_t last_block_in_file;
sector_t blocks[MAX_BUF_PER_PAGE];
unsigned page_block;
unsigned first_hole = blocks_per_page;
struct block_device *bdev = NULL;
- struct buffer_head bh;
int length;
int fully_mapped = 1;
+ unsigned nblocks;
+ unsigned relative_block;
if (page_has_buffers(page))
goto confused;
block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
- last_block = (i_size_read(inode) + blocksize - 1) >> blkbits;
+ last_block = block_in_file + nr_pages * blocks_per_page;
+ last_block_in_file = (i_size_read(inode) + blocksize - 1) >> blkbits;
+ if (last_block > last_block_in_file)
+ last_block = last_block_in_file;
+ page_block = 0;
+
+ /*
+ * Map blocks using the result from the previous get_blocks call first.
+ */
+ nblocks = map_bh->b_size >> blkbits;
+ if (buffer_mapped(map_bh) && block_in_file > *first_logical_block &&
+ block_in_file < (*first_logical_block + nblocks)) {
+ unsigned map_offset = block_in_file - *first_logical_block;
+ unsigned last = nblocks - map_offset;
+
+ for (relative_block = 0; ; relative_block++) {
+ if (relative_block == last) {
+ clear_buffer_mapped(map_bh);
+ break;
+ }
+ if (page_block == blocks_per_page)
+ break;
+ blocks[page_block] = map_bh->b_blocknr + map_offset +
+ relative_block;
+ page_block++;
+ block_in_file++;
+ }
+ bdev = map_bh->b_bdev;
+ }
+
+ /*
+ * Then do more get_blocks calls until we are done with this page.
+ */
+ map_bh->b_page = page;
+ while (page_block < blocks_per_page) {
+ map_bh->b_state = 0;
+ map_bh->b_size = 0;
- bh.b_page = page;
- for (page_block = 0; page_block < blocks_per_page;
- page_block++, block_in_file++) {
- bh.b_state = 0;
if (block_in_file < last_block) {
- if (get_block(inode, block_in_file, &bh, 0))
+ map_bh->b_size = (last_block-block_in_file) << blkbits;
+ if (get_block(inode, block_in_file, map_bh, 0))
goto confused;
+ *first_logical_block = block_in_file;
}
- if (!buffer_mapped(&bh)) {
+ if (!buffer_mapped(map_bh)) {
fully_mapped = 0;
if (first_hole == blocks_per_page)
first_hole = page_block;
+ page_block++;
+ block_in_file++;
+ clear_buffer_mapped(map_bh);
continue;
}
@@ -209,8 +258,8 @@ do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
* we just collected from get_block into the page's buffers
* so readpage doesn't have to repeat the get_block call
*/
- if (buffer_uptodate(&bh)) {
- map_buffer_to_page(page, &bh, page_block);
+ if (buffer_uptodate(map_bh)) {
+ map_buffer_to_page(page, map_bh, page_block);
goto confused;
}
@@ -218,10 +267,20 @@ do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
goto confused; /* hole -> non-hole */
/* Contiguous blocks? */
- if (page_block && blocks[page_block-1] != bh.b_blocknr-1)
+ if (page_block && blocks[page_block-1] != map_bh->b_blocknr-1)
goto confused;
- blocks[page_block] = bh.b_blocknr;
- bdev = bh.b_bdev;
+ nblocks = map_bh->b_size >> blkbits;
+ for (relative_block = 0; ; relative_block++) {
+ if (relative_block == nblocks) {
+ clear_buffer_mapped(map_bh);
+ break;
+ } else if (page_block == blocks_per_page)
+ break;
+ blocks[page_block] = map_bh->b_blocknr+relative_block;
+ page_block++;
+ block_in_file++;
+ }
+ bdev = map_bh->b_bdev;
}
if (first_hole != blocks_per_page) {
@@ -260,7 +319,7 @@ alloc_new:
goto alloc_new;
}
- if (buffer_boundary(&bh) || (first_hole != blocks_per_page))
+ if (buffer_boundary(map_bh) || (first_hole != blocks_per_page))
bio = mpage_bio_submit(READ, bio);
else
*last_block_in_bio = blocks[blocks_per_page - 1];
@@ -331,7 +390,10 @@ mpage_readpages(struct address_space *mapping, struct list_head *pages,
unsigned page_idx;
sector_t last_block_in_bio = 0;
struct pagevec lru_pvec;
+ struct buffer_head map_bh;
+ unsigned long first_logical_block = 0;
+ clear_buffer_mapped(&map_bh);
pagevec_init(&lru_pvec, 0);
for (page_idx = 0; page_idx < nr_pages; page_idx++) {
struct page *page = list_entry(pages->prev, struct page, lru);
@@ -342,7 +404,9 @@ mpage_readpages(struct address_space *mapping, struct list_head *pages,
page->index, GFP_KERNEL)) {
bio = do_mpage_readpage(bio, page,
nr_pages - page_idx,
- &last_block_in_bio, get_block);
+ &last_block_in_bio, &map_bh,
+ &first_logical_block,
+ get_block);
if (!pagevec_add(&lru_pvec, page))
__pagevec_lru_add(&lru_pvec);
} else {
@@ -364,9 +428,12 @@ int mpage_readpage(struct page *page, get_block_t get_block)
{
struct bio *bio = NULL;
sector_t last_block_in_bio = 0;
+ struct buffer_head map_bh;
+ unsigned long first_logical_block = 0;
- bio = do_mpage_readpage(bio, page, 1,
- &last_block_in_bio, get_block);
+ clear_buffer_mapped(&map_bh);
+ bio = do_mpage_readpage(bio, page, 1, &last_block_in_bio,
+ &map_bh, &first_logical_block, get_block);
if (bio)
mpage_bio_submit(READ, bio);
return 0;
@@ -472,6 +539,7 @@ __mpage_writepage(struct bio *bio, struct page *page, get_block_t get_block,
for (page_block = 0; page_block < blocks_per_page; ) {
map_bh.b_state = 0;
+ map_bh.b_size = 1 << blkbits;
if (get_block(inode, block_in_file, &map_bh, 1))
goto confused;
if (buffer_new(&map_bh))
diff --git a/fs/namespace.c b/fs/namespace.c
index 71e75bcf4d2..e069a4c5e38 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -43,9 +43,9 @@ __cacheline_aligned_in_smp DEFINE_SPINLOCK(vfsmount_lock);
static int event;
-static struct list_head *mount_hashtable;
+static struct list_head *mount_hashtable __read_mostly;
static int hash_mask __read_mostly, hash_bits __read_mostly;
-static kmem_cache_t *mnt_cache;
+static kmem_cache_t *mnt_cache __read_mostly;
static struct rw_semaphore namespace_sem;
/* /sys/fs */
diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c
index 99d2cfbce86..90c95adc8c1 100644
--- a/fs/nfs/callback.c
+++ b/fs/nfs/callback.c
@@ -14,6 +14,7 @@
#include <linux/sunrpc/svc.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/nfs_fs.h>
+#include <linux/mutex.h>
#include <net/inet_sock.h>
@@ -31,7 +32,7 @@ struct nfs_callback_data {
};
static struct nfs_callback_data nfs_callback_info;
-static DECLARE_MUTEX(nfs_callback_sema);
+static DEFINE_MUTEX(nfs_callback_mutex);
static struct svc_program nfs4_callback_program;
unsigned int nfs_callback_set_tcpport;
@@ -95,7 +96,7 @@ int nfs_callback_up(void)
int ret = 0;
lock_kernel();
- down(&nfs_callback_sema);
+ mutex_lock(&nfs_callback_mutex);
if (nfs_callback_info.users++ || nfs_callback_info.pid != 0)
goto out;
init_completion(&nfs_callback_info.started);
@@ -121,7 +122,7 @@ int nfs_callback_up(void)
nfs_callback_info.serv = serv;
wait_for_completion(&nfs_callback_info.started);
out:
- up(&nfs_callback_sema);
+ mutex_unlock(&nfs_callback_mutex);
unlock_kernel();
return ret;
out_destroy:
@@ -139,7 +140,7 @@ int nfs_callback_down(void)
int ret = 0;
lock_kernel();
- down(&nfs_callback_sema);
+ mutex_lock(&nfs_callback_mutex);
nfs_callback_info.users--;
do {
if (nfs_callback_info.users != 0 || nfs_callback_info.pid == 0)
@@ -147,7 +148,7 @@ int nfs_callback_down(void)
if (kill_proc(nfs_callback_info.pid, SIGKILL, 1) < 0)
break;
} while (wait_for_completion_timeout(&nfs_callback_info.stopped, 5*HZ) == 0);
- up(&nfs_callback_sema);
+ mutex_unlock(&nfs_callback_mutex);
unlock_kernel();
return ret;
}
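The callback.c hunk above is one instance of the tree-wide pattern of replacing semaphores used as mutexes with the dedicated mutex type. A hedged before/after sketch of the pattern in isolation (illustrative names, not taken from any one file):

#include <linux/mutex.h>

/* before: static DECLARE_MUTEX(foo_sema); protected by down()/up() */
static DEFINE_MUTEX(foo_mutex);

static void foo_update(void)
{
        mutex_lock(&foo_mutex);
        /* ... critical section, may sleep ... */
        mutex_unlock(&foo_mutex);
}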
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index 5263b2864a4..dee49a0cb99 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -318,10 +318,9 @@ static int nfs_commit_write(struct file *file, struct page *page, unsigned offse
return status;
}
-static int nfs_invalidate_page(struct page *page, unsigned long offset)
+static void nfs_invalidate_page(struct page *page, unsigned long offset)
{
/* FIXME: we really should cancel any unstarted writes on this page */
- return 1;
}
static int nfs_release_page(struct page *page, gfp_t gfp)
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index 3961524fd4a..624ca7146b6 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -663,10 +663,8 @@ int nfs_init_readpagecache(void)
if (nfs_rdata_cachep == NULL)
return -ENOMEM;
- nfs_rdata_mempool = mempool_create(MIN_POOL_READ,
- mempool_alloc_slab,
- mempool_free_slab,
- nfs_rdata_cachep);
+ nfs_rdata_mempool = mempool_create_slab_pool(MIN_POOL_READ,
+ nfs_rdata_cachep);
if (nfs_rdata_mempool == NULL)
return -ENOMEM;
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 3f5225404c9..4cfada2cc09 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -1521,17 +1521,13 @@ int nfs_init_writepagecache(void)
if (nfs_wdata_cachep == NULL)
return -ENOMEM;
- nfs_wdata_mempool = mempool_create(MIN_POOL_WRITE,
- mempool_alloc_slab,
- mempool_free_slab,
- nfs_wdata_cachep);
+ nfs_wdata_mempool = mempool_create_slab_pool(MIN_POOL_WRITE,
+ nfs_wdata_cachep);
if (nfs_wdata_mempool == NULL)
return -ENOMEM;
- nfs_commit_mempool = mempool_create(MIN_POOL_COMMIT,
- mempool_alloc_slab,
- mempool_free_slab,
- nfs_wdata_cachep);
+ nfs_commit_mempool = mempool_create_slab_pool(MIN_POOL_COMMIT,
+ nfs_wdata_cachep);
if (nfs_commit_mempool == NULL)
return -ENOMEM;
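mempool_create_slab_pool() used above is shorthand for the removed long form: a mempool whose allocate and free callbacks are mempool_alloc_slab()/mempool_free_slab() on the given cache. A small sketch showing the equivalence (error handling as in the surrounding code; illustrative only):

#include <linux/mempool.h>

static int nfs_writecache_init_sketch(void)
{
        /* long form, as removed by this patch ... */
        nfs_wdata_mempool = mempool_create(MIN_POOL_WRITE,
                                           mempool_alloc_slab,
                                           mempool_free_slab,
                                           nfs_wdata_cachep);
        /* ... is equivalent to the shorthand the patch introduces */
        nfs_wdata_mempool = mempool_create_slab_pool(MIN_POOL_WRITE,
                                                     nfs_wdata_cachep);
        return nfs_wdata_mempool ? 0 : -ENOMEM;
}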
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index f6ab762bea9..47ec112b266 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -49,6 +49,7 @@
#include <linux/nfsd/state.h>
#include <linux/nfsd/xdr4.h>
#include <linux/namei.h>
+#include <linux/mutex.h>
#define NFSDDBG_FACILITY NFSDDBG_PROC
@@ -77,11 +78,11 @@ static void nfs4_set_recdir(char *recdir);
/* Locking:
*
- * client_sema:
+ * client_mutex:
* protects clientid_hashtbl[], clientstr_hashtbl[],
* unconfstr_hashtbl[], uncofid_hashtbl[].
*/
-static DECLARE_MUTEX(client_sema);
+static DEFINE_MUTEX(client_mutex);
static kmem_cache_t *stateowner_slab = NULL;
static kmem_cache_t *file_slab = NULL;
@@ -91,13 +92,13 @@ static kmem_cache_t *deleg_slab = NULL;
void
nfs4_lock_state(void)
{
- down(&client_sema);
+ mutex_lock(&client_mutex);
}
void
nfs4_unlock_state(void)
{
- up(&client_sema);
+ mutex_unlock(&client_mutex);
}
static inline u32
@@ -2749,37 +2750,31 @@ nfsd4_lock(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_lock
* Note: locks.c uses the BKL to protect the inode's lock list.
*/
- status = posix_lock_file(filp, &file_lock);
- dprintk("NFSD: nfsd4_lock: posix_lock_file status %d\n",status);
+ /* XXX?: Just to divert the locks_release_private at the start of
+ * locks_copy_lock: */
+ conflock.fl_ops = NULL;
+ conflock.fl_lmops = NULL;
+ status = posix_lock_file_conf(filp, &file_lock, &conflock);
+ dprintk("NFSD: nfsd4_lock: posix_lock_file_conf status %d\n",status);
switch (-status) {
case 0: /* success! */
update_stateid(&lock_stp->st_stateid);
memcpy(&lock->lk_resp_stateid, &lock_stp->st_stateid,
sizeof(stateid_t));
- goto out;
- case (EAGAIN):
- goto conflicting_lock;
+ break;
+ case (EAGAIN): /* conflock holds conflicting lock */
+ status = nfserr_denied;
+ dprintk("NFSD: nfsd4_lock: conflicting lock found!\n");
+ nfs4_set_lock_denied(&conflock, &lock->lk_denied);
+ break;
case (EDEADLK):
status = nfserr_deadlock;
- dprintk("NFSD: nfsd4_lock: posix_lock_file() failed! status %d\n",status);
- goto out;
+ break;
default:
- status = nfserrno(status);
- dprintk("NFSD: nfsd4_lock: posix_lock_file() failed! status %d\n",status);
- goto out;
- }
-
-conflicting_lock:
- dprintk("NFSD: nfsd4_lock: conflicting lock found!\n");
- status = nfserr_denied;
- /* XXX There is a race here. Future patch needed to provide
- * an atomic posix_lock_and_test_file
- */
- if (!posix_test_lock(filp, &file_lock, &conflock)) {
- status = nfserr_serverfault;
- goto out;
+ dprintk("NFSD: nfsd4_lock: posix_lock_file_conf() failed! status %d\n",status);
+ status = nfserr_resource;
+ break;
}
- nfs4_set_lock_denied(&conflock, &lock->lk_denied);
out:
if (status && lock->lk_is_new && lock_sop)
release_stateowner(lock_sop);
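The rewritten switch above folds the old conflicting_lock label into the EAGAIN case, since posix_lock_file_conf() now hands back the conflicting lock in conflock directly. The resulting mapping, restated as a sketch (the helper name is invented; it only summarises what the hunk already does):

static int nfsd4_lock_status_sketch(int status)
{
        switch (-status) {
        case 0:         return 0;               /* lock granted */
        case EAGAIN:    return nfserr_denied;   /* conflock holds the conflict */
        case EDEADLK:   return nfserr_deadlock;
        default:        return nfserr_resource;
        }
}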
diff --git a/fs/ntfs/logfile.c b/fs/ntfs/logfile.c
index 0fd70295cca..4af2ad1193e 100644
--- a/fs/ntfs/logfile.c
+++ b/fs/ntfs/logfile.c
@@ -515,10 +515,10 @@ BOOL ntfs_check_logfile(struct inode *log_vi, RESTART_PAGE_HEADER **rp)
log_page_size = PAGE_CACHE_SIZE;
log_page_mask = log_page_size - 1;
/*
- * Use generic_ffs() instead of ffs() to enable the compiler to
+ * Use ntfs_ffs() instead of ffs() to enable the compiler to
* optimize log_page_size and log_page_bits into constants.
*/
- log_page_bits = generic_ffs(log_page_size) - 1;
+ log_page_bits = ntfs_ffs(log_page_size) - 1;
size &= ~(s64)(log_page_size - 1);
/*
* Ensure the log file is big enough to store at least the two restart
diff --git a/fs/ntfs/mft.c b/fs/ntfs/mft.c
index 4e72bc7afdf..2438c00ec0c 100644
--- a/fs/ntfs/mft.c
+++ b/fs/ntfs/mft.c
@@ -2670,7 +2670,7 @@ mft_rec_already_initialized:
ni->name_len = 4;
ni->itype.index.block_size = 4096;
- ni->itype.index.block_size_bits = generic_ffs(4096) - 1;
+ ni->itype.index.block_size_bits = ntfs_ffs(4096) - 1;
ni->itype.index.collation_rule = COLLATION_FILE_NAME;
if (vol->cluster_size <= ni->itype.index.block_size) {
ni->itype.index.vcn_size = vol->cluster_size;
diff --git a/fs/ntfs/ntfs.h b/fs/ntfs/ntfs.h
index 0624c8ef4d9..166142960b5 100644
--- a/fs/ntfs/ntfs.h
+++ b/fs/ntfs/ntfs.h
@@ -132,4 +132,33 @@ extern int ntfs_ucstonls(const ntfs_volume *vol, const ntfschar *ins,
/* From fs/ntfs/upcase.c */
extern ntfschar *generate_default_upcase(void);
+static inline int ntfs_ffs(int x)
+{
+ int r = 1;
+
+ if (!x)
+ return 0;
+ if (!(x & 0xffff)) {
+ x >>= 16;
+ r += 16;
+ }
+ if (!(x & 0xff)) {
+ x >>= 8;
+ r += 8;
+ }
+ if (!(x & 0xf)) {
+ x >>= 4;
+ r += 4;
+ }
+ if (!(x & 3)) {
+ x >>= 2;
+ r += 2;
+ }
+ if (!(x & 1)) {
+ x >>= 1;
+ r += 1;
+ }
+ return r;
+}
+
#endif /* _LINUX_NTFS_H */
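ntfs_ffs() added above is the usual binary-search find-first-set: it returns the 1-based index of the least significant set bit and 0 for a zero argument, so ntfs_ffs(4096) - 1 yields the 12 expected for block_size_bits. A quick user-space cross-check against a naive loop (assumes the inline above is copied into an ordinary C file; not kernel code):

#include <assert.h>

/* naive 1-based find-first-set, used only as a reference */
static int ffs_ref(unsigned int x)
{
        int i;

        for (i = 0; i < 32; i++)
                if (x & (1u << i))
                        return i + 1;
        return 0;
}

int main(void)
{
        int v;

        for (v = 0; v < 1 << 16; v++)
                assert(ntfs_ffs(v) == ffs_ref(v));
        assert(ntfs_ffs(4096) == 13);   /* so block_size_bits == 12 above */
        return 0;
}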
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index bf931ba1d36..0d858d0b25b 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -540,7 +540,6 @@ bail:
* fs_count, map_bh, dio->rw == WRITE);
*/
static int ocfs2_direct_IO_get_blocks(struct inode *inode, sector_t iblock,
- unsigned long max_blocks,
struct buffer_head *bh_result, int create)
{
int ret;
@@ -548,6 +547,7 @@ static int ocfs2_direct_IO_get_blocks(struct inode *inode, sector_t iblock,
u64 p_blkno;
int contig_blocks;
unsigned char blocksize_bits;
+ unsigned long max_blocks = bh_result->b_size >> inode->i_blkbits;
if (!inode || !bh_result) {
mlog(ML_ERROR, "inode or bh_result is null\n");
diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c
index ae3440ca083..6a610ae5358 100644
--- a/fs/ocfs2/journal.c
+++ b/fs/ocfs2/journal.c
@@ -377,7 +377,7 @@ int ocfs2_journal_access(struct ocfs2_journal_handle *handle,
BUG_ON(!bh);
BUG_ON(!(handle->flags & OCFS2_HANDLE_STARTED));
- mlog_entry("bh->b_blocknr=%llu, type=%d (\"%s\"), bh->b_size = %hu\n",
+ mlog_entry("bh->b_blocknr=%llu, type=%d (\"%s\"), bh->b_size = %zu\n",
(unsigned long long)bh->b_blocknr, type,
(type == OCFS2_JOURNAL_ACCESS_CREATE) ?
"OCFS2_JOURNAL_ACCESS_CREATE" :
@@ -582,7 +582,8 @@ int ocfs2_journal_init(struct ocfs2_journal *journal, int *dirty)
}
mlog(0, "inode->i_size = %lld\n", inode->i_size);
- mlog(0, "inode->i_blocks = %lu\n", inode->i_blocks);
+ mlog(0, "inode->i_blocks = %llu\n",
+ (unsigned long long)inode->i_blocks);
mlog(0, "inode->ip_clusters = %u\n", OCFS2_I(inode)->ip_clusters);
/* call the kernels journal init function now */
@@ -850,8 +851,9 @@ static int ocfs2_force_read_journal(struct inode *inode)
memset(bhs, 0, sizeof(struct buffer_head *) * CONCURRENT_JOURNAL_FILL);
- mlog(0, "Force reading %lu blocks\n",
- (inode->i_blocks >> (inode->i_sb->s_blocksize_bits - 9)));
+ mlog(0, "Force reading %llu blocks\n",
+ (unsigned long long)(inode->i_blocks >>
+ (inode->i_sb->s_blocksize_bits - 9)));
v_blkno = 0;
while (v_blkno <
diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
index 274f61d0cda..0673862c8bd 100644
--- a/fs/ocfs2/namei.c
+++ b/fs/ocfs2/namei.c
@@ -1444,8 +1444,9 @@ static int ocfs2_create_symlink_data(struct ocfs2_super *osb,
* write i_size + 1 bytes. */
blocks = (bytes_left + sb->s_blocksize - 1) >> sb->s_blocksize_bits;
- mlog_entry("i_blocks = %lu, i_size = %llu, blocks = %d\n",
- inode->i_blocks, i_size_read(inode), blocks);
+ mlog_entry("i_blocks = %llu, i_size = %llu, blocks = %d\n",
+ (unsigned long long)inode->i_blocks,
+ i_size_read(inode), blocks);
/* Sanity check -- make sure we're going to fit. */
if (bytes_left >
diff --git a/fs/partitions/devfs.c b/fs/partitions/devfs.c
index 87f50444fd3..3f0a780c9ce 100644
--- a/fs/partitions/devfs.c
+++ b/fs/partitions/devfs.c
@@ -6,7 +6,7 @@
#include <linux/vmalloc.h>
#include <linux/genhd.h>
#include <linux/bitops.h>
-#include <asm/semaphore.h>
+#include <linux/mutex.h>
struct unique_numspace {
@@ -16,7 +16,7 @@ struct unique_numspace {
struct semaphore mutex;
};
-static DECLARE_MUTEX(numspace_mutex);
+static DEFINE_MUTEX(numspace_mutex);
static int expand_numspace(struct unique_numspace *s)
{
@@ -48,7 +48,7 @@ static int alloc_unique_number(struct unique_numspace *s)
{
int rval = 0;
- down(&numspace_mutex);
+ mutex_lock(&numspace_mutex);
if (s->num_free < 1)
rval = expand_numspace(s);
if (!rval) {
@@ -56,7 +56,7 @@ static int alloc_unique_number(struct unique_numspace *s)
--s->num_free;
__set_bit(rval, s->bits);
}
- up(&numspace_mutex);
+ mutex_unlock(&numspace_mutex);
return rval;
}
@@ -66,11 +66,11 @@ static void dealloc_unique_number(struct unique_numspace *s, int number)
int old_val;
if (number >= 0) {
- down(&numspace_mutex);
+ mutex_lock(&numspace_mutex);
old_val = __test_and_clear_bit(number, s->bits);
if (old_val)
++s->num_free;
- up(&numspace_mutex);
+ mutex_unlock(&numspace_mutex);
}
}
diff --git a/fs/pipe.c b/fs/pipe.c
index d976866a115..4384c929094 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -675,7 +675,7 @@ fail_page:
return NULL;
}
-static struct vfsmount *pipe_mnt;
+static struct vfsmount *pipe_mnt __read_mostly;
static int pipefs_delete_dentry(struct dentry *dentry)
{
return 1;
diff --git a/fs/proc/array.c b/fs/proc/array.c
index 7eb1bd7f800..7a76ad57023 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -330,7 +330,6 @@ static int do_task_stat(struct task_struct *task, char * buffer, int whole)
unsigned long min_flt = 0, maj_flt = 0;
cputime_t cutime, cstime, utime, stime;
unsigned long rsslim = 0;
- DEFINE_KTIME(it_real_value);
struct task_struct *t;
char tcomm[sizeof(task->comm)];
@@ -386,7 +385,6 @@ static int do_task_stat(struct task_struct *task, char * buffer, int whole)
utime = cputime_add(utime, task->signal->utime);
stime = cputime_add(stime, task->signal->stime);
}
- it_real_value = task->signal->real_timer.expires;
}
ppid = pid_alive(task) ? task->group_leader->real_parent->tgid : 0;
read_unlock(&tasklist_lock);
@@ -413,7 +411,7 @@ static int do_task_stat(struct task_struct *task, char * buffer, int whole)
start_time = nsec_to_clock_t(start_time);
res = sprintf(buffer,"%d (%s) %c %d %d %d %d %d %lu %lu \
-%lu %lu %lu %lu %lu %ld %ld %ld %ld %d %ld %llu %lu %ld %lu %lu %lu %lu %lu \
+%lu %lu %lu %lu %lu %ld %ld %ld %ld %d 0 %llu %lu %ld %lu %lu %lu %lu %lu \
%lu %lu %lu %lu %lu %lu %lu %lu %d %d %lu %lu\n",
task->pid,
tcomm,
@@ -435,7 +433,6 @@ static int do_task_stat(struct task_struct *task, char * buffer, int whole)
priority,
nice,
num_threads,
- (long) ktime_to_clock_t(it_real_value),
start_time,
vsize,
mm ? get_mm_rss(mm) : 0,
diff --git a/fs/proc/generic.c b/fs/proc/generic.c
index 20e5c4509a4..47b7a20d45e 100644
--- a/fs/proc/generic.c
+++ b/fs/proc/generic.c
@@ -19,6 +19,7 @@
#include <linux/idr.h>
#include <linux/namei.h>
#include <linux/bitops.h>
+#include <linux/spinlock.h>
#include <asm/uaccess.h>
#include "internal.h"
@@ -29,6 +30,8 @@ static ssize_t proc_file_write(struct file *file, const char __user *buffer,
size_t count, loff_t *ppos);
static loff_t proc_file_lseek(struct file *, loff_t, int);
+DEFINE_SPINLOCK(proc_subdir_lock);
+
int proc_match(int len, const char *name, struct proc_dir_entry *de)
{
if (de->namelen != len)
@@ -277,7 +280,9 @@ static int xlate_proc_name(const char *name,
const char *cp = name, *next;
struct proc_dir_entry *de;
int len;
+ int rtn = 0;
+ spin_lock(&proc_subdir_lock);
de = &proc_root;
while (1) {
next = strchr(cp, '/');
@@ -289,13 +294,17 @@ static int xlate_proc_name(const char *name,
if (proc_match(len, cp, de))
break;
}
- if (!de)
- return -ENOENT;
+ if (!de) {
+ rtn = -ENOENT;
+ goto out;
+ }
cp += len + 1;
}
*residual = cp;
*ret = de;
- return 0;
+out:
+ spin_unlock(&proc_subdir_lock);
+ return rtn;
}
static DEFINE_IDR(proc_inum_idr);
@@ -380,6 +389,7 @@ struct dentry *proc_lookup(struct inode * dir, struct dentry *dentry, struct nam
int error = -ENOENT;
lock_kernel();
+ spin_lock(&proc_subdir_lock);
de = PDE(dir);
if (de) {
for (de = de->subdir; de ; de = de->next) {
@@ -388,12 +398,15 @@ struct dentry *proc_lookup(struct inode * dir, struct dentry *dentry, struct nam
if (!memcmp(dentry->d_name.name, de->name, de->namelen)) {
unsigned int ino = de->low_ino;
+ spin_unlock(&proc_subdir_lock);
error = -EINVAL;
inode = proc_get_inode(dir->i_sb, ino, de);
+ spin_lock(&proc_subdir_lock);
break;
}
}
}
+ spin_unlock(&proc_subdir_lock);
unlock_kernel();
if (inode) {
@@ -447,11 +460,13 @@ int proc_readdir(struct file * filp,
filp->f_pos++;
/* fall through */
default:
+ spin_lock(&proc_subdir_lock);
de = de->subdir;
i -= 2;
for (;;) {
if (!de) {
ret = 1;
+ spin_unlock(&proc_subdir_lock);
goto out;
}
if (!i)
@@ -461,12 +476,16 @@ int proc_readdir(struct file * filp,
}
do {
+ /* filldir passes info to user space */
+ spin_unlock(&proc_subdir_lock);
if (filldir(dirent, de->name, de->namelen, filp->f_pos,
de->low_ino, de->mode >> 12) < 0)
goto out;
+ spin_lock(&proc_subdir_lock);
filp->f_pos++;
de = de->next;
} while (de);
+ spin_unlock(&proc_subdir_lock);
}
ret = 1;
out: unlock_kernel();
@@ -500,9 +519,13 @@ static int proc_register(struct proc_dir_entry * dir, struct proc_dir_entry * dp
if (i == 0)
return -EAGAIN;
dp->low_ino = i;
+
+ spin_lock(&proc_subdir_lock);
dp->next = dir->subdir;
dp->parent = dir;
dir->subdir = dp;
+ spin_unlock(&proc_subdir_lock);
+
if (S_ISDIR(dp->mode)) {
if (dp->proc_iops == NULL) {
dp->proc_fops = &proc_dir_operations;
@@ -694,6 +717,8 @@ void remove_proc_entry(const char *name, struct proc_dir_entry *parent)
if (!parent && xlate_proc_name(name, &parent, &fn) != 0)
goto out;
len = strlen(fn);
+
+ spin_lock(&proc_subdir_lock);
for (p = &parent->subdir; *p; p=&(*p)->next ) {
if (!proc_match(len, fn, *p))
continue;
@@ -714,6 +739,7 @@ void remove_proc_entry(const char *name, struct proc_dir_entry *parent)
}
break;
}
+ spin_unlock(&proc_subdir_lock);
out:
return;
}
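The readdir changes above follow a standard pattern: proc_subdir_lock is a spinlock, and filldir() copies data to user space and may sleep, so the lock is dropped around the callback and re-acquired before the list walk continues. The same shape in isolation (sketch only; "emit_entry" stands in for filldir):

static int walk_subdirs_sketch(struct proc_dir_entry *dir,
                               int (*emit_entry)(struct proc_dir_entry *))
{
        struct proc_dir_entry *de;

        spin_lock(&proc_subdir_lock);
        for (de = dir->subdir; de; de = de->next) {
                spin_unlock(&proc_subdir_lock); /* callback may sleep */
                if (emit_entry(de) < 0)
                        return -1;              /* stop, lock already dropped */
                spin_lock(&proc_subdir_lock);   /* retake before de->next */
        }
        spin_unlock(&proc_subdir_lock);
        return 0;
}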
diff --git a/fs/proc/proc_devtree.c b/fs/proc/proc_devtree.c
index 9bdd077d6f5..596b4b4f1cc 100644
--- a/fs/proc/proc_devtree.c
+++ b/fs/proc/proc_devtree.c
@@ -136,9 +136,11 @@ void proc_device_tree_add_node(struct device_node *np,
* properties are quite unimportant for us though, thus we
* simply "skip" them here, but we do have to check.
*/
+ spin_lock(&proc_subdir_lock);
for (ent = de->subdir; ent != NULL; ent = ent->next)
if (!strcmp(ent->name, pp->name))
break;
+ spin_unlock(&proc_subdir_lock);
if (ent != NULL) {
printk(KERN_WARNING "device-tree: property \"%s\" name"
" conflicts with node in %s\n", pp->name,
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
index d60f6238c66..9857e50f85e 100644
--- a/fs/reiserfs/inode.c
+++ b/fs/reiserfs/inode.c
@@ -466,7 +466,6 @@ static int reiserfs_get_block_create_0(struct inode *inode, sector_t block,
direct_IO request. */
static int reiserfs_get_blocks_direct_io(struct inode *inode,
sector_t iblock,
- unsigned long max_blocks,
struct buffer_head *bh_result,
int create)
{
@@ -2793,7 +2792,7 @@ static int invalidatepage_can_drop(struct inode *inode, struct buffer_head *bh)
}
/* clm -- taken from fs/buffer.c:block_invalidate_page */
-static int reiserfs_invalidatepage(struct page *page, unsigned long offset)
+static void reiserfs_invalidatepage(struct page *page, unsigned long offset)
{
struct buffer_head *head, *bh, *next;
struct inode *inode = page->mapping->host;
@@ -2832,10 +2831,12 @@ static int reiserfs_invalidatepage(struct page *page, unsigned long offset)
* The get_block cached value has been unconditionally invalidated,
* so real IO is not possible anymore.
*/
- if (!offset && ret)
+ if (!offset && ret) {
ret = try_to_release_page(page, 0);
+ /* maybe should BUG_ON(!ret); - neilb */
+ }
out:
- return ret;
+ return;
}
static int reiserfs_set_page_dirty(struct page *page)
diff --git a/fs/reiserfs/prints.c b/fs/reiserfs/prints.c
index 78b40621b88..27bd3a1df2a 100644
--- a/fs/reiserfs/prints.c
+++ b/fs/reiserfs/prints.c
@@ -143,7 +143,7 @@ static void sprintf_buffer_head(char *buf, struct buffer_head *bh)
char b[BDEVNAME_SIZE];
sprintf(buf,
- "dev %s, size %d, blocknr %llu, count %d, state 0x%lx, page %p, (%s, %s, %s)",
+ "dev %s, size %zd, blocknr %llu, count %d, state 0x%lx, page %p, (%s, %s, %s)",
bdevname(bh->b_bdev, b), bh->b_size,
(unsigned long long)bh->b_blocknr, atomic_read(&(bh->b_count)),
bh->b_state, bh->b_page,
diff --git a/fs/super.c b/fs/super.c
index 8743e9bbb29..a66f66bb804 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -37,6 +37,7 @@
#include <linux/writeback.h> /* for the emergency remount stuff */
#include <linux/idr.h>
#include <linux/kobject.h>
+#include <linux/mutex.h>
#include <asm/uaccess.h>
@@ -380,9 +381,9 @@ restart:
void sync_filesystems(int wait)
{
struct super_block *sb;
- static DECLARE_MUTEX(mutex);
+ static DEFINE_MUTEX(mutex);
- down(&mutex); /* Could be down_interruptible */
+ mutex_lock(&mutex); /* Could be down_interruptible */
spin_lock(&sb_lock);
list_for_each_entry(sb, &super_blocks, s_list) {
if (!sb->s_op->sync_fs)
@@ -411,7 +412,7 @@ restart:
goto restart;
}
spin_unlock(&sb_lock);
- up(&mutex);
+ mutex_unlock(&mutex);
}
/**
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
index 97fc056130e..c02f7c5b746 100644
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -1310,20 +1310,21 @@ xfs_get_block(
struct buffer_head *bh_result,
int create)
{
- return __xfs_get_block(inode, iblock, 0, bh_result,
- create, 0, BMAPI_WRITE);
+ return __xfs_get_block(inode, iblock,
+ bh_result->b_size >> inode->i_blkbits,
+ bh_result, create, 0, BMAPI_WRITE);
}
STATIC int
xfs_get_blocks_direct(
struct inode *inode,
sector_t iblock,
- unsigned long max_blocks,
struct buffer_head *bh_result,
int create)
{
- return __xfs_get_block(inode, iblock, max_blocks, bh_result,
- create, 1, BMAPI_WRITE|BMAPI_DIRECT);
+ return __xfs_get_block(inode, iblock,
+ bh_result->b_size >> inode->i_blkbits,
+ bh_result, create, 1, BMAPI_WRITE|BMAPI_DIRECT);
}
STATIC void
@@ -1442,14 +1443,14 @@ xfs_vm_readpages(
return mpage_readpages(mapping, pages, nr_pages, xfs_get_block);
}
-STATIC int
+STATIC void
xfs_vm_invalidatepage(
struct page *page,
unsigned long offset)
{
xfs_page_trace(XFS_INVALIDPAGE_ENTER,
page->mapping->host, page, offset);
- return block_invalidatepage(page, offset);
+ block_invalidatepage(page, offset);
}
struct address_space_operations xfs_address_space_operations = {
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c
index 8355faf8ffd..1884300417e 100644
--- a/fs/xfs/linux-2.6/xfs_super.c
+++ b/fs/xfs/linux-2.6/xfs_super.c
@@ -375,9 +375,8 @@ xfs_init_zones(void)
if (!xfs_ioend_zone)
goto out_destroy_vnode_zone;
- xfs_ioend_pool = mempool_create(4 * MAX_BUF_PER_PAGE,
- mempool_alloc_slab, mempool_free_slab,
- xfs_ioend_zone);
+ xfs_ioend_pool = mempool_create_slab_pool(4 * MAX_BUF_PER_PAGE,
+ xfs_ioend_zone);
if (!xfs_ioend_pool)
goto out_free_ioend_zone;
return 0;
diff --git a/include/asm-alpha/bitops.h b/include/asm-alpha/bitops.h
index 302201f1a09..3f88715e811 100644
--- a/include/asm-alpha/bitops.h
+++ b/include/asm-alpha/bitops.h
@@ -261,7 +261,7 @@ static inline unsigned long ffz_b(unsigned long x)
static inline unsigned long ffz(unsigned long word)
{
-#if defined(__alpha_cix__) && defined(__alpha_fix__)
+#if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67)
/* Whee. EV67 can calculate it directly. */
return __kernel_cttz(~word);
#else
@@ -281,7 +281,7 @@ static inline unsigned long ffz(unsigned long word)
*/
static inline unsigned long __ffs(unsigned long word)
{
-#if defined(__alpha_cix__) && defined(__alpha_fix__)
+#if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67)
/* Whee. EV67 can calculate it directly. */
return __kernel_cttz(word);
#else
@@ -313,20 +313,20 @@ static inline int ffs(int word)
/*
* fls: find last bit set.
*/
-#if defined(__alpha_cix__) && defined(__alpha_fix__)
+#if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67)
static inline int fls(int word)
{
return 64 - __kernel_ctlz(word & 0xffffffff);
}
#else
-#define fls generic_fls
+#include <asm-generic/bitops/fls.h>
#endif
-#define fls64 generic_fls64
+#include <asm-generic/bitops/fls64.h>
/* Compute powers of two for the given integer. */
static inline long floor_log2(unsigned long word)
{
-#if defined(__alpha_cix__) && defined(__alpha_fix__)
+#if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67)
return 63 - __kernel_ctlz(word);
#else
long bit;
@@ -347,7 +347,7 @@ static inline long ceil_log2(unsigned long word)
* of bits set) of a N-bit word
*/
-#if defined(__alpha_cix__) && defined(__alpha_fix__)
+#if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67)
/* Whee. EV67 can calculate it directly. */
static inline unsigned long hweight64(unsigned long w)
{
@@ -358,112 +358,12 @@ static inline unsigned long hweight64(unsigned long w)
#define hweight16(x) (unsigned int) hweight64((x) & 0xfffful)
#define hweight8(x) (unsigned int) hweight64((x) & 0xfful)
#else
-static inline unsigned long hweight64(unsigned long w)
-{
- unsigned long result;
- for (result = 0; w ; w >>= 1)
- result += (w & 1);
- return result;
-}
-
-#define hweight32(x) generic_hweight32(x)
-#define hweight16(x) generic_hweight16(x)
-#define hweight8(x) generic_hweight8(x)
+#include <asm-generic/bitops/hweight.h>
#endif
#endif /* __KERNEL__ */
-/*
- * Find next zero bit in a bitmap reasonably efficiently..
- */
-static inline unsigned long
-find_next_zero_bit(const void *addr, unsigned long size, unsigned long offset)
-{
- const unsigned long *p = addr;
- unsigned long result = offset & ~63UL;
- unsigned long tmp;
-
- p += offset >> 6;
- if (offset >= size)
- return size;
- size -= result;
- offset &= 63UL;
- if (offset) {
- tmp = *(p++);
- tmp |= ~0UL >> (64-offset);
- if (size < 64)
- goto found_first;
- if (~tmp)
- goto found_middle;
- size -= 64;
- result += 64;
- }
- while (size & ~63UL) {
- if (~(tmp = *(p++)))
- goto found_middle;
- result += 64;
- size -= 64;
- }
- if (!size)
- return result;
- tmp = *p;
- found_first:
- tmp |= ~0UL << size;
- if (tmp == ~0UL) /* Are any bits zero? */
- return result + size; /* Nope. */
- found_middle:
- return result + ffz(tmp);
-}
-
-/*
- * Find next one bit in a bitmap reasonably efficiently.
- */
-static inline unsigned long
-find_next_bit(const void * addr, unsigned long size, unsigned long offset)
-{
- const unsigned long *p = addr;
- unsigned long result = offset & ~63UL;
- unsigned long tmp;
-
- p += offset >> 6;
- if (offset >= size)
- return size;
- size -= result;
- offset &= 63UL;
- if (offset) {
- tmp = *(p++);
- tmp &= ~0UL << offset;
- if (size < 64)
- goto found_first;
- if (tmp)
- goto found_middle;
- size -= 64;
- result += 64;
- }
- while (size & ~63UL) {
- if ((tmp = *(p++)))
- goto found_middle;
- result += 64;
- size -= 64;
- }
- if (!size)
- return result;
- tmp = *p;
- found_first:
- tmp &= ~0UL >> (64 - size);
- if (!tmp)
- return result + size;
- found_middle:
- return result + __ffs(tmp);
-}
-
-/*
- * The optimizer actually does good code for this case.
- */
-#define find_first_zero_bit(addr, size) \
- find_next_zero_bit((addr), (size), 0)
-#define find_first_bit(addr, size) \
- find_next_bit((addr), (size), 0)
+#include <asm-generic/bitops/find.h>
#ifdef __KERNEL__
@@ -487,21 +387,12 @@ sched_find_first_bit(unsigned long b[3])
return __ffs(b0) + ofs;
}
+#include <asm-generic/bitops/ext2-non-atomic.h>
-#define ext2_set_bit __test_and_set_bit
#define ext2_set_bit_atomic(l,n,a) test_and_set_bit(n,a)
-#define ext2_clear_bit __test_and_clear_bit
#define ext2_clear_bit_atomic(l,n,a) test_and_clear_bit(n,a)
-#define ext2_test_bit test_bit
-#define ext2_find_first_zero_bit find_first_zero_bit
-#define ext2_find_next_zero_bit find_next_zero_bit
-
-/* Bitmap functions for the minix filesystem. */
-#define minix_test_and_set_bit(nr,addr) __test_and_set_bit(nr,addr)
-#define minix_set_bit(nr,addr) __set_bit(nr,addr)
-#define minix_test_and_clear_bit(nr,addr) __test_and_clear_bit(nr,addr)
-#define minix_test_bit(nr,addr) test_bit(nr,addr)
-#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size)
+
+#include <asm-generic/bitops/minix.h>
#endif /* __KERNEL__ */
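On pre-EV67 Alphas the open-coded, bit-at-a-time hweight64() above is dropped in favour of the shared asm-generic/bitops/hweight.h fallbacks. For reference, a portable population count in the same spirit as those fallbacks (illustrative only, not the exact generic code):

/* classic parallel popcount for a 32-bit word */
static inline unsigned int hweight32_sketch(unsigned int w)
{
        w = w - ((w >> 1) & 0x55555555);
        w = (w & 0x33333333) + ((w >> 2) & 0x33333333);
        w = (w + (w >> 4)) & 0x0f0f0f0f;
        return (w * 0x01010101) >> 24;
}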
diff --git a/include/asm-alpha/fpu.h b/include/asm-alpha/fpu.h
index c203fc2fa5c..ecb17a72acc 100644
--- a/include/asm-alpha/fpu.h
+++ b/include/asm-alpha/fpu.h
@@ -130,7 +130,7 @@ rdfpcr(void)
{
unsigned long tmp, ret;
-#if defined(__alpha_cix__) || defined(__alpha_fix__)
+#if defined(CONFIG_ALPHA_EV6) || defined(CONFIG_ALPHA_EV67)
__asm__ __volatile__ (
"ftoit $f0,%0\n\t"
"mf_fpcr $f0\n\t"
@@ -154,7 +154,7 @@ wrfpcr(unsigned long val)
{
unsigned long tmp;
-#if defined(__alpha_cix__) || defined(__alpha_fix__)
+#if defined(CONFIG_ALPHA_EV6) || defined(CONFIG_ALPHA_EV67)
__asm__ __volatile__ (
"ftoit $f0,%0\n\t"
"itoft %1,$f0\n\t"
diff --git a/include/asm-arm/bitops.h b/include/asm-arm/bitops.h
index d02de721ecc..0ac54b1a8ba 100644
--- a/include/asm-arm/bitops.h
+++ b/include/asm-arm/bitops.h
@@ -117,65 +117,7 @@ ____atomic_test_and_change_bit(unsigned int bit, volatile unsigned long *p)
return res & mask;
}
-/*
- * Now the non-atomic variants. We let the compiler handle all
- * optimisations for these. These are all _native_ endian.
- */
-static inline void __set_bit(int nr, volatile unsigned long *p)
-{
- p[nr >> 5] |= (1UL << (nr & 31));
-}
-
-static inline void __clear_bit(int nr, volatile unsigned long *p)
-{
- p[nr >> 5] &= ~(1UL << (nr & 31));
-}
-
-static inline void __change_bit(int nr, volatile unsigned long *p)
-{
- p[nr >> 5] ^= (1UL << (nr & 31));
-}
-
-static inline int __test_and_set_bit(int nr, volatile unsigned long *p)
-{
- unsigned long oldval, mask = 1UL << (nr & 31);
-
- p += nr >> 5;
-
- oldval = *p;
- *p = oldval | mask;
- return oldval & mask;
-}
-
-static inline int __test_and_clear_bit(int nr, volatile unsigned long *p)
-{
- unsigned long oldval, mask = 1UL << (nr & 31);
-
- p += nr >> 5;
-
- oldval = *p;
- *p = oldval & ~mask;
- return oldval & mask;
-}
-
-static inline int __test_and_change_bit(int nr, volatile unsigned long *p)
-{
- unsigned long oldval, mask = 1UL << (nr & 31);
-
- p += nr >> 5;
-
- oldval = *p;
- *p = oldval ^ mask;
- return oldval & mask;
-}
-
-/*
- * This routine doesn't need to be atomic.
- */
-static inline int __test_bit(int nr, const volatile unsigned long * p)
-{
- return (p[nr >> 5] >> (nr & 31)) & 1UL;
-}
+#include <asm-generic/bitops/non-atomic.h>
/*
* A note about Endian-ness.
@@ -261,7 +203,6 @@ extern int _find_next_bit_be(const unsigned long *p, int size, int offset);
#define test_and_set_bit(nr,p) ATOMIC_BITOP_LE(test_and_set_bit,nr,p)
#define test_and_clear_bit(nr,p) ATOMIC_BITOP_LE(test_and_clear_bit,nr,p)
#define test_and_change_bit(nr,p) ATOMIC_BITOP_LE(test_and_change_bit,nr,p)
-#define test_bit(nr,p) __test_bit(nr,p)
#define find_first_zero_bit(p,sz) _find_first_zero_bit_le(p,sz)
#define find_next_zero_bit(p,sz,off) _find_next_zero_bit_le(p,sz,off)
#define find_first_bit(p,sz) _find_first_bit_le(p,sz)
@@ -280,7 +221,6 @@ extern int _find_next_bit_be(const unsigned long *p, int size, int offset);
#define test_and_set_bit(nr,p) ATOMIC_BITOP_BE(test_and_set_bit,nr,p)
#define test_and_clear_bit(nr,p) ATOMIC_BITOP_BE(test_and_clear_bit,nr,p)
#define test_and_change_bit(nr,p) ATOMIC_BITOP_BE(test_and_change_bit,nr,p)
-#define test_bit(nr,p) __test_bit(nr,p)
#define find_first_zero_bit(p,sz) _find_first_zero_bit_be(p,sz)
#define find_next_zero_bit(p,sz,off) _find_next_zero_bit_be(p,sz,off)
#define find_first_bit(p,sz) _find_first_bit_be(p,sz)
@@ -292,57 +232,41 @@ extern int _find_next_bit_be(const unsigned long *p, int size, int offset);
#if __LINUX_ARM_ARCH__ < 5
-/*
- * ffz = Find First Zero in word. Undefined if no zero exists,
- * so code should check against ~0UL first..
- */
-static inline unsigned long ffz(unsigned long word)
-{
- int k;
-
- word = ~word;
- k = 31;
- if (word & 0x0000ffff) { k -= 16; word <<= 16; }
- if (word & 0x00ff0000) { k -= 8; word <<= 8; }
- if (word & 0x0f000000) { k -= 4; word <<= 4; }
- if (word & 0x30000000) { k -= 2; word <<= 2; }
- if (word & 0x40000000) { k -= 1; }
- return k;
-}
-
-/*
- * ffz = Find First Zero in word. Undefined if no zero exists,
- * so code should check against ~0UL first..
- */
-static inline unsigned long __ffs(unsigned long word)
-{
- int k;
-
- k = 31;
- if (word & 0x0000ffff) { k -= 16; word <<= 16; }
- if (word & 0x00ff0000) { k -= 8; word <<= 8; }
- if (word & 0x0f000000) { k -= 4; word <<= 4; }
- if (word & 0x30000000) { k -= 2; word <<= 2; }
- if (word & 0x40000000) { k -= 1; }
- return k;
-}
-
-/*
- * fls: find last bit set.
- */
+#include <asm-generic/bitops/ffz.h>
+#include <asm-generic/bitops/__ffs.h>
+#include <asm-generic/bitops/fls.h>
+#include <asm-generic/bitops/ffs.h>
-#define fls(x) generic_fls(x)
-#define fls64(x) generic_fls64(x)
-
-/*
- * ffs: find first bit set. This is defined the same way as
- * the libc and compiler builtin ffs routines, therefore
- * differs in spirit from the above ffz (man ffs).
- */
+#else
-#define ffs(x) generic_ffs(x)
+static inline int constant_fls(int x)
+{
+ int r = 32;
-#else
+ if (!x)
+ return 0;
+ if (!(x & 0xffff0000u)) {
+ x <<= 16;
+ r -= 16;
+ }
+ if (!(x & 0xff000000u)) {
+ x <<= 8;
+ r -= 8;
+ }
+ if (!(x & 0xf0000000u)) {
+ x <<= 4;
+ r -= 4;
+ }
+ if (!(x & 0xc0000000u)) {
+ x <<= 2;
+ r -= 2;
+ }
+ if (!(x & 0x80000000u)) {
+ x <<= 1;
+ r -= 1;
+ }
+ return r;
+}
/*
* On ARMv5 and above those functions can be implemented around
@@ -350,39 +274,18 @@ static inline unsigned long __ffs(unsigned long word)
*/
#define fls(x) \
- ( __builtin_constant_p(x) ? generic_fls(x) : \
+ ( __builtin_constant_p(x) ? constant_fls(x) : \
({ int __r; asm("clz\t%0, %1" : "=r"(__r) : "r"(x) : "cc"); 32-__r; }) )
-#define fls64(x) generic_fls64(x)
#define ffs(x) ({ unsigned long __t = (x); fls(__t & -__t); })
#define __ffs(x) (ffs(x) - 1)
#define ffz(x) __ffs( ~(x) )
#endif
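constant_fls() above is the compile-time path for fls() on ARMv5+, with the clz instruction handling the runtime case. A quick user-space cross-check of the constant path against a naive loop (assumes the function body above is copied into an ordinary C file; not kernel code):

#include <assert.h>

/* naive "find last set": 32..1 for the highest set bit, 0 for 0 */
static int fls_ref(unsigned int x)
{
        int r = 0;

        while (x) {
                r++;
                x >>= 1;
        }
        return r;
}

int main(void)
{
        unsigned int v;

        for (v = 0; v < 1u << 16; v++)
                assert(constant_fls((int)v) == fls_ref(v));
        assert(constant_fls(1 << 20) == 21);
        return 0;
}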
-/*
- * Find first bit set in a 168-bit bitmap, where the first
- * 128 bits are unlikely to be set.
- */
-static inline int sched_find_first_bit(const unsigned long *b)
-{
- unsigned long v;
- unsigned int off;
-
- for (off = 0; v = b[off], off < 4; off++) {
- if (unlikely(v))
- break;
- }
- return __ffs(v) + off * 32;
-}
-
-/*
- * hweightN: returns the hamming weight (i.e. the number
- * of bits set) of a N-bit word
- */
+#include <asm-generic/bitops/fls64.h>
-#define hweight32(x) generic_hweight32(x)
-#define hweight16(x) generic_hweight16(x)
-#define hweight8(x) generic_hweight8(x)
+#include <asm-generic/bitops/sched.h>
+#include <asm-generic/bitops/hweight.h>
/*
* Ext2 is defined to use little-endian byte ordering.
@@ -397,7 +300,7 @@ static inline int sched_find_first_bit(const unsigned long *b)
#define ext2_clear_bit_atomic(lock,nr,p) \
test_and_clear_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p))
#define ext2_test_bit(nr,p) \
- __test_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p))
+ test_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p))
#define ext2_find_first_zero_bit(p,sz) \
_find_first_zero_bit_le(p,sz)
#define ext2_find_next_zero_bit(p,sz,off) \
@@ -410,7 +313,7 @@ static inline int sched_find_first_bit(const unsigned long *b)
#define minix_set_bit(nr,p) \
__set_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p))
#define minix_test_bit(nr,p) \
- __test_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p))
+ test_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p))
#define minix_test_and_set_bit(nr,p) \
__test_and_set_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p))
#define minix_test_and_clear_bit(nr,p) \
diff --git a/include/asm-arm26/bitops.h b/include/asm-arm26/bitops.h
index d87f8634e62..19a69573a65 100644
--- a/include/asm-arm26/bitops.h
+++ b/include/asm-arm26/bitops.h
@@ -117,65 +117,7 @@ ____atomic_test_and_change_bit(unsigned int bit, volatile unsigned long *p)
return res & mask;
}
-/*
- * Now the non-atomic variants. We let the compiler handle all
- * optimisations for these. These are all _native_ endian.
- */
-static inline void __set_bit(int nr, volatile unsigned long *p)
-{
- p[nr >> 5] |= (1UL << (nr & 31));
-}
-
-static inline void __clear_bit(int nr, volatile unsigned long *p)
-{
- p[nr >> 5] &= ~(1UL << (nr & 31));
-}
-
-static inline void __change_bit(int nr, volatile unsigned long *p)
-{
- p[nr >> 5] ^= (1UL << (nr & 31));
-}
-
-static inline int __test_and_set_bit(int nr, volatile unsigned long *p)
-{
- unsigned long oldval, mask = 1UL << (nr & 31);
-
- p += nr >> 5;
-
- oldval = *p;
- *p = oldval | mask;
- return oldval & mask;
-}
-
-static inline int __test_and_clear_bit(int nr, volatile unsigned long *p)
-{
- unsigned long oldval, mask = 1UL << (nr & 31);
-
- p += nr >> 5;
-
- oldval = *p;
- *p = oldval & ~mask;
- return oldval & mask;
-}
-
-static inline int __test_and_change_bit(int nr, volatile unsigned long *p)
-{
- unsigned long oldval, mask = 1UL << (nr & 31);
-
- p += nr >> 5;
-
- oldval = *p;
- *p = oldval ^ mask;
- return oldval & mask;
-}
-
-/*
- * This routine doesn't need to be atomic.
- */
-static inline int __test_bit(int nr, const volatile unsigned long * p)
-{
- return (p[nr >> 5] >> (nr & 31)) & 1UL;
-}
+#include <asm-generic/bitops/non-atomic.h>
/*
* Little endian assembly bitops. nr = 0 -> byte 0 bit 0.
@@ -211,7 +153,6 @@ extern int _find_next_bit_le(const unsigned long *p, int size, int offset);
#define test_and_set_bit(nr,p) ATOMIC_BITOP_LE(test_and_set_bit,nr,p)
#define test_and_clear_bit(nr,p) ATOMIC_BITOP_LE(test_and_clear_bit,nr,p)
#define test_and_change_bit(nr,p) ATOMIC_BITOP_LE(test_and_change_bit,nr,p)
-#define test_bit(nr,p) __test_bit(nr,p)
#define find_first_zero_bit(p,sz) _find_first_zero_bit_le(p,sz)
#define find_next_zero_bit(p,sz,off) _find_next_zero_bit_le(p,sz,off)
#define find_first_bit(p,sz) _find_first_bit_le(p,sz)
@@ -219,80 +160,13 @@ extern int _find_next_bit_le(const unsigned long *p, int size, int offset);
#define WORD_BITOFF_TO_LE(x) ((x))
-/*
- * ffz = Find First Zero in word. Undefined if no zero exists,
- * so code should check against ~0UL first..
- */
-static inline unsigned long ffz(unsigned long word)
-{
- int k;
-
- word = ~word;
- k = 31;
- if (word & 0x0000ffff) { k -= 16; word <<= 16; }
- if (word & 0x00ff0000) { k -= 8; word <<= 8; }
- if (word & 0x0f000000) { k -= 4; word <<= 4; }
- if (word & 0x30000000) { k -= 2; word <<= 2; }
- if (word & 0x40000000) { k -= 1; }
- return k;
-}
-
-/*
- * ffz = Find First Zero in word. Undefined if no zero exists,
- * so code should check against ~0UL first..
- */
-static inline unsigned long __ffs(unsigned long word)
-{
- int k;
-
- k = 31;
- if (word & 0x0000ffff) { k -= 16; word <<= 16; }
- if (word & 0x00ff0000) { k -= 8; word <<= 8; }
- if (word & 0x0f000000) { k -= 4; word <<= 4; }
- if (word & 0x30000000) { k -= 2; word <<= 2; }
- if (word & 0x40000000) { k -= 1; }
- return k;
-}
-
-/*
- * fls: find last bit set.
- */
-
-#define fls(x) generic_fls(x)
-#define fls64(x) generic_fls64(x)
-
-/*
- * ffs: find first bit set. This is defined the same way as
- * the libc and compiler builtin ffs routines, therefore
- * differs in spirit from the above ffz (man ffs).
- */
-
-#define ffs(x) generic_ffs(x)
-
-/*
- * Find first bit set in a 168-bit bitmap, where the first
- * 128 bits are unlikely to be set.
- */
-static inline int sched_find_first_bit(unsigned long *b)
-{
- unsigned long v;
- unsigned int off;
-
- for (off = 0; v = b[off], off < 4; off++) {
- if (unlikely(v))
- break;
- }
- return __ffs(v) + off * 32;
-}
-
-/*
- * hweightN: returns the hamming weight (i.e. the number
- * of bits set) of a N-bit word
- */
-
-#define hweight32(x) generic_hweight32(x)
-#define hweight16(x) generic_hweight16(x)
-#define hweight8(x) generic_hweight8(x)
+#include <asm-generic/bitops/ffz.h>
+#include <asm-generic/bitops/__ffs.h>
+#include <asm-generic/bitops/fls.h>
+#include <asm-generic/bitops/fls64.h>
+#include <asm-generic/bitops/ffs.h>
+#include <asm-generic/bitops/sched.h>
+#include <asm-generic/bitops/hweight.h>
/*
* Ext2 is defined to use little-endian byte ordering.
@@ -307,7 +181,7 @@ static inline int sched_find_first_bit(unsigned long *b)
#define ext2_clear_bit_atomic(lock,nr,p) \
test_and_clear_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p))
#define ext2_test_bit(nr,p) \
- __test_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p))
+ test_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p))
#define ext2_find_first_zero_bit(p,sz) \
_find_first_zero_bit_le(p,sz)
#define ext2_find_next_zero_bit(p,sz,off) \
@@ -320,7 +194,7 @@ static inline int sched_find_first_bit(unsigned long *b)
#define minix_set_bit(nr,p) \
__set_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p))
#define minix_test_bit(nr,p) \
- __test_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p))
+ test_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p))
#define minix_test_and_set_bit(nr,p) \
__test_and_set_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p))
#define minix_test_and_clear_bit(nr,p) \
diff --git a/include/asm-cris/bitops.h b/include/asm-cris/bitops.h
index b7fef1572dc..a569065113d 100644
--- a/include/asm-cris/bitops.h
+++ b/include/asm-cris/bitops.h
@@ -39,8 +39,6 @@ struct __dummy { unsigned long a[100]; };
#define set_bit(nr, addr) (void)test_and_set_bit(nr, addr)
-#define __set_bit(nr, addr) (void)__test_and_set_bit(nr, addr)
-
/*
* clear_bit - Clears a bit in memory
* @nr: Bit to clear
@@ -54,8 +52,6 @@ struct __dummy { unsigned long a[100]; };
#define clear_bit(nr, addr) (void)test_and_clear_bit(nr, addr)
-#define __clear_bit(nr, addr) (void)__test_and_clear_bit(nr, addr)
-
/*
* change_bit - Toggle a bit in memory
* @nr: Bit to change
@@ -68,18 +64,6 @@ struct __dummy { unsigned long a[100]; };
#define change_bit(nr, addr) (void)test_and_change_bit(nr, addr)
-/*
- * __change_bit - Toggle a bit in memory
- * @nr: the bit to change
- * @addr: the address to start counting from
- *
- * Unlike change_bit(), this function is non-atomic and may be reordered.
- * If it's called on the same region of memory simultaneously, the effect
- * may be that only one operation succeeds.
- */
-
-#define __change_bit(nr, addr) (void)__test_and_change_bit(nr, addr)
-
/**
* test_and_set_bit - Set a bit and return its old value
* @nr: Bit to set
@@ -101,19 +85,6 @@ static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
retval = (mask & *adr) != 0;
*adr |= mask;
cris_atomic_restore(addr, flags);
- local_irq_restore(flags);
- return retval;
-}
-
-static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
-{
- unsigned int mask, retval;
- unsigned int *adr = (unsigned int *)addr;
-
- adr += nr >> 5;
- mask = 1 << (nr & 0x1f);
- retval = (mask & *adr) != 0;
- *adr |= mask;
return retval;
}
@@ -148,27 +119,6 @@ static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
}
/**
- * __test_and_clear_bit - Clear a bit and return its old value
- * @nr: Bit to clear
- * @addr: Address to count from
- *
- * This operation is non-atomic and can be reordered.
- * If two examples of this operation race, one can appear to succeed
- * but actually fail. You must protect multiple accesses with a lock.
- */
-
-static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
-{
- unsigned int mask, retval;
- unsigned int *adr = (unsigned int *)addr;
-
- adr += nr >> 5;
- mask = 1 << (nr & 0x1f);
- retval = (mask & *adr) != 0;
- *adr &= ~mask;
- return retval;
-}
-/**
* test_and_change_bit - Change a bit and return its old value
* @nr: Bit to change
* @addr: Address to count from
@@ -191,42 +141,7 @@ static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
return retval;
}
-/* WARNING: non atomic and it can be reordered! */
-
-static inline int __test_and_change_bit(int nr, volatile unsigned long *addr)
-{
- unsigned int mask, retval;
- unsigned int *adr = (unsigned int *)addr;
-
- adr += nr >> 5;
- mask = 1 << (nr & 0x1f);
- retval = (mask & *adr) != 0;
- *adr ^= mask;
-
- return retval;
-}
-
-/**
- * test_bit - Determine whether a bit is set
- * @nr: bit number to test
- * @addr: Address to start counting from
- *
- * This routine doesn't need to be atomic.
- */
-
-static inline int test_bit(int nr, const volatile unsigned long *addr)
-{
- unsigned int mask;
- unsigned int *adr = (unsigned int *)addr;
-
- adr += nr >> 5;
- mask = 1 << (nr & 0x1f);
- return ((mask & *adr) != 0);
-}
-
-/*
- * Find-bit routines..
- */
+#include <asm-generic/bitops/non-atomic.h>
/*
* Since we define it "external", it collides with the built-in
@@ -235,152 +150,18 @@ static inline int test_bit(int nr, const volatile unsigned long *addr)
*/
#define ffs kernel_ffs
-/*
- * fls: find last bit set.
- */
-
-#define fls(x) generic_fls(x)
-#define fls64(x) generic_fls64(x)
-
-/*
- * hweightN - returns the hamming weight of a N-bit word
- * @x: the word to weigh
- *
- * The Hamming Weight of a number is the total number of bits set in it.
- */
+#include <asm-generic/bitops/fls.h>
+#include <asm-generic/bitops/fls64.h>
+#include <asm-generic/bitops/hweight.h>
+#include <asm-generic/bitops/find.h>
-#define hweight32(x) generic_hweight32(x)
-#define hweight16(x) generic_hweight16(x)
-#define hweight8(x) generic_hweight8(x)
+#include <asm-generic/bitops/ext2-non-atomic.h>
-/**
- * find_next_zero_bit - find the first zero bit in a memory region
- * @addr: The address to base the search on
- * @offset: The bitnumber to start searching at
- * @size: The maximum size to search
- */
-static inline int find_next_zero_bit (const unsigned long * addr, int size, int offset)
-{
- unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
- unsigned long result = offset & ~31UL;
- unsigned long tmp;
-
- if (offset >= size)
- return size;
- size -= result;
- offset &= 31UL;
- if (offset) {
- tmp = *(p++);
- tmp |= ~0UL >> (32-offset);
- if (size < 32)
- goto found_first;
- if (~tmp)
- goto found_middle;
- size -= 32;
- result += 32;
- }
- while (size & ~31UL) {
- if (~(tmp = *(p++)))
- goto found_middle;
- result += 32;
- size -= 32;
- }
- if (!size)
- return result;
- tmp = *p;
-
- found_first:
- tmp |= ~0UL << size;
- found_middle:
- return result + ffz(tmp);
-}
-
-/**
- * find_next_bit - find the first set bit in a memory region
- * @addr: The address to base the search on
- * @offset: The bitnumber to start searching at
- * @size: The maximum size to search
- */
-static __inline__ int find_next_bit(const unsigned long *addr, int size, int offset)
-{
- unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
- unsigned long result = offset & ~31UL;
- unsigned long tmp;
-
- if (offset >= size)
- return size;
- size -= result;
- offset &= 31UL;
- if (offset) {
- tmp = *(p++);
- tmp &= (~0UL << offset);
- if (size < 32)
- goto found_first;
- if (tmp)
- goto found_middle;
- size -= 32;
- result += 32;
- }
- while (size & ~31UL) {
- if ((tmp = *(p++)))
- goto found_middle;
- result += 32;
- size -= 32;
- }
- if (!size)
- return result;
- tmp = *p;
-
-found_first:
- tmp &= (~0UL >> (32 - size));
- if (tmp == 0UL) /* Are any bits set? */
- return result + size; /* Nope. */
-found_middle:
- return result + __ffs(tmp);
-}
-
-/**
- * find_first_zero_bit - find the first zero bit in a memory region
- * @addr: The address to start the search at
- * @size: The maximum size to search
- *
- * Returns the bit-number of the first zero bit, not the number of the byte
- * containing a bit.
- */
-
-#define find_first_zero_bit(addr, size) \
- find_next_zero_bit((addr), (size), 0)
-#define find_first_bit(addr, size) \
- find_next_bit((addr), (size), 0)
-
-#define ext2_set_bit test_and_set_bit
#define ext2_set_bit_atomic(l,n,a) test_and_set_bit(n,a)
-#define ext2_clear_bit test_and_clear_bit
#define ext2_clear_bit_atomic(l,n,a) test_and_clear_bit(n,a)
-#define ext2_test_bit test_bit
-#define ext2_find_first_zero_bit find_first_zero_bit
-#define ext2_find_next_zero_bit find_next_zero_bit
-
-/* Bitmap functions for the minix filesystem. */
-#define minix_set_bit(nr,addr) test_and_set_bit(nr,addr)
-#define minix_clear_bit(nr,addr) test_and_clear_bit(nr,addr)
-#define minix_test_bit(nr,addr) test_bit(nr,addr)
-#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size)
-static inline int sched_find_first_bit(const unsigned long *b)
-{
- if (unlikely(b[0]))
- return __ffs(b[0]);
- if (unlikely(b[1]))
- return __ffs(b[1]) + 32;
- if (unlikely(b[2]))
- return __ffs(b[2]) + 64;
- if (unlikely(b[3]))
- return __ffs(b[3]) + 96;
- if (b[4])
- return __ffs(b[4]) + 128;
- return __ffs(b[5]) + 32 + 128;
-}
+#include <asm-generic/bitops/minix.h>
+#include <asm-generic/bitops/sched.h>
#endif /* __KERNEL__ */
diff --git a/include/asm-frv/bitops.h b/include/asm-frv/bitops.h
index f686b519878..6344d06390b 100644
--- a/include/asm-frv/bitops.h
+++ b/include/asm-frv/bitops.h
@@ -22,20 +22,7 @@
#ifdef __KERNEL__
-/*
- * ffz = Find First Zero in word. Undefined if no zero exists,
- * so code should check against ~0UL first..
- */
-static inline unsigned long ffz(unsigned long word)
-{
- unsigned long result = 0;
-
- while (word & 1) {
- result++;
- word >>= 1;
- }
- return result;
-}
+#include <asm-generic/bitops/ffz.h>
/*
* clear_bit() doesn't provide any barrier for the compiler.
@@ -171,51 +158,9 @@ static inline int __test_bit(int nr, const volatile void * addr)
__constant_test_bit((nr),(addr)) : \
__test_bit((nr),(addr)))
-extern int find_next_bit(const unsigned long *addr, int size, int offset);
-
-#define find_first_bit(addr, size) find_next_bit(addr, size, 0)
-
-#define find_first_zero_bit(addr, size) \
- find_next_zero_bit((addr), (size), 0)
-
-static inline int find_next_zero_bit(const void *addr, int size, int offset)
-{
- const unsigned long *p = ((const unsigned long *) addr) + (offset >> 5);
- unsigned long result = offset & ~31UL;
- unsigned long tmp;
-
- if (offset >= size)
- return size;
- size -= result;
- offset &= 31UL;
- if (offset) {
- tmp = *(p++);
- tmp |= ~0UL >> (32-offset);
- if (size < 32)
- goto found_first;
- if (~tmp)
- goto found_middle;
- size -= 32;
- result += 32;
- }
- while (size & ~31UL) {
- if (~(tmp = *(p++)))
- goto found_middle;
- result += 32;
- size -= 32;
- }
- if (!size)
- return result;
- tmp = *p;
-
-found_first:
- tmp |= ~0UL << size;
-found_middle:
- return result + ffz(tmp);
-}
-
-#define ffs(x) generic_ffs(x)
-#define __ffs(x) (ffs(x) - 1)
+#include <asm-generic/bitops/ffs.h>
+#include <asm-generic/bitops/__ffs.h>
+#include <asm-generic/bitops/find.h>
/*
* fls: find last bit set.
@@ -228,114 +173,17 @@ found_middle:
\
bit ? 33 - bit : bit; \
})
-#define fls64(x) generic_fls64(x)
-/*
- * Every architecture must define this function. It's the fastest
- * way of searching a 140-bit bitmap where the first 100 bits are
- * unlikely to be set. It's guaranteed that at least one of the 140
- * bits is cleared.
- */
-static inline int sched_find_first_bit(const unsigned long *b)
-{
- if (unlikely(b[0]))
- return __ffs(b[0]);
- if (unlikely(b[1]))
- return __ffs(b[1]) + 32;
- if (unlikely(b[2]))
- return __ffs(b[2]) + 64;
- if (b[3])
- return __ffs(b[3]) + 96;
- return __ffs(b[4]) + 128;
-}
+#include <asm-generic/bitops/fls64.h>
+#include <asm-generic/bitops/sched.h>
+#include <asm-generic/bitops/hweight.h>
+#include <asm-generic/bitops/ext2-non-atomic.h>
-/*
- * hweightN: returns the hamming weight (i.e. the number
- * of bits set) of a N-bit word
- */
-
-#define hweight32(x) generic_hweight32(x)
-#define hweight16(x) generic_hweight16(x)
-#define hweight8(x) generic_hweight8(x)
-
-#define ext2_set_bit(nr, addr) test_and_set_bit ((nr) ^ 0x18, (addr))
-#define ext2_clear_bit(nr, addr) test_and_clear_bit((nr) ^ 0x18, (addr))
-
-#define ext2_set_bit_atomic(lock,nr,addr) ext2_set_bit((nr), addr)
-#define ext2_clear_bit_atomic(lock,nr,addr) ext2_clear_bit((nr), addr)
-
-static inline int ext2_test_bit(int nr, const volatile void * addr)
-{
- const volatile unsigned char *ADDR = (const unsigned char *) addr;
- int mask;
-
- ADDR += nr >> 3;
- mask = 1 << (nr & 0x07);
- return ((mask & *ADDR) != 0);
-}
-
-#define ext2_find_first_zero_bit(addr, size) \
- ext2_find_next_zero_bit((addr), (size), 0)
-
-static inline unsigned long ext2_find_next_zero_bit(const void *addr,
- unsigned long size,
- unsigned long offset)
-{
- const unsigned long *p = ((const unsigned long *) addr) + (offset >> 5);
- unsigned long result = offset & ~31UL;
- unsigned long tmp;
-
- if (offset >= size)
- return size;
- size -= result;
- offset &= 31UL;
- if(offset) {
- /* We hold the little endian value in tmp, but then the
- * shift is illegal. So we could keep a big endian value
- * in tmp, like this:
- *
- * tmp = __swab32(*(p++));
- * tmp |= ~0UL >> (32-offset);
- *
- * but this would decrease preformance, so we change the
- * shift:
- */
- tmp = *(p++);
- tmp |= __swab32(~0UL >> (32-offset));
- if(size < 32)
- goto found_first;
- if(~tmp)
- goto found_middle;
- size -= 32;
- result += 32;
- }
- while(size & ~31UL) {
- if(~(tmp = *(p++)))
- goto found_middle;
- result += 32;
- size -= 32;
- }
- if(!size)
- return result;
- tmp = *p;
-
-found_first:
- /* tmp is little endian, so we would have to swab the shift,
- * see above. But then we have to swab tmp below for ffz, so
- * we might as well do this here.
- */
- return result + ffz(__swab32(tmp) | (~0UL << size));
-found_middle:
- return result + ffz(__swab32(tmp));
-}
+#define ext2_set_bit_atomic(lock,nr,addr) test_and_set_bit ((nr) ^ 0x18, (addr))
+#define ext2_clear_bit_atomic(lock,nr,addr) test_and_clear_bit((nr) ^ 0x18, (addr))
-/* Bitmap functions for the minix filesystem. */
-#define minix_test_and_set_bit(nr,addr) ext2_set_bit(nr,addr)
-#define minix_set_bit(nr,addr) ext2_set_bit(nr,addr)
-#define minix_test_and_clear_bit(nr,addr) ext2_clear_bit(nr,addr)
-#define minix_test_bit(nr,addr) ext2_test_bit(nr,addr)
-#define minix_find_first_zero_bit(addr,size) ext2_find_first_zero_bit(addr,size)
+#include <asm-generic/bitops/minix-le.h>
#endif /* __KERNEL__ */
diff --git a/include/asm-generic/bitops.h b/include/asm-generic/bitops.h
index 0e6d9852008..1f9d99193df 100644
--- a/include/asm-generic/bitops.h
+++ b/include/asm-generic/bitops.h
@@ -5,77 +5,27 @@
* For the benefit of those who are trying to port Linux to another
* architecture, here are some C-language equivalents. You should
* recode these in the native assembly language, if at all possible.
- * To guarantee atomicity, these routines call cli() and sti() to
- * disable interrupts while they operate. (You have to provide inline
- * routines to cli() and sti().)
- *
- * Also note, these routines assume that you have 32 bit longs.
- * You will have to change this if you are trying to port Linux to the
- * Alpha architecture or to a Cray. :-)
*
* C language equivalents written by Theodore Ts'o, 9/26/92
*/
-extern __inline__ int set_bit(int nr,long * addr)
-{
- int mask, retval;
-
- addr += nr >> 5;
- mask = 1 << (nr & 0x1f);
- cli();
- retval = (mask & *addr) != 0;
- *addr |= mask;
- sti();
- return retval;
-}
-
-extern __inline__ int clear_bit(int nr, long * addr)
-{
- int mask, retval;
-
- addr += nr >> 5;
- mask = 1 << (nr & 0x1f);
- cli();
- retval = (mask & *addr) != 0;
- *addr &= ~mask;
- sti();
- return retval;
-}
-
-extern __inline__ int test_bit(int nr, const unsigned long * addr)
-{
- int mask;
-
- addr += nr >> 5;
- mask = 1 << (nr & 0x1f);
- return ((mask & *addr) != 0);
-}
-
-/*
- * fls: find last bit set.
- */
-
-#define fls(x) generic_fls(x)
-#define fls64(x) generic_fls64(x)
+#include <asm-generic/bitops/atomic.h>
+#include <asm-generic/bitops/non-atomic.h>
+#include <asm-generic/bitops/__ffs.h>
+#include <asm-generic/bitops/ffz.h>
+#include <asm-generic/bitops/fls.h>
+#include <asm-generic/bitops/fls64.h>
+#include <asm-generic/bitops/find.h>
#ifdef __KERNEL__
-/*
- * ffs: find first bit set. This is defined the same way as
- * the libc and compiler builtin ffs routines, therefore
- * differs in spirit from the above ffz (man ffs).
- */
-
-#define ffs(x) generic_ffs(x)
-
-/*
- * hweightN: returns the hamming weight (i.e. the number
- * of bits set) of a N-bit word
- */
+#include <asm-generic/bitops/sched.h>
+#include <asm-generic/bitops/ffs.h>
+#include <asm-generic/bitops/hweight.h>
-#define hweight32(x) generic_hweight32(x)
-#define hweight16(x) generic_hweight16(x)
-#define hweight8(x) generic_hweight8(x)
+#include <asm-generic/bitops/ext2-non-atomic.h>
+#include <asm-generic/bitops/ext2-atomic.h>
+#include <asm-generic/bitops/minix.h>
#endif /* __KERNEL__ */
diff --git a/include/asm-generic/bitops/__ffs.h b/include/asm-generic/bitops/__ffs.h
new file mode 100644
index 00000000000..9a3274aecf8
--- /dev/null
+++ b/include/asm-generic/bitops/__ffs.h
@@ -0,0 +1,43 @@
+#ifndef _ASM_GENERIC_BITOPS___FFS_H_
+#define _ASM_GENERIC_BITOPS___FFS_H_
+
+#include <asm/types.h>
+
+/**
+ * __ffs - find first bit in word.
+ * @word: The word to search
+ *
+ * Undefined if no bit exists, so code should check against 0 first.
+ */
+static inline unsigned long __ffs(unsigned long word)
+{
+ int num = 0;
+
+#if BITS_PER_LONG == 64
+ if ((word & 0xffffffff) == 0) {
+ num += 32;
+ word >>= 32;
+ }
+#endif
+ if ((word & 0xffff) == 0) {
+ num += 16;
+ word >>= 16;
+ }
+ if ((word & 0xff) == 0) {
+ num += 8;
+ word >>= 8;
+ }
+ if ((word & 0xf) == 0) {
+ num += 4;
+ word >>= 4;
+ }
+ if ((word & 0x3) == 0) {
+ num += 2;
+ word >>= 2;
+ }
+ if ((word & 0x1) == 0)
+ num += 1;
+ return num;
+}
+
+#endif /* _ASM_GENERIC_BITOPS___FFS_H_ */
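__ffs() above is the 0-based counterpart of ffs(): it returns the index of the lowest set bit and is undefined for 0, so callers must test the word first. A common idiom built on it, shown as a sketch (illustrative helper, not part of the patch):

/* call fn() for the index of every set bit in word */
static void for_each_set_bit_sketch(unsigned long word,
                                    void (*fn)(unsigned long bit))
{
        while (word) {
                unsigned long bit = __ffs(word);        /* word != 0 here */

                fn(bit);
                word &= word - 1;       /* clear the lowest set bit */
        }
}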
diff --git a/include/asm-generic/bitops/atomic.h b/include/asm-generic/bitops/atomic.h
new file mode 100644
index 00000000000..78339319ba0
--- /dev/null
+++ b/include/asm-generic/bitops/atomic.h
@@ -0,0 +1,191 @@
+#ifndef _ASM_GENERIC_BITOPS_ATOMIC_H_
+#define _ASM_GENERIC_BITOPS_ATOMIC_H_
+
+#include <asm/types.h>
+
+#define BITOP_MASK(nr) (1UL << ((nr) % BITS_PER_LONG))
+#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG)
+
+#ifdef CONFIG_SMP
+#include <asm/spinlock.h>
+#include <asm/cache.h> /* we use L1_CACHE_BYTES */
+
+/* Use an array of spinlocks for our atomic_ts.
+ * Hash function to index into a different SPINLOCK.
+ * Since "a" is usually an address, use one spinlock per cacheline.
+ */
+# define ATOMIC_HASH_SIZE 4
+# define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) a)/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ]))
+
+extern raw_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
+
+/* Can't use raw_spin_lock_irq because of #include problems, so
+ * this is the substitute */
+#define _atomic_spin_lock_irqsave(l,f) do { \
+ raw_spinlock_t *s = ATOMIC_HASH(l); \
+ local_irq_save(f); \
+ __raw_spin_lock(s); \
+} while(0)
+
+#define _atomic_spin_unlock_irqrestore(l,f) do { \
+ raw_spinlock_t *s = ATOMIC_HASH(l); \
+ __raw_spin_unlock(s); \
+ local_irq_restore(f); \
+} while(0)
+
+
+#else
+# define _atomic_spin_lock_irqsave(l,f) do { local_irq_save(f); } while (0)
+# define _atomic_spin_unlock_irqrestore(l,f) do { local_irq_restore(f); } while (0)
+#endif
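On SMP the block above emulates atomic bit operations with a small array of spinlocks hashed by address, one lock per cacheline, while the UP branch reduces to plain local_irq_save/restore. The index computation, spelled out as a sketch with illustrative values (ATOMIC_HASH_SIZE == 4 and L1_CACHE_BYTES == 64 assumed here):

/* which of the four locks a given address maps to, under the
 * assumed values above */
static unsigned int atomic_hash_index_sketch(unsigned long addr)
{
        return (addr / 64) & (4 - 1);
}
/* e.g. 0x1000 and 0x1020 share lock 0 (same cacheline),
 * while 0x1040 falls into the next cacheline and maps to lock 1 */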
+
+/*
+ * NMI events can occur at any time, including when interrupts have been
+ * disabled by *_irqsave(). So you can get NMI events occurring while a
+ * *_bit function is holding a spin lock. If the NMI handler also wants
+ * to do bit manipulation (and they do) then you can get a deadlock
+ * between the original caller of *_bit() and the NMI handler.
+ *
+ * by Keith Owens
+ */
+
+/**
+ * set_bit - Atomically set a bit in memory
+ * @nr: the bit to set
+ * @addr: the address to start counting from
+ *
+ * This function is atomic and may not be reordered. See __set_bit()
+ * if you do not require the atomic guarantees.
+ *
+ * Note: there are no guarantees that this function will not be reordered
+ * on non-x86 architectures, so if you are writing portable code,
+ * make sure not to rely on its reordering guarantees.
+ *
+ * Note that @nr may be almost arbitrarily large; this function is not
+ * restricted to acting on a single-word quantity.
+ */
+static inline void set_bit(int nr, volatile unsigned long *addr)
+{
+ unsigned long mask = BITOP_MASK(nr);
+ unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
+ unsigned long flags;
+
+ _atomic_spin_lock_irqsave(p, flags);
+ *p |= mask;
+ _atomic_spin_unlock_irqrestore(p, flags);
+}
+
+/**
+ * clear_bit - Clears a bit in memory
+ * @nr: Bit to clear
+ * @addr: Address to start counting from
+ *
+ * clear_bit() is atomic and may not be reordered. However, it does
+ * not contain a memory barrier, so if it is used for locking purposes,
+ * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
+ * in order to ensure changes are visible on other processors.
+ */
+static inline void clear_bit(int nr, volatile unsigned long *addr)
+{
+ unsigned long mask = BITOP_MASK(nr);
+ unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
+ unsigned long flags;
+
+ _atomic_spin_lock_irqsave(p, flags);
+ *p &= ~mask;
+ _atomic_spin_unlock_irqrestore(p, flags);
+}
+
+/**
+ * change_bit - Toggle a bit in memory
+ * @nr: Bit to change
+ * @addr: Address to start counting from
+ *
+ * change_bit() is atomic and may not be reordered. It may be
+ * reordered on architectures other than x86.
+ * Note that @nr may be almost arbitrarily large; this function is not
+ * restricted to acting on a single-word quantity.
+ */
+static inline void change_bit(int nr, volatile unsigned long *addr)
+{
+ unsigned long mask = BITOP_MASK(nr);
+ unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
+ unsigned long flags;
+
+ _atomic_spin_lock_irqsave(p, flags);
+ *p ^= mask;
+ _atomic_spin_unlock_irqrestore(p, flags);
+}
+
+/**
+ * test_and_set_bit - Set a bit and return its old value
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This operation is atomic and cannot be reordered.
+ * It may be reordered on architectures other than x86.
+ * It also implies a memory barrier.
+ */
+static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
+{
+ unsigned long mask = BITOP_MASK(nr);
+ unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
+ unsigned long old;
+ unsigned long flags;
+
+ _atomic_spin_lock_irqsave(p, flags);
+ old = *p;
+ *p = old | mask;
+ _atomic_spin_unlock_irqrestore(p, flags);
+
+ return (old & mask) != 0;
+}
+
+/**
+ * test_and_clear_bit - Clear a bit and return its old value
+ * @nr: Bit to clear
+ * @addr: Address to count from
+ *
+ * This operation is atomic and cannot be reordered.
+ * It can be reordered on architectures other than x86.
+ * It also implies a memory barrier.
+ */
+static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
+{
+ unsigned long mask = BITOP_MASK(nr);
+ unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
+ unsigned long old;
+ unsigned long flags;
+
+ _atomic_spin_lock_irqsave(p, flags);
+ old = *p;
+ *p = old & ~mask;
+ _atomic_spin_unlock_irqrestore(p, flags);
+
+ return (old & mask) != 0;
+}
+
+/**
+ * test_and_change_bit - Change a bit and return its old value
+ * @nr: Bit to change
+ * @addr: Address to count from
+ *
+ * This operation is atomic and cannot be reordered.
+ * It also implies a memory barrier.
+ */
+static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
+{
+ unsigned long mask = BITOP_MASK(nr);
+ unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
+ unsigned long old;
+ unsigned long flags;
+
+ _atomic_spin_lock_irqsave(p, flags);
+ old = *p;
+ *p = old ^ mask;
+ _atomic_spin_unlock_irqrestore(p, flags);
+
+ return (old & mask) != 0;
+}
+
+#endif /* _ASM_GENERIC_BITOPS_ATOMIC_H_ */
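/*
 * Usage sketch (the helper names and bitmap argument are illustrative
 * assumptions): a bit index is split by BITOP_WORD()/BITOP_MASK() into a
 * word index and an in-word mask, so @nr may exceed BITS_PER_LONG and the
 * spinlock hash keys off the word that is actually touched.
 */
static inline int example_claim(unsigned long *map, int nr)
{
	/* atomically set bit nr; non-zero return means it was already set */
	return test_and_set_bit(nr, map);
}

static inline void example_release(unsigned long *map, int nr)
{
	clear_bit(nr, map);
	/* per clear_bit()'s comment, needed when the bit is used for locking;
	 * smp_mb__after_clear_bit() is provided elsewhere by the architecture */
	smp_mb__after_clear_bit();
}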
diff --git a/include/asm-generic/bitops/ext2-atomic.h b/include/asm-generic/bitops/ext2-atomic.h
new file mode 100644
index 00000000000..ab1c875efb7
--- /dev/null
+++ b/include/asm-generic/bitops/ext2-atomic.h
@@ -0,0 +1,22 @@
+#ifndef _ASM_GENERIC_BITOPS_EXT2_ATOMIC_H_
+#define _ASM_GENERIC_BITOPS_EXT2_ATOMIC_H_
+
+#define ext2_set_bit_atomic(lock, nr, addr) \
+ ({ \
+ int ret; \
+ spin_lock(lock); \
+ ret = ext2_set_bit((nr), (unsigned long *)(addr)); \
+ spin_unlock(lock); \
+ ret; \
+ })
+
+#define ext2_clear_bit_atomic(lock, nr, addr) \
+ ({ \
+ int ret; \
+ spin_lock(lock); \
+ ret = ext2_clear_bit((nr), (unsigned long *)(addr)); \
+ spin_unlock(lock); \
+ ret; \
+ })
+
+#endif /* _ASM_GENERIC_BITOPS_EXT2_ATOMIC_H_ */
diff --git a/include/asm-generic/bitops/ext2-non-atomic.h b/include/asm-generic/bitops/ext2-non-atomic.h
new file mode 100644
index 00000000000..1697404afa0
--- /dev/null
+++ b/include/asm-generic/bitops/ext2-non-atomic.h
@@ -0,0 +1,18 @@
+#ifndef _ASM_GENERIC_BITOPS_EXT2_NON_ATOMIC_H_
+#define _ASM_GENERIC_BITOPS_EXT2_NON_ATOMIC_H_
+
+#include <asm-generic/bitops/le.h>
+
+#define ext2_set_bit(nr,addr) \
+ generic___test_and_set_le_bit((nr),(unsigned long *)(addr))
+#define ext2_clear_bit(nr,addr) \
+ generic___test_and_clear_le_bit((nr),(unsigned long *)(addr))
+
+#define ext2_test_bit(nr,addr) \
+ generic_test_le_bit((nr),(unsigned long *)(addr))
+#define ext2_find_first_zero_bit(addr, size) \
+ generic_find_first_zero_le_bit((unsigned long *)(addr), (size))
+#define ext2_find_next_zero_bit(addr, size, off) \
+ generic_find_next_zero_le_bit((unsigned long *)(addr), (size), (off))
+
+#endif /* _ASM_GENERIC_BITOPS_EXT2_NON_ATOMIC_H_ */
diff --git a/include/asm-generic/bitops/ffs.h b/include/asm-generic/bitops/ffs.h
new file mode 100644
index 00000000000..fbbb43af7dc
--- /dev/null
+++ b/include/asm-generic/bitops/ffs.h
@@ -0,0 +1,41 @@
+#ifndef _ASM_GENERIC_BITOPS_FFS_H_
+#define _ASM_GENERIC_BITOPS_FFS_H_
+
+/**
+ * ffs - find first bit set
+ * @x: the word to search
+ *
+ * This is defined the same way as
+ * the libc and compiler builtin ffs routines, and therefore
+ * differs in spirit from ffz() (man ffs).
+ */
+static inline int ffs(int x)
+{
+ int r = 1;
+
+ if (!x)
+ return 0;
+ if (!(x & 0xffff)) {
+ x >>= 16;
+ r += 16;
+ }
+ if (!(x & 0xff)) {
+ x >>= 8;
+ r += 8;
+ }
+ if (!(x & 0xf)) {
+ x >>= 4;
+ r += 4;
+ }
+ if (!(x & 3)) {
+ x >>= 2;
+ r += 2;
+ }
+ if (!(x & 1)) {
+ x >>= 1;
+ r += 1;
+ }
+ return r;
+}
+
+#endif /* _ASM_GENERIC_BITOPS_FFS_H_ */
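/*
 * Illustrative note (the helper below is an assumption added only for
 * clarity, and it assumes the architecture also pulls in __ffs()): ffs()
 * is 1-based and defined for 0, while __ffs() is 0-based and undefined for
 * 0, so for any non-zero value ffs(x) == __ffs(x) + 1, e.g. ffs(0) == 0,
 * ffs(1) == 1, ffs(0x8000) == 16.
 */
static inline int ffs_is_one_based(unsigned int x)
{
	return x ? ffs(x) == (int)__ffs(x) + 1 : ffs(0) == 0;
}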
diff --git a/include/asm-generic/bitops/ffz.h b/include/asm-generic/bitops/ffz.h
new file mode 100644
index 00000000000..6744bd4cdf4
--- /dev/null
+++ b/include/asm-generic/bitops/ffz.h
@@ -0,0 +1,12 @@
+#ifndef _ASM_GENERIC_BITOPS_FFZ_H_
+#define _ASM_GENERIC_BITOPS_FFZ_H_
+
+/*
+ * ffz - find first zero in word.
+ * @word: The word to search
+ *
+ * Undefined if no zero exists, so code should check against ~0UL first.
+ */
+#define ffz(x) __ffs(~(x))
+
+#endif /* _ASM_GENERIC_BITOPS_FFZ_H_ */
diff --git a/include/asm-generic/bitops/find.h b/include/asm-generic/bitops/find.h
new file mode 100644
index 00000000000..72a51e5a12e
--- /dev/null
+++ b/include/asm-generic/bitops/find.h
@@ -0,0 +1,13 @@
+#ifndef _ASM_GENERIC_BITOPS_FIND_H_
+#define _ASM_GENERIC_BITOPS_FIND_H_
+
+extern unsigned long find_next_bit(const unsigned long *addr, unsigned long
+ size, unsigned long offset);
+
+extern unsigned long find_next_zero_bit(const unsigned long *addr, unsigned
+ long size, unsigned long offset);
+
+#define find_first_bit(addr, size) find_next_bit((addr), (size), 0)
+#define find_first_zero_bit(addr, size) find_next_zero_bit((addr), (size), 0)
+
+#endif /*_ASM_GENERIC_BITOPS_FIND_H_ */
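/*
 * Usage sketch (the helper name is an illustrative assumption): the usual
 * iteration idiom built on the declarations above.
 */
static inline unsigned long count_set_bits(const unsigned long *map,
					   unsigned long nbits)
{
	unsigned long i, n = 0;

	for (i = find_first_bit(map, nbits); i < nbits;
	     i = find_next_bit(map, nbits, i + 1))
		n++;
	return n;
}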
diff --git a/include/asm-generic/bitops/fls.h b/include/asm-generic/bitops/fls.h
new file mode 100644
index 00000000000..850859bc506
--- /dev/null
+++ b/include/asm-generic/bitops/fls.h
@@ -0,0 +1,41 @@
+#ifndef _ASM_GENERIC_BITOPS_FLS_H_
+#define _ASM_GENERIC_BITOPS_FLS_H_
+
+/**
+ * fls - find last (most-significant) bit set
+ * @x: the word to search
+ *
+ * This is defined the same way as ffs.
+ * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
+ */
+
+static inline int fls(int x)
+{
+ int r = 32;
+
+ if (!x)
+ return 0;
+ if (!(x & 0xffff0000u)) {
+ x <<= 16;
+ r -= 16;
+ }
+ if (!(x & 0xff000000u)) {
+ x <<= 8;
+ r -= 8;
+ }
+ if (!(x & 0xf0000000u)) {
+ x <<= 4;
+ r -= 4;
+ }
+ if (!(x & 0xc0000000u)) {
+ x <<= 2;
+ r -= 2;
+ }
+ if (!(x & 0x80000000u)) {
+ x <<= 1;
+ r -= 1;
+ }
+ return r;
+}
+
+#endif /* _ASM_GENERIC_BITOPS_FLS_H_ */
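/*
 * Usage sketch (the helper is an illustrative assumption): fls() makes
 * power-of-two rounding cheap, e.g. finding the smallest n with
 * (1U << n) >= size.
 */
static inline int example_get_order(unsigned int size)
{
	return size > 1 ? fls(size - 1) : 0;
}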
diff --git a/include/asm-generic/bitops/fls64.h b/include/asm-generic/bitops/fls64.h
new file mode 100644
index 00000000000..1b6b17ce242
--- /dev/null
+++ b/include/asm-generic/bitops/fls64.h
@@ -0,0 +1,14 @@
+#ifndef _ASM_GENERIC_BITOPS_FLS64_H_
+#define _ASM_GENERIC_BITOPS_FLS64_H_
+
+#include <asm/types.h>
+
+static inline int fls64(__u64 x)
+{
+ __u32 h = x >> 32;
+ if (h)
+ return fls(h) + 32;
+ return fls(x);
+}
+
+#endif /* _ASM_GENERIC_BITOPS_FLS64_H_ */
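/*
 * Illustrative note (added for clarity, not part of the header): fls64()
 * keeps the same 1-based convention as fls(), so fls64(0) == 0 and
 * fls64(1ULL << 40) == 41; the high half is tested first, so 32-bit
 * architectures only pay for one fls() call when the upper word is zero.
 */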
diff --git a/include/asm-generic/bitops/hweight.h b/include/asm-generic/bitops/hweight.h
new file mode 100644
index 00000000000..fbbc383771d
--- /dev/null
+++ b/include/asm-generic/bitops/hweight.h
@@ -0,0 +1,11 @@
+#ifndef _ASM_GENERIC_BITOPS_HWEIGHT_H_
+#define _ASM_GENERIC_BITOPS_HWEIGHT_H_
+
+#include <asm/types.h>
+
+extern unsigned int hweight32(unsigned int w);
+extern unsigned int hweight16(unsigned int w);
+extern unsigned int hweight8(unsigned int w);
+extern unsigned long hweight64(__u64 w);
+
+#endif /* _ASM_GENERIC_BITOPS_HWEIGHT_H_ */
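/*
 * The declarations above are satisfied out of line (in lib/, which is an
 * assumption of this note rather than part of the header).  A typical
 * portable population count, shown purely as a sketch, is the parallel
 * reduction below:
 */
static inline unsigned int hweight32_sketch(unsigned int w)
{
	unsigned int res = w - ((w >> 1) & 0x55555555);

	res = (res & 0x33333333) + ((res >> 2) & 0x33333333);
	res = (res + (res >> 4)) & 0x0f0f0f0f;
	return (res * 0x01010101) >> 24;	/* sum the per-byte counts */
}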
diff --git a/include/asm-generic/bitops/le.h b/include/asm-generic/bitops/le.h
new file mode 100644
index 00000000000..b9c7e5d2d2a
--- /dev/null
+++ b/include/asm-generic/bitops/le.h
@@ -0,0 +1,53 @@
+#ifndef _ASM_GENERIC_BITOPS_LE_H_
+#define _ASM_GENERIC_BITOPS_LE_H_
+
+#include <asm/types.h>
+#include <asm/byteorder.h>
+
+#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG)
+#define BITOP_LE_SWIZZLE ((BITS_PER_LONG-1) & ~0x7)
+
+#if defined(__LITTLE_ENDIAN)
+
+#define generic_test_le_bit(nr, addr) test_bit(nr, addr)
+#define generic___set_le_bit(nr, addr) __set_bit(nr, addr)
+#define generic___clear_le_bit(nr, addr) __clear_bit(nr, addr)
+
+#define generic_test_and_set_le_bit(nr, addr) test_and_set_bit(nr, addr)
+#define generic_test_and_clear_le_bit(nr, addr) test_and_clear_bit(nr, addr)
+
+#define generic___test_and_set_le_bit(nr, addr) __test_and_set_bit(nr, addr)
+#define generic___test_and_clear_le_bit(nr, addr) __test_and_clear_bit(nr, addr)
+
+#define generic_find_next_zero_le_bit(addr, size, offset) find_next_zero_bit(addr, size, offset)
+
+#elif defined(__BIG_ENDIAN)
+
+#define generic_test_le_bit(nr, addr) \
+ test_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
+#define generic___set_le_bit(nr, addr) \
+ __set_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
+#define generic___clear_le_bit(nr, addr) \
+ __clear_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
+
+#define generic_test_and_set_le_bit(nr, addr) \
+ test_and_set_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
+#define generic_test_and_clear_le_bit(nr, addr) \
+ test_and_clear_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
+
+#define generic___test_and_set_le_bit(nr, addr) \
+ __test_and_set_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
+#define generic___test_and_clear_le_bit(nr, addr) \
+ __test_and_clear_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
+
+extern unsigned long generic_find_next_zero_le_bit(const unsigned long *addr,
+ unsigned long size, unsigned long offset);
+
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+
+#define generic_find_first_zero_le_bit(addr, size) \
+ generic_find_next_zero_le_bit((addr), (size), 0)
+
+#endif /* _ASM_GENERIC_BITOPS_LE_H_ */
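/*
 * Worked example (added for clarity; the numbers follow from the macro
 * above): on a 64-bit big-endian kernel BITOP_LE_SWIZZLE is 56, so
 * little-endian bit 0 (the LSB of the first byte on disk) maps to native
 * bit 0 ^ 56 == 56 of the first long, bit 8 maps to 8 ^ 56 == 48, and so
 * on: the byte order is reversed while the bit order within each byte is
 * preserved.
 */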
diff --git a/include/asm-generic/bitops/minix-le.h b/include/asm-generic/bitops/minix-le.h
new file mode 100644
index 00000000000..4a981c1bb1a
--- /dev/null
+++ b/include/asm-generic/bitops/minix-le.h
@@ -0,0 +1,17 @@
+#ifndef _ASM_GENERIC_BITOPS_MINIX_LE_H_
+#define _ASM_GENERIC_BITOPS_MINIX_LE_H_
+
+#include <asm-generic/bitops/le.h>
+
+#define minix_test_and_set_bit(nr,addr) \
+ generic___test_and_set_le_bit((nr),(unsigned long *)(addr))
+#define minix_set_bit(nr,addr) \
+ generic___set_le_bit((nr),(unsigned long *)(addr))
+#define minix_test_and_clear_bit(nr,addr) \
+ generic___test_and_clear_le_bit((nr),(unsigned long *)(addr))
+#define minix_test_bit(nr,addr) \
+ generic_test_le_bit((nr),(unsigned long *)(addr))
+#define minix_find_first_zero_bit(addr,size) \
+ generic_find_first_zero_le_bit((unsigned long *)(addr),(size))
+
+#endif /* _ASM_GENERIC_BITOPS_MINIX_LE_H_ */
diff --git a/include/asm-generic/bitops/minix.h b/include/asm-generic/bitops/minix.h
new file mode 100644
index 00000000000..91f42e87aa5
--- /dev/null
+++ b/include/asm-generic/bitops/minix.h
@@ -0,0 +1,15 @@
+#ifndef _ASM_GENERIC_BITOPS_MINIX_H_
+#define _ASM_GENERIC_BITOPS_MINIX_H_
+
+#define minix_test_and_set_bit(nr,addr) \
+ __test_and_set_bit((nr),(unsigned long *)(addr))
+#define minix_set_bit(nr,addr) \
+ __set_bit((nr),(unsigned long *)(addr))
+#define minix_test_and_clear_bit(nr,addr) \
+ __test_and_clear_bit((nr),(unsigned long *)(addr))
+#define minix_test_bit(nr,addr) \
+ test_bit((nr),(unsigned long *)(addr))
+#define minix_find_first_zero_bit(addr,size) \
+ find_first_zero_bit((unsigned long *)(addr),(size))
+
+#endif /* _ASM_GENERIC_BITOPS_MINIX_H_ */
diff --git a/include/asm-generic/bitops/non-atomic.h b/include/asm-generic/bitops/non-atomic.h
new file mode 100644
index 00000000000..46a825cf2ae
--- /dev/null
+++ b/include/asm-generic/bitops/non-atomic.h
@@ -0,0 +1,111 @@
+#ifndef _ASM_GENERIC_BITOPS_NON_ATOMIC_H_
+#define _ASM_GENERIC_BITOPS_NON_ATOMIC_H_
+
+#include <asm/types.h>
+
+#define BITOP_MASK(nr) (1UL << ((nr) % BITS_PER_LONG))
+#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG)
+
+/**
+ * __set_bit - Set a bit in memory
+ * @nr: the bit to set
+ * @addr: the address to start counting from
+ *
+ * Unlike set_bit(), this function is non-atomic and may be reordered.
+ * If it's called on the same region of memory simultaneously, the effect
+ * may be that only one operation succeeds.
+ */
+static inline void __set_bit(int nr, volatile unsigned long *addr)
+{
+ unsigned long mask = BITOP_MASK(nr);
+ unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
+
+ *p |= mask;
+}
+
+static inline void __clear_bit(int nr, volatile unsigned long *addr)
+{
+ unsigned long mask = BITOP_MASK(nr);
+ unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
+
+ *p &= ~mask;
+}
+
+/**
+ * __change_bit - Toggle a bit in memory
+ * @nr: the bit to change
+ * @addr: the address to start counting from
+ *
+ * Unlike change_bit(), this function is non-atomic and may be reordered.
+ * If it's called on the same region of memory simultaneously, the effect
+ * may be that only one operation succeeds.
+ */
+static inline void __change_bit(int nr, volatile unsigned long *addr)
+{
+ unsigned long mask = BITOP_MASK(nr);
+ unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
+
+ *p ^= mask;
+}
+
+/**
+ * __test_and_set_bit - Set a bit and return its old value
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This operation is non-atomic and can be reordered.
+ * If two instances of this operation race, one can appear to succeed
+ * but actually fail. You must protect multiple accesses with a lock.
+ */
+static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
+{
+ unsigned long mask = BITOP_MASK(nr);
+ unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
+ unsigned long old = *p;
+
+ *p = old | mask;
+ return (old & mask) != 0;
+}
+
+/**
+ * __test_and_clear_bit - Clear a bit and return its old value
+ * @nr: Bit to clear
+ * @addr: Address to count from
+ *
+ * This operation is non-atomic and can be reordered.
+ * If two instances of this operation race, one can appear to succeed
+ * but actually fail. You must protect multiple accesses with a lock.
+ */
+static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
+{
+ unsigned long mask = BITOP_MASK(nr);
+ unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
+ unsigned long old = *p;
+
+ *p = old & ~mask;
+ return (old & mask) != 0;
+}
+
+/* WARNING: non-atomic and it can be reordered! */
+static inline int __test_and_change_bit(int nr,
+ volatile unsigned long *addr)
+{
+ unsigned long mask = BITOP_MASK(nr);
+ unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
+ unsigned long old = *p;
+
+ *p = old ^ mask;
+ return (old & mask) != 0;
+}
+
+/**
+ * test_bit - Determine whether a bit is set
+ * @nr: bit number to test
+ * @addr: Address to start counting from
+ */
+static inline int test_bit(int nr, const volatile unsigned long *addr)
+{
+ return 1UL & (addr[BITOP_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
+}
+
+#endif /* _ASM_GENERIC_BITOPS_NON_ATOMIC_H_ */
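/*
 * Usage sketch (the helper and its lock argument are illustrative
 * assumptions, and <linux/spinlock.h> is assumed to be available): the
 * non-atomic variants above are meant for bits already protected by some
 * other serialization, such as a spinlock held by the caller.
 */
static inline void mark_slot_used(unsigned long *map, int slot,
				  spinlock_t *lock)
{
	spin_lock(lock);
	__set_bit(slot, map);	/* safe: updates serialized by @lock */
	spin_unlock(lock);
}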
diff --git a/include/asm-generic/bitops/sched.h b/include/asm-generic/bitops/sched.h
new file mode 100644
index 00000000000..5ef93a4d009
--- /dev/null
+++ b/include/asm-generic/bitops/sched.h
@@ -0,0 +1,36 @@
+#ifndef _ASM_GENERIC_BITOPS_SCHED_H_
+#define _ASM_GENERIC_BITOPS_SCHED_H_
+
+#include <linux/compiler.h> /* unlikely() */
+#include <asm/types.h>
+
+/*
+ * Every architecture must define this function. It's the fastest
+ * way of searching a 140-bit bitmap where the first 100 bits are
+ * unlikely to be set. It's guaranteed that at least one of the 140
+ * bits is cleared.
+ */
+static inline int sched_find_first_bit(const unsigned long *b)
+{
+#if BITS_PER_LONG == 64
+ if (unlikely(b[0]))
+ return __ffs(b[0]);
+ if (unlikely(b[1]))
+ return __ffs(b[1]) + 64;
+ return __ffs(b[2]) + 128;
+#elif BITS_PER_LONG == 32
+ if (unlikely(b[0]))
+ return __ffs(b[0]);
+ if (unlikely(b[1]))
+ return __ffs(b[1]) + 32;
+ if (unlikely(b[2]))
+ return __ffs(b[2]) + 64;
+ if (b[3])
+ return __ffs(b[3]) + 96;
+ return __ffs(b[4]) + 128;
+#else
+#error BITS_PER_LONG not defined
+#endif
+}
+
+#endif /* _ASM_GENERIC_BITOPS_SCHED_H_ */
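/*
 * Usage sketch (the names are assumptions modelled on the O(1) scheduler's
 * priority array): the bitmap spans 140 priorities and the caller
 * guarantees at least one bit is set, as noted above.
 */
#define EXAMPLE_MAX_PRIO	140
#define EXAMPLE_PRIO_LONGS	((EXAMPLE_MAX_PRIO + BITS_PER_LONG - 1) / BITS_PER_LONG)

static inline int example_highest_prio(const unsigned long bitmap[EXAMPLE_PRIO_LONGS])
{
	return sched_find_first_bit(bitmap);
}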
diff --git a/include/asm-h8300/bitops.h b/include/asm-h8300/bitops.h
index ff7c2b72159..574f57b6c4d 100644
--- a/include/asm-h8300/bitops.h
+++ b/include/asm-h8300/bitops.h
@@ -8,7 +8,6 @@
#include <linux/config.h>
#include <linux/compiler.h>
-#include <asm/byteorder.h> /* swab32 */
#include <asm/system.h>
#ifdef __KERNEL__
@@ -177,10 +176,7 @@ H8300_GEN_TEST_BITOP(test_and_change_bit,"bnot")
#undef H8300_GEN_TEST_BITOP_CONST_INT
#undef H8300_GEN_TEST_BITOP
-#define find_first_zero_bit(addr, size) \
- find_next_zero_bit((addr), (size), 0)
-
-#define ffs(x) generic_ffs(x)
+#include <asm-generic/bitops/ffs.h>
static __inline__ unsigned long __ffs(unsigned long word)
{
@@ -196,216 +192,16 @@ static __inline__ unsigned long __ffs(unsigned long word)
return result;
}
-static __inline__ int find_next_zero_bit (const unsigned long * addr, int size, int offset)
-{
- unsigned long *p = (unsigned long *)(((unsigned long)addr + (offset >> 3)) & ~3);
- unsigned long result = offset & ~31UL;
- unsigned long tmp;
-
- if (offset >= size)
- return size;
- size -= result;
- offset &= 31UL;
- if (offset) {
- tmp = *(p++);
- tmp |= ~0UL >> (32-offset);
- if (size < 32)
- goto found_first;
- if (~tmp)
- goto found_middle;
- size -= 32;
- result += 32;
- }
- while (size & ~31UL) {
- if (~(tmp = *(p++)))
- goto found_middle;
- result += 32;
- size -= 32;
- }
- if (!size)
- return result;
- tmp = *p;
-
-found_first:
- tmp |= ~0UL << size;
-found_middle:
- return result + ffz(tmp);
-}
-
-static __inline__ unsigned long find_next_bit(const unsigned long *addr,
- unsigned long size, unsigned long offset)
-{
- unsigned long *p = (unsigned long *)(((unsigned long)addr + (offset >> 3)) & ~3);
- unsigned int result = offset & ~31UL;
- unsigned int tmp;
-
- if (offset >= size)
- return size;
- size -= result;
- offset &= 31UL;
- if (offset) {
- tmp = *(p++);
- tmp &= ~0UL << offset;
- if (size < 32)
- goto found_first;
- if (tmp)
- goto found_middle;
- size -= 32;
- result += 32;
- }
- while (size >= 32) {
- if ((tmp = *p++) != 0)
- goto found_middle;
- result += 32;
- size -= 32;
- }
- if (!size)
- return result;
- tmp = *p;
-
-found_first:
- tmp &= ~0UL >> (32 - size);
- if (tmp == 0UL)
- return result + size;
-found_middle:
- return result + __ffs(tmp);
-}
-
-#define find_first_bit(addr, size) find_next_bit(addr, size, 0)
-
-/*
- * Every architecture must define this function. It's the fastest
- * way of searching a 140-bit bitmap where the first 100 bits are
- * unlikely to be set. It's guaranteed that at least one of the 140
- * bits is cleared.
- */
-static inline int sched_find_first_bit(unsigned long *b)
-{
- if (unlikely(b[0]))
- return __ffs(b[0]);
- if (unlikely(b[1]))
- return __ffs(b[1]) + 32;
- if (unlikely(b[2]))
- return __ffs(b[2]) + 64;
- if (b[3])
- return __ffs(b[3]) + 96;
- return __ffs(b[4]) + 128;
-}
-
-/*
- * hweightN: returns the hamming weight (i.e. the number
- * of bits set) of a N-bit word
- */
-
-#define hweight32(x) generic_hweight32(x)
-#define hweight16(x) generic_hweight16(x)
-#define hweight8(x) generic_hweight8(x)
-
-static __inline__ int ext2_set_bit(int nr, volatile void * addr)
-{
- int mask, retval;
- unsigned long flags;
- volatile unsigned char *ADDR = (unsigned char *) addr;
-
- ADDR += nr >> 3;
- mask = 1 << (nr & 0x07);
- local_irq_save(flags);
- retval = (mask & *ADDR) != 0;
- *ADDR |= mask;
- local_irq_restore(flags);
- return retval;
-}
-#define ext2_set_bit_atomic(lock, nr, addr) ext2_set_bit(nr, addr)
-
-static __inline__ int ext2_clear_bit(int nr, volatile void * addr)
-{
- int mask, retval;
- unsigned long flags;
- volatile unsigned char *ADDR = (unsigned char *) addr;
-
- ADDR += nr >> 3;
- mask = 1 << (nr & 0x07);
- local_irq_save(flags);
- retval = (mask & *ADDR) != 0;
- *ADDR &= ~mask;
- local_irq_restore(flags);
- return retval;
-}
-#define ext2_clear_bit_atomic(lock, nr, addr) ext2_set_bit(nr, addr)
-
-static __inline__ int ext2_test_bit(int nr, const volatile void * addr)
-{
- int mask;
- const volatile unsigned char *ADDR = (const unsigned char *) addr;
-
- ADDR += nr >> 3;
- mask = 1 << (nr & 0x07);
- return ((mask & *ADDR) != 0);
-}
-
-#define ext2_find_first_zero_bit(addr, size) \
- ext2_find_next_zero_bit((addr), (size), 0)
-
-static __inline__ unsigned long ext2_find_next_zero_bit(void *addr, unsigned long size, unsigned long offset)
-{
- unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
- unsigned long result = offset & ~31UL;
- unsigned long tmp;
-
- if (offset >= size)
- return size;
- size -= result;
- offset &= 31UL;
- if(offset) {
- /* We hold the little endian value in tmp, but then the
- * shift is illegal. So we could keep a big endian value
- * in tmp, like this:
- *
- * tmp = __swab32(*(p++));
- * tmp |= ~0UL >> (32-offset);
- *
- * but this would decrease performance, so we change the
- * shift:
- */
- tmp = *(p++);
- tmp |= __swab32(~0UL >> (32-offset));
- if(size < 32)
- goto found_first;
- if(~tmp)
- goto found_middle;
- size -= 32;
- result += 32;
- }
- while(size & ~31UL) {
- if(~(tmp = *(p++)))
- goto found_middle;
- result += 32;
- size -= 32;
- }
- if(!size)
- return result;
- tmp = *p;
-
-found_first:
- /* tmp is little endian, so we would have to swab the shift,
- * see above. But then we have to swab tmp below for ffz, so
- * we might as well do this here.
- */
- return result + ffz(__swab32(tmp) | (~0UL << size));
-found_middle:
- return result + ffz(__swab32(tmp));
-}
-
-/* Bitmap functions for the minix filesystem. */
-#define minix_test_and_set_bit(nr,addr) test_and_set_bit(nr,addr)
-#define minix_set_bit(nr,addr) set_bit(nr,addr)
-#define minix_test_and_clear_bit(nr,addr) test_and_clear_bit(nr,addr)
-#define minix_test_bit(nr,addr) test_bit(nr,addr)
-#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size)
+#include <asm-generic/bitops/find.h>
+#include <asm-generic/bitops/sched.h>
+#include <asm-generic/bitops/hweight.h>
+#include <asm-generic/bitops/ext2-non-atomic.h>
+#include <asm-generic/bitops/ext2-atomic.h>
+#include <asm-generic/bitops/minix.h>
#endif /* __KERNEL__ */
-#define fls(x) generic_fls(x)
-#define fls64(x) generic_fls64(x)
+#include <asm-generic/bitops/fls.h>
+#include <asm-generic/bitops/fls64.h>
#endif /* _H8300_BITOPS_H */
diff --git a/include/asm-h8300/types.h b/include/asm-h8300/types.h
index bf91e0d4dde..da2402b8654 100644
--- a/include/asm-h8300/types.h
+++ b/include/asm-h8300/types.h
@@ -58,6 +58,9 @@ typedef u32 dma_addr_t;
#define HAVE_SECTOR_T
typedef u64 sector_t;
+#define HAVE_BLKCNT_T
+typedef u64 blkcnt_t;
+
#endif /* __KERNEL__ */
#endif /* __ASSEMBLY__ */
diff --git a/include/asm-i386/bitops.h b/include/asm-i386/bitops.h
index 7d20b95edb3..08deaeee6be 100644
--- a/include/asm-i386/bitops.h
+++ b/include/asm-i386/bitops.h
@@ -362,28 +362,9 @@ static inline unsigned long ffz(unsigned long word)
return word;
}
-#define fls64(x) generic_fls64(x)
-
#ifdef __KERNEL__
-/*
- * Every architecture must define this function. It's the fastest
- * way of searching a 140-bit bitmap where the first 100 bits are
- * unlikely to be set. It's guaranteed that at least one of the 140
- * bits is cleared.
- */
-static inline int sched_find_first_bit(const unsigned long *b)
-{
- if (unlikely(b[0]))
- return __ffs(b[0]);
- if (unlikely(b[1]))
- return __ffs(b[1]) + 32;
- if (unlikely(b[2]))
- return __ffs(b[2]) + 64;
- if (b[3])
- return __ffs(b[3]) + 96;
- return __ffs(b[4]) + 128;
-}
+#include <asm-generic/bitops/sched.h>
/**
* ffs - find first bit set
@@ -421,42 +402,22 @@ static inline int fls(int x)
return r+1;
}
-/**
- * hweightN - returns the hamming weight of a N-bit word
- * @x: the word to weigh
- *
- * The Hamming Weight of a number is the total number of bits set in it.
- */
-
-#define hweight32(x) generic_hweight32(x)
-#define hweight16(x) generic_hweight16(x)
-#define hweight8(x) generic_hweight8(x)
+#include <asm-generic/bitops/hweight.h>
#endif /* __KERNEL__ */
+#include <asm-generic/bitops/fls64.h>
+
#ifdef __KERNEL__
-#define ext2_set_bit(nr,addr) \
- __test_and_set_bit((nr),(unsigned long*)addr)
+#include <asm-generic/bitops/ext2-non-atomic.h>
+
#define ext2_set_bit_atomic(lock,nr,addr) \
test_and_set_bit((nr),(unsigned long*)addr)
-#define ext2_clear_bit(nr, addr) \
- __test_and_clear_bit((nr),(unsigned long*)addr)
#define ext2_clear_bit_atomic(lock,nr, addr) \
test_and_clear_bit((nr),(unsigned long*)addr)
-#define ext2_test_bit(nr, addr) test_bit((nr),(unsigned long*)addr)
-#define ext2_find_first_zero_bit(addr, size) \
- find_first_zero_bit((unsigned long*)addr, size)
-#define ext2_find_next_zero_bit(addr, size, off) \
- find_next_zero_bit((unsigned long*)addr, size, off)
-
-/* Bitmap functions for the minix filesystem. */
-#define minix_test_and_set_bit(nr,addr) __test_and_set_bit(nr,(void*)addr)
-#define minix_set_bit(nr,addr) __set_bit(nr,(void*)addr)
-#define minix_test_and_clear_bit(nr,addr) __test_and_clear_bit(nr,(void*)addr)
-#define minix_test_bit(nr,addr) test_bit(nr,(void*)addr)
-#define minix_find_first_zero_bit(addr,size) \
- find_first_zero_bit((void*)addr,size)
+
+#include <asm-generic/bitops/minix.h>
#endif /* __KERNEL__ */
diff --git a/include/asm-i386/kprobes.h b/include/asm-i386/kprobes.h
index a0d2d74a7dd..57d157c5cf8 100644
--- a/include/asm-i386/kprobes.h
+++ b/include/asm-i386/kprobes.h
@@ -34,6 +34,7 @@ struct pt_regs;
typedef u8 kprobe_opcode_t;
#define BREAKPOINT_INSTRUCTION 0xcc
+#define RELATIVEJUMP_INSTRUCTION 0xe9
#define MAX_INSN_SIZE 16
#define MAX_STACK_SIZE 64
#define MIN_STACK_SIZE(ADDR) (((MAX_STACK_SIZE) < \
@@ -51,6 +52,11 @@ void kretprobe_trampoline(void);
struct arch_specific_insn {
/* copy of the original instruction */
kprobe_opcode_t *insn;
+ /*
+	 * If this flag is not 0, this kprobe can be boosted when its
+	 * post_handler and break_handler are not set.
+ */
+ int boostable;
};
struct prev_kprobe {
diff --git a/include/asm-i386/stat.h b/include/asm-i386/stat.h
index b464f8020ec..67eae78323b 100644
--- a/include/asm-i386/stat.h
+++ b/include/asm-i386/stat.h
@@ -58,8 +58,7 @@ struct stat64 {
long long st_size;
unsigned long st_blksize;
- unsigned long st_blocks; /* Number 512-byte blocks allocated. */
- unsigned long __pad4; /* future possible st_blocks high bits */
+ unsigned long long st_blocks; /* Number 512-byte blocks allocated. */
unsigned long st_atime;
unsigned long st_atime_nsec;
diff --git a/include/asm-i386/types.h b/include/asm-i386/types.h
index ced00fe8fe6..e50a08bd7ce 100644
--- a/include/asm-i386/types.h
+++ b/include/asm-i386/types.h
@@ -63,6 +63,11 @@ typedef u64 sector_t;
#define HAVE_SECTOR_T
#endif
+#ifdef CONFIG_LSF
+typedef u64 blkcnt_t;
+#define HAVE_BLKCNT_T
+#endif
+
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
diff --git a/include/asm-ia64/bitops.h b/include/asm-ia64/bitops.h
index 36d0fb95ea8..90921e16279 100644
--- a/include/asm-ia64/bitops.h
+++ b/include/asm-ia64/bitops.h
@@ -5,8 +5,8 @@
* Copyright (C) 1998-2003 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
*
- * 02/06/02 find_next_bit() and find_first_bit() added from Erich Focht's ia64 O(1)
- * scheduler patch
+ * 02/06/02 find_next_bit() and find_first_bit() added from Erich Focht's ia64
+ * O(1) scheduler patch
*/
#include <linux/compiler.h>
@@ -25,9 +25,9 @@
* restricted to acting on a single-word quantity.
*
* The address must be (at least) "long" aligned.
- * Note that there are driver (e.g., eepro100) which use these operations to operate on
- * hw-defined data-structures, so we can't easily change these operations to force a
- * bigger alignment.
+ * Note that there are drivers (e.g., eepro100) which use these operations to
+ * operate on hw-defined data-structures, so we can't easily change these
+ * operations to force a bigger alignment.
*
* bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
*/
@@ -284,8 +284,8 @@ test_bit (int nr, const volatile void *addr)
* ffz - find the first zero bit in a long word
* @x: The long word to find the bit in
*
- * Returns the bit-number (0..63) of the first (least significant) zero bit. Undefined if
- * no zero exists, so code should check against ~0UL first...
+ * Returns the bit-number (0..63) of the first (least significant) zero bit.
+ * Undefined if no zero exists, so code should check against ~0UL first...
*/
static inline unsigned long
ffz (unsigned long x)
@@ -345,13 +345,14 @@ fls (int t)
x |= x >> 16;
return ia64_popcnt(x);
}
-#define fls64(x) generic_fls64(x)
+
+#include <asm-generic/bitops/fls64.h>
/*
- * ffs: find first bit set. This is defined the same way as the libc and compiler builtin
- * ffs routines, therefore differs in spirit from the above ffz (man ffs): it operates on
- * "int" values only and the result value is the bit number + 1. ffs(0) is defined to
- * return zero.
+ * ffs: find first bit set. This is defined the same way as the libc and
+ * compiler builtin ffs routines, therefore differs in spirit from the above
+ * ffz (man ffs): it operates on "int" values only and the result value is the
+ * bit number + 1. ffs(0) is defined to return zero.
*/
#define ffs(x) __builtin_ffs(x)
@@ -373,51 +374,17 @@ hweight64 (unsigned long x)
#endif /* __KERNEL__ */
-extern int __find_next_zero_bit (const void *addr, unsigned long size,
- unsigned long offset);
-extern int __find_next_bit(const void *addr, unsigned long size,
- unsigned long offset);
-
-#define find_next_zero_bit(addr, size, offset) \
- __find_next_zero_bit((addr), (size), (offset))
-#define find_next_bit(addr, size, offset) \
- __find_next_bit((addr), (size), (offset))
-
-/*
- * The optimizer actually does good code for this case..
- */
-#define find_first_zero_bit(addr, size) find_next_zero_bit((addr), (size), 0)
-
-#define find_first_bit(addr, size) find_next_bit((addr), (size), 0)
+#include <asm-generic/bitops/find.h>
#ifdef __KERNEL__
-#define __clear_bit(nr, addr) clear_bit(nr, addr)
+#include <asm-generic/bitops/ext2-non-atomic.h>
-#define ext2_set_bit test_and_set_bit
#define ext2_set_bit_atomic(l,n,a) test_and_set_bit(n,a)
-#define ext2_clear_bit test_and_clear_bit
#define ext2_clear_bit_atomic(l,n,a) test_and_clear_bit(n,a)
-#define ext2_test_bit test_bit
-#define ext2_find_first_zero_bit find_first_zero_bit
-#define ext2_find_next_zero_bit find_next_zero_bit
-
-/* Bitmap functions for the minix filesystem. */
-#define minix_test_and_set_bit(nr,addr) test_and_set_bit(nr,addr)
-#define minix_set_bit(nr,addr) set_bit(nr,addr)
-#define minix_test_and_clear_bit(nr,addr) test_and_clear_bit(nr,addr)
-#define minix_test_bit(nr,addr) test_bit(nr,addr)
-#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size)
-static inline int
-sched_find_first_bit (unsigned long *b)
-{
- if (unlikely(b[0]))
- return __ffs(b[0]);
- if (unlikely(b[1]))
- return 64 + __ffs(b[1]);
- return __ffs(b[2]) + 128;
-}
+#include <asm-generic/bitops/minix.h>
+#include <asm-generic/bitops/sched.h>
#endif /* __KERNEL__ */
diff --git a/include/asm-ia64/dmi.h b/include/asm-ia64/dmi.h
new file mode 100644
index 00000000000..f3efaa22952
--- /dev/null
+++ b/include/asm-ia64/dmi.h
@@ -0,0 +1,6 @@
+#ifndef _ASM_DMI_H
+#define _ASM_DMI_H 1
+
+#include <asm/io.h>
+
+#endif
diff --git a/include/asm-ia64/io.h b/include/asm-ia64/io.h
index b64fdb98549..c2e3742108b 100644
--- a/include/asm-ia64/io.h
+++ b/include/asm-ia64/io.h
@@ -88,8 +88,8 @@ phys_to_virt (unsigned long address)
}
#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
-extern int valid_phys_addr_range (unsigned long addr, size_t *count); /* efi.c */
-extern int valid_mmap_phys_addr_range (unsigned long addr, size_t *count);
+extern int valid_phys_addr_range (unsigned long addr, size_t count); /* efi.c */
+extern int valid_mmap_phys_addr_range (unsigned long addr, size_t count);
/*
* The following two macros are deprecated and scheduled for removal.
@@ -416,24 +416,18 @@ __writeq (unsigned long val, volatile void __iomem *addr)
# define outl_p outl
#endif
-/*
- * An "address" in IO memory space is not clearly either an integer or a pointer. We will
- * accept both, thus the casts.
- *
- * On ia-64, we access the physical I/O memory space through the uncached kernel region.
- */
-static inline void __iomem *
-ioremap (unsigned long offset, unsigned long size)
-{
- return (void __iomem *) (__IA64_UNCACHED_OFFSET | (offset));
-}
+extern void __iomem * ioremap(unsigned long offset, unsigned long size);
+extern void __iomem * ioremap_nocache (unsigned long offset, unsigned long size);
static inline void
iounmap (volatile void __iomem *addr)
{
}
-#define ioremap_nocache(o,s) ioremap(o,s)
+/* Use normal IO mappings for DMI */
+#define dmi_ioremap ioremap
+#define dmi_iounmap(x,l) iounmap(x)
+#define dmi_alloc(l) kmalloc(l, GFP_ATOMIC)
# ifdef __KERNEL__
diff --git a/include/asm-ia64/sn/sn_sal.h b/include/asm-ia64/sn/sn_sal.h
index 244449df741..bf4cc867a69 100644
--- a/include/asm-ia64/sn/sn_sal.h
+++ b/include/asm-ia64/sn/sn_sal.h
@@ -159,7 +159,7 @@
static inline u32
sn_sal_rev(void)
{
- struct ia64_sal_systab *systab = efi.sal_systab;
+ struct ia64_sal_systab *systab = __va(efi.sal_systab);
return (u32)(systab->sal_b_rev_major << 8 | systab->sal_b_rev_minor);
}
diff --git a/include/asm-m32r/bitops.h b/include/asm-m32r/bitops.h
index abea2fdd868..902a366101a 100644
--- a/include/asm-m32r/bitops.h
+++ b/include/asm-m32r/bitops.h
@@ -63,25 +63,6 @@ static __inline__ void set_bit(int nr, volatile void * addr)
}
/**
- * __set_bit - Set a bit in memory
- * @nr: the bit to set
- * @addr: the address to start counting from
- *
- * Unlike set_bit(), this function is non-atomic and may be reordered.
- * If it's called on the same region of memory simultaneously, the effect
- * may be that only one operation succeeds.
- */
-static __inline__ void __set_bit(int nr, volatile void * addr)
-{
- __u32 mask;
- volatile __u32 *a = addr;
-
- a += (nr >> 5);
- mask = (1 << (nr & 0x1F));
- *a |= mask;
-}
-
-/**
* clear_bit - Clears a bit in memory
* @nr: Bit to clear
* @addr: Address to start counting from
@@ -118,39 +99,10 @@ static __inline__ void clear_bit(int nr, volatile void * addr)
local_irq_restore(flags);
}
-static __inline__ void __clear_bit(int nr, volatile unsigned long * addr)
-{
- unsigned long mask;
- volatile unsigned long *a = addr;
-
- a += (nr >> 5);
- mask = (1 << (nr & 0x1F));
- *a &= ~mask;
-}
-
#define smp_mb__before_clear_bit() barrier()
#define smp_mb__after_clear_bit() barrier()
/**
- * __change_bit - Toggle a bit in memory
- * @nr: the bit to set
- * @addr: the address to start counting from
- *
- * Unlike change_bit(), this function is non-atomic and may be reordered.
- * If it's called on the same region of memory simultaneously, the effect
- * may be that only one operation succeeds.
- */
-static __inline__ void __change_bit(int nr, volatile void * addr)
-{
- __u32 mask;
- volatile __u32 *a = addr;
-
- a += (nr >> 5);
- mask = (1 << (nr & 0x1F));
- *a ^= mask;
-}
-
-/**
* change_bit - Toggle a bit in memory
* @nr: Bit to clear
* @addr: Address to start counting from
@@ -221,28 +173,6 @@ static __inline__ int test_and_set_bit(int nr, volatile void * addr)
}
/**
- * __test_and_set_bit - Set a bit and return its old value
- * @nr: Bit to set
- * @addr: Address to count from
- *
- * This operation is non-atomic and can be reordered.
- * If two examples of this operation race, one can appear to succeed
- * but actually fail. You must protect multiple accesses with a lock.
- */
-static __inline__ int __test_and_set_bit(int nr, volatile void * addr)
-{
- __u32 mask, oldbit;
- volatile __u32 *a = addr;
-
- a += (nr >> 5);
- mask = (1 << (nr & 0x1F));
- oldbit = (*a & mask);
- *a |= mask;
-
- return (oldbit != 0);
-}
-
-/**
* test_and_clear_bit - Clear a bit and return its old value
* @nr: Bit to set
* @addr: Address to count from
@@ -280,42 +210,6 @@ static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
}
/**
- * __test_and_clear_bit - Clear a bit and return its old value
- * @nr: Bit to set
- * @addr: Address to count from
- *
- * This operation is non-atomic and can be reordered.
- * If two examples of this operation race, one can appear to succeed
- * but actually fail. You must protect multiple accesses with a lock.
- */
-static __inline__ int __test_and_clear_bit(int nr, volatile void * addr)
-{
- __u32 mask, oldbit;
- volatile __u32 *a = addr;
-
- a += (nr >> 5);
- mask = (1 << (nr & 0x1F));
- oldbit = (*a & mask);
- *a &= ~mask;
-
- return (oldbit != 0);
-}
-
-/* WARNING: non atomic and it can be reordered! */
-static __inline__ int __test_and_change_bit(int nr, volatile void * addr)
-{
- __u32 mask, oldbit;
- volatile __u32 *a = addr;
-
- a += (nr >> 5);
- mask = (1 << (nr & 0x1F));
- oldbit = (*a & mask);
- *a ^= mask;
-
- return (oldbit != 0);
-}
-
-/**
* test_and_change_bit - Change a bit and return its old value
* @nr: Bit to set
* @addr: Address to count from
@@ -350,353 +244,26 @@ static __inline__ int test_and_change_bit(int nr, volatile void * addr)
return (oldbit != 0);
}
-/**
- * test_bit - Determine whether a bit is set
- * @nr: bit number to test
- * @addr: Address to start counting from
- */
-static __inline__ int test_bit(int nr, const volatile void * addr)
-{
- __u32 mask;
- const volatile __u32 *a = addr;
-
- a += (nr >> 5);
- mask = (1 << (nr & 0x1F));
-
- return ((*a & mask) != 0);
-}
-
-/**
- * ffz - find first zero in word.
- * @word: The word to search
- *
- * Undefined if no zero exists, so code should check against ~0UL first.
- */
-static __inline__ unsigned long ffz(unsigned long word)
-{
- int k;
-
- word = ~word;
- k = 0;
- if (!(word & 0x0000ffff)) { k += 16; word >>= 16; }
- if (!(word & 0x000000ff)) { k += 8; word >>= 8; }
- if (!(word & 0x0000000f)) { k += 4; word >>= 4; }
- if (!(word & 0x00000003)) { k += 2; word >>= 2; }
- if (!(word & 0x00000001)) { k += 1; }
-
- return k;
-}
-
-/**
- * find_first_zero_bit - find the first zero bit in a memory region
- * @addr: The address to start the search at
- * @size: The maximum size to search
- *
- * Returns the bit-number of the first zero bit, not the number of the byte
- * containing a bit.
- */
-
-#define find_first_zero_bit(addr, size) \
- find_next_zero_bit((addr), (size), 0)
-
-/**
- * find_next_zero_bit - find the first zero bit in a memory region
- * @addr: The address to base the search on
- * @offset: The bitnumber to start searching at
- * @size: The maximum size to search
- */
-static __inline__ int find_next_zero_bit(const unsigned long *addr,
- int size, int offset)
-{
- const unsigned long *p = addr + (offset >> 5);
- unsigned long result = offset & ~31UL;
- unsigned long tmp;
-
- if (offset >= size)
- return size;
- size -= result;
- offset &= 31UL;
- if (offset) {
- tmp = *(p++);
- tmp |= ~0UL >> (32-offset);
- if (size < 32)
- goto found_first;
- if (~tmp)
- goto found_middle;
- size -= 32;
- result += 32;
- }
- while (size & ~31UL) {
- if (~(tmp = *(p++)))
- goto found_middle;
- result += 32;
- size -= 32;
- }
- if (!size)
- return result;
- tmp = *p;
-
-found_first:
- tmp |= ~0UL << size;
-found_middle:
- return result + ffz(tmp);
-}
-
-/**
- * __ffs - find first bit in word.
- * @word: The word to search
- *
- * Undefined if no bit exists, so code should check against 0 first.
- */
-static __inline__ unsigned long __ffs(unsigned long word)
-{
- int k = 0;
-
- if (!(word & 0x0000ffff)) { k += 16; word >>= 16; }
- if (!(word & 0x000000ff)) { k += 8; word >>= 8; }
- if (!(word & 0x0000000f)) { k += 4; word >>= 4; }
- if (!(word & 0x00000003)) { k += 2; word >>= 2; }
- if (!(word & 0x00000001)) { k += 1;}
-
- return k;
-}
-
-/*
- * fls: find last bit set.
- */
-#define fls(x) generic_fls(x)
-#define fls64(x) generic_fls64(x)
+#include <asm-generic/bitops/non-atomic.h>
+#include <asm-generic/bitops/ffz.h>
+#include <asm-generic/bitops/__ffs.h>
+#include <asm-generic/bitops/fls.h>
+#include <asm-generic/bitops/fls64.h>
#ifdef __KERNEL__
-/*
- * Every architecture must define this function. It's the fastest
- * way of searching a 140-bit bitmap where the first 100 bits are
- * unlikely to be set. It's guaranteed that at least one of the 140
- * bits is cleared.
- */
-static inline int sched_find_first_bit(unsigned long *b)
-{
- if (unlikely(b[0]))
- return __ffs(b[0]);
- if (unlikely(b[1]))
- return __ffs(b[1]) + 32;
- if (unlikely(b[2]))
- return __ffs(b[2]) + 64;
- if (b[3])
- return __ffs(b[3]) + 96;
- return __ffs(b[4]) + 128;
-}
-
-/**
- * find_next_bit - find the first set bit in a memory region
- * @addr: The address to base the search on
- * @offset: The bitnumber to start searching at
- * @size: The maximum size to search
- */
-static inline unsigned long find_next_bit(const unsigned long *addr,
- unsigned long size, unsigned long offset)
-{
- unsigned int *p = ((unsigned int *) addr) + (offset >> 5);
- unsigned int result = offset & ~31UL;
- unsigned int tmp;
-
- if (offset >= size)
- return size;
- size -= result;
- offset &= 31UL;
- if (offset) {
- tmp = *p++;
- tmp &= ~0UL << offset;
- if (size < 32)
- goto found_first;
- if (tmp)
- goto found_middle;
- size -= 32;
- result += 32;
- }
- while (size >= 32) {
- if ((tmp = *p++) != 0)
- goto found_middle;
- result += 32;
- size -= 32;
- }
- if (!size)
- return result;
- tmp = *p;
-
-found_first:
- tmp &= ~0UL >> (32 - size);
- if (tmp == 0UL) /* Are any bits set? */
- return result + size; /* Nope. */
-found_middle:
- return result + __ffs(tmp);
-}
-
-/**
- * find_first_bit - find the first set bit in a memory region
- * @addr: The address to start the search at
- * @size: The maximum size to search
- *
- * Returns the bit-number of the first set bit, not the number of the byte
- * containing a bit.
- */
-#define find_first_bit(addr, size) \
- find_next_bit((addr), (size), 0)
-
-/**
- * ffs - find first bit set
- * @x: the word to search
- *
- * This is defined the same way as
- * the libc and compiler builtin ffs routines, therefore
- * differs in spirit from the above ffz (man ffs).
- */
-#define ffs(x) generic_ffs(x)
-
-/**
- * hweightN - returns the hamming weight of a N-bit word
- * @x: the word to weigh
- *
- * The Hamming Weight of a number is the total number of bits set in it.
- */
-
-#define hweight32(x) generic_hweight32(x)
-#define hweight16(x) generic_hweight16(x)
-#define hweight8(x) generic_hweight8(x)
+#include <asm-generic/bitops/sched.h>
+#include <asm-generic/bitops/find.h>
+#include <asm-generic/bitops/ffs.h>
+#include <asm-generic/bitops/hweight.h>
#endif /* __KERNEL__ */
#ifdef __KERNEL__
-/*
- * ext2_XXXX function
- * orig: include/asm-sh/bitops.h
- */
-
-#ifdef __LITTLE_ENDIAN__
-#define ext2_set_bit test_and_set_bit
-#define ext2_clear_bit __test_and_clear_bit
-#define ext2_test_bit test_bit
-#define ext2_find_first_zero_bit find_first_zero_bit
-#define ext2_find_next_zero_bit find_next_zero_bit
-#else
-static inline int ext2_set_bit(int nr, volatile void * addr)
-{
- __u8 mask, oldbit;
- volatile __u8 *a = addr;
-
- a += (nr >> 3);
- mask = (1 << (nr & 0x07));
- oldbit = (*a & mask);
- *a |= mask;
-
- return (oldbit != 0);
-}
-
-static inline int ext2_clear_bit(int nr, volatile void * addr)
-{
- __u8 mask, oldbit;
- volatile __u8 *a = addr;
-
- a += (nr >> 3);
- mask = (1 << (nr & 0x07));
- oldbit = (*a & mask);
- *a &= ~mask;
-
- return (oldbit != 0);
-}
-
-static inline int ext2_test_bit(int nr, const volatile void * addr)
-{
- __u32 mask;
- const volatile __u8 *a = addr;
-
- a += (nr >> 3);
- mask = (1 << (nr & 0x07));
-
- return ((mask & *a) != 0);
-}
-
-#define ext2_find_first_zero_bit(addr, size) \
- ext2_find_next_zero_bit((addr), (size), 0)
-
-static inline unsigned long ext2_find_next_zero_bit(void *addr,
- unsigned long size, unsigned long offset)
-{
- unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
- unsigned long result = offset & ~31UL;
- unsigned long tmp;
-
- if (offset >= size)
- return size;
- size -= result;
- offset &= 31UL;
- if(offset) {
- /* We hold the little endian value in tmp, but then the
- * shift is illegal. So we could keep a big endian value
- * in tmp, like this:
- *
- * tmp = __swab32(*(p++));
- * tmp |= ~0UL >> (32-offset);
- *
- * but this would decrease preformance, so we change the
- * shift:
- */
- tmp = *(p++);
- tmp |= __swab32(~0UL >> (32-offset));
- if(size < 32)
- goto found_first;
- if(~tmp)
- goto found_middle;
- size -= 32;
- result += 32;
- }
- while(size & ~31UL) {
- if(~(tmp = *(p++)))
- goto found_middle;
- result += 32;
- size -= 32;
- }
- if(!size)
- return result;
- tmp = *p;
-
-found_first:
- /* tmp is little endian, so we would have to swab the shift,
- * see above. But then we have to swab tmp below for ffz, so
- * we might as well do this here.
- */
- return result + ffz(__swab32(tmp) | (~0UL << size));
-found_middle:
- return result + ffz(__swab32(tmp));
-}
-#endif
-
-#define ext2_set_bit_atomic(lock, nr, addr) \
- ({ \
- int ret; \
- spin_lock(lock); \
- ret = ext2_set_bit((nr), (addr)); \
- spin_unlock(lock); \
- ret; \
- })
-
-#define ext2_clear_bit_atomic(lock, nr, addr) \
- ({ \
- int ret; \
- spin_lock(lock); \
- ret = ext2_clear_bit((nr), (addr)); \
- spin_unlock(lock); \
- ret; \
- })
-
-/* Bitmap functions for the minix filesystem. */
-#define minix_test_and_set_bit(nr,addr) __test_and_set_bit(nr,addr)
-#define minix_set_bit(nr,addr) __set_bit(nr,addr)
-#define minix_test_and_clear_bit(nr,addr) __test_and_clear_bit(nr,addr)
-#define minix_test_bit(nr,addr) test_bit(nr,addr)
-#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size)
+#include <asm-generic/bitops/ext2-non-atomic.h>
+#include <asm-generic/bitops/ext2-atomic.h>
+#include <asm-generic/bitops/minix.h>
#endif /* __KERNEL__ */
diff --git a/include/asm-m68k/bitops.h b/include/asm-m68k/bitops.h
index 13f4c004846..1a61fdb56aa 100644
--- a/include/asm-m68k/bitops.h
+++ b/include/asm-m68k/bitops.h
@@ -310,36 +310,10 @@ static inline int fls(int x)
return 32 - cnt;
}
-#define fls64(x) generic_fls64(x)
-/*
- * Every architecture must define this function. It's the fastest
- * way of searching a 140-bit bitmap where the first 100 bits are
- * unlikely to be set. It's guaranteed that at least one of the 140
- * bits is cleared.
- */
-static inline int sched_find_first_bit(const unsigned long *b)
-{
- if (unlikely(b[0]))
- return __ffs(b[0]);
- if (unlikely(b[1]))
- return __ffs(b[1]) + 32;
- if (unlikely(b[2]))
- return __ffs(b[2]) + 64;
- if (b[3])
- return __ffs(b[3]) + 96;
- return __ffs(b[4]) + 128;
-}
-
-
-/*
- * hweightN: returns the hamming weight (i.e. the number
- * of bits set) of a N-bit word
- */
-
-#define hweight32(x) generic_hweight32(x)
-#define hweight16(x) generic_hweight16(x)
-#define hweight8(x) generic_hweight8(x)
+#include <asm-generic/bitops/fls64.h>
+#include <asm-generic/bitops/sched.h>
+#include <asm-generic/bitops/hweight.h>
/* Bitmap functions for the minix filesystem */
@@ -365,9 +339,9 @@ static inline int minix_find_first_zero_bit(const void *vaddr, unsigned size)
return ((p - addr) << 4) + (res ^ 31);
}
-#define minix_test_and_set_bit(nr, addr) test_and_set_bit((nr) ^ 16, (unsigned long *)(addr))
-#define minix_set_bit(nr,addr) set_bit((nr) ^ 16, (unsigned long *)(addr))
-#define minix_test_and_clear_bit(nr, addr) test_and_clear_bit((nr) ^ 16, (unsigned long *)(addr))
+#define minix_test_and_set_bit(nr, addr) __test_and_set_bit((nr) ^ 16, (unsigned long *)(addr))
+#define minix_set_bit(nr,addr) __set_bit((nr) ^ 16, (unsigned long *)(addr))
+#define minix_test_and_clear_bit(nr, addr) __test_and_clear_bit((nr) ^ 16, (unsigned long *)(addr))
static inline int minix_test_bit(int nr, const void *vaddr)
{
@@ -377,9 +351,9 @@ static inline int minix_test_bit(int nr, const void *vaddr)
/* Bitmap functions for the ext2 filesystem. */
-#define ext2_set_bit(nr, addr) test_and_set_bit((nr) ^ 24, (unsigned long *)(addr))
+#define ext2_set_bit(nr, addr) __test_and_set_bit((nr) ^ 24, (unsigned long *)(addr))
#define ext2_set_bit_atomic(lock, nr, addr) test_and_set_bit((nr) ^ 24, (unsigned long *)(addr))
-#define ext2_clear_bit(nr, addr) test_and_clear_bit((nr) ^ 24, (unsigned long *)(addr))
+#define ext2_clear_bit(nr, addr) __test_and_clear_bit((nr) ^ 24, (unsigned long *)(addr))
#define ext2_clear_bit_atomic(lock, nr, addr) test_and_clear_bit((nr) ^ 24, (unsigned long *)(addr))
static inline int ext2_test_bit(int nr, const void *vaddr)
diff --git a/include/asm-m68k/stat.h b/include/asm-m68k/stat.h
index c4c402a45e2..dd38bc2e9f9 100644
--- a/include/asm-m68k/stat.h
+++ b/include/asm-m68k/stat.h
@@ -60,8 +60,7 @@ struct stat64 {
long long st_size;
unsigned long st_blksize;
- unsigned long __pad4; /* future possible st_blocks high bits */
- unsigned long st_blocks; /* Number 512-byte blocks allocated. */
+ unsigned long long st_blocks; /* Number 512-byte blocks allocated. */
unsigned long st_atime;
unsigned long st_atime_nsec;
diff --git a/include/asm-m68knommu/bitops.h b/include/asm-m68knommu/bitops.h
index 25d8a3cfef9..0b68ccd327f 100644
--- a/include/asm-m68knommu/bitops.h
+++ b/include/asm-m68knommu/bitops.h
@@ -12,104 +12,10 @@
#ifdef __KERNEL__
-/*
- * Generic ffs().
- */
-static inline int ffs(int x)
-{
- int r = 1;
-
- if (!x)
- return 0;
- if (!(x & 0xffff)) {
- x >>= 16;
- r += 16;
- }
- if (!(x & 0xff)) {
- x >>= 8;
- r += 8;
- }
- if (!(x & 0xf)) {
- x >>= 4;
- r += 4;
- }
- if (!(x & 3)) {
- x >>= 2;
- r += 2;
- }
- if (!(x & 1)) {
- x >>= 1;
- r += 1;
- }
- return r;
-}
-
-/*
- * Generic __ffs().
- */
-static inline int __ffs(int x)
-{
- int r = 0;
-
- if (!x)
- return 0;
- if (!(x & 0xffff)) {
- x >>= 16;
- r += 16;
- }
- if (!(x & 0xff)) {
- x >>= 8;
- r += 8;
- }
- if (!(x & 0xf)) {
- x >>= 4;
- r += 4;
- }
- if (!(x & 3)) {
- x >>= 2;
- r += 2;
- }
- if (!(x & 1)) {
- x >>= 1;
- r += 1;
- }
- return r;
-}
-
-/*
- * Every architecture must define this function. It's the fastest
- * way of searching a 140-bit bitmap where the first 100 bits are
- * unlikely to be set. It's guaranteed that at least one of the 140
- * bits is cleared.
- */
-static inline int sched_find_first_bit(unsigned long *b)
-{
- if (unlikely(b[0]))
- return __ffs(b[0]);
- if (unlikely(b[1]))
- return __ffs(b[1]) + 32;
- if (unlikely(b[2]))
- return __ffs(b[2]) + 64;
- if (b[3])
- return __ffs(b[3]) + 96;
- return __ffs(b[4]) + 128;
-}
-
-/*
- * ffz = Find First Zero in word. Undefined if no zero exists,
- * so code should check against ~0UL first..
- */
-static __inline__ unsigned long ffz(unsigned long word)
-{
- unsigned long result = 0;
-
- while(word & 1) {
- result++;
- word >>= 1;
- }
- return result;
-}
-
+#include <asm-generic/bitops/ffs.h>
+#include <asm-generic/bitops/__ffs.h>
+#include <asm-generic/bitops/sched.h>
+#include <asm-generic/bitops/ffz.h>
static __inline__ void set_bit(int nr, volatile unsigned long * addr)
{
@@ -254,98 +160,8 @@ static __inline__ int __test_bit(int nr, const volatile unsigned long * addr)
__constant_test_bit((nr),(addr)) : \
__test_bit((nr),(addr)))
-#define find_first_zero_bit(addr, size) \
- find_next_zero_bit((addr), (size), 0)
-#define find_first_bit(addr, size) \
- find_next_bit((addr), (size), 0)
-
-static __inline__ int find_next_zero_bit (const void * addr, int size, int offset)
-{
- unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
- unsigned long result = offset & ~31UL;
- unsigned long tmp;
-
- if (offset >= size)
- return size;
- size -= result;
- offset &= 31UL;
- if (offset) {
- tmp = *(p++);
- tmp |= ~0UL >> (32-offset);
- if (size < 32)
- goto found_first;
- if (~tmp)
- goto found_middle;
- size -= 32;
- result += 32;
- }
- while (size & ~31UL) {
- if (~(tmp = *(p++)))
- goto found_middle;
- result += 32;
- size -= 32;
- }
- if (!size)
- return result;
- tmp = *p;
-
-found_first:
- tmp |= ~0UL << size;
-found_middle:
- return result + ffz(tmp);
-}
-
-/*
- * Find next one bit in a bitmap reasonably efficiently.
- */
-static __inline__ unsigned long find_next_bit(const unsigned long *addr,
- unsigned long size, unsigned long offset)
-{
- unsigned int *p = ((unsigned int *) addr) + (offset >> 5);
- unsigned int result = offset & ~31UL;
- unsigned int tmp;
-
- if (offset >= size)
- return size;
- size -= result;
- offset &= 31UL;
- if (offset) {
- tmp = *p++;
- tmp &= ~0UL << offset;
- if (size < 32)
- goto found_first;
- if (tmp)
- goto found_middle;
- size -= 32;
- result += 32;
- }
- while (size >= 32) {
- if ((tmp = *p++) != 0)
- goto found_middle;
- result += 32;
- size -= 32;
- }
- if (!size)
- return result;
- tmp = *p;
-
-found_first:
- tmp &= ~0UL >> (32 - size);
- if (tmp == 0UL) /* Are any bits set? */
- return result + size; /* Nope. */
-found_middle:
- return result + __ffs(tmp);
-}
-
-/*
- * hweightN: returns the hamming weight (i.e. the number
- * of bits set) of a N-bit word
- */
-
-#define hweight32(x) generic_hweight32(x)
-#define hweight16(x) generic_hweight16(x)
-#define hweight8(x) generic_hweight8(x)
-
+#include <asm-generic/bitops/find.h>
+#include <asm-generic/bitops/hweight.h>
static __inline__ int ext2_set_bit(int nr, volatile void * addr)
{
@@ -475,30 +291,11 @@ found_middle:
return result + ffz(__swab32(tmp));
}
-/* Bitmap functions for the minix filesystem. */
-#define minix_test_and_set_bit(nr,addr) test_and_set_bit(nr,addr)
-#define minix_set_bit(nr,addr) set_bit(nr,addr)
-#define minix_test_and_clear_bit(nr,addr) test_and_clear_bit(nr,addr)
-#define minix_test_bit(nr,addr) test_bit(nr,addr)
-#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size)
-
-/**
- * hweightN - returns the hamming weight of a N-bit word
- * @x: the word to weigh
- *
- * The Hamming Weight of a number is the total number of bits set in it.
- */
-
-#define hweight32(x) generic_hweight32(x)
-#define hweight16(x) generic_hweight16(x)
-#define hweight8(x) generic_hweight8(x)
+#include <asm-generic/bitops/minix.h>
#endif /* __KERNEL__ */
-/*
- * fls: find last bit set.
- */
-#define fls(x) generic_fls(x)
-#define fls64(x) generic_fls64(x)
+#include <asm-generic/bitops/fls.h>
+#include <asm-generic/bitops/fls64.h>
#endif /* _M68KNOMMU_BITOPS_H */
diff --git a/include/asm-mips/bitops.h b/include/asm-mips/bitops.h
index 8e802059fe6..a1728f8c070 100644
--- a/include/asm-mips/bitops.h
+++ b/include/asm-mips/bitops.h
@@ -105,22 +105,6 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
}
/*
- * __set_bit - Set a bit in memory
- * @nr: the bit to set
- * @addr: the address to start counting from
- *
- * Unlike set_bit(), this function is non-atomic and may be reordered.
- * If it's called on the same region of memory simultaneously, the effect
- * may be that only one operation succeeds.
- */
-static inline void __set_bit(unsigned long nr, volatile unsigned long * addr)
-{
- unsigned long * m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
-
- *m |= 1UL << (nr & SZLONG_MASK);
-}
-
-/*
* clear_bit - Clears a bit in memory
* @nr: Bit to clear
* @addr: Address to start counting from
@@ -169,22 +153,6 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
}
/*
- * __clear_bit - Clears a bit in memory
- * @nr: Bit to clear
- * @addr: Address to start counting from
- *
- * Unlike clear_bit(), this function is non-atomic and may be reordered.
- * If it's called on the same region of memory simultaneously, the effect
- * may be that only one operation succeeds.
- */
-static inline void __clear_bit(unsigned long nr, volatile unsigned long * addr)
-{
- unsigned long * m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
-
- *m &= ~(1UL << (nr & SZLONG_MASK));
-}
-
-/*
* change_bit - Toggle a bit in memory
* @nr: Bit to change
* @addr: Address to start counting from
@@ -235,22 +203,6 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
}
/*
- * __change_bit - Toggle a bit in memory
- * @nr: the bit to change
- * @addr: the address to start counting from
- *
- * Unlike change_bit(), this function is non-atomic and may be reordered.
- * If it's called on the same region of memory simultaneously, the effect
- * may be that only one operation succeeds.
- */
-static inline void __change_bit(unsigned long nr, volatile unsigned long * addr)
-{
- unsigned long * m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
-
- *m ^= 1UL << (nr & SZLONG_MASK);
-}
-
-/*
* test_and_set_bit - Set a bit and return its old value
* @nr: Bit to set
* @addr: Address to count from
@@ -321,30 +273,6 @@ static inline int test_and_set_bit(unsigned long nr,
}
/*
- * __test_and_set_bit - Set a bit and return its old value
- * @nr: Bit to set
- * @addr: Address to count from
- *
- * This operation is non-atomic and can be reordered.
- * If two examples of this operation race, one can appear to succeed
- * but actually fail. You must protect multiple accesses with a lock.
- */
-static inline int __test_and_set_bit(unsigned long nr,
- volatile unsigned long *addr)
-{
- volatile unsigned long *a = addr;
- unsigned long mask;
- int retval;
-
- a += nr >> SZLONG_LOG;
- mask = 1UL << (nr & SZLONG_MASK);
- retval = (mask & *a) != 0;
- *a |= mask;
-
- return retval;
-}
-
-/*
* test_and_clear_bit - Clear a bit and return its old value
* @nr: Bit to clear
* @addr: Address to count from
@@ -417,30 +345,6 @@ static inline int test_and_clear_bit(unsigned long nr,
}
/*
- * __test_and_clear_bit - Clear a bit and return its old value
- * @nr: Bit to clear
- * @addr: Address to count from
- *
- * This operation is non-atomic and can be reordered.
- * If two examples of this operation race, one can appear to succeed
- * but actually fail. You must protect multiple accesses with a lock.
- */
-static inline int __test_and_clear_bit(unsigned long nr,
- volatile unsigned long * addr)
-{
- volatile unsigned long *a = addr;
- unsigned long mask;
- int retval;
-
- a += (nr >> SZLONG_LOG);
- mask = 1UL << (nr & SZLONG_MASK);
- retval = ((mask & *a) != 0);
- *a &= ~mask;
-
- return retval;
-}
-
-/*
* test_and_change_bit - Change a bit and return its old value
* @nr: Bit to change
* @addr: Address to count from
@@ -509,43 +413,11 @@ static inline int test_and_change_bit(unsigned long nr,
}
}
-/*
- * __test_and_change_bit - Change a bit and return its old value
- * @nr: Bit to change
- * @addr: Address to count from
- *
- * This operation is non-atomic and can be reordered.
- * If two examples of this operation race, one can appear to succeed
- * but actually fail. You must protect multiple accesses with a lock.
- */
-static inline int __test_and_change_bit(unsigned long nr,
- volatile unsigned long *addr)
-{
- volatile unsigned long *a = addr;
- unsigned long mask;
- int retval;
-
- a += (nr >> SZLONG_LOG);
- mask = 1UL << (nr & SZLONG_MASK);
- retval = ((mask & *a) != 0);
- *a ^= mask;
-
- return retval;
-}
-
#undef __bi_flags
#undef __bi_local_irq_save
#undef __bi_local_irq_restore
-/*
- * test_bit - Determine whether a bit is set
- * @nr: bit number to test
- * @addr: Address to start counting from
- */
-static inline int test_bit(unsigned long nr, const volatile unsigned long *addr)
-{
- return 1UL & (addr[nr >> SZLONG_LOG] >> (nr & SZLONG_MASK));
-}
+#include <asm-generic/bitops/non-atomic.h>
/*
* Return the bit position (0..63) of the most significant 1 bit in a word
@@ -580,6 +452,8 @@ static inline int __ilog2(unsigned long x)
return 63 - lz;
}
+#if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64)
+
/*
* __ffs - find first bit in word.
* @word: The word to search
@@ -589,31 +463,7 @@ static inline int __ilog2(unsigned long x)
*/
static inline unsigned long __ffs(unsigned long word)
{
-#if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64)
return __ilog2(word & -word);
-#else
- int b = 0, s;
-
-#ifdef CONFIG_32BIT
- s = 16; if (word << 16 != 0) s = 0; b += s; word >>= s;
- s = 8; if (word << 24 != 0) s = 0; b += s; word >>= s;
- s = 4; if (word << 28 != 0) s = 0; b += s; word >>= s;
- s = 2; if (word << 30 != 0) s = 0; b += s; word >>= s;
- s = 1; if (word << 31 != 0) s = 0; b += s;
-
- return b;
-#endif
-#ifdef CONFIG_64BIT
- s = 32; if (word << 32 != 0) s = 0; b += s; word >>= s;
- s = 16; if (word << 48 != 0) s = 0; b += s; word >>= s;
- s = 8; if (word << 56 != 0) s = 0; b += s; word >>= s;
- s = 4; if (word << 60 != 0) s = 0; b += s; word >>= s;
- s = 2; if (word << 62 != 0) s = 0; b += s; word >>= s;
- s = 1; if (word << 63 != 0) s = 0; b += s;
-
- return b;
-#endif
-#endif
}
/*
@@ -652,321 +502,38 @@ static inline unsigned long ffz(unsigned long word)
*/
static inline unsigned long fls(unsigned long word)
{
-#ifdef CONFIG_32BIT
#ifdef CONFIG_CPU_MIPS32
__asm__ ("clz %0, %1" : "=r" (word) : "r" (word));
return 32 - word;
-#else
- {
- int r = 32, s;
-
- if (word == 0)
- return 0;
-
- s = 16; if ((word & 0xffff0000)) s = 0; r -= s; word <<= s;
- s = 8; if ((word & 0xff000000)) s = 0; r -= s; word <<= s;
- s = 4; if ((word & 0xf0000000)) s = 0; r -= s; word <<= s;
- s = 2; if ((word & 0xc0000000)) s = 0; r -= s; word <<= s;
- s = 1; if ((word & 0x80000000)) s = 0; r -= s;
-
- return r;
- }
#endif
-#endif /* CONFIG_32BIT */
-#ifdef CONFIG_64BIT
#ifdef CONFIG_CPU_MIPS64
-
__asm__ ("dclz %0, %1" : "=r" (word) : "r" (word));
return 64 - word;
-#else
- {
- int r = 64, s;
-
- if (word == 0)
- return 0;
-
- s = 32; if ((word & 0xffffffff00000000UL)) s = 0; r -= s; word <<= s;
- s = 16; if ((word & 0xffff000000000000UL)) s = 0; r -= s; word <<= s;
- s = 8; if ((word & 0xff00000000000000UL)) s = 0; r -= s; word <<= s;
- s = 4; if ((word & 0xf000000000000000UL)) s = 0; r -= s; word <<= s;
- s = 2; if ((word & 0xc000000000000000UL)) s = 0; r -= s; word <<= s;
- s = 1; if ((word & 0x8000000000000000UL)) s = 0; r -= s;
-
- return r;
- }
#endif
-#endif /* CONFIG_64BIT */
}
-#define fls64(x) generic_fls64(x)
-
-/*
- * find_next_zero_bit - find the first zero bit in a memory region
- * @addr: The address to base the search on
- * @offset: The bitnumber to start searching at
- * @size: The maximum size to search
- */
-static inline unsigned long find_next_zero_bit(const unsigned long *addr,
- unsigned long size, unsigned long offset)
-{
- const unsigned long *p = addr + (offset >> SZLONG_LOG);
- unsigned long result = offset & ~SZLONG_MASK;
- unsigned long tmp;
-
- if (offset >= size)
- return size;
- size -= result;
- offset &= SZLONG_MASK;
- if (offset) {
- tmp = *(p++);
- tmp |= ~0UL >> (_MIPS_SZLONG-offset);
- if (size < _MIPS_SZLONG)
- goto found_first;
- if (~tmp)
- goto found_middle;
- size -= _MIPS_SZLONG;
- result += _MIPS_SZLONG;
- }
- while (size & ~SZLONG_MASK) {
- if (~(tmp = *(p++)))
- goto found_middle;
- result += _MIPS_SZLONG;
- size -= _MIPS_SZLONG;
- }
- if (!size)
- return result;
- tmp = *p;
-
-found_first:
- tmp |= ~0UL << size;
- if (tmp == ~0UL) /* Are any bits zero? */
- return result + size; /* Nope. */
-found_middle:
- return result + ffz(tmp);
-}
+#else
-#define find_first_zero_bit(addr, size) \
- find_next_zero_bit((addr), (size), 0)
+#include <asm-generic/bitops/__ffs.h>
+#include <asm-generic/bitops/ffs.h>
+#include <asm-generic/bitops/ffz.h>
+#include <asm-generic/bitops/fls.h>
-/*
- * find_next_bit - find the next set bit in a memory region
- * @addr: The address to base the search on
- * @offset: The bitnumber to start searching at
- * @size: The maximum size to search
- */
-static inline unsigned long find_next_bit(const unsigned long *addr,
- unsigned long size, unsigned long offset)
-{
- const unsigned long *p = addr + (offset >> SZLONG_LOG);
- unsigned long result = offset & ~SZLONG_MASK;
- unsigned long tmp;
-
- if (offset >= size)
- return size;
- size -= result;
- offset &= SZLONG_MASK;
- if (offset) {
- tmp = *(p++);
- tmp &= ~0UL << offset;
- if (size < _MIPS_SZLONG)
- goto found_first;
- if (tmp)
- goto found_middle;
- size -= _MIPS_SZLONG;
- result += _MIPS_SZLONG;
- }
- while (size & ~SZLONG_MASK) {
- if ((tmp = *(p++)))
- goto found_middle;
- result += _MIPS_SZLONG;
- size -= _MIPS_SZLONG;
- }
- if (!size)
- return result;
- tmp = *p;
-
-found_first:
- tmp &= ~0UL >> (_MIPS_SZLONG - size);
- if (tmp == 0UL) /* Are any bits set? */
- return result + size; /* Nope. */
-found_middle:
- return result + __ffs(tmp);
-}
+#endif /*defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64) */
-/*
- * find_first_bit - find the first set bit in a memory region
- * @addr: The address to start the search at
- * @size: The maximum size to search
- *
- * Returns the bit-number of the first set bit, not the number of the byte
- * containing a bit.
- */
-#define find_first_bit(addr, size) \
- find_next_bit((addr), (size), 0)
+#include <asm-generic/bitops/fls64.h>
+#include <asm-generic/bitops/find.h>
#ifdef __KERNEL__
-/*
- * Every architecture must define this function. It's the fastest
- * way of searching a 140-bit bitmap where the first 100 bits are
- * unlikely to be set. It's guaranteed that at least one of the 140
- * bits is cleared.
- */
-static inline int sched_find_first_bit(const unsigned long *b)
-{
-#ifdef CONFIG_32BIT
- if (unlikely(b[0]))
- return __ffs(b[0]);
- if (unlikely(b[1]))
- return __ffs(b[1]) + 32;
- if (unlikely(b[2]))
- return __ffs(b[2]) + 64;
- if (b[3])
- return __ffs(b[3]) + 96;
- return __ffs(b[4]) + 128;
-#endif
-#ifdef CONFIG_64BIT
- if (unlikely(b[0]))
- return __ffs(b[0]);
- if (unlikely(b[1]))
- return __ffs(b[1]) + 64;
- return __ffs(b[2]) + 128;
-#endif
-}
-
-/*
- * hweightN - returns the hamming weight of a N-bit word
- * @x: the word to weigh
- *
- * The Hamming Weight of a number is the total number of bits set in it.
- */
-
-#define hweight64(x) generic_hweight64(x)
-#define hweight32(x) generic_hweight32(x)
-#define hweight16(x) generic_hweight16(x)
-#define hweight8(x) generic_hweight8(x)
-
-static inline int __test_and_set_le_bit(unsigned long nr, unsigned long *addr)
-{
- unsigned char *ADDR = (unsigned char *) addr;
- int mask, retval;
-
- ADDR += nr >> 3;
- mask = 1 << (nr & 0x07);
- retval = (mask & *ADDR) != 0;
- *ADDR |= mask;
-
- return retval;
-}
-
-static inline int __test_and_clear_le_bit(unsigned long nr, unsigned long *addr)
-{
- unsigned char *ADDR = (unsigned char *) addr;
- int mask, retval;
-
- ADDR += nr >> 3;
- mask = 1 << (nr & 0x07);
- retval = (mask & *ADDR) != 0;
- *ADDR &= ~mask;
-
- return retval;
-}
-
-static inline int test_le_bit(unsigned long nr, const unsigned long * addr)
-{
- const unsigned char *ADDR = (const unsigned char *) addr;
- int mask;
-
- ADDR += nr >> 3;
- mask = 1 << (nr & 0x07);
-
- return ((mask & *ADDR) != 0);
-}
-
-static inline unsigned long find_next_zero_le_bit(unsigned long *addr,
- unsigned long size, unsigned long offset)
-{
- unsigned long *p = ((unsigned long *) addr) + (offset >> SZLONG_LOG);
- unsigned long result = offset & ~SZLONG_MASK;
- unsigned long tmp;
-
- if (offset >= size)
- return size;
- size -= result;
- offset &= SZLONG_MASK;
- if (offset) {
- tmp = cpu_to_lelongp(p++);
- tmp |= ~0UL >> (_MIPS_SZLONG-offset); /* bug or feature ? */
- if (size < _MIPS_SZLONG)
- goto found_first;
- if (~tmp)
- goto found_middle;
- size -= _MIPS_SZLONG;
- result += _MIPS_SZLONG;
- }
- while (size & ~SZLONG_MASK) {
- if (~(tmp = cpu_to_lelongp(p++)))
- goto found_middle;
- result += _MIPS_SZLONG;
- size -= _MIPS_SZLONG;
- }
- if (!size)
- return result;
- tmp = cpu_to_lelongp(p);
-
-found_first:
- tmp |= ~0UL << size;
- if (tmp == ~0UL) /* Are any bits zero? */
- return result + size; /* Nope. */
-
-found_middle:
- return result + ffz(tmp);
-}
-
-#define find_first_zero_le_bit(addr, size) \
- find_next_zero_le_bit((addr), (size), 0)
-
-#define ext2_set_bit(nr,addr) \
- __test_and_set_le_bit((nr),(unsigned long*)addr)
-#define ext2_clear_bit(nr, addr) \
- __test_and_clear_le_bit((nr),(unsigned long*)addr)
- #define ext2_set_bit_atomic(lock, nr, addr) \
-({ \
- int ret; \
- spin_lock(lock); \
- ret = ext2_set_bit((nr), (addr)); \
- spin_unlock(lock); \
- ret; \
-})
-
-#define ext2_clear_bit_atomic(lock, nr, addr) \
-({ \
- int ret; \
- spin_lock(lock); \
- ret = ext2_clear_bit((nr), (addr)); \
- spin_unlock(lock); \
- ret; \
-})
-#define ext2_test_bit(nr, addr) test_le_bit((nr),(unsigned long*)addr)
-#define ext2_find_first_zero_bit(addr, size) \
- find_first_zero_le_bit((unsigned long*)addr, size)
-#define ext2_find_next_zero_bit(addr, size, off) \
- find_next_zero_le_bit((unsigned long*)addr, size, off)
-
-/*
- * Bitmap functions for the minix filesystem.
- *
- * FIXME: These assume that Minix uses the native byte/bitorder.
- * This limits the Minix filesystem's value for data exchange very much.
- */
-#define minix_test_and_set_bit(nr,addr) test_and_set_bit(nr,addr)
-#define minix_set_bit(nr,addr) set_bit(nr,addr)
-#define minix_test_and_clear_bit(nr,addr) test_and_clear_bit(nr,addr)
-#define minix_test_bit(nr,addr) test_bit(nr,addr)
-#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size)
+#include <asm-generic/bitops/sched.h>
+#include <asm-generic/bitops/hweight.h>
+#include <asm-generic/bitops/ext2-non-atomic.h>
+#include <asm-generic/bitops/ext2-atomic.h>
+#include <asm-generic/bitops/minix.h>
#endif /* __KERNEL__ */
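
The MIPS hunk above keeps the clz/dclz-based __ffs()/fls() only when CONFIG_CPU_MIPS32 or CONFIG_CPU_MIPS64 is set, and otherwise pulls in the generic headers. Those fallbacks use the same halving search the removed code open-coded; a minimal 32-bit sketch (illustrative name, not verbatim kernel code):

static inline int sketch_fls32(unsigned int x)
{
	int r = 32;

	if (!x)
		return 0;				/* fls(0) is defined as 0   */
	if (!(x & 0xffff0000u)) { x <<= 16; r -= 16; }	/* highest set bit in low half */
	if (!(x & 0xff000000u)) { x <<= 8;  r -= 8;  }
	if (!(x & 0xf0000000u)) { x <<= 4;  r -= 4;  }
	if (!(x & 0xc0000000u)) { x <<= 2;  r -= 2;  }
	if (!(x & 0x80000000u)) { r -= 1; }
	return r;					/* 1..32, MSB position       */
}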
diff --git a/include/asm-mips/types.h b/include/asm-mips/types.h
index 421b3aea14c..cd2813d8e13 100644
--- a/include/asm-mips/types.h
+++ b/include/asm-mips/types.h
@@ -99,6 +99,11 @@ typedef u64 sector_t;
#define HAVE_SECTOR_T
#endif
+#ifdef CONFIG_LSF
+typedef u64 blkcnt_t;
+#define HAVE_BLKCNT_T
+#endif
+
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
diff --git a/include/asm-parisc/bitops.h b/include/asm-parisc/bitops.h
index 15d8c2b5158..900561922c4 100644
--- a/include/asm-parisc/bitops.h
+++ b/include/asm-parisc/bitops.h
@@ -35,13 +35,6 @@ static __inline__ void set_bit(int nr, volatile unsigned long * addr)
_atomic_spin_unlock_irqrestore(addr, flags);
}
-static __inline__ void __set_bit(unsigned long nr, volatile unsigned long * addr)
-{
- unsigned long *m = (unsigned long *) addr + (nr >> SHIFT_PER_LONG);
-
- *m |= 1UL << CHOP_SHIFTCOUNT(nr);
-}
-
static __inline__ void clear_bit(int nr, volatile unsigned long * addr)
{
unsigned long mask = ~(1UL << CHOP_SHIFTCOUNT(nr));
@@ -53,13 +46,6 @@ static __inline__ void clear_bit(int nr, volatile unsigned long * addr)
_atomic_spin_unlock_irqrestore(addr, flags);
}
-static __inline__ void __clear_bit(unsigned long nr, volatile unsigned long * addr)
-{
- unsigned long *m = (unsigned long *) addr + (nr >> SHIFT_PER_LONG);
-
- *m &= ~(1UL << CHOP_SHIFTCOUNT(nr));
-}
-
static __inline__ void change_bit(int nr, volatile unsigned long * addr)
{
unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
@@ -71,13 +57,6 @@ static __inline__ void change_bit(int nr, volatile unsigned long * addr)
_atomic_spin_unlock_irqrestore(addr, flags);
}
-static __inline__ void __change_bit(unsigned long nr, volatile unsigned long * addr)
-{
- unsigned long *m = (unsigned long *) addr + (nr >> SHIFT_PER_LONG);
-
- *m ^= 1UL << CHOP_SHIFTCOUNT(nr);
-}
-
static __inline__ int test_and_set_bit(int nr, volatile unsigned long * addr)
{
unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
@@ -93,18 +72,6 @@ static __inline__ int test_and_set_bit(int nr, volatile unsigned long * addr)
return (oldbit & mask) ? 1 : 0;
}
-static __inline__ int __test_and_set_bit(int nr, volatile unsigned long * address)
-{
- unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
- unsigned long oldbit;
- unsigned long *addr = (unsigned long *)address + (nr >> SHIFT_PER_LONG);
-
- oldbit = *addr;
- *addr = oldbit | mask;
-
- return (oldbit & mask) ? 1 : 0;
-}
-
static __inline__ int test_and_clear_bit(int nr, volatile unsigned long * addr)
{
unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
@@ -120,18 +87,6 @@ static __inline__ int test_and_clear_bit(int nr, volatile unsigned long * addr)
return (oldbit & mask) ? 1 : 0;
}
-static __inline__ int __test_and_clear_bit(int nr, volatile unsigned long * address)
-{
- unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
- unsigned long *addr = (unsigned long *)address + (nr >> SHIFT_PER_LONG);
- unsigned long oldbit;
-
- oldbit = *addr;
- *addr = oldbit & ~mask;
-
- return (oldbit & mask) ? 1 : 0;
-}
-
static __inline__ int test_and_change_bit(int nr, volatile unsigned long * addr)
{
unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
@@ -147,25 +102,7 @@ static __inline__ int test_and_change_bit(int nr, volatile unsigned long * addr)
return (oldbit & mask) ? 1 : 0;
}
-static __inline__ int __test_and_change_bit(int nr, volatile unsigned long * address)
-{
- unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
- unsigned long *addr = (unsigned long *)address + (nr >> SHIFT_PER_LONG);
- unsigned long oldbit;
-
- oldbit = *addr;
- *addr = oldbit ^ mask;
-
- return (oldbit & mask) ? 1 : 0;
-}
-
-static __inline__ int test_bit(int nr, const volatile unsigned long *address)
-{
- unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
- const unsigned long *addr = (const unsigned long *)address + (nr >> SHIFT_PER_LONG);
-
- return !!(*addr & mask);
-}
+#include <asm-generic/bitops/non-atomic.h>
#ifdef __KERNEL__
@@ -219,8 +156,7 @@ static __inline__ unsigned long __ffs(unsigned long x)
return ret;
}
-/* Undefined if no bit is zero. */
-#define ffz(x) __ffs(~x)
+#include <asm-generic/bitops/ffz.h>
/*
* ffs: find first bit set. returns 1 to BITS_PER_LONG or 0 (if none set)
@@ -263,155 +199,22 @@ static __inline__ int fls(int x)
return ret;
}
-#define fls64(x) generic_fls64(x)
-/*
- * hweightN: returns the hamming weight (i.e. the number
- * of bits set) of a N-bit word
- */
-#define hweight64(x) generic_hweight64(x)
-#define hweight32(x) generic_hweight32(x)
-#define hweight16(x) generic_hweight16(x)
-#define hweight8(x) generic_hweight8(x)
-
-/*
- * Every architecture must define this function. It's the fastest
- * way of searching a 140-bit bitmap where the first 100 bits are
- * unlikely to be set. It's guaranteed that at least one of the 140
- * bits is cleared.
- */
-static inline int sched_find_first_bit(const unsigned long *b)
-{
-#ifdef __LP64__
- if (unlikely(b[0]))
- return __ffs(b[0]);
- if (unlikely(b[1]))
- return __ffs(b[1]) + 64;
- return __ffs(b[2]) + 128;
-#else
- if (unlikely(b[0]))
- return __ffs(b[0]);
- if (unlikely(b[1]))
- return __ffs(b[1]) + 32;
- if (unlikely(b[2]))
- return __ffs(b[2]) + 64;
- if (b[3])
- return __ffs(b[3]) + 96;
- return __ffs(b[4]) + 128;
-#endif
-}
+#include <asm-generic/bitops/fls64.h>
+#include <asm-generic/bitops/hweight.h>
+#include <asm-generic/bitops/sched.h>
#endif /* __KERNEL__ */
-/*
- * This implementation of find_{first,next}_zero_bit was stolen from
- * Linus' asm-alpha/bitops.h.
- */
-#define find_first_zero_bit(addr, size) \
- find_next_zero_bit((addr), (size), 0)
-
-static __inline__ unsigned long find_next_zero_bit(const void * addr, unsigned long size, unsigned long offset)
-{
- const unsigned long * p = ((unsigned long *) addr) + (offset >> SHIFT_PER_LONG);
- unsigned long result = offset & ~(BITS_PER_LONG-1);
- unsigned long tmp;
-
- if (offset >= size)
- return size;
- size -= result;
- offset &= (BITS_PER_LONG-1);
- if (offset) {
- tmp = *(p++);
- tmp |= ~0UL >> (BITS_PER_LONG-offset);
- if (size < BITS_PER_LONG)
- goto found_first;
- if (~tmp)
- goto found_middle;
- size -= BITS_PER_LONG;
- result += BITS_PER_LONG;
- }
- while (size & ~(BITS_PER_LONG -1)) {
- if (~(tmp = *(p++)))
- goto found_middle;
- result += BITS_PER_LONG;
- size -= BITS_PER_LONG;
- }
- if (!size)
- return result;
- tmp = *p;
-found_first:
- tmp |= ~0UL << size;
-found_middle:
- return result + ffz(tmp);
-}
-
-static __inline__ unsigned long find_next_bit(const unsigned long *addr, unsigned long size, unsigned long offset)
-{
- const unsigned long *p = addr + (offset >> SHIFT_PER_LONG);
- unsigned long result = offset & ~(BITS_PER_LONG-1);
- unsigned long tmp;
-
- if (offset >= size)
- return size;
- size -= result;
- offset &= (BITS_PER_LONG-1);
- if (offset) {
- tmp = *(p++);
- tmp &= (~0UL << offset);
- if (size < BITS_PER_LONG)
- goto found_first;
- if (tmp)
- goto found_middle;
- size -= BITS_PER_LONG;
- result += BITS_PER_LONG;
- }
- while (size & ~(BITS_PER_LONG-1)) {
- if ((tmp = *(p++)))
- goto found_middle;
- result += BITS_PER_LONG;
- size -= BITS_PER_LONG;
- }
- if (!size)
- return result;
- tmp = *p;
-
-found_first:
- tmp &= (~0UL >> (BITS_PER_LONG - size));
- if (tmp == 0UL) /* Are any bits set? */
- return result + size; /* Nope. */
-found_middle:
- return result + __ffs(tmp);
-}
-
-/**
- * find_first_bit - find the first set bit in a memory region
- * @addr: The address to start the search at
- * @size: The maximum size to search
- *
- * Returns the bit-number of the first set bit, not the number of the byte
- * containing a bit.
- */
-#define find_first_bit(addr, size) \
- find_next_bit((addr), (size), 0)
-
-#define _EXT2_HAVE_ASM_BITOPS_
+#include <asm-generic/bitops/find.h>
#ifdef __KERNEL__
-/*
- * test_and_{set,clear}_bit guarantee atomicity without
- * disabling interrupts.
- */
+
+#include <asm-generic/bitops/ext2-non-atomic.h>
/* '3' is bits per byte */
#define LE_BYTE_ADDR ((sizeof(unsigned long) - 1) << 3)
-#define ext2_test_bit(nr, addr) \
- test_bit((nr) ^ LE_BYTE_ADDR, (unsigned long *)addr)
-#define ext2_set_bit(nr, addr) \
- __test_and_set_bit((nr) ^ LE_BYTE_ADDR, (unsigned long *)addr)
-#define ext2_clear_bit(nr, addr) \
- __test_and_clear_bit((nr) ^ LE_BYTE_ADDR, (unsigned long *)addr)
-
#define ext2_set_bit_atomic(l,nr,addr) \
test_and_set_bit((nr) ^ LE_BYTE_ADDR, (unsigned long *)addr)
#define ext2_clear_bit_atomic(l,nr,addr) \
@@ -419,77 +222,6 @@ found_middle:
#endif /* __KERNEL__ */
-
-#define ext2_find_first_zero_bit(addr, size) \
- ext2_find_next_zero_bit((addr), (size), 0)
-
-/* include/linux/byteorder does not support "unsigned long" type */
-static inline unsigned long ext2_swabp(unsigned long * x)
-{
-#ifdef __LP64__
- return (unsigned long) __swab64p((u64 *) x);
-#else
- return (unsigned long) __swab32p((u32 *) x);
-#endif
-}
-
-/* include/linux/byteorder doesn't support "unsigned long" type */
-static inline unsigned long ext2_swab(unsigned long y)
-{
-#ifdef __LP64__
- return (unsigned long) __swab64((u64) y);
-#else
- return (unsigned long) __swab32((u32) y);
-#endif
-}
-
-static __inline__ unsigned long ext2_find_next_zero_bit(void *addr, unsigned long size, unsigned long offset)
-{
- unsigned long *p = (unsigned long *) addr + (offset >> SHIFT_PER_LONG);
- unsigned long result = offset & ~(BITS_PER_LONG - 1);
- unsigned long tmp;
-
- if (offset >= size)
- return size;
- size -= result;
- offset &= (BITS_PER_LONG - 1UL);
- if (offset) {
- tmp = ext2_swabp(p++);
- tmp |= (~0UL >> (BITS_PER_LONG - offset));
- if (size < BITS_PER_LONG)
- goto found_first;
- if (~tmp)
- goto found_middle;
- size -= BITS_PER_LONG;
- result += BITS_PER_LONG;
- }
-
- while (size & ~(BITS_PER_LONG - 1)) {
- if (~(tmp = *(p++)))
- goto found_middle_swap;
- result += BITS_PER_LONG;
- size -= BITS_PER_LONG;
- }
- if (!size)
- return result;
- tmp = ext2_swabp(p);
-found_first:
- tmp |= ~0UL << size;
- if (tmp == ~0UL) /* Are any bits zero? */
- return result + size; /* Nope. Skip ffz */
-found_middle:
- return result + ffz(tmp);
-
-found_middle_swap:
- return result + ffz(ext2_swab(tmp));
-}
-
-
-/* Bitmap functions for the minix filesystem. */
-#define minix_test_and_set_bit(nr,addr) ext2_set_bit(nr,addr)
-#define minix_set_bit(nr,addr) ((void)ext2_set_bit(nr,addr))
-#define minix_test_and_clear_bit(nr,addr) ext2_clear_bit(nr,addr)
-#define minix_test_bit(nr,addr) ext2_test_bit(nr,addr)
-#define minix_find_first_zero_bit(addr,size) ext2_find_first_zero_bit(addr,size)
+#include <asm-generic/bitops/minix-le.h>
#endif /* _PARISC_BITOPS_H */
diff --git a/include/asm-powerpc/bitops.h b/include/asm-powerpc/bitops.h
index bf6941a810b..d1c2a440566 100644
--- a/include/asm-powerpc/bitops.h
+++ b/include/asm-powerpc/bitops.h
@@ -184,72 +184,7 @@ static __inline__ void set_bits(unsigned long mask, unsigned long *addr)
: "cc");
}
-/* Non-atomic versions */
-static __inline__ int test_bit(unsigned long nr,
- __const__ volatile unsigned long *addr)
-{
- return 1UL & (addr[BITOP_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
-}
-
-static __inline__ void __set_bit(unsigned long nr,
- volatile unsigned long *addr)
-{
- unsigned long mask = BITOP_MASK(nr);
- unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
-
- *p |= mask;
-}
-
-static __inline__ void __clear_bit(unsigned long nr,
- volatile unsigned long *addr)
-{
- unsigned long mask = BITOP_MASK(nr);
- unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
-
- *p &= ~mask;
-}
-
-static __inline__ void __change_bit(unsigned long nr,
- volatile unsigned long *addr)
-{
- unsigned long mask = BITOP_MASK(nr);
- unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
-
- *p ^= mask;
-}
-
-static __inline__ int __test_and_set_bit(unsigned long nr,
- volatile unsigned long *addr)
-{
- unsigned long mask = BITOP_MASK(nr);
- unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
- unsigned long old = *p;
-
- *p = old | mask;
- return (old & mask) != 0;
-}
-
-static __inline__ int __test_and_clear_bit(unsigned long nr,
- volatile unsigned long *addr)
-{
- unsigned long mask = BITOP_MASK(nr);
- unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
- unsigned long old = *p;
-
- *p = old & ~mask;
- return (old & mask) != 0;
-}
-
-static __inline__ int __test_and_change_bit(unsigned long nr,
- volatile unsigned long *addr)
-{
- unsigned long mask = BITOP_MASK(nr);
- unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
- unsigned long old = *p;
-
- *p = old ^ mask;
- return (old & mask) != 0;
-}
+#include <asm-generic/bitops/non-atomic.h>
/*
* Return the zero-based bit position (LE, not IBM bit numbering) of
@@ -310,16 +245,9 @@ static __inline__ int fls(unsigned int x)
asm ("cntlzw %0,%1" : "=r" (lz) : "r" (x));
return 32 - lz;
}
-#define fls64(x) generic_fls64(x)
+#include <asm-generic/bitops/fls64.h>
-/*
- * hweightN: returns the hamming weight (i.e. the number
- * of bits set) of a N-bit word
- */
-#define hweight64(x) generic_hweight64(x)
-#define hweight32(x) generic_hweight32(x)
-#define hweight16(x) generic_hweight16(x)
-#define hweight8(x) generic_hweight8(x)
+#include <asm-generic/bitops/hweight.h>
#define find_first_zero_bit(addr, size) find_next_zero_bit((addr), (size), 0)
unsigned long find_next_zero_bit(const unsigned long *addr,
@@ -397,32 +325,7 @@ unsigned long find_next_zero_le_bit(const unsigned long *addr,
#define minix_find_first_zero_bit(addr,size) \
find_first_zero_le_bit((unsigned long *)addr, size)
-/*
- * Every architecture must define this function. It's the fastest
- * way of searching a 140-bit bitmap where the first 100 bits are
- * unlikely to be set. It's guaranteed that at least one of the 140
- * bits is cleared.
- */
-static inline int sched_find_first_bit(const unsigned long *b)
-{
-#ifdef CONFIG_PPC64
- if (unlikely(b[0]))
- return __ffs(b[0]);
- if (unlikely(b[1]))
- return __ffs(b[1]) + 64;
- return __ffs(b[2]) + 128;
-#else
- if (unlikely(b[0]))
- return __ffs(b[0]);
- if (unlikely(b[1]))
- return __ffs(b[1]) + 32;
- if (unlikely(b[2]))
- return __ffs(b[2]) + 64;
- if (b[3])
- return __ffs(b[3]) + 96;
- return __ffs(b[4]) + 128;
-#endif
-}
+#include <asm-generic/bitops/sched.h>
#endif /* __KERNEL__ */
diff --git a/include/asm-powerpc/types.h b/include/asm-powerpc/types.h
index ec3c2ee8bf8..baabba96e31 100644
--- a/include/asm-powerpc/types.h
+++ b/include/asm-powerpc/types.h
@@ -103,6 +103,11 @@ typedef u64 sector_t;
#define HAVE_SECTOR_T
#endif
+#ifdef CONFIG_LSF
+typedef u64 blkcnt_t;
+#define HAVE_BLKCNT_T
+#endif
+
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
diff --git a/include/asm-s390/bitops.h b/include/asm-s390/bitops.h
index 3628899f48b..ca092ffb7a9 100644
--- a/include/asm-s390/bitops.h
+++ b/include/asm-s390/bitops.h
@@ -828,35 +828,12 @@ static inline int sched_find_first_bit(unsigned long *b)
return find_first_bit(b, 140);
}
-/*
- * ffs: find first bit set. This is defined the same way as
- * the libc and compiler builtin ffs routines, therefore
- * differs in spirit from the above ffz (man ffs).
- */
-#define ffs(x) generic_ffs(x)
+#include <asm-generic/bitops/ffs.h>
-/*
- * fls: find last bit set.
- */
-#define fls(x) generic_fls(x)
-#define fls64(x) generic_fls64(x)
-
-/*
- * hweightN: returns the hamming weight (i.e. the number
- * of bits set) of a N-bit word
- */
-#define hweight64(x) \
-({ \
- unsigned long __x = (x); \
- unsigned int __w; \
- __w = generic_hweight32((unsigned int) __x); \
- __w += generic_hweight32((unsigned int) (__x>>32)); \
- __w; \
-})
-#define hweight32(x) generic_hweight32(x)
-#define hweight16(x) generic_hweight16(x)
-#define hweight8(x) generic_hweight8(x)
+#include <asm-generic/bitops/fls.h>
+#include <asm-generic/bitops/fls64.h>
+#include <asm-generic/bitops/hweight.h>
#ifdef __KERNEL__
@@ -871,11 +848,11 @@ static inline int sched_find_first_bit(unsigned long *b)
*/
#define ext2_set_bit(nr, addr) \
- test_and_set_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr)
+ __test_and_set_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr)
#define ext2_set_bit_atomic(lock, nr, addr) \
test_and_set_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr)
#define ext2_clear_bit(nr, addr) \
- test_and_clear_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr)
+ __test_and_clear_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr)
#define ext2_clear_bit_atomic(lock, nr, addr) \
test_and_clear_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr)
#define ext2_test_bit(nr, addr) \
@@ -1011,18 +988,7 @@ ext2_find_next_zero_bit(void *vaddr, unsigned long size, unsigned long offset)
return offset + ext2_find_first_zero_bit(p, size);
}
-/* Bitmap functions for the minix filesystem. */
-/* FIXME !!! */
-#define minix_test_and_set_bit(nr,addr) \
- test_and_set_bit(nr,(unsigned long *)addr)
-#define minix_set_bit(nr,addr) \
- set_bit(nr,(unsigned long *)addr)
-#define minix_test_and_clear_bit(nr,addr) \
- test_and_clear_bit(nr,(unsigned long *)addr)
-#define minix_test_bit(nr,addr) \
- test_bit(nr,(unsigned long *)addr)
-#define minix_find_first_zero_bit(addr,size) \
- find_first_zero_bit(addr,size)
+#include <asm-generic/bitops/minix.h>
#endif /* __KERNEL__ */
diff --git a/include/asm-s390/types.h b/include/asm-s390/types.h
index d0be3e47701..5738ad63537 100644
--- a/include/asm-s390/types.h
+++ b/include/asm-s390/types.h
@@ -93,6 +93,11 @@ typedef u64 sector_t;
#define HAVE_SECTOR_T
#endif
+#ifdef CONFIG_LSF
+typedef u64 blkcnt_t;
+#define HAVE_BLKCNT_T
+#endif
+
#endif /* ! __s390x__ */
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
diff --git a/include/asm-sh/bitops.h b/include/asm-sh/bitops.h
index 1c526086004..e34f8250856 100644
--- a/include/asm-sh/bitops.h
+++ b/include/asm-sh/bitops.h
@@ -19,16 +19,6 @@ static __inline__ void set_bit(int nr, volatile void * addr)
local_irq_restore(flags);
}
-static __inline__ void __set_bit(int nr, volatile void * addr)
-{
- int mask;
- volatile unsigned int *a = addr;
-
- a += nr >> 5;
- mask = 1 << (nr & 0x1f);
- *a |= mask;
-}
-
/*
* clear_bit() doesn't provide any barrier for the compiler.
*/
@@ -47,16 +37,6 @@ static __inline__ void clear_bit(int nr, volatile void * addr)
local_irq_restore(flags);
}
-static __inline__ void __clear_bit(int nr, volatile void * addr)
-{
- int mask;
- volatile unsigned int *a = addr;
-
- a += nr >> 5;
- mask = 1 << (nr & 0x1f);
- *a &= ~mask;
-}
-
static __inline__ void change_bit(int nr, volatile void * addr)
{
int mask;
@@ -70,16 +50,6 @@ static __inline__ void change_bit(int nr, volatile void * addr)
local_irq_restore(flags);
}
-static __inline__ void __change_bit(int nr, volatile void * addr)
-{
- int mask;
- volatile unsigned int *a = addr;
-
- a += nr >> 5;
- mask = 1 << (nr & 0x1f);
- *a ^= mask;
-}
-
static __inline__ int test_and_set_bit(int nr, volatile void * addr)
{
int mask, retval;
@@ -96,19 +66,6 @@ static __inline__ int test_and_set_bit(int nr, volatile void * addr)
return retval;
}
-static __inline__ int __test_and_set_bit(int nr, volatile void * addr)
-{
- int mask, retval;
- volatile unsigned int *a = addr;
-
- a += nr >> 5;
- mask = 1 << (nr & 0x1f);
- retval = (mask & *a) != 0;
- *a |= mask;
-
- return retval;
-}
-
static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
{
int mask, retval;
@@ -125,19 +82,6 @@ static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
return retval;
}
-static __inline__ int __test_and_clear_bit(int nr, volatile void * addr)
-{
- int mask, retval;
- volatile unsigned int *a = addr;
-
- a += nr >> 5;
- mask = 1 << (nr & 0x1f);
- retval = (mask & *a) != 0;
- *a &= ~mask;
-
- return retval;
-}
-
static __inline__ int test_and_change_bit(int nr, volatile void * addr)
{
int mask, retval;
@@ -154,23 +98,7 @@ static __inline__ int test_and_change_bit(int nr, volatile void * addr)
return retval;
}
-static __inline__ int __test_and_change_bit(int nr, volatile void * addr)
-{
- int mask, retval;
- volatile unsigned int *a = addr;
-
- a += nr >> 5;
- mask = 1 << (nr & 0x1f);
- retval = (mask & *a) != 0;
- *a ^= mask;
-
- return retval;
-}
-
-static __inline__ int test_bit(int nr, const volatile void *addr)
-{
- return 1UL & (((const volatile unsigned int *) addr)[nr >> 5] >> (nr & 31));
-}
+#include <asm-generic/bitops/non-atomic.h>
static __inline__ unsigned long ffz(unsigned long word)
{
@@ -206,271 +134,15 @@ static __inline__ unsigned long __ffs(unsigned long word)
return result;
}
-/**
- * find_next_bit - find the next set bit in a memory region
- * @addr: The address to base the search on
- * @offset: The bitnumber to start searching at
- * @size: The maximum size to search
- */
-static __inline__ unsigned long find_next_bit(const unsigned long *addr,
- unsigned long size, unsigned long offset)
-{
- unsigned int *p = ((unsigned int *) addr) + (offset >> 5);
- unsigned int result = offset & ~31UL;
- unsigned int tmp;
-
- if (offset >= size)
- return size;
- size -= result;
- offset &= 31UL;
- if (offset) {
- tmp = *p++;
- tmp &= ~0UL << offset;
- if (size < 32)
- goto found_first;
- if (tmp)
- goto found_middle;
- size -= 32;
- result += 32;
- }
- while (size >= 32) {
- if ((tmp = *p++) != 0)
- goto found_middle;
- result += 32;
- size -= 32;
- }
- if (!size)
- return result;
- tmp = *p;
-
-found_first:
- tmp &= ~0UL >> (32 - size);
- if (tmp == 0UL) /* Are any bits set? */
- return result + size; /* Nope. */
-found_middle:
- return result + __ffs(tmp);
-}
-
-/**
- * find_first_bit - find the first set bit in a memory region
- * @addr: The address to start the search at
- * @size: The maximum size to search
- *
- * Returns the bit-number of the first set bit, not the number of the byte
- * containing a bit.
- */
-#define find_first_bit(addr, size) \
- find_next_bit((addr), (size), 0)
-
-static __inline__ int find_next_zero_bit(const unsigned long *addr, int size, int offset)
-{
- const unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
- unsigned long result = offset & ~31UL;
- unsigned long tmp;
-
- if (offset >= size)
- return size;
- size -= result;
- offset &= 31UL;
- if (offset) {
- tmp = *(p++);
- tmp |= ~0UL >> (32-offset);
- if (size < 32)
- goto found_first;
- if (~tmp)
- goto found_middle;
- size -= 32;
- result += 32;
- }
- while (size & ~31UL) {
- if (~(tmp = *(p++)))
- goto found_middle;
- result += 32;
- size -= 32;
- }
- if (!size)
- return result;
- tmp = *p;
-
-found_first:
- tmp |= ~0UL << size;
-found_middle:
- return result + ffz(tmp);
-}
-
-#define find_first_zero_bit(addr, size) \
- find_next_zero_bit((addr), (size), 0)
-
-/*
- * ffs: find first bit set. This is defined the same way as
- * the libc and compiler builtin ffs routines, therefore
- * differs in spirit from the above ffz (man ffs).
- */
-
-#define ffs(x) generic_ffs(x)
-
-/*
- * hweightN: returns the hamming weight (i.e. the number
- * of bits set) of a N-bit word
- */
-
-#define hweight32(x) generic_hweight32(x)
-#define hweight16(x) generic_hweight16(x)
-#define hweight8(x) generic_hweight8(x)
-
-/*
- * Every architecture must define this function. It's the fastest
- * way of searching a 140-bit bitmap where the first 100 bits are
- * unlikely to be set. It's guaranteed that at least one of the 140
- * bits is cleared.
- */
-
-static inline int sched_find_first_bit(const unsigned long *b)
-{
- if (unlikely(b[0]))
- return __ffs(b[0]);
- if (unlikely(b[1]))
- return __ffs(b[1]) + 32;
- if (unlikely(b[2]))
- return __ffs(b[2]) + 64;
- if (b[3])
- return __ffs(b[3]) + 96;
- return __ffs(b[4]) + 128;
-}
-
-#ifdef __LITTLE_ENDIAN__
-#define ext2_set_bit(nr, addr) test_and_set_bit((nr), (addr))
-#define ext2_clear_bit(nr, addr) test_and_clear_bit((nr), (addr))
-#define ext2_test_bit(nr, addr) test_bit((nr), (addr))
-#define ext2_find_first_zero_bit(addr, size) find_first_zero_bit((addr), (size))
-#define ext2_find_next_zero_bit(addr, size, offset) \
- find_next_zero_bit((unsigned long *)(addr), (size), (offset))
-#else
-static __inline__ int ext2_set_bit(int nr, volatile void * addr)
-{
- int mask, retval;
- unsigned long flags;
- volatile unsigned char *ADDR = (unsigned char *) addr;
-
- ADDR += nr >> 3;
- mask = 1 << (nr & 0x07);
- local_irq_save(flags);
- retval = (mask & *ADDR) != 0;
- *ADDR |= mask;
- local_irq_restore(flags);
- return retval;
-}
-
-static __inline__ int ext2_clear_bit(int nr, volatile void * addr)
-{
- int mask, retval;
- unsigned long flags;
- volatile unsigned char *ADDR = (unsigned char *) addr;
-
- ADDR += nr >> 3;
- mask = 1 << (nr & 0x07);
- local_irq_save(flags);
- retval = (mask & *ADDR) != 0;
- *ADDR &= ~mask;
- local_irq_restore(flags);
- return retval;
-}
-
-static __inline__ int ext2_test_bit(int nr, const volatile void * addr)
-{
- int mask;
- const volatile unsigned char *ADDR = (const unsigned char *) addr;
-
- ADDR += nr >> 3;
- mask = 1 << (nr & 0x07);
- return ((mask & *ADDR) != 0);
-}
-
-#define ext2_find_first_zero_bit(addr, size) \
- ext2_find_next_zero_bit((addr), (size), 0)
-
-static __inline__ unsigned long ext2_find_next_zero_bit(void *addr, unsigned long size, unsigned long offset)
-{
- unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
- unsigned long result = offset & ~31UL;
- unsigned long tmp;
-
- if (offset >= size)
- return size;
- size -= result;
- offset &= 31UL;
- if(offset) {
- /* We hold the little endian value in tmp, but then the
- * shift is illegal. So we could keep a big endian value
- * in tmp, like this:
- *
- * tmp = __swab32(*(p++));
- * tmp |= ~0UL >> (32-offset);
- *
- * but this would decrease preformance, so we change the
- * shift:
- */
- tmp = *(p++);
- tmp |= __swab32(~0UL >> (32-offset));
- if(size < 32)
- goto found_first;
- if(~tmp)
- goto found_middle;
- size -= 32;
- result += 32;
- }
- while(size & ~31UL) {
- if(~(tmp = *(p++)))
- goto found_middle;
- result += 32;
- size -= 32;
- }
- if(!size)
- return result;
- tmp = *p;
-
-found_first:
- /* tmp is little endian, so we would have to swab the shift,
- * see above. But then we have to swab tmp below for ffz, so
- * we might as well do this here.
- */
- return result + ffz(__swab32(tmp) | (~0UL << size));
-found_middle:
- return result + ffz(__swab32(tmp));
-}
-#endif
-
-#define ext2_set_bit_atomic(lock, nr, addr) \
- ({ \
- int ret; \
- spin_lock(lock); \
- ret = ext2_set_bit((nr), (addr)); \
- spin_unlock(lock); \
- ret; \
- })
-
-#define ext2_clear_bit_atomic(lock, nr, addr) \
- ({ \
- int ret; \
- spin_lock(lock); \
- ret = ext2_clear_bit((nr), (addr)); \
- spin_unlock(lock); \
- ret; \
- })
-
-/* Bitmap functions for the minix filesystem. */
-#define minix_test_and_set_bit(nr,addr) test_and_set_bit(nr,addr)
-#define minix_set_bit(nr,addr) set_bit(nr,addr)
-#define minix_test_and_clear_bit(nr,addr) test_and_clear_bit(nr,addr)
-#define minix_test_bit(nr,addr) test_bit(nr,addr)
-#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size)
-
-/*
- * fls: find last bit set.
- */
-
-#define fls(x) generic_fls(x)
-#define fls64(x) generic_fls64(x)
+#include <asm-generic/bitops/find.h>
+#include <asm-generic/bitops/ffs.h>
+#include <asm-generic/bitops/hweight.h>
+#include <asm-generic/bitops/sched.h>
+#include <asm-generic/bitops/ext2-non-atomic.h>
+#include <asm-generic/bitops/ext2-atomic.h>
+#include <asm-generic/bitops/minix.h>
+#include <asm-generic/bitops/fls.h>
+#include <asm-generic/bitops/fls64.h>
#endif /* __KERNEL__ */
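
The ext2_*_bit_atomic() wrappers that the sh (and sh64) headers now take from <asm-generic/bitops/ext2-atomic.h> keep the same shape as the spin_lock()-protected macros removed above: the plain, non-atomic ext2 bit operation simply runs under the caller-supplied lock. A sketch of that pattern (macro name illustrative, not verbatim kernel code):

#define sketch_ext2_set_bit_atomic(lock, nr, addr)	\
({							\
	int __ret;					\
	spin_lock(lock);				\
	__ret = ext2_set_bit((nr), (addr));		\
	spin_unlock(lock);				\
	__ret;						\
})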
diff --git a/include/asm-sh/stat.h b/include/asm-sh/stat.h
index 914e3fcbbd3..6c41a60657f 100644
--- a/include/asm-sh/stat.h
+++ b/include/asm-sh/stat.h
@@ -60,13 +60,7 @@ struct stat64 {
long long st_size;
unsigned long st_blksize;
-#if defined(__BIG_ENDIAN__)
- unsigned long __pad4; /* Future possible st_blocks hi bits */
- unsigned long st_blocks; /* Number 512-byte blocks allocated. */
-#else /* Must be little */
- unsigned long st_blocks; /* Number 512-byte blocks allocated. */
- unsigned long __pad4; /* Future possible st_blocks hi bits */
-#endif
+ unsigned long long st_blocks; /* Number 512-byte blocks allocated. */
unsigned long st_atime;
unsigned long st_atime_nsec;
diff --git a/include/asm-sh/thread_info.h b/include/asm-sh/thread_info.h
index 85f0c11b431..7345350d98c 100644
--- a/include/asm-sh/thread_info.h
+++ b/include/asm-sh/thread_info.h
@@ -18,7 +18,7 @@
struct thread_info {
struct task_struct *task; /* main task structure */
struct exec_domain *exec_domain; /* execution domain */
- __u32 flags; /* low level flags */
+ unsigned long flags; /* low level flags */
__u32 cpu;
int preempt_count; /* 0 => preemptable, <0 => BUG */
struct restart_block restart_block;
diff --git a/include/asm-sh/types.h b/include/asm-sh/types.h
index cb7e183a0a6..488552f43b2 100644
--- a/include/asm-sh/types.h
+++ b/include/asm-sh/types.h
@@ -58,6 +58,11 @@ typedef u64 sector_t;
#define HAVE_SECTOR_T
#endif
+#ifdef CONFIG_LSF
+typedef u64 blkcnt_t;
+#define HAVE_BLKCNT_T
+#endif
+
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
diff --git a/include/asm-sh64/bitops.h b/include/asm-sh64/bitops.h
index ce9c3ad45fe..f3bdcdb5d04 100644
--- a/include/asm-sh64/bitops.h
+++ b/include/asm-sh64/bitops.h
@@ -31,16 +31,6 @@ static __inline__ void set_bit(int nr, volatile void * addr)
local_irq_restore(flags);
}
-static inline void __set_bit(int nr, void *addr)
-{
- int mask;
- unsigned int *a = addr;
-
- a += nr >> 5;
- mask = 1 << (nr & 0x1f);
- *a |= mask;
-}
-
/*
* clear_bit() doesn't provide any barrier for the compiler.
*/
@@ -58,15 +48,6 @@ static inline void clear_bit(int nr, volatile unsigned long *a)
local_irq_restore(flags);
}
-static inline void __clear_bit(int nr, volatile unsigned long *a)
-{
- int mask;
-
- a += nr >> 5;
- mask = 1 << (nr & 0x1f);
- *a &= ~mask;
-}
-
static __inline__ void change_bit(int nr, volatile void * addr)
{
int mask;
@@ -80,16 +61,6 @@ static __inline__ void change_bit(int nr, volatile void * addr)
local_irq_restore(flags);
}
-static __inline__ void __change_bit(int nr, volatile void * addr)
-{
- int mask;
- volatile unsigned int *a = addr;
-
- a += nr >> 5;
- mask = 1 << (nr & 0x1f);
- *a ^= mask;
-}
-
static __inline__ int test_and_set_bit(int nr, volatile void * addr)
{
int mask, retval;
@@ -106,19 +77,6 @@ static __inline__ int test_and_set_bit(int nr, volatile void * addr)
return retval;
}
-static __inline__ int __test_and_set_bit(int nr, volatile void * addr)
-{
- int mask, retval;
- volatile unsigned int *a = addr;
-
- a += nr >> 5;
- mask = 1 << (nr & 0x1f);
- retval = (mask & *a) != 0;
- *a |= mask;
-
- return retval;
-}
-
static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
{
int mask, retval;
@@ -135,19 +93,6 @@ static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
return retval;
}
-static __inline__ int __test_and_clear_bit(int nr, volatile void * addr)
-{
- int mask, retval;
- volatile unsigned int *a = addr;
-
- a += nr >> 5;
- mask = 1 << (nr & 0x1f);
- retval = (mask & *a) != 0;
- *a &= ~mask;
-
- return retval;
-}
-
static __inline__ int test_and_change_bit(int nr, volatile void * addr)
{
int mask, retval;
@@ -164,23 +109,7 @@ static __inline__ int test_and_change_bit(int nr, volatile void * addr)
return retval;
}
-static __inline__ int __test_and_change_bit(int nr, volatile void * addr)
-{
- int mask, retval;
- volatile unsigned int *a = addr;
-
- a += nr >> 5;
- mask = 1 << (nr & 0x1f);
- retval = (mask & *a) != 0;
- *a ^= mask;
-
- return retval;
-}
-
-static __inline__ int test_bit(int nr, const volatile void *addr)
-{
- return 1UL & (((const volatile unsigned int *) addr)[nr >> 5] >> (nr & 31));
-}
+#include <asm-generic/bitops/non-atomic.h>
static __inline__ unsigned long ffz(unsigned long word)
{
@@ -204,313 +133,16 @@ static __inline__ unsigned long ffz(unsigned long word)
return result;
}
-/**
- * __ffs - find first bit in word
- * @word: The word to search
- *
- * Undefined if no bit exists, so code should check against 0 first.
- */
-static inline unsigned long __ffs(unsigned long word)
-{
- int r = 0;
-
- if (!word)
- return 0;
- if (!(word & 0xffff)) {
- word >>= 16;
- r += 16;
- }
- if (!(word & 0xff)) {
- word >>= 8;
- r += 8;
- }
- if (!(word & 0xf)) {
- word >>= 4;
- r += 4;
- }
- if (!(word & 3)) {
- word >>= 2;
- r += 2;
- }
- if (!(word & 1)) {
- word >>= 1;
- r += 1;
- }
- return r;
-}
-
-/**
- * find_next_bit - find the next set bit in a memory region
- * @addr: The address to base the search on
- * @offset: The bitnumber to start searching at
- * @size: The maximum size to search
- */
-static inline unsigned long find_next_bit(const unsigned long *addr,
- unsigned long size, unsigned long offset)
-{
- unsigned int *p = ((unsigned int *) addr) + (offset >> 5);
- unsigned int result = offset & ~31UL;
- unsigned int tmp;
-
- if (offset >= size)
- return size;
- size -= result;
- offset &= 31UL;
- if (offset) {
- tmp = *p++;
- tmp &= ~0UL << offset;
- if (size < 32)
- goto found_first;
- if (tmp)
- goto found_middle;
- size -= 32;
- result += 32;
- }
- while (size >= 32) {
- if ((tmp = *p++) != 0)
- goto found_middle;
- result += 32;
- size -= 32;
- }
- if (!size)
- return result;
- tmp = *p;
-
-found_first:
- tmp &= ~0UL >> (32 - size);
- if (tmp == 0UL) /* Are any bits set? */
- return result + size; /* Nope. */
-found_middle:
- return result + __ffs(tmp);
-}
-
-/**
- * find_first_bit - find the first set bit in a memory region
- * @addr: The address to start the search at
- * @size: The maximum size to search
- *
- * Returns the bit-number of the first set bit, not the number of the byte
- * containing a bit.
- */
-#define find_first_bit(addr, size) \
- find_next_bit((addr), (size), 0)
-
-
-static inline int find_next_zero_bit(void *addr, int size, int offset)
-{
- unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
- unsigned long result = offset & ~31UL;
- unsigned long tmp;
-
- if (offset >= size)
- return size;
- size -= result;
- offset &= 31UL;
- if (offset) {
- tmp = *(p++);
- tmp |= ~0UL >> (32-offset);
- if (size < 32)
- goto found_first;
- if (~tmp)
- goto found_middle;
- size -= 32;
- result += 32;
- }
- while (size & ~31UL) {
- if (~(tmp = *(p++)))
- goto found_middle;
- result += 32;
- size -= 32;
- }
- if (!size)
- return result;
- tmp = *p;
-
-found_first:
- tmp |= ~0UL << size;
-found_middle:
- return result + ffz(tmp);
-}
-
-#define find_first_zero_bit(addr, size) \
- find_next_zero_bit((addr), (size), 0)
-
-/*
- * hweightN: returns the hamming weight (i.e. the number
- * of bits set) of a N-bit word
- */
-
-#define hweight32(x) generic_hweight32(x)
-#define hweight16(x) generic_hweight16(x)
-#define hweight8(x) generic_hweight8(x)
-
-/*
- * Every architecture must define this function. It's the fastest
- * way of searching a 140-bit bitmap where the first 100 bits are
- * unlikely to be set. It's guaranteed that at least one of the 140
- * bits is cleared.
- */
-
-static inline int sched_find_first_bit(unsigned long *b)
-{
- if (unlikely(b[0]))
- return __ffs(b[0]);
- if (unlikely(b[1]))
- return __ffs(b[1]) + 32;
- if (unlikely(b[2]))
- return __ffs(b[2]) + 64;
- if (b[3])
- return __ffs(b[3]) + 96;
- return __ffs(b[4]) + 128;
-}
-
-/*
- * ffs: find first bit set. This is defined the same way as
- * the libc and compiler builtin ffs routines, therefore
- * differs in spirit from the above ffz (man ffs).
- */
-
-#define ffs(x) generic_ffs(x)
-
-/*
- * hweightN: returns the hamming weight (i.e. the number
- * of bits set) of a N-bit word
- */
-
-#define hweight32(x) generic_hweight32(x)
-#define hweight16(x) generic_hweight16(x)
-#define hweight8(x) generic_hweight8(x)
-
-#ifdef __LITTLE_ENDIAN__
-#define ext2_set_bit(nr, addr) test_and_set_bit((nr), (addr))
-#define ext2_clear_bit(nr, addr) test_and_clear_bit((nr), (addr))
-#define ext2_test_bit(nr, addr) test_bit((nr), (addr))
-#define ext2_find_first_zero_bit(addr, size) find_first_zero_bit((addr), (size))
-#define ext2_find_next_zero_bit(addr, size, offset) \
- find_next_zero_bit((addr), (size), (offset))
-#else
-static __inline__ int ext2_set_bit(int nr, volatile void * addr)
-{
- int mask, retval;
- unsigned long flags;
- volatile unsigned char *ADDR = (unsigned char *) addr;
-
- ADDR += nr >> 3;
- mask = 1 << (nr & 0x07);
- local_irq_save(flags);
- retval = (mask & *ADDR) != 0;
- *ADDR |= mask;
- local_irq_restore(flags);
- return retval;
-}
-
-static __inline__ int ext2_clear_bit(int nr, volatile void * addr)
-{
- int mask, retval;
- unsigned long flags;
- volatile unsigned char *ADDR = (unsigned char *) addr;
-
- ADDR += nr >> 3;
- mask = 1 << (nr & 0x07);
- local_irq_save(flags);
- retval = (mask & *ADDR) != 0;
- *ADDR &= ~mask;
- local_irq_restore(flags);
- return retval;
-}
-
-static __inline__ int ext2_test_bit(int nr, const volatile void * addr)
-{
- int mask;
- const volatile unsigned char *ADDR = (const unsigned char *) addr;
-
- ADDR += nr >> 3;
- mask = 1 << (nr & 0x07);
- return ((mask & *ADDR) != 0);
-}
-
-#define ext2_find_first_zero_bit(addr, size) \
- ext2_find_next_zero_bit((addr), (size), 0)
-
-static __inline__ unsigned long ext2_find_next_zero_bit(void *addr, unsigned long size, unsigned long offset)
-{
- unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
- unsigned long result = offset & ~31UL;
- unsigned long tmp;
-
- if (offset >= size)
- return size;
- size -= result;
- offset &= 31UL;
- if(offset) {
- /* We hold the little endian value in tmp, but then the
- * shift is illegal. So we could keep a big endian value
- * in tmp, like this:
- *
- * tmp = __swab32(*(p++));
- * tmp |= ~0UL >> (32-offset);
- *
- * but this would decrease preformance, so we change the
- * shift:
- */
- tmp = *(p++);
- tmp |= __swab32(~0UL >> (32-offset));
- if(size < 32)
- goto found_first;
- if(~tmp)
- goto found_middle;
- size -= 32;
- result += 32;
- }
- while(size & ~31UL) {
- if(~(tmp = *(p++)))
- goto found_middle;
- result += 32;
- size -= 32;
- }
- if(!size)
- return result;
- tmp = *p;
-
-found_first:
- /* tmp is little endian, so we would have to swab the shift,
- * see above. But then we have to swab tmp below for ffz, so
- * we might as well do this here.
- */
- return result + ffz(__swab32(tmp) | (~0UL << size));
-found_middle:
- return result + ffz(__swab32(tmp));
-}
-#endif
-
-#define ext2_set_bit_atomic(lock, nr, addr) \
- ({ \
- int ret; \
- spin_lock(lock); \
- ret = ext2_set_bit((nr), (addr)); \
- spin_unlock(lock); \
- ret; \
- })
-
-#define ext2_clear_bit_atomic(lock, nr, addr) \
- ({ \
- int ret; \
- spin_lock(lock); \
- ret = ext2_clear_bit((nr), (addr)); \
- spin_unlock(lock); \
- ret; \
- })
-
-/* Bitmap functions for the minix filesystem. */
-#define minix_test_and_set_bit(nr,addr) test_and_set_bit(nr,addr)
-#define minix_set_bit(nr,addr) set_bit(nr,addr)
-#define minix_test_and_clear_bit(nr,addr) test_and_clear_bit(nr,addr)
-#define minix_test_bit(nr,addr) test_bit(nr,addr)
-#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size)
-
-#define ffs(x) generic_ffs(x)
-#define fls(x) generic_fls(x)
-#define fls64(x) generic_fls64(x)
+#include <asm-generic/bitops/__ffs.h>
+#include <asm-generic/bitops/find.h>
+#include <asm-generic/bitops/hweight.h>
+#include <asm-generic/bitops/sched.h>
+#include <asm-generic/bitops/ffs.h>
+#include <asm-generic/bitops/ext2-non-atomic.h>
+#include <asm-generic/bitops/ext2-atomic.h>
+#include <asm-generic/bitops/minix.h>
+#include <asm-generic/bitops/fls.h>
+#include <asm-generic/bitops/fls64.h>
#endif /* __KERNEL__ */
diff --git a/include/asm-sparc/bitops.h b/include/asm-sparc/bitops.h
index 41722b5e45e..04aa3318f76 100644
--- a/include/asm-sparc/bitops.h
+++ b/include/asm-sparc/bitops.h
@@ -152,386 +152,22 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
: "memory", "cc");
}
-/*
- * non-atomic versions
- */
-static inline void __set_bit(int nr, volatile unsigned long *addr)
-{
- unsigned long mask = 1UL << (nr & 0x1f);
- unsigned long *p = ((unsigned long *)addr) + (nr >> 5);
-
- *p |= mask;
-}
-
-static inline void __clear_bit(int nr, volatile unsigned long *addr)
-{
- unsigned long mask = 1UL << (nr & 0x1f);
- unsigned long *p = ((unsigned long *)addr) + (nr >> 5);
-
- *p &= ~mask;
-}
-
-static inline void __change_bit(int nr, volatile unsigned long *addr)
-{
- unsigned long mask = 1UL << (nr & 0x1f);
- unsigned long *p = ((unsigned long *)addr) + (nr >> 5);
-
- *p ^= mask;
-}
-
-static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
-{
- unsigned long mask = 1UL << (nr & 0x1f);
- unsigned long *p = ((unsigned long *)addr) + (nr >> 5);
- unsigned long old = *p;
-
- *p = old | mask;
- return (old & mask) != 0;
-}
-
-static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
-{
- unsigned long mask = 1UL << (nr & 0x1f);
- unsigned long *p = ((unsigned long *)addr) + (nr >> 5);
- unsigned long old = *p;
-
- *p = old & ~mask;
- return (old & mask) != 0;
-}
-
-static inline int __test_and_change_bit(int nr, volatile unsigned long *addr)
-{
- unsigned long mask = 1UL << (nr & 0x1f);
- unsigned long *p = ((unsigned long *)addr) + (nr >> 5);
- unsigned long old = *p;
-
- *p = old ^ mask;
- return (old & mask) != 0;
-}
+#include <asm-generic/bitops/non-atomic.h>
#define smp_mb__before_clear_bit() do { } while(0)
#define smp_mb__after_clear_bit() do { } while(0)
-/* The following routine need not be atomic. */
-static inline int test_bit(int nr, __const__ volatile unsigned long *addr)
-{
- return (1UL & (((unsigned long *)addr)[nr >> 5] >> (nr & 31))) != 0UL;
-}
-
-/* The easy/cheese version for now. */
-static inline unsigned long ffz(unsigned long word)
-{
- unsigned long result = 0;
-
- while(word & 1) {
- result++;
- word >>= 1;
- }
- return result;
-}
-
-/**
- * __ffs - find first bit in word.
- * @word: The word to search
- *
- * Undefined if no bit exists, so code should check against 0 first.
- */
-static inline int __ffs(unsigned long word)
-{
- int num = 0;
-
- if ((word & 0xffff) == 0) {
- num += 16;
- word >>= 16;
- }
- if ((word & 0xff) == 0) {
- num += 8;
- word >>= 8;
- }
- if ((word & 0xf) == 0) {
- num += 4;
- word >>= 4;
- }
- if ((word & 0x3) == 0) {
- num += 2;
- word >>= 2;
- }
- if ((word & 0x1) == 0)
- num += 1;
- return num;
-}
-
-/*
- * Every architecture must define this function. It's the fastest
- * way of searching a 140-bit bitmap where the first 100 bits are
- * unlikely to be set. It's guaranteed that at least one of the 140
- * bits is cleared.
- */
-static inline int sched_find_first_bit(unsigned long *b)
-{
-
- if (unlikely(b[0]))
- return __ffs(b[0]);
- if (unlikely(b[1]))
- return __ffs(b[1]) + 32;
- if (unlikely(b[2]))
- return __ffs(b[2]) + 64;
- if (b[3])
- return __ffs(b[3]) + 96;
- return __ffs(b[4]) + 128;
-}
-
-/*
- * ffs: find first bit set. This is defined the same way as
- * the libc and compiler builtin ffs routines, therefore
- * differs in spirit from the above ffz (man ffs).
- */
-static inline int ffs(int x)
-{
- if (!x)
- return 0;
- return __ffs((unsigned long)x) + 1;
-}
-
-/*
- * fls: find last (most-significant) bit set.
- * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
- */
-#define fls(x) generic_fls(x)
-#define fls64(x) generic_fls64(x)
-
-/*
- * hweightN: returns the hamming weight (i.e. the number
- * of bits set) of a N-bit word
- */
-#define hweight32(x) generic_hweight32(x)
-#define hweight16(x) generic_hweight16(x)
-#define hweight8(x) generic_hweight8(x)
-
-/*
- * find_next_zero_bit() finds the first zero bit in a bit string of length
- * 'size' bits, starting the search at bit 'offset'. This is largely based
- * on Linus's ALPHA routines, which are pretty portable BTW.
- */
-static inline unsigned long find_next_zero_bit(const unsigned long *addr,
- unsigned long size, unsigned long offset)
-{
- const unsigned long *p = addr + (offset >> 5);
- unsigned long result = offset & ~31UL;
- unsigned long tmp;
-
- if (offset >= size)
- return size;
- size -= result;
- offset &= 31UL;
- if (offset) {
- tmp = *(p++);
- tmp |= ~0UL >> (32-offset);
- if (size < 32)
- goto found_first;
- if (~tmp)
- goto found_middle;
- size -= 32;
- result += 32;
- }
- while (size & ~31UL) {
- if (~(tmp = *(p++)))
- goto found_middle;
- result += 32;
- size -= 32;
- }
- if (!size)
- return result;
- tmp = *p;
-
-found_first:
- tmp |= ~0UL << size;
- if (tmp == ~0UL) /* Are any bits zero? */
- return result + size; /* Nope. */
-found_middle:
- return result + ffz(tmp);
-}
-
-/*
- * Linus sez that gcc can optimize the following correctly, we'll see if this
- * holds on the Sparc as it does for the ALPHA.
- */
-#define find_first_zero_bit(addr, size) \
- find_next_zero_bit((addr), (size), 0)
-
-/**
- * find_next_bit - find the first set bit in a memory region
- * @addr: The address to base the search on
- * @offset: The bitnumber to start searching at
- * @size: The maximum size to search
- *
- * Scheduler induced bitop, do not use.
- */
-static inline int find_next_bit(const unsigned long *addr, int size, int offset)
-{
- const unsigned long *p = addr + (offset >> 5);
- int num = offset & ~0x1f;
- unsigned long word;
-
- word = *p++;
- word &= ~((1 << (offset & 0x1f)) - 1);
- while (num < size) {
- if (word != 0) {
- return __ffs(word) + num;
- }
- word = *p++;
- num += 0x20;
- }
- return num;
-}
-
-/**
- * find_first_bit - find the first set bit in a memory region
- * @addr: The address to start the search at
- * @size: The maximum size to search
- *
- * Returns the bit-number of the first set bit, not the number of the byte
- * containing a bit.
- */
-#define find_first_bit(addr, size) \
- find_next_bit((addr), (size), 0)
-
-/*
- */
-static inline int test_le_bit(int nr, __const__ unsigned long * addr)
-{
- __const__ unsigned char *ADDR = (__const__ unsigned char *) addr;
- return (ADDR[nr >> 3] >> (nr & 7)) & 1;
-}
-
-/*
- * non-atomic versions
- */
-static inline void __set_le_bit(int nr, unsigned long *addr)
-{
- unsigned char *ADDR = (unsigned char *)addr;
-
- ADDR += nr >> 3;
- *ADDR |= 1 << (nr & 0x07);
-}
-
-static inline void __clear_le_bit(int nr, unsigned long *addr)
-{
- unsigned char *ADDR = (unsigned char *)addr;
-
- ADDR += nr >> 3;
- *ADDR &= ~(1 << (nr & 0x07));
-}
-
-static inline int __test_and_set_le_bit(int nr, unsigned long *addr)
-{
- int mask, retval;
- unsigned char *ADDR = (unsigned char *)addr;
-
- ADDR += nr >> 3;
- mask = 1 << (nr & 0x07);
- retval = (mask & *ADDR) != 0;
- *ADDR |= mask;
- return retval;
-}
-
-static inline int __test_and_clear_le_bit(int nr, unsigned long *addr)
-{
- int mask, retval;
- unsigned char *ADDR = (unsigned char *)addr;
-
- ADDR += nr >> 3;
- mask = 1 << (nr & 0x07);
- retval = (mask & *ADDR) != 0;
- *ADDR &= ~mask;
- return retval;
-}
-
-static inline unsigned long find_next_zero_le_bit(const unsigned long *addr,
- unsigned long size, unsigned long offset)
-{
- const unsigned long *p = addr + (offset >> 5);
- unsigned long result = offset & ~31UL;
- unsigned long tmp;
-
- if (offset >= size)
- return size;
- size -= result;
- offset &= 31UL;
- if(offset) {
- tmp = *(p++);
- tmp |= __swab32(~0UL >> (32-offset));
- if(size < 32)
- goto found_first;
- if(~tmp)
- goto found_middle;
- size -= 32;
- result += 32;
- }
- while(size & ~31UL) {
- if(~(tmp = *(p++)))
- goto found_middle;
- result += 32;
- size -= 32;
- }
- if(!size)
- return result;
- tmp = *p;
-
-found_first:
- tmp = __swab32(tmp) | (~0UL << size);
- if (tmp == ~0UL) /* Are any bits zero? */
- return result + size; /* Nope. */
- return result + ffz(tmp);
-
-found_middle:
- return result + ffz(__swab32(tmp));
-}
-
-#define find_first_zero_le_bit(addr, size) \
- find_next_zero_le_bit((addr), (size), 0)
-
-#define ext2_set_bit(nr,addr) \
- __test_and_set_le_bit((nr),(unsigned long *)(addr))
-#define ext2_clear_bit(nr,addr) \
- __test_and_clear_le_bit((nr),(unsigned long *)(addr))
-
-#define ext2_set_bit_atomic(lock, nr, addr) \
- ({ \
- int ret; \
- spin_lock(lock); \
- ret = ext2_set_bit((nr), (unsigned long *)(addr)); \
- spin_unlock(lock); \
- ret; \
- })
-
-#define ext2_clear_bit_atomic(lock, nr, addr) \
- ({ \
- int ret; \
- spin_lock(lock); \
- ret = ext2_clear_bit((nr), (unsigned long *)(addr)); \
- spin_unlock(lock); \
- ret; \
- })
-
-#define ext2_test_bit(nr,addr) \
- test_le_bit((nr),(unsigned long *)(addr))
-#define ext2_find_first_zero_bit(addr, size) \
- find_first_zero_le_bit((unsigned long *)(addr), (size))
-#define ext2_find_next_zero_bit(addr, size, off) \
- find_next_zero_le_bit((unsigned long *)(addr), (size), (off))
-
-/* Bitmap functions for the minix filesystem. */
-#define minix_test_and_set_bit(nr,addr) \
- test_and_set_bit((nr),(unsigned long *)(addr))
-#define minix_set_bit(nr,addr) \
- set_bit((nr),(unsigned long *)(addr))
-#define minix_test_and_clear_bit(nr,addr) \
- test_and_clear_bit((nr),(unsigned long *)(addr))
-#define minix_test_bit(nr,addr) \
- test_bit((nr),(unsigned long *)(addr))
-#define minix_find_first_zero_bit(addr,size) \
- find_first_zero_bit((unsigned long *)(addr),(size))
+#include <asm-generic/bitops/ffz.h>
+#include <asm-generic/bitops/__ffs.h>
+#include <asm-generic/bitops/sched.h>
+#include <asm-generic/bitops/ffs.h>
+#include <asm-generic/bitops/fls.h>
+#include <asm-generic/bitops/fls64.h>
+#include <asm-generic/bitops/hweight.h>
+#include <asm-generic/bitops/find.h>
+#include <asm-generic/bitops/ext2-non-atomic.h>
+#include <asm-generic/bitops/ext2-atomic.h>
+#include <asm-generic/bitops/minix.h>
#endif /* __KERNEL__ */
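
For reference, the asm-generic/bitops/non-atomic.h helpers pulled in above keep the semantics of the open-coded sparc versions they replace; the sketch below is not the verbatim header, just an approximation that expresses the word size via BITS_PER_LONG instead of the hard-coded 5/0x1f shifts.

static inline void __set_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = 1UL << (nr % BITS_PER_LONG);
	unsigned long *p = ((unsigned long *)addr) + (nr / BITS_PER_LONG);

	*p |= mask;	/* plain read-modify-write: callers provide locking */
}

static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = 1UL << (nr % BITS_PER_LONG);
	unsigned long *p = ((unsigned long *)addr) + (nr / BITS_PER_LONG);
	unsigned long old = *p;

	*p = old | mask;
	return (old & mask) != 0;
}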
diff --git a/include/asm-sparc64/bitops.h b/include/asm-sparc64/bitops.h
index 6efc0162fb0..71944b0f09d 100644
--- a/include/asm-sparc64/bitops.h
+++ b/include/asm-sparc64/bitops.h
@@ -18,58 +18,7 @@ extern void set_bit(unsigned long nr, volatile unsigned long *addr);
extern void clear_bit(unsigned long nr, volatile unsigned long *addr);
extern void change_bit(unsigned long nr, volatile unsigned long *addr);
-/* "non-atomic" versions... */
-
-static inline void __set_bit(int nr, volatile unsigned long *addr)
-{
- unsigned long *m = ((unsigned long *)addr) + (nr >> 6);
-
- *m |= (1UL << (nr & 63));
-}
-
-static inline void __clear_bit(int nr, volatile unsigned long *addr)
-{
- unsigned long *m = ((unsigned long *)addr) + (nr >> 6);
-
- *m &= ~(1UL << (nr & 63));
-}
-
-static inline void __change_bit(int nr, volatile unsigned long *addr)
-{
- unsigned long *m = ((unsigned long *)addr) + (nr >> 6);
-
- *m ^= (1UL << (nr & 63));
-}
-
-static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
-{
- unsigned long *m = ((unsigned long *)addr) + (nr >> 6);
- unsigned long old = *m;
- unsigned long mask = (1UL << (nr & 63));
-
- *m = (old | mask);
- return ((old & mask) != 0);
-}
-
-static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
-{
- unsigned long *m = ((unsigned long *)addr) + (nr >> 6);
- unsigned long old = *m;
- unsigned long mask = (1UL << (nr & 63));
-
- *m = (old & ~mask);
- return ((old & mask) != 0);
-}
-
-static inline int __test_and_change_bit(int nr, volatile unsigned long *addr)
-{
- unsigned long *m = ((unsigned long *)addr) + (nr >> 6);
- unsigned long old = *m;
- unsigned long mask = (1UL << (nr & 63));
-
- *m = (old ^ mask);
- return ((old & mask) != 0);
-}
+#include <asm-generic/bitops/non-atomic.h>
#ifdef CONFIG_SMP
#define smp_mb__before_clear_bit() membar_storeload_loadload()
@@ -79,78 +28,15 @@ static inline int __test_and_change_bit(int nr, volatile unsigned long *addr)
#define smp_mb__after_clear_bit() barrier()
#endif
-static inline int test_bit(int nr, __const__ volatile unsigned long *addr)
-{
- return (1UL & (addr[nr >> 6] >> (nr & 63))) != 0UL;
-}
-
-/* The easy/cheese version for now. */
-static inline unsigned long ffz(unsigned long word)
-{
- unsigned long result;
-
- result = 0;
- while(word & 1) {
- result++;
- word >>= 1;
- }
- return result;
-}
-
-/**
- * __ffs - find first bit in word.
- * @word: The word to search
- *
- * Undefined if no bit exists, so code should check against 0 first.
- */
-static inline unsigned long __ffs(unsigned long word)
-{
- unsigned long result = 0;
-
- while (!(word & 1UL)) {
- result++;
- word >>= 1;
- }
- return result;
-}
-
-/*
- * fls: find last bit set.
- */
-
-#define fls(x) generic_fls(x)
-#define fls64(x) generic_fls64(x)
+#include <asm-generic/bitops/ffz.h>
+#include <asm-generic/bitops/__ffs.h>
+#include <asm-generic/bitops/fls.h>
+#include <asm-generic/bitops/fls64.h>
#ifdef __KERNEL__
-/*
- * Every architecture must define this function. It's the fastest
- * way of searching a 140-bit bitmap where the first 100 bits are
- * unlikely to be set. It's guaranteed that at least one of the 140
- * bits is cleared.
- */
-static inline int sched_find_first_bit(unsigned long *b)
-{
- if (unlikely(b[0]))
- return __ffs(b[0]);
- if (unlikely(((unsigned int)b[1])))
- return __ffs(b[1]) + 64;
- if (b[1] >> 32)
- return __ffs(b[1] >> 32) + 96;
- return __ffs(b[2]) + 128;
-}
-
-/*
- * ffs: find first bit set. This is defined the same way as
- * the libc and compiler builtin ffs routines, therefore
- * differs in spirit from the above ffz (man ffs).
- */
-static inline int ffs(int x)
-{
- if (!x)
- return 0;
- return __ffs((unsigned long)x) + 1;
-}
+#include <asm-generic/bitops/sched.h>
+#include <asm-generic/bitops/ffs.h>
/*
* hweightN: returns the hamming weight (i.e. the number
@@ -193,102 +79,23 @@ static inline unsigned int hweight8(unsigned int w)
#else
-#define hweight64(x) generic_hweight64(x)
-#define hweight32(x) generic_hweight32(x)
-#define hweight16(x) generic_hweight16(x)
-#define hweight8(x) generic_hweight8(x)
+#include <asm-generic/bitops/hweight.h>
#endif
#endif /* __KERNEL__ */
-/**
- * find_next_bit - find the next set bit in a memory region
- * @addr: The address to base the search on
- * @offset: The bitnumber to start searching at
- * @size: The maximum size to search
- */
-extern unsigned long find_next_bit(const unsigned long *, unsigned long,
- unsigned long);
-
-/**
- * find_first_bit - find the first set bit in a memory region
- * @addr: The address to start the search at
- * @size: The maximum size to search
- *
- * Returns the bit-number of the first set bit, not the number of the byte
- * containing a bit.
- */
-#define find_first_bit(addr, size) \
- find_next_bit((addr), (size), 0)
-
-/* find_next_zero_bit() finds the first zero bit in a bit string of length
- * 'size' bits, starting the search at bit 'offset'. This is largely based
- * on Linus's ALPHA routines, which are pretty portable BTW.
- */
-
-extern unsigned long find_next_zero_bit(const unsigned long *,
- unsigned long, unsigned long);
-
-#define find_first_zero_bit(addr, size) \
- find_next_zero_bit((addr), (size), 0)
-
-#define test_and_set_le_bit(nr,addr) \
- test_and_set_bit((nr) ^ 0x38, (addr))
-#define test_and_clear_le_bit(nr,addr) \
- test_and_clear_bit((nr) ^ 0x38, (addr))
-
-static inline int test_le_bit(int nr, __const__ unsigned long * addr)
-{
- int mask;
- __const__ unsigned char *ADDR = (__const__ unsigned char *) addr;
-
- ADDR += nr >> 3;
- mask = 1 << (nr & 0x07);
- return ((mask & *ADDR) != 0);
-}
-
-#define find_first_zero_le_bit(addr, size) \
- find_next_zero_le_bit((addr), (size), 0)
-
-extern unsigned long find_next_zero_le_bit(unsigned long *, unsigned long, unsigned long);
+#include <asm-generic/bitops/find.h>
#ifdef __KERNEL__
-#define __set_le_bit(nr, addr) \
- __set_bit((nr) ^ 0x38, (addr))
-#define __clear_le_bit(nr, addr) \
- __clear_bit((nr) ^ 0x38, (addr))
-#define __test_and_clear_le_bit(nr, addr) \
- __test_and_clear_bit((nr) ^ 0x38, (addr))
-#define __test_and_set_le_bit(nr, addr) \
- __test_and_set_bit((nr) ^ 0x38, (addr))
+#include <asm-generic/bitops/ext2-non-atomic.h>
-#define ext2_set_bit(nr,addr) \
- __test_and_set_le_bit((nr),(unsigned long *)(addr))
#define ext2_set_bit_atomic(lock,nr,addr) \
- test_and_set_le_bit((nr),(unsigned long *)(addr))
-#define ext2_clear_bit(nr,addr) \
- __test_and_clear_le_bit((nr),(unsigned long *)(addr))
+ test_and_set_bit((nr) ^ 0x38,(unsigned long *)(addr))
#define ext2_clear_bit_atomic(lock,nr,addr) \
- test_and_clear_le_bit((nr),(unsigned long *)(addr))
-#define ext2_test_bit(nr,addr) \
- test_le_bit((nr),(unsigned long *)(addr))
-#define ext2_find_first_zero_bit(addr, size) \
- find_first_zero_le_bit((unsigned long *)(addr), (size))
-#define ext2_find_next_zero_bit(addr, size, off) \
- find_next_zero_le_bit((unsigned long *)(addr), (size), (off))
+ test_and_clear_bit((nr) ^ 0x38,(unsigned long *)(addr))
-/* Bitmap functions for the minix filesystem. */
-#define minix_test_and_set_bit(nr,addr) \
- test_and_set_bit((nr),(unsigned long *)(addr))
-#define minix_set_bit(nr,addr) \
- set_bit((nr),(unsigned long *)(addr))
-#define minix_test_and_clear_bit(nr,addr) \
- test_and_clear_bit((nr),(unsigned long *)(addr))
-#define minix_test_bit(nr,addr) \
- test_bit((nr),(unsigned long *)(addr))
-#define minix_find_first_zero_bit(addr,size) \
- find_first_zero_bit((unsigned long *)(addr),(size))
+#include <asm-generic/bitops/minix.h>
#endif /* __KERNEL__ */
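
A note on the (nr) ^ 0x38 trick kept in the ext2 atomic helpers above: ext2 bitmaps are little-endian, and on a 64-bit big-endian machine XOR-ing the bit number with 0x38 flips the byte-within-word bits so the same bit is reached through big-endian unsigned long accesses. The helper below is purely illustrative, not part of the patch.

/*
 * Little-endian bit number -> equivalent bit index for big-endian
 * 64-bit word accesses.  Bits 3..5 of nr select the byte inside the
 * word; 0x38 == 0b111000 flips exactly those bits.
 */
static inline unsigned long le_to_be64_bitnr(unsigned long nr)
{
	return nr ^ 0x38;
}

/*
 * Example: nr = 0 (bit 0 of byte 0) maps to bit 56, because byte 0 is
 * the most significant byte of a big-endian 64-bit word.  The 32-bit
 * big-endian xtensa variant later in this patch uses ^ 0x18 for the
 * same reason.
 */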
diff --git a/include/asm-v850/bitops.h b/include/asm-v850/bitops.h
index 609b9e87222..1f6fd5ab417 100644
--- a/include/asm-v850/bitops.h
+++ b/include/asm-v850/bitops.h
@@ -22,25 +22,11 @@
#ifdef __KERNEL__
-/*
- * The __ functions are not atomic
- */
+#include <asm-generic/bitops/ffz.h>
/*
- * ffz = Find First Zero in word. Undefined if no zero exists,
- * so code should check against ~0UL first..
+ * The __ functions are not atomic
*/
-static inline unsigned long ffz (unsigned long word)
-{
- unsigned long result = 0;
-
- while (word & 1) {
- result++;
- word >>= 1;
- }
- return result;
-}
-
/* In the following constant-bit-op macros, a "g" constraint is used when
we really need an integer ("i" constraint). This is to avoid
@@ -153,203 +139,19 @@ static inline int __test_bit (int nr, const void *addr)
#define smp_mb__before_clear_bit() barrier ()
#define smp_mb__after_clear_bit() barrier ()
+#include <asm-generic/bitops/ffs.h>
+#include <asm-generic/bitops/fls.h>
+#include <asm-generic/bitops/fls64.h>
+#include <asm-generic/bitops/__ffs.h>
+#include <asm-generic/bitops/find.h>
+#include <asm-generic/bitops/sched.h>
+#include <asm-generic/bitops/hweight.h>
-#define find_first_zero_bit(addr, size) \
- find_next_zero_bit ((addr), (size), 0)
-
-static inline int find_next_zero_bit(const void *addr, int size, int offset)
-{
- unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
- unsigned long result = offset & ~31UL;
- unsigned long tmp;
-
- if (offset >= size)
- return size;
- size -= result;
- offset &= 31UL;
- if (offset) {
- tmp = * (p++);
- tmp |= ~0UL >> (32-offset);
- if (size < 32)
- goto found_first;
- if (~tmp)
- goto found_middle;
- size -= 32;
- result += 32;
- }
- while (size & ~31UL) {
- if (~ (tmp = * (p++)))
- goto found_middle;
- result += 32;
- size -= 32;
- }
- if (!size)
- return result;
- tmp = *p;
-
- found_first:
- tmp |= ~0UL << size;
- found_middle:
- return result + ffz (tmp);
-}
-
-
-/* This is the same as generic_ffs, but we can't use that because it's
- inline and the #include order mucks things up. */
-static inline int generic_ffs_for_find_next_bit(int x)
-{
- int r = 1;
-
- if (!x)
- return 0;
- if (!(x & 0xffff)) {
- x >>= 16;
- r += 16;
- }
- if (!(x & 0xff)) {
- x >>= 8;
- r += 8;
- }
- if (!(x & 0xf)) {
- x >>= 4;
- r += 4;
- }
- if (!(x & 3)) {
- x >>= 2;
- r += 2;
- }
- if (!(x & 1)) {
- x >>= 1;
- r += 1;
- }
- return r;
-}
-
-/*
- * Find next one bit in a bitmap reasonably efficiently.
- */
-static __inline__ unsigned long find_next_bit(const unsigned long *addr,
- unsigned long size, unsigned long offset)
-{
- unsigned int *p = ((unsigned int *) addr) + (offset >> 5);
- unsigned int result = offset & ~31UL;
- unsigned int tmp;
-
- if (offset >= size)
- return size;
- size -= result;
- offset &= 31UL;
- if (offset) {
- tmp = *p++;
- tmp &= ~0UL << offset;
- if (size < 32)
- goto found_first;
- if (tmp)
- goto found_middle;
- size -= 32;
- result += 32;
- }
- while (size >= 32) {
- if ((tmp = *p++) != 0)
- goto found_middle;
- result += 32;
- size -= 32;
- }
- if (!size)
- return result;
- tmp = *p;
-
-found_first:
- tmp &= ~0UL >> (32 - size);
- if (tmp == 0UL) /* Are any bits set? */
- return result + size; /* Nope. */
-found_middle:
- return result + generic_ffs_for_find_next_bit(tmp);
-}
-
-/*
- * find_first_bit - find the first set bit in a memory region
- */
-#define find_first_bit(addr, size) \
- find_next_bit((addr), (size), 0)
-
-
-#define ffs(x) generic_ffs (x)
-#define fls(x) generic_fls (x)
-#define fls64(x) generic_fls64(x)
-#define __ffs(x) ffs(x)
-
-
-/*
- * This is just `generic_ffs' from <linux/bitops.h>, except that it assumes
- * that at least one bit is set, and returns the real index of the bit
- * (rather than the bit index + 1, like ffs does).
- */
-static inline int sched_ffs(int x)
-{
- int r = 0;
-
- if (!(x & 0xffff)) {
- x >>= 16;
- r += 16;
- }
- if (!(x & 0xff)) {
- x >>= 8;
- r += 8;
- }
- if (!(x & 0xf)) {
- x >>= 4;
- r += 4;
- }
- if (!(x & 3)) {
- x >>= 2;
- r += 2;
- }
- if (!(x & 1)) {
- x >>= 1;
- r += 1;
- }
- return r;
-}
-
-/*
- * Every architecture must define this function. It's the fastest
- * way of searching a 140-bit bitmap where the first 100 bits are
- * unlikely to be set. It's guaranteed that at least one of the 140
- * bits is set.
- */
-static inline int sched_find_first_bit(unsigned long *b)
-{
- unsigned offs = 0;
- while (! *b) {
- b++;
- offs += 32;
- }
- return sched_ffs (*b) + offs;
-}
-
-/*
- * hweightN: returns the hamming weight (i.e. the number
- * of bits set) of a N-bit word
- */
-#define hweight32(x) generic_hweight32 (x)
-#define hweight16(x) generic_hweight16 (x)
-#define hweight8(x) generic_hweight8 (x)
-
-#define ext2_set_bit test_and_set_bit
+#include <asm-generic/bitops/ext2-non-atomic.h>
#define ext2_set_bit_atomic(l,n,a) test_and_set_bit(n,a)
-#define ext2_clear_bit test_and_clear_bit
#define ext2_clear_bit_atomic(l,n,a) test_and_clear_bit(n,a)
-#define ext2_test_bit test_bit
-#define ext2_find_first_zero_bit find_first_zero_bit
-#define ext2_find_next_zero_bit find_next_zero_bit
-/* Bitmap functions for the minix filesystem. */
-#define minix_test_and_set_bit test_and_set_bit
-#define minix_set_bit set_bit
-#define minix_test_and_clear_bit test_and_clear_bit
-#define minix_test_bit test_bit
-#define minix_find_first_zero_bit find_first_zero_bit
+#include <asm-generic/bitops/minix.h>
#endif /* __KERNEL__ */
diff --git a/include/asm-x86_64/bitops.h b/include/asm-x86_64/bitops.h
index eb4df23e1e4..79212128d0f 100644
--- a/include/asm-x86_64/bitops.h
+++ b/include/asm-x86_64/bitops.h
@@ -356,14 +356,7 @@ static __inline__ unsigned long __fls(unsigned long word)
#ifdef __KERNEL__
-static inline int sched_find_first_bit(const unsigned long *b)
-{
- if (b[0])
- return __ffs(b[0]);
- if (b[1])
- return __ffs(b[1]) + 64;
- return __ffs(b[2]) + 128;
-}
+#include <asm-generic/bitops/sched.h>
/**
* ffs - find first bit set
@@ -412,43 +405,20 @@ static __inline__ int fls(int x)
return r+1;
}
-/**
- * hweightN - returns the hamming weight of a N-bit word
- * @x: the word to weigh
- *
- * The Hamming Weight of a number is the total number of bits set in it.
- */
-
-#define hweight64(x) generic_hweight64(x)
-#define hweight32(x) generic_hweight32(x)
-#define hweight16(x) generic_hweight16(x)
-#define hweight8(x) generic_hweight8(x)
+#include <asm-generic/bitops/hweight.h>
#endif /* __KERNEL__ */
#ifdef __KERNEL__
-#define ext2_set_bit(nr,addr) \
- __test_and_set_bit((nr),(unsigned long*)addr)
+#include <asm-generic/bitops/ext2-non-atomic.h>
+
#define ext2_set_bit_atomic(lock,nr,addr) \
test_and_set_bit((nr),(unsigned long*)addr)
-#define ext2_clear_bit(nr, addr) \
- __test_and_clear_bit((nr),(unsigned long*)addr)
#define ext2_clear_bit_atomic(lock,nr,addr) \
test_and_clear_bit((nr),(unsigned long*)addr)
-#define ext2_test_bit(nr, addr) test_bit((nr),(unsigned long*)addr)
-#define ext2_find_first_zero_bit(addr, size) \
- find_first_zero_bit((unsigned long*)addr, size)
-#define ext2_find_next_zero_bit(addr, size, off) \
- find_next_zero_bit((unsigned long*)addr, size, off)
-
-/* Bitmap functions for the minix filesystem. */
-#define minix_test_and_set_bit(nr,addr) __test_and_set_bit(nr,(void*)addr)
-#define minix_set_bit(nr,addr) __set_bit(nr,(void*)addr)
-#define minix_test_and_clear_bit(nr,addr) __test_and_clear_bit(nr,(void*)addr)
-#define minix_test_bit(nr,addr) test_bit(nr,(void*)addr)
-#define minix_find_first_zero_bit(addr,size) \
- find_first_zero_bit((void*)addr,size)
+
+#include <asm-generic/bitops/minix.h>
#endif /* __KERNEL__ */
diff --git a/include/asm-xtensa/bitops.h b/include/asm-xtensa/bitops.h
index 0a2065f1a37..d815649617a 100644
--- a/include/asm-xtensa/bitops.h
+++ b/include/asm-xtensa/bitops.h
@@ -23,156 +23,11 @@
# error SMP not supported on this architecture
#endif
-static __inline__ void set_bit(int nr, volatile void * addr)
-{
- unsigned long mask = 1 << (nr & 0x1f);
- unsigned long *a = ((unsigned long *)addr) + (nr >> 5);
- unsigned long flags;
-
- local_irq_save(flags);
- *a |= mask;
- local_irq_restore(flags);
-}
-
-static __inline__ void __set_bit(int nr, volatile unsigned long * addr)
-{
- unsigned long mask = 1 << (nr & 0x1f);
- unsigned long *a = ((unsigned long *)addr) + (nr >> 5);
-
- *a |= mask;
-}
-
-static __inline__ void clear_bit(int nr, volatile void * addr)
-{
- unsigned long mask = 1 << (nr & 0x1f);
- unsigned long *a = ((unsigned long *)addr) + (nr >> 5);
- unsigned long flags;
-
- local_irq_save(flags);
- *a &= ~mask;
- local_irq_restore(flags);
-}
-
-static __inline__ void __clear_bit(int nr, volatile unsigned long *addr)
-{
- unsigned long mask = 1 << (nr & 0x1f);
- unsigned long *a = ((unsigned long *)addr) + (nr >> 5);
-
- *a &= ~mask;
-}
-
-/*
- * clear_bit() doesn't provide any barrier for the compiler.
- */
-
#define smp_mb__before_clear_bit() barrier()
#define smp_mb__after_clear_bit() barrier()
-static __inline__ void change_bit(int nr, volatile void * addr)
-{
- unsigned long mask = 1 << (nr & 0x1f);
- unsigned long *a = ((unsigned long *)addr) + (nr >> 5);
- unsigned long flags;
-
- local_irq_save(flags);
- *a ^= mask;
- local_irq_restore(flags);
-}
-
-static __inline__ void __change_bit(int nr, volatile void * addr)
-{
- unsigned long mask = 1 << (nr & 0x1f);
- unsigned long *a = ((unsigned long *)addr) + (nr >> 5);
-
- *a ^= mask;
-}
-
-static __inline__ int test_and_set_bit(int nr, volatile void * addr)
-{
- unsigned long retval;
- unsigned long mask = 1 << (nr & 0x1f);
- unsigned long *a = ((unsigned long *)addr) + (nr >> 5);
- unsigned long flags;
-
- local_irq_save(flags);
- retval = (mask & *a) != 0;
- *a |= mask;
- local_irq_restore(flags);
-
- return retval;
-}
-
-static __inline__ int __test_and_set_bit(int nr, volatile void * addr)
-{
- unsigned long retval;
- unsigned long mask = 1 << (nr & 0x1f);
- unsigned long *a = ((unsigned long *)addr) + (nr >> 5);
-
- retval = (mask & *a) != 0;
- *a |= mask;
-
- return retval;
-}
-
-static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
-{
- unsigned long retval;
- unsigned long mask = 1 << (nr & 0x1f);
- unsigned long *a = ((unsigned long *)addr) + (nr >> 5);
- unsigned long flags;
-
- local_irq_save(flags);
- retval = (mask & *a) != 0;
- *a &= ~mask;
- local_irq_restore(flags);
-
- return retval;
-}
-
-static __inline__ int __test_and_clear_bit(int nr, volatile void * addr)
-{
- unsigned long mask = 1 << (nr & 0x1f);
- unsigned long *a = ((unsigned long *)addr) + (nr >> 5);
- unsigned long old = *a;
-
- *a = old & ~mask;
- return (old & mask) != 0;
-}
-
-static __inline__ int test_and_change_bit(int nr, volatile void * addr)
-{
- unsigned long retval;
- unsigned long mask = 1 << (nr & 0x1f);
- unsigned long *a = ((unsigned long *)addr) + (nr >> 5);
- unsigned long flags;
-
- local_irq_save(flags);
-
- retval = (mask & *a) != 0;
- *a ^= mask;
- local_irq_restore(flags);
-
- return retval;
-}
-
-/*
- * non-atomic version; can be reordered
- */
-
-static __inline__ int __test_and_change_bit(int nr, volatile void *addr)
-{
- unsigned long mask = 1 << (nr & 0x1f);
- unsigned long *a = ((unsigned long *)addr) + (nr >> 5);
- unsigned long old = *a;
-
- *a = old ^ mask;
- return (old & mask) != 0;
-}
-
-static __inline__ int test_bit(int nr, const volatile void *addr)
-{
- return 1UL & (((const volatile unsigned int *)addr)[nr>>5] >> (nr&31));
-}
+#include <asm-generic/bitops/atomic.h>
+#include <asm-generic/bitops/non-atomic.h>
#if XCHAL_HAVE_NSA
@@ -245,202 +100,23 @@ static __inline__ int fls (unsigned int x)
{
return __cntlz(x);
}
-#define fls64(x) generic_fls64(x)
-
-static __inline__ int
-find_next_bit(const unsigned long *addr, int size, int offset)
-{
- const unsigned long *p = addr + (offset >> 5);
- unsigned long result = offset & ~31UL;
- unsigned long tmp;
-
- if (offset >= size)
- return size;
- size -= result;
- offset &= 31UL;
- if (offset) {
- tmp = *p++;
- tmp &= ~0UL << offset;
- if (size < 32)
- goto found_first;
- if (tmp)
- goto found_middle;
- size -= 32;
- result += 32;
- }
- while (size >= 32) {
- if ((tmp = *p++) != 0)
- goto found_middle;
- result += 32;
- size -= 32;
- }
- if (!size)
- return result;
- tmp = *p;
-
-found_first:
- tmp &= ~0UL >> (32 - size);
- if (tmp == 0UL) /* Are any bits set? */
- return result + size; /* Nope. */
-found_middle:
- return result + __ffs(tmp);
-}
-
-/**
- * find_first_bit - find the first set bit in a memory region
- * @addr: The address to start the search at
- * @size: The maximum size to search
- *
- * Returns the bit-number of the first set bit, not the number of the byte
- * containing a bit.
- */
-
-#define find_first_bit(addr, size) \
- find_next_bit((addr), (size), 0)
-
-static __inline__ int
-find_next_zero_bit(const unsigned long *addr, int size, int offset)
-{
- const unsigned long *p = addr + (offset >> 5);
- unsigned long result = offset & ~31UL;
- unsigned long tmp;
-
- if (offset >= size)
- return size;
- size -= result;
- offset &= 31UL;
- if (offset) {
- tmp = *p++;
- tmp |= ~0UL >> (32-offset);
- if (size < 32)
- goto found_first;
- if (~tmp)
- goto found_middle;
- size -= 32;
- result += 32;
- }
- while (size & ~31UL) {
- if (~(tmp = *p++))
- goto found_middle;
- result += 32;
- size -= 32;
- }
- if (!size)
- return result;
- tmp = *p;
-
-found_first:
- tmp |= ~0UL << size;
-found_middle:
- return result + ffz(tmp);
-}
-
-#define find_first_zero_bit(addr, size) \
- find_next_zero_bit((addr), (size), 0)
+#include <asm-generic/bitops/fls64.h>
+#include <asm-generic/bitops/find.h>
+#include <asm-generic/bitops/ext2-non-atomic.h>
#ifdef __XTENSA_EL__
-# define ext2_set_bit(nr,addr) __test_and_set_bit((nr), (addr))
# define ext2_set_bit_atomic(lock,nr,addr) test_and_set_bit((nr),(addr))
-# define ext2_clear_bit(nr,addr) __test_and_clear_bit((nr), (addr))
# define ext2_clear_bit_atomic(lock,nr,addr) test_and_clear_bit((nr),(addr))
-# define ext2_test_bit(nr,addr) test_bit((nr), (addr))
-# define ext2_find_first_zero_bit(addr, size) find_first_zero_bit((addr),(size))
-# define ext2_find_next_zero_bit(addr, size, offset) \
- find_next_zero_bit((addr), (size), (offset))
#elif defined(__XTENSA_EB__)
-# define ext2_set_bit(nr,addr) __test_and_set_bit((nr) ^ 0x18, (addr))
# define ext2_set_bit_atomic(lock,nr,addr) test_and_set_bit((nr) ^ 0x18, (addr))
-# define ext2_clear_bit(nr,addr) __test_and_clear_bit((nr) ^ 18, (addr))
# define ext2_clear_bit_atomic(lock,nr,addr) test_and_clear_bit((nr)^0x18,(addr))
-# define ext2_test_bit(nr,addr) test_bit((nr) ^ 0x18, (addr))
-# define ext2_find_first_zero_bit(addr, size) \
- ext2_find_next_zero_bit((addr), (size), 0)
-
-static __inline__ unsigned long ext2_find_next_zero_bit(void *addr, unsigned long size, unsigned long offset)
-{
- unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
- unsigned long result = offset & ~31UL;
- unsigned long tmp;
-
- if (offset >= size)
- return size;
- size -= result;
- offset &= 31UL;
- if(offset) {
- /* We hold the little endian value in tmp, but then the
- * shift is illegal. So we could keep a big endian value
- * in tmp, like this:
- *
- * tmp = __swab32(*(p++));
- * tmp |= ~0UL >> (32-offset);
- *
- * but this would decrease preformance, so we change the
- * shift:
- */
- tmp = *(p++);
- tmp |= __swab32(~0UL >> (32-offset));
- if(size < 32)
- goto found_first;
- if(~tmp)
- goto found_middle;
- size -= 32;
- result += 32;
- }
- while(size & ~31UL) {
- if(~(tmp = *(p++)))
- goto found_middle;
- result += 32;
- size -= 32;
- }
- if(!size)
- return result;
- tmp = *p;
-
-found_first:
- /* tmp is little endian, so we would have to swab the shift,
- * see above. But then we have to swab tmp below for ffz, so
- * we might as well do this here.
- */
- return result + ffz(__swab32(tmp) | (~0UL << size));
-found_middle:
- return result + ffz(__swab32(tmp));
-}
-
#else
# error processor byte order undefined!
#endif
-
-#define hweight32(x) generic_hweight32(x)
-#define hweight16(x) generic_hweight16(x)
-#define hweight8(x) generic_hweight8(x)
-
-/*
- * Find the first bit set in a 140-bit bitmap.
- * The first 100 bits are unlikely to be set.
- */
-
-static inline int sched_find_first_bit(const unsigned long *b)
-{
- if (unlikely(b[0]))
- return __ffs(b[0]);
- if (unlikely(b[1]))
- return __ffs(b[1]) + 32;
- if (unlikely(b[2]))
- return __ffs(b[2]) + 64;
- if (b[3])
- return __ffs(b[3]) + 96;
- return __ffs(b[4]) + 128;
-}
-
-
-/* Bitmap functions for the minix filesystem. */
-
-#define minix_test_and_set_bit(nr,addr) test_and_set_bit(nr,addr)
-#define minix_set_bit(nr,addr) set_bit(nr,addr)
-#define minix_test_and_clear_bit(nr,addr) test_and_clear_bit(nr,addr)
-#define minix_test_bit(nr,addr) test_bit(nr,addr)
-#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size)
+#include <asm-generic/bitops/hweight.h>
+#include <asm-generic/bitops/sched.h>
+#include <asm-generic/bitops/minix.h>
#endif /* __KERNEL__ */
diff --git a/include/linux/bitops.h b/include/linux/bitops.h
index f17525a963d..5d1eabcde5d 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -3,88 +3,11 @@
#include <asm/types.h>
/*
- * ffs: find first bit set. This is defined the same way as
- * the libc and compiler builtin ffs routines, therefore
- * differs in spirit from the above ffz (man ffs).
- */
-
-static inline int generic_ffs(int x)
-{
- int r = 1;
-
- if (!x)
- return 0;
- if (!(x & 0xffff)) {
- x >>= 16;
- r += 16;
- }
- if (!(x & 0xff)) {
- x >>= 8;
- r += 8;
- }
- if (!(x & 0xf)) {
- x >>= 4;
- r += 4;
- }
- if (!(x & 3)) {
- x >>= 2;
- r += 2;
- }
- if (!(x & 1)) {
- x >>= 1;
- r += 1;
- }
- return r;
-}
-
-/*
- * fls: find last bit set.
- */
-
-static __inline__ int generic_fls(int x)
-{
- int r = 32;
-
- if (!x)
- return 0;
- if (!(x & 0xffff0000u)) {
- x <<= 16;
- r -= 16;
- }
- if (!(x & 0xff000000u)) {
- x <<= 8;
- r -= 8;
- }
- if (!(x & 0xf0000000u)) {
- x <<= 4;
- r -= 4;
- }
- if (!(x & 0xc0000000u)) {
- x <<= 2;
- r -= 2;
- }
- if (!(x & 0x80000000u)) {
- x <<= 1;
- r -= 1;
- }
- return r;
-}
-
-/*
* Include this here because some architectures need generic_ffs/fls in
* scope
*/
#include <asm/bitops.h>
-
-static inline int generic_fls64(__u64 x)
-{
- __u32 h = x >> 32;
- if (h)
- return fls(h) + 32;
- return fls(x);
-}
-
static __inline__ int get_bitmask_order(unsigned int count)
{
int order;
@@ -103,54 +26,9 @@ static __inline__ int get_count_order(unsigned int count)
return order;
}
-/*
- * hweightN: returns the hamming weight (i.e. the number
- * of bits set) of a N-bit word
- */
-
-static inline unsigned int generic_hweight32(unsigned int w)
-{
- unsigned int res = (w & 0x55555555) + ((w >> 1) & 0x55555555);
- res = (res & 0x33333333) + ((res >> 2) & 0x33333333);
- res = (res & 0x0F0F0F0F) + ((res >> 4) & 0x0F0F0F0F);
- res = (res & 0x00FF00FF) + ((res >> 8) & 0x00FF00FF);
- return (res & 0x0000FFFF) + ((res >> 16) & 0x0000FFFF);
-}
-
-static inline unsigned int generic_hweight16(unsigned int w)
-{
- unsigned int res = (w & 0x5555) + ((w >> 1) & 0x5555);
- res = (res & 0x3333) + ((res >> 2) & 0x3333);
- res = (res & 0x0F0F) + ((res >> 4) & 0x0F0F);
- return (res & 0x00FF) + ((res >> 8) & 0x00FF);
-}
-
-static inline unsigned int generic_hweight8(unsigned int w)
-{
- unsigned int res = (w & 0x55) + ((w >> 1) & 0x55);
- res = (res & 0x33) + ((res >> 2) & 0x33);
- return (res & 0x0F) + ((res >> 4) & 0x0F);
-}
-
-static inline unsigned long generic_hweight64(__u64 w)
-{
-#if BITS_PER_LONG < 64
- return generic_hweight32((unsigned int)(w >> 32)) +
- generic_hweight32((unsigned int)w);
-#else
- u64 res;
- res = (w & 0x5555555555555555ul) + ((w >> 1) & 0x5555555555555555ul);
- res = (res & 0x3333333333333333ul) + ((res >> 2) & 0x3333333333333333ul);
- res = (res & 0x0F0F0F0F0F0F0F0Ful) + ((res >> 4) & 0x0F0F0F0F0F0F0F0Ful);
- res = (res & 0x00FF00FF00FF00FFul) + ((res >> 8) & 0x00FF00FF00FF00FFul);
- res = (res & 0x0000FFFF0000FFFFul) + ((res >> 16) & 0x0000FFFF0000FFFFul);
- return (res & 0x00000000FFFFFFFFul) + ((res >> 32) & 0x00000000FFFFFFFFul);
-#endif
-}
-
static inline unsigned long hweight_long(unsigned long w)
{
- return sizeof(w) == 4 ? generic_hweight32(w) : generic_hweight64(w);
+ return sizeof(w) == 4 ? hweight32(w) : hweight64(w);
}
/*
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index 9f159baf153..fb7e9b7ccbe 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -46,25 +46,28 @@ struct address_space;
typedef void (bh_end_io_t)(struct buffer_head *bh, int uptodate);
/*
- * Keep related fields in common cachelines. The most commonly accessed
- * field (b_state) goes at the start so the compiler does not generate
- * indexed addressing for it.
+ * Historically, a buffer_head was used to map a single block
+ * within a page, and of course as the unit of I/O through the
+ * filesystem and block layers. Nowadays the basic I/O unit
+ * is the bio, and buffer_heads are used for extracting block
+ * mappings (via a get_block_t call), for tracking state within
+ * a page (via a page_mapping) and for wrapping bio submission
+ * for backward compatibility reasons (e.g. submit_bh).
*/
struct buffer_head {
- /* First cache line: */
unsigned long b_state; /* buffer state bitmap (see above) */
struct buffer_head *b_this_page;/* circular list of page's buffers */
struct page *b_page; /* the page this bh is mapped to */
- atomic_t b_count; /* users using this block */
- u32 b_size; /* block size */
- sector_t b_blocknr; /* block number */
- char *b_data; /* pointer to data block */
+ sector_t b_blocknr; /* start block number */
+ size_t b_size; /* size of mapping */
+ char *b_data; /* pointer to data within the page */
struct block_device *b_bdev;
bh_end_io_t *b_end_io; /* I/O completion */
void *b_private; /* reserved for b_end_io */
struct list_head b_assoc_buffers; /* associated with another mapping */
+ atomic_t b_count; /* users using this buffer_head */
};
/*
@@ -189,8 +192,8 @@ extern int buffer_heads_over_limit;
* address_spaces.
*/
int try_to_release_page(struct page * page, gfp_t gfp_mask);
-int block_invalidatepage(struct page *page, unsigned long offset);
-int do_invalidatepage(struct page *page, unsigned long offset);
+void block_invalidatepage(struct page *page, unsigned long offset);
+void do_invalidatepage(struct page *page, unsigned long offset);
int block_write_full_page(struct page *page, get_block_t *get_block,
struct writeback_control *wbc);
int block_read_full_page(struct page*, get_block_t*);
@@ -200,7 +203,7 @@ int cont_prepare_write(struct page*, unsigned, unsigned, get_block_t*,
int generic_cont_expand(struct inode *inode, loff_t size);
int generic_cont_expand_simple(struct inode *inode, loff_t size);
int block_commit_write(struct page *page, unsigned from, unsigned to);
-int block_sync_page(struct page *);
+void block_sync_page(struct page *);
sector_t generic_block_bmap(struct address_space *, sector_t, get_block_t *);
int generic_commit_write(struct file *, struct page *, unsigned, unsigned);
int block_truncate_page(struct address_space *, loff_t, get_block_t *);
@@ -277,6 +280,7 @@ map_bh(struct buffer_head *bh, struct super_block *sb, sector_t block)
set_buffer_mapped(bh);
bh->b_bdev = sb->s_bdev;
bh->b_blocknr = block;
+ bh->b_size = sb->s_blocksize;
}
/*
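
The b_size change above (u32 block size becoming a size_t "size of mapping") is what lets a single get_block_t call describe a multi-block extent: the caller passes the maximum it wants mapped in bh_result->b_size and the filesystem writes back how much it actually mapped. A hedged sketch of such an implementation follows; example_extent_lookup is a hypothetical helper, not a kernel API.

static int example_get_block(struct inode *inode, sector_t iblock,
			     struct buffer_head *bh_result, int create)
{
	unsigned blkbits = inode->i_blkbits;
	unsigned long maxblocks = bh_result->b_size >> blkbits;
	unsigned long mapped;
	sector_t phys;

	/* hypothetical: find up to maxblocks contiguous blocks at iblock */
	mapped = example_extent_lookup(inode, iblock, maxblocks, &phys, create);
	if (!mapped)
		return 0;			/* hole: leave bh unmapped */

	map_bh(bh_result, inode->i_sb, phys);	/* sets b_size = blocksize */
	bh_result->b_size = mapped << blkbits;	/* report the whole extent */
	return 0;
}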
diff --git a/include/linux/compat.h b/include/linux/compat.h
index c9ab2a26348..24d659cdbaf 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -45,6 +45,32 @@ struct compat_tms {
compat_clock_t tms_cstime;
};
+struct compat_timex {
+ compat_uint_t modes;
+ compat_long_t offset;
+ compat_long_t freq;
+ compat_long_t maxerror;
+ compat_long_t esterror;
+ compat_int_t status;
+ compat_long_t constant;
+ compat_long_t precision;
+ compat_long_t tolerance;
+ struct compat_timeval time;
+ compat_long_t tick;
+ compat_long_t ppsfreq;
+ compat_long_t jitter;
+ compat_int_t shift;
+ compat_long_t stabil;
+ compat_long_t jitcnt;
+ compat_long_t calcnt;
+ compat_long_t errcnt;
+ compat_long_t stbcnt;
+
+ compat_int_t :32; compat_int_t :32; compat_int_t :32; compat_int_t :32;
+ compat_int_t :32; compat_int_t :32; compat_int_t :32; compat_int_t :32;
+ compat_int_t :32; compat_int_t :32; compat_int_t :32; compat_int_t :32;
+};
+
#define _COMPAT_NSIG_WORDS (_COMPAT_NSIG / _COMPAT_NSIG_BPW)
typedef struct {
@@ -181,5 +207,7 @@ static inline int compat_timespec_compare(struct compat_timespec *lhs,
return lhs->tv_nsec - rhs->tv_nsec;
}
+asmlinkage long compat_sys_adjtimex(struct compat_timex __user *utp);
+
#endif /* CONFIG_COMPAT */
#endif /* _LINUX_COMPAT_H */
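
A rough sketch of how a struct compat_timex is typically consumed by compat_sys_adjtimex(): each 32-bit field is widened into the native struct timex before the normal syscall path runs. Simplified and illustrative only; real code copies every field and does full error handling.

static int example_get_compat_timex(struct timex *txc,
				    const struct compat_timex __user *utp)
{
	struct compat_timex ctx;

	if (copy_from_user(&ctx, utp, sizeof(ctx)))
		return -EFAULT;

	memset(txc, 0, sizeof(*txc));
	txc->modes  = ctx.modes;	/* widen each 32-bit field ... */
	txc->offset = ctx.offset;
	txc->freq   = ctx.freq;
	/* ... remaining fields follow the same pattern ... */
	return 0;
}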
diff --git a/include/linux/efi.h b/include/linux/efi.h
index c7c5dd31618..e203613d3ae 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -240,19 +240,21 @@ struct efi_memory_map {
unsigned long desc_size;
};
+#define EFI_INVALID_TABLE_ADDR (~0UL)
+
/*
* All runtime access to EFI goes through this structure:
*/
extern struct efi {
efi_system_table_t *systab; /* EFI system table */
- void *mps; /* MPS table */
- void *acpi; /* ACPI table (IA64 ext 0.71) */
- void *acpi20; /* ACPI table (ACPI 2.0) */
- void *smbios; /* SM BIOS table */
- void *sal_systab; /* SAL system table */
- void *boot_info; /* boot info table */
- void *hcdp; /* HCDP table */
- void *uga; /* UGA table */
+ unsigned long mps; /* MPS table */
+ unsigned long acpi; /* ACPI table (IA64 ext 0.71) */
+ unsigned long acpi20; /* ACPI table (ACPI 2.0) */
+ unsigned long smbios; /* SM BIOS table */
+ unsigned long sal_systab; /* SAL system table */
+ unsigned long boot_info; /* boot info table */
+ unsigned long hcdp; /* HCDP table */
+ unsigned long uga; /* UGA table */
efi_get_time_t *get_time;
efi_set_time_t *set_time;
efi_get_wakeup_time_t *get_wakeup_time;
@@ -292,6 +294,8 @@ extern void efi_enter_virtual_mode (void); /* switch EFI to virtual mode, if pos
extern u64 efi_get_iobase (void);
extern u32 efi_mem_type (unsigned long phys_addr);
extern u64 efi_mem_attributes (unsigned long phys_addr);
+extern int efi_mem_attribute_range (unsigned long phys_addr, unsigned long size,
+ u64 attr);
extern int __init efi_uart_console_only (void);
extern void efi_initialize_iomem_resources(struct resource *code_resource,
struct resource *data_resource);
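
With the EFI table pointers above turned into physical addresses, "table absent" is now signalled by EFI_INVALID_TABLE_ADDR rather than a NULL pointer. A minimal, illustrative check:

static inline int example_have_acpi20_table(void)
{
	/* efi.acpi20 is a physical address, ~0UL when not provided */
	return efi.acpi20 != EFI_INVALID_TABLE_ADDR;
}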
diff --git a/include/linux/ext3_fs.h b/include/linux/ext3_fs.h
index e7239f2f97a..8bb4f842cde 100644
--- a/include/linux/ext3_fs.h
+++ b/include/linux/ext3_fs.h
@@ -36,7 +36,8 @@ struct statfs;
* Define EXT3_RESERVATION to reserve data blocks for expanding files
*/
#define EXT3_DEFAULT_RESERVE_BLOCKS 8
-#define EXT3_MAX_RESERVE_BLOCKS 1024
+/*max window size: 1024(direct blocks) + 3([t,d]indirect blocks) */
+#define EXT3_MAX_RESERVE_BLOCKS 1027
#define EXT3_RESERVE_WINDOW_NOT_ALLOCATED 0
/*
* Always enable hashed directories
@@ -732,6 +733,8 @@ struct dir_private_info {
extern int ext3_bg_has_super(struct super_block *sb, int group);
extern unsigned long ext3_bg_num_gdb(struct super_block *sb, int group);
extern int ext3_new_block (handle_t *, struct inode *, unsigned long, int *);
+extern int ext3_new_blocks (handle_t *, struct inode *, unsigned long,
+ unsigned long *, int *);
extern void ext3_free_blocks (handle_t *, struct inode *, unsigned long,
unsigned long);
extern void ext3_free_blocks_sb (handle_t *, struct super_block *,
@@ -775,9 +778,9 @@ extern unsigned long ext3_count_free (struct buffer_head *, unsigned);
int ext3_forget(handle_t *, int, struct inode *, struct buffer_head *, int);
struct buffer_head * ext3_getblk (handle_t *, struct inode *, long, int, int *);
struct buffer_head * ext3_bread (handle_t *, struct inode *, int, int, int *);
-int ext3_get_block_handle(handle_t *handle, struct inode *inode,
- sector_t iblock, struct buffer_head *bh_result, int create,
- int extend_disksize);
+int ext3_get_blocks_handle(handle_t *handle, struct inode *inode,
+ sector_t iblock, unsigned long maxblocks, struct buffer_head *bh_result,
+ int create, int extend_disksize);
extern void ext3_read_inode (struct inode *);
extern int ext3_write_inode (struct inode *, int);
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 5adf32b90f3..9d967494695 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -252,9 +252,6 @@ extern void __init files_init(unsigned long);
struct buffer_head;
typedef int (get_block_t)(struct inode *inode, sector_t iblock,
struct buffer_head *bh_result, int create);
-typedef int (get_blocks_t)(struct inode *inode, sector_t iblock,
- unsigned long max_blocks,
- struct buffer_head *bh_result, int create);
typedef void (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
ssize_t bytes, void *private);
@@ -350,7 +347,7 @@ struct writeback_control;
struct address_space_operations {
int (*writepage)(struct page *page, struct writeback_control *wbc);
int (*readpage)(struct file *, struct page *);
- int (*sync_page)(struct page *);
+ void (*sync_page)(struct page *);
/* Write back some dirty pages from this mapping. */
int (*writepages)(struct address_space *, struct writeback_control *);
@@ -369,7 +366,7 @@ struct address_space_operations {
int (*commit_write)(struct file *, struct page *, unsigned, unsigned);
/* Unfortunately this kludge is needed for FIBMAP. Don't use it */
sector_t (*bmap)(struct address_space *, sector_t);
- int (*invalidatepage) (struct page *, unsigned long);
+ void (*invalidatepage) (struct page *, unsigned long);
int (*releasepage) (struct page *, gfp_t);
ssize_t (*direct_IO)(int, struct kiocb *, const struct iovec *iov,
loff_t offset, unsigned long nr_segs);
@@ -490,7 +487,7 @@ struct inode {
unsigned int i_blkbits;
unsigned long i_blksize;
unsigned long i_version;
- unsigned long i_blocks;
+ blkcnt_t i_blocks;
unsigned short i_bytes;
spinlock_t i_lock; /* i_blocks, i_bytes, maybe i_size */
struct mutex i_mutex;
@@ -763,6 +760,7 @@ extern void locks_copy_lock(struct file_lock *, struct file_lock *);
extern void locks_remove_posix(struct file *, fl_owner_t);
extern void locks_remove_flock(struct file *);
extern int posix_test_lock(struct file *, struct file_lock *, struct file_lock *);
+extern int posix_lock_file_conf(struct file *, struct file_lock *, struct file_lock *);
extern int posix_lock_file(struct file *, struct file_lock *);
extern int posix_lock_file_wait(struct file *, struct file_lock *);
extern int posix_unblock_lock(struct file *, struct file_lock *);
@@ -1644,7 +1642,7 @@ static inline void do_generic_file_read(struct file * filp, loff_t *ppos,
ssize_t __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
struct block_device *bdev, const struct iovec *iov, loff_t offset,
- unsigned long nr_segs, get_blocks_t get_blocks, dio_iodone_t end_io,
+ unsigned long nr_segs, get_block_t get_block, dio_iodone_t end_io,
int lock_type);
enum {
@@ -1655,29 +1653,29 @@ enum {
static inline ssize_t blockdev_direct_IO(int rw, struct kiocb *iocb,
struct inode *inode, struct block_device *bdev, const struct iovec *iov,
- loff_t offset, unsigned long nr_segs, get_blocks_t get_blocks,
+ loff_t offset, unsigned long nr_segs, get_block_t get_block,
dio_iodone_t end_io)
{
return __blockdev_direct_IO(rw, iocb, inode, bdev, iov, offset,
- nr_segs, get_blocks, end_io, DIO_LOCKING);
+ nr_segs, get_block, end_io, DIO_LOCKING);
}
static inline ssize_t blockdev_direct_IO_no_locking(int rw, struct kiocb *iocb,
struct inode *inode, struct block_device *bdev, const struct iovec *iov,
- loff_t offset, unsigned long nr_segs, get_blocks_t get_blocks,
+ loff_t offset, unsigned long nr_segs, get_block_t get_block,
dio_iodone_t end_io)
{
return __blockdev_direct_IO(rw, iocb, inode, bdev, iov, offset,
- nr_segs, get_blocks, end_io, DIO_NO_LOCKING);
+ nr_segs, get_block, end_io, DIO_NO_LOCKING);
}
static inline ssize_t blockdev_direct_IO_own_locking(int rw, struct kiocb *iocb,
struct inode *inode, struct block_device *bdev, const struct iovec *iov,
- loff_t offset, unsigned long nr_segs, get_blocks_t get_blocks,
+ loff_t offset, unsigned long nr_segs, get_block_t get_block,
dio_iodone_t end_io)
{
return __blockdev_direct_IO(rw, iocb, inode, bdev, iov, offset,
- nr_segs, get_blocks, end_io, DIO_OWN_LOCKING);
+ nr_segs, get_block, end_io, DIO_OWN_LOCKING);
}
extern struct file_operations generic_ro_fops;
diff --git a/include/linux/gameport.h b/include/linux/gameport.h
index 2401dea2b86..9c8e6da2393 100644
--- a/include/linux/gameport.h
+++ b/include/linux/gameport.h
@@ -119,7 +119,7 @@ static inline void gameport_set_name(struct gameport *gameport, const char *name
}
/*
- * Use the following fucntions to manipulate gameport's per-port
+ * Use the following functions to manipulate gameport's per-port
* driver-specific data.
*/
static inline void *gameport_get_drvdata(struct gameport *gameport)
@@ -133,7 +133,7 @@ static inline void gameport_set_drvdata(struct gameport *gameport, void *data)
}
/*
- * Use the following fucntions to pin gameport's driver in process context
+ * Use the following functions to pin gameport's driver in process context
*/
static inline int gameport_pin_driver(struct gameport *gameport)
{
diff --git a/include/linux/gigaset_dev.h b/include/linux/gigaset_dev.h
new file mode 100644
index 00000000000..70ad09c8ad1
--- /dev/null
+++ b/include/linux/gigaset_dev.h
@@ -0,0 +1,32 @@
+/*
+ * interface to user space for the gigaset driver
+ *
+ * Copyright (c) 2004 by Hansjoerg Lipp <hjlipp@web.de>
+ *
+ * =====================================================================
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ * =====================================================================
+ * Version: $Id: gigaset_dev.h,v 1.4.4.4 2005/11/21 22:28:09 hjlipp Exp $
+ * =====================================================================
+ */
+
+#ifndef GIGASET_INTERFACE_H
+#define GIGASET_INTERFACE_H
+
+#include <linux/ioctl.h>
+
+#define GIGASET_IOCTL 0x47
+
+#define GIGVER_DRIVER 0
+#define GIGVER_COMPAT 1
+#define GIGVER_FWBASE 2
+
+#define GIGASET_REDIR _IOWR (GIGASET_IOCTL, 0, int)
+#define GIGASET_CONFIG _IOWR (GIGASET_IOCTL, 1, int)
+#define GIGASET_BRKCHARS _IOW (GIGASET_IOCTL, 2, unsigned char[6]) //FIXME [6] okay?
+#define GIGASET_VERSION _IOWR (GIGASET_IOCTL, 3, unsigned[4])
+
+#endif
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index 6bece9280eb..892c4ea1b42 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -7,6 +7,18 @@
#include <asm/cacheflush.h>
+#ifndef ARCH_HAS_FLUSH_ANON_PAGE
+static inline void flush_anon_page(struct page *page, unsigned long vmaddr)
+{
+}
+#endif
+
+#ifndef ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
+static inline void flush_kernel_dcache_page(struct page *page)
+{
+}
+#endif
+
#ifdef CONFIG_HIGHMEM
#include <asm/highmem.h>
diff --git a/include/linux/hpet.h b/include/linux/hpet.h
index 27238194b21..707f7cb9e79 100644
--- a/include/linux/hpet.h
+++ b/include/linux/hpet.h
@@ -3,6 +3,8 @@
#include <linux/compiler.h>
+#ifdef __KERNEL__
+
/*
* Offsets into HPET Registers
*/
@@ -85,22 +87,6 @@ struct hpet {
#define Tn_FSB_INT_ADDR_SHIFT (32UL)
#define Tn_FSB_INT_VAL_MASK (0x00000000ffffffffULL)
-struct hpet_info {
- unsigned long hi_ireqfreq; /* Hz */
- unsigned long hi_flags; /* information */
- unsigned short hi_hpet;
- unsigned short hi_timer;
-};
-
-#define HPET_INFO_PERIODIC 0x0001 /* timer is periodic */
-
-#define HPET_IE_ON _IO('h', 0x01) /* interrupt on */
-#define HPET_IE_OFF _IO('h', 0x02) /* interrupt off */
-#define HPET_INFO _IOR('h', 0x03, struct hpet_info)
-#define HPET_EPI _IO('h', 0x04) /* enable periodic */
-#define HPET_DPI _IO('h', 0x05) /* disable periodic */
-#define HPET_IRQFREQ _IOW('h', 0x6, unsigned long) /* IRQFREQ usec */
-
/*
* exported interfaces
*/
@@ -133,4 +119,22 @@ int hpet_register(struct hpet_task *, int);
int hpet_unregister(struct hpet_task *);
int hpet_control(struct hpet_task *, unsigned int, unsigned long);
+#endif /* __KERNEL__ */
+
+struct hpet_info {
+ unsigned long hi_ireqfreq; /* Hz */
+ unsigned long hi_flags; /* information */
+ unsigned short hi_hpet;
+ unsigned short hi_timer;
+};
+
+#define HPET_INFO_PERIODIC 0x0001 /* timer is periodic */
+
+#define HPET_IE_ON _IO('h', 0x01) /* interrupt on */
+#define HPET_IE_OFF _IO('h', 0x02) /* interrupt off */
+#define HPET_INFO _IOR('h', 0x03, struct hpet_info)
+#define HPET_EPI _IO('h', 0x04) /* enable periodic */
+#define HPET_DPI _IO('h', 0x05) /* disable periodic */
+#define HPET_IRQFREQ _IOW('h', 0x6, unsigned long) /* IRQFREQ usec */
+
#endif /* !__HPET__ */
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index 6401c31d6ad..93830158348 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -34,15 +34,7 @@ enum hrtimer_restart {
HRTIMER_RESTART,
};
-/*
- * Timer states:
- */
-enum hrtimer_state {
- HRTIMER_INACTIVE, /* Timer is inactive */
- HRTIMER_EXPIRED, /* Timer is expired */
- HRTIMER_RUNNING, /* Timer is running the callback function */
- HRTIMER_PENDING, /* Timer is pending */
-};
+#define HRTIMER_INACTIVE ((void *)1UL)
struct hrtimer_base;
@@ -53,9 +45,7 @@ struct hrtimer_base;
* @expires: the absolute expiry time in the hrtimers internal
* representation. The time is related to the clock on
* which the timer is based.
- * @state: state of the timer
* @function: timer expiry callback function
- * @data: argument for the callback function
* @base: pointer to the timer base (per cpu and per clock)
*
* The hrtimer structure must be initialized by init_hrtimer_#CLOCKTYPE()
@@ -63,23 +53,23 @@ struct hrtimer_base;
struct hrtimer {
struct rb_node node;
ktime_t expires;
- enum hrtimer_state state;
- int (*function)(void *);
- void *data;
+ int (*function)(struct hrtimer *);
struct hrtimer_base *base;
};
/**
* struct hrtimer_base - the timer base for a specific clock
*
- * @index: clock type index for per_cpu support when moving a timer
- * to a base on another cpu.
- * @lock: lock protecting the base and associated timers
- * @active: red black tree root node for the active timers
- * @first: pointer to the timer node which expires first
- * @resolution: the resolution of the clock, in nanoseconds
- * @get_time: function to retrieve the current time of the clock
- * @curr_timer: the timer which is executing a callback right now
+ * @index: clock type index for per_cpu support when moving a timer
+ * to a base on another cpu.
+ * @lock: lock protecting the base and associated timers
+ * @active: red black tree root node for the active timers
+ * @first: pointer to the timer node which expires first
+ * @resolution: the resolution of the clock, in nanoseconds
+ * @get_time: function to retrieve the current time of the clock
+ * @get_softirq_time: function to retrieve the current time from the softirq
+ * @curr_timer: the timer which is executing a callback right now
+ * @softirq_time: the time when running the hrtimer queue in the softirq
*/
struct hrtimer_base {
clockid_t index;
@@ -88,7 +78,9 @@ struct hrtimer_base {
struct rb_node *first;
ktime_t resolution;
ktime_t (*get_time)(void);
+ ktime_t (*get_softirq_time)(void);
struct hrtimer *curr_timer;
+ ktime_t softirq_time;
};
/*
@@ -122,11 +114,12 @@ extern ktime_t hrtimer_get_next_event(void);
static inline int hrtimer_active(const struct hrtimer *timer)
{
- return timer->state == HRTIMER_PENDING;
+ return timer->node.rb_parent != HRTIMER_INACTIVE;
}
/* Forward a hrtimer so it expires after now: */
-extern unsigned long hrtimer_forward(struct hrtimer *timer, ktime_t interval);
+extern unsigned long
+hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval);
/* Precise sleep: */
extern long hrtimer_nanosleep(struct timespec *rqtp,
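
Putting the hrtimer.h changes together: callbacks now receive the struct hrtimer itself (no separate data pointer), and a restarting handler re-arms with hrtimer_forward(timer, now, interval), where now is typically the base's softirq_time. A sketch under those assumptions; struct example_periodic and its fields are made up for illustration.

struct example_periodic {
	struct hrtimer	timer;
	ktime_t		interval;
};

static int example_timer_fn(struct hrtimer *timer)
{
	struct example_periodic *p =
		container_of(timer, struct example_periodic, timer);

	/* ... periodic work ... */

	/* re-arm relative to the softirq run time, not the old expiry */
	hrtimer_forward(timer, timer->base->softirq_time, p->interval);
	return HRTIMER_RESTART;
}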
diff --git a/include/linux/i2o.h b/include/linux/i2o.h
index 5a9d8c59917..dd7d627bf66 100644
--- a/include/linux/i2o.h
+++ b/include/linux/i2o.h
@@ -950,9 +950,7 @@ static inline int i2o_pool_alloc(struct i2o_pool *pool, const char *name,
if (!pool->slab)
goto free_name;
- pool->mempool =
- mempool_create(min_nr, mempool_alloc_slab, mempool_free_slab,
- pool->slab);
+ pool->mempool = mempool_create_slab_pool(min_nr, pool->slab);
if (!pool->mempool)
goto free_slab;
diff --git a/include/linux/ipmi.h b/include/linux/ipmi.h
index d6276e60b3b..0a84b56935c 100644
--- a/include/linux/ipmi.h
+++ b/include/linux/ipmi.h
@@ -36,6 +36,7 @@
#include <linux/ipmi_msgdefs.h>
#include <linux/compiler.h>
+#include <linux/device.h>
/*
* This file describes an interface to an IPMI driver. You have to
@@ -397,7 +398,7 @@ struct ipmi_smi_watcher
the watcher list. So you can add and remove users from the
IPMI interface, send messages, etc., but you cannot add
or remove SMI watchers or SMI interfaces. */
- void (*new_smi)(int if_num);
+ void (*new_smi)(int if_num, struct device *dev);
void (*smi_gone)(int if_num);
};
diff --git a/include/linux/ipmi_msgdefs.h b/include/linux/ipmi_msgdefs.h
index 03bc64dc2ec..22f5e2afda4 100644
--- a/include/linux/ipmi_msgdefs.h
+++ b/include/linux/ipmi_msgdefs.h
@@ -47,6 +47,7 @@
#define IPMI_NETFN_APP_RESPONSE 0x07
#define IPMI_GET_DEVICE_ID_CMD 0x01
#define IPMI_CLEAR_MSG_FLAGS_CMD 0x30
+#define IPMI_GET_DEVICE_GUID_CMD 0x08
#define IPMI_GET_MSG_FLAGS_CMD 0x31
#define IPMI_SEND_MSG_CMD 0x34
#define IPMI_GET_MSG_CMD 0x33
diff --git a/include/linux/ipmi_smi.h b/include/linux/ipmi_smi.h
index e36ee157ad6..53571288a9f 100644
--- a/include/linux/ipmi_smi.h
+++ b/include/linux/ipmi_smi.h
@@ -37,6 +37,9 @@
#include <linux/ipmi_msgdefs.h>
#include <linux/proc_fs.h>
#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/ipmi_smi.h>
/* This files describes the interface for IPMI system management interface
drivers to bind into the IPMI message handler. */
@@ -113,12 +116,52 @@ struct ipmi_smi_handlers
void (*dec_usecount)(void *send_info);
};
+struct ipmi_device_id {
+ unsigned char device_id;
+ unsigned char device_revision;
+ unsigned char firmware_revision_1;
+ unsigned char firmware_revision_2;
+ unsigned char ipmi_version;
+ unsigned char additional_device_support;
+ unsigned int manufacturer_id;
+ unsigned int product_id;
+ unsigned char aux_firmware_revision[4];
+ unsigned int aux_firmware_revision_set : 1;
+};
+
+#define ipmi_version_major(v) ((v)->ipmi_version & 0xf)
+#define ipmi_version_minor(v) ((v)->ipmi_version >> 4)
+
+/* Take a pointer to a raw data buffer and a length and extract device
+ id information from it. The first byte of data must point to the
+ byte from the get device id response after the completion code.
+ The caller is responsible for making sure the length is at least
+ 11 and the command completed without error. */
+static inline void ipmi_demangle_device_id(unsigned char *data,
+ unsigned int data_len,
+ struct ipmi_device_id *id)
+{
+ id->device_id = data[0];
+ id->device_revision = data[1];
+ id->firmware_revision_1 = data[2];
+ id->firmware_revision_2 = data[3];
+ id->ipmi_version = data[4];
+ id->additional_device_support = data[5];
+ id->manufacturer_id = data[6] | (data[7] << 8) | (data[8] << 16);
+ id->product_id = data[9] | (data[10] << 8);
+ if (data_len >= 15) {
+ memcpy(id->aux_firmware_revision, data+11, 4);
+ id->aux_firmware_revision_set = 1;
+ } else
+ id->aux_firmware_revision_set = 0;
+}
+
/* Add a low-level interface to the IPMI driver. Note that if the
interface doesn't know its slave address, it should pass in zero. */
int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
void *send_info,
- unsigned char version_major,
- unsigned char version_minor,
+ struct ipmi_device_id *device_id,
+ struct device *dev,
unsigned char slave_addr,
ipmi_smi_t *intf);
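
For illustration, a minimal sketch (driver names and fields are invented, not part of this patch) of how an SMI driver adapts to the reworked registration call: the raw Get Device ID response is decoded with ipmi_demangle_device_id() and the resulting struct, together with the driver's struct device, replaces the old major/minor version pair.

struct example_smi {
	struct device		*dev;
	unsigned char		slave_addr;
	ipmi_smi_t		intf;
};

static struct ipmi_smi_handlers example_smi_handlers;	/* filled in elsewhere */

static int example_smi_register(struct example_smi *info,
				unsigned char *resp, unsigned int resp_len)
{
	struct ipmi_device_id id;

	/* resp points at the byte after the completion code */
	if (resp_len < 11)
		return -EINVAL;
	ipmi_demangle_device_id(resp, resp_len, &id);

	return ipmi_register_smi(&example_smi_handlers, info, &id,
				 info->dev, info->slave_addr, &info->intf);
}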
diff --git a/include/linux/jbd.h b/include/linux/jbd.h
index 4fc7dffd66e..6a425e370cb 100644
--- a/include/linux/jbd.h
+++ b/include/linux/jbd.h
@@ -895,7 +895,7 @@ extern int journal_dirty_metadata (handle_t *, struct buffer_head *);
extern void journal_release_buffer (handle_t *, struct buffer_head *);
extern int journal_forget (handle_t *, struct buffer_head *);
extern void journal_sync_buffer (struct buffer_head *);
-extern int journal_invalidatepage(journal_t *,
+extern void journal_invalidatepage(journal_t *,
struct page *, unsigned long);
extern int journal_try_to_free_buffers(journal_t *, struct page *, gfp_t);
extern int journal_stop(handle_t *);
diff --git a/include/linux/ktime.h b/include/linux/ktime.h
index f3dec45ef87..62bc5758070 100644
--- a/include/linux/ktime.h
+++ b/include/linux/ktime.h
@@ -64,9 +64,6 @@ typedef union {
#if (BITS_PER_LONG == 64) || defined(CONFIG_KTIME_SCALAR)
-/* Define a ktime_t variable and initialize it to zero: */
-#define DEFINE_KTIME(kt) ktime_t kt = { .tv64 = 0 }
-
/**
* ktime_set - Set a ktime_t variable from a seconds/nanoseconds value
*
@@ -113,9 +110,6 @@ static inline ktime_t timeval_to_ktime(struct timeval tv)
/* Map the ktime_t to timeval conversion to ns_to_timeval function */
#define ktime_to_timeval(kt) ns_to_timeval((kt).tv64)
-/* Map the ktime_t to clock_t conversion to the inline in jiffies.h: */
-#define ktime_to_clock_t(kt) nsec_to_clock_t((kt).tv64)
-
/* Convert ktime_t to nanoseconds - NOP in the scalar storage format: */
#define ktime_to_ns(kt) ((kt).tv64)
@@ -136,9 +130,6 @@ static inline ktime_t timeval_to_ktime(struct timeval tv)
* tv.sec < 0 and 0 >= tv.nsec < NSEC_PER_SEC
*/
-/* Define a ktime_t variable and initialize it to zero: */
-#define DEFINE_KTIME(kt) ktime_t kt = { .tv64 = 0 }
-
/* Set a ktime_t variable to a value in sec/nsec representation: */
static inline ktime_t ktime_set(const long secs, const unsigned long nsecs)
{
@@ -255,17 +246,6 @@ static inline struct timeval ktime_to_timeval(const ktime_t kt)
}
/**
- * ktime_to_clock_t - convert a ktime_t variable to clock_t format
- * @kt: the ktime_t variable to convert
- *
- * Returns a clock_t variable with the converted value
- */
-static inline clock_t ktime_to_clock_t(const ktime_t kt)
-{
- return nsec_to_clock_t( (u64) kt.tv.sec * NSEC_PER_SEC + kt.tv.nsec);
-}
-
-/**
* ktime_to_ns - convert a ktime_t variable to scalar nanoseconds
* @kt: the ktime_t variable to convert
*
diff --git a/include/linux/mempool.h b/include/linux/mempool.h
index f2427d7394b..9be484d1128 100644
--- a/include/linux/mempool.h
+++ b/include/linux/mempool.h
@@ -6,6 +6,8 @@
#include <linux/wait.h>
+struct kmem_cache;
+
typedef void * (mempool_alloc_t)(gfp_t gfp_mask, void *pool_data);
typedef void (mempool_free_t)(void *element, void *pool_data);
@@ -37,5 +39,41 @@ extern void mempool_free(void *element, mempool_t *pool);
*/
void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data);
void mempool_free_slab(void *element, void *pool_data);
+static inline mempool_t *
+mempool_create_slab_pool(int min_nr, struct kmem_cache *kc)
+{
+ return mempool_create(min_nr, mempool_alloc_slab, mempool_free_slab,
+ (void *) kc);
+}
+
+/*
+ * 2 mempool_alloc_t's and a mempool_free_t to kmalloc/kzalloc and kfree
+ * the amount of memory specified by pool_data
+ */
+void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data);
+void *mempool_kzalloc(gfp_t gfp_mask, void *pool_data);
+void mempool_kfree(void *element, void *pool_data);
+static inline mempool_t *mempool_create_kmalloc_pool(int min_nr, size_t size)
+{
+ return mempool_create(min_nr, mempool_kmalloc, mempool_kfree,
+ (void *) size);
+}
+static inline mempool_t *mempool_create_kzalloc_pool(int min_nr, size_t size)
+{
+ return mempool_create(min_nr, mempool_kzalloc, mempool_kfree,
+ (void *) size);
+}
+
+/*
+ * A mempool_alloc_t and mempool_free_t for a simple page allocator that
+ * allocates pages of the order specified by pool_data
+ */
+void *mempool_alloc_pages(gfp_t gfp_mask, void *pool_data);
+void mempool_free_pages(void *element, void *pool_data);
+static inline mempool_t *mempool_create_page_pool(int min_nr, int order)
+{
+ return mempool_create(min_nr, mempool_alloc_pages, mempool_free_pages,
+ (void *)(long)order);
+}
#endif /* _LINUX_MEMPOOL_H */
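
A quick usage sketch of the new wrappers (the cache name and pool sizes are made up): each helper simply curries mempool_create() with the matching alloc/free pair, so open-coded callers such as the i2o change above collapse to one line.

static struct kmem_cache *req_cache;
static mempool_t *req_pool, *hdr_pool, *example_page_pool;

static int __init example_pools_init(void)
{
	req_cache = kmem_cache_create("example_req", 128, 0, 0, NULL, NULL);
	if (!req_cache)
		return -ENOMEM;

	/* was: mempool_create(16, mempool_alloc_slab, mempool_free_slab, req_cache) */
	req_pool = mempool_create_slab_pool(16, req_cache);

	/* fixed-size kmalloc'ed buffers and order-0 pages */
	hdr_pool = mempool_create_kmalloc_pool(4, 512);
	example_page_pool = mempool_create_page_pool(2, 0);

	if (!req_pool || !hdr_pool || !example_page_pool)
		return -ENOMEM;
	return 0;
}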
diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
index aa6322d4519..cb224cf653b 100644
--- a/include/linux/proc_fs.h
+++ b/include/linux/proc_fs.h
@@ -4,6 +4,7 @@
#include <linux/config.h>
#include <linux/slab.h>
#include <linux/fs.h>
+#include <linux/spinlock.h>
#include <asm/atomic.h>
/*
@@ -55,7 +56,7 @@ struct proc_dir_entry {
nlink_t nlink;
uid_t uid;
gid_t gid;
- unsigned long size;
+ loff_t size;
struct inode_operations * proc_iops;
struct file_operations * proc_fops;
get_info_t *get_info;
@@ -92,6 +93,8 @@ extern struct proc_dir_entry *proc_bus;
extern struct proc_dir_entry *proc_root_driver;
extern struct proc_dir_entry *proc_root_kcore;
+extern spinlock_t proc_subdir_lock;
+
extern void proc_root_init(void);
extern void proc_misc_init(void);
diff --git a/include/linux/sched.h b/include/linux/sched.h
index e0054c1b9a0..036d14d2bf9 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -402,6 +402,7 @@ struct signal_struct {
/* ITIMER_REAL timer for the process */
struct hrtimer real_timer;
+ struct task_struct *tsk;
ktime_t it_real_incr;
/* ITIMER_PROF and ITIMER_VIRTUAL timers for the process */
diff --git a/include/linux/serio.h b/include/linux/serio.h
index aa4d6493a03..690aabca8ed 100644
--- a/include/linux/serio.h
+++ b/include/linux/serio.h
@@ -119,7 +119,7 @@ static inline void serio_cleanup(struct serio *serio)
}
/*
- * Use the following fucntions to manipulate serio's per-port
+ * Use the following functions to manipulate serio's per-port
* driver-specific data.
*/
static inline void *serio_get_drvdata(struct serio *serio)
@@ -133,7 +133,7 @@ static inline void serio_set_drvdata(struct serio *serio, void *data)
}
/*
- * Use the following fucntions to protect critical sections in
+ * Use the following functions to protect critical sections in
* driver code from port's interrupt handler
*/
static inline void serio_pause_rx(struct serio *serio)
@@ -147,7 +147,7 @@ static inline void serio_continue_rx(struct serio *serio)
}
/*
- * Use the following fucntions to pin serio's driver in process context
+ * Use the following functions to pin serio's driver in process context
*/
static inline int serio_pin_driver(struct serio *serio)
{
diff --git a/include/linux/smp.h b/include/linux/smp.h
index d699a16b0cb..e2fa3ab4afc 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -82,7 +82,11 @@ void smp_prepare_boot_cpu(void);
*/
#define raw_smp_processor_id() 0
#define hard_smp_processor_id() 0
-#define smp_call_function(func,info,retry,wait) ({ 0; })
+static inline int up_smp_call_function(void)
+{
+ return 0;
+}
+#define smp_call_function(func,info,retry,wait) (up_smp_call_function())
#define on_each_cpu(func,info,retry,wait) \
({ \
local_irq_disable(); \
diff --git a/include/linux/stat.h b/include/linux/stat.h
index 8ff2a122dfe..8669291352d 100644
--- a/include/linux/stat.h
+++ b/include/linux/stat.h
@@ -69,7 +69,7 @@ struct kstat {
struct timespec mtime;
struct timespec ctime;
unsigned long blksize;
- unsigned long blocks;
+ unsigned long long blocks;
};
#endif
diff --git a/include/linux/statfs.h b/include/linux/statfs.h
index ad83a2bdb82..b34cc829f98 100644
--- a/include/linux/statfs.h
+++ b/include/linux/statfs.h
@@ -8,11 +8,11 @@
struct kstatfs {
long f_type;
long f_bsize;
- sector_t f_blocks;
- sector_t f_bfree;
- sector_t f_bavail;
- sector_t f_files;
- sector_t f_ffree;
+ u64 f_blocks;
+ u64 f_bfree;
+ u64 f_bavail;
+ u64 f_files;
+ u64 f_ffree;
__kernel_fsid_t f_fsid;
long f_namelen;
long f_frsize;
diff --git a/include/linux/time.h b/include/linux/time.h
index bf0e785e2e0..0cd696cee99 100644
--- a/include/linux/time.h
+++ b/include/linux/time.h
@@ -73,12 +73,6 @@ extern void set_normalized_timespec(struct timespec *ts, time_t sec, long nsec);
#define timespec_valid(ts) \
(((ts)->tv_sec >= 0) && (((unsigned long) (ts)->tv_nsec) < NSEC_PER_SEC))
-/*
- * 64-bit nanosec type. Large enough to span 292+ years in nanosecond
- * resolution. Ought to be enough for a while.
- */
-typedef s64 nsec_t;
-
extern struct timespec xtime;
extern struct timespec wall_to_monotonic;
extern seqlock_t xtime_lock;
@@ -114,9 +108,9 @@ extern struct timespec timespec_trunc(struct timespec t, unsigned gran);
* Returns the scalar nanosecond representation of the timespec
* parameter.
*/
-static inline nsec_t timespec_to_ns(const struct timespec *ts)
+static inline s64 timespec_to_ns(const struct timespec *ts)
{
- return ((nsec_t) ts->tv_sec * NSEC_PER_SEC) + ts->tv_nsec;
+ return ((s64) ts->tv_sec * NSEC_PER_SEC) + ts->tv_nsec;
}
/**
@@ -126,9 +120,9 @@ static inline nsec_t timespec_to_ns(const struct timespec *ts)
* Returns the scalar nanosecond representation of the timeval
* parameter.
*/
-static inline nsec_t timeval_to_ns(const struct timeval *tv)
+static inline s64 timeval_to_ns(const struct timeval *tv)
{
- return ((nsec_t) tv->tv_sec * NSEC_PER_SEC) +
+ return ((s64) tv->tv_sec * NSEC_PER_SEC) +
tv->tv_usec * NSEC_PER_USEC;
}
@@ -138,7 +132,7 @@ static inline nsec_t timeval_to_ns(const struct timeval *tv)
*
* Returns the timespec representation of the nsec parameter.
*/
-extern struct timespec ns_to_timespec(const nsec_t nsec);
+extern struct timespec ns_to_timespec(const s64 nsec);
/**
* ns_to_timeval - Convert nanoseconds to timeval
@@ -146,7 +140,7 @@ extern struct timespec ns_to_timespec(const nsec_t nsec);
*
* Returns the timeval representation of the nsec parameter.
*/
-extern struct timeval ns_to_timeval(const nsec_t nsec);
+extern struct timeval ns_to_timeval(const s64 nsec);
#endif /* __KERNEL__ */
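
With nsec_t gone, callers operate on plain s64 nanoseconds; a trivial, purely illustrative sketch of the conversion helpers after this change:

static inline s64 example_timespec_delta_ns(const struct timespec *a,
					    const struct timespec *b)
{
	/* both helpers now take and return plain s64 nanoseconds */
	return timespec_to_ns(a) - timespec_to_ns(b);
}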
diff --git a/include/linux/timer.h b/include/linux/timer.h
index ee5a09e806e..b5caabca553 100644
--- a/include/linux/timer.h
+++ b/include/linux/timer.h
@@ -96,6 +96,7 @@ static inline void add_timer(struct timer_list *timer)
extern void init_timers(void);
extern void run_local_timers(void);
-extern int it_real_fn(void *);
+struct hrtimer;
+extern int it_real_fn(struct hrtimer *);
#endif
diff --git a/include/linux/timex.h b/include/linux/timex.h
index 82dc9ae79d3..03914b7e41b 100644
--- a/include/linux/timex.h
+++ b/include/linux/timex.h
@@ -307,6 +307,8 @@ time_interpolator_reset(void)
/* Returns how long ticks are at present, in ns / 2^(SHIFT_SCALE-10). */
extern u64 current_tick_length(void);
+extern int do_adjtimex(struct timex *);
+
#endif /* KERNEL */
#endif /* LINUX_TIMEX_H */
diff --git a/include/linux/types.h b/include/linux/types.h
index 54ae2d59e71..1046c7ad86d 100644
--- a/include/linux/types.h
+++ b/include/linux/types.h
@@ -137,6 +137,10 @@ typedef __s64 int64_t;
typedef unsigned long sector_t;
#endif
+#ifndef HAVE_BLKCNT_T
+typedef unsigned long blkcnt_t;
+#endif
+
/*
* The type of an index into the pagecache. Use a #define so asm/types.h
* can override it.
diff --git a/init/initramfs.c b/init/initramfs.c
index 77b934cccef..679d870d991 100644
--- a/init/initramfs.c
+++ b/init/initramfs.c
@@ -519,7 +519,7 @@ void __init populate_rootfs(void)
return;
}
printk("it isn't (%s); looks like an initrd\n", err);
- fd = sys_open("/initrd.image", O_WRONLY|O_CREAT, 700);
+ fd = sys_open("/initrd.image", O_WRONLY|O_CREAT, 0700);
if (fd >= 0) {
sys_write(fd, (char *)initrd_start,
initrd_end - initrd_start);
diff --git a/init/main.c b/init/main.c
index 006dcd547dc..64466ea1984 100644
--- a/init/main.c
+++ b/init/main.c
@@ -645,24 +645,6 @@ static void run_init_process(char *init_filename)
execve(init_filename, argv_init, envp_init);
}
-static inline void fixup_cpu_present_map(void)
-{
-#ifdef CONFIG_SMP
- int i;
-
- /*
- * If arch is not hotplug ready and did not populate
- * cpu_present_map, just make cpu_present_map same as cpu_possible_map
- * for other cpu bringup code to function as normal. e.g smp_init() etc.
- */
- if (cpus_empty(cpu_present_map)) {
- for_each_cpu(i) {
- cpu_set(i, cpu_present_map);
- }
- }
-#endif
-}
-
static int init(void * unused)
{
lock_kernel();
@@ -684,7 +666,6 @@ static int init(void * unused)
do_pre_smp_initcalls();
- fixup_cpu_present_map();
smp_init();
sched_init_smp();
diff --git a/ipc/compat.c b/ipc/compat.c
index 1fe95f6659d..a544dfbb082 100644
--- a/ipc/compat.c
+++ b/ipc/compat.c
@@ -30,7 +30,7 @@
#include <linux/slab.h>
#include <linux/syscalls.h>
-#include <asm/semaphore.h>
+#include <linux/mutex.h>
#include <asm/uaccess.h>
#include "util.h"
diff --git a/ipc/mqueue.c b/ipc/mqueue.c
index 85c52fd26bf..a3bb0c8201c 100644
--- a/ipc/mqueue.c
+++ b/ipc/mqueue.c
@@ -25,6 +25,8 @@
#include <linux/netlink.h>
#include <linux/syscalls.h>
#include <linux/signal.h>
+#include <linux/mutex.h>
+
#include <net/sock.h>
#include "util.h"
@@ -760,7 +762,7 @@ out_unlock:
* The receiver accepts the message and returns without grabbing the queue
* spinlock. Therefore an intermediate STATE_PENDING state and memory barriers
* are necessary. The same algorithm is used for sysv semaphores, see
- * ipc/sem.c fore more details.
+ * ipc/mutex.c for more details.
*
* The same algorithm is used for senders.
*/
diff --git a/ipc/msg.c b/ipc/msg.c
index 7eec5ed3237..48a7f17a723 100644
--- a/ipc/msg.c
+++ b/ipc/msg.c
@@ -28,6 +28,8 @@
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/seq_file.h>
+#include <linux/mutex.h>
+
#include <asm/current.h>
#include <asm/uaccess.h>
#include "util.h"
@@ -179,8 +181,8 @@ static void expunge_all(struct msg_queue* msq, int res)
* removes the message queue from message queue ID
* array, and cleans up all the messages associated with this queue.
*
- * msg_ids.sem and the spinlock for this message queue is hold
- * before freeque() is called. msg_ids.sem remains locked on exit.
+ * msg_ids.mutex and the spinlock for this message queue are held
+ * before freeque() is called. msg_ids.mutex remains locked on exit.
*/
static void freeque (struct msg_queue *msq, int id)
{
@@ -208,7 +210,7 @@ asmlinkage long sys_msgget (key_t key, int msgflg)
int id, ret = -EPERM;
struct msg_queue *msq;
- down(&msg_ids.sem);
+ mutex_lock(&msg_ids.mutex);
if (key == IPC_PRIVATE)
ret = newque(key, msgflg);
else if ((id = ipc_findkey(&msg_ids, key)) == -1) { /* key not used */
@@ -231,7 +233,7 @@ asmlinkage long sys_msgget (key_t key, int msgflg)
}
msg_unlock(msq);
}
- up(&msg_ids.sem);
+ mutex_unlock(&msg_ids.mutex);
return ret;
}
@@ -361,7 +363,7 @@ asmlinkage long sys_msgctl (int msqid, int cmd, struct msqid_ds __user *buf)
msginfo.msgmnb = msg_ctlmnb;
msginfo.msgssz = MSGSSZ;
msginfo.msgseg = MSGSEG;
- down(&msg_ids.sem);
+ mutex_lock(&msg_ids.mutex);
if (cmd == MSG_INFO) {
msginfo.msgpool = msg_ids.in_use;
msginfo.msgmap = atomic_read(&msg_hdrs);
@@ -372,7 +374,7 @@ asmlinkage long sys_msgctl (int msqid, int cmd, struct msqid_ds __user *buf)
msginfo.msgtql = MSGTQL;
}
max_id = msg_ids.max_id;
- up(&msg_ids.sem);
+ mutex_unlock(&msg_ids.mutex);
if (copy_to_user (buf, &msginfo, sizeof(struct msginfo)))
return -EFAULT;
return (max_id < 0) ? 0: max_id;
@@ -435,7 +437,7 @@ asmlinkage long sys_msgctl (int msqid, int cmd, struct msqid_ds __user *buf)
return -EINVAL;
}
- down(&msg_ids.sem);
+ mutex_lock(&msg_ids.mutex);
msq = msg_lock(msqid);
err=-EINVAL;
if (msq == NULL)
@@ -489,7 +491,7 @@ asmlinkage long sys_msgctl (int msqid, int cmd, struct msqid_ds __user *buf)
}
err = 0;
out_up:
- up(&msg_ids.sem);
+ mutex_unlock(&msg_ids.mutex);
return err;
out_unlock_up:
msg_unlock(msq);
diff --git a/ipc/sem.c b/ipc/sem.c
index 18a78fe9c55..642659cd596 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -75,6 +75,8 @@
#include <linux/audit.h>
#include <linux/capability.h>
#include <linux/seq_file.h>
+#include <linux/mutex.h>
+
#include <asm/uaccess.h>
#include "util.h"
@@ -139,7 +141,7 @@ void __init sem_init (void)
* * if it's IN_WAKEUP, then it must wait until the value changes
* * if it's not -EINTR, then the operation was completed by
* update_queue. semtimedop can return queue.status without
- * performing any operation on the semaphore array.
+ * performing any operation on the sem array.
* * otherwise it must acquire the spinlock and check what's up.
*
* The two-stage algorithm is necessary to protect against the following
@@ -214,7 +216,7 @@ asmlinkage long sys_semget (key_t key, int nsems, int semflg)
if (nsems < 0 || nsems > sc_semmsl)
return -EINVAL;
- down(&sem_ids.sem);
+ mutex_lock(&sem_ids.mutex);
if (key == IPC_PRIVATE) {
err = newary(key, nsems, semflg);
@@ -241,7 +243,7 @@ asmlinkage long sys_semget (key_t key, int nsems, int semflg)
sem_unlock(sma);
}
- up(&sem_ids.sem);
+ mutex_unlock(&sem_ids.mutex);
return err;
}
@@ -436,8 +438,8 @@ static int count_semzcnt (struct sem_array * sma, ushort semnum)
return semzcnt;
}
-/* Free a semaphore set. freeary() is called with sem_ids.sem down and
- * the spinlock for this semaphore set hold. sem_ids.sem remains locked
+/* Free a semaphore set. freeary() is called with sem_ids.mutex locked and
+ * the spinlock for this semaphore set held. sem_ids.mutex remains locked
* on exit.
*/
static void freeary (struct sem_array *sma, int id)
@@ -524,7 +526,7 @@ static int semctl_nolock(int semid, int semnum, int cmd, int version, union semu
seminfo.semmnu = SEMMNU;
seminfo.semmap = SEMMAP;
seminfo.semume = SEMUME;
- down(&sem_ids.sem);
+ mutex_lock(&sem_ids.mutex);
if (cmd == SEM_INFO) {
seminfo.semusz = sem_ids.in_use;
seminfo.semaem = used_sems;
@@ -533,7 +535,7 @@ static int semctl_nolock(int semid, int semnum, int cmd, int version, union semu
seminfo.semaem = SEMAEM;
}
max_id = sem_ids.max_id;
- up(&sem_ids.sem);
+ mutex_unlock(&sem_ids.mutex);
if (copy_to_user (arg.__buf, &seminfo, sizeof(struct seminfo)))
return -EFAULT;
return (max_id < 0) ? 0: max_id;
@@ -884,9 +886,9 @@ asmlinkage long sys_semctl (int semid, int semnum, int cmd, union semun arg)
return err;
case IPC_RMID:
case IPC_SET:
- down(&sem_ids.sem);
+ mutex_lock(&sem_ids.mutex);
err = semctl_down(semid,semnum,cmd,version,arg);
- up(&sem_ids.sem);
+ mutex_unlock(&sem_ids.mutex);
return err;
default:
return -EINVAL;
@@ -1297,9 +1299,9 @@ found:
/* perform adjustments registered in u */
nsems = sma->sem_nsems;
for (i = 0; i < nsems; i++) {
- struct sem * sem = &sma->sem_base[i];
+ struct sem * semaphore = &sma->sem_base[i];
if (u->semadj[i]) {
- sem->semval += u->semadj[i];
+ semaphore->semval += u->semadj[i];
/*
* Range checks of the new semaphore value,
* not defined by sus:
@@ -1313,11 +1315,11 @@ found:
*
* Manfred <manfred@colorfullife.com>
*/
- if (sem->semval < 0)
- sem->semval = 0;
- if (sem->semval > SEMVMX)
- sem->semval = SEMVMX;
- sem->sempid = current->tgid;
+ if (semaphore->semval < 0)
+ semaphore->semval = 0;
+ if (semaphore->semval > SEMVMX)
+ semaphore->semval = SEMVMX;
+ semaphore->sempid = current->tgid;
}
}
sma->sem_otime = get_seconds();
diff --git a/ipc/shm.c b/ipc/shm.c
index 6f9615c09fb..f806a2e314e 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -30,6 +30,7 @@
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>
+#include <linux/mutex.h>
#include <asm/uaccess.h>
@@ -109,7 +110,7 @@ static void shm_open (struct vm_area_struct *shmd)
*
* @shp: struct to free
*
- * It has to be called with shp and shm_ids.sem locked,
+ * It has to be called with shp and shm_ids.mutex locked,
* but returns with shp unlocked and freed.
*/
static void shm_destroy (struct shmid_kernel *shp)
@@ -139,7 +140,7 @@ static void shm_close (struct vm_area_struct *shmd)
int id = file->f_dentry->d_inode->i_ino;
struct shmid_kernel *shp;
- down (&shm_ids.sem);
+ mutex_lock(&shm_ids.mutex);
/* remove from the list of attaches of the shm segment */
if(!(shp = shm_lock(id)))
BUG();
@@ -151,7 +152,7 @@ static void shm_close (struct vm_area_struct *shmd)
shm_destroy (shp);
else
shm_unlock(shp);
- up (&shm_ids.sem);
+ mutex_unlock(&shm_ids.mutex);
}
static int shm_mmap(struct file * file, struct vm_area_struct * vma)
@@ -270,7 +271,7 @@ asmlinkage long sys_shmget (key_t key, size_t size, int shmflg)
struct shmid_kernel *shp;
int err, id = 0;
- down(&shm_ids.sem);
+ mutex_lock(&shm_ids.mutex);
if (key == IPC_PRIVATE) {
err = newseg(key, shmflg, size);
} else if ((id = ipc_findkey(&shm_ids, key)) == -1) {
@@ -296,7 +297,7 @@ asmlinkage long sys_shmget (key_t key, size_t size, int shmflg)
}
shm_unlock(shp);
}
- up(&shm_ids.sem);
+ mutex_unlock(&shm_ids.mutex);
return err;
}
@@ -467,14 +468,14 @@ asmlinkage long sys_shmctl (int shmid, int cmd, struct shmid_ds __user *buf)
return err;
memset(&shm_info,0,sizeof(shm_info));
- down(&shm_ids.sem);
+ mutex_lock(&shm_ids.mutex);
shm_info.used_ids = shm_ids.in_use;
shm_get_stat (&shm_info.shm_rss, &shm_info.shm_swp);
shm_info.shm_tot = shm_tot;
shm_info.swap_attempts = 0;
shm_info.swap_successes = 0;
err = shm_ids.max_id;
- up(&shm_ids.sem);
+ mutex_unlock(&shm_ids.mutex);
if(copy_to_user (buf, &shm_info, sizeof(shm_info))) {
err = -EFAULT;
goto out;
@@ -583,7 +584,7 @@ asmlinkage long sys_shmctl (int shmid, int cmd, struct shmid_ds __user *buf)
* Instead we set a destroyed flag, and then blow
* the name away when the usage hits zero.
*/
- down(&shm_ids.sem);
+ mutex_lock(&shm_ids.mutex);
shp = shm_lock(shmid);
err = -EINVAL;
if (shp == NULL)
@@ -610,7 +611,7 @@ asmlinkage long sys_shmctl (int shmid, int cmd, struct shmid_ds __user *buf)
shm_unlock(shp);
} else
shm_destroy (shp);
- up(&shm_ids.sem);
+ mutex_unlock(&shm_ids.mutex);
goto out;
}
@@ -620,12 +621,13 @@ asmlinkage long sys_shmctl (int shmid, int cmd, struct shmid_ds __user *buf)
err = -EFAULT;
goto out;
}
- down(&shm_ids.sem);
+ mutex_lock(&shm_ids.mutex);
shp = shm_lock(shmid);
err=-EINVAL;
if(shp==NULL)
goto out_up;
- if ((err = audit_ipc_perms(0, setbuf.uid, setbuf.gid, setbuf.mode, &(shp->shm_perm))))
+ if ((err = audit_ipc_perms(0, setbuf.uid, setbuf.gid,
+ setbuf.mode, &(shp->shm_perm))))
goto out_unlock_up;
err = shm_checkid(shp,shmid);
if(err)
@@ -658,7 +660,7 @@ asmlinkage long sys_shmctl (int shmid, int cmd, struct shmid_ds __user *buf)
out_unlock_up:
shm_unlock(shp);
out_up:
- up(&shm_ids.sem);
+ mutex_unlock(&shm_ids.mutex);
goto out;
out_unlock:
shm_unlock(shp);
@@ -771,7 +773,7 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
invalid:
up_write(&current->mm->mmap_sem);
- down (&shm_ids.sem);
+ mutex_lock(&shm_ids.mutex);
if(!(shp = shm_lock(shmid)))
BUG();
shp->shm_nattch--;
@@ -780,7 +782,7 @@ invalid:
shm_destroy (shp);
else
shm_unlock(shp);
- up (&shm_ids.sem);
+ mutex_unlock(&shm_ids.mutex);
*raddr = (unsigned long) user_addr;
err = 0;
diff --git a/ipc/util.c b/ipc/util.c
index 862621980b0..23151ef3259 100644
--- a/ipc/util.c
+++ b/ipc/util.c
@@ -68,7 +68,8 @@ __initcall(ipc_init);
void __init ipc_init_ids(struct ipc_ids* ids, int size)
{
int i;
- sema_init(&ids->sem,1);
+
+ mutex_init(&ids->mutex);
if(size > IPCMNI)
size = IPCMNI;
@@ -138,7 +139,7 @@ void __init ipc_init_proc_interface(const char *path, const char *header,
* @ids: Identifier set
* @key: The key to find
*
- * Requires ipc_ids.sem locked.
+ * Requires ipc_ids.mutex locked.
* Returns the identifier if found or -1 if not.
*/
@@ -150,7 +151,7 @@ int ipc_findkey(struct ipc_ids* ids, key_t key)
/*
* rcu_dereference() is not needed here
- * since ipc_ids.sem is held
+ * since ipc_ids.mutex is held
*/
for (id = 0; id <= max_id; id++) {
p = ids->entries->p[id];
@@ -163,7 +164,7 @@ int ipc_findkey(struct ipc_ids* ids, key_t key)
}
/*
- * Requires ipc_ids.sem locked
+ * Requires ipc_ids.mutex locked
*/
static int grow_ary(struct ipc_ids* ids, int newsize)
{
@@ -210,7 +211,7 @@ static int grow_ary(struct ipc_ids* ids, int newsize)
* is returned. The list is returned in a locked state on success.
* On failure the list is not locked and -1 is returned.
*
- * Called with ipc_ids.sem held.
+ * Called with ipc_ids.mutex held.
*/
int ipc_addid(struct ipc_ids* ids, struct kern_ipc_perm* new, int size)
@@ -221,7 +222,7 @@ int ipc_addid(struct ipc_ids* ids, struct kern_ipc_perm* new, int size)
/*
* rcu_dereference()() is not needed here since
- * ipc_ids.sem is held
+ * ipc_ids.mutex is held
*/
for (id = 0; id < size; id++) {
if(ids->entries->p[id] == NULL)
@@ -257,7 +258,7 @@ found:
* fed an invalid identifier. The entry is removed and internal
* variables recomputed. The object associated with the identifier
* is returned.
- * ipc_ids.sem and the spinlock for this ID is hold before this function
+ * ipc_ids.mutex and the spinlock for this ID are held before this function
* is called, and remain locked on the exit.
*/
@@ -270,7 +271,7 @@ struct kern_ipc_perm* ipc_rmid(struct ipc_ids* ids, int id)
/*
* do not need a rcu_dereference()() here to force ordering
- * on Alpha, since the ipc_ids.sem is held.
+ * on Alpha, since the ipc_ids.mutex is held.
*/
p = ids->entries->p[lid];
ids->entries->p[lid] = NULL;
@@ -530,13 +531,13 @@ void ipc64_perm_to_ipc_perm (struct ipc64_perm *in, struct ipc_perm *out)
/*
* So far only shm_get_stat() calls ipc_get() via shm_get(), so ipc_get()
- * is called with shm_ids.sem locked. Since grow_ary() is also called with
- * shm_ids.sem down(for Shared Memory), there is no need to add read
+ * is called with shm_ids.mutex locked. Since grow_ary() is also called with
+ * shm_ids.mutex held (for Shared Memory), there is no need to add read
* barriers here to gurantee the writes in grow_ary() are seen in order
* here (for Alpha).
*
- * However ipc_get() itself does not necessary require ipc_ids.sem down. So
- * if in the future ipc_get() is used by other places without ipc_ids.sem
+ * However ipc_get() itself does not necessarily require ipc_ids.mutex down. So
+ * if in the future ipc_get() is used by other places without ipc_ids.mutex
* down, then ipc_get() needs read memery barriers as ipc_lock() does.
*/
struct kern_ipc_perm* ipc_get(struct ipc_ids* ids, int id)
@@ -667,7 +668,7 @@ static void *sysvipc_proc_start(struct seq_file *s, loff_t *pos)
* Take the lock - this will be released by the corresponding
* call to stop().
*/
- down(&iface->ids->sem);
+ mutex_lock(&iface->ids->mutex);
/* pos < 0 is invalid */
if (*pos < 0)
@@ -697,7 +698,7 @@ static void sysvipc_proc_stop(struct seq_file *s, void *it)
ipc_unlock(ipc);
/* Release the lock we took in start() */
- up(&iface->ids->sem);
+ mutex_unlock(&iface->ids->mutex);
}
static int sysvipc_proc_show(struct seq_file *s, void *it)
diff --git a/ipc/util.h b/ipc/util.h
index efaff3ee7de..0181553d31d 100644
--- a/ipc/util.h
+++ b/ipc/util.h
@@ -25,7 +25,7 @@ struct ipc_ids {
int max_id;
unsigned short seq;
unsigned short seq_max;
- struct semaphore sem;
+ struct mutex mutex;
struct ipc_id_ary nullentry;
struct ipc_id_ary* entries;
};
@@ -40,7 +40,7 @@ void __init ipc_init_proc_interface(const char *path, const char *header,
#define ipc_init_proc_interface(path, header, ids, show) do {} while (0)
#endif
-/* must be called with ids->sem acquired.*/
+/* must be called with ids->mutex acquired.*/
int ipc_findkey(struct ipc_ids* ids, key_t key);
int ipc_addid(struct ipc_ids* ids, struct kern_ipc_perm* new, int size);
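
The ipc/ hunks above all follow one mechanical pattern; a condensed before/after sketch (the function name is invented for illustration):

static void example_ids_update(struct ipc_ids *ids)
{
	/* before: down(&ids->sem);   ...   up(&ids->sem); */
	mutex_lock(&ids->mutex);
	/* look up, add or remove identifiers while the mutex is held */
	mutex_unlock(&ids->mutex);
}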
diff --git a/kernel/compat.c b/kernel/compat.c
index 8c9cd88b678..b9bdd1271f4 100644
--- a/kernel/compat.c
+++ b/kernel/compat.c
@@ -21,6 +21,7 @@
#include <linux/syscalls.h>
#include <linux/unistd.h>
#include <linux/security.h>
+#include <linux/timex.h>
#include <asm/uaccess.h>
@@ -898,3 +899,61 @@ asmlinkage long compat_sys_rt_sigsuspend(compat_sigset_t __user *unewset, compat
return -ERESTARTNOHAND;
}
#endif /* __ARCH_WANT_COMPAT_SYS_RT_SIGSUSPEND */
+
+asmlinkage long compat_sys_adjtimex(struct compat_timex __user *utp)
+{
+ struct timex txc;
+ int ret;
+
+ memset(&txc, 0, sizeof(struct timex));
+
+ if (!access_ok(VERIFY_READ, utp, sizeof(struct compat_timex)) ||
+ __get_user(txc.modes, &utp->modes) ||
+ __get_user(txc.offset, &utp->offset) ||
+ __get_user(txc.freq, &utp->freq) ||
+ __get_user(txc.maxerror, &utp->maxerror) ||
+ __get_user(txc.esterror, &utp->esterror) ||
+ __get_user(txc.status, &utp->status) ||
+ __get_user(txc.constant, &utp->constant) ||
+ __get_user(txc.precision, &utp->precision) ||
+ __get_user(txc.tolerance, &utp->tolerance) ||
+ __get_user(txc.time.tv_sec, &utp->time.tv_sec) ||
+ __get_user(txc.time.tv_usec, &utp->time.tv_usec) ||
+ __get_user(txc.tick, &utp->tick) ||
+ __get_user(txc.ppsfreq, &utp->ppsfreq) ||
+ __get_user(txc.jitter, &utp->jitter) ||
+ __get_user(txc.shift, &utp->shift) ||
+ __get_user(txc.stabil, &utp->stabil) ||
+ __get_user(txc.jitcnt, &utp->jitcnt) ||
+ __get_user(txc.calcnt, &utp->calcnt) ||
+ __get_user(txc.errcnt, &utp->errcnt) ||
+ __get_user(txc.stbcnt, &utp->stbcnt))
+ return -EFAULT;
+
+ ret = do_adjtimex(&txc);
+
+ if (!access_ok(VERIFY_WRITE, utp, sizeof(struct compat_timex)) ||
+ __put_user(txc.modes, &utp->modes) ||
+ __put_user(txc.offset, &utp->offset) ||
+ __put_user(txc.freq, &utp->freq) ||
+ __put_user(txc.maxerror, &utp->maxerror) ||
+ __put_user(txc.esterror, &utp->esterror) ||
+ __put_user(txc.status, &utp->status) ||
+ __put_user(txc.constant, &utp->constant) ||
+ __put_user(txc.precision, &utp->precision) ||
+ __put_user(txc.tolerance, &utp->tolerance) ||
+ __put_user(txc.time.tv_sec, &utp->time.tv_sec) ||
+ __put_user(txc.time.tv_usec, &utp->time.tv_usec) ||
+ __put_user(txc.tick, &utp->tick) ||
+ __put_user(txc.ppsfreq, &utp->ppsfreq) ||
+ __put_user(txc.jitter, &utp->jitter) ||
+ __put_user(txc.shift, &utp->shift) ||
+ __put_user(txc.stabil, &utp->stabil) ||
+ __put_user(txc.jitcnt, &utp->jitcnt) ||
+ __put_user(txc.calcnt, &utp->calcnt) ||
+ __put_user(txc.errcnt, &utp->errcnt) ||
+ __put_user(txc.stbcnt, &utp->stbcnt))
+ ret = -EFAULT;
+
+ return ret;
+}
diff --git a/kernel/fork.c b/kernel/fork.c
index d93ab2ba729..e0a2b449dea 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -847,7 +847,7 @@ static inline int copy_signal(unsigned long clone_flags, struct task_struct * ts
hrtimer_init(&sig->real_timer, CLOCK_MONOTONIC, HRTIMER_REL);
sig->it_real_incr.tv64 = 0;
sig->real_timer.function = it_real_fn;
- sig->real_timer.data = tsk;
+ sig->tsk = tsk;
sig->it_virt_expires = cputime_zero;
sig->it_virt_incr = cputime_zero;
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index 14bc9cfa639..0237a556eb1 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -123,6 +123,26 @@ void ktime_get_ts(struct timespec *ts)
EXPORT_SYMBOL_GPL(ktime_get_ts);
/*
+ * Get the coarse grained time at the softirq based on xtime and
+ * wall_to_monotonic.
+ */
+static void hrtimer_get_softirq_time(struct hrtimer_base *base)
+{
+ ktime_t xtim, tomono;
+ unsigned long seq;
+
+ do {
+ seq = read_seqbegin(&xtime_lock);
+ xtim = timespec_to_ktime(xtime);
+ tomono = timespec_to_ktime(wall_to_monotonic);
+
+ } while (read_seqretry(&xtime_lock, seq));
+
+ base[CLOCK_REALTIME].softirq_time = xtim;
+ base[CLOCK_MONOTONIC].softirq_time = ktime_add(xtim, tomono);
+}
+
+/*
* Functions and macros which are different for UP/SMP systems are kept in a
* single place
*/
@@ -246,7 +266,7 @@ ktime_t ktime_add_ns(const ktime_t kt, u64 nsec)
/*
* Divide a ktime value by a nanosecond value
*/
-static unsigned long ktime_divns(const ktime_t kt, nsec_t div)
+static unsigned long ktime_divns(const ktime_t kt, s64 div)
{
u64 dclc, inc, dns;
int sft = 0;
@@ -281,18 +301,17 @@ void unlock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
* hrtimer_forward - forward the timer expiry
*
* @timer: hrtimer to forward
+ * @now: forward past this time
* @interval: the interval to forward
*
* Forward the timer expiry so it will expire in the future.
* Returns the number of overruns.
*/
unsigned long
-hrtimer_forward(struct hrtimer *timer, ktime_t interval)
+hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)
{
unsigned long orun = 1;
- ktime_t delta, now;
-
- now = timer->base->get_time();
+ ktime_t delta;
delta = ktime_sub(now, timer->expires);
@@ -303,7 +322,7 @@ hrtimer_forward(struct hrtimer *timer, ktime_t interval)
interval.tv64 = timer->base->resolution.tv64;
if (unlikely(delta.tv64 >= interval.tv64)) {
- nsec_t incr = ktime_to_ns(interval);
+ s64 incr = ktime_to_ns(interval);
orun = ktime_divns(delta, incr);
timer->expires = ktime_add_ns(timer->expires, incr * orun);
@@ -355,8 +374,6 @@ static void enqueue_hrtimer(struct hrtimer *timer, struct hrtimer_base *base)
rb_link_node(&timer->node, parent, link);
rb_insert_color(&timer->node, &base->active);
- timer->state = HRTIMER_PENDING;
-
if (!base->first || timer->expires.tv64 <
rb_entry(base->first, struct hrtimer, node)->expires.tv64)
base->first = &timer->node;
@@ -376,6 +393,7 @@ static void __remove_hrtimer(struct hrtimer *timer, struct hrtimer_base *base)
if (base->first == &timer->node)
base->first = rb_next(&timer->node);
rb_erase(&timer->node, &base->active);
+ timer->node.rb_parent = HRTIMER_INACTIVE;
}
/*
@@ -386,7 +404,6 @@ remove_hrtimer(struct hrtimer *timer, struct hrtimer_base *base)
{
if (hrtimer_active(timer)) {
__remove_hrtimer(timer, base);
- timer->state = HRTIMER_INACTIVE;
return 1;
}
return 0;
@@ -560,6 +577,7 @@ void hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
clock_id = CLOCK_MONOTONIC;
timer->base = &bases[clock_id];
+ timer->node.rb_parent = HRTIMER_INACTIVE;
}
/**
@@ -586,48 +604,35 @@ int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp)
*/
static inline void run_hrtimer_queue(struct hrtimer_base *base)
{
- ktime_t now = base->get_time();
struct rb_node *node;
+ if (base->get_softirq_time)
+ base->softirq_time = base->get_softirq_time();
+
spin_lock_irq(&base->lock);
while ((node = base->first)) {
struct hrtimer *timer;
- int (*fn)(void *);
+ int (*fn)(struct hrtimer *);
int restart;
- void *data;
timer = rb_entry(node, struct hrtimer, node);
- if (now.tv64 <= timer->expires.tv64)
+ if (base->softirq_time.tv64 <= timer->expires.tv64)
break;
fn = timer->function;
- data = timer->data;
set_curr_timer(base, timer);
- timer->state = HRTIMER_RUNNING;
__remove_hrtimer(timer, base);
spin_unlock_irq(&base->lock);
- /*
- * fn == NULL is special case for the simplest timer
- * variant - wake up process and do not restart:
- */
- if (!fn) {
- wake_up_process(data);
- restart = HRTIMER_NORESTART;
- } else
- restart = fn(data);
+ restart = fn(timer);
spin_lock_irq(&base->lock);
- /* Another CPU has added back the timer */
- if (timer->state != HRTIMER_RUNNING)
- continue;
-
- if (restart == HRTIMER_RESTART)
+ if (restart != HRTIMER_NORESTART) {
+ BUG_ON(hrtimer_active(timer));
enqueue_hrtimer(timer, base);
- else
- timer->state = HRTIMER_EXPIRED;
+ }
}
set_curr_timer(base, NULL);
spin_unlock_irq(&base->lock);
@@ -641,6 +646,8 @@ void hrtimer_run_queues(void)
struct hrtimer_base *base = __get_cpu_var(hrtimer_bases);
int i;
+ hrtimer_get_softirq_time(base);
+
for (i = 0; i < MAX_HRTIMER_BASES; i++)
run_hrtimer_queue(&base[i]);
}
@@ -649,79 +656,70 @@ void hrtimer_run_queues(void)
* Sleep related functions:
*/
-/**
- * schedule_hrtimer - sleep until timeout
- *
- * @timer: hrtimer variable initialized with the correct clock base
- * @mode: timeout value is abs/rel
- *
- * Make the current task sleep until @timeout is
- * elapsed.
- *
- * You can set the task state as follows -
- *
- * %TASK_UNINTERRUPTIBLE - at least @timeout is guaranteed to
- * pass before the routine returns. The routine will return 0
- *
- * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
- * delivered to the current task. In this case the remaining time
- * will be returned
- *
- * The current task state is guaranteed to be TASK_RUNNING when this
- * routine returns.
- */
-static ktime_t __sched
-schedule_hrtimer(struct hrtimer *timer, const enum hrtimer_mode mode)
-{
- /* fn stays NULL, meaning single-shot wakeup: */
- timer->data = current;
+struct sleep_hrtimer {
+ struct hrtimer timer;
+ struct task_struct *task;
+ int expired;
+};
- hrtimer_start(timer, timer->expires, mode);
+static int nanosleep_wakeup(struct hrtimer *timer)
+{
+ struct sleep_hrtimer *t =
+ container_of(timer, struct sleep_hrtimer, timer);
- schedule();
- hrtimer_cancel(timer);
+ t->expired = 1;
+ wake_up_process(t->task);
- /* Return the remaining time: */
- if (timer->state != HRTIMER_EXPIRED)
- return ktime_sub(timer->expires, timer->base->get_time());
- else
- return (ktime_t) {.tv64 = 0 };
+ return HRTIMER_NORESTART;
}
-static inline ktime_t __sched
-schedule_hrtimer_interruptible(struct hrtimer *timer,
- const enum hrtimer_mode mode)
+static int __sched do_nanosleep(struct sleep_hrtimer *t, enum hrtimer_mode mode)
{
- set_current_state(TASK_INTERRUPTIBLE);
+ t->timer.function = nanosleep_wakeup;
+ t->task = current;
+ t->expired = 0;
+
+ do {
+ set_current_state(TASK_INTERRUPTIBLE);
+ hrtimer_start(&t->timer, t->timer.expires, mode);
+
+ schedule();
+
+ if (unlikely(!t->expired)) {
+ hrtimer_cancel(&t->timer);
+ mode = HRTIMER_ABS;
+ }
+ } while (!t->expired && !signal_pending(current));
- return schedule_hrtimer(timer, mode);
+ return t->expired;
}
static long __sched nanosleep_restart(struct restart_block *restart)
{
+ struct sleep_hrtimer t;
struct timespec __user *rmtp;
struct timespec tu;
- void *rfn_save = restart->fn;
- struct hrtimer timer;
- ktime_t rem;
+ ktime_t time;
restart->fn = do_no_restart_syscall;
- hrtimer_init(&timer, (clockid_t) restart->arg3, HRTIMER_ABS);
-
- timer.expires.tv64 = ((u64)restart->arg1 << 32) | (u64) restart->arg0;
-
- rem = schedule_hrtimer_interruptible(&timer, HRTIMER_ABS);
+ hrtimer_init(&t.timer, restart->arg3, HRTIMER_ABS);
+ t.timer.expires.tv64 = ((u64)restart->arg1 << 32) | (u64) restart->arg0;
- if (rem.tv64 <= 0)
+ if (do_nanosleep(&t, HRTIMER_ABS))
return 0;
rmtp = (struct timespec __user *) restart->arg2;
- tu = ktime_to_timespec(rem);
- if (rmtp && copy_to_user(rmtp, &tu, sizeof(tu)))
- return -EFAULT;
+ if (rmtp) {
+ time = ktime_sub(t.timer.expires, t.timer.base->get_time());
+ if (time.tv64 <= 0)
+ return 0;
+ tu = ktime_to_timespec(time);
+ if (copy_to_user(rmtp, &tu, sizeof(tu)))
+ return -EFAULT;
+ }
- restart->fn = rfn_save;
+ restart->fn = nanosleep_restart;
/* The other values in restart are already filled in */
return -ERESTART_RESTARTBLOCK;
@@ -731,33 +729,34 @@ long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
const enum hrtimer_mode mode, const clockid_t clockid)
{
struct restart_block *restart;
- struct hrtimer timer;
+ struct sleep_hrtimer t;
struct timespec tu;
ktime_t rem;
- hrtimer_init(&timer, clockid, mode);
-
- timer.expires = timespec_to_ktime(*rqtp);
-
- rem = schedule_hrtimer_interruptible(&timer, mode);
- if (rem.tv64 <= 0)
+ hrtimer_init(&t.timer, clockid, mode);
+ t.timer.expires = timespec_to_ktime(*rqtp);
+ if (do_nanosleep(&t, mode))
return 0;
/* Absolute timers do not update the rmtp value and restart: */
if (mode == HRTIMER_ABS)
return -ERESTARTNOHAND;
- tu = ktime_to_timespec(rem);
-
- if (rmtp && copy_to_user(rmtp, &tu, sizeof(tu)))
- return -EFAULT;
+ if (rmtp) {
+ rem = ktime_sub(t.timer.expires, t.timer.base->get_time());
+ if (rem.tv64 <= 0)
+ return 0;
+ tu = ktime_to_timespec(rem);
+ if (copy_to_user(rmtp, &tu, sizeof(tu)))
+ return -EFAULT;
+ }
restart = &current_thread_info()->restart_block;
restart->fn = nanosleep_restart;
- restart->arg0 = timer.expires.tv64 & 0xFFFFFFFF;
- restart->arg1 = timer.expires.tv64 >> 32;
+ restart->arg0 = t.timer.expires.tv64 & 0xFFFFFFFF;
+ restart->arg1 = t.timer.expires.tv64 >> 32;
restart->arg2 = (unsigned long) rmtp;
- restart->arg3 = (unsigned long) timer.base->index;
+ restart->arg3 = (unsigned long) t.timer.base->index;
return -ERESTART_RESTARTBLOCK;
}
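
A sketch of the resulting callback convention (names invented): the opaque data pointer is gone, so callbacks recover their payload with container_of(), and periodic timers rearm themselves by passing an explicit "now" (base->softirq_time inside the timer softirq, base->get_time() elsewhere) to hrtimer_forward().

struct example_periodic {
	struct hrtimer	timer;
	ktime_t		period;
	unsigned long	fired;
};

static int example_periodic_fn(struct hrtimer *timer)
{
	struct example_periodic *p =
		container_of(timer, struct example_periodic, timer);

	p->fired++;
	/* softirq_time is sampled once per hrtimer_run_queues() pass */
	hrtimer_forward(timer, timer->base->softirq_time, p->period);
	return HRTIMER_RESTART;
}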
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 6edfcef291e..ac766ad573e 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -271,6 +271,7 @@ void free_irq(unsigned int irq, void *dev_id)
struct irqaction **p;
unsigned long flags;
+ WARN_ON(in_interrupt());
if (irq >= NR_IRQS)
return;
diff --git a/kernel/itimer.c b/kernel/itimer.c
index 680e6b70c87..204ed7939e7 100644
--- a/kernel/itimer.c
+++ b/kernel/itimer.c
@@ -128,16 +128,16 @@ asmlinkage long sys_getitimer(int which, struct itimerval __user *value)
/*
* The timer is automagically restarted, when interval != 0
*/
-int it_real_fn(void *data)
+int it_real_fn(struct hrtimer *timer)
{
- struct task_struct *tsk = (struct task_struct *) data;
+ struct signal_struct *sig =
+ container_of(timer, struct signal_struct, real_timer);
- send_group_sig_info(SIGALRM, SEND_SIG_PRIV, tsk);
-
- if (tsk->signal->it_real_incr.tv64 != 0) {
- hrtimer_forward(&tsk->signal->real_timer,
- tsk->signal->it_real_incr);
+ send_group_sig_info(SIGALRM, SEND_SIG_PRIV, sig->tsk);
+ if (sig->it_real_incr.tv64 != 0) {
+ hrtimer_forward(timer, timer->base->softirq_time,
+ sig->it_real_incr);
return HRTIMER_RESTART;
}
return HRTIMER_NORESTART;
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 1fb9f753ef6..1156eb0977d 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -323,10 +323,10 @@ struct hlist_head __kprobes *kretprobe_inst_table_head(struct task_struct *tsk)
}
/*
- * This function is called from exit_thread or flush_thread when task tk's
- * stack is being recycled so that we can recycle any function-return probe
- * instances associated with this task. These left over instances represent
- * probed functions that have been called but will never return.
+ * This function is called from finish_task_switch when task tk becomes dead,
+ * so that we can recycle any function-return probe instances associated
+ * with this task. These left over instances represent probed functions
+ * that have been called but will never return.
*/
void __kprobes kprobe_flush_task(struct task_struct *tk)
{
@@ -336,7 +336,7 @@ void __kprobes kprobe_flush_task(struct task_struct *tk)
unsigned long flags = 0;
spin_lock_irqsave(&kretprobe_lock, flags);
- head = kretprobe_inst_table_head(current);
+ head = kretprobe_inst_table_head(tk);
hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
if (ri->task == tk)
recycle_rp_inst(ri);
diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
index 9944379360b..ac6dc874442 100644
--- a/kernel/posix-timers.c
+++ b/kernel/posix-timers.c
@@ -145,7 +145,7 @@ static int common_timer_set(struct k_itimer *, int,
struct itimerspec *, struct itimerspec *);
static int common_timer_del(struct k_itimer *timer);
-static int posix_timer_fn(void *data);
+static int posix_timer_fn(struct hrtimer *data);
static struct k_itimer *lock_timer(timer_t timer_id, unsigned long *flags);
@@ -251,15 +251,18 @@ __initcall(init_posix_timers);
static void schedule_next_timer(struct k_itimer *timr)
{
+ struct hrtimer *timer = &timr->it.real.timer;
+
if (timr->it.real.interval.tv64 == 0)
return;
- timr->it_overrun += hrtimer_forward(&timr->it.real.timer,
+ timr->it_overrun += hrtimer_forward(timer, timer->base->get_time(),
timr->it.real.interval);
+
timr->it_overrun_last = timr->it_overrun;
timr->it_overrun = -1;
++timr->it_requeue_pending;
- hrtimer_restart(&timr->it.real.timer);
+ hrtimer_restart(timer);
}
/*
@@ -331,13 +334,14 @@ EXPORT_SYMBOL_GPL(posix_timer_event);
* This code is for CLOCK_REALTIME* and CLOCK_MONOTONIC* timers.
*/
-static int posix_timer_fn(void *data)
+static int posix_timer_fn(struct hrtimer *timer)
{
- struct k_itimer *timr = data;
+ struct k_itimer *timr;
unsigned long flags;
int si_private = 0;
int ret = HRTIMER_NORESTART;
+ timr = container_of(timer, struct k_itimer, it.real.timer);
spin_lock_irqsave(&timr->it_lock, flags);
if (timr->it.real.interval.tv64 != 0)
@@ -351,7 +355,8 @@ static int posix_timer_fn(void *data)
*/
if (timr->it.real.interval.tv64 != 0) {
timr->it_overrun +=
- hrtimer_forward(&timr->it.real.timer,
+ hrtimer_forward(timer,
+ timer->base->softirq_time,
timr->it.real.interval);
ret = HRTIMER_RESTART;
++timr->it_requeue_pending;
@@ -603,38 +608,41 @@ static struct k_itimer * lock_timer(timer_t timer_id, unsigned long *flags)
static void
common_timer_get(struct k_itimer *timr, struct itimerspec *cur_setting)
{
- ktime_t remaining;
+ ktime_t now, remaining, iv;
struct hrtimer *timer = &timr->it.real.timer;
memset(cur_setting, 0, sizeof(struct itimerspec));
- remaining = hrtimer_get_remaining(timer);
- /* Time left ? or timer pending */
- if (remaining.tv64 > 0 || hrtimer_active(timer))
- goto calci;
+ iv = timr->it.real.interval;
+
/* interval timer ? */
- if (timr->it.real.interval.tv64 == 0)
+ if (iv.tv64)
+ cur_setting->it_interval = ktime_to_timespec(iv);
+ else if (!hrtimer_active(timer) &&
+ (timr->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE)
return;
+
+ now = timer->base->get_time();
+
/*
- * When a requeue is pending or this is a SIGEV_NONE timer
- * move the expiry time forward by intervals, so expiry is >
- * now.
+ * When a requeue is pending or this is a SIGEV_NONE
+ * timer move the expiry time forward by intervals, so
+ * expiry is > now.
*/
- if (timr->it_requeue_pending & REQUEUE_PENDING ||
- (timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE) {
- timr->it_overrun +=
- hrtimer_forward(timer, timr->it.real.interval);
- remaining = hrtimer_get_remaining(timer);
- }
- calci:
- /* interval timer ? */
- if (timr->it.real.interval.tv64 != 0)
- cur_setting->it_interval =
- ktime_to_timespec(timr->it.real.interval);
+ if (iv.tv64 && (timr->it_requeue_pending & REQUEUE_PENDING ||
+ (timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE))
+ timr->it_overrun += hrtimer_forward(timer, now, iv);
+
+ remaining = ktime_sub(timer->expires, now);
/* Return 0 only, when the timer is expired and not pending */
- if (remaining.tv64 <= 0)
- cur_setting->it_value.tv_nsec = 1;
- else
+ if (remaining.tv64 <= 0) {
+ /*
+ * A single shot SIGEV_NONE timer must return 0, when
+ * it is expired !
+ */
+ if ((timr->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE)
+ cur_setting->it_value.tv_nsec = 1;
+ } else
cur_setting->it_value = ktime_to_timespec(remaining);
}
@@ -717,7 +725,6 @@ common_timer_set(struct k_itimer *timr, int flags,
mode = flags & TIMER_ABSTIME ? HRTIMER_ABS : HRTIMER_REL;
hrtimer_init(&timr->it.real.timer, timr->it_clock, mode);
- timr->it.real.timer.data = timr;
timr->it.real.timer.function = posix_timer_fn;
timer->expires = timespec_to_ktime(new_setting->it_value);
diff --git a/kernel/power/swap.c b/kernel/power/swap.c
index 9177f3f73a6..044b8e0c102 100644
--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
@@ -454,10 +454,11 @@ static int load_image(struct swap_map_handle *handle,
nr_pages++;
}
} while (ret > 0);
- if (!error)
+ if (!error) {
printk("\b\b\b\bdone\n");
- if (!snapshot_image_loaded(snapshot))
- error = -ENODATA;
+ if (!snapshot_image_loaded(snapshot))
+ error = -ENODATA;
+ }
return error;
}
diff --git a/kernel/sched.c b/kernel/sched.c
index 7ffaabd64f8..78acdefeccc 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -49,6 +49,7 @@
#include <linux/syscalls.h>
#include <linux/times.h>
#include <linux/acct.h>
+#include <linux/kprobes.h>
#include <asm/tlb.h>
#include <asm/unistd.h>
@@ -1546,8 +1547,14 @@ static inline void finish_task_switch(runqueue_t *rq, task_t *prev)
finish_lock_switch(rq, prev);
if (mm)
mmdrop(mm);
- if (unlikely(prev_task_flags & PF_DEAD))
+ if (unlikely(prev_task_flags & PF_DEAD)) {
+ /*
+ * Remove function-return probe instances associated with this
+ * task and put them back on the free list.
+ */
+ kprobe_flush_task(prev);
put_task_struct(prev);
+ }
}
/**
diff --git a/kernel/time.c b/kernel/time.c
index e00a97b7724..ff8e7019c4c 100644
--- a/kernel/time.c
+++ b/kernel/time.c
@@ -610,7 +610,7 @@ void set_normalized_timespec(struct timespec *ts, time_t sec, long nsec)
*
* Returns the timespec representation of the nsec parameter.
*/
-struct timespec ns_to_timespec(const nsec_t nsec)
+struct timespec ns_to_timespec(const s64 nsec)
{
struct timespec ts;
@@ -630,7 +630,7 @@ struct timespec ns_to_timespec(const nsec_t nsec)
*
* Returns the timeval representation of the nsec parameter.
*/
-struct timeval ns_to_timeval(const nsec_t nsec)
+struct timeval ns_to_timeval(const s64 nsec)
{
struct timespec ts = ns_to_timespec(nsec);
struct timeval tv;
diff --git a/lib/Makefile b/lib/Makefile
index f827e3c24ec..b830c9a1554 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -23,6 +23,7 @@ lib-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
lib-$(CONFIG_SEMAPHORE_SLEEPERS) += semaphore-sleepers.o
lib-$(CONFIG_GENERIC_FIND_NEXT_BIT) += find_next_bit.o
+lib-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
obj-$(CONFIG_LOCK_KERNEL) += kernel_lock.o
obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
diff --git a/lib/bitmap.c b/lib/bitmap.c
index 8acab0e176e..ed2ae3b0cd0 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -253,33 +253,18 @@ int __bitmap_subset(const unsigned long *bitmap1,
}
EXPORT_SYMBOL(__bitmap_subset);
-#if BITS_PER_LONG == 32
int __bitmap_weight(const unsigned long *bitmap, int bits)
{
int k, w = 0, lim = bits/BITS_PER_LONG;
for (k = 0; k < lim; k++)
- w += hweight32(bitmap[k]);
+ w += hweight_long(bitmap[k]);
if (bits % BITS_PER_LONG)
- w += hweight32(bitmap[k] & BITMAP_LAST_WORD_MASK(bits));
+ w += hweight_long(bitmap[k] & BITMAP_LAST_WORD_MASK(bits));
return w;
}
-#else
-int __bitmap_weight(const unsigned long *bitmap, int bits)
-{
- int k, w = 0, lim = bits/BITS_PER_LONG;
-
- for (k = 0; k < lim; k++)
- w += hweight64(bitmap[k]);
-
- if (bits % BITS_PER_LONG)
- w += hweight64(bitmap[k] & BITMAP_LAST_WORD_MASK(bits));
-
- return w;
-}
-#endif
EXPORT_SYMBOL(__bitmap_weight);
/*
diff --git a/lib/find_next_bit.c b/lib/find_next_bit.c
index c05b4b19cf6..bda0d71a251 100644
--- a/lib/find_next_bit.c
+++ b/lib/find_next_bit.c
@@ -11,48 +11,171 @@
#include <linux/bitops.h>
#include <linux/module.h>
+#include <asm/types.h>
+#include <asm/byteorder.h>
-int find_next_bit(const unsigned long *addr, int size, int offset)
+#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG)
+
+/**
+ * find_next_bit - find the next set bit in a memory region
+ * @addr: The address to base the search on
+ * @offset: The bitnumber to start searching at
+ * @size: The maximum size to search
+ */
+unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
+ unsigned long offset)
{
- const unsigned long *base;
- const int NBITS = sizeof(*addr) * 8;
+ const unsigned long *p = addr + BITOP_WORD(offset);
+ unsigned long result = offset & ~(BITS_PER_LONG-1);
unsigned long tmp;
- base = addr;
+ if (offset >= size)
+ return size;
+ size -= result;
+ offset %= BITS_PER_LONG;
if (offset) {
- int suboffset;
+ tmp = *(p++);
+ tmp &= (~0UL << offset);
+ if (size < BITS_PER_LONG)
+ goto found_first;
+ if (tmp)
+ goto found_middle;
+ size -= BITS_PER_LONG;
+ result += BITS_PER_LONG;
+ }
+ while (size & ~(BITS_PER_LONG-1)) {
+ if ((tmp = *(p++)))
+ goto found_middle;
+ result += BITS_PER_LONG;
+ size -= BITS_PER_LONG;
+ }
+ if (!size)
+ return result;
+ tmp = *p;
- addr += offset / NBITS;
+found_first:
+ tmp &= (~0UL >> (BITS_PER_LONG - size));
+ if (tmp == 0UL) /* Are any bits set? */
+ return result + size; /* Nope. */
+found_middle:
+ return result + __ffs(tmp);
+}
- suboffset = offset % NBITS;
- if (suboffset) {
- tmp = *addr;
- tmp >>= suboffset;
- if (tmp)
- goto finish;
- }
+EXPORT_SYMBOL(find_next_bit);
- addr++;
+/*
+ * This implementation of find_{first,next}_zero_bit was stolen from
+ * Linus' asm-alpha/bitops.h.
+ */
+unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size,
+ unsigned long offset)
+{
+ const unsigned long *p = addr + BITOP_WORD(offset);
+ unsigned long result = offset & ~(BITS_PER_LONG-1);
+ unsigned long tmp;
+
+ if (offset >= size)
+ return size;
+ size -= result;
+ offset %= BITS_PER_LONG;
+ if (offset) {
+ tmp = *(p++);
+ tmp |= ~0UL >> (BITS_PER_LONG - offset);
+ if (size < BITS_PER_LONG)
+ goto found_first;
+ if (~tmp)
+ goto found_middle;
+ size -= BITS_PER_LONG;
+ result += BITS_PER_LONG;
+ }
+ while (size & ~(BITS_PER_LONG-1)) {
+ if (~(tmp = *(p++)))
+ goto found_middle;
+ result += BITS_PER_LONG;
+ size -= BITS_PER_LONG;
}
+ if (!size)
+ return result;
+ tmp = *p;
+
+found_first:
+ tmp |= ~0UL << size;
+ if (tmp == ~0UL) /* Are any bits zero? */
+ return result + size; /* Nope. */
+found_middle:
+ return result + ffz(tmp);
+}
+
+EXPORT_SYMBOL(find_next_zero_bit);
- while ((tmp = *addr) == 0)
- addr++;
+#ifdef __BIG_ENDIAN
- offset = (addr - base) * NBITS;
+/* include/linux/byteorder does not support "unsigned long" type */
+static inline unsigned long ext2_swabp(const unsigned long * x)
+{
+#if BITS_PER_LONG == 64
+ return (unsigned long) __swab64p((u64 *) x);
+#elif BITS_PER_LONG == 32
+ return (unsigned long) __swab32p((u32 *) x);
+#else
+#error BITS_PER_LONG not defined
+#endif
+}
+
+/* include/linux/byteorder doesn't support "unsigned long" type */
+static inline unsigned long ext2_swab(const unsigned long y)
+{
+#if BITS_PER_LONG == 64
+ return (unsigned long) __swab64((u64) y);
+#elif BITS_PER_LONG == 32
+ return (unsigned long) __swab32((u32) y);
+#else
+#error BITS_PER_LONG not defined
+#endif
+}
- finish:
- /* count the remaining bits without using __ffs() since that takes a 32-bit arg */
- while (!(tmp & 0xff)) {
- offset += 8;
- tmp >>= 8;
+unsigned long generic_find_next_zero_le_bit(const unsigned long *addr, unsigned
+ long size, unsigned long offset)
+{
+ const unsigned long *p = addr + BITOP_WORD(offset);
+ unsigned long result = offset & ~(BITS_PER_LONG - 1);
+ unsigned long tmp;
+
+ if (offset >= size)
+ return size;
+ size -= result;
+ offset &= (BITS_PER_LONG - 1UL);
+ if (offset) {
+ tmp = ext2_swabp(p++);
+ tmp |= (~0UL >> (BITS_PER_LONG - offset));
+ if (size < BITS_PER_LONG)
+ goto found_first;
+ if (~tmp)
+ goto found_middle;
+ size -= BITS_PER_LONG;
+ result += BITS_PER_LONG;
}
- while (!(tmp & 1)) {
- offset++;
- tmp >>= 1;
+ while (size & ~(BITS_PER_LONG - 1)) {
+ if (~(tmp = *(p++)))
+ goto found_middle_swap;
+ result += BITS_PER_LONG;
+ size -= BITS_PER_LONG;
}
+ if (!size)
+ return result;
+ tmp = ext2_swabp(p);
+found_first:
+ tmp |= ~0UL << size;
+ if (tmp == ~0UL) /* Are any bits zero? */
+ return result + size; /* Nope. Skip ffz */
+found_middle:
+ return result + ffz(tmp);
- return offset;
+found_middle_swap:
+ return result + ffz(ext2_swab(tmp));
}
-EXPORT_SYMBOL(find_next_bit);
+EXPORT_SYMBOL(generic_find_next_zero_le_bit);
+
+#endif /* __BIG_ENDIAN */
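
The generic bit-search routines now take and return unsigned long; a small, illustrative-only sketch of walking the set bits of a bitmap with the new interface:

static void example_walk_bits(const unsigned long *map, unsigned long nbits)
{
	unsigned long bit;

	for (bit = find_next_bit(map, nbits, 0);
	     bit < nbits;
	     bit = find_next_bit(map, nbits, bit + 1))
		printk(KERN_DEBUG "bit %lu set\n", bit);
}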
diff --git a/lib/hweight.c b/lib/hweight.c
new file mode 100644
index 00000000000..43825767170
--- /dev/null
+++ b/lib/hweight.c
@@ -0,0 +1,53 @@
+#include <linux/module.h>
+#include <asm/types.h>
+
+/**
+ * hweightN - returns the Hamming weight of an N-bit word
+ * @x: the word to weigh
+ *
+ * The Hamming Weight of a number is the total number of bits set in it.
+ */
+
+unsigned int hweight32(unsigned int w)
+{
+ unsigned int res = w - ((w >> 1) & 0x55555555);
+ res = (res & 0x33333333) + ((res >> 2) & 0x33333333);
+ res = (res + (res >> 4)) & 0x0F0F0F0F;
+ res = res + (res >> 8);
+ return (res + (res >> 16)) & 0x000000FF;
+}
+EXPORT_SYMBOL(hweight32);
+
+unsigned int hweight16(unsigned int w)
+{
+ unsigned int res = w - ((w >> 1) & 0x5555);
+ res = (res & 0x3333) + ((res >> 2) & 0x3333);
+ res = (res + (res >> 4)) & 0x0F0F;
+ return (res + (res >> 8)) & 0x00FF;
+}
+EXPORT_SYMBOL(hweight16);
+
+unsigned int hweight8(unsigned int w)
+{
+ unsigned int res = w - ((w >> 1) & 0x55);
+ res = (res & 0x33) + ((res >> 2) & 0x33);
+ return (res + (res >> 4)) & 0x0F;
+}
+EXPORT_SYMBOL(hweight8);
+
+unsigned long hweight64(__u64 w)
+{
+#if BITS_PER_LONG == 32
+ return hweight32((unsigned int)(w >> 32)) + hweight32((unsigned int)w);
+#elif BITS_PER_LONG == 64
+ __u64 res = w - ((w >> 1) & 0x5555555555555555ul);
+ res = (res & 0x3333333333333333ul) + ((res >> 2) & 0x3333333333333333ul);
+ res = (res + (res >> 4)) & 0x0F0F0F0F0F0F0F0Ful;
+ res = res + (res >> 8);
+ res = res + (res >> 16);
+ return (res + (res >> 32)) & 0x00000000000000FFul;
+#else
+#error BITS_PER_LONG not defined
+#endif
+}
+EXPORT_SYMBOL(hweight64);
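
These generic routines are only built when an architecture selects CONFIG_GENERIC_HWEIGHT (see the lib/Makefile hunk above); hweight_long(), as now used by __bitmap_weight(), picks hweight32() or hweight64() by word size. A few sanity values, purely illustrative:

static void example_hweight_checks(void)
{
	BUG_ON(hweight8(0xA5) != 4);			/* 1010 0101 */
	BUG_ON(hweight16(0xF00F) != 8);
	BUG_ON(hweight32(0x80000001U) != 2);
	BUG_ON(hweight64(0xFFFFFFFFFFFFFFFFULL) != 64);
	BUG_ON(hweight_long(0xF0F0UL) != 8);
}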
diff --git a/mm/highmem.c b/mm/highmem.c
index d0ea1eec6a9..55885f64af4 100644
--- a/mm/highmem.c
+++ b/mm/highmem.c
@@ -31,14 +31,9 @@
static mempool_t *page_pool, *isa_page_pool;
-static void *page_pool_alloc_isa(gfp_t gfp_mask, void *data)
+static void *mempool_alloc_pages_isa(gfp_t gfp_mask, void *data)
{
- return alloc_page(gfp_mask | GFP_DMA);
-}
-
-static void page_pool_free(void *page, void *data)
-{
- __free_page(page);
+ return mempool_alloc_pages(gfp_mask | GFP_DMA, data);
}
/*
@@ -51,11 +46,6 @@ static void page_pool_free(void *page, void *data)
*/
#ifdef CONFIG_HIGHMEM
-static void *page_pool_alloc(gfp_t gfp_mask, void *data)
-{
- return alloc_page(gfp_mask);
-}
-
static int pkmap_count[LAST_PKMAP];
static unsigned int last_pkmap_nr;
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kmap_lock);
@@ -229,7 +219,7 @@ static __init int init_emergency_pool(void)
if (!i.totalhigh)
return 0;
- page_pool = mempool_create(POOL_SIZE, page_pool_alloc, page_pool_free, NULL);
+ page_pool = mempool_create_page_pool(POOL_SIZE, 0);
if (!page_pool)
BUG();
printk("highmem bounce pool size: %d pages\n", POOL_SIZE);
@@ -272,7 +262,8 @@ int init_emergency_isa_pool(void)
if (isa_page_pool)
return 0;
- isa_page_pool = mempool_create(ISA_POOL_SIZE, page_pool_alloc_isa, page_pool_free, NULL);
+ isa_page_pool = mempool_create(ISA_POOL_SIZE, mempool_alloc_pages_isa,
+ mempool_free_pages, (void *) 0);
if (!isa_page_pool)
BUG();
@@ -337,7 +328,7 @@ static void bounce_end_io(struct bio *bio, mempool_t *pool, int err)
bio_put(bio);
}
-static int bounce_end_io_write(struct bio *bio, unsigned int bytes_done,int err)
+static int bounce_end_io_write(struct bio *bio, unsigned int bytes_done, int err)
{
if (bio->bi_size)
return 1;
@@ -384,7 +375,7 @@ static int bounce_end_io_read_isa(struct bio *bio, unsigned int bytes_done, int
}
static void __blk_queue_bounce(request_queue_t *q, struct bio **bio_orig,
- mempool_t *pool)
+ mempool_t *pool)
{
struct page *page;
struct bio *bio = NULL;
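
The highmem hunks above drop the file-local page_pool_alloc()/page_pool_free() callbacks: the ordinary bounce pool is now built with the new mempool_create_page_pool() helper, while the ISA pool keeps an explicit mempool_create() only because it must OR GFP_DMA into the mask before delegating to mempool_alloc_pages(), with the page order carried in pool_data. A hedged sketch of what a converted caller looks like; my_page_pool, my_pool_init() and the pool size of 64 are illustrative, not taken from this patch:

    /* Sketch: an emergency pool of 64 single (order-0) pages, equivalent to
     * the old mempool_create(64, page_pool_alloc, page_pool_free, NULL). */
    static mempool_t *my_page_pool;

    static int __init my_pool_init(void)
    {
            my_page_pool = mempool_create_page_pool(64, 0);
            if (!my_page_pool)
                    return -ENOMEM;
            return 0;
    }

    static void __exit my_pool_exit(void)
    {
            mempool_destroy(my_page_pool);
    }
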
diff --git a/mm/memory.c b/mm/memory.c
index d90ff9d0495..8d8f52569f3 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1071,6 +1071,8 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
}
if (pages) {
pages[i] = page;
+
+ flush_anon_page(page, start);
flush_dcache_page(page);
}
if (vmas)
diff --git a/mm/mempool.c b/mm/mempool.c
index 9ef13dd68ab..fe6e05289cc 100644
--- a/mm/mempool.c
+++ b/mm/mempool.c
@@ -289,3 +289,45 @@ void mempool_free_slab(void *element, void *pool_data)
kmem_cache_free(mem, element);
}
EXPORT_SYMBOL(mempool_free_slab);
+
+/*
+ * A commonly used alloc and free fn that kmalloc/kfrees the amount of memory
+ * specified by pool_data
+ */
+void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data)
+{
+ size_t size = (size_t)(long)pool_data;
+ return kmalloc(size, gfp_mask);
+}
+EXPORT_SYMBOL(mempool_kmalloc);
+
+void *mempool_kzalloc(gfp_t gfp_mask, void *pool_data)
+{
+	size_t size = (size_t)(long)pool_data;
+ return kzalloc(size, gfp_mask);
+}
+EXPORT_SYMBOL(mempool_kzalloc);
+
+void mempool_kfree(void *element, void *pool_data)
+{
+ kfree(element);
+}
+EXPORT_SYMBOL(mempool_kfree);
+
+/*
+ * A simple mempool-backed page allocator that allocates pages
+ * of the order specified by pool_data.
+ */
+void *mempool_alloc_pages(gfp_t gfp_mask, void *pool_data)
+{
+ int order = (int)(long)pool_data;
+ return alloc_pages(gfp_mask, order);
+}
+EXPORT_SYMBOL(mempool_alloc_pages);
+
+void mempool_free_pages(void *element, void *pool_data)
+{
+ int order = (int)(long)pool_data;
+ __free_pages(element, order);
+}
+EXPORT_SYMBOL(mempool_free_pages);
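
mempool_kmalloc()/mempool_kfree() above turn a mempool into a reserve of fixed-size kmalloc buffers, with the element size passed through pool_data, and mempool_alloc_pages()/mempool_free_pages() do the same for whole pages with pool_data carrying the order. A hedged sketch of a kmalloc-backed user, built on the plain mempool_create() interface shown elsewhere in this patch; my_buf_pool and the 16-element, 256-byte sizing are illustrative:

    /* Sketch: at least 16 pre-reserved 256-byte buffers. */
    static mempool_t *my_buf_pool;

    static int __init my_buf_pool_init(void)
    {
            my_buf_pool = mempool_create(16, mempool_kmalloc, mempool_kfree,
                                         (void *)(long)256);
            return my_buf_pool ? 0 : -ENOMEM;
    }

    static void my_buf_pool_use(void)
    {
            void *buf = mempool_alloc(my_buf_pool, GFP_KERNEL); /* may sleep */

            /* ... fill and submit the 256-byte buffer ... */
            mempool_free(buf, my_buf_pool);
    }
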
diff --git a/net/ipv4/ipvs/ip_vs_ctl.c b/net/ipv4/ipvs/ip_vs_ctl.c
index 7f0288b25fa..f28ec688216 100644
--- a/net/ipv4/ipvs/ip_vs_ctl.c
+++ b/net/ipv4/ipvs/ip_vs_ctl.c
@@ -34,6 +34,7 @@
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>
+#include <linux/mutex.h>
#include <net/ip.h>
#include <net/route.h>
@@ -44,7 +45,7 @@
#include <net/ip_vs.h>
/* semaphore for IPVS sockopts. And, [gs]etsockopt may sleep. */
-static DECLARE_MUTEX(__ip_vs_mutex);
+static DEFINE_MUTEX(__ip_vs_mutex);
/* lock for service table */
static DEFINE_RWLOCK(__ip_vs_svc_lock);
@@ -1950,7 +1951,7 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
/* increase the module use count */
ip_vs_use_count_inc();
- if (down_interruptible(&__ip_vs_mutex)) {
+ if (mutex_lock_interruptible(&__ip_vs_mutex)) {
ret = -ERESTARTSYS;
goto out_dec;
}
@@ -2041,7 +2042,7 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
ip_vs_service_put(svc);
out_unlock:
- up(&__ip_vs_mutex);
+ mutex_unlock(&__ip_vs_mutex);
out_dec:
/* decrease the module use count */
ip_vs_use_count_dec();
@@ -2211,7 +2212,7 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
if (copy_from_user(arg, user, get_arglen[GET_CMDID(cmd)]) != 0)
return -EFAULT;
- if (down_interruptible(&__ip_vs_mutex))
+ if (mutex_lock_interruptible(&__ip_vs_mutex))
return -ERESTARTSYS;
switch (cmd) {
@@ -2330,7 +2331,7 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
}
out:
- up(&__ip_vs_mutex);
+ mutex_unlock(&__ip_vs_mutex);
return ret;
}
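
This hunk, like the ipt_hashlimit and genetlink ones below, is a mechanical conversion from the old semaphore-as-mutex idiom (DECLARE_MUTEX(), down_interruptible(), up()) to the real mutex API from <linux/mutex.h>, which is cheaper and gains lockdep/debug coverage. The before/after shape, as a hedged sketch with made-up my_lock/my_mutex names:

    /* Before: a binary semaphore pressed into service as a mutex. */
    static DECLARE_MUTEX(my_lock);

    static int my_op_old(void)
    {
            if (down_interruptible(&my_lock))
                    return -ERESTARTSYS;
            /* ... critical section ... */
            up(&my_lock);
            return 0;
    }

    /* After: a true mutex; callers keep exactly the same shape. */
    static DEFINE_MUTEX(my_mutex);

    static int my_op_new(void)
    {
            if (mutex_lock_interruptible(&my_mutex))
                    return -ERESTARTSYS;
            /* ... critical section ... */
            mutex_unlock(&my_mutex);
            return 0;
    }
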
diff --git a/net/ipv4/netfilter/ipt_hashlimit.c b/net/ipv4/netfilter/ipt_hashlimit.c
index dc1521c5aa8..ba5e23505e8 100644
--- a/net/ipv4/netfilter/ipt_hashlimit.c
+++ b/net/ipv4/netfilter/ipt_hashlimit.c
@@ -40,6 +40,7 @@
/* FIXME: this is just for IP_NF_ASSERRT */
#include <linux/netfilter_ipv4/ip_conntrack.h>
+#include <linux/mutex.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
@@ -92,7 +93,7 @@ struct ipt_hashlimit_htable {
};
static DEFINE_SPINLOCK(hashlimit_lock); /* protects htables list */
-static DECLARE_MUTEX(hlimit_mutex); /* additional checkentry protection */
+static DEFINE_MUTEX(hlimit_mutex); /* additional checkentry protection */
static HLIST_HEAD(hashlimit_htables);
static kmem_cache_t *hashlimit_cachep __read_mostly;
@@ -542,13 +543,13 @@ hashlimit_checkentry(const char *tablename,
* call vmalloc, and that can sleep. And we cannot just re-search
* the list of htable's in htable_create(), since then we would
* create duplicate proc files. -HW */
- down(&hlimit_mutex);
+ mutex_lock(&hlimit_mutex);
r->hinfo = htable_find_get(r->name);
if (!r->hinfo && (htable_create(r) != 0)) {
- up(&hlimit_mutex);
+ mutex_unlock(&hlimit_mutex);
return 0;
}
- up(&hlimit_mutex);
+ mutex_unlock(&hlimit_mutex);
/* Ugly hack: For SMP, we only want to use one set */
r->u.master = r;
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index 43e72419c86..f329b72578f 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -13,26 +13,27 @@
#include <linux/socket.h>
#include <linux/string.h>
#include <linux/skbuff.h>
+#include <linux/mutex.h>
#include <net/sock.h>
#include <net/genetlink.h>
struct sock *genl_sock = NULL;
-static DECLARE_MUTEX(genl_sem); /* serialization of message processing */
+static DEFINE_MUTEX(genl_mutex); /* serialization of message processing */
static void genl_lock(void)
{
- down(&genl_sem);
+ mutex_lock(&genl_mutex);
}
static int genl_trylock(void)
{
- return down_trylock(&genl_sem);
+ return !mutex_trylock(&genl_mutex);
}
static void genl_unlock(void)
{
- up(&genl_sem);
+ mutex_unlock(&genl_mutex);
if (genl_sock && genl_sock->sk_receive_queue.qlen)
genl_sock->sk_data_ready(genl_sock, 0);
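
One subtlety in the genetlink conversion above: down_trylock() returns 0 when it takes the semaphore and nonzero when it would block, while mutex_trylock() returns 1 on success and 0 on contention, so genl_trylock() negates the mutex result to keep meaning "nonzero means the lock was not taken". A hedged sketch of the two conventions side by side; the _sem/_mutex suffixes are illustrative:

    /* Semaphore convention: 0 == acquired, nonzero == would have blocked. */
    static int genl_trylock_sem(void)
    {
            return down_trylock(&genl_sem);
    }

    /* Mutex convention: 1 == acquired, 0 == already held, hence the '!'
     * so a nonzero result still reads as "did not get the lock". */
    static int genl_trylock_mutex(void)
    {
            return !mutex_trylock(&genl_mutex);
    }

Callers that treat a nonzero return as "lock not taken" therefore keep working unchanged.
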
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index b9969b91a9f..5c3eee76850 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -1167,16 +1167,12 @@ rpc_init_mempool(void)
NULL, NULL);
if (!rpc_buffer_slabp)
goto err_nomem;
- rpc_task_mempool = mempool_create(RPC_TASK_POOLSIZE,
- mempool_alloc_slab,
- mempool_free_slab,
- rpc_task_slabp);
+ rpc_task_mempool = mempool_create_slab_pool(RPC_TASK_POOLSIZE,
+ rpc_task_slabp);
if (!rpc_task_mempool)
goto err_nomem;
- rpc_buffer_mempool = mempool_create(RPC_BUFFER_POOLSIZE,
- mempool_alloc_slab,
- mempool_free_slab,
- rpc_buffer_slabp);
+ rpc_buffer_mempool = mempool_create_slab_pool(RPC_BUFFER_POOLSIZE,
+ rpc_buffer_slabp);
if (!rpc_buffer_mempool)
goto err_nomem;
return 0;
diff --git a/sound/oss/cmpci.c b/sound/oss/cmpci.c
index 1fbd5137f6d..de60a059ff5 100644
--- a/sound/oss/cmpci.c
+++ b/sound/oss/cmpci.c
@@ -1713,7 +1713,7 @@ static int mixer_ioctl(struct cm_state *s, unsigned int cmd, unsigned long arg)
case SOUND_MIXER_RECSRC: /* Arg contains a bit for each recording source */
if (get_user(val, p))
return -EFAULT;
- i = generic_hweight32(val);
+ i = hweight32(val);
for (j = i = 0; i < SOUND_MIXER_NRDEVICES; i++) {
if (!(val & (1 << i)))
continue;
diff --git a/sound/oss/sonicvibes.c b/sound/oss/sonicvibes.c
index 69a4b8778b5..4471757b798 100644
--- a/sound/oss/sonicvibes.c
+++ b/sound/oss/sonicvibes.c
@@ -407,24 +407,6 @@ static inline unsigned ld2(unsigned int x)
return r;
}
-/*
- * hweightN: returns the hamming weight (i.e. the number
- * of bits set) of a N-bit word
- */
-
-#ifdef hweight32
-#undef hweight32
-#endif
-
-static inline unsigned int hweight32(unsigned int w)
-{
- unsigned int res = (w & 0x55555555) + ((w >> 1) & 0x55555555);
- res = (res & 0x33333333) + ((res >> 2) & 0x33333333);
- res = (res & 0x0F0F0F0F) + ((res >> 4) & 0x0F0F0F0F);
- res = (res & 0x00FF00FF) + ((res >> 8) & 0x00FF00FF);
- return (res & 0x0000FFFF) + ((res >> 16) & 0x0000FFFF);
-}
-
/* --------------------------------------------------------------------- */
/*
diff --git a/sound/oss/vwsnd.c b/sound/oss/vwsnd.c
index b372e88e857..5f140c7586b 100644
--- a/sound/oss/vwsnd.c
+++ b/sound/oss/vwsnd.c
@@ -248,27 +248,6 @@ typedef struct lithium {
} lithium_t;
/*
- * li_create initializes the lithium_t structure and sets up vm mappings
- * to access the registers.
- * Returns 0 on success, -errno on failure.
- */
-
-static int __init li_create(lithium_t *lith, unsigned long baseaddr)
-{
- static void li_destroy(lithium_t *);
-
- spin_lock_init(&lith->lock);
- lith->page0 = ioremap_nocache(baseaddr + LI_PAGE0_OFFSET, PAGE_SIZE);
- lith->page1 = ioremap_nocache(baseaddr + LI_PAGE1_OFFSET, PAGE_SIZE);
- lith->page2 = ioremap_nocache(baseaddr + LI_PAGE2_OFFSET, PAGE_SIZE);
- if (!lith->page0 || !lith->page1 || !lith->page2) {
- li_destroy(lith);
- return -ENOMEM;
- }
- return 0;
-}
-
-/*
* li_destroy destroys the lithium_t structure and vm mappings.
*/
@@ -289,6 +268,25 @@ static void li_destroy(lithium_t *lith)
}
/*
+ * li_create initializes the lithium_t structure and sets up vm mappings
+ * to access the registers.
+ * Returns 0 on success, -errno on failure.
+ */
+
+static int __init li_create(lithium_t *lith, unsigned long baseaddr)
+{
+ spin_lock_init(&lith->lock);
+ lith->page0 = ioremap_nocache(baseaddr + LI_PAGE0_OFFSET, PAGE_SIZE);
+ lith->page1 = ioremap_nocache(baseaddr + LI_PAGE1_OFFSET, PAGE_SIZE);
+ lith->page2 = ioremap_nocache(baseaddr + LI_PAGE2_OFFSET, PAGE_SIZE);
+ if (!lith->page0 || !lith->page1 || !lith->page2) {
+ li_destroy(lith);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+/*
* basic register accessors - read/write long/byte
*/