Diffstat (limited to 'drivers')
-rw-r--r--  drivers/Kconfig | 2
-rw-r--r--  drivers/Makefile | 1
-rw-r--r--  drivers/scsi/Kconfig | 7
-rw-r--r--  drivers/scsi/Makefile | 3
-rw-r--r--  drivers/scsi/storvsc_drv.c (renamed from drivers/staging/hv/storvsc_drv.c) | 1020
-rw-r--r--  drivers/staging/Kconfig | 10
-rw-r--r--  drivers/staging/Makefile | 6
-rw-r--r--  drivers/staging/android/Kconfig | 86
-rw-r--r--  drivers/staging/android/Makefile | 3
-rw-r--r--  drivers/staging/android/TODO | 2
-rw-r--r--  drivers/staging/android/alarm-dev.c | 297
-rw-r--r--  drivers/staging/android/alarm.c | 601
-rw-r--r--  drivers/staging/android/android_alarm.h | 121
-rw-r--r--  drivers/staging/android/ashmem.c | 4
-rw-r--r--  drivers/staging/android/binder.c | 9
-rw-r--r--  drivers/staging/android/logger.c | 78
-rw-r--r--  drivers/staging/android/lowmemorykiller.c | 91
-rw-r--r--  drivers/staging/android/persistent_ram.c | 470
-rw-r--r--  drivers/staging/android/persistent_ram.h | 78
-rw-r--r--  drivers/staging/android/ram_console.c | 420
-rw-r--r--  drivers/staging/android/timed_gpio.c | 6
-rw-r--r--  drivers/staging/android/timed_gpio.h | 6
-rw-r--r--  drivers/staging/asus_oled/asus_oled.c | 19
-rw-r--r--  drivers/staging/bcm/Bcmchar.c | 41
-rw-r--r--  drivers/staging/bcm/CmHost.c | 3113
-rw-r--r--  drivers/staging/bcm/led_control.h | 80
-rw-r--r--  drivers/staging/comedi/Kconfig | 5
-rw-r--r--  drivers/staging/comedi/drivers/adv_pci_dio.c | 29
-rw-r--r--  drivers/staging/comedi/drivers/dt2801.c | 12
-rw-r--r--  drivers/staging/comedi/drivers/dt9812.c | 4
-rw-r--r--  drivers/staging/comedi/drivers/me4000.c | 12
-rw-r--r--  drivers/staging/comedi/drivers/ni_pcidio.c | 61
-rw-r--r--  drivers/staging/comedi/drivers/ni_pcimio.c | 27
-rw-r--r--  drivers/staging/comedi/drivers/unioxx5.c | 2
-rw-r--r--  drivers/staging/comedi/drivers/usbduxsigma.c | 42
-rw-r--r--  drivers/staging/crystalhd/bc_dts_glob_lnx.h | 3
-rw-r--r--  drivers/staging/crystalhd/bc_dts_types.h | 40
-rw-r--r--  drivers/staging/crystalhd/crystalhd.h | 14
-rw-r--r--  drivers/staging/crystalhd/crystalhd_cmds.c | 3
-rw-r--r--  drivers/staging/crystalhd/crystalhd_cmds.h | 4
-rw-r--r--  drivers/staging/crystalhd/crystalhd_hw.c | 11
-rw-r--r--  drivers/staging/crystalhd/crystalhd_hw.h | 3
-rw-r--r--  drivers/staging/crystalhd/crystalhd_lnx.c | 7
-rw-r--r--  drivers/staging/crystalhd/crystalhd_lnx.h | 5
-rw-r--r--  drivers/staging/crystalhd/crystalhd_misc.c | 5
-rw-r--r--  drivers/staging/crystalhd/crystalhd_misc.h | 34
-rw-r--r--  drivers/staging/et131x/README | 2
-rw-r--r--  drivers/staging/et131x/et131x.c | 10
-rw-r--r--  drivers/staging/et131x/et131x.h | 4
-rw-r--r--  drivers/staging/frontier/alphatrack.c | 2
-rw-r--r--  drivers/staging/frontier/tranzport.c | 2
-rw-r--r--  drivers/staging/ft1000/ft1000-pcmcia/ft1000_proc.c | 6
-rw-r--r--  drivers/staging/hv/Kconfig | 5
-rw-r--r--  drivers/staging/hv/Makefile | 3
-rw-r--r--  drivers/staging/hv/TODO | 5
-rw-r--r--  drivers/staging/iio/Documentation/device.txt | 2
-rw-r--r--  drivers/staging/iio/Documentation/iio_event_monitor.c | 241
-rw-r--r--  drivers/staging/iio/Documentation/inkernel.txt | 58
-rw-r--r--  drivers/staging/iio/Kconfig | 9
-rw-r--r--  drivers/staging/iio/Makefile | 4
-rw-r--r--  drivers/staging/iio/accel/adis16201_ring.c | 2
-rw-r--r--  drivers/staging/iio/accel/adis16203_ring.c | 2
-rw-r--r--  drivers/staging/iio/accel/adis16204_ring.c | 2
-rw-r--r--  drivers/staging/iio/accel/adis16209_ring.c | 2
-rw-r--r--  drivers/staging/iio/accel/adis16240_ring.c | 2
-rw-r--r--  drivers/staging/iio/accel/lis3l02dq.h | 2
-rw-r--r--  drivers/staging/iio/accel/lis3l02dq_ring.c | 4
-rw-r--r--  drivers/staging/iio/accel/sca3000.h | 2
-rw-r--r--  drivers/staging/iio/adc/Kconfig | 9
-rw-r--r--  drivers/staging/iio/adc/Makefile | 1
-rw-r--r--  drivers/staging/iio/adc/ad7192.c | 45
-rw-r--r--  drivers/staging/iio/adc/ad7291.c | 14
-rw-r--r--  drivers/staging/iio/adc/ad7298_ring.c | 3
-rw-r--r--  drivers/staging/iio/adc/ad7476_ring.c | 4
-rw-r--r--  drivers/staging/iio/adc/ad7606_core.c | 83
-rw-r--r--  drivers/staging/iio/adc/ad7606_par.c | 13
-rw-r--r--  drivers/staging/iio/adc/ad7606_ring.c | 2
-rw-r--r--  drivers/staging/iio/adc/ad7793.c | 2
-rw-r--r--  drivers/staging/iio/adc/ad7887_ring.c | 2
-rw-r--r--  drivers/staging/iio/adc/ad799x_core.c | 4
-rw-r--r--  drivers/staging/iio/adc/ad799x_ring.c | 4
-rw-r--r--  drivers/staging/iio/adc/adt7310.c | 21
-rw-r--r--  drivers/staging/iio/adc/adt7410.c | 21
-rw-r--r--  drivers/staging/iio/adc/lpc32xx_adc.c | 237
-rw-r--r--  drivers/staging/iio/adc/max1363_core.c | 50
-rw-r--r--  drivers/staging/iio/adc/max1363_ring.c | 2
-rw-r--r--  drivers/staging/iio/addac/adt7316-i2c.c | 18
-rw-r--r--  drivers/staging/iio/addac/adt7316-spi.c | 18
-rw-r--r--  drivers/staging/iio/addac/adt7316.c | 11
-rw-r--r--  drivers/staging/iio/addac/adt7316.h | 9
-rw-r--r--  drivers/staging/iio/buffer.h | 2
-rw-r--r--  drivers/staging/iio/cdc/ad7150.c | 10
-rw-r--r--  drivers/staging/iio/consumer.h | 96
-rw-r--r--  drivers/staging/iio/dac/Kconfig | 7
-rw-r--r--  drivers/staging/iio/dac/ad5064.c | 369
-rw-r--r--  drivers/staging/iio/dac/ad5360.c | 4
-rw-r--r--  drivers/staging/iio/dac/ad5380.c | 4
-rw-r--r--  drivers/staging/iio/dac/ad5421.c | 13
-rw-r--r--  drivers/staging/iio/dac/ad5446.c | 35
-rw-r--r--  drivers/staging/iio/dac/ad5764.c | 13
-rw-r--r--  drivers/staging/iio/dac/max517.c | 18
-rw-r--r--  drivers/staging/iio/dds/ad9834.c | 53
-rw-r--r--  drivers/staging/iio/driver.h | 34
-rw-r--r--  drivers/staging/iio/events.h | 4
-rw-r--r--  drivers/staging/iio/gyro/adis16260_ring.c | 2
-rw-r--r--  drivers/staging/iio/iio.h | 70
-rw-r--r--  drivers/staging/iio/iio_core.h | 4
-rw-r--r--  drivers/staging/iio/iio_dummy_evgen.c | 2
-rw-r--r--  drivers/staging/iio/iio_hwmon.c | 232
-rw-r--r--  drivers/staging/iio/iio_simple_dummy_buffer.c | 2
-rw-r--r--  drivers/staging/iio/impedance-analyzer/ad5933.c | 3
-rw-r--r--  drivers/staging/iio/imu/adis16400_ring.c | 2
-rw-r--r--  drivers/staging/iio/industrialio-buffer.c | 6
-rw-r--r--  drivers/staging/iio/industrialio-core.c | 658
-rw-r--r--  drivers/staging/iio/industrialio-event.c | 453
-rw-r--r--  drivers/staging/iio/inkern.c | 292
-rw-r--r--  drivers/staging/iio/kfifo_buf.c | 46
-rw-r--r--  drivers/staging/iio/kfifo_buf.h | 2
-rw-r--r--  drivers/staging/iio/light/isl29018.c | 7
-rw-r--r--  drivers/staging/iio/light/tsl2563.c | 65
-rw-r--r--  drivers/staging/iio/light/tsl2583.c | 19
-rw-r--r--  drivers/staging/iio/machine.h | 24
-rw-r--r--  drivers/staging/iio/magnetometer/ak8975.c | 8
-rw-r--r--  drivers/staging/iio/magnetometer/hmc5843.c | 26
-rw-r--r--  drivers/staging/iio/meter/ade7758_ring.c | 4
-rw-r--r--  drivers/staging/iio/meter/meter.h | 2
-rw-r--r--  drivers/staging/iio/ring_sw.c | 26
-rw-r--r--  drivers/staging/iio/ring_sw.h | 5
-rw-r--r--  drivers/staging/iio/trigger/iio-trig-bfin-timer.c | 12
-rw-r--r--  drivers/staging/iio/trigger/iio-trig-gpio.c | 12
-rw-r--r--  drivers/staging/iio/trigger/iio-trig-periodic-rtc.c | 12
-rw-r--r--  drivers/staging/iio/types.h | 4
-rw-r--r--  drivers/staging/keucr/TODO | 2
-rw-r--r--  drivers/staging/line6/capture.c | 54
-rw-r--r--  drivers/staging/line6/capture.h | 2
-rw-r--r--  drivers/staging/line6/driver.c | 2
-rw-r--r--  drivers/staging/line6/pcm.c | 109
-rw-r--r--  drivers/staging/line6/pcm.h | 167
-rw-r--r--  drivers/staging/line6/playback.c | 68
-rw-r--r--  drivers/staging/line6/playback.h | 2
-rw-r--r--  drivers/staging/line6/toneport.c | 12
-rw-r--r--  drivers/staging/line6/usbdefs.h | 44
-rw-r--r--  drivers/staging/media/easycap/easycap_main.c | 1
-rw-r--r--  drivers/staging/media/lirc/lirc_sasem.c | 17
-rw-r--r--  drivers/staging/mei/TODO | 3
-rw-r--r--  drivers/staging/mei/hw.h | 47
-rw-r--r--  drivers/staging/mei/init.c | 24
-rw-r--r--  drivers/staging/mei/interface.c | 72
-rw-r--r--  drivers/staging/mei/interface.h | 7
-rw-r--r--  drivers/staging/mei/interrupt.c | 106
-rw-r--r--  drivers/staging/mei/iorw.c | 17
-rw-r--r--  drivers/staging/mei/main.c | 14
-rw-r--r--  drivers/staging/mei/mei-amt-version.c | 481
-rw-r--r--  drivers/staging/mei/mei.h | 127
-rw-r--r--  drivers/staging/mei/mei.txt | 6
-rw-r--r--  drivers/staging/mei/mei_dev.h | 10
-rw-r--r--  drivers/staging/mei/mei_version.h | 31
-rw-r--r--  drivers/staging/mei/wd.c | 8
-rw-r--r--  drivers/staging/nvec/Kconfig | 6
-rw-r--r--  drivers/staging/nvec/nvec.c | 19
-rw-r--r--  drivers/staging/nvec/nvec_ps2.c | 53
-rw-r--r--  drivers/staging/octeon/ethernet-mdio.c | 4
-rw-r--r--  drivers/staging/omapdrm/omap_crtc.c | 37
-rw-r--r--  drivers/staging/omapdrm/omap_debugfs.c | 97
-rw-r--r--  drivers/staging/omapdrm/omap_dmm_tiler.c | 91
-rw-r--r--  drivers/staging/omapdrm/omap_dmm_tiler.h | 15
-rw-r--r--  drivers/staging/omapdrm/omap_drv.c | 16
-rw-r--r--  drivers/staging/omapdrm/omap_drv.h | 19
-rw-r--r--  drivers/staging/omapdrm/omap_fb.c | 124
-rw-r--r--  drivers/staging/omapdrm/omap_fbdev.c | 26
-rw-r--r--  drivers/staging/omapdrm/omap_gem.c | 172
-rw-r--r--  drivers/staging/omapdrm/omap_gem_helpers.c | 2
-rw-r--r--  drivers/staging/omapdrm/omap_plane.c | 197
-rw-r--r--  drivers/staging/ozwpan/Kbuild | 19
-rw-r--r--  drivers/staging/ozwpan/Kconfig | 9
-rw-r--r--  drivers/staging/ozwpan/README | 25
-rw-r--r--  drivers/staging/ozwpan/TODO | 12
-rw-r--r--  drivers/staging/ozwpan/ozappif.h | 46
-rw-r--r--  drivers/staging/ozwpan/ozcdev.c | 521
-rw-r--r--  drivers/staging/ozwpan/ozcdev.h | 18
-rw-r--r--  drivers/staging/ozwpan/ozconfig.h | 27
-rw-r--r--  drivers/staging/ozwpan/ozeltbuf.c | 339
-rw-r--r--  drivers/staging/ozwpan/ozeltbuf.h | 70
-rw-r--r--  drivers/staging/ozwpan/ozevent.c | 116
-rw-r--r--  drivers/staging/ozwpan/ozevent.h | 31
-rw-r--r--  drivers/staging/ozwpan/ozeventdef.h | 47
-rw-r--r--  drivers/staging/ozwpan/ozhcd.c | 2256
-rw-r--r--  drivers/staging/ozwpan/ozhcd.h | 15
-rw-r--r--  drivers/staging/ozwpan/ozmain.c | 58
-rw-r--r--  drivers/staging/ozwpan/ozpd.c | 832
-rw-r--r--  drivers/staging/ozwpan/ozpd.h | 121
-rw-r--r--  drivers/staging/ozwpan/ozproto.c | 957
-rw-r--r--  drivers/staging/ozwpan/ozproto.h | 69
-rw-r--r--  drivers/staging/ozwpan/ozprotocol.h | 372
-rw-r--r--  drivers/staging/ozwpan/oztrace.c | 36
-rw-r--r--  drivers/staging/ozwpan/oztrace.h | 35
-rw-r--r--  drivers/staging/ozwpan/ozurbparanoia.c | 53
-rw-r--r--  drivers/staging/ozwpan/ozurbparanoia.h | 19
-rw-r--r--  drivers/staging/ozwpan/ozusbif.h | 43
-rw-r--r--  drivers/staging/ozwpan/ozusbsvc.c | 245
-rw-r--r--  drivers/staging/ozwpan/ozusbsvc.h | 32
-rw-r--r--  drivers/staging/ozwpan/ozusbsvc1.c | 437
-rw-r--r--  drivers/staging/quickstart/quickstart.c | 370
-rw-r--r--  drivers/staging/ramster/Kconfig | 17
-rw-r--r--  drivers/staging/ramster/Makefile | 1
-rw-r--r--  drivers/staging/ramster/TODO | 13
-rw-r--r--  drivers/staging/ramster/cluster/Makefile | 3
-rw-r--r--  drivers/staging/ramster/cluster/heartbeat.c | 464
-rw-r--r--  drivers/staging/ramster/cluster/heartbeat.h | 87
-rw-r--r--  drivers/staging/ramster/cluster/masklog.c | 155
-rw-r--r--  drivers/staging/ramster/cluster/masklog.h | 220
-rw-r--r--  drivers/staging/ramster/cluster/nodemanager.c | 992
-rw-r--r--  drivers/staging/ramster/cluster/nodemanager.h | 88
-rw-r--r--  drivers/staging/ramster/cluster/ramster_nodemanager.h | 39
-rw-r--r--  drivers/staging/ramster/cluster/tcp.c | 2256
-rw-r--r--  drivers/staging/ramster/cluster/tcp.h | 159
-rw-r--r--  drivers/staging/ramster/cluster/tcp_internal.h | 248
-rw-r--r--  drivers/staging/ramster/r2net.c | 401
-rw-r--r--  drivers/staging/ramster/ramster.h | 118
-rw-r--r--  drivers/staging/ramster/tmem.c | 851
-rw-r--r--  drivers/staging/ramster/tmem.h | 244
-rw-r--r--  drivers/staging/ramster/xvmalloc.c (renamed from drivers/staging/zram/xvmalloc.c) | 0
-rw-r--r--  drivers/staging/ramster/xvmalloc.h (renamed from drivers/staging/zram/xvmalloc.h) | 0
-rw-r--r--  drivers/staging/ramster/xvmalloc_int.h (renamed from drivers/staging/zram/xvmalloc_int.h) | 0
-rw-r--r--  drivers/staging/ramster/zcache-main.c | 3320
-rw-r--r--  drivers/staging/ramster/zcache.h | 22
-rw-r--r--  drivers/staging/rtl8187se/r8180_core.c | 111
-rw-r--r--  drivers/staging/rtl8187se/r8180_dm.c | 1792
-rw-r--r--  drivers/staging/rtl8187se/r8180_wx.c | 286
-rw-r--r--  drivers/staging/rtl8192e/rtllib_rx.c | 2
-rw-r--r--  drivers/staging/rtl8192e/rtllib_softmac.c | 3
-rw-r--r--  drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c | 2
-rw-r--r--  drivers/staging/rtl8192u/r8192U_core.c | 1
-rw-r--r--  drivers/staging/rtl8712/Kconfig | 7
-rw-r--r--  drivers/staging/rtl8712/drv_types.h | 1
-rw-r--r--  drivers/staging/rtl8712/os_intfs.c | 6
-rw-r--r--  drivers/staging/rtl8712/osdep_service.h | 17
-rw-r--r--  drivers/staging/rtl8712/rtl8712_recv.c | 2
-rw-r--r--  drivers/staging/rtl8712/rtl871x_io.c | 1
-rw-r--r--  drivers/staging/rtl8712/rtl871x_io.h | 1
-rw-r--r--  drivers/staging/rtl8712/rtl871x_ioctl_linux.c | 8
-rw-r--r--  drivers/staging/rtl8712/rtl871x_pwrctrl.c | 11
-rw-r--r--  drivers/staging/rtl8712/rtl871x_pwrctrl.h | 1
-rw-r--r--  drivers/staging/rtl8712/rtl871x_recv.c | 1
-rw-r--r--  drivers/staging/rtl8712/rtl871x_recv.h | 3
-rw-r--r--  drivers/staging/rtl8712/rtl871x_sta_mgt.c | 4
-rw-r--r--  drivers/staging/rtl8712/rtl871x_xmit.c | 3
-rw-r--r--  drivers/staging/rtl8712/rtl871x_xmit.h | 3
-rw-r--r--  drivers/staging/rtl8712/sta_info.h | 4
-rw-r--r--  drivers/staging/rtl8712/usb_intf.c | 9
-rw-r--r--  drivers/staging/rts5139/TODO | 6
-rw-r--r--  drivers/staging/rts5139/ms.h | 4
-rw-r--r--  drivers/staging/rts5139/rts51x_chip.c | 14
-rw-r--r--  drivers/staging/rts5139/rts51x_chip.h | 6
-rw-r--r--  drivers/staging/rts5139/rts51x_fop.h | 2
-rw-r--r--  drivers/staging/rts5139/rts51x_transport.c | 2
-rw-r--r--  drivers/staging/rts5139/rts51x_transport.h | 2
-rw-r--r--  drivers/staging/rts5139/sd_cprm.c | 2
-rw-r--r--  drivers/staging/rts_pstor/TODO | 6
-rw-r--r--  drivers/staging/sbe-2t3e3/intr.c | 2
-rw-r--r--  drivers/staging/sep/Kconfig | 3
-rw-r--r--  drivers/staging/sep/Makefile | 5
-rw-r--r--  drivers/staging/sep/TODO | 5
-rw-r--r--  drivers/staging/sep/sep_crypto.c | 4058
-rw-r--r--  drivers/staging/sep/sep_crypto.h | 359
-rw-r--r--  drivers/staging/sep/sep_dev.h | 98
-rw-r--r--  drivers/staging/sep/sep_driver.c | 2932
-rw-r--r--  drivers/staging/sep/sep_driver_api.h | 293
-rw-r--r--  drivers/staging/sep/sep_driver_config.h | 79
-rw-r--r--  drivers/staging/sep/sep_driver_hw_defs.h | 185
-rw-r--r--  drivers/staging/sep/sep_main.c | 4518
-rw-r--r--  drivers/staging/sep/sep_trace_events.h | 188
-rw-r--r--  drivers/staging/slicoss/README | 2
-rw-r--r--  drivers/staging/sm7xx/smtcfb.c | 3
-rw-r--r--  drivers/staging/sm7xx/smtcfb.h | 2
-rw-r--r--  drivers/staging/telephony/Kconfig (renamed from drivers/telephony/Kconfig) | 0
-rw-r--r--  drivers/staging/telephony/Makefile (renamed from drivers/telephony/Makefile) | 0
-rw-r--r--  drivers/staging/telephony/TODO | 10
-rw-r--r--  drivers/staging/telephony/ixj-ver.h (renamed from drivers/telephony/ixj-ver.h) | 0
-rw-r--r--  drivers/staging/telephony/ixj.c (renamed from drivers/telephony/ixj.c) | 0
-rw-r--r--  drivers/staging/telephony/ixj.h (renamed from drivers/telephony/ixj.h) | 0
-rw-r--r--  drivers/staging/telephony/ixj_pcmcia.c (renamed from drivers/telephony/ixj_pcmcia.c) | 0
-rw-r--r--  drivers/staging/telephony/phonedev.c (renamed from drivers/telephony/phonedev.c) | 0
-rw-r--r--  drivers/staging/tidspbridge/Kconfig | 22
-rw-r--r--  drivers/staging/tidspbridge/Makefile | 4
-rw-r--r--  drivers/staging/tidspbridge/core/chnl_sm.c | 34
-rw-r--r--  drivers/staging/tidspbridge/core/dsp-clock.c | 3
-rw-r--r--  drivers/staging/tidspbridge/core/io_sm.c | 29
-rw-r--r--  drivers/staging/tidspbridge/core/msg_sm.c | 3
-rw-r--r--  drivers/staging/tidspbridge/core/tiomap3430.c | 19
-rw-r--r--  drivers/staging/tidspbridge/core/tiomap3430_pwr.c | 1
-rw-r--r--  drivers/staging/tidspbridge/core/tiomap_io.c | 18
-rw-r--r--  drivers/staging/tidspbridge/core/wdt.c | 24
-rw-r--r--  drivers/staging/tidspbridge/gen/gh.c | 18
-rw-r--r--  drivers/staging/tidspbridge/gen/uuidutil.c | 7
-rw-r--r--  drivers/staging/tidspbridge/include/dspbridge/_chnl_sm.h | 4
-rw-r--r--  drivers/staging/tidspbridge/include/dspbridge/chnl.h | 29
-rw-r--r--  drivers/staging/tidspbridge/include/dspbridge/cmm.h | 30
-rw-r--r--  drivers/staging/tidspbridge/include/dspbridge/cod.h | 29
-rw-r--r--  drivers/staging/tidspbridge/include/dspbridge/dbc.h | 46
-rw-r--r--  drivers/staging/tidspbridge/include/dspbridge/dev.h | 27
-rw-r--r--  drivers/staging/tidspbridge/include/dspbridge/disp.h | 31
-rw-r--r--  drivers/staging/tidspbridge/include/dspbridge/dmm.h | 4
-rw-r--r--  drivers/staging/tidspbridge/include/dspbridge/drv.h | 23
-rw-r--r--  drivers/staging/tidspbridge/include/dspbridge/gh.h | 2
-rw-r--r--  drivers/staging/tidspbridge/include/dspbridge/io.h | 29
-rw-r--r--  drivers/staging/tidspbridge/include/dspbridge/io_sm.h | 2
-rw-r--r--  drivers/staging/tidspbridge/include/dspbridge/msg.h | 27
-rw-r--r--  drivers/staging/tidspbridge/include/dspbridge/nldr.h | 2
-rw-r--r--  drivers/staging/tidspbridge/include/dspbridge/nldrdefs.h | 34
-rw-r--r--  drivers/staging/tidspbridge/include/dspbridge/node.h | 41
-rw-r--r--  drivers/staging/tidspbridge/include/dspbridge/nodepriv.h | 1
-rw-r--r--  drivers/staging/tidspbridge/include/dspbridge/proc.h | 28
-rw-r--r--  drivers/staging/tidspbridge/include/dspbridge/rmm.h | 25
-rw-r--r--  drivers/staging/tidspbridge/include/dspbridge/strm.h | 38
-rw-r--r--  drivers/staging/tidspbridge/pmgr/chnl.c | 47
-rw-r--r--  drivers/staging/tidspbridge/pmgr/cmm.c | 97
-rw-r--r--  drivers/staging/tidspbridge/pmgr/cod.c | 103
-rw-r--r--  drivers/staging/tidspbridge/pmgr/dbll.c | 125
-rw-r--r--  drivers/staging/tidspbridge/pmgr/dev.c | 182
-rw-r--r--  drivers/staging/tidspbridge/pmgr/dmm.c | 46
-rw-r--r--  drivers/staging/tidspbridge/pmgr/dspapi.c | 82
-rw-r--r--  drivers/staging/tidspbridge/pmgr/io.c | 45
-rw-r--r--  drivers/staging/tidspbridge/pmgr/msg.c | 38
-rw-r--r--  drivers/staging/tidspbridge/rmgr/dbdcd.c | 103
-rw-r--r--  drivers/staging/tidspbridge/rmgr/disp.c | 69
-rw-r--r--  drivers/staging/tidspbridge/rmgr/drv.c | 74
-rw-r--r--  drivers/staging/tidspbridge/rmgr/drv_interface.c | 366
-rw-r--r--  drivers/staging/tidspbridge/rmgr/drv_interface.h | 28
-rw-r--r--  drivers/staging/tidspbridge/rmgr/dspdrv.c | 5
-rw-r--r--  drivers/staging/tidspbridge/rmgr/mgr.c | 45
-rw-r--r--  drivers/staging/tidspbridge/rmgr/nldr.c | 99
-rw-r--r--  drivers/staging/tidspbridge/rmgr/node.c | 129
-rw-r--r--  drivers/staging/tidspbridge/rmgr/proc.c | 119
-rw-r--r--  drivers/staging/tidspbridge/rmgr/rmm.c | 56
-rw-r--r--  drivers/staging/tidspbridge/rmgr/strm.c | 114
-rw-r--r--  drivers/staging/usbip/stub.h | 1
-rw-r--r--  drivers/staging/usbip/stub_dev.c | 2
-rw-r--r--  drivers/staging/usbip/stub_rx.c | 9
-rw-r--r--  drivers/staging/usbip/usbip_common.c | 11
-rw-r--r--  drivers/staging/usbip/usbip_common.h | 2
-rw-r--r--  drivers/staging/usbip/vhci_hcd.c | 41
-rw-r--r--  drivers/staging/usbip/vhci_rx.c | 3
-rw-r--r--  drivers/staging/vme/devices/vme_pio2.h | 4
-rw-r--r--  drivers/staging/vme/devices/vme_pio2_gpio.c | 4
-rw-r--r--  drivers/staging/vme/vme.h | 2
-rw-r--r--  drivers/staging/vt6655/bssdb.c | 4
-rw-r--r--  drivers/staging/vt6655/ioctl.c | 23
-rw-r--r--  drivers/staging/vt6656/bssdb.c | 4
-rw-r--r--  drivers/staging/vt6656/iwctl.c | 230
-rw-r--r--  drivers/staging/vt6656/iwctl.h | 13
-rw-r--r--  drivers/staging/vt6656/main_usb.c | 13
-rw-r--r--  drivers/staging/vt6656/wpactl.c | 937
-rw-r--r--  drivers/staging/wlan-ng/cfg80211.c | 2
-rw-r--r--  drivers/staging/wlan-ng/p80211netdev.c | 2
-rw-r--r--  drivers/staging/wlan-ng/prism2mgmt.c | 1
-rw-r--r--  drivers/staging/xgifb/XGI_main.h | 78
-rw-r--r--  drivers/staging/xgifb/XGI_main_26.c | 208
-rw-r--r--  drivers/staging/xgifb/XGIfb.h | 2
-rw-r--r--  drivers/staging/xgifb/vb_def.h | 178
-rw-r--r--  drivers/staging/xgifb/vb_init.c | 20
-rw-r--r--  drivers/staging/xgifb/vb_setmode.c | 836
-rw-r--r--  drivers/staging/xgifb/vb_struct.h | 79
-rw-r--r--  drivers/staging/xgifb/vb_table.h | 346
-rw-r--r--  drivers/staging/xgifb/vgatypes.h | 9
-rw-r--r--  drivers/staging/zcache/Kconfig | 13
-rw-r--r--  drivers/staging/zcache/tmem.h | 2
-rw-r--r--  drivers/staging/zcache/zcache-main.c | 237
-rw-r--r--  drivers/staging/zram/Kconfig | 10
-rw-r--r--  drivers/staging/zram/Makefile | 1
-rw-r--r--  drivers/staging/zram/zram_drv.c | 116
-rw-r--r--  drivers/staging/zram/zram_drv.h | 12
-rw-r--r--  drivers/staging/zram/zram_sysfs.c | 4
-rw-r--r--  drivers/staging/zsmalloc/Kconfig | 14
-rw-r--r--  drivers/staging/zsmalloc/Makefile | 3
-rw-r--r--  drivers/staging/zsmalloc/zsmalloc-main.c | 745
-rw-r--r--  drivers/staging/zsmalloc/zsmalloc.h | 31
-rw-r--r--  drivers/staging/zsmalloc/zsmalloc_int.h | 155
378 files changed, 37807 insertions, 14013 deletions
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 5afe5d1f199..decf8e42085 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -40,8 +40,6 @@ source "drivers/net/Kconfig"
source "drivers/isdn/Kconfig"
-source "drivers/telephony/Kconfig"
-
# input before char - char/joystick depends on it. As does USB.
source "drivers/input/Kconfig"
diff --git a/drivers/Makefile b/drivers/Makefile
index c07be024b96..932e8bf2035 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -86,7 +86,6 @@ obj-$(CONFIG_POWER_SUPPLY) += power/
obj-$(CONFIG_HWMON) += hwmon/
obj-$(CONFIG_THERMAL) += thermal/
obj-$(CONFIG_WATCHDOG) += watchdog/
-obj-$(CONFIG_PHONE) += telephony/
obj-$(CONFIG_MD) += md/
obj-$(CONFIG_BT) += bluetooth/
obj-$(CONFIG_ACCESSIBILITY) += accessibility/
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 16570aa84aa..d3d18e89cb5 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -662,6 +662,13 @@ config VMWARE_PVSCSI
To compile this driver as a module, choose M here: the
module will be called vmw_pvscsi.
+config HYPERV_STORAGE
+ tristate "Microsoft Hyper-V virtual storage driver"
+ depends on SCSI && HYPERV
+ default HYPERV
+ help
+ Select this option to enable the Hyper-V virtual storage driver.
+
config LIBFC
tristate "LibFC module"
select SCSI_FC_ATTRS
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 2b887498be5..e4c1a69f8fa 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -142,6 +142,7 @@ obj-$(CONFIG_SCSI_BNX2_ISCSI) += libiscsi.o bnx2i/
obj-$(CONFIG_BE2ISCSI) += libiscsi.o be2iscsi/
obj-$(CONFIG_SCSI_PMCRAID) += pmcraid.o
obj-$(CONFIG_VMWARE_PVSCSI) += vmw_pvscsi.o
+obj-$(CONFIG_HYPERV_STORAGE) += hv_storvsc.o
obj-$(CONFIG_ARM) += arm/
@@ -170,6 +171,8 @@ scsi_mod-$(CONFIG_SCSI_PROC_FS) += scsi_proc.o
scsi_mod-y += scsi_trace.o
scsi_mod-$(CONFIG_PM) += scsi_pm.o
+hv_storvsc-y := storvsc_drv.o
+
scsi_tgt-y += scsi_tgt_lib.o scsi_tgt_if.o
sd_mod-objs := sd.o
diff --git a/drivers/staging/hv/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index eb853f71089..695ffc36e02 100644
--- a/drivers/staging/hv/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -42,56 +42,23 @@
#include <scsi/scsi_devinfo.h>
#include <scsi/scsi_dbg.h>
+/*
+ * All wire protocol details (storage protocol between the guest and the host)
+ * are consolidated here.
+ *
+ * Begin protocol definitions.
+ */
-#define STORVSC_MIN_BUF_NR 64
-#define STORVSC_RING_BUFFER_SIZE (20*PAGE_SIZE)
-static int storvsc_ringbuffer_size = STORVSC_RING_BUFFER_SIZE;
-
-module_param(storvsc_ringbuffer_size, int, S_IRUGO);
-MODULE_PARM_DESC(storvsc_ringbuffer_size, "Ring buffer size (bytes)");
-
-/* to alert the user that structure sizes may be mismatched even though the */
-/* protocol versions match. */
-
-
-#define REVISION_STRING(REVISION_) #REVISION_
-#define FILL_VMSTOR_REVISION(RESULT_LVALUE_) \
- do { \
- char *revision_string \
- = REVISION_STRING($Rev : 6 $) + 6; \
- RESULT_LVALUE_ = 0; \
- while (*revision_string >= '0' \
- && *revision_string <= '9') { \
- RESULT_LVALUE_ *= 10; \
- RESULT_LVALUE_ += *revision_string - '0'; \
- revision_string++; \
- } \
- } while (0)
-
-/* Major/minor macros. Minor version is in LSB, meaning that earlier flat */
-/* version numbers will be interpreted as "0.x" (i.e., 1 becomes 0.1). */
-#define VMSTOR_PROTOCOL_MAJOR(VERSION_) (((VERSION_) >> 8) & 0xff)
-#define VMSTOR_PROTOCOL_MINOR(VERSION_) (((VERSION_)) & 0xff)
-#define VMSTOR_PROTOCOL_VERSION(MAJOR_, MINOR_) ((((MAJOR_) & 0xff) << 8) | \
- (((MINOR_) & 0xff)))
-#define VMSTOR_INVALID_PROTOCOL_VERSION (-1)
-
-/* Version history: */
-/* V1 Beta 0.1 */
-/* V1 RC < 2008/1/31 1.0 */
-/* V1 RC > 2008/1/31 2.0 */
-#define VMSTOR_PROTOCOL_VERSION_CURRENT VMSTOR_PROTOCOL_VERSION(4, 2)
-
-
-
+/*
+ * Version history:
+ * V1 Beta: 0.1
+ * V1 RC < 2008/1/31: 1.0
+ * V1 RC > 2008/1/31: 2.0
+ * Win7: 4.2
+ */
-/* This will get replaced with the max transfer length that is possible on */
-/* the host adapter. */
-/* The max transfer length will be published when we offer a vmbus channel. */
-#define MAX_TRANSFER_LENGTH 0x40000
-#define DEFAULT_PACKET_SIZE (sizeof(struct vmdata_gpa_direct) + \
- sizeof(struct vstor_packet) + \
- sizesizeof(u64) * (MAX_TRANSFER_LENGTH / PAGE_SIZE)))
+#define VMSTOR_CURRENT_MAJOR 4
+#define VMSTOR_CURRENT_MINOR 2
/* Packet structure describing virtual storage requests. */
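VMSTOR_CURRENT_MAJOR and VMSTOR_CURRENT_MINOR replace the deleted flat-version macros; the packing convention (major in the high byte, minor in the low byte) is unchanged, so the Win7 protocol 4.2 encodes as 0x0402. A minimal user-space sketch of the round trip; pack_version() is a hypothetical stand-in for the driver's storvsc_get_version():

#include <assert.h>
#include <stdint.h>

/* Pack major/minor the way storvsc_get_version() does: major in the MSB. */
static uint16_t pack_version(uint8_t major, uint8_t minor)
{
	return (uint16_t)((major << 8) | minor);
}

int main(void)
{
	uint16_t v = pack_version(4, 2);	/* Win7 protocol: 4.2 */

	assert(v == 0x0402);
	assert((v >> 8) == 4 && (v & 0xff) == 2);	/* unpack again */
	return 0;
}
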
@@ -115,35 +82,31 @@ enum vstor_packet_operation {
* this remains the same across the write regardless of 32/64 bit
* note: it's patterned off the SCSI_PASS_THROUGH structure
*/
-#define CDB16GENERIC_LENGTH 0x10
-
-#ifndef SENSE_BUFFER_SIZE
-#define SENSE_BUFFER_SIZE 0x12
-#endif
-
-#define MAX_DATA_BUF_LEN_WITH_PADDING 0x14
+#define STORVSC_MAX_CMD_LEN 0x10
+#define STORVSC_SENSE_BUFFER_SIZE 0x12
+#define STORVSC_MAX_BUF_LEN_WITH_PADDING 0x14
struct vmscsi_request {
- unsigned short length;
- unsigned char srb_status;
- unsigned char scsi_status;
+ u16 length;
+ u8 srb_status;
+ u8 scsi_status;
- unsigned char port_number;
- unsigned char path_id;
- unsigned char target_id;
- unsigned char lun;
+ u8 port_number;
+ u8 path_id;
+ u8 target_id;
+ u8 lun;
- unsigned char cdb_length;
- unsigned char sense_info_length;
- unsigned char data_in;
- unsigned char reserved;
+ u8 cdb_length;
+ u8 sense_info_length;
+ u8 data_in;
+ u8 reserved;
- unsigned int data_transfer_length;
+ u32 data_transfer_length;
union {
- unsigned char cdb[CDB16GENERIC_LENGTH];
- unsigned char sense_data[SENSE_BUFFER_SIZE];
- unsigned char reserved_array[MAX_DATA_BUF_LEN_WITH_PADDING];
+ u8 cdb[STORVSC_MAX_CMD_LEN];
+ u8 sense_data[STORVSC_SENSE_BUFFER_SIZE];
+ u8 reserved_array[STORVSC_MAX_BUF_LEN_WITH_PADDING];
};
} __attribute((packed));
@@ -153,32 +116,36 @@ struct vmscsi_request {
* properties of the channel.
*/
struct vmstorage_channel_properties {
- unsigned short protocol_version;
- unsigned char path_id;
- unsigned char target_id;
+ u16 protocol_version;
+ u8 path_id;
+ u8 target_id;
/* Note: port number is only really known on the client side */
- unsigned int port_number;
- unsigned int flags;
- unsigned int max_transfer_bytes;
+ u32 port_number;
+ u32 flags;
+ u32 max_transfer_bytes;
- /* This id is unique for each channel and will correspond with */
- /* vendor specific data in the inquirydata */
- unsigned long long unique_id;
+ /*
+ * This id is unique for each channel and will correspond with
+ * vendor specific data in the inquiry data.
+ */
+
+ u64 unique_id;
} __packed;
/* This structure is sent during the storage protocol negotiations. */
struct vmstorage_protocol_version {
/* Major (MSW) and minor (LSW) version numbers. */
- unsigned short major_minor;
+ u16 major_minor;
/*
* Revision number is auto-incremented whenever this file is changed
* (See FILL_VMSTOR_REVISION macro above). Mismatch does not
* definitely indicate incompatibility--but it does indicate mismatched
* builds.
+ * This is only used on the Windows side. Just set it to 0.
*/
- unsigned short revision;
+ u16 revision;
} __packed;
/* Channel Property Flags */
@@ -190,10 +157,10 @@ struct vstor_packet {
enum vstor_packet_operation operation;
/* Flags - see below for values */
- unsigned int flags;
+ u32 flags;
/* Status of the request returned from the server side. */
- unsigned int status;
+ u32 status;
/* Data payload area */
union {
@@ -211,18 +178,47 @@ struct vstor_packet {
};
} __packed;
-/* Packet flags */
/*
+ * Packet Flags:
+ *
* This flag indicates that the server should send back a completion for this
* packet.
*/
+
#define REQUEST_COMPLETION_FLAG 0x1
-/* This is the set of flags that the vsc can set in any packets it sends */
-#define VSC_LEGAL_FLAGS (REQUEST_COMPLETION_FLAG)
+/* Matches Windows-end */
+enum storvsc_request_type {
+ WRITE_TYPE = 0,
+ READ_TYPE,
+ UNKNOWN_TYPE,
+};
+/*
+ * SRB status codes and masks; a subset of the codes used here.
+ */
-/* Defines */
+#define SRB_STATUS_AUTOSENSE_VALID 0x80
+#define SRB_STATUS_INVALID_LUN 0x20
+#define SRB_STATUS_SUCCESS 0x01
+#define SRB_STATUS_ERROR 0x04
+
+/*
+ * This is the end of Protocol specific defines.
+ */
+
+
+/*
+ * We setup a mempool to allocate request structures for this driver
+ * on a per-lun basis. The following define specifies the number of
+ * elements in the pool.
+ */
+
+#define STORVSC_MIN_BUF_NR 64
+static int storvsc_ringbuffer_size = (20 * PAGE_SIZE);
+
+module_param(storvsc_ringbuffer_size, int, S_IRUGO);
+MODULE_PARM_DESC(storvsc_ringbuffer_size, "Ring buffer size (bytes)");
#define STORVSC_MAX_IO_REQUESTS 128
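
Note that the SRB status values above are not a uniform bitmask: SRB_STATUS_SUCCESS, SRB_STATUS_ERROR and SRB_STATUS_INVALID_LUN are codes compared for equality, while SRB_STATUS_AUTOSENSE_VALID is a flag OR-ed on top of a code. The driver below tests the flag with & and the codes with ==; the extra masking step in this user-space sketch is a defensive variant, not quoted from the patch:

#include <stdio.h>

#define SRB_STATUS_AUTOSENSE_VALID 0x80
#define SRB_STATUS_INVALID_LUN     0x20
#define SRB_STATUS_SUCCESS         0x01
#define SRB_STATUS_ERROR           0x04

int main(void)
{
	/* e.g. an error completion that also carries autosense data */
	unsigned char srb_status = SRB_STATUS_ERROR | SRB_STATUS_AUTOSENSE_VALID;

	if (srb_status & SRB_STATUS_AUTOSENSE_VALID)
		printf("autosense data valid\n");
	/* mask the flag off before comparing against the status codes */
	if ((srb_status & ~SRB_STATUS_AUTOSENSE_VALID) == SRB_STATUS_ERROR)
		printf("host reported SRB_STATUS_ERROR\n");
	return 0;
}
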
@@ -235,27 +231,23 @@ struct vstor_packet {
#define STORVSC_MAX_LUNS_PER_TARGET 64
#define STORVSC_MAX_TARGETS 1
#define STORVSC_MAX_CHANNELS 1
-#define STORVSC_MAX_CMD_LEN 16
-/* Matches Windows-end */
-enum storvsc_request_type {
- WRITE_TYPE,
- READ_TYPE,
- UNKNOWN_TYPE,
-};
-struct hv_storvsc_request {
+struct storvsc_cmd_request {
+ struct list_head entry;
+ struct scsi_cmnd *cmd;
+
+ unsigned int bounce_sgl_count;
+ struct scatterlist *bounce_sgl;
+
struct hv_device *device;
/* Synchronize the request/response if needed */
struct completion wait_event;
unsigned char *sense_buffer;
- void *context;
- void (*on_io_completion)(struct hv_storvsc_request *request);
struct hv_multipage_buffer data_buffer;
-
struct vstor_packet vstor_packet;
};
@@ -281,8 +273,8 @@ struct storvsc_device {
unsigned char target_id;
/* Used for vsc/vsp channel reset process */
- struct hv_storvsc_request init_request;
- struct hv_storvsc_request reset_request;
+ struct storvsc_cmd_request init_request;
+ struct storvsc_cmd_request reset_request;
};
struct stor_mem_pools {
@@ -297,16 +289,6 @@ struct hv_host_device {
unsigned char target;
};
-struct storvsc_cmd_request {
- struct list_head entry;
- struct scsi_cmnd *cmd;
-
- unsigned int bounce_sgl_count;
- struct scatterlist *bounce_sgl;
-
- struct hv_storvsc_request request;
-};
-
struct storvsc_scan_work {
struct work_struct work;
struct Scsi_Host *host;
@@ -352,6 +334,34 @@ done:
kfree(wrk);
}
+/*
+ * Major/minor macros. Minor version is in LSB, meaning that earlier flat
+ * version numbers will be interpreted as "0.x" (i.e., 1 becomes 0.1).
+ */
+
+static inline u16 storvsc_get_version(u8 major, u8 minor)
+{
+ u16 version;
+
+ version = ((major << 8) | minor);
+ return version;
+}
+
+/*
+ * We can get incoming messages from the host that are not in response to
+ * messages that we have sent out. An example of this would be messages
+ * received by the guest to notify dynamic addition/removal of LUNs. To
+ * deal with potential race conditions where the driver may be in the
+ * midst of being unloaded when we might receive an unsolicited message
+ * from the host, we have implemented a mechanism to guarantee sequential
+ * consistency:
+ *
+ * 1) Once the device is marked as being destroyed, we will fail all
+ * outgoing messages.
+ * 2) We permit incoming messages when the device is being destroyed,
+ * only to properly account for messages already sent out.
+ */
+
static inline struct storvsc_device *get_out_stor_device(
struct hv_device *device)
{
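
The hunk context shows only the opening of get_out_stor_device(); under the two rules just described, plausible accessor bodies look like the following sketch (the destroy flag and the hv_get_drvdata() usage are assumptions drawn from the surrounding code, and the _sketch suffix marks these as illustrations rather than quotes from the patch):

static inline struct storvsc_device *get_out_stor_device_sketch(
					struct hv_device *device)
{
	struct storvsc_device *stor_device = hv_get_drvdata(device);

	/* Rule 1: once teardown starts, fail all outgoing messages. */
	if (stor_device && stor_device->destroy)
		stor_device = NULL;
	return stor_device;
}

static inline struct storvsc_device *get_in_stor_device_sketch(
					struct hv_device *device)
{
	/*
	 * Rule 2: keep accepting incoming messages during teardown so
	 * requests already on the wire can be accounted for and drained.
	 */
	return hv_get_drvdata(device);
}
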
@@ -398,10 +408,231 @@ get_in_err:
}
+static void destroy_bounce_buffer(struct scatterlist *sgl,
+ unsigned int sg_count)
+{
+ int i;
+ struct page *page_buf;
+
+ for (i = 0; i < sg_count; i++) {
+ page_buf = sg_page((&sgl[i]));
+ if (page_buf != NULL)
+ __free_page(page_buf);
+ }
+
+ kfree(sgl);
+}
+
+static int do_bounce_buffer(struct scatterlist *sgl, unsigned int sg_count)
+{
+ int i;
+
+ /* No need to check */
+ if (sg_count < 2)
+ return -1;
+
+ /* We have at least 2 sg entries */
+ for (i = 0; i < sg_count; i++) {
+ if (i == 0) {
+ /* make sure 1st one does not have hole */
+ if (sgl[i].offset + sgl[i].length != PAGE_SIZE)
+ return i;
+ } else if (i == sg_count - 1) {
+ /* make sure last one does not have hole */
+ if (sgl[i].offset != 0)
+ return i;
+ } else {
+ /* make sure no hole in the middle */
+ if (sgl[i].length != PAGE_SIZE || sgl[i].offset != 0)
+ return i;
+ }
+ }
+ return -1;
+}
+
+static struct scatterlist *create_bounce_buffer(struct scatterlist *sgl,
+ unsigned int sg_count,
+ unsigned int len,
+ int write)
+{
+ int i;
+ int num_pages;
+ struct scatterlist *bounce_sgl;
+ struct page *page_buf;
+ unsigned int buf_len = ((write == WRITE_TYPE) ? 0 : PAGE_SIZE);
+
+ num_pages = ALIGN(len, PAGE_SIZE) >> PAGE_SHIFT;
+
+ bounce_sgl = kcalloc(num_pages, sizeof(struct scatterlist), GFP_ATOMIC);
+ if (!bounce_sgl)
+ return NULL;
+
+ for (i = 0; i < num_pages; i++) {
+ page_buf = alloc_page(GFP_ATOMIC);
+ if (!page_buf)
+ goto cleanup;
+ sg_set_page(&bounce_sgl[i], page_buf, buf_len, 0);
+ }
+
+ return bounce_sgl;
+
+cleanup:
+ destroy_bounce_buffer(bounce_sgl, num_pages);
+ return NULL;
+}
+
+/* Assume the original sgl has enough room */
+static unsigned int copy_from_bounce_buffer(struct scatterlist *orig_sgl,
+ struct scatterlist *bounce_sgl,
+ unsigned int orig_sgl_count,
+ unsigned int bounce_sgl_count)
+{
+ int i;
+ int j = 0;
+ unsigned long src, dest;
+ unsigned int srclen, destlen, copylen;
+ unsigned int total_copied = 0;
+ unsigned long bounce_addr = 0;
+ unsigned long dest_addr = 0;
+ unsigned long flags;
+
+ local_irq_save(flags);
+
+ for (i = 0; i < orig_sgl_count; i++) {
+ dest_addr = (unsigned long)kmap_atomic(sg_page((&orig_sgl[i])),
+ KM_IRQ0) + orig_sgl[i].offset;
+ dest = dest_addr;
+ destlen = orig_sgl[i].length;
+
+ if (bounce_addr == 0)
+ bounce_addr =
+ (unsigned long)kmap_atomic(sg_page((&bounce_sgl[j])),
+ KM_IRQ0);
+
+ while (destlen) {
+ src = bounce_addr + bounce_sgl[j].offset;
+ srclen = bounce_sgl[j].length - bounce_sgl[j].offset;
+
+ copylen = min(srclen, destlen);
+ memcpy((void *)dest, (void *)src, copylen);
+
+ total_copied += copylen;
+ bounce_sgl[j].offset += copylen;
+ destlen -= copylen;
+ dest += copylen;
+
+ if (bounce_sgl[j].offset == bounce_sgl[j].length) {
+ /* full */
+ kunmap_atomic((void *)bounce_addr, KM_IRQ0);
+ j++;
+
+ /*
+ * It is possible that the number of elements
+ * in the bounce buffer may not be equal to
+ * the number of elements in the original
+ * scatter list. Handle this correctly.
+ */
+
+ if (j == bounce_sgl_count) {
+ /*
+ * We are done; cleanup and return.
+ */
+ kunmap_atomic((void *)(dest_addr -
+ orig_sgl[i].offset),
+ KM_IRQ0);
+ local_irq_restore(flags);
+ return total_copied;
+ }
+
+ /* if we need to use another bounce buffer */
+ if (destlen || i != orig_sgl_count - 1)
+ bounce_addr =
+ (unsigned long)kmap_atomic(
+ sg_page((&bounce_sgl[j])), KM_IRQ0);
+ } else if (destlen == 0 && i == orig_sgl_count - 1) {
+ /* unmap the last bounce that is < PAGE_SIZE */
+ kunmap_atomic((void *)bounce_addr, KM_IRQ0);
+ }
+ }
+
+ kunmap_atomic((void *)(dest_addr - orig_sgl[i].offset),
+ KM_IRQ0);
+ }
+
+ local_irq_restore(flags);
+
+ return total_copied;
+}
+
+/* Assume the bounce_sgl has enough room, i.e. it came from create_bounce_buffer() */
+static unsigned int copy_to_bounce_buffer(struct scatterlist *orig_sgl,
+ struct scatterlist *bounce_sgl,
+ unsigned int orig_sgl_count)
+{
+ int i;
+ int j = 0;
+ unsigned long src, dest;
+ unsigned int srclen, destlen, copylen;
+ unsigned int total_copied = 0;
+ unsigned long bounce_addr = 0;
+ unsigned long src_addr = 0;
+ unsigned long flags;
+
+ local_irq_save(flags);
+
+ for (i = 0; i < orig_sgl_count; i++) {
+ src_addr = (unsigned long)kmap_atomic(sg_page((&orig_sgl[i])),
+ KM_IRQ0) + orig_sgl[i].offset;
+ src = src_addr;
+ srclen = orig_sgl[i].length;
+
+ if (bounce_addr == 0)
+ bounce_addr =
+ (unsigned long)kmap_atomic(sg_page((&bounce_sgl[j])),
+ KM_IRQ0);
+
+ while (srclen) {
+ /* assume bounce offset always == 0 */
+ dest = bounce_addr + bounce_sgl[j].length;
+ destlen = PAGE_SIZE - bounce_sgl[j].length;
+
+ copylen = min(srclen, destlen);
+ memcpy((void *)dest, (void *)src, copylen);
+
+ total_copied += copylen;
+ bounce_sgl[j].length += copylen;
+ srclen -= copylen;
+ src += copylen;
+
+ if (bounce_sgl[j].length == PAGE_SIZE) {
+ /* full..move to next entry */
+ kunmap_atomic((void *)bounce_addr, KM_IRQ0);
+ j++;
+
+ /* if we need to use another bounce buffer */
+ if (srclen || i != orig_sgl_count - 1)
+ bounce_addr =
+ (unsigned long)kmap_atomic(
+ sg_page((&bounce_sgl[j])), KM_IRQ0);
+
+ } else if (srclen == 0 && i == orig_sgl_count - 1) {
+ /* unmap the last bounce that is < PAGE_SIZE */
+ kunmap_atomic((void *)bounce_addr, KM_IRQ0);
+ }
+ }
+
+ kunmap_atomic((void *)(src_addr - orig_sgl[i].offset), KM_IRQ0);
+ }
+
+ local_irq_restore(flags);
+
+ return total_copied;
+}
+
static int storvsc_channel_init(struct hv_device *device)
{
struct storvsc_device *stor_device;
- struct hv_storvsc_request *request;
+ struct storvsc_cmd_request *request;
struct vstor_packet *vstor_packet;
int ret, t;
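
The do_bounce_buffer() helper added above encodes a single rule: a scatterlist can be handed to the host without bouncing only if its pages form one contiguous byte range, meaning the first entry runs to the end of its page, the last starts at offset 0, and every middle entry is exactly one full page. A self-contained user-space analogue of that check, with illustrative sample lists:

#include <stdio.h>

#define PAGE_SIZE 4096u

struct sg { unsigned int offset, length; };

/* Returns the index of the first "hole", or -1 if the list is contiguous. */
static int find_hole(const struct sg *sgl, unsigned int n)
{
	unsigned int i;

	if (n < 2)
		return -1;
	for (i = 0; i < n; i++) {
		if (i == 0 && sgl[i].offset + sgl[i].length != PAGE_SIZE)
			return (int)i;	/* first entry stops short of page end */
		if (i == n - 1 && sgl[i].offset != 0)
			return (int)i;	/* last entry does not start at 0 */
		if (i > 0 && i < n - 1 &&
		    (sgl[i].offset != 0 || sgl[i].length != PAGE_SIZE))
			return (int)i;	/* middle entry is not a full page */
	}
	return -1;
}

int main(void)
{
	struct sg ok[]  = { {512, 3584}, {0, 4096}, {0, 1024} };
	struct sg bad[] = { {512, 3584}, {0, 2048}, {0, 1024} };

	printf("%d %d\n", find_hole(ok, 3), find_hole(bad, 3)); /* -1 1 */
	return 0;
}
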
@@ -416,7 +647,7 @@ static int storvsc_channel_init(struct hv_device *device)
* Now, initiate the vsc/vsp initialization protocol on the open
* channel
*/
- memset(request, 0, sizeof(struct hv_storvsc_request));
+ memset(request, 0, sizeof(struct storvsc_cmd_request));
init_completion(&request->wait_event);
vstor_packet->operation = VSTOR_OPERATION_BEGIN_INITIALIZATION;
vstor_packet->flags = REQUEST_COMPLETION_FLAG;
@@ -445,8 +676,13 @@ static int storvsc_channel_init(struct hv_device *device)
vstor_packet->operation = VSTOR_OPERATION_QUERY_PROTOCOL_VERSION;
vstor_packet->flags = REQUEST_COMPLETION_FLAG;
- vstor_packet->version.major_minor = VMSTOR_PROTOCOL_VERSION_CURRENT;
- FILL_VMSTOR_REVISION(vstor_packet->version.revision);
+ vstor_packet->version.major_minor =
+ storvsc_get_version(VMSTOR_CURRENT_MAJOR, VMSTOR_CURRENT_MINOR);
+
+ /*
+ * The revision number is only used in Windows; set it to 0.
+ */
+ vstor_packet->version.revision = 0;
ret = vmbus_sendpacket(device->channel, vstor_packet,
sizeof(struct vstor_packet),
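
Each negotiation step above (BEGIN_INITIALIZATION, QUERY_PROTOCOL_VERSION, and the rest) follows the same shape: send an in-band vmbus packet with the completion flag set, block on the request's completion, then validate the echoed operation and status. A sketch of one such round trip; the helper name and the 5-second timeout are assumptions, not part of the patch:

static int storvsc_send_and_wait(struct hv_device *device,
				 struct storvsc_cmd_request *request)
{
	struct vstor_packet *vstor_packet = &request->vstor_packet;
	int ret, t;

	ret = vmbus_sendpacket(device->channel, vstor_packet,
			       sizeof(struct vstor_packet),
			       (unsigned long)request,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret != 0)
		return ret;

	t = wait_for_completion_timeout(&request->wait_event, 5 * HZ);
	if (t == 0)
		return -ETIMEDOUT;

	/* The host echoes back COMPLETE_IO with status 0 on success. */
	if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO ||
	    vstor_packet->status != 0)
		return -EINVAL;

	return 0;
}
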
@@ -524,9 +760,84 @@ cleanup:
return ret;
}
+
+static void storvsc_command_completion(struct storvsc_cmd_request *cmd_request)
+{
+ struct scsi_cmnd *scmnd = cmd_request->cmd;
+ struct hv_host_device *host_dev = shost_priv(scmnd->device->host);
+ void (*scsi_done_fn)(struct scsi_cmnd *);
+ struct scsi_sense_hdr sense_hdr;
+ struct vmscsi_request *vm_srb;
+ struct storvsc_scan_work *wrk;
+ struct stor_mem_pools *memp = scmnd->device->hostdata;
+
+ vm_srb = &cmd_request->vstor_packet.vm_srb;
+ if (cmd_request->bounce_sgl_count) {
+ if (vm_srb->data_in == READ_TYPE)
+ copy_from_bounce_buffer(scsi_sglist(scmnd),
+ cmd_request->bounce_sgl,
+ scsi_sg_count(scmnd),
+ cmd_request->bounce_sgl_count);
+ destroy_bounce_buffer(cmd_request->bounce_sgl,
+ cmd_request->bounce_sgl_count);
+ }
+
+ /*
+ * If there is an error; offline the device since all
+ * error recovery strategies would have already been
+ * deployed on the host side.
+ */
+ if (vm_srb->srb_status == SRB_STATUS_ERROR)
+ scmnd->result = DID_TARGET_FAILURE << 16;
+ else
+ scmnd->result = vm_srb->scsi_status;
+
+ /*
+ * If the LUN is invalid; remove the device.
+ */
+ if (vm_srb->srb_status == SRB_STATUS_INVALID_LUN) {
+ struct storvsc_device *stor_dev;
+ struct hv_device *dev = host_dev->dev;
+ struct Scsi_Host *host;
+
+ stor_dev = get_in_stor_device(dev);
+ host = stor_dev->host;
+
+ wrk = kmalloc(sizeof(struct storvsc_scan_work),
+ GFP_ATOMIC);
+ if (!wrk) {
+ scmnd->result = DID_TARGET_FAILURE << 16;
+ } else {
+ wrk->host = host;
+ wrk->lun = vm_srb->lun;
+ INIT_WORK(&wrk->work, storvsc_remove_lun);
+ schedule_work(&wrk->work);
+ }
+ }
+
+ if (scmnd->result) {
+ if (scsi_normalize_sense(scmnd->sense_buffer,
+ SCSI_SENSE_BUFFERSIZE, &sense_hdr))
+ scsi_print_sense_hdr("storvsc", &sense_hdr);
+ }
+
+ scsi_set_resid(scmnd,
+ cmd_request->data_buffer.len -
+ vm_srb->data_transfer_length);
+
+ scsi_done_fn = scmnd->scsi_done;
+
+ scmnd->host_scribble = NULL;
+ scmnd->scsi_done = NULL;
+
+ scsi_done_fn(scmnd);
+
+ mempool_free(cmd_request, memp->request_mempool);
+}
+
static void storvsc_on_io_completion(struct hv_device *device,
struct vstor_packet *vstor_packet,
- struct hv_storvsc_request *request)
+ struct storvsc_cmd_request *request)
{
struct storvsc_device *stor_device;
struct vstor_packet *stor_pkt;
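
In the completion path above, scmnd->result is the SCSI midlayer's packed 32-bit result word: status byte in bits 0-7, message byte in bits 8-15, host byte in bits 16-23, driver byte in bits 24-31. That is why a host-level failure is reported as DID_TARGET_FAILURE << 16 while a device SCSI status is assigned directly. A stand-alone illustration (0x10 is DID_TARGET_FAILURE's value in <scsi/scsi.h> of this era):

#include <stdio.h>

#define DID_TARGET_FAILURE 0x10	/* host-byte code, per <scsi/scsi.h> */

int main(void)
{
	unsigned int result = DID_TARGET_FAILURE << 16;

	/* prints: host byte 0x10, status byte 0x00 */
	printf("host byte 0x%02x, status byte 0x%02x\n",
	       (result >> 16) & 0xff, result & 0xff);
	return 0;
}
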
@@ -546,9 +857,9 @@ static void storvsc_on_io_completion(struct hv_device *device,
*/
if ((stor_pkt->vm_srb.cdb[0] == INQUIRY) ||
- (stor_pkt->vm_srb.cdb[0] == MODE_SENSE)) {
+ (stor_pkt->vm_srb.cdb[0] == MODE_SENSE)) {
vstor_packet->vm_srb.scsi_status = 0;
- vstor_packet->vm_srb.srb_status = 0x1;
+ vstor_packet->vm_srb.srb_status = SRB_STATUS_SUCCESS;
}
@@ -559,7 +870,7 @@ static void storvsc_on_io_completion(struct hv_device *device,
vstor_packet->vm_srb.sense_info_length;
if (vstor_packet->vm_srb.scsi_status != 0 ||
- vstor_packet->vm_srb.srb_status != 1){
+ vstor_packet->vm_srb.srb_status != SRB_STATUS_SUCCESS){
dev_warn(&device->device,
"cmd 0x%x scsi status 0x%x srb status 0x%x\n",
stor_pkt->vm_srb.cdb[0],
@@ -569,7 +880,8 @@ static void storvsc_on_io_completion(struct hv_device *device,
if ((vstor_packet->vm_srb.scsi_status & 0xFF) == 0x02) {
/* CHECK_CONDITION */
- if (vstor_packet->vm_srb.srb_status & 0x80) {
+ if (vstor_packet->vm_srb.srb_status &
+ SRB_STATUS_AUTOSENSE_VALID) {
/* autosense data available */
dev_warn(&device->device,
"stor pkt %p autosense data valid - len %d\n",
@@ -586,7 +898,7 @@ static void storvsc_on_io_completion(struct hv_device *device,
stor_pkt->vm_srb.data_transfer_length =
vstor_packet->vm_srb.data_transfer_length;
- request->on_io_completion(request);
+ storvsc_command_completion(request);
if (atomic_dec_and_test(&stor_device->num_outstanding_req) &&
stor_device->drain_notify)
@@ -597,7 +909,7 @@ static void storvsc_on_io_completion(struct hv_device *device,
static void storvsc_on_receive(struct hv_device *device,
struct vstor_packet *vstor_packet,
- struct hv_storvsc_request *request)
+ struct storvsc_cmd_request *request)
{
struct storvsc_scan_work *work;
struct storvsc_device *stor_device;
@@ -631,7 +943,7 @@ static void storvsc_on_channel_callback(void *context)
u32 bytes_recvd;
u64 request_id;
unsigned char packet[ALIGN(sizeof(struct vstor_packet), 8)];
- struct hv_storvsc_request *request;
+ struct storvsc_cmd_request *request;
int ret;
@@ -645,7 +957,7 @@ static void storvsc_on_channel_callback(void *context)
&bytes_recvd, &request_id);
if (ret == 0 && bytes_recvd > 0) {
- request = (struct hv_storvsc_request *)
+ request = (struct storvsc_cmd_request *)
(unsigned long)request_id;
if ((request == &stor_device->init_request) ||
@@ -674,7 +986,6 @@ static int storvsc_connect_to_vsp(struct hv_device *device, u32 ring_size)
memset(&props, 0, sizeof(struct vmstorage_channel_properties));
- /* Open the channel */
ret = vmbus_open(device->channel,
ring_size,
ring_size,
@@ -728,7 +1039,7 @@ static int storvsc_dev_remove(struct hv_device *device)
}
static int storvsc_do_io(struct hv_device *device,
- struct hv_storvsc_request *request)
+ struct storvsc_cmd_request *request)
{
struct storvsc_device *stor_device;
struct vstor_packet *vstor_packet;
@@ -749,7 +1060,7 @@ static int storvsc_do_io(struct hv_device *device,
vstor_packet->vm_srb.length = sizeof(struct vmscsi_request);
- vstor_packet->vm_srb.sense_info_length = SENSE_BUFFER_SIZE;
+ vstor_packet->vm_srb.sense_info_length = STORVSC_SENSE_BUFFER_SIZE;
vstor_packet->vm_srb.data_transfer_length =
@@ -779,18 +1090,6 @@ static int storvsc_do_io(struct hv_device *device,
return ret;
}
-static void storvsc_get_ide_info(struct hv_device *dev, int *target, int *path)
-{
- *target =
- dev->dev_instance.b[5] << 8 | dev->dev_instance.b[4];
-
- *path =
- dev->dev_instance.b[3] << 24 |
- dev->dev_instance.b[2] << 16 |
- dev->dev_instance.b[1] << 8 | dev->dev_instance.b[0];
-}
-
-
static int storvsc_device_alloc(struct scsi_device *sdevice)
{
struct stor_mem_pools *memp;
@@ -849,245 +1148,6 @@ static int storvsc_device_configure(struct scsi_device *sdevice)
return 0;
}
-static void destroy_bounce_buffer(struct scatterlist *sgl,
- unsigned int sg_count)
-{
- int i;
- struct page *page_buf;
-
- for (i = 0; i < sg_count; i++) {
- page_buf = sg_page((&sgl[i]));
- if (page_buf != NULL)
- __free_page(page_buf);
- }
-
- kfree(sgl);
-}
-
-static int do_bounce_buffer(struct scatterlist *sgl, unsigned int sg_count)
-{
- int i;
-
- /* No need to check */
- if (sg_count < 2)
- return -1;
-
- /* We have at least 2 sg entries */
- for (i = 0; i < sg_count; i++) {
- if (i == 0) {
- /* make sure 1st one does not have hole */
- if (sgl[i].offset + sgl[i].length != PAGE_SIZE)
- return i;
- } else if (i == sg_count - 1) {
- /* make sure last one does not have hole */
- if (sgl[i].offset != 0)
- return i;
- } else {
- /* make sure no hole in the middle */
- if (sgl[i].length != PAGE_SIZE || sgl[i].offset != 0)
- return i;
- }
- }
- return -1;
-}
-
-static struct scatterlist *create_bounce_buffer(struct scatterlist *sgl,
- unsigned int sg_count,
- unsigned int len,
- int write)
-{
- int i;
- int num_pages;
- struct scatterlist *bounce_sgl;
- struct page *page_buf;
- unsigned int buf_len = ((write == WRITE_TYPE) ? 0 : PAGE_SIZE);
-
- num_pages = ALIGN(len, PAGE_SIZE) >> PAGE_SHIFT;
-
- bounce_sgl = kcalloc(num_pages, sizeof(struct scatterlist), GFP_ATOMIC);
- if (!bounce_sgl)
- return NULL;
-
- for (i = 0; i < num_pages; i++) {
- page_buf = alloc_page(GFP_ATOMIC);
- if (!page_buf)
- goto cleanup;
- sg_set_page(&bounce_sgl[i], page_buf, buf_len, 0);
- }
-
- return bounce_sgl;
-
-cleanup:
- destroy_bounce_buffer(bounce_sgl, num_pages);
- return NULL;
-}
-
-
-/* Assume the original sgl has enough room */
-static unsigned int copy_from_bounce_buffer(struct scatterlist *orig_sgl,
- struct scatterlist *bounce_sgl,
- unsigned int orig_sgl_count,
- unsigned int bounce_sgl_count)
-{
- int i;
- int j = 0;
- unsigned long src, dest;
- unsigned int srclen, destlen, copylen;
- unsigned int total_copied = 0;
- unsigned long bounce_addr = 0;
- unsigned long dest_addr = 0;
- unsigned long flags;
-
- local_irq_save(flags);
-
- for (i = 0; i < orig_sgl_count; i++) {
- dest_addr = (unsigned long)kmap_atomic(sg_page((&orig_sgl[i])),
- KM_IRQ0) + orig_sgl[i].offset;
- dest = dest_addr;
- destlen = orig_sgl[i].length;
-
- if (bounce_addr == 0)
- bounce_addr =
- (unsigned long)kmap_atomic(sg_page((&bounce_sgl[j])),
- KM_IRQ0);
-
- while (destlen) {
- src = bounce_addr + bounce_sgl[j].offset;
- srclen = bounce_sgl[j].length - bounce_sgl[j].offset;
-
- copylen = min(srclen, destlen);
- memcpy((void *)dest, (void *)src, copylen);
-
- total_copied += copylen;
- bounce_sgl[j].offset += copylen;
- destlen -= copylen;
- dest += copylen;
-
- if (bounce_sgl[j].offset == bounce_sgl[j].length) {
- /* full */
- kunmap_atomic((void *)bounce_addr, KM_IRQ0);
- j++;
-
- /*
- * It is possible that the number of elements
- * in the bounce buffer may not be equal to
- * the number of elements in the original
- * scatter list. Handle this correctly.
- */
-
- if (j == bounce_sgl_count) {
- /*
- * We are done; cleanup and return.
- */
- kunmap_atomic((void *)(dest_addr -
- orig_sgl[i].offset),
- KM_IRQ0);
- local_irq_restore(flags);
- return total_copied;
- }
-
- /* if we need to use another bounce buffer */
- if (destlen || i != orig_sgl_count - 1)
- bounce_addr =
- (unsigned long)kmap_atomic(
- sg_page((&bounce_sgl[j])), KM_IRQ0);
- } else if (destlen == 0 && i == orig_sgl_count - 1) {
- /* unmap the last bounce that is < PAGE_SIZE */
- kunmap_atomic((void *)bounce_addr, KM_IRQ0);
- }
- }
-
- kunmap_atomic((void *)(dest_addr - orig_sgl[i].offset),
- KM_IRQ0);
- }
-
- local_irq_restore(flags);
-
- return total_copied;
-}
-
-
-/* Assume the bounce_sgl has enough room ie using the create_bounce_buffer() */
-static unsigned int copy_to_bounce_buffer(struct scatterlist *orig_sgl,
- struct scatterlist *bounce_sgl,
- unsigned int orig_sgl_count)
-{
- int i;
- int j = 0;
- unsigned long src, dest;
- unsigned int srclen, destlen, copylen;
- unsigned int total_copied = 0;
- unsigned long bounce_addr = 0;
- unsigned long src_addr = 0;
- unsigned long flags;
-
- local_irq_save(flags);
-
- for (i = 0; i < orig_sgl_count; i++) {
- src_addr = (unsigned long)kmap_atomic(sg_page((&orig_sgl[i])),
- KM_IRQ0) + orig_sgl[i].offset;
- src = src_addr;
- srclen = orig_sgl[i].length;
-
- if (bounce_addr == 0)
- bounce_addr =
- (unsigned long)kmap_atomic(sg_page((&bounce_sgl[j])),
- KM_IRQ0);
-
- while (srclen) {
- /* assume bounce offset always == 0 */
- dest = bounce_addr + bounce_sgl[j].length;
- destlen = PAGE_SIZE - bounce_sgl[j].length;
-
- copylen = min(srclen, destlen);
- memcpy((void *)dest, (void *)src, copylen);
-
- total_copied += copylen;
- bounce_sgl[j].length += copylen;
- srclen -= copylen;
- src += copylen;
-
- if (bounce_sgl[j].length == PAGE_SIZE) {
- /* full..move to next entry */
- kunmap_atomic((void *)bounce_addr, KM_IRQ0);
- j++;
-
- /* if we need to use another bounce buffer */
- if (srclen || i != orig_sgl_count - 1)
- bounce_addr =
- (unsigned long)kmap_atomic(
- sg_page((&bounce_sgl[j])), KM_IRQ0);
-
- } else if (srclen == 0 && i == orig_sgl_count - 1) {
- /* unmap the last bounce that is < PAGE_SIZE */
- kunmap_atomic((void *)bounce_addr, KM_IRQ0);
- }
- }
-
- kunmap_atomic((void *)(src_addr - orig_sgl[i].offset), KM_IRQ0);
- }
-
- local_irq_restore(flags);
-
- return total_copied;
-}
-
-
-static int storvsc_remove(struct hv_device *dev)
-{
- struct storvsc_device *stor_device = hv_get_drvdata(dev);
- struct Scsi_Host *host = stor_device->host;
-
- scsi_remove_host(host);
-
- scsi_host_put(host);
-
- storvsc_dev_remove(dev);
-
- return 0;
-}
-
-
static int storvsc_get_chs(struct scsi_device *sdev, struct block_device * bdev,
sector_t capacity, int *info)
{
@@ -1111,10 +1171,13 @@ static int storvsc_get_chs(struct scsi_device *sdev, struct block_device * bdev,
return 0;
}
-static int storvsc_host_reset(struct hv_device *device)
+static int storvsc_host_reset_handler(struct scsi_cmnd *scmnd)
{
+ struct hv_host_device *host_dev = shost_priv(scmnd->device->host);
+ struct hv_device *device = host_dev->dev;
+
struct storvsc_device *stor_device;
- struct hv_storvsc_request *request;
+ struct storvsc_cmd_request *request;
struct vstor_packet *vstor_packet;
int ret, t;
@@ -1153,105 +1216,16 @@ static int storvsc_host_reset(struct hv_device *device)
return SUCCESS;
}
-
-/*
- * storvsc_host_reset_handler - Reset the scsi HBA
- */
-static int storvsc_host_reset_handler(struct scsi_cmnd *scmnd)
-{
- struct hv_host_device *host_dev = shost_priv(scmnd->device->host);
- struct hv_device *dev = host_dev->dev;
-
- return storvsc_host_reset(dev);
-}
-
-
-/*
- * storvsc_command_completion - Command completion processing
- */
-static void storvsc_command_completion(struct hv_storvsc_request *request)
-{
- struct storvsc_cmd_request *cmd_request =
- (struct storvsc_cmd_request *)request->context;
- struct scsi_cmnd *scmnd = cmd_request->cmd;
- struct hv_host_device *host_dev = shost_priv(scmnd->device->host);
- void (*scsi_done_fn)(struct scsi_cmnd *);
- struct scsi_sense_hdr sense_hdr;
- struct vmscsi_request *vm_srb;
- struct storvsc_scan_work *wrk;
- struct stor_mem_pools *memp = scmnd->device->hostdata;
-
- vm_srb = &request->vstor_packet.vm_srb;
- if (cmd_request->bounce_sgl_count) {
- if (vm_srb->data_in == READ_TYPE)
- copy_from_bounce_buffer(scsi_sglist(scmnd),
- cmd_request->bounce_sgl,
- scsi_sg_count(scmnd),
- cmd_request->bounce_sgl_count);
- destroy_bounce_buffer(cmd_request->bounce_sgl,
- cmd_request->bounce_sgl_count);
- }
-
- /*
- * If there is an error; offline the device since all
- * error recovery strategies would have already been
- * deployed on the host side.
- */
- if (vm_srb->srb_status == 0x4)
- scmnd->result = DID_TARGET_FAILURE << 16;
- else
- scmnd->result = vm_srb->scsi_status;
-
- /*
- * If the LUN is invalid; remove the device.
- */
- if (vm_srb->srb_status == 0x20) {
- struct storvsc_device *stor_dev;
- struct hv_device *dev = host_dev->dev;
- struct Scsi_Host *host;
-
- stor_dev = get_in_stor_device(dev);
- host = stor_dev->host;
-
- wrk = kmalloc(sizeof(struct storvsc_scan_work),
- GFP_ATOMIC);
- if (!wrk) {
- scmnd->result = DID_TARGET_FAILURE << 16;
- } else {
- wrk->host = host;
- wrk->lun = vm_srb->lun;
- INIT_WORK(&wrk->work, storvsc_remove_lun);
- schedule_work(&wrk->work);
- }
- }
-
- if (scmnd->result) {
- if (scsi_normalize_sense(scmnd->sense_buffer,
- SCSI_SENSE_BUFFERSIZE, &sense_hdr))
- scsi_print_sense_hdr("storvsc", &sense_hdr);
- }
-
- scsi_set_resid(scmnd,
- request->data_buffer.len -
- vm_srb->data_transfer_length);
-
- scsi_done_fn = scmnd->scsi_done;
-
- scmnd->host_scribble = NULL;
- scmnd->scsi_done = NULL;
-
- scsi_done_fn(scmnd);
-
- mempool_free(cmd_request, memp->request_mempool);
-}
-
-static bool storvsc_check_scsi_cmd(struct scsi_cmnd *scmnd)
+static bool storvsc_scsi_cmd_ok(struct scsi_cmnd *scmnd)
{
bool allowed = true;
u8 scsi_op = scmnd->cmnd[0];
switch (scsi_op) {
- /* smartd sends this command, which will offline the device */
+ /*
+ * smartd sends this command and the host does not handle
+ * this. So, don't send it.
+ */
case SET_WINDOW:
scmnd->result = ILLEGAL_REQUEST << 16;
allowed = false;
@@ -1262,15 +1236,11 @@ static bool storvsc_check_scsi_cmd(struct scsi_cmnd *scmnd)
return allowed;
}
-/*
- * storvsc_queuecommand - Initiate command processing
- */
static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
{
int ret;
struct hv_host_device *host_dev = shost_priv(host);
struct hv_device *dev = host_dev->dev;
- struct hv_storvsc_request *request;
struct storvsc_cmd_request *cmd_request;
unsigned int request_size = 0;
int i;
@@ -1279,38 +1249,31 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
struct vmscsi_request *vm_srb;
struct stor_mem_pools *memp = scmnd->device->hostdata;
- if (storvsc_check_scsi_cmd(scmnd) == false) {
+ if (!storvsc_scsi_cmd_ok(scmnd)) {
scmnd->scsi_done(scmnd);
return 0;
}
- /* If retrying, no need to prep the cmd */
- if (scmnd->host_scribble) {
-
- cmd_request =
- (struct storvsc_cmd_request *)scmnd->host_scribble;
-
- goto retry_request;
- }
-
request_size = sizeof(struct storvsc_cmd_request);
cmd_request = mempool_alloc(memp->request_mempool,
GFP_ATOMIC);
+
+ /*
+ * We might be invoked in an interrupt context; hence
+ * mempool_alloc() can fail.
+ */
if (!cmd_request)
return SCSI_MLQUEUE_DEVICE_BUSY;
memset(cmd_request, 0, sizeof(struct storvsc_cmd_request));
/* Setup the cmd request */
- cmd_request->bounce_sgl_count = 0;
- cmd_request->bounce_sgl = NULL;
cmd_request->cmd = scmnd;
scmnd->host_scribble = (unsigned char *)cmd_request;
- request = &cmd_request->request;
- vm_srb = &request->vstor_packet.vm_srb;
+ vm_srb = &cmd_request->vstor_packet.vm_srb;
/* Build the SRB */
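
The mempool_alloc(..., GFP_ATOMIC) above is what makes queuecommand safe in interrupt context: the per-LUN pool keeps STORVSC_MIN_BUF_NR request structures in reserve, and when even those are exhausted the driver returns SCSI_MLQUEUE_DEVICE_BUSY rather than sleeping. A sketch of the pool setup implied by struct stor_mem_pools; the cache naming and error-path details are assumptions, not quoted from the patch:

static int storvsc_device_alloc_sketch(struct scsi_device *sdevice)
{
	struct stor_mem_pools *memp;

	memp = kzalloc(sizeof(struct stor_mem_pools), GFP_KERNEL);
	if (!memp)
		return -ENOMEM;

	/* Backing slab for fixed-size storvsc_cmd_request objects. */
	memp->request_pool =
		kmem_cache_create(dev_name(&sdevice->sdev_dev),
				  sizeof(struct storvsc_cmd_request), 0,
				  SLAB_HWCACHE_ALIGN, NULL);
	if (!memp->request_pool)
		goto err_free;

	/* Reserve enough objects to survive GFP_ATOMIC allocation storms. */
	memp->request_mempool = mempool_create(STORVSC_MIN_BUF_NR,
					       mempool_alloc_slab,
					       mempool_free_slab,
					       memp->request_pool);
	if (!memp->request_mempool)
		goto err_destroy;

	sdevice->hostdata = memp;
	return 0;

err_destroy:
	kmem_cache_destroy(memp->request_pool);
err_free:
	kfree(memp);
	return -ENOMEM;
}
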
@@ -1326,8 +1289,6 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
break;
}
- request->on_io_completion = storvsc_command_completion;
- request->context = cmd_request;/* scmnd; */
vm_srb->port_number = host_dev->port;
vm_srb->path_id = scmnd->device->channel;
@@ -1338,10 +1299,10 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
memcpy(vm_srb->cdb, scmnd->cmnd, vm_srb->cdb_length);
- request->sense_buffer = scmnd->sense_buffer;
+ cmd_request->sense_buffer = scmnd->sense_buffer;
- request->data_buffer.len = scsi_bufflen(scmnd);
+ cmd_request->data_buffer.len = scsi_bufflen(scmnd);
if (scsi_sg_count(scmnd)) {
sgl = (struct scatterlist *)scsi_sglist(scmnd);
sg_count = scsi_sg_count(scmnd);
@@ -1353,11 +1314,8 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
scsi_bufflen(scmnd),
vm_srb->data_in);
if (!cmd_request->bounce_sgl) {
- scmnd->host_scribble = NULL;
- mempool_free(cmd_request,
- memp->request_mempool);
-
- return SCSI_MLQUEUE_HOST_BUSY;
+ ret = SCSI_MLQUEUE_HOST_BUSY;
+ goto queue_error;
}
cmd_request->bounce_sgl_count =
@@ -1373,41 +1331,42 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
sg_count = cmd_request->bounce_sgl_count;
}
- request->data_buffer.offset = sgl[0].offset;
+ cmd_request->data_buffer.offset = sgl[0].offset;
for (i = 0; i < sg_count; i++)
- request->data_buffer.pfn_array[i] =
+ cmd_request->data_buffer.pfn_array[i] =
page_to_pfn(sg_page((&sgl[i])));
} else if (scsi_sglist(scmnd)) {
- request->data_buffer.offset =
+ cmd_request->data_buffer.offset =
virt_to_phys(scsi_sglist(scmnd)) & (PAGE_SIZE-1);
- request->data_buffer.pfn_array[0] =
+ cmd_request->data_buffer.pfn_array[0] =
virt_to_phys(scsi_sglist(scmnd)) >> PAGE_SHIFT;
}
-retry_request:
/* Invokes the vsc to start an IO */
- ret = storvsc_do_io(dev, &cmd_request->request);
+ ret = storvsc_do_io(dev, cmd_request);
if (ret == -EAGAIN) {
/* no more space */
- if (cmd_request->bounce_sgl_count)
+ if (cmd_request->bounce_sgl_count) {
destroy_bounce_buffer(cmd_request->bounce_sgl,
cmd_request->bounce_sgl_count);
- mempool_free(cmd_request, memp->request_mempool);
-
- scmnd->host_scribble = NULL;
-
- ret = SCSI_MLQUEUE_DEVICE_BUSY;
+ ret = SCSI_MLQUEUE_DEVICE_BUSY;
+ goto queue_error;
+ }
}
+ return 0;
+
+queue_error:
+ mempool_free(cmd_request, memp->request_mempool);
+ scmnd->host_scribble = NULL;
return ret;
}
-/* Scsi driver */
static struct scsi_host_template scsi_driver = {
.module = THIS_MODULE,
.name = "storvsc_host_t",
@@ -1448,11 +1407,6 @@ static const struct hv_vmbus_device_id id_table[] = {
MODULE_DEVICE_TABLE(vmbus, id_table);
-
-/*
- * storvsc_probe - Add a new device for this driver
- */
-
static int storvsc_probe(struct hv_device *device,
const struct hv_vmbus_device_id *dev_id)
{
@@ -1460,7 +1414,6 @@ static int storvsc_probe(struct hv_device *device,
struct Scsi_Host *host;
struct hv_host_device *host_dev;
bool dev_is_ide = ((dev_id->driver_data == IDE_GUID) ? true : false);
- int path = 0;
int target = 0;
struct storvsc_device *stor_device;
@@ -1493,9 +1446,6 @@ static int storvsc_probe(struct hv_device *device,
if (ret)
goto err_out1;
- if (dev_is_ide)
- storvsc_get_ide_info(device, &target, &path);
-
host_dev->path = stor_device->path_id;
host_dev->target = stor_device->target_id;
@@ -1515,12 +1465,14 @@ static int storvsc_probe(struct hv_device *device,
if (!dev_is_ide) {
scsi_scan_host(host);
- return 0;
- }
- ret = scsi_add_device(host, 0, target, 0);
- if (ret) {
- scsi_remove_host(host);
- goto err_out2;
+ } else {
+ target = (device->dev_instance.b[5] << 8 |
+ device->dev_instance.b[4]);
+ ret = scsi_add_device(host, 0, target, 0);
+ if (ret) {
+ scsi_remove_host(host);
+ goto err_out2;
+ }
}
return 0;
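
With storvsc_get_ide_info() gone, the IDE target id is read straight out of bytes 4 and 5 of the vmbus device-instance GUID as a little-endian 16-bit value. The extraction, isolated as a sketch (ide_target_from_guid is an illustrative name):

#include <stdint.h>

/* Mirrors the probe path above: b[4] is the low byte, b[5] the high byte. */
static uint16_t ide_target_from_guid(const uint8_t b[16])
{
	return (uint16_t)((b[5] << 8) | b[4]);
}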
@@ -1542,7 +1494,17 @@ err_out0:
return ret;
}
-/* The one and only one */
+static int storvsc_remove(struct hv_device *dev)
+{
+ struct storvsc_device *stor_device = hv_get_drvdata(dev);
+ struct Scsi_Host *host = stor_device->host;
+
+ scsi_remove_host(host);
+ storvsc_dev_remove(dev);
+ scsi_host_put(host);
+
+ return 0;
+}
static struct hv_driver storvsc_drv = {
.name = KBUILD_MODNAME,
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index 9e634724978..f1abfb179b4 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -76,8 +76,6 @@ source "drivers/staging/vt6655/Kconfig"
source "drivers/staging/vt6656/Kconfig"
-source "drivers/staging/hv/Kconfig"
-
source "drivers/staging/vme/Kconfig"
source "drivers/staging/sep/Kconfig"
@@ -88,6 +86,8 @@ source "drivers/staging/zram/Kconfig"
source "drivers/staging/zcache/Kconfig"
+source "drivers/staging/zsmalloc/Kconfig"
+
source "drivers/staging/wlags49_h2/Kconfig"
source "drivers/staging/wlags49_h25/Kconfig"
@@ -128,4 +128,10 @@ source "drivers/staging/omapdrm/Kconfig"
source "drivers/staging/android/Kconfig"
+source "drivers/staging/telephony/Kconfig"
+
+source "drivers/staging/ramster/Kconfig"
+
+source "drivers/staging/ozwpan/Kconfig"
+
endif # STAGING
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index 943e1483075..ffe7d44374e 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -29,13 +29,12 @@ obj-$(CONFIG_USB_SERIAL_QUATECH_USB2) += quatech_usb2/
obj-$(CONFIG_OCTEON_ETHERNET) += octeon/
obj-$(CONFIG_VT6655) += vt6655/
obj-$(CONFIG_VT6656) += vt6656/
-obj-$(CONFIG_HYPERV) += hv/
obj-$(CONFIG_VME_BUS) += vme/
obj-$(CONFIG_DX_SEP) += sep/
obj-$(CONFIG_IIO) += iio/
obj-$(CONFIG_ZRAM) += zram/
-obj-$(CONFIG_XVMALLOC) += zram/
obj-$(CONFIG_ZCACHE) += zcache/
+obj-$(CONFIG_ZSMALLOC) += zsmalloc/
obj-$(CONFIG_WLAGS49_H2) += wlags49_h2/
obj-$(CONFIG_WLAGS49_H25) += wlags49_h25/
obj-$(CONFIG_FB_SM7XX) += sm7xx/
@@ -55,3 +54,6 @@ obj-$(CONFIG_INTEL_MEI) += mei/
obj-$(CONFIG_MFD_NVEC) += nvec/
obj-$(CONFIG_DRM_OMAP) += omapdrm/
obj-$(CONFIG_ANDROID) += android/
+obj-$(CONFIG_PHONE) += telephony/
+obj-$(CONFIG_RAMSTER) += ramster/
+obj-$(CONFIG_USB_WPAN_HCD) += ozwpan/
diff --git a/drivers/staging/android/Kconfig b/drivers/staging/android/Kconfig
index fef3580ce8d..08a3b1133d2 100644
--- a/drivers/staging/android/Kconfig
+++ b/drivers/staging/android/Kconfig
@@ -25,65 +25,17 @@ config ANDROID_LOGGER
tristate "Android log driver"
default n
-config ANDROID_RAM_CONSOLE
- bool "Android RAM buffer console"
- depends on !S390 && !UML
- default n
-
-config ANDROID_RAM_CONSOLE_ENABLE_VERBOSE
- bool "Enable verbose console messages on Android RAM console"
- default y
- depends on ANDROID_RAM_CONSOLE
-
-menuconfig ANDROID_RAM_CONSOLE_ERROR_CORRECTION
- bool "Android RAM Console Enable error correction"
- default n
- depends on ANDROID_RAM_CONSOLE
- depends on !ANDROID_RAM_CONSOLE_EARLY_INIT
+config ANDROID_PERSISTENT_RAM
+ bool
select REED_SOLOMON
select REED_SOLOMON_ENC8
select REED_SOLOMON_DEC8
-if ANDROID_RAM_CONSOLE_ERROR_CORRECTION
-
-config ANDROID_RAM_CONSOLE_ERROR_CORRECTION_DATA_SIZE
- int "Android RAM Console Data data size"
- default 128
- help
- Must be a power of 2.
-
-config ANDROID_RAM_CONSOLE_ERROR_CORRECTION_ECC_SIZE
- int "Android RAM Console ECC size"
- default 16
-
-config ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE
- int "Android RAM Console Symbol size"
- default 8
-
-config ANDROID_RAM_CONSOLE_ERROR_CORRECTION_POLYNOMIAL
- hex "Android RAM Console Polynomial"
- default 0x19 if (ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE = 4)
- default 0x29 if (ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE = 5)
- default 0x61 if (ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE = 6)
- default 0x89 if (ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE = 7)
- default 0x11d if (ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE = 8)
-
-endif # ANDROID_RAM_CONSOLE_ERROR_CORRECTION
-
-config ANDROID_RAM_CONSOLE_EARLY_INIT
- bool "Start Android RAM console early"
+config ANDROID_RAM_CONSOLE
+ bool "Android RAM buffer console"
+ depends on !S390 && !UML
+ select ANDROID_PERSISTENT_RAM
default n
- depends on ANDROID_RAM_CONSOLE
-
-config ANDROID_RAM_CONSOLE_EARLY_ADDR
- hex "Android RAM console virtual address"
- default 0
- depends on ANDROID_RAM_CONSOLE_EARLY_INIT
-
-config ANDROID_RAM_CONSOLE_EARLY_SIZE
- hex "Android RAM console buffer size"
- default 0
- depends on ANDROID_RAM_CONSOLE_EARLY_INIT
config ANDROID_TIMED_OUTPUT
bool "Timed output class driver"
@@ -102,6 +54,32 @@ config ANDROID_LOW_MEMORY_KILLER
source "drivers/staging/android/switch/Kconfig"
+config ANDROID_INTF_ALARM
+ bool "Android alarm driver"
+ depends on RTC_CLASS
+ default n
+ help
+ Provides non-wakeup and rtc-backed wakeup alarms based on rtc or
+ elapsed realtime, and a non-wakeup alarm on the monotonic clock.
+ Also provides an interface to set the wall time, which must be
+ used for elapsed realtime to work.
+
+config ANDROID_INTF_ALARM_DEV
+ bool "Android alarm device"
+ depends on ANDROID_INTF_ALARM
+ default y
+ help
+ Exports the alarm interface to user-space.
+
+config ANDROID_ALARM_OLDDRV_COMPAT
+ bool "Android Alarm compatability with old drivers"
+ depends on ANDROID_INTF_ALARM
+ default n
+ help
+ Provides preprocessor aliases to aid compatibility with
+ older out-of-tree drivers that use the Android Alarm
+ in-kernel API. This will be removed eventually.
+
endif # if ANDROID
endmenu
diff --git a/drivers/staging/android/Makefile b/drivers/staging/android/Makefile
index 5fcc24ffdd5..9b6c9ed91f6 100644
--- a/drivers/staging/android/Makefile
+++ b/drivers/staging/android/Makefile
@@ -1,8 +1,11 @@
obj-$(CONFIG_ANDROID_BINDER_IPC) += binder.o
obj-$(CONFIG_ASHMEM) += ashmem.o
obj-$(CONFIG_ANDROID_LOGGER) += logger.o
+obj-$(CONFIG_ANDROID_PERSISTENT_RAM) += persistent_ram.o
obj-$(CONFIG_ANDROID_RAM_CONSOLE) += ram_console.o
obj-$(CONFIG_ANDROID_TIMED_OUTPUT) += timed_output.o
obj-$(CONFIG_ANDROID_TIMED_GPIO) += timed_gpio.o
obj-$(CONFIG_ANDROID_LOW_MEMORY_KILLER) += lowmemorykiller.o
obj-$(CONFIG_ANDROID_SWITCH) += switch/
+obj-$(CONFIG_ANDROID_INTF_ALARM) += alarm.o
+obj-$(CONFIG_ANDROID_INTF_ALARM_DEV) += alarm-dev.o
diff --git a/drivers/staging/android/TODO b/drivers/staging/android/TODO
index e59c5be4be2..b15fb0d6b15 100644
--- a/drivers/staging/android/TODO
+++ b/drivers/staging/android/TODO
@@ -3,7 +3,7 @@ TODO:
- sparse fixes
- rename files to be not so "generic"
- make sure things build as modules properly
- - add proper arch dependancies as needed
+ - add proper arch dependencies as needed
- audit userspace interfaces to make sure they are sane
Please send patches to Greg Kroah-Hartman <greg@kroah.com> and Cc:
diff --git a/drivers/staging/android/alarm-dev.c b/drivers/staging/android/alarm-dev.c
new file mode 100644
index 00000000000..03efb34cbe2
--- /dev/null
+++ b/drivers/staging/android/alarm-dev.c
@@ -0,0 +1,297 @@
+/* drivers/rtc/alarm-dev.c
+ *
+ * Copyright (C) 2007-2009 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/time.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/miscdevice.h>
+#include <linux/fs.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/uaccess.h>
+#include "android_alarm.h"
+
+/* XXX - Hack out wakelocks, while they are out of tree */
+struct wake_lock {
+ int i;
+};
+#define wake_lock(x)
+#define wake_lock_timeout(x, y)
+#define wake_unlock(x)
+#define WAKE_LOCK_SUSPEND 0
+#define wake_lock_init(x, y, z) ((x)->i = 1)
+#define wake_lock_destroy(x)
+
+#define ANDROID_ALARM_PRINT_INFO (1U << 0)
+#define ANDROID_ALARM_PRINT_IO (1U << 1)
+#define ANDROID_ALARM_PRINT_INT (1U << 2)
+
+
+static int debug_mask = ANDROID_ALARM_PRINT_INFO;
+module_param_named(debug_mask, debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP);
+
+#define pr_alarm(debug_level_mask, args...) \
+ do { \
+ if (debug_mask & ANDROID_ALARM_PRINT_##debug_level_mask) { \
+ pr_info(args); \
+ } \
+ } while (0)
+
+#define ANDROID_ALARM_WAKEUP_MASK ( \
+ ANDROID_ALARM_RTC_WAKEUP_MASK | \
+ ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP_MASK)
+
+/* support old userspace code */
+#define ANDROID_ALARM_SET_OLD _IOW('a', 2, time_t) /* set alarm */
+#define ANDROID_ALARM_SET_AND_WAIT_OLD _IOW('a', 3, time_t)
+
+static int alarm_opened;
+static DEFINE_SPINLOCK(alarm_slock);
+static struct wake_lock alarm_wake_lock;
+static DECLARE_WAIT_QUEUE_HEAD(alarm_wait_queue);
+static uint32_t alarm_pending;
+static uint32_t alarm_enabled;
+static uint32_t wait_pending;
+
+static struct android_alarm alarms[ANDROID_ALARM_TYPE_COUNT];
+
+static long alarm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ int rv = 0;
+ unsigned long flags;
+ struct timespec new_alarm_time;
+ struct timespec new_rtc_time;
+ struct timespec tmp_time;
+ enum android_alarm_type alarm_type = ANDROID_ALARM_IOCTL_TO_TYPE(cmd);
+ uint32_t alarm_type_mask = 1U << alarm_type;
+
+ if (alarm_type >= ANDROID_ALARM_TYPE_COUNT)
+ return -EINVAL;
+
+ if (ANDROID_ALARM_BASE_CMD(cmd) != ANDROID_ALARM_GET_TIME(0)) {
+ if ((file->f_flags & O_ACCMODE) == O_RDONLY)
+ return -EPERM;
+ if (file->private_data == NULL &&
+ cmd != ANDROID_ALARM_SET_RTC) {
+ spin_lock_irqsave(&alarm_slock, flags);
+ if (alarm_opened) {
+ spin_unlock_irqrestore(&alarm_slock, flags);
+ return -EBUSY;
+ }
+ alarm_opened = 1;
+ file->private_data = (void *)1;
+ spin_unlock_irqrestore(&alarm_slock, flags);
+ }
+ }
+
+ switch (ANDROID_ALARM_BASE_CMD(cmd)) {
+ case ANDROID_ALARM_CLEAR(0):
+ spin_lock_irqsave(&alarm_slock, flags);
+ pr_alarm(IO, "alarm %d clear\n", alarm_type);
+ android_alarm_try_to_cancel(&alarms[alarm_type]);
+ if (alarm_pending) {
+ alarm_pending &= ~alarm_type_mask;
+ if (!alarm_pending && !wait_pending)
+ wake_unlock(&alarm_wake_lock);
+ }
+ alarm_enabled &= ~alarm_type_mask;
+ spin_unlock_irqrestore(&alarm_slock, flags);
+ break;
+
+ case ANDROID_ALARM_SET_OLD:
+ case ANDROID_ALARM_SET_AND_WAIT_OLD:
+ if (get_user(new_alarm_time.tv_sec, (int __user *)arg)) {
+ rv = -EFAULT;
+ goto err1;
+ }
+ new_alarm_time.tv_nsec = 0;
+ goto from_old_alarm_set;
+
+ case ANDROID_ALARM_SET_AND_WAIT(0):
+ case ANDROID_ALARM_SET(0):
+ if (copy_from_user(&new_alarm_time, (void __user *)arg,
+ sizeof(new_alarm_time))) {
+ rv = -EFAULT;
+ goto err1;
+ }
+from_old_alarm_set:
+ spin_lock_irqsave(&alarm_slock, flags);
+ pr_alarm(IO, "alarm %d set %ld.%09ld\n", alarm_type,
+ new_alarm_time.tv_sec, new_alarm_time.tv_nsec);
+ alarm_enabled |= alarm_type_mask;
+ android_alarm_start_range(&alarms[alarm_type],
+ timespec_to_ktime(new_alarm_time),
+ timespec_to_ktime(new_alarm_time));
+ spin_unlock_irqrestore(&alarm_slock, flags);
+ if (ANDROID_ALARM_BASE_CMD(cmd) != ANDROID_ALARM_SET_AND_WAIT(0)
+ && cmd != ANDROID_ALARM_SET_AND_WAIT_OLD)
+ break;
+ /* fall through */
+ case ANDROID_ALARM_WAIT:
+ spin_lock_irqsave(&alarm_slock, flags);
+ pr_alarm(IO, "alarm wait\n");
+ if (!alarm_pending && wait_pending) {
+ wake_unlock(&alarm_wake_lock);
+ wait_pending = 0;
+ }
+ spin_unlock_irqrestore(&alarm_slock, flags);
+ rv = wait_event_interruptible(alarm_wait_queue, alarm_pending);
+ if (rv)
+ goto err1;
+ spin_lock_irqsave(&alarm_slock, flags);
+ rv = alarm_pending;
+ wait_pending = 1;
+ alarm_pending = 0;
+ spin_unlock_irqrestore(&alarm_slock, flags);
+ break;
+ case ANDROID_ALARM_SET_RTC:
+ if (copy_from_user(&new_rtc_time, (void __user *)arg,
+ sizeof(new_rtc_time))) {
+ rv = -EFAULT;
+ goto err1;
+ }
+ rv = android_alarm_set_rtc(new_rtc_time);
+ spin_lock_irqsave(&alarm_slock, flags);
+ alarm_pending |= ANDROID_ALARM_TIME_CHANGE_MASK;
+ wake_up(&alarm_wait_queue);
+ spin_unlock_irqrestore(&alarm_slock, flags);
+ if (rv < 0)
+ goto err1;
+ break;
+ case ANDROID_ALARM_GET_TIME(0):
+ switch (alarm_type) {
+ case ANDROID_ALARM_RTC_WAKEUP:
+ case ANDROID_ALARM_RTC:
+ getnstimeofday(&tmp_time);
+ break;
+ case ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP:
+ case ANDROID_ALARM_ELAPSED_REALTIME:
+ tmp_time =
+ ktime_to_timespec(alarm_get_elapsed_realtime());
+ break;
+ case ANDROID_ALARM_TYPE_COUNT:
+ case ANDROID_ALARM_SYSTEMTIME:
+ ktime_get_ts(&tmp_time);
+ break;
+ }
+ if (copy_to_user((void __user *)arg, &tmp_time,
+ sizeof(tmp_time))) {
+ rv = -EFAULT;
+ goto err1;
+ }
+ break;
+
+ default:
+ rv = -EINVAL;
+ goto err1;
+ }
+err1:
+ return rv;
+}
+
+static int alarm_open(struct inode *inode, struct file *file)
+{
+ file->private_data = NULL;
+ return 0;
+}
+
+static int alarm_release(struct inode *inode, struct file *file)
+{
+ int i;
+ unsigned long flags;
+
+ spin_lock_irqsave(&alarm_slock, flags);
+ if (file->private_data) {
+ for (i = 0; i < ANDROID_ALARM_TYPE_COUNT; i++) {
+ uint32_t alarm_type_mask = 1U << i;
+ if (alarm_enabled & alarm_type_mask) {
+ pr_alarm(INFO, "alarm_release: clear alarm, "
+ "pending %d\n",
+ !!(alarm_pending & alarm_type_mask));
+ alarm_enabled &= ~alarm_type_mask;
+ }
+ spin_unlock_irqrestore(&alarm_slock, flags);
+ android_alarm_cancel(&alarms[i]);
+ spin_lock_irqsave(&alarm_slock, flags);
+ }
+ if (alarm_pending | wait_pending) {
+ if (alarm_pending)
+ pr_alarm(INFO, "alarm_release: clear "
+ "pending alarms %x\n", alarm_pending);
+ wake_unlock(&alarm_wake_lock);
+ wait_pending = 0;
+ alarm_pending = 0;
+ }
+ alarm_opened = 0;
+ }
+ spin_unlock_irqrestore(&alarm_slock, flags);
+ return 0;
+}
+
+static void alarm_triggered(struct android_alarm *alarm)
+{
+ unsigned long flags;
+ uint32_t alarm_type_mask = 1U << alarm->type;
+
+ pr_alarm(INT, "alarm_triggered type %d\n", alarm->type);
+ spin_lock_irqsave(&alarm_slock, flags);
+ if (alarm_enabled & alarm_type_mask) {
+ wake_lock_timeout(&alarm_wake_lock, 5 * HZ);
+ alarm_enabled &= ~alarm_type_mask;
+ alarm_pending |= alarm_type_mask;
+ wake_up(&alarm_wait_queue);
+ }
+ spin_unlock_irqrestore(&alarm_slock, flags);
+}
+
+static const struct file_operations alarm_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = alarm_ioctl,
+ .open = alarm_open,
+ .release = alarm_release,
+};
+
+static struct miscdevice alarm_device = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "alarm",
+ .fops = &alarm_fops,
+};
+
+static int __init alarm_dev_init(void)
+{
+ int err;
+ int i;
+
+ err = misc_register(&alarm_device);
+ if (err)
+ return err;
+
+ for (i = 0; i < ANDROID_ALARM_TYPE_COUNT; i++)
+ android_alarm_init(&alarms[i], i, alarm_triggered);
+ wake_lock_init(&alarm_wake_lock, WAKE_LOCK_SUSPEND, "alarm");
+
+ return 0;
+}
+
+static void __exit alarm_dev_exit(void)
+{
+ misc_deregister(&alarm_device);
+ wake_lock_destroy(&alarm_wake_lock);
+}
+
+module_init(alarm_dev_init);
+module_exit(alarm_dev_exit);
+
diff --git a/drivers/staging/android/alarm.c b/drivers/staging/android/alarm.c
new file mode 100644
index 00000000000..c68950b9e08
--- /dev/null
+++ b/drivers/staging/android/alarm.c
@@ -0,0 +1,601 @@
+/* drivers/rtc/alarm.c
+ *
+ * Copyright (C) 2007-2009 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/time.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/miscdevice.h>
+#include <linux/platform_device.h>
+#include <linux/rtc.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include "android_alarm.h"
+
+/* XXX - Hack out wakelocks, while they are out of tree */
+struct wake_lock {
+ int i;
+};
+#define wake_lock(x)
+#define wake_lock_timeout(x, y)
+#define wake_unlock(x)
+#define WAKE_LOCK_SUSPEND 0
+#define wake_lock_init(x, y, z) ((x)->i = 1)
+#define wake_lock_destroy(x)
+
+#define ANDROID_ALARM_PRINT_ERROR (1U << 0)
+#define ANDROID_ALARM_PRINT_INIT_STATUS (1U << 1)
+#define ANDROID_ALARM_PRINT_TSET (1U << 2)
+#define ANDROID_ALARM_PRINT_CALL (1U << 3)
+#define ANDROID_ALARM_PRINT_SUSPEND (1U << 4)
+#define ANDROID_ALARM_PRINT_INT (1U << 5)
+#define ANDROID_ALARM_PRINT_FLOW (1U << 6)
+
+static int debug_mask = ANDROID_ALARM_PRINT_ERROR | \
+ ANDROID_ALARM_PRINT_INIT_STATUS;
+module_param_named(debug_mask, debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP);
+
+#define pr_alarm(debug_level_mask, args...) \
+ do { \
+ if (debug_mask & ANDROID_ALARM_PRINT_##debug_level_mask) { \
+ pr_info(args); \
+ } \
+ } while (0)
+
+#define ANDROID_ALARM_WAKEUP_MASK ( \
+ ANDROID_ALARM_RTC_WAKEUP_MASK | \
+ ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP_MASK)
+
+/* support old userspace code */
+#define ANDROID_ALARM_SET_OLD _IOW('a', 2, time_t) /* set alarm */
+#define ANDROID_ALARM_SET_AND_WAIT_OLD _IOW('a', 3, time_t)
+
+struct alarm_queue {
+ struct rb_root alarms;
+ struct rb_node *first;
+ struct hrtimer timer;
+ ktime_t delta;
+ bool stopped;
+ ktime_t stopped_time;
+};
+
+static struct rtc_device *alarm_rtc_dev;
+static DEFINE_SPINLOCK(alarm_slock);
+static DEFINE_MUTEX(alarm_setrtc_mutex);
+static struct wake_lock alarm_rtc_wake_lock;
+static struct platform_device *alarm_platform_dev;
+struct alarm_queue alarms[ANDROID_ALARM_TYPE_COUNT];
+static bool suspended;
+
+static void update_timer_locked(struct alarm_queue *base, bool head_removed)
+{
+ struct android_alarm *alarm;
+ bool is_wakeup = base == &alarms[ANDROID_ALARM_RTC_WAKEUP] ||
+ base == &alarms[ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP];
+
+ if (base->stopped) {
+ pr_alarm(FLOW, "changed alarm while setting the wall time\n");
+ return;
+ }
+
+ if (is_wakeup && !suspended && head_removed)
+ wake_unlock(&alarm_rtc_wake_lock);
+
+ if (!base->first)
+ return;
+
+ alarm = container_of(base->first, struct android_alarm, node);
+
+ pr_alarm(FLOW, "selected alarm, type %d, func %pF at %lld\n",
+ alarm->type, alarm->function, ktime_to_ns(alarm->expires));
+
+ if (is_wakeup && suspended) {
+ pr_alarm(FLOW, "changed alarm while suspened\n");
+ wake_lock_timeout(&alarm_rtc_wake_lock, 1 * HZ);
+ return;
+ }
+
+ hrtimer_try_to_cancel(&base->timer);
+ base->timer.node.expires = ktime_add(base->delta, alarm->expires);
+ base->timer._softexpires = ktime_add(base->delta, alarm->softexpires);
+ hrtimer_start_expires(&base->timer, HRTIMER_MODE_ABS);
+}
+
+static void alarm_enqueue_locked(struct android_alarm *alarm)
+{
+ struct alarm_queue *base = &alarms[alarm->type];
+ struct rb_node **link = &base->alarms.rb_node;
+ struct rb_node *parent = NULL;
+ struct android_alarm *entry;
+ int leftmost = 1;
+ bool was_first = false;
+
+ pr_alarm(FLOW, "added alarm, type %d, func %pF at %lld\n",
+ alarm->type, alarm->function, ktime_to_ns(alarm->expires));
+
+ if (base->first == &alarm->node) {
+ base->first = rb_next(&alarm->node);
+ was_first = true;
+ }
+ if (!RB_EMPTY_NODE(&alarm->node)) {
+ rb_erase(&alarm->node, &base->alarms);
+ RB_CLEAR_NODE(&alarm->node);
+ }
+
+ while (*link) {
+ parent = *link;
+ entry = rb_entry(parent, struct android_alarm, node);
+ /*
+ * We don't care about collisions. Nodes with
+ * the same expiry time stay together.
+ */
+ if (alarm->expires.tv64 < entry->expires.tv64) {
+ link = &(*link)->rb_left;
+ } else {
+ link = &(*link)->rb_right;
+ leftmost = 0;
+ }
+ }
+ if (leftmost)
+ base->first = &alarm->node;
+ if (leftmost || was_first)
+ update_timer_locked(base, was_first);
+
+ rb_link_node(&alarm->node, parent, link);
+ rb_insert_color(&alarm->node, &base->alarms);
+}
+
+/**
+ * android_alarm_init - initialize an alarm
+ * @alarm: the alarm to be initialized
+ * @type: the alarm type to be used
+ * @function: alarm callback function
+ */
+void android_alarm_init(struct android_alarm *alarm,
+ enum android_alarm_type type, void (*function)(struct android_alarm *))
+{
+ RB_CLEAR_NODE(&alarm->node);
+ alarm->type = type;
+ alarm->function = function;
+
+ pr_alarm(FLOW, "created alarm, type %d, func %pF\n", type, function);
+}
+
+
+/**
+ * android_alarm_start_range - (re)start an alarm
+ * @alarm: the alarm to be added
+ * @start: earliest expiry time
+ * @end: expiry time
+ */
+void android_alarm_start_range(struct android_alarm *alarm, ktime_t start,
+ ktime_t end)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&alarm_slock, flags);
+ alarm->softexpires = start;
+ alarm->expires = end;
+ alarm_enqueue_locked(alarm);
+ spin_unlock_irqrestore(&alarm_slock, flags);
+}
+
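+
A hedged usage sketch of the two entry points above; my_alarm, my_alarm_fired() and the one-second offset are illustrative, not part of the driver:

static struct android_alarm my_alarm;

static void my_alarm_fired(struct android_alarm *alarm)
{
	pr_info("alarm type %d fired\n", alarm->type);
}

static void arm_one_second(void)
{
	ktime_t t = ktime_add(alarm_get_elapsed_realtime(), ktime_set(1, 0));

	android_alarm_init(&my_alarm, ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP,
			   my_alarm_fired);
	/* passing start == end requests an exact expiry with no slack */
	android_alarm_start_range(&my_alarm, t, t);
}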
+/**
+ * android_alarm_try_to_cancel - try to deactivate an alarm
+ * @alarm: alarm to stop
+ *
+ * Returns:
+ * 0 when the alarm was not active
+ * 1 when the alarm was active
+ * -1 when the alarm may currently be executing the callback function and
+ * cannot be stopped (it may also be inactive)
+ */
+int android_alarm_try_to_cancel(struct android_alarm *alarm)
+{
+ struct alarm_queue *base = &alarms[alarm->type];
+ unsigned long flags;
+ bool first = false;
+ int ret = 0;
+
+ spin_lock_irqsave(&alarm_slock, flags);
+ if (!RB_EMPTY_NODE(&alarm->node)) {
+ pr_alarm(FLOW, "canceled alarm, type %d, func %pF at %lld\n",
+ alarm->type, alarm->function,
+ ktime_to_ns(alarm->expires));
+ ret = 1;
+ if (base->first == &alarm->node) {
+ base->first = rb_next(&alarm->node);
+ first = true;
+ }
+ rb_erase(&alarm->node, &base->alarms);
+ RB_CLEAR_NODE(&alarm->node);
+ if (first)
+ update_timer_locked(base, true);
+ } else
+ pr_alarm(FLOW, "tried to cancel alarm, type %d, func %pF\n",
+ alarm->type, alarm->function);
+ spin_unlock_irqrestore(&alarm_slock, flags);
+ if (!ret && hrtimer_callback_running(&base->timer))
+ ret = -1;
+ return ret;
+}
+
+/**
+ * android_alarm_cancel - cancel an alarm and wait for the handler to finish.
+ * @alarm: the alarm to be cancelled
+ *
+ * Returns:
+ * 0 when the alarm was not active
+ * 1 when the alarm was active
+ */
+int android_alarm_cancel(struct android_alarm *alarm)
+{
+ for (;;) {
+ int ret = android_alarm_try_to_cancel(alarm);
+ if (ret >= 0)
+ return ret;
+ cpu_relax();
+ }
+}
+
+/**
+ * android_alarm_set_rtc - set the kernel and rtc walltime
+ * @new_time: timespec value containing the new time
+ */
+int android_alarm_set_rtc(struct timespec new_time)
+{
+ int i;
+ int ret;
+ unsigned long flags;
+ struct rtc_time rtc_new_rtc_time;
+ struct timespec tmp_time;
+
+ rtc_time_to_tm(new_time.tv_sec, &rtc_new_rtc_time);
+
+ pr_alarm(TSET, "set rtc %ld %ld - rtc %02d:%02d:%02d %02d/%02d/%04d\n",
+ new_time.tv_sec, new_time.tv_nsec,
+ rtc_new_rtc_time.tm_hour, rtc_new_rtc_time.tm_min,
+ rtc_new_rtc_time.tm_sec, rtc_new_rtc_time.tm_mon + 1,
+ rtc_new_rtc_time.tm_mday,
+ rtc_new_rtc_time.tm_year + 1900);
+
+ mutex_lock(&alarm_setrtc_mutex);
+ spin_lock_irqsave(&alarm_slock, flags);
+ wake_lock(&alarm_rtc_wake_lock);
+ getnstimeofday(&tmp_time);
+ for (i = 0; i < ANDROID_ALARM_SYSTEMTIME; i++) {
+ hrtimer_try_to_cancel(&alarms[i].timer);
+ alarms[i].stopped = true;
+ alarms[i].stopped_time = timespec_to_ktime(tmp_time);
+ }
+ alarms[ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP].delta =
+ alarms[ANDROID_ALARM_ELAPSED_REALTIME].delta =
+ ktime_sub(alarms[ANDROID_ALARM_ELAPSED_REALTIME].delta,
+ timespec_to_ktime(timespec_sub(tmp_time, new_time)));
+ spin_unlock_irqrestore(&alarm_slock, flags);
+ ret = do_settimeofday(&new_time);
+ spin_lock_irqsave(&alarm_slock, flags);
+ for (i = 0; i < ANDROID_ALARM_SYSTEMTIME; i++) {
+ alarms[i].stopped = false;
+ update_timer_locked(&alarms[i], false);
+ }
+ spin_unlock_irqrestore(&alarm_slock, flags);
+ if (ret < 0) {
+ pr_alarm(ERROR, "alarm_set_rtc: Failed to set time\n");
+ goto err;
+ }
+ if (!alarm_rtc_dev) {
+ pr_alarm(ERROR,
+ "alarm_set_rtc: no RTC, time will be lost on reboot\n");
+ goto err;
+ }
+ ret = rtc_set_time(alarm_rtc_dev, &rtc_new_rtc_time);
+ if (ret < 0)
+ pr_alarm(ERROR, "alarm_set_rtc: "
+ "Failed to set RTC, time will be lost on reboot\n");
+err:
+ wake_unlock(&alarm_rtc_wake_lock);
+ mutex_unlock(&alarm_setrtc_mutex);
+ return ret;
+}
+
+/**
+ * alarm_get_elapsed_realtime - get the elapsed real time in ktime_t format
+ *
+ * returns the time in ktime_t format
+ */
+ktime_t alarm_get_elapsed_realtime(void)
+{
+ ktime_t now;
+ unsigned long flags;
+ struct alarm_queue *base = &alarms[ANDROID_ALARM_ELAPSED_REALTIME];
+
+ spin_lock_irqsave(&alarm_slock, flags);
+ now = base->stopped ? base->stopped_time : ktime_get_real();
+ now = ktime_sub(now, base->delta);
+ spin_unlock_irqrestore(&alarm_slock, flags);
+ return now;
+}
+
+static enum hrtimer_restart alarm_timer_triggered(struct hrtimer *timer)
+{
+ struct alarm_queue *base;
+ struct android_alarm *alarm;
+ unsigned long flags;
+ ktime_t now;
+
+ spin_lock_irqsave(&alarm_slock, flags);
+
+ base = container_of(timer, struct alarm_queue, timer);
+ now = base->stopped ? base->stopped_time : hrtimer_cb_get_time(timer);
+ now = ktime_sub(now, base->delta);
+
+ pr_alarm(INT, "alarm_timer_triggered type %ld at %lld\n",
+ base - alarms, ktime_to_ns(now));
+
+ while (base->first) {
+ alarm = container_of(base->first, struct android_alarm, node);
+ if (alarm->softexpires.tv64 > now.tv64) {
+ pr_alarm(FLOW, "don't call alarm, %pF, %lld (s %lld)\n",
+ alarm->function, ktime_to_ns(alarm->expires),
+ ktime_to_ns(alarm->softexpires));
+ break;
+ }
+ base->first = rb_next(&alarm->node);
+ rb_erase(&alarm->node, &base->alarms);
+ RB_CLEAR_NODE(&alarm->node);
+ pr_alarm(CALL, "call alarm, type %d, func %pF, %lld (s %lld)\n",
+ alarm->type, alarm->function,
+ ktime_to_ns(alarm->expires),
+ ktime_to_ns(alarm->softexpires));
+ spin_unlock_irqrestore(&alarm_slock, flags);
+ alarm->function(alarm);
+ spin_lock_irqsave(&alarm_slock, flags);
+ }
+ if (!base->first)
+ pr_alarm(FLOW, "no more alarms of type %ld\n", base - alarms);
+ update_timer_locked(base, true);
+ spin_unlock_irqrestore(&alarm_slock, flags);
+ return HRTIMER_NORESTART;
+}
+
+static void alarm_triggered_func(void *p)
+{
+ struct rtc_device *rtc = alarm_rtc_dev;
+ if (!(rtc->irq_data & RTC_AF))
+ return;
+ pr_alarm(INT, "rtc alarm triggered\n");
+ wake_lock_timeout(&alarm_rtc_wake_lock, 1 * HZ);
+}
+
+static int alarm_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ int err = 0;
+ unsigned long flags;
+ struct rtc_wkalrm rtc_alarm;
+ struct rtc_time rtc_current_rtc_time;
+ unsigned long rtc_current_time;
+ unsigned long rtc_alarm_time;
+ struct timespec rtc_delta;
+ struct timespec wall_time;
+ struct alarm_queue *wakeup_queue = NULL;
+ struct alarm_queue *tmp_queue = NULL;
+
+ pr_alarm(SUSPEND, "alarm_suspend(%p, %d)\n", pdev, state.event);
+
+ spin_lock_irqsave(&alarm_slock, flags);
+ suspended = true;
+ spin_unlock_irqrestore(&alarm_slock, flags);
+
+ hrtimer_cancel(&alarms[ANDROID_ALARM_RTC_WAKEUP].timer);
+ hrtimer_cancel(&alarms[
+ ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP].timer);
+
+ tmp_queue = &alarms[ANDROID_ALARM_RTC_WAKEUP];
+ if (tmp_queue->first)
+ wakeup_queue = tmp_queue;
+ tmp_queue = &alarms[ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP];
+ if (tmp_queue->first && (!wakeup_queue ||
+ hrtimer_get_expires(&tmp_queue->timer).tv64 <
+ hrtimer_get_expires(&wakeup_queue->timer).tv64))
+ wakeup_queue = tmp_queue;
+ if (wakeup_queue) {
+ rtc_read_time(alarm_rtc_dev, &rtc_current_rtc_time);
+ getnstimeofday(&wall_time);
+ rtc_tm_to_time(&rtc_current_rtc_time, &rtc_current_time);
+ set_normalized_timespec(&rtc_delta,
+ wall_time.tv_sec - rtc_current_time,
+ wall_time.tv_nsec);
+
+ rtc_alarm_time = timespec_sub(ktime_to_timespec(
+ hrtimer_get_expires(&wakeup_queue->timer)),
+ rtc_delta).tv_sec;
+
+ rtc_time_to_tm(rtc_alarm_time, &rtc_alarm.time);
+ rtc_alarm.enabled = 1;
+ rtc_set_alarm(alarm_rtc_dev, &rtc_alarm);
+ rtc_read_time(alarm_rtc_dev, &rtc_current_rtc_time);
+ rtc_tm_to_time(&rtc_current_rtc_time, &rtc_current_time);
+ pr_alarm(SUSPEND,
+ "rtc alarm set at %ld, now %ld, rtc delta %ld.%09ld\n",
+ rtc_alarm_time, rtc_current_time,
+ rtc_delta.tv_sec, rtc_delta.tv_nsec);
+ if (rtc_current_time + 1 >= rtc_alarm_time) {
+ pr_alarm(SUSPEND, "alarm about to go off\n");
+ memset(&rtc_alarm, 0, sizeof(rtc_alarm));
+ rtc_alarm.enabled = 0;
+ rtc_set_alarm(alarm_rtc_dev, &rtc_alarm);
+
+ spin_lock_irqsave(&alarm_slock, flags);
+ suspended = false;
+ wake_lock_timeout(&alarm_rtc_wake_lock, 2 * HZ);
+ update_timer_locked(&alarms[ANDROID_ALARM_RTC_WAKEUP],
+ false);
+ update_timer_locked(&alarms[
+ ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP], false);
+ err = -EBUSY;
+ spin_unlock_irqrestore(&alarm_slock, flags);
+ }
+ }
+ return err;
+}
+
+static int alarm_resume(struct platform_device *pdev)
+{
+ struct rtc_wkalrm alarm;
+ unsigned long flags;
+
+ pr_alarm(SUSPEND, "alarm_resume(%p)\n", pdev);
+
+ memset(&alarm, 0, sizeof(alarm));
+ alarm.enabled = 0;
+ rtc_set_alarm(alarm_rtc_dev, &alarm);
+
+ spin_lock_irqsave(&alarm_slock, flags);
+ suspended = false;
+ update_timer_locked(&alarms[ANDROID_ALARM_RTC_WAKEUP], false);
+ update_timer_locked(&alarms[ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP],
+ false);
+ spin_unlock_irqrestore(&alarm_slock, flags);
+
+ return 0;
+}
+
+static struct rtc_task alarm_rtc_task = {
+ .func = alarm_triggered_func
+};
+
+static int rtc_alarm_add_device(struct device *dev,
+ struct class_interface *class_intf)
+{
+ int err;
+ struct rtc_device *rtc = to_rtc_device(dev);
+
+ mutex_lock(&alarm_setrtc_mutex);
+
+ if (alarm_rtc_dev) {
+ err = -EBUSY;
+ goto err1;
+ }
+
+ alarm_platform_dev =
+ platform_device_register_simple("alarm", -1, NULL, 0);
+ if (IS_ERR(alarm_platform_dev)) {
+ err = PTR_ERR(alarm_platform_dev);
+ goto err2;
+ }
+ err = rtc_irq_register(rtc, &alarm_rtc_task);
+ if (err)
+ goto err3;
+ alarm_rtc_dev = rtc;
+ pr_alarm(INIT_STATUS, "using rtc device, %s, for alarms", rtc->name);
+ mutex_unlock(&alarm_setrtc_mutex);
+
+ return 0;
+
+err3:
+ platform_device_unregister(alarm_platform_dev);
+err2:
+err1:
+ mutex_unlock(&alarm_setrtc_mutex);
+ return err;
+}
+
+static void rtc_alarm_remove_device(struct device *dev,
+ struct class_interface *class_intf)
+{
+ if (dev == &alarm_rtc_dev->dev) {
+ pr_alarm(INIT_STATUS, "lost rtc device for alarms");
+ rtc_irq_unregister(alarm_rtc_dev, &alarm_rtc_task);
+ platform_device_unregister(alarm_platform_dev);
+ alarm_rtc_dev = NULL;
+ }
+}
+
+static struct class_interface rtc_alarm_interface = {
+ .add_dev = &rtc_alarm_add_device,
+ .remove_dev = &rtc_alarm_remove_device,
+};
+
+static struct platform_driver alarm_driver = {
+ .suspend = alarm_suspend,
+ .resume = alarm_resume,
+ .driver = {
+ .name = "alarm"
+ }
+};
+
+static int __init alarm_late_init(void)
+{
+ unsigned long flags;
+ struct timespec tmp_time, system_time;
+
+ /* this needs to run after the rtc is read at boot */
+ spin_lock_irqsave(&alarm_slock, flags);
+ /* We read the current rtc and system time so we can later calculate
+ * elapsed realtime to be (boot_systemtime + rtc - boot_rtc) ==
+ * (rtc - (boot_rtc - boot_systemtime))
+ */
+ getnstimeofday(&tmp_time);
+ ktime_get_ts(&system_time);
+ alarms[ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP].delta =
+ alarms[ANDROID_ALARM_ELAPSED_REALTIME].delta =
+ timespec_to_ktime(timespec_sub(tmp_time, system_time));
+
+ spin_unlock_irqrestore(&alarm_slock, flags);
+ return 0;
+}
+
+static int __init alarm_driver_init(void)
+{
+ int err;
+ int i;
+
+ for (i = 0; i < ANDROID_ALARM_SYSTEMTIME; i++) {
+ hrtimer_init(&alarms[i].timer,
+ CLOCK_REALTIME, HRTIMER_MODE_ABS);
+ alarms[i].timer.function = alarm_timer_triggered;
+ }
+ hrtimer_init(&alarms[ANDROID_ALARM_SYSTEMTIME].timer,
+ CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+ alarms[ANDROID_ALARM_SYSTEMTIME].timer.function = alarm_timer_triggered;
+ err = platform_driver_register(&alarm_driver);
+ if (err < 0)
+ goto err1;
+ wake_lock_init(&alarm_rtc_wake_lock, WAKE_LOCK_SUSPEND, "alarm_rtc");
+ rtc_alarm_interface.class = rtc_class;
+ err = class_interface_register(&rtc_alarm_interface);
+ if (err < 0)
+ goto err2;
+
+ return 0;
+
+err2:
+ wake_lock_destroy(&alarm_rtc_wake_lock);
+ platform_driver_unregister(&alarm_driver);
+err1:
+ return err;
+}
+
+static void __exit alarm_exit(void)
+{
+ class_interface_unregister(&rtc_alarm_interface);
+ wake_lock_destroy(&alarm_rtc_wake_lock);
+ platform_driver_unregister(&alarm_driver);
+}
+
+late_initcall(alarm_late_init);
+module_init(alarm_driver_init);
+module_exit(alarm_exit);
+
diff --git a/drivers/staging/android/android_alarm.h b/drivers/staging/android/android_alarm.h
new file mode 100644
index 00000000000..6eecbde2ef6
--- /dev/null
+++ b/drivers/staging/android/android_alarm.h
@@ -0,0 +1,121 @@
+/* include/linux/android_alarm.h
+ *
+ * Copyright (C) 2006-2007 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_ANDROID_ALARM_H
+#define _LINUX_ANDROID_ALARM_H
+
+#include <linux/ioctl.h>
+#include <linux/time.h>
+
+enum android_alarm_type {
+ /* return code bit numbers or set alarm arg */
+ ANDROID_ALARM_RTC_WAKEUP,
+ ANDROID_ALARM_RTC,
+ ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP,
+ ANDROID_ALARM_ELAPSED_REALTIME,
+ ANDROID_ALARM_SYSTEMTIME,
+
+ ANDROID_ALARM_TYPE_COUNT,
+
+ /* return code bit numbers */
+ /* ANDROID_ALARM_TIME_CHANGE = 16 */
+};
+
+#ifdef __KERNEL__
+
+#include <linux/ktime.h>
+#include <linux/rbtree.h>
+
+/*
+ * The alarm interface is similar to the hrtimer interface but adds support
+ * for wakeup from suspend. It also adds an elapsed realtime clock that can
+ * be used for periodic timers that need to keep running while the system is
+ * suspended and not be disrupted when the wall time is set.
+ */
+
+/**
+ * struct android_alarm - the basic alarm structure
+ * @node: red black tree node for time ordered insertion
+ * @type: alarm type. rtc/elapsed-realtime/systemtime, wakeup/non-wakeup.
+ * @softexpires: the absolute earliest expiry time of the alarm.
+ * @expires: the absolute expiry time.
+ * @function: alarm expiry callback function
+ *
+ * The alarm structure must be initialized by android_alarm_init()
+ *
+ */
+
+struct android_alarm {
+ struct rb_node node;
+ enum android_alarm_type type;
+ ktime_t softexpires;
+ ktime_t expires;
+ void (*function)(struct android_alarm *);
+};
+
+void android_alarm_init(struct android_alarm *alarm,
+ enum android_alarm_type type, void (*function)(struct android_alarm *));
+void android_alarm_start_range(struct android_alarm *alarm, ktime_t start,
+ ktime_t end);
+int android_alarm_try_to_cancel(struct android_alarm *alarm);
+int android_alarm_cancel(struct android_alarm *alarm);
+ktime_t alarm_get_elapsed_realtime(void);
+
+/* set rtc while preserving elapsed realtime */
+int android_alarm_set_rtc(const struct timespec ts);
+
+#ifdef CONFIG_ANDROID_ALARM_OLDDRV_COMPAT
+/*
+ * Some older drivers depend on the old API,
+ * so provide compatibility macros for now.
+ */
+#define alarm android_alarm
+#define alarm_init(x, y, z) android_alarm_init(x, y, z)
+#define alarm_start_range(x, y, z) android_alarm_start_range(x, y, z)
+#define alarm_try_to_cancel(x) android_alarm_try_to_cancel(x)
+#define alarm_cancel(x) android_alarm_cancel(x)
+#define alarm_set_rtc(x) android_alarm_set_rtc(x)
+#endif
+
+
+#endif
+
+enum android_alarm_return_flags {
+ ANDROID_ALARM_RTC_WAKEUP_MASK = 1U << ANDROID_ALARM_RTC_WAKEUP,
+ ANDROID_ALARM_RTC_MASK = 1U << ANDROID_ALARM_RTC,
+ ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP_MASK =
+ 1U << ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP,
+ ANDROID_ALARM_ELAPSED_REALTIME_MASK =
+ 1U << ANDROID_ALARM_ELAPSED_REALTIME,
+ ANDROID_ALARM_SYSTEMTIME_MASK = 1U << ANDROID_ALARM_SYSTEMTIME,
+ ANDROID_ALARM_TIME_CHANGE_MASK = 1U << 16
+};
+
+/* Disable alarm */
+#define ANDROID_ALARM_CLEAR(type) _IO('a', 0 | ((type) << 4))
+
+/* Ack last alarm and wait for next */
+#define ANDROID_ALARM_WAIT _IO('a', 1)
+
+#define ALARM_IOW(c, type, size) _IOW('a', (c) | ((type) << 4), size)
+/* Set alarm */
+#define ANDROID_ALARM_SET(type) ALARM_IOW(2, type, struct timespec)
+#define ANDROID_ALARM_SET_AND_WAIT(type) ALARM_IOW(3, type, struct timespec)
+#define ANDROID_ALARM_GET_TIME(type) ALARM_IOW(4, type, struct timespec)
+#define ANDROID_ALARM_SET_RTC _IOW('a', 5, struct timespec)
+#define ANDROID_ALARM_BASE_CMD(cmd) (cmd & ~(_IOC(0, 0, 0xf0, 0)))
+#define ANDROID_ALARM_IOCTL_TO_TYPE(cmd) (_IOC_NR(cmd) >> 4)
+
+#endif
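
The commands above encode the alarm type in bits 4-7 of the ioctl number, which ANDROID_ALARM_IOCTL_TO_TYPE() recovers. A hedged user-space sketch reading the elapsed-realtime clock through the /dev/alarm node registered by alarm-dev.c; error handling is trimmed:

#include <stdio.h>
#include <time.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include "android_alarm.h"	/* this header, copied into the build */

int main(void)
{
	struct timespec ts;
	int fd = open("/dev/alarm", O_RDONLY);	/* GET_TIME is allowed read-only */

	if (fd < 0)
		return 1;
	if (ioctl(fd, ANDROID_ALARM_GET_TIME(ANDROID_ALARM_ELAPSED_REALTIME),
		  &ts) == 0)
		printf("elapsed: %ld.%09ld\n", (long)ts.tv_sec, ts.tv_nsec);
	close(fd);
	return 0;
}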
diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
index 99052bfd3a2..9f1f27e7c86 100644
--- a/drivers/staging/android/ashmem.c
+++ b/drivers/staging/android/ashmem.c
@@ -315,7 +315,7 @@ static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
get_file(asma->file);
/*
- * XXX - Reworked to use shmem_zero_setup() instead of
+ * XXX - Reworked to use shmem_zero_setup() instead of
* shmem_set_file while we're in staging. -jstultz
*/
if (vma->vm_flags & VM_SHARED) {
@@ -680,7 +680,7 @@ static long ashmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
return ret;
}
-static struct file_operations ashmem_fops = {
+static const struct file_operations ashmem_fops = {
.owner = THIS_MODULE,
.open = ashmem_open,
.release = ashmem_release,
diff --git a/drivers/staging/android/binder.c b/drivers/staging/android/binder.c
index f0b7e6605ab..59e095362c8 100644
--- a/drivers/staging/android/binder.c
+++ b/drivers/staging/android/binder.c
@@ -103,7 +103,7 @@ static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO);
-static int binder_debug_no_lock;
+static bool binder_debug_no_lock;
module_param_named(proc_no_lock, binder_debug_no_lock, bool, S_IWUSR | S_IRUGO);
static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
@@ -258,7 +258,7 @@ struct binder_ref {
};
struct binder_buffer {
- struct list_head entry; /* free and allocated entries by addesss */
+ struct list_head entry; /* free and allocated entries by address */
struct rb_node rb_node; /* free entry by size or allocated entry */
/* by address */
unsigned free:1;
@@ -288,6 +288,7 @@ struct binder_proc {
struct rb_root refs_by_node;
int pid;
struct vm_area_struct *vma;
+ struct mm_struct *vma_vm_mm;
struct task_struct *tsk;
struct files_struct *files;
struct hlist_node deferred_work_node;
@@ -633,7 +634,7 @@ static int binder_update_page_range(struct binder_proc *proc, int allocate,
if (mm) {
down_write(&mm->mmap_sem);
vma = proc->vma;
- if (vma && mm != vma->vm_mm) {
+ if (vma && mm != proc->vma_vm_mm) {
pr_err("binder: %d: vma mm and task mm mismatch\n",
proc->pid);
vma = NULL;
@@ -2776,6 +2777,7 @@ static void binder_vma_close(struct vm_area_struct *vma)
(vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
(unsigned long)pgprot_val(vma->vm_page_prot));
proc->vma = NULL;
+ proc->vma_vm_mm = NULL;
binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
}
@@ -2858,6 +2860,7 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
barrier();
proc->files = get_files_struct(proc->tsk);
proc->vma = vma;
+ proc->vma_vm_mm = vma->vm_mm;
/*printk(KERN_INFO "binder_mmap: %d %lx-%lx maps %p\n",
proc->pid, vma->vm_start, vma->vm_end, proc->buffer);*/
diff --git a/drivers/staging/android/logger.c b/drivers/staging/android/logger.c
index ffc2d043dd8..ea69b6a77da 100644
--- a/drivers/staging/android/logger.c
+++ b/drivers/staging/android/logger.c
@@ -60,7 +60,11 @@ struct logger_reader {
};
/* logger_offset - returns index 'n' into the log via (optimized) modulus */
-#define logger_offset(n) ((n) & (log->size - 1))
+size_t logger_offset(struct logger_log *log, size_t n)
+{
+ return n & (log->size-1);
+}
+
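Turning logger_offset() from a macro into a function makes the dependence on log->size explicit instead of silently capturing a local named log. The mask-based modulus only works when the ring size is a power of two; a standalone sketch with that assumption spelled out:

#include <assert.h>
#include <stddef.h>

static size_t ring_offset(size_t size, size_t n)
{
	assert(size && (size & (size - 1)) == 0);	/* power-of-two ring */
	return n & (size - 1);	/* same optimized modulus as logger_offset() */
}
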
/*
* file_get_log - Given a file structure, return the associated log
@@ -89,20 +93,24 @@ static inline struct logger_log *file_get_log(struct file *file)
* get_entry_len - Grabs the length of the payload of the next entry starting
* from 'off'.
*
+ * An entry length is 2 bytes (16 bits) in host endian order.
+ * In the log, the length does not include the size of the log entry structure.
+ * This function returns the size including the log entry structure.
+ *
* Caller needs to hold log->mutex.
*/
static __u32 get_entry_len(struct logger_log *log, size_t off)
{
__u16 val;
- switch (log->size - off) {
- case 1:
- memcpy(&val, log->buffer + off, 1);
- memcpy(((char *) &val) + 1, log->buffer, 1);
- break;
- default:
- memcpy(&val, log->buffer + off, 2);
- }
+ /* copy 2 bytes from buffer, in memcpy order,
+ * handling possible wrap at end of buffer */
+
+ ((__u8 *)&val)[0] = log->buffer[off];
+ if (likely(off+1 < log->size))
+ ((__u8 *)&val)[1] = log->buffer[off+1];
+ else
+ ((__u8 *)&val)[1] = log->buffer[0];
return sizeof(struct logger_entry) + val;
}
@@ -137,7 +145,7 @@ static ssize_t do_read_log_to_user(struct logger_log *log,
if (copy_to_user(buf + len, log->buffer, count - len))
return -EFAULT;
- reader->r_off = logger_offset(reader->r_off + count);
+ reader->r_off = logger_offset(log, reader->r_off + count);
return count;
}
@@ -164,9 +172,10 @@ static ssize_t logger_read(struct file *file, char __user *buf,
start:
while (1) {
+ mutex_lock(&log->mutex);
+
prepare_to_wait(&log->wq, &wait, TASK_INTERRUPTIBLE);
- mutex_lock(&log->mutex);
ret = (log->w_off == reader->r_off);
mutex_unlock(&log->mutex);
if (!ret)
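
Taking log->mutex before prepare_to_wait() here is likely motivated by the fact that prepare_to_wait() puts the task in TASK_INTERRUPTIBLE, and a mutex_lock() that sleeps afterwards would clobber that state. A hedged sketch of the resulting wait pattern (wq, lock and data_ready are illustrative names):

static DECLARE_WAIT_QUEUE_HEAD(wq);
static DEFINE_MUTEX(lock);
static int data_ready;

static int wait_for_data(void)
{
	DEFINE_WAIT(wait);
	int ret = 0;

	for (;;) {
		mutex_lock(&lock);	/* may sleep: take it first */
		prepare_to_wait(&wq, &wait, TASK_INTERRUPTIBLE);
		ret = !data_ready;
		mutex_unlock(&lock);
		if (!ret)
			break;
		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}
		schedule();
	}
	finish_wait(&wq, &wait);
	return ret < 0 ? ret : 0;
}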
@@ -225,7 +234,7 @@ static size_t get_next_entry(struct logger_log *log, size_t off, size_t len)
do {
size_t nr = get_entry_len(log, off);
- off = logger_offset(off + nr);
+ off = logger_offset(log, off + nr);
count += nr;
} while (count < len);
@@ -233,16 +242,28 @@ static size_t get_next_entry(struct logger_log *log, size_t off, size_t len)
}
/*
- * clock_interval - is a < c < b in mod-space? Put another way, does the line
- * from a to b cross c?
+ * is_between - is a < c < b, accounting for wrapping of a, b, and c
+ * positions in the buffer
+ *
+ * That is, if a<b, check for c between a and b
+ * and if a>b, check for c outside (not between) a and b
+ *
+ * |------- a xxxxxxxx b --------|
+ * c^
+ *
+ * |xxxxx b --------- a xxxxxxxxx|
+ * c^
+ * or c^
*/
-static inline int clock_interval(size_t a, size_t b, size_t c)
+static inline int is_between(size_t a, size_t b, size_t c)
{
- if (b < a) {
- if (a < c || b >= c)
+ if (a < b) {
+ /* is c between a and b? */
+ if (a < c && c <= b)
return 1;
} else {
- if (a < c && b >= c)
+ /* is c outside of b through a? */
+ if (c <= b || a < c)
return 1;
}
@@ -260,14 +281,14 @@ static inline int clock_interval(size_t a, size_t b, size_t c)
static void fix_up_readers(struct logger_log *log, size_t len)
{
size_t old = log->w_off;
- size_t new = logger_offset(old + len);
+ size_t new = logger_offset(log, old + len);
struct logger_reader *reader;
- if (clock_interval(old, new, log->head))
+ if (is_between(old, new, log->head))
log->head = get_next_entry(log, log->head, len);
list_for_each_entry(reader, &log->readers, list)
- if (clock_interval(old, new, reader->r_off))
+ if (is_between(old, new, reader->r_off))
reader->r_off = get_next_entry(log, reader->r_off, len);
}
@@ -286,7 +307,7 @@ static void do_write_log(struct logger_log *log, const void *buf, size_t count)
if (count != len)
memcpy(log->buffer, buf + len, count - len);
- log->w_off = logger_offset(log->w_off + count);
+ log->w_off = logger_offset(log, log->w_off + count);
}
@@ -309,9 +330,15 @@ static ssize_t do_write_log_from_user(struct logger_log *log,
if (count != len)
if (copy_from_user(log->buffer, buf + len, count - len))
+ /*
+ * Note that by not updating w_off, this abandons the
+ * portion of the new entry that *was* successfully
+ * copied, just above. This is intentional to avoid
+ * message corruption from missing fragments.
+ */
return -EFAULT;
- log->w_off = logger_offset(log->w_off + count);
+ log->w_off = logger_offset(log, log->w_off + count);
return count;
}
@@ -432,7 +459,12 @@ static int logger_release(struct inode *ignored, struct file *file)
{
if (file->f_mode & FMODE_READ) {
struct logger_reader *reader = file->private_data;
+ struct logger_log *log = reader->log;
+
+ mutex_lock(&log->mutex);
list_del(&reader->list);
+ mutex_unlock(&log->mutex);
+
kfree(reader);
}
diff --git a/drivers/staging/android/lowmemorykiller.c b/drivers/staging/android/lowmemorykiller.c
index efc7dc1f483..052b43e4e50 100644
--- a/drivers/staging/android/lowmemorykiller.c
+++ b/drivers/staging/android/lowmemorykiller.c
@@ -1,16 +1,17 @@
/* drivers/misc/lowmemorykiller.c
*
* The lowmemorykiller driver lets user-space specify a set of memory thresholds
- * where processes with a range of oom_adj values will get killed. Specify the
- * minimum oom_adj values in /sys/module/lowmemorykiller/parameters/adj and the
- * number of free pages in /sys/module/lowmemorykiller/parameters/minfree. Both
- * files take a comma separated list of numbers in ascending order.
+ * where processes with a range of oom_score_adj values will get killed. Specify
+ * the minimum oom_score_adj values in
+ * /sys/module/lowmemorykiller/parameters/adj and the number of free pages in
+ * /sys/module/lowmemorykiller/parameters/minfree. Both files take a comma
+ * separated list of numbers in ascending order.
*
* For example, write "0,8" to /sys/module/lowmemorykiller/parameters/adj and
* "1024,4096" to /sys/module/lowmemorykiller/parameters/minfree to kill
- * processes with a oom_adj value of 8 or higher when the free memory drops
- * below 4096 pages and kill processes with a oom_adj value of 0 or higher
- * when the free memory drops below 1024 pages.
+ * processes with an oom_score_adj value of 8 or higher when the free memory
+ * drops below 4096 pages and kill processes with an oom_score_adj value of 0
+ * higher when the free memory drops below 1024 pages.
*
* The driver considers memory used for caches to be free, but if a large
* percentage of the cached memory is locked this can be very inaccurate
@@ -34,6 +35,7 @@
#include <linux/mm.h>
#include <linux/oom.h>
#include <linux/sched.h>
+#include <linux/rcupdate.h>
#include <linux/profile.h>
#include <linux/notifier.h>
@@ -45,7 +47,7 @@ static int lowmem_adj[6] = {
12,
};
static int lowmem_adj_size = 4;
-static size_t lowmem_minfree[6] = {
+static int lowmem_minfree[6] = {
3 * 512, /* 6MB */
2 * 1024, /* 8MB */
4 * 1024, /* 16MB */
@@ -73,23 +75,23 @@ static int
task_notify_func(struct notifier_block *self, unsigned long val, void *data)
{
struct task_struct *task = data;
- if (task == lowmem_deathpending) {
+
+ if (task == lowmem_deathpending)
lowmem_deathpending = NULL;
- task_handoff_unregister(&task_nb);
- }
+
return NOTIFY_OK;
}
static int lowmem_shrink(struct shrinker *s, struct shrink_control *sc)
{
- struct task_struct *p;
+ struct task_struct *tsk;
struct task_struct *selected = NULL;
int rem = 0;
int tasksize;
int i;
- int min_adj = OOM_ADJUST_MAX + 1;
+ int min_score_adj = OOM_SCORE_ADJ_MAX + 1;
int selected_tasksize = 0;
- int selected_oom_adj;
+ int selected_oom_score_adj;
int array_size = ARRAY_SIZE(lowmem_adj);
int other_free = global_page_state(NR_FREE_PAGES);
int other_file = global_page_state(NR_FILE_PAGES) -
@@ -115,80 +117,77 @@ static int lowmem_shrink(struct shrinker *s, struct shrink_control *sc)
for (i = 0; i < array_size; i++) {
if (other_free < lowmem_minfree[i] &&
other_file < lowmem_minfree[i]) {
- min_adj = lowmem_adj[i];
+ min_score_adj = lowmem_adj[i];
break;
}
}
if (sc->nr_to_scan > 0)
lowmem_print(3, "lowmem_shrink %lu, %x, ofree %d %d, ma %d\n",
sc->nr_to_scan, sc->gfp_mask, other_free,
- other_file, min_adj);
+ other_file, min_score_adj);
rem = global_page_state(NR_ACTIVE_ANON) +
global_page_state(NR_ACTIVE_FILE) +
global_page_state(NR_INACTIVE_ANON) +
global_page_state(NR_INACTIVE_FILE);
- if (sc->nr_to_scan <= 0 || min_adj == OOM_ADJUST_MAX + 1) {
+ if (sc->nr_to_scan <= 0 || min_score_adj == OOM_SCORE_ADJ_MAX + 1) {
lowmem_print(5, "lowmem_shrink %lu, %x, return %d\n",
sc->nr_to_scan, sc->gfp_mask, rem);
return rem;
}
- selected_oom_adj = min_adj;
-
- read_lock(&tasklist_lock);
- for_each_process(p) {
- struct mm_struct *mm;
- struct signal_struct *sig;
- int oom_adj;
-
- task_lock(p);
- mm = p->mm;
- sig = p->signal;
- if (!mm || !sig) {
- task_unlock(p);
+ selected_oom_score_adj = min_score_adj;
+
+ rcu_read_lock();
+ for_each_process(tsk) {
+ struct task_struct *p;
+ int oom_score_adj;
+
+ if (tsk->flags & PF_KTHREAD)
continue;
- }
- oom_adj = sig->oom_adj;
- if (oom_adj < min_adj) {
+
+ p = find_lock_task_mm(tsk);
+ if (!p)
+ continue;
+
+ oom_score_adj = p->signal->oom_score_adj;
+ if (oom_score_adj < min_score_adj) {
task_unlock(p);
continue;
}
- tasksize = get_mm_rss(mm);
+ tasksize = get_mm_rss(p->mm);
task_unlock(p);
if (tasksize <= 0)
continue;
if (selected) {
- if (oom_adj < selected_oom_adj)
+ if (oom_score_adj < selected_oom_score_adj)
continue;
- if (oom_adj == selected_oom_adj &&
+ if (oom_score_adj == selected_oom_score_adj &&
tasksize <= selected_tasksize)
continue;
}
selected = p;
selected_tasksize = tasksize;
- selected_oom_adj = oom_adj;
+ selected_oom_score_adj = oom_score_adj;
lowmem_print(2, "select %d (%s), adj %d, size %d, to kill\n",
- p->pid, p->comm, oom_adj, tasksize);
+ p->pid, p->comm, oom_score_adj, tasksize);
}
if (selected) {
lowmem_print(1, "send sigkill to %d (%s), adj %d, size %d\n",
selected->pid, selected->comm,
- selected_oom_adj, selected_tasksize);
+ selected_oom_score_adj, selected_tasksize);
/*
- * If CONFIG_PROFILING is off, then task_handoff_register()
- * is a nop. In that case we don't want to stall the killer
- * by setting lowmem_deathpending.
+ * If CONFIG_PROFILING is off, then we don't want to stall
+ * the killer by setting lowmem_deathpending.
*/
#ifdef CONFIG_PROFILING
lowmem_deathpending = selected;
lowmem_deathpending_timeout = jiffies + HZ;
- task_handoff_register(&task_nb);
#endif
- force_sig(SIGKILL, selected);
+ send_sig(SIGKILL, selected, 0);
rem -= selected_tasksize;
}
lowmem_print(4, "lowmem_shrink %lu, %x, return %d\n",
sc->nr_to_scan, sc->gfp_mask, rem);
- read_unlock(&tasklist_lock);
+ rcu_read_unlock();
return rem;
}
@@ -199,6 +198,7 @@ static struct shrinker lowmem_shrinker = {
static int __init lowmem_init(void)
{
+ task_handoff_register(&task_nb);
register_shrinker(&lowmem_shrinker);
return 0;
}
@@ -206,6 +206,7 @@ static int __init lowmem_init(void)
static void __exit lowmem_exit(void)
{
unregister_shrinker(&lowmem_shrinker);
+ task_handoff_unregister(&task_nb);
}
module_param_named(cost, lowmem_shrinker.seeks, int, S_IRUGO | S_IWUSR);
diff --git a/drivers/staging/android/persistent_ram.c b/drivers/staging/android/persistent_ram.c
new file mode 100644
index 00000000000..e08f2574e30
--- /dev/null
+++ b/drivers/staging/android/persistent_ram.c
@@ -0,0 +1,470 @@
+/*
+ * Copyright (C) 2012 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/list.h>
+#include <linux/memblock.h>
+#include <linux/rslib.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include "persistent_ram.h"
+
+struct persistent_ram_buffer {
+ uint32_t sig;
+ atomic_t start;
+ atomic_t size;
+ uint8_t data[0];
+};
+
+#define PERSISTENT_RAM_SIG (0x43474244) /* DBGC */
+
+static __initdata LIST_HEAD(persistent_ram_list);
+
+static inline size_t buffer_size(struct persistent_ram_zone *prz)
+{
+ return atomic_read(&prz->buffer->size);
+}
+
+static inline size_t buffer_start(struct persistent_ram_zone *prz)
+{
+ return atomic_read(&prz->buffer->start);
+}
+
+/* increase and wrap the start pointer, returning the old value */
+static inline size_t buffer_start_add(struct persistent_ram_zone *prz, size_t a)
+{
+ int old;
+ int new;
+
+ do {
+ old = atomic_read(&prz->buffer->start);
+ new = old + a;
+ while (unlikely(new > prz->buffer_size))
+ new -= prz->buffer_size;
+ } while (atomic_cmpxchg(&prz->buffer->start, old, new) != old);
+
+ return old;
+}
+
+/* increase the size counter until it hits the max size */
+static inline void buffer_size_add(struct persistent_ram_zone *prz, size_t a)
+{
+ size_t old;
+ size_t new;
+
+ if (atomic_read(&prz->buffer->size) == prz->buffer_size)
+ return;
+
+ do {
+ old = atomic_read(&prz->buffer->size);
+ new = old + a;
+ if (new > prz->buffer_size)
+ new = prz->buffer_size;
+ } while (atomic_cmpxchg(&prz->buffer->size, old, new) != old);
+}
+
+/* increase the size counter, returning an error if it hits the max size */
+static inline ssize_t buffer_size_add_clamp(struct persistent_ram_zone *prz,
+ size_t a)
+{
+ size_t old;
+ size_t new;
+
+ do {
+ old = atomic_read(&prz->buffer->size);
+ new = old + a;
+ if (new > prz->buffer_size)
+ return -ENOMEM;
+ } while (atomic_cmpxchg(&prz->buffer->size, old, new) != old);
+
+ return 0;
+}
+
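buffer_start_add() and buffer_size_add() above update shared counters without a lock by retrying a compare-and-swap until no concurrent writer intervened. The same clamp idiom as a standalone C11-atomics sketch, where cap stands in for prz->buffer_size:

#include <stdatomic.h>
#include <stddef.h>

static size_t clamped_add(atomic_size_t *val, size_t a, size_t cap)
{
	size_t old = atomic_load(val);
	size_t new;

	do {
		new = old + a;
		if (new > cap)
			new = cap;
		/* on failure the CAS reloads 'old' with the current value */
	} while (!atomic_compare_exchange_weak(val, &old, new));

	return old;
}
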
+static void notrace persistent_ram_encode_rs8(struct persistent_ram_zone *prz,
+ uint8_t *data, size_t len, uint8_t *ecc)
+{
+ int i;
+ uint16_t par[prz->ecc_size];
+
+ /* Initialize the parity buffer */
+ memset(par, 0, sizeof(par));
+ encode_rs8(prz->rs_decoder, data, len, par, 0);
+ for (i = 0; i < prz->ecc_size; i++)
+ ecc[i] = par[i];
+}
+
+static int persistent_ram_decode_rs8(struct persistent_ram_zone *prz,
+ void *data, size_t len, uint8_t *ecc)
+{
+ int i;
+ uint16_t par[prz->ecc_size];
+
+ for (i = 0; i < prz->ecc_size; i++)
+ par[i] = ecc[i];
+ return decode_rs8(prz->rs_decoder, data, par, len,
+ NULL, 0, NULL, 0, NULL);
+}
+
+static void notrace persistent_ram_update_ecc(struct persistent_ram_zone *prz,
+ unsigned int start, unsigned int count)
+{
+ struct persistent_ram_buffer *buffer = prz->buffer;
+ uint8_t *buffer_end = buffer->data + prz->buffer_size;
+ uint8_t *block;
+ uint8_t *par;
+ int ecc_block_size = prz->ecc_block_size;
+ int ecc_size = prz->ecc_size;
+ int size = prz->ecc_block_size;
+
+ if (!prz->ecc)
+ return;
+
+ block = buffer->data + (start & ~(ecc_block_size - 1));
+ par = prz->par_buffer + (start / ecc_block_size) * prz->ecc_size;
+
+ do {
+ if (block + ecc_block_size > buffer_end)
+ size = buffer_end - block;
+ persistent_ram_encode_rs8(prz, block, size, par);
+ block += ecc_block_size;
+ par += ecc_size;
+ } while (block < buffer->data + start + count);
+}
+
+static void persistent_ram_update_header_ecc(struct persistent_ram_zone *prz)
+{
+ struct persistent_ram_buffer *buffer = prz->buffer;
+
+ if (!prz->ecc)
+ return;
+
+ persistent_ram_encode_rs8(prz, (uint8_t *)buffer, sizeof(*buffer),
+ prz->par_header);
+}
+
+static void persistent_ram_ecc_old(struct persistent_ram_zone *prz)
+{
+ struct persistent_ram_buffer *buffer = prz->buffer;
+ uint8_t *block;
+ uint8_t *par;
+
+ if (!prz->ecc)
+ return;
+
+ block = buffer->data;
+ par = prz->par_buffer;
+ while (block < buffer->data + buffer_size(prz)) {
+ int numerr;
+ int size = prz->ecc_block_size;
+ if (block + size > buffer->data + prz->buffer_size)
+ size = buffer->data + prz->buffer_size - block;
+ numerr = persistent_ram_decode_rs8(prz, block, size, par);
+ if (numerr > 0) {
+ pr_devel("persistent_ram: error in block %p, %d\n",
+ block, numerr);
+ prz->corrected_bytes += numerr;
+ } else if (numerr < 0) {
+ pr_devel("persistent_ram: uncorrectable error in block %p\n",
+ block);
+ prz->bad_blocks++;
+ }
+ block += prz->ecc_block_size;
+ par += prz->ecc_size;
+ }
+}
+
+static int persistent_ram_init_ecc(struct persistent_ram_zone *prz,
+ size_t buffer_size)
+{
+ int numerr;
+ struct persistent_ram_buffer *buffer = prz->buffer;
+ int ecc_blocks;
+
+ if (!prz->ecc)
+ return 0;
+
+ prz->ecc_block_size = 128;
+ prz->ecc_size = 16;
+ prz->ecc_symsize = 8;
+ prz->ecc_poly = 0x11d;
+
+ ecc_blocks = DIV_ROUND_UP(prz->buffer_size, prz->ecc_block_size);
+ prz->buffer_size -= (ecc_blocks + 1) * prz->ecc_size;
+
+ if (prz->buffer_size > buffer_size) {
+ pr_err("persistent_ram: invalid size %zu, non-ecc datasize %zu\n",
+ buffer_size, prz->buffer_size);
+ return -EINVAL;
+ }
+
+ prz->par_buffer = buffer->data + prz->buffer_size;
+ prz->par_header = prz->par_buffer + ecc_blocks * prz->ecc_size;
+
+ /*
+ * first consecutive root is 0
+ * primitive element to generate roots = 1
+ */
+ prz->rs_decoder = init_rs(prz->ecc_symsize, prz->ecc_poly, 0, 1,
+ prz->ecc_size);
+ if (prz->rs_decoder == NULL) {
+ pr_info("persistent_ram: init_rs failed\n");
+ return -EINVAL;
+ }
+
+ prz->corrected_bytes = 0;
+ prz->bad_blocks = 0;
+
+ numerr = persistent_ram_decode_rs8(prz, buffer, sizeof(*buffer),
+ prz->par_header);
+ if (numerr > 0) {
+ pr_info("persistent_ram: error in header, %d\n", numerr);
+ prz->corrected_bytes += numerr;
+ } else if (numerr < 0) {
+ pr_info("persistent_ram: uncorrectable error in header\n");
+ prz->bad_blocks++;
+ }
+
+ return 0;
+}
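
The parity reservation above takes (ecc_blocks + 1) * ecc_size bytes out of the
data area; the extra block protects the header. A worked example for an assumed
64 KiB data area:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/* Parity overhead for an assumed 64 KiB data area with the defaults
 * above: 512 data blocks plus one header block, 16 bytes each. */
int main(void)
{
	size_t buffer_size = 65536;
	int ecc_block_size = 128, ecc_size = 16;
	int ecc_blocks = DIV_ROUND_UP(buffer_size, ecc_block_size);	/* 512 */

	buffer_size -= (ecc_blocks + 1) * ecc_size;	/* 65536 - 8208 */
	printf("log bytes: %zu\n", buffer_size);	/* 57328 */
	return 0;
}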
+
+ssize_t persistent_ram_ecc_string(struct persistent_ram_zone *prz,
+ char *str, size_t len)
+{
+ ssize_t ret;
+
+ if (prz->corrected_bytes || prz->bad_blocks)
+ ret = snprintf(str, len, ""
+ "\n%d Corrected bytes, %d unrecoverable blocks\n",
+ prz->corrected_bytes, prz->bad_blocks);
+ else
+ ret = snprintf(str, len, "\nNo errors detected\n");
+
+ return ret;
+}
+
+static void notrace persistent_ram_update(struct persistent_ram_zone *prz,
+ const void *s, unsigned int start, unsigned int count)
+{
+ struct persistent_ram_buffer *buffer = prz->buffer;
+ memcpy(buffer->data + start, s, count);
+ persistent_ram_update_ecc(prz, start, count);
+}
+
+static void __init
+persistent_ram_save_old(struct persistent_ram_zone *prz)
+{
+ struct persistent_ram_buffer *buffer = prz->buffer;
+ size_t size = buffer_size(prz);
+ size_t start = buffer_start(prz);
+ char *dest;
+
+ persistent_ram_ecc_old(prz);
+
+ dest = kmalloc(size, GFP_KERNEL);
+ if (dest == NULL) {
+ pr_err("persistent_ram: failed to allocate buffer\n");
+ return;
+ }
+
+ prz->old_log = dest;
+ prz->old_log_size = size;
+ memcpy(prz->old_log, &buffer->data[start], size - start);
+ memcpy(prz->old_log + size - start, &buffer->data[0], start);
+}
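
A minimal userspace sketch of the two-memcpy unroll above: the oldest data,
which begins at 'start', is copied first, so the saved log comes out in linear
order. The buffer contents are illustrative.

#include <stdio.h>
#include <string.h>

int main(void)
{
	/* ring contents: "EF" wrapped to the front, oldest byte at 2 */
	char data[8] = { 'E', 'F', 'A', 'B', 'C', 'D', 0, 0 };
	size_t size = 6, start = 2;
	char old[7] = "";

	memcpy(old, &data[start], size - start);	/* "ABCD" */
	memcpy(old + size - start, &data[0], start);	/* append "EF" */
	printf("%s\n", old);				/* ABCDEF */
	return 0;
}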
+
+int notrace persistent_ram_write(struct persistent_ram_zone *prz,
+ const void *s, unsigned int count)
+{
+ int rem;
+ int c = count;
+ size_t start;
+
+ if (unlikely(c > prz->buffer_size)) {
+ s += c - prz->buffer_size;
+ c = prz->buffer_size;
+ }
+
+ buffer_size_add_clamp(prz, c);
+
+ start = buffer_start_add(prz, c);
+
+ rem = prz->buffer_size - start;
+ if (unlikely(rem < c)) {
+ persistent_ram_update(prz, s, start, rem);
+ s += rem;
+ c -= rem;
+ start = 0;
+ }
+ persistent_ram_update(prz, s, start, c);
+
+ persistent_ram_update_header_ecc(prz);
+
+ return count;
+}
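
A runnable userspace sketch of the wrapping write above, with an illustrative
8-byte buffer: when the record does not fit before the end, the tail is written
first and the remainder restarts at offset 0.

#include <stdio.h>
#include <string.h>

#define BUF_SIZE 8

static char buf[BUF_SIZE];

/* Split the copy in two when it would run past the end of the ring */
static void ring_write(size_t start, const char *s, size_t c)
{
	size_t rem = BUF_SIZE - start;

	if (rem < c) {
		memcpy(buf + start, s, rem);	/* fill up to the end */
		s += rem;
		c -= rem;
		start = 0;			/* wrap to the front */
	}
	memcpy(buf + start, s, c);
}

int main(void)
{
	memset(buf, '.', sizeof(buf));
	ring_write(6, "ABCD", 4);
	printf("%.8s\n", buf);		/* CD....AB */
	return 0;
}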
+
+size_t persistent_ram_old_size(struct persistent_ram_zone *prz)
+{
+ return prz->old_log_size;
+}
+
+void *persistent_ram_old(struct persistent_ram_zone *prz)
+{
+ return prz->old_log;
+}
+
+void persistent_ram_free_old(struct persistent_ram_zone *prz)
+{
+ kfree(prz->old_log);
+ prz->old_log = NULL;
+ prz->old_log_size = 0;
+}
+
+static int persistent_ram_buffer_map(phys_addr_t start, phys_addr_t size,
+ struct persistent_ram_zone *prz)
+{
+ struct page **pages;
+ phys_addr_t page_start;
+ unsigned int page_count;
+ pgprot_t prot;
+ unsigned int i;
+
+ page_start = start - offset_in_page(start);
+ page_count = DIV_ROUND_UP(size + offset_in_page(start), PAGE_SIZE);
+
+ prot = pgprot_noncached(PAGE_KERNEL);
+
+ pages = kmalloc(sizeof(struct page *) * page_count, GFP_KERNEL);
+ if (!pages) {
+ pr_err("%s: Failed to allocate array for %u pages\n", __func__,
+ page_count);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < page_count; i++) {
+ phys_addr_t addr = page_start + i * PAGE_SIZE;
+ pages[i] = pfn_to_page(addr >> PAGE_SHIFT);
+ }
+ prz->vaddr = vmap(pages, page_count, VM_MAP, prot);
+ kfree(pages);
+ if (!prz->vaddr) {
+ pr_err("%s: Failed to map %u pages\n", __func__, page_count);
+ return -ENOMEM;
+ }
+
+ prz->buffer = prz->vaddr + offset_in_page(start);
+ prz->buffer_size = size - sizeof(struct persistent_ram_buffer);
+
+ return 0;
+}
+
+static int __init persistent_ram_buffer_init(const char *name,
+ struct persistent_ram_zone *prz)
+{
+ int i;
+ struct persistent_ram *ram;
+ struct persistent_ram_descriptor *desc;
+ phys_addr_t start;
+
+ list_for_each_entry(ram, &persistent_ram_list, node) {
+ start = ram->start;
+ for (i = 0; i < ram->num_descs; i++) {
+ desc = &ram->descs[i];
+ if (!strcmp(desc->name, name))
+ return persistent_ram_buffer_map(start,
+ desc->size, prz);
+ start += desc->size;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static __init
+struct persistent_ram_zone *__persistent_ram_init(struct device *dev, bool ecc)
+{
+ struct persistent_ram_zone *prz;
+ int ret;
+
+ prz = kzalloc(sizeof(struct persistent_ram_zone), GFP_KERNEL);
+ if (!prz) {
+ pr_err("persistent_ram: failed to allocate persistent ram zone\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ INIT_LIST_HEAD(&prz->node);
+
+ ret = persistent_ram_buffer_init(dev_name(dev), prz);
+ if (ret) {
+ pr_err("persistent_ram: failed to initialize buffer\n");
+ return ERR_PTR(ret);
+ }
+
+ prz->ecc = ecc;
+ ret = persistent_ram_init_ecc(prz, prz->buffer_size);
+ if (ret)
+ return ERR_PTR(ret);
+
+ if (prz->buffer->sig == PERSISTENT_RAM_SIG) {
+ if (buffer_size(prz) > prz->buffer_size ||
+ buffer_start(prz) > buffer_size(prz))
+ pr_info("persistent_ram: found existing invalid buffer,"
+ " size %ld, start %ld\n",
+ buffer_size(prz), buffer_start(prz));
+ else {
+ pr_info("persistent_ram: found existing buffer,"
+ " size %ld, start %ld\n",
+ buffer_size(prz), buffer_start(prz));
+ persistent_ram_save_old(prz);
+ }
+ } else {
+ pr_info("persistent_ram: no valid data in buffer"
+ " (sig = 0x%08x)\n", prz->buffer->sig);
+ }
+
+ prz->buffer->sig = PERSISTENT_RAM_SIG;
+ atomic_set(&prz->buffer->start, 0);
+ atomic_set(&prz->buffer->size, 0);
+
+ return prz;
+}
+
+struct persistent_ram_zone * __init
+persistent_ram_init_ringbuffer(struct device *dev, bool ecc)
+{
+ return __persistent_ram_init(dev, ecc);
+}
+
+int __init persistent_ram_early_init(struct persistent_ram *ram)
+{
+ int ret;
+
+ ret = memblock_reserve(ram->start, ram->size);
+ if (ret) {
+ pr_err("Failed to reserve persistent memory from %08lx-%08lx\n",
+ (long)ram->start, (long)(ram->start + ram->size - 1));
+ return ret;
+ }
+
+ list_add_tail(&ram->node, &persistent_ram_list);
+
+ pr_info("Initialized persistent memory from %08lx-%08lx\n",
+ (long)ram->start, (long)(ram->start + ram->size - 1));
+
+ return 0;
+}
diff --git a/drivers/staging/android/persistent_ram.h b/drivers/staging/android/persistent_ram.h
new file mode 100644
index 00000000000..f41e2086c64
--- /dev/null
+++ b/drivers/staging/android/persistent_ram.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __LINUX_PERSISTENT_RAM_H__
+#define __LINUX_PERSISTENT_RAM_H__
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/types.h>
+
+struct persistent_ram_buffer;
+
+struct persistent_ram_descriptor {
+ const char *name;
+ phys_addr_t size;
+};
+
+struct persistent_ram {
+ phys_addr_t start;
+ phys_addr_t size;
+
+ int num_descs;
+ struct persistent_ram_descriptor *descs;
+
+ struct list_head node;
+};
+
+struct persistent_ram_zone {
+ struct list_head node;
+ void *vaddr;
+ struct persistent_ram_buffer *buffer;
+ size_t buffer_size;
+
+ /* ECC correction */
+ bool ecc;
+ char *par_buffer;
+ char *par_header;
+ struct rs_control *rs_decoder;
+ int corrected_bytes;
+ int bad_blocks;
+ int ecc_block_size;
+ int ecc_size;
+ int ecc_symsize;
+ int ecc_poly;
+
+ char *old_log;
+ size_t old_log_size;
+ size_t old_log_footer_size;
+ bool early;
+};
+
+int persistent_ram_early_init(struct persistent_ram *ram);
+
+struct persistent_ram_zone *persistent_ram_init_ringbuffer(struct device *dev,
+ bool ecc);
+
+int persistent_ram_write(struct persistent_ram_zone *prz, const void *s,
+ unsigned int count);
+
+size_t persistent_ram_old_size(struct persistent_ram_zone *prz);
+void *persistent_ram_old(struct persistent_ram_zone *prz);
+void persistent_ram_free_old(struct persistent_ram_zone *prz);
+ssize_t persistent_ram_ecc_string(struct persistent_ram_zone *prz,
+ char *str, size_t len);
+
+#endif
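
A hypothetical board-file usage of the API above, as a sketch only: the
descriptor name must match the platform device that will later claim the zone
("ram_console" here, per persistent_ram_buffer_init()'s dev_name() lookup),
while the physical address, size, and function name are assumptions.

/* Hypothetical board file; address and size are illustrative */
#include <linux/init.h>
#include "persistent_ram.h"

static struct persistent_ram_descriptor board_prz_descs[] = {
	{ .name = "ram_console", .size = 0x10000 },	/* 64 KiB zone */
};

static struct persistent_ram board_prz = {
	.start = 0x8ff00000,	/* assumed reserved physical RAM */
	.size = 0x10000,
	.num_descs = ARRAY_SIZE(board_prz_descs),
	.descs = board_prz_descs,
};

/* Call from the machine's early reserve hook, before the page
 * allocator takes over, so memblock_reserve() can still succeed. */
void __init board_persistent_ram_reserve(void)
{
	persistent_ram_early_init(&board_prz);
}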
diff --git a/drivers/staging/android/ram_console.c b/drivers/staging/android/ram_console.c
index 6d4d67924f2..ce140ffc54e 100644
--- a/drivers/staging/android/ram_console.c
+++ b/drivers/staging/android/ram_console.c
@@ -21,129 +21,24 @@
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/io.h>
+#include "persistent_ram.h"
#include "ram_console.h"
-#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
-#include <linux/rslib.h>
-#endif
-
-struct ram_console_buffer {
- uint32_t sig;
- uint32_t start;
- uint32_t size;
- uint8_t data[0];
-};
-
-#define RAM_CONSOLE_SIG (0x43474244) /* DBGC */
-
-#ifdef CONFIG_ANDROID_RAM_CONSOLE_EARLY_INIT
-static char __initdata
- ram_console_old_log_init_buffer[CONFIG_ANDROID_RAM_CONSOLE_EARLY_SIZE];
-#endif
-static char *ram_console_old_log;
-static size_t ram_console_old_log_size;
-
-static struct ram_console_buffer *ram_console_buffer;
-static size_t ram_console_buffer_size;
-#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
-static char *ram_console_par_buffer;
-static struct rs_control *ram_console_rs_decoder;
-static int ram_console_corrected_bytes;
-static int ram_console_bad_blocks;
-#define ECC_BLOCK_SIZE CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_DATA_SIZE
-#define ECC_SIZE CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_ECC_SIZE
-#define ECC_SYMSIZE CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE
-#define ECC_POLY CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_POLYNOMIAL
-#endif
-
-#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
-static void ram_console_encode_rs8(uint8_t *data, size_t len, uint8_t *ecc)
-{
- int i;
- uint16_t par[ECC_SIZE];
- /* Initialize the parity buffer */
- memset(par, 0, sizeof(par));
- encode_rs8(ram_console_rs_decoder, data, len, par, 0);
- for (i = 0; i < ECC_SIZE; i++)
- ecc[i] = par[i];
-}
-
-static int ram_console_decode_rs8(void *data, size_t len, uint8_t *ecc)
-{
- int i;
- uint16_t par[ECC_SIZE];
- for (i = 0; i < ECC_SIZE; i++)
- par[i] = ecc[i];
- return decode_rs8(ram_console_rs_decoder, data, par, len,
- NULL, 0, NULL, 0, NULL);
-}
-#endif
-
-static void ram_console_update(const char *s, unsigned int count)
-{
- struct ram_console_buffer *buffer = ram_console_buffer;
-#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
- uint8_t *buffer_end = buffer->data + ram_console_buffer_size;
- uint8_t *block;
- uint8_t *par;
- int size = ECC_BLOCK_SIZE;
-#endif
- memcpy(buffer->data + buffer->start, s, count);
-#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
- block = buffer->data + (buffer->start & ~(ECC_BLOCK_SIZE - 1));
- par = ram_console_par_buffer +
- (buffer->start / ECC_BLOCK_SIZE) * ECC_SIZE;
- do {
- if (block + ECC_BLOCK_SIZE > buffer_end)
- size = buffer_end - block;
- ram_console_encode_rs8(block, size, par);
- block += ECC_BLOCK_SIZE;
- par += ECC_SIZE;
- } while (block < buffer->data + buffer->start + count);
-#endif
-}
-
-static void ram_console_update_header(void)
-{
-#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
- struct ram_console_buffer *buffer = ram_console_buffer;
- uint8_t *par;
- par = ram_console_par_buffer +
- DIV_ROUND_UP(ram_console_buffer_size, ECC_BLOCK_SIZE) * ECC_SIZE;
- ram_console_encode_rs8((uint8_t *)buffer, sizeof(*buffer), par);
-#endif
-}
+static struct persistent_ram_zone *ram_console_zone;
+static const char *bootinfo;
+static size_t bootinfo_size;
static void
ram_console_write(struct console *console, const char *s, unsigned int count)
{
- int rem;
- struct ram_console_buffer *buffer = ram_console_buffer;
-
- if (count > ram_console_buffer_size) {
- s += count - ram_console_buffer_size;
- count = ram_console_buffer_size;
- }
- rem = ram_console_buffer_size - buffer->start;
- if (rem < count) {
- ram_console_update(s, rem);
- s += rem;
- count -= rem;
- buffer->start = 0;
- buffer->size = ram_console_buffer_size;
- }
- ram_console_update(s, count);
-
- buffer->start += count;
- if (buffer->size < ram_console_buffer_size)
- buffer->size += count;
- ram_console_update_header();
+ struct persistent_ram_zone *prz = console->data;
+ persistent_ram_write(prz, s, count);
}
static struct console ram_console = {
.name = "ram",
.write = ram_console_write,
- .flags = CON_PRINTBUFFER | CON_ENABLED,
+ .flags = CON_PRINTBUFFER | CON_ENABLED | CON_ANYTIME,
.index = -1,
};
@@ -155,220 +50,31 @@ void ram_console_enable_console(int enabled)
ram_console.flags &= ~CON_ENABLED;
}
-static void __init
-ram_console_save_old(struct ram_console_buffer *buffer, const char *bootinfo,
- char *dest)
-{
- size_t old_log_size = buffer->size;
- size_t bootinfo_size = 0;
- size_t total_size = old_log_size;
- char *ptr;
- const char *bootinfo_label = "Boot info:\n";
-
-#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
- uint8_t *block;
- uint8_t *par;
- char strbuf[80];
- int strbuf_len = 0;
-
- block = buffer->data;
- par = ram_console_par_buffer;
- while (block < buffer->data + buffer->size) {
- int numerr;
- int size = ECC_BLOCK_SIZE;
- if (block + size > buffer->data + ram_console_buffer_size)
- size = buffer->data + ram_console_buffer_size - block;
- numerr = ram_console_decode_rs8(block, size, par);
- if (numerr > 0) {
-#if 0
- printk(KERN_INFO "ram_console: error in block %p, %d\n",
- block, numerr);
-#endif
- ram_console_corrected_bytes += numerr;
- } else if (numerr < 0) {
-#if 0
- printk(KERN_INFO "ram_console: uncorrectable error in "
- "block %p\n", block);
-#endif
- ram_console_bad_blocks++;
- }
- block += ECC_BLOCK_SIZE;
- par += ECC_SIZE;
- }
- if (ram_console_corrected_bytes || ram_console_bad_blocks)
- strbuf_len = snprintf(strbuf, sizeof(strbuf),
- "\n%d Corrected bytes, %d unrecoverable blocks\n",
- ram_console_corrected_bytes, ram_console_bad_blocks);
- else
- strbuf_len = snprintf(strbuf, sizeof(strbuf),
- "\nNo errors detected\n");
- if (strbuf_len >= sizeof(strbuf))
- strbuf_len = sizeof(strbuf) - 1;
- total_size += strbuf_len;
-#endif
-
- if (bootinfo)
- bootinfo_size = strlen(bootinfo) + strlen(bootinfo_label);
- total_size += bootinfo_size;
-
- if (dest == NULL) {
- dest = kmalloc(total_size, GFP_KERNEL);
- if (dest == NULL) {
- printk(KERN_ERR
- "ram_console: failed to allocate buffer\n");
- return;
- }
- }
-
- ram_console_old_log = dest;
- ram_console_old_log_size = total_size;
- memcpy(ram_console_old_log,
- &buffer->data[buffer->start], buffer->size - buffer->start);
- memcpy(ram_console_old_log + buffer->size - buffer->start,
- &buffer->data[0], buffer->start);
- ptr = ram_console_old_log + old_log_size;
-#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
- memcpy(ptr, strbuf, strbuf_len);
- ptr += strbuf_len;
-#endif
- if (bootinfo) {
- memcpy(ptr, bootinfo_label, strlen(bootinfo_label));
- ptr += strlen(bootinfo_label);
- memcpy(ptr, bootinfo, bootinfo_size);
- ptr += bootinfo_size;
- }
-}
-
-static int __init ram_console_init(struct ram_console_buffer *buffer,
- size_t buffer_size, const char *bootinfo,
- char *old_buf)
+static int __init ram_console_probe(struct platform_device *pdev)
{
-#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
- int numerr;
- uint8_t *par;
-#endif
- ram_console_buffer = buffer;
- ram_console_buffer_size =
- buffer_size - sizeof(struct ram_console_buffer);
-
- if (ram_console_buffer_size > buffer_size) {
- pr_err("ram_console: buffer %p, invalid size %zu, "
- "datasize %zu\n", buffer, buffer_size,
- ram_console_buffer_size);
- return 0;
- }
-
-#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
- ram_console_buffer_size -= (DIV_ROUND_UP(ram_console_buffer_size,
- ECC_BLOCK_SIZE) + 1) * ECC_SIZE;
-
- if (ram_console_buffer_size > buffer_size) {
- pr_err("ram_console: buffer %p, invalid size %zu, "
- "non-ecc datasize %zu\n",
- buffer, buffer_size, ram_console_buffer_size);
- return 0;
- }
-
- ram_console_par_buffer = buffer->data + ram_console_buffer_size;
-
-
- /* first consecutive root is 0
- * primitive element to generate roots = 1
- */
- ram_console_rs_decoder = init_rs(ECC_SYMSIZE, ECC_POLY, 0, 1, ECC_SIZE);
- if (ram_console_rs_decoder == NULL) {
- printk(KERN_INFO "ram_console: init_rs failed\n");
- return 0;
- }
-
- ram_console_corrected_bytes = 0;
- ram_console_bad_blocks = 0;
+ struct ram_console_platform_data *pdata = pdev->dev.platform_data;
+ struct persistent_ram_zone *prz;
- par = ram_console_par_buffer +
- DIV_ROUND_UP(ram_console_buffer_size, ECC_BLOCK_SIZE) * ECC_SIZE;
+ prz = persistent_ram_init_ringbuffer(&pdev->dev, true);
+ if (IS_ERR(prz))
+ return PTR_ERR(prz);
- numerr = ram_console_decode_rs8(buffer, sizeof(*buffer), par);
- if (numerr > 0) {
- printk(KERN_INFO "ram_console: error in header, %d\n", numerr);
- ram_console_corrected_bytes += numerr;
- } else if (numerr < 0) {
- printk(KERN_INFO
- "ram_console: uncorrectable error in header\n");
- ram_console_bad_blocks++;
- }
-#endif
- if (buffer->sig == RAM_CONSOLE_SIG) {
- if (buffer->size > ram_console_buffer_size
- || buffer->start > buffer->size)
- printk(KERN_INFO "ram_console: found existing invalid "
- "buffer, size %d, start %d\n",
- buffer->size, buffer->start);
- else {
- printk(KERN_INFO "ram_console: found existing buffer, "
- "size %d, start %d\n",
- buffer->size, buffer->start);
- ram_console_save_old(buffer, bootinfo, old_buf);
- }
- } else {
- printk(KERN_INFO "ram_console: no valid data in buffer "
- "(sig = 0x%08x)\n", buffer->sig);
+ if (pdata) {
+ bootinfo = kstrdup(pdata->bootinfo, GFP_KERNEL);
+ if (bootinfo)
+ bootinfo_size = strlen(bootinfo);
}
- buffer->sig = RAM_CONSOLE_SIG;
- buffer->start = 0;
- buffer->size = 0;
+ ram_console_zone = prz;
+ ram_console.data = prz;
register_console(&ram_console);
-#ifdef CONFIG_ANDROID_RAM_CONSOLE_ENABLE_VERBOSE
- console_verbose();
-#endif
- return 0;
-}
-#ifdef CONFIG_ANDROID_RAM_CONSOLE_EARLY_INIT
-static int __init ram_console_early_init(void)
-{
- return ram_console_init((struct ram_console_buffer *)
- CONFIG_ANDROID_RAM_CONSOLE_EARLY_ADDR,
- CONFIG_ANDROID_RAM_CONSOLE_EARLY_SIZE,
- NULL,
- ram_console_old_log_init_buffer);
-}
-#else
-static int ram_console_driver_probe(struct platform_device *pdev)
-{
- struct resource *res = pdev->resource;
- size_t start;
- size_t buffer_size;
- void *buffer;
- const char *bootinfo = NULL;
- struct ram_console_platform_data *pdata = pdev->dev.platform_data;
-
- if (res == NULL || pdev->num_resources != 1 ||
- !(res->flags & IORESOURCE_MEM)) {
- printk(KERN_ERR "ram_console: invalid resource, %p %d flags "
- "%lx\n", res, pdev->num_resources, res ? res->flags : 0);
- return -ENXIO;
- }
- buffer_size = res->end - res->start + 1;
- start = res->start;
- printk(KERN_INFO "ram_console: got buffer at %zx, size %zx\n",
- start, buffer_size);
- buffer = ioremap(res->start, buffer_size);
- if (buffer == NULL) {
- printk(KERN_ERR "ram_console: failed to map memory\n");
- return -ENOMEM;
- }
-
- if (pdata)
- bootinfo = pdata->bootinfo;
-
- return ram_console_init(buffer, buffer_size, bootinfo, NULL/* allocate */);
+ return 0;
}
static struct platform_driver ram_console_driver = {
- .probe = ram_console_driver_probe,
.driver = {
.name = "ram_console",
},
@@ -376,10 +82,11 @@ static struct platform_driver ram_console_driver = {
static int __init ram_console_module_init(void)
{
- int err;
- err = platform_driver_register(&ram_console_driver);
- return err;
+ return platform_driver_probe(&ram_console_driver, ram_console_probe);
}
+
+#ifndef CONFIG_PRINTK
+#define dmesg_restrict 0
#endif
static ssize_t ram_console_read_old(struct file *file, char __user *buf,
@@ -387,14 +94,52 @@ static ssize_t ram_console_read_old(struct file *file, char __user *buf,
{
loff_t pos = *offset;
ssize_t count;
+ struct persistent_ram_zone *prz = ram_console_zone;
+ size_t old_log_size = persistent_ram_old_size(prz);
+ const char *old_log = persistent_ram_old(prz);
+ char *str;
+ int ret;
+
+ if (dmesg_restrict && !capable(CAP_SYSLOG))
+ return -EPERM;
+
+ /* Main last_kmsg log */
+ if (pos < old_log_size) {
+ count = min(len, (size_t)(old_log_size - pos));
+ if (copy_to_user(buf, old_log + pos, count))
+ return -EFAULT;
+ goto out;
+ }
- if (pos >= ram_console_old_log_size)
- return 0;
+ /* ECC correction notice */
+ pos -= old_log_size;
+ count = persistent_ram_ecc_string(prz, NULL, 0);
+ if (pos < count) {
+ str = kmalloc(count + 1, GFP_KERNEL);
+ if (!str)
+ return -ENOMEM;
+ persistent_ram_ecc_string(prz, str, count + 1);
+ count = min(len, (size_t)(count - pos));
+ ret = copy_to_user(buf, str + pos, count);
+ kfree(str);
+ if (ret)
+ return -EFAULT;
+ goto out;
+ }
+
+ /* Boot info passed through pdata */
+ pos -= count;
+ if (pos < bootinfo_size) {
+ count = min(len, (size_t)(bootinfo_size - pos));
+ if (copy_to_user(buf, bootinfo + pos, count))
+ return -EFAULT;
+ goto out;
+ }
- count = min(len, (size_t)(ram_console_old_log_size - pos));
- if (copy_to_user(buf, ram_console_old_log + pos, count))
- return -EFAULT;
+ /* EOF */
+ return 0;
+out:
*offset += count;
return count;
}
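
A self-contained userspace sketch of the cascaded-region read above: the file
position is shifted down by each region's length until it lands inside one of
them, and a position past the last region means EOF. The region contents are
illustrative.

#include <stdio.h>
#include <string.h>

static const char *regions[] = { "old log\n", "ecc note\n", "bootinfo\n" };

static size_t read_at(size_t pos, char *out, size_t len)
{
	size_t i;

	for (i = 0; i < sizeof(regions) / sizeof(regions[0]); i++) {
		size_t rlen = strlen(regions[i]);

		if (pos < rlen) {
			size_t count = len < rlen - pos ? len : rlen - pos;

			memcpy(out, regions[i] + pos, count);
			return count;
		}
		pos -= rlen;	/* fall through to the next region */
	}
	return 0;		/* EOF */
}

int main(void)
{
	char c;
	size_t pos = 0, n;

	while ((n = read_at(pos, &c, 1)) > 0) {	/* byte-at-a-time reader */
		putchar(c);
		pos += n;
	}
	return 0;
}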
@@ -407,37 +152,28 @@ static const struct file_operations ram_console_file_ops = {
static int __init ram_console_late_init(void)
{
struct proc_dir_entry *entry;
+ struct persistent_ram_zone *prz = ram_console_zone;
- if (ram_console_old_log == NULL)
+ if (!prz)
return 0;
-#ifdef CONFIG_ANDROID_RAM_CONSOLE_EARLY_INIT
- ram_console_old_log = kmalloc(ram_console_old_log_size, GFP_KERNEL);
- if (ram_console_old_log == NULL) {
- printk(KERN_ERR
- "ram_console: failed to allocate buffer for old log\n");
- ram_console_old_log_size = 0;
+
+ if (persistent_ram_old_size(prz) == 0)
return 0;
- }
- memcpy(ram_console_old_log,
- ram_console_old_log_init_buffer, ram_console_old_log_size);
-#endif
+
entry = create_proc_entry("last_kmsg", S_IFREG | S_IRUGO, NULL);
if (!entry) {
printk(KERN_ERR "ram_console: failed to create proc entry\n");
- kfree(ram_console_old_log);
- ram_console_old_log = NULL;
+ persistent_ram_free_old(prz);
return 0;
}
entry->proc_fops = &ram_console_file_ops;
- entry->size = ram_console_old_log_size;
+ entry->size = persistent_ram_old_size(prz) +
+ persistent_ram_ecc_string(prz, NULL, 0) +
+ bootinfo_size;
+
return 0;
}
-#ifdef CONFIG_ANDROID_RAM_CONSOLE_EARLY_INIT
-console_initcall(ram_console_early_init);
-#else
-postcore_initcall(ram_console_module_init);
-#endif
late_initcall(ram_console_late_init);
-
+postcore_initcall(ram_console_module_init);
diff --git a/drivers/staging/android/timed_gpio.c b/drivers/staging/android/timed_gpio.c
index a64481c3e86..bc723eff11a 100644
--- a/drivers/staging/android/timed_gpio.c
+++ b/drivers/staging/android/timed_gpio.c
@@ -29,9 +29,9 @@ struct timed_gpio_data {
struct timed_output_dev dev;
struct hrtimer timer;
spinlock_t lock;
- unsigned gpio;
- int max_timeout;
- u8 active_low;
+ unsigned gpio;
+ int max_timeout;
+ u8 active_low;
};
static enum hrtimer_restart gpio_timer_func(struct hrtimer *timer)
diff --git a/drivers/staging/android/timed_gpio.h b/drivers/staging/android/timed_gpio.h
index a0e15f8be3f..d29e169d7eb 100644
--- a/drivers/staging/android/timed_gpio.h
+++ b/drivers/staging/android/timed_gpio.h
@@ -20,13 +20,13 @@
struct timed_gpio {
const char *name;
- unsigned gpio;
+ unsigned gpio;
int max_timeout;
- u8 active_low;
+ u8 active_low;
};
struct timed_gpio_platform_data {
- int num_gpios;
+ int num_gpios;
struct timed_gpio *gpios;
};
diff --git a/drivers/staging/asus_oled/asus_oled.c b/drivers/staging/asus_oled/asus_oled.c
index 1df9586f273..83549d9cfef 100644
--- a/drivers/staging/asus_oled/asus_oled.c
+++ b/drivers/staging/asus_oled/asus_oled.c
@@ -159,7 +159,6 @@ static void setup_packet_header(struct asus_oled_packet *packet, char flags,
static void enable_oled(struct asus_oled_dev *odev, uint8_t enabl)
{
- int a;
int retval;
int act_len;
struct asus_oled_packet *packet;
@@ -178,17 +177,15 @@ static void enable_oled(struct asus_oled_dev *odev, uint8_t enabl)
else
packet->bitmap[0] = 0xae;
- for (a = 0; a < 1; a++) {
- retval = usb_bulk_msg(odev->udev,
- usb_sndbulkpipe(odev->udev, 2),
- packet,
- sizeof(struct asus_oled_header) + 1,
- &act_len,
- -1);
+ retval = usb_bulk_msg(odev->udev,
+ usb_sndbulkpipe(odev->udev, 2),
+ packet,
+ sizeof(struct asus_oled_header) + 1,
+ &act_len,
+ -1);
- if (retval)
- dev_dbg(&odev->udev->dev, "retval = %d\n", retval);
- }
+ if (retval)
+ dev_dbg(&odev->udev->dev, "retval = %d\n", retval);
odev->enabled = enabl;
diff --git a/drivers/staging/bcm/Bcmchar.c b/drivers/staging/bcm/Bcmchar.c
index 179707b5e7c..cf305921695 100644
--- a/drivers/staging/bcm/Bcmchar.c
+++ b/drivers/staging/bcm/Bcmchar.c
@@ -728,14 +728,10 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
if (IoBuffer.InputLength > MAX_CNTL_PKT_SIZE)
return -EINVAL;
- pvBuffer = kmalloc(IoBuffer.InputLength, GFP_KERNEL);
- if (!pvBuffer)
- return -ENOMEM;
-
- if (copy_from_user(pvBuffer, IoBuffer.InputBuffer, IoBuffer.InputLength)) {
- kfree(pvBuffer);
- return -EFAULT;
- }
+ pvBuffer = memdup_user(IoBuffer.InputBuffer,
+ IoBuffer.InputLength);
+ if (IS_ERR(pvBuffer))
+ return PTR_ERR(pvBuffer);
down(&Adapter->LowPowerModeSync);
Status = wait_event_interruptible_timeout(Adapter->lowpower_mode_wait_queue,
@@ -1140,15 +1136,10 @@ cntrlEnd:
if (IoBuffer.InputLength < sizeof(ULONG) * 2)
return -EINVAL;
- pvBuffer = kmalloc(IoBuffer.InputLength, GFP_KERNEL);
- if (!pvBuffer)
- return -ENOMEM;
-
- /* Get WrmBuffer structure */
- if (copy_from_user(pvBuffer, IoBuffer.InputBuffer, IoBuffer.InputLength)) {
- kfree(pvBuffer);
- return -EFAULT;
- }
+ pvBuffer = memdup_user(IoBuffer.InputBuffer,
+ IoBuffer.InputLength);
+ if (IS_ERR(pvBuffer))
+ return PTR_ERR(pvBuffer);
pBulkBuffer = (PBULKWRM_BUFFER)pvBuffer;
@@ -1302,20 +1293,18 @@ cntrlEnd:
/*
* Deny the access if the offset crosses the cal area limit.
*/
+ if (stNVMReadWrite.uiNumBytes > Adapter->uiNVMDSDSize)
+ return STATUS_FAILURE;
- if ((stNVMReadWrite.uiOffset + stNVMReadWrite.uiNumBytes) > Adapter->uiNVMDSDSize) {
+ if (stNVMReadWrite.uiOffset > Adapter->uiNVMDSDSize - stNVMReadWrite.uiNumBytes) {
/* BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Can't allow access beyond NVM Size: 0x%x 0x%x\n", stNVMReadWrite.uiOffset, stNVMReadWrite.uiNumBytes); */
return STATUS_FAILURE;
}
- pReadData = kzalloc(stNVMReadWrite.uiNumBytes, GFP_KERNEL);
- if (!pReadData)
- return -ENOMEM;
-
- if (copy_from_user(pReadData, stNVMReadWrite.pBuffer, stNVMReadWrite.uiNumBytes)) {
- kfree(pReadData);
- return -EFAULT;
- }
+ pReadData = memdup_user(stNVMReadWrite.pBuffer,
+ stNVMReadWrite.uiNumBytes);
+ if (IS_ERR(pReadData))
+ return PTR_ERR(pReadData);
do_gettimeofday(&tv0);
if (IOCTL_BCM_NVM_READ == cmd) {
diff --git a/drivers/staging/bcm/CmHost.c b/drivers/staging/bcm/CmHost.c
index c0ee95a7134..7e38af5e176 100644
--- a/drivers/staging/bcm/CmHost.c
+++ b/drivers/staging/bcm/CmHost.c
@@ -1,431 +1,359 @@
/************************************************************
-* CMHOST.C
-* This file contains the routines for handling Connection
-* Management.
-************************************************************/
+ * CMHOST.C
+ * This file contains the routines for handling Connection
+ * Management.
+ ************************************************************/
-//#define CONN_MSG
+/* #define CONN_MSG */
#include "headers.h"
-typedef enum _E_CLASSIFIER_ACTION
-{
+enum E_CLASSIFIER_ACTION {
eInvalidClassifierAction,
eAddClassifier,
eReplaceClassifier,
eDeleteClassifier
-}E_CLASSIFIER_ACTION;
+};
-static ULONG GetNextTargetBufferLocation(PMINI_ADAPTER Adapter,B_UINT16 tid);
+static ULONG GetNextTargetBufferLocation(PMINI_ADAPTER Adapter, B_UINT16 tid);
/************************************************************
-* Function - SearchSfid
-*
-* Description - This routinue would search QOS queues having
-* specified SFID as input parameter.
-*
-* Parameters - Adapter: Pointer to the Adapter structure
-* uiSfid : Given SFID for matching
-*
-* Returns - Queue index for this SFID(If matched)
- Else Invalid Queue Index(If Not matched)
-************************************************************/
-INT SearchSfid(PMINI_ADAPTER Adapter,UINT uiSfid)
+ * Function - SearchSfid
+ *
+ * Description - This routine would search QOS queues having
+ * specified SFID as input parameter.
+ *
+ * Parameters - Adapter: Pointer to the Adapter structure
+ * uiSfid : Given SFID for matching
+ *
+ * Returns - Queue index for this SFID (if matched),
+ * else an invalid queue index (if not matched)
+ ************************************************************/
+int SearchSfid(PMINI_ADAPTER Adapter, UINT uiSfid)
{
- INT iIndex=0;
- for(iIndex=(NO_OF_QUEUES-1); iIndex>=0; iIndex--)
- if(Adapter->PackInfo[iIndex].ulSFID==uiSfid)
- return iIndex;
+ int i;
+
+ for (i = (NO_OF_QUEUES-1); i >= 0; i--)
+ if (Adapter->PackInfo[i].ulSFID == uiSfid)
+ return i;
+
return NO_OF_QUEUES+1;
}
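
A self-contained sketch of the sentinel convention used by these search
helpers: any index past the table size means "not found". The table contents
and size below are illustrative, not from the driver.

#include <stdio.h>

#define NQ 4

static unsigned long sfids[NQ] = { 11, 0, 42, 0 };

static int search_sfid(unsigned long sfid)
{
	int i;

	for (i = NQ - 1; i >= 0; i--)
		if (sfids[i] == sfid)
			return i;
	return NQ + 1;	/* invalid index == not found */
}

int main(void)
{
	printf("%d\n", search_sfid(42));	/* 2 */
	printf("%d\n", search_sfid(7));		/* 5: not found */
	return 0;
}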
/***************************************************************
-* Function - SearchFreeSfid
-*
-* Description - This routinue would search Free available SFID.
-*
-* Parameter - Adapter: Pointer to the Adapter structure
-*
-* Returns - Queue index for the free SFID
-* Else returns Invalid Index.
-****************************************************************/
-static INT SearchFreeSfid(PMINI_ADAPTER Adapter)
+ * Function - SearchFreeSfid
+ *
+ * Description - This routine would search for a free available SFID.
+ *
+ * Parameter - Adapter: Pointer to the Adapter structure
+ *
+ * Returns - Queue index for the free SFID
+ * Else returns Invalid Index.
+ ****************************************************************/
+static int SearchFreeSfid(PMINI_ADAPTER Adapter)
{
- UINT uiIndex=0;
+ int i;
+
+ for (i = 0; i < (NO_OF_QUEUES-1); i++)
+ if (Adapter->PackInfo[i].ulSFID == 0)
+ return i;
- for(uiIndex=0; uiIndex < (NO_OF_QUEUES-1); uiIndex++)
- if(Adapter->PackInfo[uiIndex].ulSFID==0)
- return uiIndex;
return NO_OF_QUEUES+1;
}
/*
-Function: SearchClsid
-Description: This routinue would search Classifier having specified ClassifierID as input parameter
-Input parameters: PMINI_ADAPTER Adapter - Adapter Context
- unsigned int uiSfid - The SF in which the classifier is to searched
- B_UINT16 uiClassifierID - The classifier ID to be searched
-Return: int :Classifier table index of matching entry
-*/
-
-static int SearchClsid(PMINI_ADAPTER Adapter,ULONG ulSFID,B_UINT16 uiClassifierID)
+ * Function: SearchClsid
+ * Description: This routine would search for the classifier having the specified ClassifierID as input parameter
+ * Input parameters: PMINI_ADAPTER Adapter - Adapter Context
+ * unsigned int uiSfid - The SF in which the classifier is to be searched
+ * B_UINT16 uiClassifierID - The classifier ID to be searched
+ * Return: int :Classifier table index of matching entry
+ */
+static int SearchClsid(PMINI_ADAPTER Adapter, ULONG ulSFID, B_UINT16 uiClassifierID)
{
- unsigned int uiClassifierIndex = 0;
- for(uiClassifierIndex=0;uiClassifierIndex<MAX_CLASSIFIERS;uiClassifierIndex++)
- {
- if((Adapter->astClassifierTable[uiClassifierIndex].bUsed) &&
- (Adapter->astClassifierTable[uiClassifierIndex].uiClassifierRuleIndex == uiClassifierID)&&
- (Adapter->astClassifierTable[uiClassifierIndex].ulSFID == ulSFID))
- return uiClassifierIndex;
+ int i;
+
+ for (i = 0; i < MAX_CLASSIFIERS; i++) {
+ if ((Adapter->astClassifierTable[i].bUsed) &&
+ (Adapter->astClassifierTable[i].uiClassifierRuleIndex == uiClassifierID) &&
+ (Adapter->astClassifierTable[i].ulSFID == ulSFID))
+ return i;
}
+
return MAX_CLASSIFIERS+1;
}
-/**
-@ingroup ctrl_pkt_functions
-This routinue would search Free available Classifier entry in classifier table.
-@return free Classifier Entry index in classifier table for specified SF
-*/
-static int SearchFreeClsid(PMINI_ADAPTER Adapter /**Adapter Context*/
- )
+/*
+ * @ingroup ctrl_pkt_functions
+ * This routine would search for a free available classifier entry in the classifier table.
+ * @return free Classifier Entry index in classifier table for specified SF
+ */
+static int SearchFreeClsid(PMINI_ADAPTER Adapter /**Adapter Context*/)
{
- unsigned int uiClassifierIndex = 0;
- for(uiClassifierIndex=0;uiClassifierIndex<MAX_CLASSIFIERS;uiClassifierIndex++)
- {
- if(!Adapter->astClassifierTable[uiClassifierIndex].bUsed)
- return uiClassifierIndex;
+ int i;
+
+ for (i = 0; i < MAX_CLASSIFIERS; i++) {
+ if (!Adapter->astClassifierTable[i].bUsed)
+ return i;
}
+
return MAX_CLASSIFIERS+1;
}
static VOID deleteSFBySfid(PMINI_ADAPTER Adapter, UINT uiSearchRuleIndex)
{
- //deleting all the packet held in the SF
- flush_queue(Adapter,uiSearchRuleIndex);
+ /* deleting all the packet held in the SF */
+ flush_queue(Adapter, uiSearchRuleIndex);
- //Deleting the all classifiers for this SF
- DeleteAllClassifiersForSF(Adapter,uiSearchRuleIndex);
+ /* Deleting the all classifiers for this SF */
+ DeleteAllClassifiersForSF(Adapter, uiSearchRuleIndex);
- //Resetting only MIBS related entries in the SF
+ /* Resetting only MIBS related entries in the SF */
memset((PVOID)&Adapter->PackInfo[uiSearchRuleIndex], 0, sizeof(S_MIBS_SERVICEFLOW_TABLE));
}
static inline VOID
-CopyIpAddrToClassifier(S_CLASSIFIER_RULE *pstClassifierEntry ,
- B_UINT8 u8IpAddressLen , B_UINT8 *pu8IpAddressMaskSrc ,
- BOOLEAN bIpVersion6 , E_IPADDR_CONTEXT eIpAddrContext)
+CopyIpAddrToClassifier(S_CLASSIFIER_RULE *pstClassifierEntry,
+ B_UINT8 u8IpAddressLen, B_UINT8 *pu8IpAddressMaskSrc,
+ BOOLEAN bIpVersion6, E_IPADDR_CONTEXT eIpAddrContext)
{
- UINT ucLoopIndex=0;
- UINT nSizeOfIPAddressInBytes = IP_LENGTH_OF_ADDRESS;
- UCHAR *ptrClassifierIpAddress = NULL;
- UCHAR *ptrClassifierIpMask = NULL;
- PMINI_ADAPTER Adapter = GET_BCM_ADAPTER(gblpnetdev);
+ int i = 0;
+ UINT nSizeOfIPAddressInBytes = IP_LENGTH_OF_ADDRESS;
+ UCHAR *ptrClassifierIpAddress = NULL;
+ UCHAR *ptrClassifierIpMask = NULL;
+ PMINI_ADAPTER Adapter = GET_BCM_ADAPTER(gblpnetdev);
- if(bIpVersion6)
- {
+ if (bIpVersion6)
nSizeOfIPAddressInBytes = IPV6_ADDRESS_SIZEINBYTES;
- }
- //Destination Ip Address
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"Ip Address Range Length:0x%X ",
- u8IpAddressLen);
- if((bIpVersion6?(IPV6_ADDRESS_SIZEINBYTES * MAX_IP_RANGE_LENGTH * 2):
- (TOTAL_MASKED_ADDRESS_IN_BYTES)) >= u8IpAddressLen)
- {
+
+ /* Destination Ip Address */
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Ip Address Range Length:0x%X ", u8IpAddressLen);
+ if ((bIpVersion6 ? (IPV6_ADDRESS_SIZEINBYTES * MAX_IP_RANGE_LENGTH * 2) :
+ (TOTAL_MASKED_ADDRESS_IN_BYTES)) >= u8IpAddressLen) {
/*
- //checking both the mask and address togethor in Classification.
- //So length will be : TotalLengthInBytes/nSizeOfIPAddressInBytes * 2
- //(nSizeOfIPAddressInBytes for address and nSizeOfIPAddressInBytes for mask)
- */
- if(eIpAddrContext == eDestIpAddress)
- {
- pstClassifierEntry->ucIPDestinationAddressLength =
- u8IpAddressLen/(nSizeOfIPAddressInBytes * 2);
- if(bIpVersion6)
- {
- ptrClassifierIpAddress =
- pstClassifierEntry->stDestIpAddress.ucIpv6Address;
- ptrClassifierIpMask =
- pstClassifierEntry->stDestIpAddress.ucIpv6Mask;
- }
- else
- {
- ptrClassifierIpAddress =
- pstClassifierEntry->stDestIpAddress.ucIpv4Address;
- ptrClassifierIpMask =
- pstClassifierEntry->stDestIpAddress.ucIpv4Mask;
- }
- }
- else if(eIpAddrContext == eSrcIpAddress)
- {
- pstClassifierEntry->ucIPSourceAddressLength =
- u8IpAddressLen/(nSizeOfIPAddressInBytes * 2);
- if(bIpVersion6)
- {
- ptrClassifierIpAddress =
- pstClassifierEntry->stSrcIpAddress.ucIpv6Address;
- ptrClassifierIpMask =
- pstClassifierEntry->stSrcIpAddress.ucIpv6Mask;
+ * checking both the mask and address together in Classification.
+ * So length will be : TotalLengthInBytes/nSizeOfIPAddressInBytes * 2
+ * (nSizeOfIPAddressInBytes for address and nSizeOfIPAddressInBytes for mask)
+ */
+ if (eIpAddrContext == eDestIpAddress) {
+ pstClassifierEntry->ucIPDestinationAddressLength = u8IpAddressLen/(nSizeOfIPAddressInBytes * 2);
+ if (bIpVersion6) {
+ ptrClassifierIpAddress = pstClassifierEntry->stDestIpAddress.ucIpv6Address;
+ ptrClassifierIpMask = pstClassifierEntry->stDestIpAddress.ucIpv6Mask;
+ } else {
+ ptrClassifierIpAddress = pstClassifierEntry->stDestIpAddress.ucIpv4Address;
+ ptrClassifierIpMask = pstClassifierEntry->stDestIpAddress.ucIpv4Mask;
}
- else
- {
- ptrClassifierIpAddress =
- pstClassifierEntry->stSrcIpAddress.ucIpv4Address;
- ptrClassifierIpMask =
- pstClassifierEntry->stSrcIpAddress.ucIpv4Mask;
+ } else if (eIpAddrContext == eSrcIpAddress) {
+ pstClassifierEntry->ucIPSourceAddressLength = u8IpAddressLen/(nSizeOfIPAddressInBytes * 2);
+ if (bIpVersion6) {
+ ptrClassifierIpAddress = pstClassifierEntry->stSrcIpAddress.ucIpv6Address;
+ ptrClassifierIpMask = pstClassifierEntry->stSrcIpAddress.ucIpv6Mask;
+ } else {
+ ptrClassifierIpAddress = pstClassifierEntry->stSrcIpAddress.ucIpv4Address;
+ ptrClassifierIpMask = pstClassifierEntry->stSrcIpAddress.ucIpv4Mask;
}
}
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"Address Length:0x%X \n",
- pstClassifierEntry->ucIPDestinationAddressLength);
- while((u8IpAddressLen>= nSizeOfIPAddressInBytes) &&
- (ucLoopIndex < MAX_IP_RANGE_LENGTH))
- {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Address Length:0x%X\n", pstClassifierEntry->ucIPDestinationAddressLength);
+ while ((u8IpAddressLen >= nSizeOfIPAddressInBytes) && (i < MAX_IP_RANGE_LENGTH)) {
memcpy(ptrClassifierIpAddress +
- (ucLoopIndex * nSizeOfIPAddressInBytes),
- (pu8IpAddressMaskSrc+(ucLoopIndex*nSizeOfIPAddressInBytes*2)),
+ (i * nSizeOfIPAddressInBytes),
+ (pu8IpAddressMaskSrc+(i*nSizeOfIPAddressInBytes*2)),
nSizeOfIPAddressInBytes);
- if(!bIpVersion6)
- {
- if(eIpAddrContext == eSrcIpAddress)
- {
- pstClassifierEntry->stSrcIpAddress.ulIpv4Addr[ucLoopIndex]=
- ntohl(pstClassifierEntry->stSrcIpAddress.
- ulIpv4Addr[ucLoopIndex]);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"Src Ip Address:0x%luX ",pstClassifierEntry->stSrcIpAddress.ulIpv4Addr[ucLoopIndex]);
- }
- else if(eIpAddrContext == eDestIpAddress)
- {
- pstClassifierEntry->stDestIpAddress.ulIpv4Addr[ucLoopIndex]= ntohl(pstClassifierEntry->stDestIpAddress.
- ulIpv4Addr[ucLoopIndex]);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"Dest Ip Address:0x%luX ",pstClassifierEntry->stDestIpAddress.ulIpv4Addr[ucLoopIndex]);
+
+ if (!bIpVersion6) {
+ if (eIpAddrContext == eSrcIpAddress) {
+ pstClassifierEntry->stSrcIpAddress.ulIpv4Addr[i] = ntohl(pstClassifierEntry->stSrcIpAddress.ulIpv4Addr[i]);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Src Ip Address:0x%luX ",
+ pstClassifierEntry->stSrcIpAddress.ulIpv4Addr[i]);
+ } else if (eIpAddrContext == eDestIpAddress) {
+ pstClassifierEntry->stDestIpAddress.ulIpv4Addr[i] = ntohl(pstClassifierEntry->stDestIpAddress.ulIpv4Addr[i]);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Dest Ip Address:0x%luX ",
+ pstClassifierEntry->stDestIpAddress.ulIpv4Addr[i]);
}
}
- u8IpAddressLen-=nSizeOfIPAddressInBytes;
- if(u8IpAddressLen >= nSizeOfIPAddressInBytes)
- {
+ u8IpAddressLen -= nSizeOfIPAddressInBytes;
+ if (u8IpAddressLen >= nSizeOfIPAddressInBytes) {
memcpy(ptrClassifierIpMask +
- (ucLoopIndex * nSizeOfIPAddressInBytes),
+ (i * nSizeOfIPAddressInBytes),
(pu8IpAddressMaskSrc+nSizeOfIPAddressInBytes +
- (ucLoopIndex*nSizeOfIPAddressInBytes*2)),
+ (i*nSizeOfIPAddressInBytes*2)),
nSizeOfIPAddressInBytes);
- if(!bIpVersion6)
- {
- if(eIpAddrContext == eSrcIpAddress)
- {
- pstClassifierEntry->stSrcIpAddress.
- ulIpv4Mask[ucLoopIndex]=
- ntohl(pstClassifierEntry->stSrcIpAddress.
- ulIpv4Mask[ucLoopIndex]);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"Src Ip Mask Address:0x%luX ",pstClassifierEntry->stSrcIpAddress.ulIpv4Mask[ucLoopIndex]);
- }
- else if(eIpAddrContext == eDestIpAddress)
- {
- pstClassifierEntry->stDestIpAddress.
- ulIpv4Mask[ucLoopIndex] =
- ntohl(pstClassifierEntry->stDestIpAddress.
- ulIpv4Mask[ucLoopIndex]);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"Dest Ip Mask Address:0x%luX ",pstClassifierEntry->stDestIpAddress.ulIpv4Mask[ucLoopIndex]);
+
+ if (!bIpVersion6) {
+ if (eIpAddrContext == eSrcIpAddress) {
+ pstClassifierEntry->stSrcIpAddress.ulIpv4Mask[i] =
+ ntohl(pstClassifierEntry->stSrcIpAddress.ulIpv4Mask[i]);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Src Ip Mask Address:0x%luX ",
+ pstClassifierEntry->stSrcIpAddress.ulIpv4Mask[i]);
+ } else if (eIpAddrContext == eDestIpAddress) {
+ pstClassifierEntry->stDestIpAddress.ulIpv4Mask[i] =
+ ntohl(pstClassifierEntry->stDestIpAddress.ulIpv4Mask[i]);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Dest Ip Mask Address:0x%luX ",
+ pstClassifierEntry->stDestIpAddress.ulIpv4Mask[i]);
}
}
- u8IpAddressLen-=nSizeOfIPAddressInBytes;
- }
- if(0==u8IpAddressLen)
- {
- pstClassifierEntry->bDestIpValid=TRUE;
+ u8IpAddressLen -= nSizeOfIPAddressInBytes;
}
- ucLoopIndex++;
+ if (u8IpAddressLen == 0)
+ pstClassifierEntry->bDestIpValid = TRUE;
+
+ i++;
}
- if(bIpVersion6)
- {
- //Restore EndianNess of Struct
- for(ucLoopIndex =0 ; ucLoopIndex < MAX_IP_RANGE_LENGTH * 4 ;
- ucLoopIndex++)
- {
- if(eIpAddrContext == eSrcIpAddress)
- {
- pstClassifierEntry->stSrcIpAddress.ulIpv6Addr[ucLoopIndex]=
- ntohl(pstClassifierEntry->stSrcIpAddress.
- ulIpv6Addr[ucLoopIndex]);
- pstClassifierEntry->stSrcIpAddress.ulIpv6Mask[ucLoopIndex]= ntohl(pstClassifierEntry->stSrcIpAddress.
- ulIpv6Mask[ucLoopIndex]);
- }
- else if(eIpAddrContext == eDestIpAddress)
- {
- pstClassifierEntry->stDestIpAddress.ulIpv6Addr[ucLoopIndex]= ntohl(pstClassifierEntry->stDestIpAddress.
- ulIpv6Addr[ucLoopIndex]);
- pstClassifierEntry->stDestIpAddress.ulIpv6Mask[ucLoopIndex]= ntohl(pstClassifierEntry->stDestIpAddress.
- ulIpv6Mask[ucLoopIndex]);
+ if (bIpVersion6) {
+ /* Restore EndianNess of Struct */
+ for (i = 0; i < MAX_IP_RANGE_LENGTH * 4; i++) {
+ if (eIpAddrContext == eSrcIpAddress) {
+ pstClassifierEntry->stSrcIpAddress.ulIpv6Addr[i] = ntohl(pstClassifierEntry->stSrcIpAddress.ulIpv6Addr[i]);
+ pstClassifierEntry->stSrcIpAddress.ulIpv6Mask[i] = ntohl(pstClassifierEntry->stSrcIpAddress.ulIpv6Mask[i]);
+ } else if (eIpAddrContext == eDestIpAddress) {
+ pstClassifierEntry->stDestIpAddress.ulIpv6Addr[i] = ntohl(pstClassifierEntry->stDestIpAddress.ulIpv6Addr[i]);
+ pstClassifierEntry->stDestIpAddress.ulIpv6Mask[i] = ntohl(pstClassifierEntry->stDestIpAddress.ulIpv6Mask[i]);
}
}
}
}
}
-
-void ClearTargetDSXBuffer(PMINI_ADAPTER Adapter,B_UINT16 TID,BOOLEAN bFreeAll)
+void ClearTargetDSXBuffer(PMINI_ADAPTER Adapter, B_UINT16 TID, BOOLEAN bFreeAll)
{
- ULONG ulIndex;
- for(ulIndex=0; ulIndex < Adapter->ulTotalTargetBuffersAvailable; ulIndex++)
- {
- if(Adapter->astTargetDsxBuffer[ulIndex].valid)
+ int i;
+
+ for (i = 0; i < Adapter->ulTotalTargetBuffersAvailable; i++) {
+ if (Adapter->astTargetDsxBuffer[i].valid)
continue;
- if ((bFreeAll) || (Adapter->astTargetDsxBuffer[ulIndex].tid == TID)){
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "ClearTargetDSXBuffer: found tid %d buffer cleared %lx\n",
- TID, Adapter->astTargetDsxBuffer[ulIndex].ulTargetDsxBuffer);
- Adapter->astTargetDsxBuffer[ulIndex].valid=1;
- Adapter->astTargetDsxBuffer[ulIndex].tid=0;
+
+ if ((bFreeAll) || (Adapter->astTargetDsxBuffer[i].tid == TID)) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "ClearTargetDSXBuffer: found tid %d buffer cleared %lx\n",
+ TID, Adapter->astTargetDsxBuffer[i].ulTargetDsxBuffer);
+ Adapter->astTargetDsxBuffer[i].valid = 1;
+ Adapter->astTargetDsxBuffer[i].tid = 0;
Adapter->ulFreeTargetBufferCnt++;
- }
+ }
}
}
-/**
-@ingroup ctrl_pkt_functions
-copy classifier rule into the specified SF index
-*/
-static inline VOID CopyClassifierRuleToSF(PMINI_ADAPTER Adapter,stConvergenceSLTypes *psfCSType,UINT uiSearchRuleIndex,UINT nClassifierIndex)
+/*
+ * @ingroup ctrl_pkt_functions
+ * copy classifier rule into the specified SF index
+ */
+static inline VOID CopyClassifierRuleToSF(PMINI_ADAPTER Adapter, stConvergenceSLTypes *psfCSType, UINT uiSearchRuleIndex, UINT nClassifierIndex)
{
S_CLASSIFIER_RULE *pstClassifierEntry = NULL;
- //VOID *pvPhsContext = NULL;
- UINT ucLoopIndex=0;
- //UCHAR ucProtocolLength=0;
- //ULONG ulPhsStatus;
-
+ /* VOID *pvPhsContext = NULL; */
+ int i;
+ /* UCHAR ucProtocolLength=0; */
+ /* ULONG ulPhsStatus; */
- if(Adapter->PackInfo[uiSearchRuleIndex].usVCID_Value == 0 ||
+ if (Adapter->PackInfo[uiSearchRuleIndex].usVCID_Value == 0 ||
nClassifierIndex > (MAX_CLASSIFIERS-1))
return;
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Storing Classifier Rule Index : %X",
+ ntohs(psfCSType->cCPacketClassificationRule.u16PacketClassificationRuleIndex));
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"Storing Classifier Rule Index : %X",ntohs(psfCSType->cCPacketClassificationRule.u16PacketClassificationRuleIndex));
-
- if(nClassifierIndex > MAX_CLASSIFIERS-1)
+ if (nClassifierIndex > MAX_CLASSIFIERS-1)
return;
pstClassifierEntry = &Adapter->astClassifierTable[nClassifierIndex];
- if(pstClassifierEntry)
- {
- //Store if Ipv6
- pstClassifierEntry->bIpv6Protocol =
- (Adapter->PackInfo[uiSearchRuleIndex].ucIpVersion == IPV6)?TRUE:FALSE;
-
- //Destinaiton Port
- pstClassifierEntry->ucDestPortRangeLength=psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRangeLength/4;
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"Destination Port Range Length:0x%X ",pstClassifierEntry->ucDestPortRangeLength);
- if( MAX_PORT_RANGE >= psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRangeLength)
- {
- for(ucLoopIndex=0;ucLoopIndex<(pstClassifierEntry->ucDestPortRangeLength);ucLoopIndex++)
- {
- pstClassifierEntry->usDestPortRangeLo[ucLoopIndex] =
- *((PUSHORT)(psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange+ucLoopIndex));
- pstClassifierEntry->usDestPortRangeHi[ucLoopIndex] =
- *((PUSHORT)(psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange+2+ucLoopIndex));
- pstClassifierEntry->usDestPortRangeLo[ucLoopIndex]=ntohs(pstClassifierEntry->usDestPortRangeLo[ucLoopIndex]);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"Destination Port Range Lo:0x%X ",pstClassifierEntry->usDestPortRangeLo[ucLoopIndex]);
- pstClassifierEntry->usDestPortRangeHi[ucLoopIndex]=ntohs(pstClassifierEntry->usDestPortRangeHi[ucLoopIndex]);
+ if (pstClassifierEntry) {
+ /* Store if Ipv6 */
+ pstClassifierEntry->bIpv6Protocol = (Adapter->PackInfo[uiSearchRuleIndex].ucIpVersion == IPV6) ? TRUE : FALSE;
+
+ /* Destination Port */
+ pstClassifierEntry->ucDestPortRangeLength = psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRangeLength / 4;
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Destination Port Range Length:0x%X ", pstClassifierEntry->ucDestPortRangeLength);
+
+ if (psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRangeLength <= MAX_PORT_RANGE) {
+ for (i = 0; i < (pstClassifierEntry->ucDestPortRangeLength); i++) {
+ pstClassifierEntry->usDestPortRangeLo[i] = *((PUSHORT)(psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange+i));
+ pstClassifierEntry->usDestPortRangeHi[i] =
+ *((PUSHORT)(psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange+2+i));
+ pstClassifierEntry->usDestPortRangeLo[i] = ntohs(pstClassifierEntry->usDestPortRangeLo[i]);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Destination Port Range Lo:0x%X ",
+ pstClassifierEntry->usDestPortRangeLo[i]);
+ pstClassifierEntry->usDestPortRangeHi[i] = ntohs(pstClassifierEntry->usDestPortRangeHi[i]);
}
+ } else {
+ pstClassifierEntry->ucDestPortRangeLength = 0;
}
- else
- {
- pstClassifierEntry->ucDestPortRangeLength=0;
- }
- //Source Port
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"Source Port Range Length:0x%X ",psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRangeLength);
- if(MAX_PORT_RANGE >=
- psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRangeLength)
- {
- pstClassifierEntry->ucSrcPortRangeLength =
- psfCSType->cCPacketClassificationRule.
- u8ProtocolSourcePortRangeLength/4;
- for(ucLoopIndex = 0; ucLoopIndex <
- (pstClassifierEntry->ucSrcPortRangeLength); ucLoopIndex++)
- {
- pstClassifierEntry->usSrcPortRangeLo[ucLoopIndex] =
- *((PUSHORT)(psfCSType->cCPacketClassificationRule.
- u8ProtocolSourcePortRange+ucLoopIndex));
- pstClassifierEntry->usSrcPortRangeHi[ucLoopIndex] =
- *((PUSHORT)(psfCSType->cCPacketClassificationRule.
- u8ProtocolSourcePortRange+2+ucLoopIndex));
- pstClassifierEntry->usSrcPortRangeLo[ucLoopIndex] =
- ntohs(pstClassifierEntry->usSrcPortRangeLo[ucLoopIndex]);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"Source Port Range Lo:0x%X ",pstClassifierEntry->usSrcPortRangeLo[ucLoopIndex]);
- pstClassifierEntry->usSrcPortRangeHi[ucLoopIndex]=ntohs(pstClassifierEntry->usSrcPortRangeHi[ucLoopIndex]);
+
+ /* Source Port */
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Source Port Range Length:0x%X ",
+ psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRangeLength);
+ if (psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRangeLength <= MAX_PORT_RANGE) {
+ pstClassifierEntry->ucSrcPortRangeLength = psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRangeLength/4;
+ for (i = 0; i < (pstClassifierEntry->ucSrcPortRangeLength); i++) {
+ pstClassifierEntry->usSrcPortRangeLo[i] =
+ *((PUSHORT)(psfCSType->cCPacketClassificationRule.
+ u8ProtocolSourcePortRange+i));
+ pstClassifierEntry->usSrcPortRangeHi[i] =
+ *((PUSHORT)(psfCSType->cCPacketClassificationRule.
+ u8ProtocolSourcePortRange+2+i));
+ pstClassifierEntry->usSrcPortRangeLo[i] =
+ ntohs(pstClassifierEntry->usSrcPortRangeLo[i]);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Source Port Range Lo:0x%X ",
+ pstClassifierEntry->usSrcPortRangeLo[i]);
+ pstClassifierEntry->usSrcPortRangeHi[i] = ntohs(pstClassifierEntry->usSrcPortRangeHi[i]);
}
}
- //Destination Ip Address and Mask
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"Ip Destination Parameters : ");
-
+ /* Destination Ip Address and Mask */
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Ip Destination Parameters : ");
CopyIpAddrToClassifier(pstClassifierEntry,
- psfCSType->cCPacketClassificationRule.u8IPDestinationAddressLength,
- psfCSType->cCPacketClassificationRule.u8IPDestinationAddress,
- (Adapter->PackInfo[uiSearchRuleIndex].ucIpVersion == IPV6)?
- TRUE:FALSE, eDestIpAddress);
+ psfCSType->cCPacketClassificationRule.u8IPDestinationAddressLength,
+ psfCSType->cCPacketClassificationRule.u8IPDestinationAddress,
+ (Adapter->PackInfo[uiSearchRuleIndex].ucIpVersion == IPV6) ?
+ TRUE : FALSE, eDestIpAddress);
- //Source Ip Address and Mask
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"Ip Source Parameters : ");
+ /* Source Ip Address and Mask */
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Ip Source Parameters : ");
CopyIpAddrToClassifier(pstClassifierEntry,
- psfCSType->cCPacketClassificationRule.u8IPMaskedSourceAddressLength,
- psfCSType->cCPacketClassificationRule.u8IPMaskedSourceAddress,
- (Adapter->PackInfo[uiSearchRuleIndex].ucIpVersion == IPV6)?TRUE:FALSE,
- eSrcIpAddress);
-
- //TOS
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"TOS Length:0x%X ",psfCSType->cCPacketClassificationRule.u8IPTypeOfServiceLength);
- if(3 == psfCSType->cCPacketClassificationRule.u8IPTypeOfServiceLength)
- {
- pstClassifierEntry->ucIPTypeOfServiceLength =
- psfCSType->cCPacketClassificationRule.u8IPTypeOfServiceLength;
- pstClassifierEntry->ucTosLow =
- psfCSType->cCPacketClassificationRule.u8IPTypeOfService[0];
- pstClassifierEntry->ucTosHigh =
- psfCSType->cCPacketClassificationRule.u8IPTypeOfService[1];
- pstClassifierEntry->ucTosMask =
- psfCSType->cCPacketClassificationRule.u8IPTypeOfService[2];
+ psfCSType->cCPacketClassificationRule.u8IPMaskedSourceAddressLength,
+ psfCSType->cCPacketClassificationRule.u8IPMaskedSourceAddress,
+ (Adapter->PackInfo[uiSearchRuleIndex].ucIpVersion == IPV6) ? TRUE : FALSE,
+ eSrcIpAddress);
+
+ /* TOS */
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "TOS Length:0x%X ", psfCSType->cCPacketClassificationRule.u8IPTypeOfServiceLength);
+ if (psfCSType->cCPacketClassificationRule.u8IPTypeOfServiceLength == 3) {
+ pstClassifierEntry->ucIPTypeOfServiceLength = psfCSType->cCPacketClassificationRule.u8IPTypeOfServiceLength;
+ pstClassifierEntry->ucTosLow = psfCSType->cCPacketClassificationRule.u8IPTypeOfService[0];
+ pstClassifierEntry->ucTosHigh = psfCSType->cCPacketClassificationRule.u8IPTypeOfService[1];
+ pstClassifierEntry->ucTosMask = psfCSType->cCPacketClassificationRule.u8IPTypeOfService[2];
pstClassifierEntry->bTOSValid = TRUE;
}
- if(psfCSType->cCPacketClassificationRule.u8Protocol == 0)
- {
- //we didn't get protocol field filled in by the BS
- pstClassifierEntry->ucProtocolLength=0;
- }
- else
- {
- pstClassifierEntry->ucProtocolLength=1;// 1 valid protocol
+ if (psfCSType->cCPacketClassificationRule.u8Protocol == 0) {
+ /* we didn't get protocol field filled in by the BS */
+ pstClassifierEntry->ucProtocolLength = 0;
+ } else {
+ pstClassifierEntry->ucProtocolLength = 1; /* 1 valid protocol */
}
- pstClassifierEntry->ucProtocol[0] =
- psfCSType->cCPacketClassificationRule.u8Protocol;
-
- pstClassifierEntry->u8ClassifierRulePriority =
- psfCSType->cCPacketClassificationRule.u8ClassifierRulePriority;
-
- //store the classifier rule ID and set this classifier entry as valid
- pstClassifierEntry->ucDirection =
- Adapter->PackInfo[uiSearchRuleIndex].ucDirection;
- pstClassifierEntry->uiClassifierRuleIndex = ntohs(psfCSType->
- cCPacketClassificationRule.u16PacketClassificationRuleIndex);
- pstClassifierEntry->usVCID_Value =
- Adapter->PackInfo[uiSearchRuleIndex].usVCID_Value;
- pstClassifierEntry->ulSFID =
- Adapter->PackInfo[uiSearchRuleIndex].ulSFID;
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Search Index %d Dir: %d, Index: %d, Vcid: %d\n",
- uiSearchRuleIndex, pstClassifierEntry->ucDirection,
- pstClassifierEntry->uiClassifierRuleIndex,
- pstClassifierEntry->usVCID_Value);
-
- if(psfCSType->cCPacketClassificationRule.u8AssociatedPHSI)
- {
+ pstClassifierEntry->ucProtocol[0] = psfCSType->cCPacketClassificationRule.u8Protocol;
+ pstClassifierEntry->u8ClassifierRulePriority = psfCSType->cCPacketClassificationRule.u8ClassifierRulePriority;
+
+ /* store the classifier rule ID and set this classifier entry as valid */
+ pstClassifierEntry->ucDirection = Adapter->PackInfo[uiSearchRuleIndex].ucDirection;
+ pstClassifierEntry->uiClassifierRuleIndex = ntohs(psfCSType->cCPacketClassificationRule.u16PacketClassificationRuleIndex);
+ pstClassifierEntry->usVCID_Value = Adapter->PackInfo[uiSearchRuleIndex].usVCID_Value;
+ pstClassifierEntry->ulSFID = Adapter->PackInfo[uiSearchRuleIndex].ulSFID;
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Search Index %d Dir: %d, Index: %d, Vcid: %d\n",
+ uiSearchRuleIndex, pstClassifierEntry->ucDirection,
+ pstClassifierEntry->uiClassifierRuleIndex,
+ pstClassifierEntry->usVCID_Value);
+
+ if (psfCSType->cCPacketClassificationRule.u8AssociatedPHSI)
pstClassifierEntry->u8AssociatedPHSI = psfCSType->cCPacketClassificationRule.u8AssociatedPHSI;
- }
- //Copy ETH CS Parameters
+ /* Copy ETH CS Parameters */
pstClassifierEntry->ucEthCSSrcMACLen = (psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddressLength);
- memcpy(pstClassifierEntry->au8EThCSSrcMAC,psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress,MAC_ADDRESS_SIZE);
- memcpy(pstClassifierEntry->au8EThCSSrcMACMask,psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress+MAC_ADDRESS_SIZE,MAC_ADDRESS_SIZE);
+ memcpy(pstClassifierEntry->au8EThCSSrcMAC, psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress, MAC_ADDRESS_SIZE);
+ memcpy(pstClassifierEntry->au8EThCSSrcMACMask, psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress + MAC_ADDRESS_SIZE, MAC_ADDRESS_SIZE);
pstClassifierEntry->ucEthCSDestMACLen = (psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddressLength);
- memcpy(pstClassifierEntry->au8EThCSDestMAC,psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress,MAC_ADDRESS_SIZE);
- memcpy(pstClassifierEntry->au8EThCSDestMACMask,psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress+MAC_ADDRESS_SIZE,MAC_ADDRESS_SIZE);
+ memcpy(pstClassifierEntry->au8EThCSDestMAC, psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress, MAC_ADDRESS_SIZE);
+ memcpy(pstClassifierEntry->au8EThCSDestMACMask, psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress + MAC_ADDRESS_SIZE, MAC_ADDRESS_SIZE);
pstClassifierEntry->ucEtherTypeLen = (psfCSType->cCPacketClassificationRule.u8EthertypeLength);
- memcpy(pstClassifierEntry->au8EthCSEtherType,psfCSType->cCPacketClassificationRule.u8Ethertype,NUM_ETHERTYPE_BYTES);
+ memcpy(pstClassifierEntry->au8EthCSEtherType, psfCSType->cCPacketClassificationRule.u8Ethertype, NUM_ETHERTYPE_BYTES);
memcpy(pstClassifierEntry->usUserPriority, &psfCSType->cCPacketClassificationRule.u16UserPriority, 2);
pstClassifierEntry->usVLANID = ntohs(psfCSType->cCPacketClassificationRule.u16VLANID);
pstClassifierEntry->usValidityBitMap = ntohs(psfCSType->cCPacketClassificationRule.u16ValidityBitMap);
@@ -434,244 +362,199 @@ static inline VOID CopyClassifierRuleToSF(PMINI_ADAPTER Adapter,stConvergenceSLT
}
}
-
-/**
-@ingroup ctrl_pkt_functions
-*/
-static inline VOID DeleteClassifierRuleFromSF(PMINI_ADAPTER Adapter,UINT uiSearchRuleIndex,UINT nClassifierIndex)
+/*
+ * @ingroup ctrl_pkt_functions
+ */
+static inline VOID DeleteClassifierRuleFromSF(PMINI_ADAPTER Adapter, UINT uiSearchRuleIndex, UINT nClassifierIndex)
{
S_CLASSIFIER_RULE *pstClassifierEntry = NULL;
- B_UINT16 u16PacketClassificationRuleIndex;
- USHORT usVCID;
- //VOID *pvPhsContext = NULL;
- //ULONG ulPhsStatus;
+ B_UINT16 u16PacketClassificationRuleIndex;
+ USHORT usVCID;
+ /* VOID *pvPhsContext = NULL; */
+ /* ULONG ulPhsStatus; */
usVCID = Adapter->PackInfo[uiSearchRuleIndex].usVCID_Value;
- if(nClassifierIndex > MAX_CLASSIFIERS-1)
+ if (nClassifierIndex > MAX_CLASSIFIERS-1)
return;
- if(usVCID == 0)
+ if (usVCID == 0)
return;
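+ /* Save the rule index first; the entry is cleared below but the PHS rule removal still needs it */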
u16PacketClassificationRuleIndex = Adapter->astClassifierTable[nClassifierIndex].uiClassifierRuleIndex;
-
-
pstClassifierEntry = &Adapter->astClassifierTable[nClassifierIndex];
- if(pstClassifierEntry)
- {
+ if (pstClassifierEntry) {
pstClassifierEntry->bUsed = FALSE;
pstClassifierEntry->uiClassifierRuleIndex = 0;
- memset(pstClassifierEntry,0,sizeof(S_CLASSIFIER_RULE));
+ memset(pstClassifierEntry, 0, sizeof(S_CLASSIFIER_RULE));
- //Delete the PHS Rule for this classifier
- PhsDeleteClassifierRule(
- &Adapter->stBCMPhsContext,
- usVCID,
- u16PacketClassificationRuleIndex);
+ /* Delete the PHS Rule for this classifier */
+ PhsDeleteClassifierRule(&Adapter->stBCMPhsContext, usVCID, u16PacketClassificationRuleIndex);
}
}
-/**
-@ingroup ctrl_pkt_functions
-*/
-VOID DeleteAllClassifiersForSF(PMINI_ADAPTER Adapter,UINT uiSearchRuleIndex)
+/*
+ * @ingroup ctrl_pkt_functions
+ */
+VOID DeleteAllClassifiersForSF(PMINI_ADAPTER Adapter, UINT uiSearchRuleIndex)
{
S_CLASSIFIER_RULE *pstClassifierEntry = NULL;
- UINT nClassifierIndex;
- //B_UINT16 u16PacketClassificationRuleIndex;
- USHORT ulVCID;
- //VOID *pvPhsContext = NULL;
- //ULONG ulPhsStatus;
+ int i;
+ /* B_UINT16 u16PacketClassificationRuleIndex; */
+ USHORT ulVCID;
+ /* VOID *pvPhsContext = NULL; */
+ /* ULONG ulPhsStatus; */
ulVCID = Adapter->PackInfo[uiSearchRuleIndex].usVCID_Value;
- if(ulVCID == 0)
+ if (ulVCID == 0)
return;
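+ /* Walk the entire classifier table; several classifiers may share this SF's VCID */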
+ for (i = 0; i < MAX_CLASSIFIERS; i++) {
+ if (Adapter->astClassifierTable[i].usVCID_Value == ulVCID) {
+ pstClassifierEntry = &Adapter->astClassifierTable[i];
- for(nClassifierIndex =0 ; nClassifierIndex < MAX_CLASSIFIERS ; nClassifierIndex++)
- {
- if(Adapter->astClassifierTable[nClassifierIndex].usVCID_Value == ulVCID)
- {
- pstClassifierEntry = &Adapter->astClassifierTable[nClassifierIndex];
- if(pstClassifierEntry->bUsed)
- {
- DeleteClassifierRuleFromSF(Adapter,uiSearchRuleIndex,nClassifierIndex);
- }
+ if (pstClassifierEntry->bUsed)
+ DeleteClassifierRuleFromSF(Adapter, uiSearchRuleIndex, i);
}
}
- //Delete All Phs Rules Associated with this SF
- PhsDeleteSFRules(
- &Adapter->stBCMPhsContext,
- ulVCID);
-
+ /* Delete All Phs Rules Associated with this SF */
+ PhsDeleteSFRules(&Adapter->stBCMPhsContext, ulVCID);
}
-
-/**
-This routinue copies the Connection Management
-related data into the Adapter structure.
-@ingroup ctrl_pkt_functions
-*/
-
-static VOID CopyToAdapter( register PMINI_ADAPTER Adapter, /**<Pointer to the Adapter structure*/
- register pstServiceFlowParamSI psfLocalSet, /**<Pointer to the ServiceFlowParamSI structure*/
- register UINT uiSearchRuleIndex, /**<Index of Queue, to which this data belongs*/
- register UCHAR ucDsxType,
- stLocalSFAddIndicationAlt *pstAddIndication)
-{
- //UCHAR ucProtocolLength=0;
- ULONG ulSFID;
- UINT nClassifierIndex = 0;
- E_CLASSIFIER_ACTION eClassifierAction = eInvalidClassifierAction;
- B_UINT16 u16PacketClassificationRuleIndex=0;
- UINT nIndex=0;
+/*
+ * This routine copies the Connection Management
+ * related data into the Adapter structure.
+ * @ingroup ctrl_pkt_functions
+ */
+static VOID CopyToAdapter(register PMINI_ADAPTER Adapter, /* Pointer to the Adapter structure */
+ register pstServiceFlowParamSI psfLocalSet, /* Pointer to the ServiceFlowParamSI structure */
+ register UINT uiSearchRuleIndex, /* Index of the queue to which this data belongs */
+ register UCHAR ucDsxType,
+ stLocalSFAddIndicationAlt *pstAddIndication)
+{
+ /* UCHAR ucProtocolLength = 0; */
+ ULONG ulSFID;
+ UINT nClassifierIndex = 0;
+ enum E_CLASSIFIER_ACTION eClassifierAction = eInvalidClassifierAction;
+ B_UINT16 u16PacketClassificationRuleIndex = 0;
+ int i;
stConvergenceSLTypes *psfCSType = NULL;
S_PHS_RULE sPhsRule;
USHORT uVCID = Adapter->PackInfo[uiSearchRuleIndex].usVCID_Value;
UINT UGIValue = 0;
-
- Adapter->PackInfo[uiSearchRuleIndex].bValid=TRUE;
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Search Rule Index = %d\n", uiSearchRuleIndex);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"%s: SFID= %x ",__FUNCTION__, ntohl(psfLocalSet->u32SFID));
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"Updating Queue %d",uiSearchRuleIndex);
+ Adapter->PackInfo[uiSearchRuleIndex].bValid = TRUE;
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Search Rule Index = %d\n", uiSearchRuleIndex);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "%s: SFID= %x ", __func__, ntohl(psfLocalSet->u32SFID));
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Updating Queue %d", uiSearchRuleIndex);
ulSFID = ntohl(psfLocalSet->u32SFID);
- //Store IP Version used
- //Get The Version Of IP used (IPv6 or IPv4) from CSSpecification field of SF
+ /* Store IP Version used */
+ /* Get The Version Of IP used (IPv6 or IPv4) from CSSpecification field of SF */
Adapter->PackInfo[uiSearchRuleIndex].bIPCSSupport = 0;
Adapter->PackInfo[uiSearchRuleIndex].bEthCSSupport = 0;
- /*Enable IP/ETh CS Support As Required*/
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"CopyToAdapter : u8CSSpecification : %X\n",psfLocalSet->u8CSSpecification);
- switch(psfLocalSet->u8CSSpecification)
+ /* Enable IP/ETh CS Support As Required */
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "CopyToAdapter : u8CSSpecification : %X\n", psfLocalSet->u8CSSpecification);
+ switch (psfLocalSet->u8CSSpecification) {
+ case eCSPacketIPV4:
{
- case eCSPacketIPV4:
- {
- Adapter->PackInfo[uiSearchRuleIndex].bIPCSSupport = IPV4_CS;
- break;
- }
- case eCSPacketIPV6:
- {
- Adapter->PackInfo[uiSearchRuleIndex].bIPCSSupport = IPV6_CS;
- break;
- }
-
- case eCS802_3PacketEthernet:
- case eCS802_1QPacketVLAN:
- {
- Adapter->PackInfo[uiSearchRuleIndex].bEthCSSupport = ETH_CS_802_3;
- break;
- }
-
- case eCSPacketIPV4Over802_1QVLAN:
- case eCSPacketIPV4Over802_3Ethernet:
- {
- Adapter->PackInfo[uiSearchRuleIndex].bIPCSSupport = IPV4_CS;
- Adapter->PackInfo[uiSearchRuleIndex].bEthCSSupport = ETH_CS_802_3;
- break;
- }
-
- case eCSPacketIPV6Over802_1QVLAN:
- case eCSPacketIPV6Over802_3Ethernet:
- {
- Adapter->PackInfo[uiSearchRuleIndex].bIPCSSupport = IPV6_CS;
- Adapter->PackInfo[uiSearchRuleIndex].bEthCSSupport = ETH_CS_802_3;
- break;
- }
-
- default:
- {
- BCM_DEBUG_PRINT (Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"Error in value of CS Classification.. setting default to IP CS\n");
- Adapter->PackInfo[uiSearchRuleIndex].bIPCSSupport = IPV4_CS;
- break;
- }
+ Adapter->PackInfo[uiSearchRuleIndex].bIPCSSupport = IPV4_CS;
+ break;
+ }
+ case eCSPacketIPV6:
+ {
+ Adapter->PackInfo[uiSearchRuleIndex].bIPCSSupport = IPV6_CS;
+ break;
+ }
+ case eCS802_3PacketEthernet:
+ case eCS802_1QPacketVLAN:
+ {
+ Adapter->PackInfo[uiSearchRuleIndex].bEthCSSupport = ETH_CS_802_3;
+ break;
+ }
+ case eCSPacketIPV4Over802_1QVLAN:
+ case eCSPacketIPV4Over802_3Ethernet:
+ {
+ Adapter->PackInfo[uiSearchRuleIndex].bIPCSSupport = IPV4_CS;
+ Adapter->PackInfo[uiSearchRuleIndex].bEthCSSupport = ETH_CS_802_3;
+ break;
+ }
+ case eCSPacketIPV6Over802_1QVLAN:
+ case eCSPacketIPV6Over802_3Ethernet:
+ {
+ Adapter->PackInfo[uiSearchRuleIndex].bIPCSSupport = IPV6_CS;
+ Adapter->PackInfo[uiSearchRuleIndex].bEthCSSupport = ETH_CS_802_3;
+ break;
+ }
+ default:
+ {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Error in value of CS Classification.. setting default to IP CS\n");
+ Adapter->PackInfo[uiSearchRuleIndex].bIPCSSupport = IPV4_CS;
+ break;
+ }
}
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"CopyToAdapter : Queue No : %X ETH CS Support : %X , IP CS Support : %X \n",
- uiSearchRuleIndex,
- Adapter->PackInfo[uiSearchRuleIndex].bEthCSSupport,
- Adapter->PackInfo[uiSearchRuleIndex].bIPCSSupport);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "CopyToAdapter : Queue No : %X ETH CS Support : %X , IP CS Support : %X\n",
+ uiSearchRuleIndex,
+ Adapter->PackInfo[uiSearchRuleIndex].bEthCSSupport,
+ Adapter->PackInfo[uiSearchRuleIndex].bIPCSSupport);
- //Store IP Version used
- //Get The Version Of IP used (IPv6 or IPv4) from CSSpecification field of SF
- if(Adapter->PackInfo[uiSearchRuleIndex].bIPCSSupport == IPV6_CS)
- {
+ /* Store IP Version used */
+ /* Get The Version Of IP used (IPv6 or IPv4) from CSSpecification field of SF */
+ if (Adapter->PackInfo[uiSearchRuleIndex].bIPCSSupport == IPV6_CS)
Adapter->PackInfo[uiSearchRuleIndex].ucIpVersion = IPV6;
- }
else
- {
Adapter->PackInfo[uiSearchRuleIndex].ucIpVersion = IPV4;
- }
/* To ensure that the ETH CS code doesn't gets executed if the BS doesn't supports ETH CS */
- if(!Adapter->bETHCSEnabled)
+ if (!Adapter->bETHCSEnabled)
Adapter->PackInfo[uiSearchRuleIndex].bEthCSSupport = 0;
- if(psfLocalSet->u8ServiceClassNameLength > 0 &&
- psfLocalSet->u8ServiceClassNameLength < 32)
- {
- memcpy(Adapter->PackInfo[uiSearchRuleIndex].ucServiceClassName,
- psfLocalSet->u8ServiceClassName,
- psfLocalSet->u8ServiceClassNameLength);
- }
- Adapter->PackInfo[uiSearchRuleIndex].u8QueueType =
- psfLocalSet->u8ServiceFlowSchedulingType;
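+ /* Copy the service class name only when its length fits within ucServiceClassName */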
+ if (psfLocalSet->u8ServiceClassNameLength > 0 && psfLocalSet->u8ServiceClassNameLength < 32)
+ memcpy(Adapter->PackInfo[uiSearchRuleIndex].ucServiceClassName, psfLocalSet->u8ServiceClassName, psfLocalSet->u8ServiceClassNameLength);
- if(Adapter->PackInfo[uiSearchRuleIndex].u8QueueType==BE &&
- Adapter->PackInfo[uiSearchRuleIndex].ucDirection)
- {
- Adapter->usBestEffortQueueIndex=uiSearchRuleIndex;
- }
+ Adapter->PackInfo[uiSearchRuleIndex].u8QueueType = psfLocalSet->u8ServiceFlowSchedulingType;
+
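+ /* Remember the uplink best-effort queue; traffic that matches no classifier typically falls back to it */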
+ if (Adapter->PackInfo[uiSearchRuleIndex].u8QueueType == BE && Adapter->PackInfo[uiSearchRuleIndex].ucDirection)
+ Adapter->usBestEffortQueueIndex = uiSearchRuleIndex;
Adapter->PackInfo[uiSearchRuleIndex].ulSFID = ntohl(psfLocalSet->u32SFID);
Adapter->PackInfo[uiSearchRuleIndex].u8TrafficPriority = psfLocalSet->u8TrafficPriority;
- //copy all the classifier in the Service Flow param structure
- for(nIndex=0; nIndex<psfLocalSet->u8TotalClassifiers; nIndex++)
- {
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"Classifier index =%d",nIndex);
- psfCSType = &psfLocalSet->cConvergenceSLTypes[nIndex];
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"Classifier index =%d",nIndex);
+ /* copy all the classifier in the Service Flow param structure */
+ for (i = 0; i < psfLocalSet->u8TotalClassifiers; i++) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Classifier index =%d", i);
+ psfCSType = &psfLocalSet->cConvergenceSLTypes[i];
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Classifier index =%d", i);
- if(psfCSType->cCPacketClassificationRule.u8ClassifierRulePriority)
- {
- Adapter->PackInfo[uiSearchRuleIndex].bClassifierPriority=TRUE;
- }
-
- if(psfCSType->cCPacketClassificationRule.u8ClassifierRulePriority)
- {
- Adapter->PackInfo[uiSearchRuleIndex].bClassifierPriority=TRUE;
- }
+ if (psfCSType->cCPacketClassificationRule.u8ClassifierRulePriority)
+ Adapter->PackInfo[uiSearchRuleIndex].bClassifierPriority = TRUE;
- if(ucDsxType== DSA_ACK)
- {
+ if (ucDsxType == DSA_ACK) {
eClassifierAction = eAddClassifier;
- }
- else if(ucDsxType == DSC_ACK)
- {
- switch(psfCSType->u8ClassfierDSCAction)
- {
- case 0://DSC Add Classifier
+ } else if (ucDsxType == DSC_ACK) {
+ switch (psfCSType->u8ClassfierDSCAction) {
+ case 0: /* DSC Add Classifier */
{
eClassifierAction = eAddClassifier;
}
break;
- case 1://DSC Replace Classifier
+ case 1: /* DSC Replace Classifier */
{
eClassifierAction = eReplaceClassifier;
}
break;
- case 2://DSC Delete Classifier
+ case 2: /* DSC Delete Classifier */
{
eClassifierAction = eDeleteClassifier;
-
}
break;
default:
@@ -683,163 +566,133 @@ static VOID CopyToAdapter( register PMINI_ADAPTER Adapter, /**<Pointer to the A
u16PacketClassificationRuleIndex = ntohs(psfCSType->cCPacketClassificationRule.u16PacketClassificationRuleIndex);
- switch(eClassifierAction)
- {
+ switch (eClassifierAction) {
case eAddClassifier:
{
- //Get a Free Classifier Index From Classifier table for this SF to add the Classifier
- //Contained in this message
- nClassifierIndex = SearchClsid(Adapter,ulSFID,u16PacketClassificationRuleIndex);
+ /*
+ * Get a free classifier index from the classifier table for this SF
+ * to add the classifier contained in this message
+ */
+ nClassifierIndex = SearchClsid(Adapter, ulSFID, u16PacketClassificationRuleIndex);
- if(nClassifierIndex > MAX_CLASSIFIERS)
- {
+ if (nClassifierIndex > MAX_CLASSIFIERS) {
nClassifierIndex = SearchFreeClsid(Adapter);
- if(nClassifierIndex > MAX_CLASSIFIERS)
- {
- //Failed To get a free Entry
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"Error Failed To get a free Classifier Entry");
+ if (nClassifierIndex > MAX_CLASSIFIERS) {
+ /* Failed to get a free entry */
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Error Failed To get a free Classifier Entry");
break;
}
- //Copy the Classifier Rule for this service flow into our Classifier table maintained per SF.
- CopyClassifierRuleToSF(Adapter,psfCSType,uiSearchRuleIndex,nClassifierIndex);
- }
-
- else
- {
- //This Classifier Already Exists and it is invalid to Add Classifier with existing PCRI
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"CopyToAdapter : Error The Specified Classifier Already Exists \
- and attempted To Add Classifier with Same PCRI : 0x%x\n", u16PacketClassificationRuleIndex);
+ /* Copy the Classifier Rule for this service flow into our Classifier table maintained per SF. */
+ CopyClassifierRuleToSF(Adapter, psfCSType, uiSearchRuleIndex, nClassifierIndex);
+ } else {
+ /* This Classifier Already Exists and it is invalid to Add Classifier with existing PCRI */
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,
+ "CopyToAdapter: Error The Specified Classifier Already Exists and attempted To Add Classifier with Same PCRI : 0x%x\n",
+ u16PacketClassificationRuleIndex);
}
}
break;
-
case eReplaceClassifier:
{
- //Get the Classifier Index From Classifier table for this SF and replace existing Classifier
- //with the new classifier Contained in this message
- nClassifierIndex = SearchClsid(Adapter,ulSFID,u16PacketClassificationRuleIndex);
- if(nClassifierIndex > MAX_CLASSIFIERS)
- {
- //Failed To search the classifier
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"Error Search for Classifier To be replaced failed");
+ /*
+ * Get the classifier index from the classifier table for this SF and
+ * replace the existing classifier with the new one contained in this message
+ */
+ nClassifierIndex = SearchClsid(Adapter, ulSFID, u16PacketClassificationRuleIndex);
+ if (nClassifierIndex > MAX_CLASSIFIERS) {
+ /* Failed to find the classifier */
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Error Search for Classifier To be replaced failed");
break;
}
- //Copy the Classifier Rule for this service flow into our Classifier table maintained per SF.
- CopyClassifierRuleToSF(Adapter,psfCSType,uiSearchRuleIndex,nClassifierIndex);
+ /* Copy the Classifier Rule for this service flow into our Classifier table maintained per SF. */
+ CopyClassifierRuleToSF(Adapter, psfCSType, uiSearchRuleIndex, nClassifierIndex);
}
break;
-
case eDeleteClassifier:
{
- //Get the Classifier Index From Classifier table for this SF and replace existing Classifier
- //with the new classifier Contained in this message
- nClassifierIndex = SearchClsid(Adapter,ulSFID,u16PacketClassificationRuleIndex);
- if(nClassifierIndex > MAX_CLASSIFIERS)
- {
- //Failed To search the classifier
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"Error Search for Classifier To be deleted failed");
+ /*
+ * Get the classifier index from the classifier table for this SF
+ * so that the classifier referenced in this message can be deleted
+ */
+ nClassifierIndex = SearchClsid(Adapter, ulSFID, u16PacketClassificationRuleIndex);
+ if (nClassifierIndex > MAX_CLASSIFIERS) {
+ /* Failed to find the classifier */
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Error Search for Classifier To be deleted failed");
break;
}
- //Delete This classifier
- DeleteClassifierRuleFromSF(Adapter,uiSearchRuleIndex,nClassifierIndex);
+ /* Delete This classifier */
+ DeleteClassifierRuleFromSF(Adapter, uiSearchRuleIndex, nClassifierIndex);
}
break;
-
default:
{
- //Invalid Action for classifier
+ /* Invalid Action for classifier */
break;
}
}
}
- //Repeat parsing Classification Entries to process PHS Rules
- for(nIndex=0; nIndex < psfLocalSet->u8TotalClassifiers; nIndex++)
- {
- psfCSType = &psfLocalSet->cConvergenceSLTypes[nIndex];
-
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "psfCSType->u8PhsDSCAction : 0x%x\n",
- psfCSType->u8PhsDSCAction );
+ /* Repeat parsing Classification Entries to process PHS Rules */
+ for (i = 0; i < psfLocalSet->u8TotalClassifiers; i++) {
+ psfCSType = &psfLocalSet->cConvergenceSLTypes[i];
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "psfCSType->u8PhsDSCAction : 0x%x\n", psfCSType->u8PhsDSCAction);
- switch (psfCSType->u8PhsDSCAction)
- {
+ switch (psfCSType->u8PhsDSCAction) {
case eDeleteAllPHSRules:
{
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"Deleting All PHS Rules For VCID: 0x%X\n",uVCID);
-
- //Delete All the PHS rules for this Service flow
-
- PhsDeleteSFRules(
- &Adapter->stBCMPhsContext,
- uVCID);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Deleting All PHS Rules For VCID: 0x%X\n", uVCID);
+ /* Delete All the PHS rules for this Service flow */
+ PhsDeleteSFRules(&Adapter->stBCMPhsContext, uVCID);
break;
}
case eDeletePHSRule:
{
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"PHS DSC Action = Delete PHS Rule \n");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "PHS DSC Action = Delete PHS Rule\n");
+
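+ /* Note: deletion is keyed by the classifier's u8AssociatedPHSI; cPhsRule.u8PHSI only gates the call */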
+ if (psfCSType->cPhsRule.u8PHSI)
+ PhsDeletePHSRule(&Adapter->stBCMPhsContext, uVCID, psfCSType->cCPacketClassificationRule.u8AssociatedPHSI);
- if(psfCSType->cPhsRule.u8PHSI)
- {
- PhsDeletePHSRule(
- &Adapter->stBCMPhsContext,
- uVCID,
- psfCSType->cCPacketClassificationRule.u8AssociatedPHSI);
- }
- else
- {
- //BCM_DEBUG_PRINT(CONN_MSG,("Error CPHSRule.PHSI is ZERO \n"));
- }
break;
}
- default :
+ default:
{
- if(ucDsxType == DSC_ACK)
- {
- //BCM_DEBUG_PRINT(CONN_MSG,("Invalid PHS DSC Action For DSC \n",psfCSType->cPhsRule.u8PHSI));
- break; //FOr DSC ACK Case PHS DSC Action must be in valid set
+ if (ucDsxType == DSC_ACK) {
+ /* BCM_DEBUG_PRINT(CONN_MSG,("Invalid PHS DSC Action For DSC\n",psfCSType->cPhsRule.u8PHSI)); */
+ break; /* For the DSC ACK case the PHS DSC action must be in the valid set */
}
}
- //Proceed To Add PHS rule for DSA_ACK case even if PHS DSC action is unspecified
- //No Break Here . Intentionally!
+ /* Proceed to add the PHS rule for the DSA_ACK case even if the PHS DSC action is unspecified */
+ /* No break here. Intentional fall-through! */
case eAddPHSRule:
case eSetPHSRule:
{
- if(psfCSType->cPhsRule.u8PHSI)
- {
- //Apply This PHS Rule to all classifiers whose Associated PHSI Match
+ if (psfCSType->cPhsRule.u8PHSI) {
+ /* Apply This PHS Rule to all classifiers whose Associated PHSI Match */
unsigned int uiClassifierIndex = 0;
- if(pstAddIndication->u8Direction == UPLINK_DIR )
- {
- for(uiClassifierIndex=0;uiClassifierIndex<MAX_CLASSIFIERS;uiClassifierIndex++)
- {
- if((Adapter->astClassifierTable[uiClassifierIndex].bUsed) &&
+ if (pstAddIndication->u8Direction == UPLINK_DIR) {
+ for (uiClassifierIndex = 0; uiClassifierIndex < MAX_CLASSIFIERS; uiClassifierIndex++) {
+ if ((Adapter->astClassifierTable[uiClassifierIndex].bUsed) &&
(Adapter->astClassifierTable[uiClassifierIndex].ulSFID == Adapter->PackInfo[uiSearchRuleIndex].ulSFID) &&
- (Adapter->astClassifierTable[uiClassifierIndex].u8AssociatedPHSI == psfCSType->cPhsRule.u8PHSI))
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Adding PHS Rule For Classifier : 0x%x cPhsRule.u8PHSI : 0x%x\n",
- Adapter->astClassifierTable[uiClassifierIndex].uiClassifierRuleIndex,
- psfCSType->cPhsRule.u8PHSI);
- //Update The PHS Rule for this classifier as Associated PHSI id defined
-
- //Copy the PHS Rule
- sPhsRule.u8PHSI = psfCSType->cPhsRule.u8PHSI;
- sPhsRule.u8PHSFLength = psfCSType->cPhsRule.u8PHSFLength;
+ (Adapter->astClassifierTable[uiClassifierIndex].u8AssociatedPHSI == psfCSType->cPhsRule.u8PHSI)) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,
+ "Adding PHS Rule For Classifier: 0x%x cPhsRule.u8PHSI: 0x%x\n",
+ Adapter->astClassifierTable[uiClassifierIndex].uiClassifierRuleIndex,
+ psfCSType->cPhsRule.u8PHSI);
+ /* Update the PHS rule for this classifier, since its Associated PHSI is defined */
+
+ /* Copy the PHS Rule */
+ sPhsRule.u8PHSI = psfCSType->cPhsRule.u8PHSI;
+ sPhsRule.u8PHSFLength = psfCSType->cPhsRule.u8PHSFLength;
sPhsRule.u8PHSMLength = psfCSType->cPhsRule.u8PHSMLength;
sPhsRule.u8PHSS = psfCSType->cPhsRule.u8PHSS;
sPhsRule.u8PHSV = psfCSType->cPhsRule.u8PHSV;
- memcpy(sPhsRule.u8PHSF,psfCSType->cPhsRule.u8PHSF,MAX_PHS_LENGTHS);
- memcpy(sPhsRule.u8PHSM,psfCSType->cPhsRule.u8PHSM,MAX_PHS_LENGTHS);
+ memcpy(sPhsRule.u8PHSF, psfCSType->cPhsRule.u8PHSF, MAX_PHS_LENGTHS);
+ memcpy(sPhsRule.u8PHSM, psfCSType->cPhsRule.u8PHSM, MAX_PHS_LENGTHS);
sPhsRule.u8RefCnt = 0;
sPhsRule.bUnclassifiedPHSRule = FALSE;
sPhsRule.PHSModifiedBytes = 0;
sPhsRule.PHSModifiedNumPackets = 0;
sPhsRule.PHSErrorNumPackets = 0;
- //bPHSRuleAssociated = TRUE;
- //Store The PHS Rule for this classifier
+ /* bPHSRuleAssociated = TRUE; */
+ /* Store The PHS Rule for this classifier */
PhsUpdateClassifierRule(
&Adapter->stBCMPhsContext,
@@ -848,184 +701,157 @@ static VOID CopyToAdapter( register PMINI_ADAPTER Adapter, /**<Pointer to the A
&sPhsRule,
Adapter->astClassifierTable[uiClassifierIndex].u8AssociatedPHSI);
- //Update PHS Rule For the Classifier
- if(sPhsRule.u8PHSI)
- {
+ /* Update PHS Rule For the Classifier */
+ if (sPhsRule.u8PHSI) {
Adapter->astClassifierTable[uiClassifierIndex].u32PHSRuleID = sPhsRule.u8PHSI;
- memcpy(&Adapter->astClassifierTable[uiClassifierIndex].sPhsRule,&sPhsRule,sizeof(S_PHS_RULE));
+ memcpy(&Adapter->astClassifierTable[uiClassifierIndex].sPhsRule, &sPhsRule, sizeof(S_PHS_RULE));
}
-
}
}
+ } else {
+ /* Error PHS Rule specified in signaling could not be applied to any classifier */
+
+ /* Copy the PHS Rule */
+ sPhsRule.u8PHSI = psfCSType->cPhsRule.u8PHSI;
+ sPhsRule.u8PHSFLength = psfCSType->cPhsRule.u8PHSFLength;
+ sPhsRule.u8PHSMLength = psfCSType->cPhsRule.u8PHSMLength;
+ sPhsRule.u8PHSS = psfCSType->cPhsRule.u8PHSS;
+ sPhsRule.u8PHSV = psfCSType->cPhsRule.u8PHSV;
+ memcpy(sPhsRule.u8PHSF, psfCSType->cPhsRule.u8PHSF, MAX_PHS_LENGTHS);
+ memcpy(sPhsRule.u8PHSM, psfCSType->cPhsRule.u8PHSM, MAX_PHS_LENGTHS);
+ sPhsRule.u8RefCnt = 0;
+ sPhsRule.bUnclassifiedPHSRule = TRUE;
+ sPhsRule.PHSModifiedBytes = 0;
+ sPhsRule.PHSModifiedNumPackets = 0;
+ sPhsRule.PHSErrorNumPackets = 0;
+ /* Store The PHS Rule for this classifier */
+
+ /*
+ * Pass u8PHSI as the argument instead of clsid: for DL with no
+ * classifier rule, clsid would be zero, so we could not have multiple
+ * PHS rules for the same SF. Passing u8PHSI supports multiple PHS rules.
+ */
+ PhsUpdateClassifierRule(
+ &Adapter->stBCMPhsContext,
+ uVCID,
+ sPhsRule.u8PHSI,
+ &sPhsRule,
+ sPhsRule.u8PHSI);
}
- else
- {
- //Error PHS Rule specified in signaling could not be applied to any classifier
-
- //Copy the PHS Rule
- sPhsRule.u8PHSI = psfCSType->cPhsRule.u8PHSI;
- sPhsRule.u8PHSFLength = psfCSType->cPhsRule.u8PHSFLength;
- sPhsRule.u8PHSMLength = psfCSType->cPhsRule.u8PHSMLength;
- sPhsRule.u8PHSS = psfCSType->cPhsRule.u8PHSS;
- sPhsRule.u8PHSV = psfCSType->cPhsRule.u8PHSV;
- memcpy(sPhsRule.u8PHSF,psfCSType->cPhsRule.u8PHSF,MAX_PHS_LENGTHS);
- memcpy(sPhsRule.u8PHSM,psfCSType->cPhsRule.u8PHSM,MAX_PHS_LENGTHS);
- sPhsRule.u8RefCnt = 0;
- sPhsRule.bUnclassifiedPHSRule = TRUE;
- sPhsRule.PHSModifiedBytes = 0;
- sPhsRule.PHSModifiedNumPackets = 0;
- sPhsRule.PHSErrorNumPackets = 0;
- //Store The PHS Rule for this classifier
-
- /*
- Passing the argument u8PHSI instead of clsid. Because for DL with no classifier rule,
- clsid will be zero hence we can't have multiple PHS rules for the same SF.
- To support multiple PHS rule, passing u8PHSI.
- */
-
- PhsUpdateClassifierRule(
- &Adapter->stBCMPhsContext,
- uVCID,
- sPhsRule.u8PHSI,
- &sPhsRule,
- sPhsRule.u8PHSI);
-
- }
-
}
}
break;
}
}
- if(psfLocalSet->u32MaxSustainedTrafficRate == 0 )
- {
- //No Rate Limit . Set Max Sustained Traffic Rate to Maximum
- Adapter->PackInfo[uiSearchRuleIndex].uiMaxAllowedRate =
- WIMAX_MAX_ALLOWED_RATE;
-
- }
- else if (ntohl(psfLocalSet->u32MaxSustainedTrafficRate) >
- WIMAX_MAX_ALLOWED_RATE)
- {
- //Too large Allowed Rate specified. Limiting to Wi Max Allowed rate
- Adapter->PackInfo[uiSearchRuleIndex].uiMaxAllowedRate =
- WIMAX_MAX_ALLOWED_RATE;
- }
- else
- {
- Adapter->PackInfo[uiSearchRuleIndex].uiMaxAllowedRate =
- ntohl(psfLocalSet->u32MaxSustainedTrafficRate);
+ if (psfLocalSet->u32MaxSustainedTrafficRate == 0) {
+ /* No rate limit. Set the max sustained traffic rate to the maximum */
+ Adapter->PackInfo[uiSearchRuleIndex].uiMaxAllowedRate = WIMAX_MAX_ALLOWED_RATE;
+ } else if (ntohl(psfLocalSet->u32MaxSustainedTrafficRate) > WIMAX_MAX_ALLOWED_RATE) {
+ /* Too large an allowed rate specified; limiting to the WiMAX maximum allowed rate */
+ Adapter->PackInfo[uiSearchRuleIndex].uiMaxAllowedRate = WIMAX_MAX_ALLOWED_RATE;
+ } else {
+ Adapter->PackInfo[uiSearchRuleIndex].uiMaxAllowedRate = ntohl(psfLocalSet->u32MaxSustainedTrafficRate);
}
Adapter->PackInfo[uiSearchRuleIndex].uiMaxLatency = ntohl(psfLocalSet->u32MaximumLatency);
-
- if(Adapter->PackInfo[uiSearchRuleIndex].uiMaxLatency == 0) /* 0 should be treated as infinite */
+ if (Adapter->PackInfo[uiSearchRuleIndex].uiMaxLatency == 0) /* 0 should be treated as infinite */
Adapter->PackInfo[uiSearchRuleIndex].uiMaxLatency = MAX_LATENCY_ALLOWED;
+ if (Adapter->PackInfo[uiSearchRuleIndex].u8QueueType == ERTPS ||
+ Adapter->PackInfo[uiSearchRuleIndex].u8QueueType == UGS)
+ UGIValue = ntohs(psfLocalSet->u16UnsolicitedGrantInterval);
- if(( Adapter->PackInfo[uiSearchRuleIndex].u8QueueType == ERTPS ||
- Adapter->PackInfo[uiSearchRuleIndex].u8QueueType == UGS ) )
- UGIValue = ntohs(psfLocalSet->u16UnsolicitedGrantInterval);
-
- if(UGIValue == 0)
+ if (UGIValue == 0)
UGIValue = DEFAULT_UG_INTERVAL;
/*
- For UGI based connections...
- DEFAULT_UGI_FACTOR*UGIInterval worth of data is the max token count at host...
- The extra amount of token is to ensure that a large amount of jitter won't have loss in throughput...
- In case of non-UGI based connection, 200 frames worth of data is the max token count at host...
- */
-
+ * For UGI-based connections,
+ * DEFAULT_UGI_FACTOR*UGIInterval worth of data is the max token count at the host.
+ * The extra tokens ensure that a large amount of jitter does not cause a loss in throughput.
+ * For non-UGI-based connections, 200 frames worth of data is the max token count at the host.
+ */
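+ /* UGIValue is an interval in ms, hence the division by 1000 */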
Adapter->PackInfo[uiSearchRuleIndex].uiMaxBucketSize =
- (DEFAULT_UGI_FACTOR*Adapter->PackInfo[uiSearchRuleIndex].uiMaxAllowedRate*UGIValue)/1000;
+ (DEFAULT_UGI_FACTOR*Adapter->PackInfo[uiSearchRuleIndex].uiMaxAllowedRate*UGIValue)/1000;
- if(Adapter->PackInfo[uiSearchRuleIndex].uiMaxBucketSize < WIMAX_MAX_MTU*8)
- {
+ if (Adapter->PackInfo[uiSearchRuleIndex].uiMaxBucketSize < WIMAX_MAX_MTU*8) {
UINT UGIFactor = 0;
/* Special Handling to ensure the biggest size of packet can go out from host to FW as follows:
- 1. Any packet from Host to FW can go out in different packet size.
- 2. So in case the Bucket count is smaller than MTU, the packets of size (Size > TokenCount), will get dropped.
- 3. We can allow packets of MaxSize from Host->FW that can go out from FW in multiple SDUs by fragmentation at Wimax Layer
- */
+ * 1. Any packet from the host to the FW can go out in a different packet size.
+ * 2. So if the bucket count is smaller than the MTU, packets larger than the token count will get dropped.
+ * 3. We can allow max-size packets from host to FW that leave the FW in multiple SDUs via fragmentation at the WiMAX layer.
+ */
UGIFactor = (Adapter->PackInfo[uiSearchRuleIndex].uiMaxLatency/UGIValue + 1);
- if(UGIFactor > DEFAULT_UGI_FACTOR)
- Adapter->PackInfo[uiSearchRuleIndex].uiMaxBucketSize =
- (UGIFactor*Adapter->PackInfo[uiSearchRuleIndex].uiMaxAllowedRate*UGIValue)/1000;
+ if (UGIFactor > DEFAULT_UGI_FACTOR)
+ Adapter->PackInfo[uiSearchRuleIndex].uiMaxBucketSize =
+ (UGIFactor*Adapter->PackInfo[uiSearchRuleIndex].uiMaxAllowedRate*UGIValue)/1000;
- if(Adapter->PackInfo[uiSearchRuleIndex].uiMaxBucketSize > WIMAX_MAX_MTU*8)
+ if (Adapter->PackInfo[uiSearchRuleIndex].uiMaxBucketSize > WIMAX_MAX_MTU*8)
Adapter->PackInfo[uiSearchRuleIndex].uiMaxBucketSize = WIMAX_MAX_MTU*8;
}
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "LAT: %d, UGI: %d\n", Adapter->PackInfo[uiSearchRuleIndex].uiMaxLatency, UGIValue);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "uiMaxAllowedRate: 0x%x, u32MaxSustainedTrafficRate: 0x%x ,uiMaxBucketSize: 0x%x",
+ Adapter->PackInfo[uiSearchRuleIndex].uiMaxAllowedRate,
+ ntohl(psfLocalSet->u32MaxSustainedTrafficRate),
+ Adapter->PackInfo[uiSearchRuleIndex].uiMaxBucketSize);
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"LAT: %d, UGI: %d \n", Adapter->PackInfo[uiSearchRuleIndex].uiMaxLatency, UGIValue);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"uiMaxAllowedRate: 0x%x, u32MaxSustainedTrafficRate: 0x%x ,uiMaxBucketSize: 0x%x",
- Adapter->PackInfo[uiSearchRuleIndex].uiMaxAllowedRate,
- ntohl(psfLocalSet->u32MaxSustainedTrafficRate),
- Adapter->PackInfo[uiSearchRuleIndex].uiMaxBucketSize);
-
- //copy the extended SF Parameters to Support MIBS
- CopyMIBSExtendedSFParameters(Adapter,psfLocalSet,uiSearchRuleIndex);
+ /* copy the extended SF Parameters to Support MIBS */
+ CopyMIBSExtendedSFParameters(Adapter, psfLocalSet, uiSearchRuleIndex);
- //store header suppression enabled flag per SF
+ /* store header suppression enabled flag per SF */
Adapter->PackInfo[uiSearchRuleIndex].bHeaderSuppressionEnabled =
- !(psfLocalSet->u8RequesttransmissionPolicy &
- MASK_DISABLE_HEADER_SUPPRESSION);
+ !(psfLocalSet->u8RequesttransmissionPolicy &
+ MASK_DISABLE_HEADER_SUPPRESSION);
kfree(Adapter->PackInfo[uiSearchRuleIndex].pstSFIndication);
Adapter->PackInfo[uiSearchRuleIndex].pstSFIndication = pstAddIndication;
- //Re Sort the SF list in PackInfo according to Traffic Priority
+ /* Re Sort the SF list in PackInfo according to Traffic Priority */
SortPackInfo(Adapter);
/* Re Sort the Classifier Rules table and re - arrange
- according to Classifier Rule Priority */
+ * according to Classifier Rule Priority
+ */
SortClassifiers(Adapter);
-
DumpPhsRules(&Adapter->stBCMPhsContext);
-
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"%s <=====", __FUNCTION__);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "%s <=====", __func__);
}
-
/***********************************************************************
-* Function - DumpCmControlPacket
-*
-* Description - This routinue Dumps the Contents of the AddIndication
-* Structure in the Connection Management Control Packet
-*
-* Parameter - pvBuffer: Pointer to the buffer containing the
-* AddIndication data.
-*
-* Returns - None
-*************************************************************************/
+ * Function - DumpCmControlPacket
+ *
+ * Description - This routine dumps the contents of the AddIndication
+ * Structure in the Connection Management Control Packet
+ *
+ * Parameter - pvBuffer: Pointer to the buffer containing the
+ * AddIndication data.
+ *
+ * Returns - None
+ *************************************************************************/
static VOID DumpCmControlPacket(PVOID pvBuffer)
{
- UINT uiLoopIndex;
- UINT nIndex;
- stLocalSFAddIndicationAlt *pstAddIndication;
- UINT nCurClassifierCnt;
- PMINI_ADAPTER Adapter = GET_BCM_ADAPTER(gblpnetdev);
+ int uiLoopIndex;
+ int nIndex;
+ stLocalSFAddIndicationAlt *pstAddIndication;
+ UINT nCurClassifierCnt;
+ PMINI_ADAPTER Adapter = GET_BCM_ADAPTER(gblpnetdev);
pstAddIndication = (stLocalSFAddIndicationAlt *)pvBuffer;
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "======>");
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8Type : 0x%X",pstAddIndication->u8Type);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8Direction : 0x%X",pstAddIndication->u8Direction);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16TID: 0x%X", ntohs(pstAddIndication->u16TID));
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16CID : 0x%X",ntohs(pstAddIndication->u16CID));
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16VCID : 0x%X",ntohs(pstAddIndication->u16VCID));
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " AuthorizedSet--->");
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32SFID : 0x%X",htonl(pstAddIndication->sfAuthorizedSet.u32SFID));
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16CID : 0x%X",htons(pstAddIndication->sfAuthorizedSet.u16CID));
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ServiceClassNameLength : 0x%X",
- pstAddIndication->sfAuthorizedSet.u8ServiceClassNameLength);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ServiceClassName : 0x%X ,0x%X , 0x%X, 0x%X, 0x%X, 0x%X",
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "======>");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8Type: 0x%X", pstAddIndication->u8Type);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8Direction: 0x%X", pstAddIndication->u8Direction);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16TID: 0x%X", ntohs(pstAddIndication->u16TID));
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16CID: 0x%X", ntohs(pstAddIndication->u16CID));
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16VCID: 0x%X", ntohs(pstAddIndication->u16VCID));
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " AuthorizedSet--->");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32SFID: 0x%X", htonl(pstAddIndication->sfAuthorizedSet.u32SFID));
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16CID: 0x%X", htons(pstAddIndication->sfAuthorizedSet.u16CID));
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ServiceClassNameLength: 0x%X",
+ pstAddIndication->sfAuthorizedSet.u8ServiceClassNameLength);
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ServiceClassName: 0x%X, 0x%X, 0x%X, 0x%X, 0x%X, 0x%X",
pstAddIndication->sfAuthorizedSet.u8ServiceClassName[0],
pstAddIndication->sfAuthorizedSet.u8ServiceClassName[1],
pstAddIndication->sfAuthorizedSet.u8ServiceClassName[2],
@@ -1033,207 +859,170 @@ static VOID DumpCmControlPacket(PVOID pvBuffer)
pstAddIndication->sfAuthorizedSet.u8ServiceClassName[4],
pstAddIndication->sfAuthorizedSet.u8ServiceClassName[5]);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8MBSService : 0x%X",
- pstAddIndication->sfAuthorizedSet.u8MBSService);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8QosParamSet : 0x%X",
- pstAddIndication->sfAuthorizedSet.u8QosParamSet);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8TrafficPriority : 0x%X, %p",
- pstAddIndication->sfAuthorizedSet.u8TrafficPriority, &pstAddIndication->sfAuthorizedSet.u8TrafficPriority);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32MaxSustainedTrafficRate : 0x%X 0x%p",
- pstAddIndication->sfAuthorizedSet.u32MaxSustainedTrafficRate,
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8MBSService: 0x%X", pstAddIndication->sfAuthorizedSet.u8MBSService);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8QosParamSet: 0x%X", pstAddIndication->sfAuthorizedSet.u8QosParamSet);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8TrafficPriority: 0x%X, %p",
+ pstAddIndication->sfAuthorizedSet.u8TrafficPriority, &pstAddIndication->sfAuthorizedSet.u8TrafficPriority);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32MaxSustainedTrafficRate: 0x%X 0x%p",
+ pstAddIndication->sfAuthorizedSet.u32MaxSustainedTrafficRate,
&pstAddIndication->sfAuthorizedSet.u32MaxSustainedTrafficRate);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32MaxTrafficBurst : 0x%X",
- pstAddIndication->sfAuthorizedSet.u32MaxTrafficBurst);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32MinReservedTrafficRate : 0x%X",
- pstAddIndication->sfAuthorizedSet.u32MinReservedTrafficRate);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8VendorSpecificQoSParamLength : 0x%X",
- pstAddIndication->sfAuthorizedSet.u8VendorSpecificQoSParamLength);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8VendorSpecificQoSParam : 0x%X",
- pstAddIndication->sfAuthorizedSet.u8VendorSpecificQoSParam[0]);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ServiceFlowSchedulingType : 0x%X",
- pstAddIndication->sfAuthorizedSet.u8ServiceFlowSchedulingType);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32ToleratedJitter : 0x%X",
- pstAddIndication->sfAuthorizedSet.u32ToleratedJitter);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32MaximumLatency : 0x%X",
- pstAddIndication->sfAuthorizedSet.u32MaximumLatency);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8FixedLengthVSVariableLengthSDUIndicator: 0x%X",
- pstAddIndication->sfAuthorizedSet.u8FixedLengthVSVariableLengthSDUIndicator);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8SDUSize : 0x%X",
- pstAddIndication->sfAuthorizedSet.u8SDUSize);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16TargetSAID : 0x%X",
- pstAddIndication->sfAuthorizedSet.u16TargetSAID);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ARQEnable : 0x%X",
- pstAddIndication->sfAuthorizedSet.u8ARQEnable);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQWindowSize : 0x%X",
- pstAddIndication->sfAuthorizedSet.u16ARQWindowSize);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQRetryTxTimeOut : 0x%X",
- pstAddIndication->sfAuthorizedSet.u16ARQRetryTxTimeOut);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQRetryRxTimeOut : 0x%X",
- pstAddIndication->sfAuthorizedSet.u16ARQRetryRxTimeOut);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQBlockLifeTime : 0x%X",
- pstAddIndication->sfAuthorizedSet.u16ARQBlockLifeTime);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQSyncLossTimeOut : 0x%X",
- pstAddIndication->sfAuthorizedSet.u16ARQSyncLossTimeOut);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ARQDeliverInOrder : 0x%X",
- pstAddIndication->sfAuthorizedSet.u8ARQDeliverInOrder);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQRxPurgeTimeOut : 0x%X",
- pstAddIndication->sfAuthorizedSet.u16ARQRxPurgeTimeOut);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQBlockSize : 0x%X",
- pstAddIndication->sfAuthorizedSet.u16ARQBlockSize);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8CSSpecification : 0x%X",
- pstAddIndication->sfAuthorizedSet.u8CSSpecification);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8TypeOfDataDeliveryService : 0x%X",
- pstAddIndication->sfAuthorizedSet.u8TypeOfDataDeliveryService);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16SDUInterArrivalTime : 0x%X",
- pstAddIndication->sfAuthorizedSet.u16SDUInterArrivalTime);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16TimeBase : 0x%X",
- pstAddIndication->sfAuthorizedSet.u16TimeBase);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8PagingPreference : 0x%X",
- pstAddIndication->sfAuthorizedSet.u8PagingPreference);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16UnsolicitedPollingInterval : 0x%X",
- pstAddIndication->sfAuthorizedSet.u16UnsolicitedPollingInterval);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "sfAuthorizedSet.u8HARQChannelMapping %x %x %x ",
- *(unsigned int*)pstAddIndication->sfAuthorizedSet.u8HARQChannelMapping,
- *(unsigned int*)&pstAddIndication->sfAuthorizedSet.u8HARQChannelMapping[4],
- *(USHORT*) &pstAddIndication->sfAuthorizedSet.u8HARQChannelMapping[8]);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8TrafficIndicationPreference : 0x%X",
- pstAddIndication->sfAuthorizedSet.u8TrafficIndicationPreference);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " Total Classifiers Received : 0x%X",pstAddIndication->sfAuthorizedSet.u8TotalClassifiers);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32MaxTrafficBurst: 0x%X", pstAddIndication->sfAuthorizedSet.u32MaxTrafficBurst);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32MinReservedTrafficRate : 0x%X",
+ pstAddIndication->sfAuthorizedSet.u32MinReservedTrafficRate);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8VendorSpecificQoSParamLength: 0x%X",
+ pstAddIndication->sfAuthorizedSet.u8VendorSpecificQoSParamLength);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8VendorSpecificQoSParam: 0x%X",
+ pstAddIndication->sfAuthorizedSet.u8VendorSpecificQoSParam[0]);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ServiceFlowSchedulingType: 0x%X",
+ pstAddIndication->sfAuthorizedSet.u8ServiceFlowSchedulingType);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32ToleratedJitter: 0x%X", pstAddIndication->sfAuthorizedSet.u32ToleratedJitter);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32MaximumLatency: 0x%X", pstAddIndication->sfAuthorizedSet.u32MaximumLatency);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8FixedLengthVSVariableLengthSDUIndicator: 0x%X",
+ pstAddIndication->sfAuthorizedSet.u8FixedLengthVSVariableLengthSDUIndicator);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8SDUSize: 0x%X", pstAddIndication->sfAuthorizedSet.u8SDUSize);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16TargetSAID: 0x%X", pstAddIndication->sfAuthorizedSet.u16TargetSAID);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ARQEnable: 0x%X", pstAddIndication->sfAuthorizedSet.u8ARQEnable);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQWindowSize: 0x%X", pstAddIndication->sfAuthorizedSet.u16ARQWindowSize);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQRetryTxTimeOut: 0x%X", pstAddIndication->sfAuthorizedSet.u16ARQRetryTxTimeOut);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQRetryRxTimeOut: 0x%X", pstAddIndication->sfAuthorizedSet.u16ARQRetryRxTimeOut);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQBlockLifeTime: 0x%X", pstAddIndication->sfAuthorizedSet.u16ARQBlockLifeTime);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQSyncLossTimeOut: 0x%X", pstAddIndication->sfAuthorizedSet.u16ARQSyncLossTimeOut);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ARQDeliverInOrder: 0x%X", pstAddIndication->sfAuthorizedSet.u8ARQDeliverInOrder);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQRxPurgeTimeOut: 0x%X", pstAddIndication->sfAuthorizedSet.u16ARQRxPurgeTimeOut);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQBlockSize: 0x%X", pstAddIndication->sfAuthorizedSet.u16ARQBlockSize);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8CSSpecification: 0x%X", pstAddIndication->sfAuthorizedSet.u8CSSpecification);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8TypeOfDataDeliveryService: 0x%X",
+ pstAddIndication->sfAuthorizedSet.u8TypeOfDataDeliveryService);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16SDUInterArrivalTime: 0x%X", pstAddIndication->sfAuthorizedSet.u16SDUInterArrivalTime);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16TimeBase: 0x%X", pstAddIndication->sfAuthorizedSet.u16TimeBase);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8PagingPreference: 0x%X", pstAddIndication->sfAuthorizedSet.u8PagingPreference);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16UnsolicitedPollingInterval: 0x%X",
+ pstAddIndication->sfAuthorizedSet.u16UnsolicitedPollingInterval);
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "sfAuthorizedSet.u8HARQChannelMapping %x %x %x ",
+ *(unsigned int *)pstAddIndication->sfAuthorizedSet.u8HARQChannelMapping,
+ *(unsigned int *)&pstAddIndication->sfAuthorizedSet.u8HARQChannelMapping[4],
+ *(USHORT *)&pstAddIndication->sfAuthorizedSet.u8HARQChannelMapping[8]);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8TrafficIndicationPreference: 0x%X",
+ pstAddIndication->sfAuthorizedSet.u8TrafficIndicationPreference);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " Total Classifiers Received: 0x%X", pstAddIndication->sfAuthorizedSet.u8TotalClassifiers);
nCurClassifierCnt = pstAddIndication->sfAuthorizedSet.u8TotalClassifiers;
-
- if(nCurClassifierCnt > MAX_CLASSIFIERS_IN_SF)
- {
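+ /* Clamp to the capacity of cConvergenceSLTypes[] so the dump below stays within bounds */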
+ if (nCurClassifierCnt > MAX_CLASSIFIERS_IN_SF)
nCurClassifierCnt = MAX_CLASSIFIERS_IN_SF;
- }
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "pstAddIndication->sfAuthorizedSet.bValid %d", pstAddIndication->sfAuthorizedSet.bValid);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "pstAddIndication->sfAuthorizedSet.u16MacOverhead %x", pstAddIndication->sfAuthorizedSet.u16MacOverhead);
- if(!pstAddIndication->sfAuthorizedSet.bValid)
- pstAddIndication->sfAuthorizedSet.bValid=1;
- for(nIndex = 0 ; nIndex < nCurClassifierCnt ; nIndex++)
- {
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "pstAddIndication->sfAuthorizedSet.bValid %d", pstAddIndication->sfAuthorizedSet.bValid);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "pstAddIndication->sfAuthorizedSet.u16MacOverhead %x", pstAddIndication->sfAuthorizedSet.u16MacOverhead);
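+ /* Treat the authorized set as valid even when the BS left bValid clear */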
+ if (!pstAddIndication->sfAuthorizedSet.bValid)
+ pstAddIndication->sfAuthorizedSet.bValid = 1;
+ for (nIndex = 0; nIndex < nCurClassifierCnt; nIndex++) {
stConvergenceSLTypes *psfCSType = NULL;
psfCSType = &pstAddIndication->sfAuthorizedSet.cConvergenceSLTypes[nIndex];
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "psfCSType = %p", psfCSType);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "CCPacketClassificationRuleSI====>");
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ClassifierRulePriority :0x%X ",
- psfCSType->cCPacketClassificationRule.u8ClassifierRulePriority);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPTypeOfServiceLength :0x%X ",
- psfCSType->cCPacketClassificationRule.u8IPTypeOfServiceLength);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPTypeOfService[3] :0x%X ,0x%X ,0x%X ",
- psfCSType->cCPacketClassificationRule.u8IPTypeOfService[0],
- psfCSType->cCPacketClassificationRule.u8IPTypeOfService[1],
- psfCSType->cCPacketClassificationRule.u8IPTypeOfService[2]);
-
- for(uiLoopIndex=0; uiLoopIndex < 1; uiLoopIndex++)
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8Protocol : 0x%02X ",
- psfCSType->cCPacketClassificationRule.u8Protocol);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPMaskedSourceAddressLength :0x%X ",
- psfCSType->cCPacketClassificationRule.u8IPMaskedSourceAddressLength);
-
- for(uiLoopIndex=0; uiLoopIndex < 32; uiLoopIndex++)
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPMaskedSourceAddress[32] : 0x%02X ",
- psfCSType->cCPacketClassificationRule.u8IPMaskedSourceAddress[uiLoopIndex]);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPDestinationAddressLength : 0x%X ",
- psfCSType->cCPacketClassificationRule.u8IPDestinationAddressLength);
-
- for(uiLoopIndex=0; uiLoopIndex < 32; uiLoopIndex++)
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPDestinationAddress[32] : 0x%02X ",
- psfCSType->cCPacketClassificationRule.u8IPDestinationAddress[uiLoopIndex]);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ProtocolSourcePortRangeLength:0x%X ",
- psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRangeLength);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ProtocolSourcePortRange[4]: 0x%02X ,0x%02X ,0x%02X ,0x%02X ",
- psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRange[0],
- psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRange[1],
- psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRange[2],
- psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRange[3]);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ProtocolDestPortRangeLength : 0x%02X ",
- psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRangeLength);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ProtocolDestPortRange[4]: 0x%02X ,0x%02X ,0x%02X ,0x%02X ",
- psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange[0],
- psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange[1],
- psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange[2],
- psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange[3]);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8EthernetDestMacAddressLength : 0x%02X ",
- psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddressLength);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8EthernetDestMacAddress[6] : 0x %02X %02X %02X %02X %02X %02X",
- psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[0],
- psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[1],
- psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[2],
- psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[3],
- psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[4],
- psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[5]);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8EthernetSourceMACAddressLength : 0x%02X ",
- psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddressLength);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8EthernetSourceMACAddress[6] : 0x %02X %02X %02X %02X %02X %02X",
- psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[0],
- psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[1],
- psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[2],
- psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[3],
- psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[4],
- psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[5]);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8EthertypeLength : 0x%02X ",
- psfCSType->cCPacketClassificationRule.u8EthertypeLength);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8Ethertype[3] : 0x%02X ,0x%02X ,0x%02X ",
- psfCSType->cCPacketClassificationRule.u8Ethertype[0],
- psfCSType->cCPacketClassificationRule.u8Ethertype[1],
- psfCSType->cCPacketClassificationRule.u8Ethertype[2]);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16UserPriority : 0x%X ",
- psfCSType->cCPacketClassificationRule.u16UserPriority);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16VLANID : 0x%X ",
- psfCSType->cCPacketClassificationRule.u16VLANID);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8AssociatedPHSI : 0x%02X ",
- psfCSType->cCPacketClassificationRule.u8AssociatedPHSI);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16PacketClassificationRuleIndex : 0x%X ",
- psfCSType->cCPacketClassificationRule.u16PacketClassificationRuleIndex);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8VendorSpecificClassifierParamLength : 0x%X ",
- psfCSType->cCPacketClassificationRule.u8VendorSpecificClassifierParamLength);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8VendorSpecificClassifierParam[1] : 0x%X ",
- psfCSType->cCPacketClassificationRule.u8VendorSpecificClassifierParam[0]);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "psfCSType = %p", psfCSType);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "CCPacketClassificationRuleSI====>");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ClassifierRulePriority: 0x%X ",
+ psfCSType->cCPacketClassificationRule.u8ClassifierRulePriority);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPTypeOfServiceLength: 0x%X ",
+ psfCSType->cCPacketClassificationRule.u8IPTypeOfServiceLength);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPTypeOfService[3]: 0x%X, 0x%X, 0x%X",
+ psfCSType->cCPacketClassificationRule.u8IPTypeOfService[0],
+ psfCSType->cCPacketClassificationRule.u8IPTypeOfService[1],
+ psfCSType->cCPacketClassificationRule.u8IPTypeOfService[2]);
+
+ for (uiLoopIndex = 0; uiLoopIndex < 1; uiLoopIndex++)
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8Protocol: 0x%02X ",
+ psfCSType->cCPacketClassificationRule.u8Protocol);
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPMaskedSourceAddressLength: 0x%X ",
+ psfCSType->cCPacketClassificationRule.u8IPMaskedSourceAddressLength);
+
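+ /* the full 32-byte buffer is dumped, independent of the length field printed above */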
+ for (uiLoopIndex = 0; uiLoopIndex < 32; uiLoopIndex++)
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPMaskedSourceAddress[32]: 0x%02X ",
+ psfCSType->cCPacketClassificationRule.u8IPMaskedSourceAddress[uiLoopIndex]);
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPDestinationAddressLength: 0x%X ",
+ psfCSType->cCPacketClassificationRule.u8IPDestinationAddressLength);
+
+ for (uiLoopIndex = 0; uiLoopIndex < 32; uiLoopIndex++)
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPDestinationAddress[32]: 0x%02X ",
+ psfCSType->cCPacketClassificationRule.u8IPDestinationAddress[uiLoopIndex]);
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ProtocolSourcePortRangeLength:0x%X ",
+ psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRangeLength);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ProtocolSourcePortRange[4]: 0x%02X ,0x%02X ,0x%02X ,0x%02X ",
+ psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRange[0],
+ psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRange[1],
+ psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRange[2],
+ psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRange[3]);
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ProtocolDestPortRangeLength: 0x%02X ",
+ psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRangeLength);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ProtocolDestPortRange[4]: 0x%02X ,0x%02X ,0x%02X ,0x%02X ",
+ psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange[0],
+ psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange[1],
+ psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange[2],
+ psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange[3]);
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8EthernetDestMacAddressLength: 0x%02X ",
+ psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddressLength);
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8EthernetDestMacAddress[6]: 0x %02X %02X %02X %02X %02X %02X",
+ psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[0],
+ psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[1],
+ psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[2],
+ psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[3],
+ psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[4],
+ psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[5]);
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8EthernetSourceMACAddressLength: 0x%02X ",
+ psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddressLength);
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8EthernetSourceMACAddress[6]: 0x %02X %02X %02X %02X %02X %02X",
+ psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[0],
+ psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[1],
+ psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[2],
+ psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[3],
+ psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[4],
+ psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[5]);
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8EthertypeLength: 0x%02X ",
+ psfCSType->cCPacketClassificationRule.u8EthertypeLength);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8Ethertype[3]: 0x%02X ,0x%02X ,0x%02X ",
+ psfCSType->cCPacketClassificationRule.u8Ethertype[0],
+ psfCSType->cCPacketClassificationRule.u8Ethertype[1],
+ psfCSType->cCPacketClassificationRule.u8Ethertype[2]);
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16UserPriority: 0x%X ", psfCSType->cCPacketClassificationRule.u16UserPriority);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16VLANID: 0x%X ", psfCSType->cCPacketClassificationRule.u16VLANID);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8AssociatedPHSI: 0x%02X ", psfCSType->cCPacketClassificationRule.u8AssociatedPHSI);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16PacketClassificationRuleIndex: 0x%X ",
+ psfCSType->cCPacketClassificationRule.u16PacketClassificationRuleIndex);
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8VendorSpecificClassifierParamLength: 0x%X ",
+ psfCSType->cCPacketClassificationRule.u8VendorSpecificClassifierParamLength);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8VendorSpecificClassifierParam[1]: 0x%X ",
+ psfCSType->cCPacketClassificationRule.u8VendorSpecificClassifierParam[0]);
#ifdef VERSION_D5
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPv6FlowLableLength :0x%X ",
- psfCSType->cCPacketClassificationRule.u8IPv6FlowLableLength);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPv6FlowLable[6] : 0x %02X %02X %02X %02X %02X %02X ",
- psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[0],
- psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[1],
- psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[2],
- psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[3],
- psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[4],
- psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[5]);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPv6FlowLableLength: 0x%X ",
+ psfCSType->cCPacketClassificationRule.u8IPv6FlowLableLength);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPv6FlowLable[6]: 0x %02X %02X %02X %02X %02X %02X ",
+ psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[0],
+ psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[1],
+ psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[2],
+ psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[3],
+ psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[4],
+ psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[5]);
#endif
}
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "bValid : 0x%02X",pstAddIndication->sfAuthorizedSet.bValid);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "AdmittedSet--->");
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32SFID : 0x%X",pstAddIndication->sfAdmittedSet.u32SFID);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16CID : 0x%X",pstAddIndication->sfAdmittedSet.u16CID);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ServiceClassNameLength : 0x%X",
- pstAddIndication->sfAdmittedSet.u8ServiceClassNameLength);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ServiceClassName : 0x %02X %02X %02X %02X %02X %02X",
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "bValid: 0x%02X", pstAddIndication->sfAuthorizedSet.bValid);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "AdmittedSet--->");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32SFID: 0x%X", pstAddIndication->sfAdmittedSet.u32SFID);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16CID: 0x%X", pstAddIndication->sfAdmittedSet.u16CID);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ServiceClassNameLength: 0x%X",
+ pstAddIndication->sfAdmittedSet.u8ServiceClassNameLength);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ServiceClassName: 0x %02X %02X %02X %02X %02X %02X",
pstAddIndication->sfAdmittedSet.u8ServiceClassName[0],
pstAddIndication->sfAdmittedSet.u8ServiceClassName[1],
pstAddIndication->sfAdmittedSet.u8ServiceClassName[2],
@@ -1241,429 +1030,338 @@ static VOID DumpCmControlPacket(PVOID pvBuffer)
pstAddIndication->sfAdmittedSet.u8ServiceClassName[4],
pstAddIndication->sfAdmittedSet.u8ServiceClassName[5]);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8MBSService : 0x%02X",
- pstAddIndication->sfAdmittedSet.u8MBSService);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8QosParamSet : 0x%02X",
- pstAddIndication->sfAdmittedSet.u8QosParamSet);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8TrafficPriority : 0x%02X",
- pstAddIndication->sfAdmittedSet.u8TrafficPriority);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32MaxTrafficBurst : 0x%X",
- pstAddIndication->sfAdmittedSet.u32MaxTrafficBurst);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32MinReservedTrafficRate : 0x%X",
- pstAddIndication->sfAdmittedSet.u32MinReservedTrafficRate);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8VendorSpecificQoSParamLength : 0x%02X",
- pstAddIndication->sfAdmittedSet.u8VendorSpecificQoSParamLength);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8VendorSpecificQoSParam : 0x%02X",
- pstAddIndication->sfAdmittedSet.u8VendorSpecificQoSParam[0]);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ServiceFlowSchedulingType : 0x%02X",
- pstAddIndication->sfAdmittedSet.u8ServiceFlowSchedulingType);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32ToleratedJitter : 0x%X",
- pstAddIndication->sfAdmittedSet.u32ToleratedJitter);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32MaximumLatency : 0x%X",
- pstAddIndication->sfAdmittedSet.u32MaximumLatency);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8FixedLengthVSVariableLengthSDUIndicator: 0x%02X",
- pstAddIndication->sfAdmittedSet.u8FixedLengthVSVariableLengthSDUIndicator);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8SDUSize : 0x%02X",
- pstAddIndication->sfAdmittedSet.u8SDUSize);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16TargetSAID : 0x%02X",
- pstAddIndication->sfAdmittedSet.u16TargetSAID);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ARQEnable : 0x%02X",
- pstAddIndication->sfAdmittedSet.u8ARQEnable);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQWindowSize : 0x%X",
- pstAddIndication->sfAdmittedSet.u16ARQWindowSize);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQRetryTxTimeOut : 0x%X",
- pstAddIndication->sfAdmittedSet.u16ARQRetryTxTimeOut);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQRetryRxTimeOut : 0x%X",
- pstAddIndication->sfAdmittedSet.u16ARQRetryRxTimeOut);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQBlockLifeTime : 0x%X",
- pstAddIndication->sfAdmittedSet.u16ARQBlockLifeTime);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQSyncLossTimeOut : 0x%X",
- pstAddIndication->sfAdmittedSet.u16ARQSyncLossTimeOut);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ARQDeliverInOrder : 0x%02X",
- pstAddIndication->sfAdmittedSet.u8ARQDeliverInOrder);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQRxPurgeTimeOut : 0x%X",
- pstAddIndication->sfAdmittedSet.u16ARQRxPurgeTimeOut);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQBlockSize : 0x%X",
- pstAddIndication->sfAdmittedSet.u16ARQBlockSize);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8CSSpecification : 0x%02X",
- pstAddIndication->sfAdmittedSet.u8CSSpecification);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8TypeOfDataDeliveryService : 0x%02X",
- pstAddIndication->sfAdmittedSet.u8TypeOfDataDeliveryService);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16SDUInterArrivalTime : 0x%X",
- pstAddIndication->sfAdmittedSet.u16SDUInterArrivalTime);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16TimeBase : 0x%X",
- pstAddIndication->sfAdmittedSet.u16TimeBase);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8PagingPreference : 0x%X",
- pstAddIndication->sfAdmittedSet.u8PagingPreference);
-
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8TrafficIndicationPreference : 0x%02X",
- pstAddIndication->sfAdmittedSet.u8TrafficIndicationPreference);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " Total Classifiers Received : 0x%X",pstAddIndication->sfAdmittedSet.u8TotalClassifiers);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8MBSService: 0x%02X", pstAddIndication->sfAdmittedSet.u8MBSService);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8QosParamSet: 0x%02X", pstAddIndication->sfAdmittedSet.u8QosParamSet);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8TrafficPriority: 0x%02X", pstAddIndication->sfAdmittedSet.u8TrafficPriority);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32MaxTrafficBurst: 0x%X", pstAddIndication->sfAdmittedSet.u32MaxTrafficBurst);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32MinReservedTrafficRate: 0x%X",
+ pstAddIndication->sfAdmittedSet.u32MinReservedTrafficRate);
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8VendorSpecificQoSParamLength: 0x%02X",
+ pstAddIndication->sfAdmittedSet.u8VendorSpecificQoSParamLength);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8VendorSpecificQoSParam: 0x%02X",
+ pstAddIndication->sfAdmittedSet.u8VendorSpecificQoSParam[0]);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ServiceFlowSchedulingType: 0x%02X",
+ pstAddIndication->sfAdmittedSet.u8ServiceFlowSchedulingType);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32ToleratedJitter: 0x%X", pstAddIndication->sfAdmittedSet.u32ToleratedJitter);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32MaximumLatency: 0x%X", pstAddIndication->sfAdmittedSet.u32MaximumLatency);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8FixedLengthVSVariableLengthSDUIndicator: 0x%02X",
+ pstAddIndication->sfAdmittedSet.u8FixedLengthVSVariableLengthSDUIndicator);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8SDUSize: 0x%02X", pstAddIndication->sfAdmittedSet.u8SDUSize);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16TargetSAID: 0x%02X", pstAddIndication->sfAdmittedSet.u16TargetSAID);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ARQEnable: 0x%02X", pstAddIndication->sfAdmittedSet.u8ARQEnable);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQWindowSize: 0x%X", pstAddIndication->sfAdmittedSet.u16ARQWindowSize);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQRetryTxTimeOut: 0x%X", pstAddIndication->sfAdmittedSet.u16ARQRetryTxTimeOut);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQRetryRxTimeOut: 0x%X", pstAddIndication->sfAdmittedSet.u16ARQRetryRxTimeOut);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQBlockLifeTime: 0x%X", pstAddIndication->sfAdmittedSet.u16ARQBlockLifeTime);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQSyncLossTimeOut: 0x%X", pstAddIndication->sfAdmittedSet.u16ARQSyncLossTimeOut);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ARQDeliverInOrder: 0x%02X", pstAddIndication->sfAdmittedSet.u8ARQDeliverInOrder);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQRxPurgeTimeOut: 0x%X", pstAddIndication->sfAdmittedSet.u16ARQRxPurgeTimeOut);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQBlockSize: 0x%X", pstAddIndication->sfAdmittedSet.u16ARQBlockSize);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8CSSpecification: 0x%02X", pstAddIndication->sfAdmittedSet.u8CSSpecification);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8TypeOfDataDeliveryService: 0x%02X",
+ pstAddIndication->sfAdmittedSet.u8TypeOfDataDeliveryService);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16SDUInterArrivalTime: 0x%X", pstAddIndication->sfAdmittedSet.u16SDUInterArrivalTime);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16TimeBase: 0x%X", pstAddIndication->sfAdmittedSet.u16TimeBase);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8PagingPreference: 0x%X", pstAddIndication->sfAdmittedSet.u8PagingPreference);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8TrafficIndicationPreference: 0x%02X",
+ pstAddIndication->sfAdmittedSet.u8TrafficIndicationPreference);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " Total Classifiers Received: 0x%X", pstAddIndication->sfAdmittedSet.u8TotalClassifiers);
nCurClassifierCnt = pstAddIndication->sfAdmittedSet.u8TotalClassifiers;
-
- if(nCurClassifierCnt > MAX_CLASSIFIERS_IN_SF)
- {
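+ /* clamp so we never walk past the driver's classifier table */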
+ if (nCurClassifierCnt > MAX_CLASSIFIERS_IN_SF)
nCurClassifierCnt = MAX_CLASSIFIERS_IN_SF;
- }
-
-
- for(nIndex = 0 ; nIndex < nCurClassifierCnt ; nIndex++)
- {
+ for (nIndex = 0; nIndex < nCurClassifierCnt; nIndex++) {
stConvergenceSLTypes *psfCSType = NULL;
- psfCSType = &pstAddIndication->sfAdmittedSet.cConvergenceSLTypes[nIndex];
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " CCPacketClassificationRuleSI====>");
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ClassifierRulePriority :0x%02X ",
- psfCSType->cCPacketClassificationRule.u8ClassifierRulePriority);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPTypeOfServiceLength :0x%02X",
- psfCSType->cCPacketClassificationRule.u8IPTypeOfServiceLength);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPTypeOfService[3] :0x%02X %02X %02X",
- psfCSType->cCPacketClassificationRule.u8IPTypeOfService[0],
- psfCSType->cCPacketClassificationRule.u8IPTypeOfService[1],
- psfCSType->cCPacketClassificationRule.u8IPTypeOfService[2]);
- for(uiLoopIndex=0; uiLoopIndex < 1; uiLoopIndex++)
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8Protocol: 0x%02X ",
- psfCSType->cCPacketClassificationRule.u8Protocol);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPMaskedSourceAddressLength :0x%02X ",
- psfCSType->cCPacketClassificationRule.u8IPMaskedSourceAddressLength);
-
- for(uiLoopIndex=0; uiLoopIndex < 32; uiLoopIndex++)
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPMaskedSourceAddress[32] : 0x%02X ",
- psfCSType->cCPacketClassificationRule.u8IPMaskedSourceAddress[uiLoopIndex]);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPDestinationAddressLength : 0x%02X ",
- psfCSType->cCPacketClassificationRule.u8IPDestinationAddressLength);
-
- for(uiLoopIndex=0; uiLoopIndex < 32; uiLoopIndex++)
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPDestinationAddress[32] : 0x%02X ",
- psfCSType->cCPacketClassificationRule.u8IPDestinationAddress[uiLoopIndex]);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ProtocolSourcePortRangeLength : 0x%02X ",
- psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRangeLength);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ProtocolSourcePortRange[4] : 0x %02X %02X %02X %02X ",
- psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRange[0],
- psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRange[1],
- psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRange[2],
- psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRange[3]);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ProtocolDestPortRangeLength : 0x%02X ",
- psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRangeLength);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ProtocolDestPortRange[4] : 0x %02X %02X %02X %02X ",
- psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange[0],
- psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange[1],
- psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange[2],
- psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange[3]);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8EthernetDestMacAddressLength : 0x%02X ",
- psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddressLength);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8EthernetDestMacAddress[6] : 0x %02X %02X %02X %02X %02X %02X",
- psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[0],
- psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[1],
- psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[2],
- psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[3],
- psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[4],
- psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[5]);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8EthernetSourceMACAddressLength : 0x%02X ",
- psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddressLength);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8EthernetSourceMACAddress[6] : 0x %02X %02X %02X %02X %02X %02X",
- psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[0],
- psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[1],
- psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[2],
- psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[3],
- psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[4],
- psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[5]);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8EthertypeLength : 0x%02X ",
- psfCSType->cCPacketClassificationRule.u8EthertypeLength);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8Ethertype[3] : 0x%02X %02X %02X",
- psfCSType->cCPacketClassificationRule.u8Ethertype[0],
- psfCSType->cCPacketClassificationRule.u8Ethertype[1],
- psfCSType->cCPacketClassificationRule.u8Ethertype[2]);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16UserPriority : 0x%X ",
- psfCSType->cCPacketClassificationRule.u16UserPriority);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16VLANID : 0x%X ",
- psfCSType->cCPacketClassificationRule.u16VLANID);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8AssociatedPHSI : 0x%02X ",
- psfCSType->cCPacketClassificationRule.u8AssociatedPHSI);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16PacketClassificationRuleIndex : 0x%X ",
- psfCSType->cCPacketClassificationRule.u16PacketClassificationRuleIndex);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8VendorSpecificClassifierParamLength : 0x%02X",
- psfCSType->cCPacketClassificationRule.u8VendorSpecificClassifierParamLength);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8VendorSpecificClassifierParam[1] : 0x%02X ",
- psfCSType->cCPacketClassificationRule.u8VendorSpecificClassifierParam[0]);
+ psfCSType = &pstAddIndication->sfAdmittedSet.cConvergenceSLTypes[nIndex];
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " CCPacketClassificationRuleSI====>");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ClassifierRulePriority: 0x%02X ",
+ psfCSType->cCPacketClassificationRule.u8ClassifierRulePriority);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPTypeOfServiceLength: 0x%02X",
+ psfCSType->cCPacketClassificationRule.u8IPTypeOfServiceLength);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPTypeOfService[3]: 0x%02X %02X %02X",
+ psfCSType->cCPacketClassificationRule.u8IPTypeOfService[0],
+ psfCSType->cCPacketClassificationRule.u8IPTypeOfService[1],
+ psfCSType->cCPacketClassificationRule.u8IPTypeOfService[2]);
+ for (uiLoopIndex = 0; uiLoopIndex < 1; uiLoopIndex++)
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8Protocol: 0x%02X ", psfCSType->cCPacketClassificationRule.u8Protocol);
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPMaskedSourceAddressLength: 0x%02X ",
+ psfCSType->cCPacketClassificationRule.u8IPMaskedSourceAddressLength);
+
+ for (uiLoopIndex = 0; uiLoopIndex < 32; uiLoopIndex++)
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPMaskedSourceAddress[32]: 0x%02X ",
+ psfCSType->cCPacketClassificationRule.u8IPMaskedSourceAddress[uiLoopIndex]);
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPDestinationAddressLength: 0x%02X ",
+ psfCSType->cCPacketClassificationRule.u8IPDestinationAddressLength);
+
+ for (uiLoopIndex = 0; uiLoopIndex < 32; uiLoopIndex++)
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPDestinationAddress[32]: 0x%02X ",
+ psfCSType->cCPacketClassificationRule.u8IPDestinationAddress[uiLoopIndex]);
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ProtocolSourcePortRangeLength: 0x%02X ",
+ psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRangeLength);
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ProtocolSourcePortRange[4]: 0x %02X %02X %02X %02X ",
+ psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRange[0],
+ psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRange[1],
+ psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRange[2],
+ psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRange[3]);
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ProtocolDestPortRangeLength: 0x%02X ",
+ psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRangeLength);
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ProtocolDestPortRange[4]: 0x %02X %02X %02X %02X ",
+ psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange[0],
+ psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange[1],
+ psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange[2],
+ psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange[3]);
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8EthernetDestMacAddressLength: 0x%02X ",
+ psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddressLength);
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8EthernetDestMacAddress[6]: 0x %02X %02X %02X %02X %02X %02X",
+ psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[0],
+ psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[1],
+ psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[2],
+ psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[3],
+ psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[4],
+ psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[5]);
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8EthernetSourceMACAddressLength: 0x%02X ",
+ psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddressLength);
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8EthernetSourceMACAddress[6]: 0x %02X %02X %02X %02X %02X %02X",
+ psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[0],
+ psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[1],
+ psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[2],
+ psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[3],
+ psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[4],
+ psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[5]);
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8EthertypeLength: 0x%02X ", psfCSType->cCPacketClassificationRule.u8EthertypeLength);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8Ethertype[3]: 0x%02X %02X %02X",
+ psfCSType->cCPacketClassificationRule.u8Ethertype[0],
+ psfCSType->cCPacketClassificationRule.u8Ethertype[1],
+ psfCSType->cCPacketClassificationRule.u8Ethertype[2]);
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16UserPriority: 0x%X ", psfCSType->cCPacketClassificationRule.u16UserPriority);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16VLANID: 0x%X ", psfCSType->cCPacketClassificationRule.u16VLANID);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8AssociatedPHSI: 0x%02X ", psfCSType->cCPacketClassificationRule.u8AssociatedPHSI);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16PacketClassificationRuleIndex: 0x%X ",
+ psfCSType->cCPacketClassificationRule.u16PacketClassificationRuleIndex);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8VendorSpecificClassifierParamLength: 0x%02X",
+ psfCSType->cCPacketClassificationRule.u8VendorSpecificClassifierParamLength);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8VendorSpecificClassifierParam[1]: 0x%02X ",
+ psfCSType->cCPacketClassificationRule.u8VendorSpecificClassifierParam[0]);
#ifdef VERSION_D5
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPv6FlowLableLength : 0x%X ",
- psfCSType->cCPacketClassificationRule.u8IPv6FlowLableLength);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPv6FlowLable[6] : 0x %02X %02X %02X %02X %02X %02X ",
- psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[0],
- psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[1],
- psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[2],
- psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[3],
- psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[4],
- psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[5]);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPv6FlowLableLength: 0x%X ",
+ psfCSType->cCPacketClassificationRule.u8IPv6FlowLableLength);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPv6FlowLable[6]: 0x %02X %02X %02X %02X %02X %02X ",
+ psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[0],
+ psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[1],
+ psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[2],
+ psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[3],
+ psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[4],
+ psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[5]);
#endif
}
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "bValid : 0x%X",pstAddIndication->sfAdmittedSet.bValid);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " ActiveSet--->");
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32SFID : 0x%X",pstAddIndication->sfActiveSet.u32SFID);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16CID : 0x%X",pstAddIndication->sfActiveSet.u16CID);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ServiceClassNameLength : 0x%X",
- pstAddIndication->sfActiveSet.u8ServiceClassNameLength);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ServiceClassName : 0x %02X %02X %02X %02X %02X %02X",
- pstAddIndication->sfActiveSet.u8ServiceClassName[0],
- pstAddIndication->sfActiveSet.u8ServiceClassName[1],
- pstAddIndication->sfActiveSet.u8ServiceClassName[2],
- pstAddIndication->sfActiveSet.u8ServiceClassName[3],
- pstAddIndication->sfActiveSet.u8ServiceClassName[4],
- pstAddIndication->sfActiveSet.u8ServiceClassName[5]);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8MBSService : 0x%02X",
- pstAddIndication->sfActiveSet.u8MBSService);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8QosParamSet : 0x%02X",
- pstAddIndication->sfActiveSet.u8QosParamSet);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8TrafficPriority : 0x%02X",
- pstAddIndication->sfActiveSet.u8TrafficPriority);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32MaxTrafficBurst : 0x%X",
- pstAddIndication->sfActiveSet.u32MaxTrafficBurst);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32MinReservedTrafficRate : 0x%X",
- pstAddIndication->sfActiveSet.u32MinReservedTrafficRate);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8VendorSpecificQoSParamLength : 0x%02X",
- pstAddIndication->sfActiveSet.u8VendorSpecificQoSParamLength);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8VendorSpecificQoSParam : 0x%02X",
- pstAddIndication->sfActiveSet.u8VendorSpecificQoSParam[0]);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ServiceFlowSchedulingType : 0x%02X",
- pstAddIndication->sfActiveSet.u8ServiceFlowSchedulingType);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32ToleratedJitter : 0x%X",
- pstAddIndication->sfActiveSet.u32ToleratedJitter);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32MaximumLatency : 0x%X",
- pstAddIndication->sfActiveSet.u32MaximumLatency);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8FixedLengthVSVariableLengthSDUIndicator: 0x%02X",
- pstAddIndication->sfActiveSet.u8FixedLengthVSVariableLengthSDUIndicator);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8SDUSize : 0x%X",
- pstAddIndication->sfActiveSet.u8SDUSize);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16TargetSAID : 0x%X",
- pstAddIndication->sfActiveSet.u16TargetSAID);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8ARQEnable : 0x%X",
- pstAddIndication->sfActiveSet.u8ARQEnable);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16ARQWindowSize : 0x%X",
- pstAddIndication->sfActiveSet.u16ARQWindowSize);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16ARQRetryTxTimeOut : 0x%X",
- pstAddIndication->sfActiveSet.u16ARQRetryTxTimeOut);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16ARQRetryRxTimeOut : 0x%X",
- pstAddIndication->sfActiveSet.u16ARQRetryRxTimeOut);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16ARQBlockLifeTime : 0x%X",
- pstAddIndication->sfActiveSet.u16ARQBlockLifeTime);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16ARQSyncLossTimeOut : 0x%X",
- pstAddIndication->sfActiveSet.u16ARQSyncLossTimeOut);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8ARQDeliverInOrder : 0x%X",
- pstAddIndication->sfActiveSet.u8ARQDeliverInOrder);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16ARQRxPurgeTimeOut : 0x%X",
- pstAddIndication->sfActiveSet.u16ARQRxPurgeTimeOut);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16ARQBlockSize : 0x%X",
- pstAddIndication->sfActiveSet.u16ARQBlockSize);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8CSSpecification : 0x%X",
- pstAddIndication->sfActiveSet.u8CSSpecification);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8TypeOfDataDeliveryService : 0x%X",
- pstAddIndication->sfActiveSet.u8TypeOfDataDeliveryService);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16SDUInterArrivalTime : 0x%X",
- pstAddIndication->sfActiveSet.u16SDUInterArrivalTime);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16TimeBase : 0x%X",
- pstAddIndication->sfActiveSet.u16TimeBase);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8PagingPreference : 0x%X",
- pstAddIndication->sfActiveSet.u8PagingPreference);
-
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8TrafficIndicationPreference : 0x%X",
- pstAddIndication->sfActiveSet.u8TrafficIndicationPreference);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " Total Classifiers Received : 0x%X",pstAddIndication->sfActiveSet.u8TotalClassifiers);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "bValid: 0x%X", pstAddIndication->sfAdmittedSet.bValid);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " ActiveSet--->");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32SFID: 0x%X", pstAddIndication->sfActiveSet.u32SFID);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16CID: 0x%X", pstAddIndication->sfActiveSet.u16CID);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ServiceClassNameLength: 0x%X", pstAddIndication->sfActiveSet.u8ServiceClassNameLength);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ServiceClassName: 0x %02X %02X %02X %02X %02X %02X",
+ pstAddIndication->sfActiveSet.u8ServiceClassName[0],
+ pstAddIndication->sfActiveSet.u8ServiceClassName[1],
+ pstAddIndication->sfActiveSet.u8ServiceClassName[2],
+ pstAddIndication->sfActiveSet.u8ServiceClassName[3],
+ pstAddIndication->sfActiveSet.u8ServiceClassName[4],
+ pstAddIndication->sfActiveSet.u8ServiceClassName[5]);
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8MBSService: 0x%02X", pstAddIndication->sfActiveSet.u8MBSService);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8QosParamSet: 0x%02X", pstAddIndication->sfActiveSet.u8QosParamSet);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8TrafficPriority: 0x%02X", pstAddIndication->sfActiveSet.u8TrafficPriority);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32MaxTrafficBurst: 0x%X", pstAddIndication->sfActiveSet.u32MaxTrafficBurst);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32MinReservedTrafficRate: 0x%X",
+ pstAddIndication->sfActiveSet.u32MinReservedTrafficRate);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8VendorSpecificQoSParamLength: 0x%02X",
+ pstAddIndication->sfActiveSet.u8VendorSpecificQoSParamLength);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8VendorSpecificQoSParam: 0x%02X",
+ pstAddIndication->sfActiveSet.u8VendorSpecificQoSParam[0]);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ServiceFlowSchedulingType: 0x%02X",
+ pstAddIndication->sfActiveSet.u8ServiceFlowSchedulingType);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32ToleratedJitter: 0x%X", pstAddIndication->sfActiveSet.u32ToleratedJitter);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32MaximumLatency: 0x%X", pstAddIndication->sfActiveSet.u32MaximumLatency);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8FixedLengthVSVariableLengthSDUIndicator: 0x%02X",
+ pstAddIndication->sfActiveSet.u8FixedLengthVSVariableLengthSDUIndicator);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8SDUSize: 0x%X", pstAddIndication->sfActiveSet.u8SDUSize);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16TargetSAID: 0x%X", pstAddIndication->sfActiveSet.u16TargetSAID);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8ARQEnable: 0x%X", pstAddIndication->sfActiveSet.u8ARQEnable);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16ARQWindowSize: 0x%X", pstAddIndication->sfActiveSet.u16ARQWindowSize);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16ARQRetryTxTimeOut: 0x%X", pstAddIndication->sfActiveSet.u16ARQRetryTxTimeOut);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16ARQRetryRxTimeOut: 0x%X", pstAddIndication->sfActiveSet.u16ARQRetryRxTimeOut);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16ARQBlockLifeTime: 0x%X", pstAddIndication->sfActiveSet.u16ARQBlockLifeTime);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16ARQSyncLossTimeOut: 0x%X", pstAddIndication->sfActiveSet.u16ARQSyncLossTimeOut);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8ARQDeliverInOrder: 0x%X", pstAddIndication->sfActiveSet.u8ARQDeliverInOrder);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16ARQRxPurgeTimeOut: 0x%X", pstAddIndication->sfActiveSet.u16ARQRxPurgeTimeOut);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16ARQBlockSize: 0x%X", pstAddIndication->sfActiveSet.u16ARQBlockSize);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8CSSpecification: 0x%X", pstAddIndication->sfActiveSet.u8CSSpecification);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8TypeOfDataDeliveryService: 0x%X",
+ pstAddIndication->sfActiveSet.u8TypeOfDataDeliveryService);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16SDUInterArrivalTime: 0x%X", pstAddIndication->sfActiveSet.u16SDUInterArrivalTime);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16TimeBase: 0x%X", pstAddIndication->sfActiveSet.u16TimeBase);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8PagingPreference: 0x%X", pstAddIndication->sfActiveSet.u8PagingPreference);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8TrafficIndicationPreference: 0x%X",
+ pstAddIndication->sfActiveSet.u8TrafficIndicationPreference);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " Total Classifiers Received: 0x%X", pstAddIndication->sfActiveSet.u8TotalClassifiers);
nCurClassifierCnt = pstAddIndication->sfActiveSet.u8TotalClassifiers;
-
- if(nCurClassifierCnt > MAX_CLASSIFIERS_IN_SF)
- {
+ if (nCurClassifierCnt > MAX_CLASSIFIERS_IN_SF)
nCurClassifierCnt = MAX_CLASSIFIERS_IN_SF;
- }
-
- for(nIndex = 0 ; nIndex < nCurClassifierCnt ; nIndex++)
- {
+ for (nIndex = 0; nIndex < nCurClassifierCnt; nIndex++) {
stConvergenceSLTypes *psfCSType = NULL;
- psfCSType = &pstAddIndication->sfActiveSet.cConvergenceSLTypes[nIndex];
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " CCPacketClassificationRuleSI====>");
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8ClassifierRulePriority :0x%X ",
- psfCSType->cCPacketClassificationRule.u8ClassifierRulePriority);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8IPTypeOfServiceLength :0x%X ",
- psfCSType->cCPacketClassificationRule.u8IPTypeOfServiceLength);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8IPTypeOfService[3] :0x%X ,0x%X ,0x%X ",
- psfCSType->cCPacketClassificationRule.u8IPTypeOfService[0],
- psfCSType->cCPacketClassificationRule.u8IPTypeOfService[1],
- psfCSType->cCPacketClassificationRule.u8IPTypeOfService[2]);
- for(uiLoopIndex=0; uiLoopIndex < 1; uiLoopIndex++)
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8Protocol : 0x%X ",
- psfCSType->cCPacketClassificationRule.u8Protocol);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPMaskedSourceAddressLength :0x%X ",
- psfCSType->cCPacketClassificationRule.u8IPMaskedSourceAddressLength);
-
- for(uiLoopIndex=0; uiLoopIndex < 32; uiLoopIndex++)
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPMaskedSourceAddress[32]:0x%X ",
- psfCSType->cCPacketClassificationRule.u8IPMaskedSourceAddress[uiLoopIndex]);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPDestinationAddressLength : 0x%02X ",
- psfCSType->cCPacketClassificationRule.u8IPDestinationAddressLength);
-
- for(uiLoopIndex=0;uiLoopIndex<32;uiLoopIndex++)
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8IPDestinationAddress[32]:0x%X ",
- psfCSType->cCPacketClassificationRule.u8IPDestinationAddress[uiLoopIndex]);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8ProtocolSourcePortRangeLength:0x%X ",
- psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRangeLength);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8ProtocolSourcePortRange[4]:0x%X ,0x%X ,0x%X ,0x%X ",
- psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRange[0],
- psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRange[1],
- psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRange[2],
- psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRange[3]);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8ProtocolDestPortRangeLength:0x%X ",
- psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRangeLength);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8ProtocolDestPortRange[4]:0x%X ,0x%X ,0x%X ,0x%X ",
- psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange[0],
- psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange[1],
- psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange[2],
- psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange[3]);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8EthernetDestMacAddressLength:0x%X ",
- psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddressLength);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8EthernetDestMacAddress[6]:0x%X ,0x%X ,0x%X ,0x%X ,0x%X ,0x%X",
- psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[0],
- psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[1],
- psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[2],
- psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[3],
- psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[4],
- psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[5]);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8EthernetSourceMACAddressLength:0x%X ",
- psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddressLength);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8EthernetSourceMACAddress[6]:0x%X ,0x%X ,0x%X ,0x%X ,0x%X ,0x%X",
- psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[0],
- psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[1],
- psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[2],
- psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[3],
- psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[4],
- psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[5]);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8EthertypeLength :0x%X ",
- psfCSType->cCPacketClassificationRule.u8EthertypeLength);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8Ethertype[3] :0x%X ,0x%X ,0x%X ",
- psfCSType->cCPacketClassificationRule.u8Ethertype[0],
- psfCSType->cCPacketClassificationRule.u8Ethertype[1],
- psfCSType->cCPacketClassificationRule.u8Ethertype[2]);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16UserPriority :0x%X ",
- psfCSType->cCPacketClassificationRule.u16UserPriority);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16VLANID :0x%X ",
- psfCSType->cCPacketClassificationRule.u16VLANID);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8AssociatedPHSI :0x%X ",
- psfCSType->cCPacketClassificationRule.u8AssociatedPHSI);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16PacketClassificationRuleIndex:0x%X ",
- psfCSType->cCPacketClassificationRule.u16PacketClassificationRuleIndex);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8VendorSpecificClassifierParamLength:0x%X ",
- psfCSType->cCPacketClassificationRule.u8VendorSpecificClassifierParamLength);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8VendorSpecificClassifierParam[1]:0x%X ",
- psfCSType->cCPacketClassificationRule.u8VendorSpecificClassifierParam[0]);
+ psfCSType = &pstAddIndication->sfActiveSet.cConvergenceSLTypes[nIndex];
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " CCPacketClassificationRuleSI====>");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8ClassifierRulePriority: 0x%X ",
+ psfCSType->cCPacketClassificationRule.u8ClassifierRulePriority);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8IPTypeOfServiceLength: 0x%X ",
+ psfCSType->cCPacketClassificationRule.u8IPTypeOfServiceLength);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8IPTypeOfService[3]: 0x%X ,0x%X ,0x%X ",
+ psfCSType->cCPacketClassificationRule.u8IPTypeOfService[0],
+ psfCSType->cCPacketClassificationRule.u8IPTypeOfService[1],
+ psfCSType->cCPacketClassificationRule.u8IPTypeOfService[2]);
+
+ for (uiLoopIndex = 0; uiLoopIndex < 1; uiLoopIndex++)
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8Protocol: 0x%X ", psfCSType->cCPacketClassificationRule.u8Protocol);
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPMaskedSourceAddressLength: 0x%X ",
+ psfCSType->cCPacketClassificationRule.u8IPMaskedSourceAddressLength);
+
+ for (uiLoopIndex = 0; uiLoopIndex < 32; uiLoopIndex++)
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPMaskedSourceAddress[32]: 0x%X ",
+ psfCSType->cCPacketClassificationRule.u8IPMaskedSourceAddress[uiLoopIndex]);
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPDestinationAddressLength: 0x%02X ",
+ psfCSType->cCPacketClassificationRule.u8IPDestinationAddressLength);
+
+ for (uiLoopIndex = 0; uiLoopIndex < 32; uiLoopIndex++)
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8IPDestinationAddress[32]:0x%X ",
+ psfCSType->cCPacketClassificationRule.u8IPDestinationAddress[uiLoopIndex]);
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8ProtocolSourcePortRangeLength: 0x%X ",
+ psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRangeLength);
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8ProtocolSourcePortRange[4]: 0x%X ,0x%X ,0x%X ,0x%X ",
+ psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRange[0],
+ psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRange[1],
+ psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRange[2],
+ psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRange[3]);
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8ProtocolDestPortRangeLength: 0x%X ",
+ psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRangeLength);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8ProtocolDestPortRange[4]: 0x%X ,0x%X ,0x%X ,0x%X ",
+ psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange[0],
+ psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange[1],
+ psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange[2],
+ psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange[3]);
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8EthernetDestMacAddressLength: 0x%X ",
+ psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddressLength);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8EthernetDestMacAddress[6]: 0x%X ,0x%X ,0x%X ,0x%X ,0x%X ,0x%X",
+ psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[0],
+ psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[1],
+ psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[2],
+ psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[3],
+ psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[4],
+ psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[5]);
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8EthernetSourceMACAddressLength: 0x%X ",
+ psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddressLength);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8EthernetSourceMACAddress[6]: 0x%X ,0x%X ,0x%X ,0x%X ,0x%X ,0x%X",
+ psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[0],
+ psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[1],
+ psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[2],
+ psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[3],
+ psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[4],
+ psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[5]);
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8EthertypeLength: 0x%X ",
+ psfCSType->cCPacketClassificationRule.u8EthertypeLength);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8Ethertype[3]: 0x%X ,0x%X ,0x%X ",
+ psfCSType->cCPacketClassificationRule.u8Ethertype[0],
+ psfCSType->cCPacketClassificationRule.u8Ethertype[1],
+ psfCSType->cCPacketClassificationRule.u8Ethertype[2]);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16UserPriority: 0x%X ",
+ psfCSType->cCPacketClassificationRule.u16UserPriority);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16VLANID: 0x%X ", psfCSType->cCPacketClassificationRule.u16VLANID);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8AssociatedPHSI: 0x%X ", psfCSType->cCPacketClassificationRule.u8AssociatedPHSI);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16PacketClassificationRuleIndex:0x%X ",
+ psfCSType->cCPacketClassificationRule.u16PacketClassificationRuleIndex);
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8VendorSpecificClassifierParamLength:0x%X ",
+ psfCSType->cCPacketClassificationRule.u8VendorSpecificClassifierParamLength);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8VendorSpecificClassifierParam[1]:0x%X ",
+ psfCSType->cCPacketClassificationRule.u8VendorSpecificClassifierParam[0]);
#ifdef VERSION_D5
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8IPv6FlowLableLength :0x%X ",
- psfCSType->cCPacketClassificationRule.u8IPv6FlowLableLength);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8IPv6FlowLable[6] :0x%X ,0x%X ,0x%X ,0x%X ,0x%X ,0x%X ",
- psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[0],
- psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[1],
- psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[2],
- psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[3],
- psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[4],
- psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[5]);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8IPv6FlowLableLength: 0x%X ",
+ psfCSType->cCPacketClassificationRule.u8IPv6FlowLableLength);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8IPv6FlowLable[6]: 0x%X ,0x%X ,0x%X ,0x%X ,0x%X ,0x%X ",
+ psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[0],
+ psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[1],
+ psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[2],
+ psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[3],
+ psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[4],
+ psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[5]);
#endif
}
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " bValid : 0x%X",pstAddIndication->sfActiveSet.bValid);
-
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " bValid: 0x%X", pstAddIndication->sfActiveSet.bValid);
}
-static inline ULONG RestoreSFParam(PMINI_ADAPTER Adapter, ULONG ulAddrSFParamSet,PUCHAR pucDestBuffer)
+static inline ULONG RestoreSFParam(PMINI_ADAPTER Adapter, ULONG ulAddrSFParamSet, PUCHAR pucDestBuffer)
{
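+	/* returns 1 on success, 0 for bad arguments, and STATUS_FAILURE if the device read fails */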
UINT nBytesToRead = sizeof(stServiceFlowParamSI);
- if(ulAddrSFParamSet == 0 || NULL == pucDestBuffer)
- {
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Got Param address as 0!!");
+ if (ulAddrSFParamSet == 0 || NULL == pucDestBuffer) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Got Param address as 0!!");
return 0;
}
ulAddrSFParamSet = ntohl(ulAddrSFParamSet);
- //Read out the SF Param Set At the indicated Location
- if(rdm(Adapter, ulAddrSFParamSet, (PUCHAR)pucDestBuffer, nBytesToRead) < 0)
+ /* Read out the SF Param Set At the indicated Location */
+ if (rdm(Adapter, ulAddrSFParamSet, (PUCHAR)pucDestBuffer, nBytesToRead) < 0)
return STATUS_FAILURE;
return 1;
}
-
-static ULONG StoreSFParam(PMINI_ADAPTER Adapter,PUCHAR pucSrcBuffer,ULONG ulAddrSFParamSet)
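+/* Write a service flow parameter set out to the target's DSX buffer via wrm() */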
+static ULONG StoreSFParam(PMINI_ADAPTER Adapter, PUCHAR pucSrcBuffer, ULONG ulAddrSFParamSet)
{
- UINT nBytesToWrite = sizeof(stServiceFlowParamSI);
+ UINT nBytesToWrite = sizeof(stServiceFlowParamSI);
int ret = 0;
- if(ulAddrSFParamSet == 0 || NULL == pucSrcBuffer)
- {
+ if (ulAddrSFParamSet == 0 || NULL == pucSrcBuffer)
return 0;
- }
ret = wrm(Adapter, ulAddrSFParamSet, (u8 *)pucSrcBuffer, nBytesToWrite);
if (ret < 0) {
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "%s:%d WRM failed",__FUNCTION__, __LINE__);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "%s:%d WRM failed", __func__, __LINE__);
return ret;
}
return 1;
}
-ULONG StoreCmControlResponseMessage(PMINI_ADAPTER Adapter,PVOID pvBuffer,UINT *puBufferLength)
+ULONG StoreCmControlResponseMessage(PMINI_ADAPTER Adapter, PVOID pvBuffer, UINT *puBufferLength)
{
stLocalSFAddIndicationAlt *pstAddIndicationAlt = NULL;
- stLocalSFAddIndication * pstAddIndication = NULL;
+ stLocalSFAddIndication *pstAddIndication = NULL;
stLocalSFDeleteRequest *pstDeletionRequest;
UINT uiSearchRuleIndex;
ULONG ulSFID;
@@ -1671,52 +1369,51 @@ ULONG StoreCmControlResponseMessage(PMINI_ADAPTER Adapter,PVOID pvBuffer,UINT *p
pstAddIndicationAlt = (stLocalSFAddIndicationAlt *)(pvBuffer);
/*
- * In case of DSD Req By MS, we should immediately delete this SF so that
- * we can stop the further classifying the pkt for this SF.
- */
- if(pstAddIndicationAlt->u8Type == DSD_REQ)
- {
+	 * In case of a DSD request by the MS, delete this SF immediately so
+	 * that no further packets are classified to it.
+ */
+ if (pstAddIndicationAlt->u8Type == DSD_REQ) {
pstDeletionRequest = (stLocalSFDeleteRequest *)pvBuffer;
ulSFID = ntohl(pstDeletionRequest->u32SFID);
- uiSearchRuleIndex=SearchSfid(Adapter,ulSFID);
+ uiSearchRuleIndex = SearchSfid(Adapter, ulSFID);
- if(uiSearchRuleIndex < NO_OF_QUEUES)
- {
- deleteSFBySfid(Adapter,uiSearchRuleIndex);
+ if (uiSearchRuleIndex < NO_OF_QUEUES) {
+ deleteSFBySfid(Adapter, uiSearchRuleIndex);
Adapter->u32TotalDSD++;
}
return 1;
}
-
- if( (pstAddIndicationAlt->u8Type == DSD_RSP) ||
- (pstAddIndicationAlt->u8Type == DSD_ACK))
- {
- //No Special handling send the message as it is
+ if ((pstAddIndicationAlt->u8Type == DSD_RSP) ||
+ (pstAddIndicationAlt->u8Type == DSD_ACK)) {
+		/* No special handling; send the message as it is */
return 1;
}
- // For DSA_REQ, only up to "psfAuthorizedSet" parameter should be accessed by driver!
+	/* For DSA_REQ, only fields up to the "psfAuthorizedSet" parameter should be accessed by the driver! */
- pstAddIndication=kmalloc(sizeof(*pstAddIndication), GFP_KERNEL);
- if(NULL==pstAddIndication)
+ pstAddIndication = kmalloc(sizeof(*pstAddIndication), GFP_KERNEL);
+ if (pstAddIndication == NULL)
return 0;
/* AUTHORIZED SET */
pstAddIndication->psfAuthorizedSet = (stServiceFlowParamSI *)
GetNextTargetBufferLocation(Adapter, pstAddIndicationAlt->u16TID);
- if(!pstAddIndication->psfAuthorizedSet)
+ if (!pstAddIndication->psfAuthorizedSet) {
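+		/* free the indication so this error path doesn't leak it */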
+ kfree(pstAddIndication);
return 0;
+ }
- if(StoreSFParam(Adapter,(PUCHAR)&pstAddIndicationAlt->sfAuthorizedSet,
- (ULONG)pstAddIndication->psfAuthorizedSet)!= 1)
+ if (StoreSFParam(Adapter, (PUCHAR)&pstAddIndicationAlt->sfAuthorizedSet,
+ (ULONG)pstAddIndication->psfAuthorizedSet) != 1) {
+ kfree(pstAddIndication);
return 0;
+ }
/* this can't possibly be right */
pstAddIndication->psfAuthorizedSet = (stServiceFlowParamSI *)ntohl((ULONG)pstAddIndication->psfAuthorizedSet);
- if(pstAddIndicationAlt->u8Type == DSA_REQ)
- {
+ if (pstAddIndicationAlt->u8Type == DSA_REQ) {
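+		/* Build a compact stLocalSFAddRequest from the header fields and the stored authorized set, and send that back */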
stLocalSFAddRequest AddRequest;
AddRequest.u8Type = pstAddIndicationAlt->u8Type;
@@ -1724,18 +1421,18 @@ ULONG StoreCmControlResponseMessage(PMINI_ADAPTER Adapter,PVOID pvBuffer,UINT *p
AddRequest.u16TID = pstAddIndicationAlt->u16TID;
AddRequest.u16CID = pstAddIndicationAlt->u16CID;
AddRequest.u16VCID = pstAddIndicationAlt->u16VCID;
- AddRequest.psfParameterSet =pstAddIndication->psfAuthorizedSet ;
+ AddRequest.psfParameterSet = pstAddIndication->psfAuthorizedSet;
(*puBufferLength) = sizeof(stLocalSFAddRequest);
- memcpy(pvBuffer,&AddRequest,sizeof(stLocalSFAddRequest));
+ memcpy(pvBuffer, &AddRequest, sizeof(stLocalSFAddRequest));
+ kfree(pstAddIndication);
return 1;
}
- // Since it's not DSA_REQ, we can access all field in pstAddIndicationAlt
-
- //We need to extract the structure from the buffer and pack it differently
+	/* Since it's not DSA_REQ, we can access all fields in pstAddIndicationAlt */
+ /* We need to extract the structure from the buffer and pack it differently */
pstAddIndication->u8Type = pstAddIndicationAlt->u8Type;
- pstAddIndication->eConnectionDir= pstAddIndicationAlt->u8Direction ;
+ pstAddIndication->eConnectionDir = pstAddIndicationAlt->u8Direction;
pstAddIndication->u16TID = pstAddIndicationAlt->u16TID;
pstAddIndication->u16CID = pstAddIndicationAlt->u16CID;
pstAddIndication->u16VCID = pstAddIndicationAlt->u16VCID;
@@ -1744,21 +1441,28 @@ ULONG StoreCmControlResponseMessage(PMINI_ADAPTER Adapter,PVOID pvBuffer,UINT *p
/* ADMITTED SET */
pstAddIndication->psfAdmittedSet = (stServiceFlowParamSI *)
GetNextTargetBufferLocation(Adapter, pstAddIndicationAlt->u16TID);
- if(!pstAddIndication->psfAdmittedSet)
+ if (!pstAddIndication->psfAdmittedSet) {
+ kfree(pstAddIndication);
return 0;
- if(StoreSFParam(Adapter,(PUCHAR)&pstAddIndicationAlt->sfAdmittedSet,(ULONG)pstAddIndication->psfAdmittedSet) != 1)
+ }
+ if (StoreSFParam(Adapter, (PUCHAR)&pstAddIndicationAlt->sfAdmittedSet, (ULONG)pstAddIndication->psfAdmittedSet) != 1) {
+ kfree(pstAddIndication);
return 0;
+ }
pstAddIndication->psfAdmittedSet = (stServiceFlowParamSI *)ntohl((ULONG)pstAddIndication->psfAdmittedSet);
-
/* ACTIVE SET */
pstAddIndication->psfActiveSet = (stServiceFlowParamSI *)
GetNextTargetBufferLocation(Adapter, pstAddIndicationAlt->u16TID);
- if(!pstAddIndication->psfActiveSet)
+ if (!pstAddIndication->psfActiveSet) {
+ kfree(pstAddIndication);
return 0;
- if(StoreSFParam(Adapter,(PUCHAR)&pstAddIndicationAlt->sfActiveSet,(ULONG)pstAddIndication->psfActiveSet) != 1)
+ }
+ if (StoreSFParam(Adapter, (PUCHAR)&pstAddIndicationAlt->sfActiveSet, (ULONG)pstAddIndication->psfActiveSet) != 1) {
+ kfree(pstAddIndication);
return 0;
+ }
pstAddIndication->psfActiveSet = (stServiceFlowParamSI *)ntohl((ULONG)pstAddIndication->psfActiveSet);
@@ -1768,47 +1472,41 @@ ULONG StoreCmControlResponseMessage(PMINI_ADAPTER Adapter,PVOID pvBuffer,UINT *p
return 1;
}
-
static inline stLocalSFAddIndicationAlt
-*RestoreCmControlResponseMessage(register PMINI_ADAPTER Adapter,register PVOID pvBuffer)
+*RestoreCmControlResponseMessage(register PMINI_ADAPTER Adapter, register PVOID pvBuffer)
{
- ULONG ulStatus=0;
+ ULONG ulStatus = 0;
stLocalSFAddIndication *pstAddIndication = NULL;
stLocalSFAddIndicationAlt *pstAddIndicationDest = NULL;
- pstAddIndication = (stLocalSFAddIndication *)(pvBuffer);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "=====>" );
+ pstAddIndication = (stLocalSFAddIndication *)(pvBuffer);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "=====>");
if ((pstAddIndication->u8Type == DSD_REQ) ||
(pstAddIndication->u8Type == DSD_RSP) ||
(pstAddIndication->u8Type == DSD_ACK))
- {
return (stLocalSFAddIndicationAlt *)pvBuffer;
- }
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Inside RestoreCmControlResponseMessage ");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Inside RestoreCmControlResponseMessage ");
/*
- //Need to Allocate memory to contain the SUPER Large structures
- //Our driver can't create these structures on Stack :(
- */
- pstAddIndicationDest=kmalloc(sizeof(stLocalSFAddIndicationAlt), GFP_KERNEL);
-
- if(pstAddIndicationDest)
- {
- memset(pstAddIndicationDest,0,sizeof(stLocalSFAddIndicationAlt));
- }
- else
- {
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Failed to allocate memory for SF Add Indication Structure ");
+	 * Need to allocate memory to contain the SUPER large structures.
+	 * Our driver can't create these structures on the stack :(
+ */
+ pstAddIndicationDest = kmalloc(sizeof(stLocalSFAddIndicationAlt), GFP_KERNEL);
+
+ if (pstAddIndicationDest) {
+ memset(pstAddIndicationDest, 0, sizeof(stLocalSFAddIndicationAlt));
+ } else {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Failed to allocate memory for SF Add Indication Structure ");
return NULL;
}
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "AddIndication-u8Type : 0x%X",pstAddIndication->u8Type);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "AddIndication-u8Direction : 0x%X",pstAddIndication->eConnectionDir);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "AddIndication-u8TID : 0x%X",ntohs(pstAddIndication->u16TID));
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "AddIndication-u8CID : 0x%X",ntohs(pstAddIndication->u16CID));
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "AddIndication-u16VCID : 0x%X",ntohs(pstAddIndication->u16VCID));
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "AddIndication-autorized set loc : %p",pstAddIndication->psfAuthorizedSet);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "AddIndication-admitted set loc : %p",pstAddIndication->psfAdmittedSet);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "AddIndication-Active set loc : %p",pstAddIndication->psfActiveSet);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "AddIndication-u8Type : 0x%X", pstAddIndication->u8Type);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "AddIndication-u8Direction : 0x%X", pstAddIndication->eConnectionDir);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "AddIndication-u8TID : 0x%X", ntohs(pstAddIndication->u16TID));
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "AddIndication-u8CID : 0x%X", ntohs(pstAddIndication->u16CID));
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "AddIndication-u16VCID : 0x%X", ntohs(pstAddIndication->u16VCID));
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "AddIndication-autorized set loc : %p", pstAddIndication->psfAuthorizedSet);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "AddIndication-admitted set loc : %p", pstAddIndication->psfAdmittedSet);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "AddIndication-Active set loc : %p", pstAddIndication->psfActiveSet);
pstAddIndicationDest->u8Type = pstAddIndication->u8Type;
pstAddIndicationDest->u8Direction = pstAddIndication->eConnectionDir;
@@ -1817,42 +1515,39 @@ static inline stLocalSFAddIndicationAlt
pstAddIndicationDest->u16VCID = pstAddIndication->u16VCID;
pstAddIndicationDest->u8CC = pstAddIndication->u8CC;
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Restoring Active Set ");
- ulStatus=RestoreSFParam(Adapter,(ULONG)pstAddIndication->psfActiveSet, (PUCHAR)&pstAddIndicationDest->sfActiveSet);
- if(ulStatus != 1)
- {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Restoring Active Set ");
+ ulStatus = RestoreSFParam(Adapter, (ULONG)pstAddIndication->psfActiveSet, (PUCHAR)&pstAddIndicationDest->sfActiveSet);
+ if (ulStatus != 1)
goto failed_restore_sf_param;
- }
- if(pstAddIndicationDest->sfActiveSet.u8TotalClassifiers > MAX_CLASSIFIERS_IN_SF)
+
+ if (pstAddIndicationDest->sfActiveSet.u8TotalClassifiers > MAX_CLASSIFIERS_IN_SF)
pstAddIndicationDest->sfActiveSet.u8TotalClassifiers = MAX_CLASSIFIERS_IN_SF;
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Restoring Admitted Set ");
- ulStatus=RestoreSFParam(Adapter,(ULONG)pstAddIndication->psfAdmittedSet,(PUCHAR)&pstAddIndicationDest->sfAdmittedSet);
- if(ulStatus != 1)
- {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Restoring Admitted Set ");
+ ulStatus = RestoreSFParam(Adapter, (ULONG)pstAddIndication->psfAdmittedSet, (PUCHAR)&pstAddIndicationDest->sfAdmittedSet);
+ if (ulStatus != 1)
goto failed_restore_sf_param;
- }
- if(pstAddIndicationDest->sfAdmittedSet.u8TotalClassifiers > MAX_CLASSIFIERS_IN_SF)
+
+ if (pstAddIndicationDest->sfAdmittedSet.u8TotalClassifiers > MAX_CLASSIFIERS_IN_SF)
pstAddIndicationDest->sfAdmittedSet.u8TotalClassifiers = MAX_CLASSIFIERS_IN_SF;
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Restoring Authorized Set ");
- ulStatus=RestoreSFParam(Adapter,(ULONG)pstAddIndication->psfAuthorizedSet,(PUCHAR)&pstAddIndicationDest->sfAuthorizedSet);
- if(ulStatus != 1)
- {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Restoring Authorized Set ");
+ ulStatus = RestoreSFParam(Adapter, (ULONG)pstAddIndication->psfAuthorizedSet, (PUCHAR)&pstAddIndicationDest->sfAuthorizedSet);
+ if (ulStatus != 1)
goto failed_restore_sf_param;
- }
- if(pstAddIndicationDest->sfAuthorizedSet.u8TotalClassifiers > MAX_CLASSIFIERS_IN_SF)
+
+ if (pstAddIndicationDest->sfAuthorizedSet.u8TotalClassifiers > MAX_CLASSIFIERS_IN_SF)
pstAddIndicationDest->sfAuthorizedSet.u8TotalClassifiers = MAX_CLASSIFIERS_IN_SF;
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Dumping the whole raw packet");
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "============================================================");
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, " pstAddIndicationDest->sfActiveSet size %zx %p", sizeof(*pstAddIndicationDest), pstAddIndicationDest);
- //BCM_DEBUG_PRINT_BUFFER(Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, (unsigned char *)pstAddIndicationDest, sizeof(*pstAddIndicationDest));
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "============================================================");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Dumping the whole raw packet");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "============================================================");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, " pstAddIndicationDest->sfActiveSet size %zx %p", sizeof(*pstAddIndicationDest), pstAddIndicationDest);
+ /* BCM_DEBUG_PRINT_BUFFER(Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, (unsigned char *)pstAddIndicationDest, sizeof(*pstAddIndicationDest)); */
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "============================================================");
return pstAddIndicationDest;
failed_restore_sf_param:
kfree(pstAddIndicationDest);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "<=====" );
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "<=====");
return NULL;
}
@@ -1860,7 +1555,7 @@ ULONG SetUpTargetDsxBuffers(PMINI_ADAPTER Adapter)
{
ULONG ulTargetDsxBuffersBase = 0;
ULONG ulCntTargetBuffers;
- ULONG ulIndex=0;
+ ULONG i;
int Status;
if (!Adapter) {
@@ -1868,411 +1563,354 @@ ULONG SetUpTargetDsxBuffers(PMINI_ADAPTER Adapter)
return 0;
}
- if(Adapter->astTargetDsxBuffer[0].ulTargetDsxBuffer)
+ if (Adapter->astTargetDsxBuffer[0].ulTargetDsxBuffer)
return 1;
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Size of Each DSX Buffer(Also size of ServiceFlowParamSI): %zx ",sizeof(stServiceFlowParamSI));
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Reading DSX buffer From Target location %x ",DSX_MESSAGE_EXCHANGE_BUFFER);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Size of Each DSX Buffer(Also size of ServiceFlowParamSI): %zx ", sizeof(stServiceFlowParamSI));
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Reading DSX buffer From Target location %x ", DSX_MESSAGE_EXCHANGE_BUFFER);
- Status = rdmalt(Adapter, DSX_MESSAGE_EXCHANGE_BUFFER,
- (PUINT)&ulTargetDsxBuffersBase, sizeof(UINT));
- if(Status < 0)
- {
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "RDM failed!!");
+ Status = rdmalt(Adapter, DSX_MESSAGE_EXCHANGE_BUFFER, (PUINT)&ulTargetDsxBuffersBase, sizeof(UINT));
+ if (Status < 0) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "RDM failed!!");
return 0;
}
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Base Address Of DSX Target Buffer : 0x%lx",ulTargetDsxBuffersBase);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Tgt Buffer is Now %lx :",ulTargetDsxBuffersBase);
-
- ulCntTargetBuffers = DSX_MESSAGE_EXCHANGE_BUFFER_SIZE/sizeof(stServiceFlowParamSI);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Base Address Of DSX Target Buffer : 0x%lx", ulTargetDsxBuffersBase);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Tgt Buffer is Now %lx :", ulTargetDsxBuffersBase);
+ ulCntTargetBuffers = DSX_MESSAGE_EXCHANGE_BUFFER_SIZE / sizeof(stServiceFlowParamSI);
Adapter->ulTotalTargetBuffersAvailable =
ulCntTargetBuffers > MAX_TARGET_DSX_BUFFERS ?
MAX_TARGET_DSX_BUFFERS : ulCntTargetBuffers;
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, " Total Target DSX Buffer setup %lx ",Adapter->ulTotalTargetBuffersAvailable);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, " Total Target DSX Buffer setup %lx ", Adapter->ulTotalTargetBuffersAvailable);
- for(ulIndex=0; ulIndex < Adapter->ulTotalTargetBuffersAvailable ; ulIndex++)
- {
- Adapter->astTargetDsxBuffer[ulIndex].ulTargetDsxBuffer = ulTargetDsxBuffersBase;
- Adapter->astTargetDsxBuffer[ulIndex].valid=1;
- Adapter->astTargetDsxBuffer[ulIndex].tid=0;
- ulTargetDsxBuffersBase+=sizeof(stServiceFlowParamSI);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, " Target DSX Buffer %lx setup at 0x%lx",
- ulIndex, Adapter->astTargetDsxBuffer[ulIndex].ulTargetDsxBuffer);
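+	/* Carve the exchange region into fixed-size stServiceFlowParamSI slots, one per target DSX buffer */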
+ for (i = 0; i < Adapter->ulTotalTargetBuffersAvailable; i++) {
+ Adapter->astTargetDsxBuffer[i].ulTargetDsxBuffer = ulTargetDsxBuffersBase;
+ Adapter->astTargetDsxBuffer[i].valid = 1;
+ Adapter->astTargetDsxBuffer[i].tid = 0;
+ ulTargetDsxBuffersBase += sizeof(stServiceFlowParamSI);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, " Target DSX Buffer %lx setup at 0x%lx",
+ i, Adapter->astTargetDsxBuffer[i].ulTargetDsxBuffer);
}
Adapter->ulCurrentTargetBuffer = 0;
Adapter->ulFreeTargetBufferCnt = Adapter->ulTotalTargetBuffersAvailable;
return 1;
}
-static ULONG GetNextTargetBufferLocation(PMINI_ADAPTER Adapter,B_UINT16 tid)
+static ULONG GetNextTargetBufferLocation(PMINI_ADAPTER Adapter, B_UINT16 tid)
{
- ULONG ulTargetDSXBufferAddress;
- ULONG ulTargetDsxBufferIndexToUse,ulMaxTry;
+ ULONG ulTargetDSXBufferAddress;
+ ULONG ulTargetDsxBufferIndexToUse, ulMaxTry;
- if((Adapter->ulTotalTargetBuffersAvailable == 0)||
- (Adapter->ulFreeTargetBufferCnt == 0))
- {
- ClearTargetDSXBuffer(Adapter,tid,FALSE);
+ if ((Adapter->ulTotalTargetBuffersAvailable == 0) || (Adapter->ulFreeTargetBufferCnt == 0)) {
+ ClearTargetDSXBuffer(Adapter, tid, FALSE);
return 0;
}
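+	/* Hand out buffers round-robin: scan from the current slot for a free one, wrapping at the end */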
- ulTargetDsxBufferIndexToUse = Adapter->ulCurrentTargetBuffer;
- ulMaxTry = Adapter->ulTotalTargetBuffersAvailable;
- while((ulMaxTry)&&(Adapter->astTargetDsxBuffer[ulTargetDsxBufferIndexToUse].valid != 1))
- {
- ulTargetDsxBufferIndexToUse = (ulTargetDsxBufferIndexToUse+1)%
- Adapter->ulTotalTargetBuffersAvailable;
- ulMaxTry--;
+ ulTargetDsxBufferIndexToUse = Adapter->ulCurrentTargetBuffer;
+ ulMaxTry = Adapter->ulTotalTargetBuffersAvailable;
+ while ((ulMaxTry) && (Adapter->astTargetDsxBuffer[ulTargetDsxBufferIndexToUse].valid != 1)) {
+ ulTargetDsxBufferIndexToUse = (ulTargetDsxBufferIndexToUse+1) % Adapter->ulTotalTargetBuffersAvailable;
+ ulMaxTry--;
}
- if(ulMaxTry==0)
- {
- BCM_DEBUG_PRINT (Adapter, DBG_TYPE_PRINTK, 0, 0, "\n GetNextTargetBufferLocation : Error No Free Target DSX Buffers FreeCnt : %lx ",Adapter->ulFreeTargetBufferCnt);
- ClearTargetDSXBuffer(Adapter,tid,FALSE);
+ if (ulMaxTry == 0) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "\n GetNextTargetBufferLocation : Error No Free Target DSX Buffers FreeCnt : %lx ", Adapter->ulFreeTargetBufferCnt);
+ ClearTargetDSXBuffer(Adapter, tid, FALSE);
return 0;
}
-
- ulTargetDSXBufferAddress =
- Adapter->astTargetDsxBuffer[ulTargetDsxBufferIndexToUse].ulTargetDsxBuffer;
- Adapter->astTargetDsxBuffer[ulTargetDsxBufferIndexToUse].valid=0;
- Adapter->astTargetDsxBuffer[ulTargetDsxBufferIndexToUse].tid=tid;
+ ulTargetDSXBufferAddress = Adapter->astTargetDsxBuffer[ulTargetDsxBufferIndexToUse].ulTargetDsxBuffer;
+ Adapter->astTargetDsxBuffer[ulTargetDsxBufferIndexToUse].valid = 0;
+ Adapter->astTargetDsxBuffer[ulTargetDsxBufferIndexToUse].tid = tid;
Adapter->ulFreeTargetBufferCnt--;
-
-
- ulTargetDsxBufferIndexToUse =
- (ulTargetDsxBufferIndexToUse+1)%Adapter->ulTotalTargetBuffersAvailable;
+	ulTargetDsxBufferIndexToUse = (ulTargetDsxBufferIndexToUse + 1) % Adapter->ulTotalTargetBuffersAvailable;
Adapter->ulCurrentTargetBuffer = ulTargetDsxBufferIndexToUse;
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "GetNextTargetBufferLocation :Returning address %lx tid %d\n",
- ulTargetDSXBufferAddress,tid);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "GetNextTargetBufferLocation :Returning address %lx tid %d\n", ulTargetDSXBufferAddress, tid);
+
return ulTargetDSXBufferAddress;
}
-
-INT AllocAdapterDsxBuffer(PMINI_ADAPTER Adapter)
+int AllocAdapterDsxBuffer(PMINI_ADAPTER Adapter)
{
/*
- //Need to Allocate memory to contain the SUPER Large structures
- //Our driver can't create these structures on Stack
- */
- Adapter->caDsxReqResp=kmalloc(sizeof(stLocalSFAddIndicationAlt)+LEADER_SIZE, GFP_KERNEL);
- if(!Adapter->caDsxReqResp)
+	 * Need to allocate memory to contain the SUPER large structures.
+	 * Our driver can't create these structures on the stack.
+ */
+ Adapter->caDsxReqResp = kmalloc(sizeof(stLocalSFAddIndicationAlt)+LEADER_SIZE, GFP_KERNEL);
+ if (!Adapter->caDsxReqResp)
return -ENOMEM;
+
return 0;
}
-INT FreeAdapterDsxBuffer(PMINI_ADAPTER Adapter)
+int FreeAdapterDsxBuffer(PMINI_ADAPTER Adapter)
{
kfree(Adapter->caDsxReqResp);
return 0;
-
}
-/**
-@ingroup ctrl_pkt_functions
-This routinue would process the Control responses
-for the Connection Management.
-@return - Queue index for the free SFID else returns Invalid Index.
-*/
-BOOLEAN CmControlResponseMessage(PMINI_ADAPTER Adapter, /**<Pointer to the Adapter structure*/
- PVOID pvBuffer /**Starting Address of the Buffer, that contains the AddIndication Data*/
- )
+
+/*
+ * @ingroup ctrl_pkt_functions
+ * This routine processes the Control responses
+ * for Connection Management.
+ * @return - Queue index for the free SFID, else an invalid index.
+ */
+BOOLEAN CmControlResponseMessage(PMINI_ADAPTER Adapter, /* Pointer to the Adapter structure */
+	PVOID pvBuffer /* Starting address of the buffer that contains the AddIndication data */)
{
- stServiceFlowParamSI *psfLocalSet=NULL;
- stLocalSFAddIndicationAlt *pstAddIndication = NULL;
- stLocalSFChangeIndicationAlt *pstChangeIndication = NULL;
- PLEADER pLeader=NULL;
+ stServiceFlowParamSI *psfLocalSet = NULL;
+ stLocalSFAddIndicationAlt *pstAddIndication = NULL;
+ stLocalSFChangeIndicationAlt *pstChangeIndication = NULL;
+ PLEADER pLeader = NULL;
+
/*
- //Otherwise the message contains a target address from where we need to
- //read out the rest of the service flow param structure
- */
- if((pstAddIndication = RestoreCmControlResponseMessage(Adapter,pvBuffer))
- == NULL)
- {
- ClearTargetDSXBuffer(Adapter,((stLocalSFAddIndication *)pvBuffer)->u16TID, FALSE);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_PRINTK, 0, 0, "Error in restoring Service Flow param structure from DSx message");
+	 * Otherwise the message contains a target address from which we need to
+	 * read out the rest of the service flow param structure
+ */
+ pstAddIndication = RestoreCmControlResponseMessage(Adapter, pvBuffer);
+ if (pstAddIndication == NULL) {
+ ClearTargetDSXBuffer(Adapter, ((stLocalSFAddIndication *)pvBuffer)->u16TID, FALSE);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Error in restoring Service Flow param structure from DSx message");
return FALSE;
}
DumpCmControlPacket(pstAddIndication);
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "====>");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "====>");
pLeader = (PLEADER)Adapter->caDsxReqResp;
- pLeader->Status =CM_CONTROL_NEWDSX_MULTICLASSIFIER_REQ;
+ pLeader->Status = CM_CONTROL_NEWDSX_MULTICLASSIFIER_REQ;
pLeader->Vcid = 0;
- ClearTargetDSXBuffer(Adapter,pstAddIndication->u16TID,FALSE);
- BCM_DEBUG_PRINT (Adapter, DBG_TYPE_PRINTK, 0, 0, "### TID RECEIVED %d\n",pstAddIndication->u16TID);
- switch(pstAddIndication->u8Type)
+ ClearTargetDSXBuffer(Adapter, pstAddIndication->u16TID, FALSE);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "### TID RECEIVED %d\n", pstAddIndication->u16TID);
+ switch (pstAddIndication->u8Type) {
+ case DSA_REQ:
{
- case DSA_REQ:
- {
- pLeader->PLength = sizeof(stLocalSFAddIndicationAlt);
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Sending DSA Response....\n");
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "SENDING DSA RESPONSE TO MAC %d", pLeader->PLength );
- *((stLocalSFAddIndicationAlt*)&(Adapter->caDsxReqResp[LEADER_SIZE]))
- = *pstAddIndication;
- ((stLocalSFAddIndicationAlt*)&(Adapter->caDsxReqResp[LEADER_SIZE]))->u8Type = DSA_RSP;
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, " VCID = %x", ntohs(pstAddIndication->u16VCID));
- CopyBufferToControlPacket(Adapter,(PVOID)Adapter->caDsxReqResp);
- kfree(pstAddIndication);
- }
- break;
- case DSA_RSP:
- {
- pLeader->PLength = sizeof(stLocalSFAddIndicationAlt);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "SENDING DSA ACK TO MAC %d",
+ pLeader->PLength = sizeof(stLocalSFAddIndicationAlt);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Sending DSA Response....\n");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "SENDING DSA RESPONSE TO MAC %d", pLeader->PLength);
+ *((stLocalSFAddIndicationAlt *)&(Adapter->caDsxReqResp[LEADER_SIZE]))
+ = *pstAddIndication;
+ ((stLocalSFAddIndicationAlt *)&(Adapter->caDsxReqResp[LEADER_SIZE]))->u8Type = DSA_RSP;
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, " VCID = %x", ntohs(pstAddIndication->u16VCID));
+ CopyBufferToControlPacket(Adapter, (PVOID)Adapter->caDsxReqResp);
+ kfree(pstAddIndication);
+ }
+ break;
+ case DSA_RSP:
+ {
+ pLeader->PLength = sizeof(stLocalSFAddIndicationAlt);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "SENDING DSA ACK TO MAC %d",
pLeader->PLength);
- *((stLocalSFAddIndicationAlt*)&(Adapter->caDsxReqResp[LEADER_SIZE]))
- = *pstAddIndication;
- ((stLocalSFAddIndicationAlt*)&(Adapter->caDsxReqResp[LEADER_SIZE]))->u8Type = DSA_ACK;
+ *((stLocalSFAddIndicationAlt *)&(Adapter->caDsxReqResp[LEADER_SIZE]))
+ = *pstAddIndication;
+ ((stLocalSFAddIndicationAlt *)&(Adapter->caDsxReqResp[LEADER_SIZE]))->u8Type = DSA_ACK;
- }//no break here..we should go down.
- case DSA_ACK:
- {
- UINT uiSearchRuleIndex=0;
+	} /* no break here; fall through to DSA_ACK */
+ case DSA_ACK:
+ {
+ UINT uiSearchRuleIndex = 0;
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "VCID:0x%X",
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "VCID:0x%X",
ntohs(pstAddIndication->u16VCID));
- uiSearchRuleIndex=SearchFreeSfid(Adapter);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"uiSearchRuleIndex:0x%X ",
+ uiSearchRuleIndex = SearchFreeSfid(Adapter);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "uiSearchRuleIndex:0x%X ",
uiSearchRuleIndex);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"Direction:0x%X ",
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Direction:0x%X ",
pstAddIndication->u8Direction);
- if((uiSearchRuleIndex< NO_OF_QUEUES) )
- {
- Adapter->PackInfo[uiSearchRuleIndex].ucDirection =
- pstAddIndication->u8Direction;
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "bValid:0x%X ",
+		if (uiSearchRuleIndex < NO_OF_QUEUES) {
+ Adapter->PackInfo[uiSearchRuleIndex].ucDirection =
+ pstAddIndication->u8Direction;
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "bValid:0x%X ",
pstAddIndication->sfActiveSet.bValid);
- if(pstAddIndication->sfActiveSet.bValid==TRUE)
- {
- Adapter->PackInfo[uiSearchRuleIndex].bActiveSet=TRUE;
- }
- if(pstAddIndication->sfAuthorizedSet.bValid==TRUE)
- {
- Adapter->PackInfo[uiSearchRuleIndex].bAuthorizedSet=TRUE;
- }
- if(pstAddIndication->sfAdmittedSet.bValid==TRUE)
- {
- Adapter->PackInfo[uiSearchRuleIndex].bAdmittedSet=TRUE;
- }
- if(FALSE == pstAddIndication->sfActiveSet.bValid)
- {
- Adapter->PackInfo[uiSearchRuleIndex].bActive = FALSE;
- Adapter->PackInfo[uiSearchRuleIndex].bActivateRequestSent = FALSE;
- if(pstAddIndication->sfAdmittedSet.bValid)
- {
- psfLocalSet = &pstAddIndication->sfAdmittedSet;
- }
- else if(pstAddIndication->sfAuthorizedSet.bValid)
- {
- psfLocalSet = &pstAddIndication->sfAuthorizedSet;
- }
- }
- else
- {
- psfLocalSet = &pstAddIndication->sfActiveSet;
- Adapter->PackInfo[uiSearchRuleIndex].bActive=TRUE;
- }
-
- if(!psfLocalSet)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "No set is valid\n");
- Adapter->PackInfo[uiSearchRuleIndex].bActive=FALSE;
- Adapter->PackInfo[uiSearchRuleIndex].bValid=FALSE;
- Adapter->PackInfo[uiSearchRuleIndex].usVCID_Value=0;
- kfree(pstAddIndication);
- }
+ if (pstAddIndication->sfActiveSet.bValid == TRUE)
+ Adapter->PackInfo[uiSearchRuleIndex].bActiveSet = TRUE;
+
+ if (pstAddIndication->sfAuthorizedSet.bValid == TRUE)
+ Adapter->PackInfo[uiSearchRuleIndex].bAuthorizedSet = TRUE;
+
+ if (pstAddIndication->sfAdmittedSet.bValid == TRUE)
+ Adapter->PackInfo[uiSearchRuleIndex].bAdmittedSet = TRUE;
+
+ if (pstAddIndication->sfActiveSet.bValid == FALSE) {
+ Adapter->PackInfo[uiSearchRuleIndex].bActive = FALSE;
+ Adapter->PackInfo[uiSearchRuleIndex].bActivateRequestSent = FALSE;
+ if (pstAddIndication->sfAdmittedSet.bValid)
+ psfLocalSet = &pstAddIndication->sfAdmittedSet;
+ else if (pstAddIndication->sfAuthorizedSet.bValid)
+ psfLocalSet = &pstAddIndication->sfAuthorizedSet;
+ } else {
+ psfLocalSet = &pstAddIndication->sfActiveSet;
+ Adapter->PackInfo[uiSearchRuleIndex].bActive = TRUE;
+ }
- else if(psfLocalSet->bValid && (pstAddIndication->u8CC == 0))
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "DSA ACK");
- Adapter->PackInfo[uiSearchRuleIndex].usVCID_Value =
- ntohs(pstAddIndication->u16VCID);
- Adapter->PackInfo[uiSearchRuleIndex].usCID =
- ntohs(pstAddIndication->u16CID);
-
- if(UPLINK_DIR == pstAddIndication->u8Direction)
- atomic_set(&Adapter->PackInfo[uiSearchRuleIndex].uiPerSFTxResourceCount, DEFAULT_PERSFCOUNT);
- CopyToAdapter(Adapter,psfLocalSet,uiSearchRuleIndex,
- DSA_ACK, pstAddIndication);
- // don't free pstAddIndication
-
- /* Inside CopyToAdapter, Sorting of all the SFs take place.
- Hence any access to the newly added SF through uiSearchRuleIndex is invalid.
- SHOULD BE STRICTLY AVOIDED.
- */
-// *(PULONG)(((PUCHAR)pvBuffer)+1)=psfLocalSet->u32SFID;
- memcpy((((PUCHAR)pvBuffer)+1), &psfLocalSet->u32SFID, 4);
-
- if(pstAddIndication->sfActiveSet.bValid == TRUE)
- {
- if(UPLINK_DIR == pstAddIndication->u8Direction)
- {
- if(!Adapter->LinkUpStatus)
- {
- netif_carrier_on(Adapter->dev);
- netif_start_queue(Adapter->dev);
- Adapter->LinkUpStatus = 1;
- if (netif_msg_link(Adapter))
- pr_info(PFX "%s: link up\n", Adapter->dev->name);
- atomic_set(&Adapter->TxPktAvail, 1);
- wake_up(&Adapter->tx_packet_wait_queue);
- Adapter->liTimeSinceLastNetEntry = get_seconds();
- }
+ if (!psfLocalSet) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "No set is valid\n");
+ Adapter->PackInfo[uiSearchRuleIndex].bActive = FALSE;
+ Adapter->PackInfo[uiSearchRuleIndex].bValid = FALSE;
+ Adapter->PackInfo[uiSearchRuleIndex].usVCID_Value = 0;
+ kfree(pstAddIndication);
+ } else if (psfLocalSet->bValid && (pstAddIndication->u8CC == 0)) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "DSA ACK");
+ Adapter->PackInfo[uiSearchRuleIndex].usVCID_Value = ntohs(pstAddIndication->u16VCID);
+ Adapter->PackInfo[uiSearchRuleIndex].usCID = ntohs(pstAddIndication->u16CID);
+
+ if (UPLINK_DIR == pstAddIndication->u8Direction)
+ atomic_set(&Adapter->PackInfo[uiSearchRuleIndex].uiPerSFTxResourceCount, DEFAULT_PERSFCOUNT);
+
+ CopyToAdapter(Adapter, psfLocalSet, uiSearchRuleIndex, DSA_ACK, pstAddIndication);
+ /* don't free pstAddIndication */
+
+				/* Inside CopyToAdapter, sorting of all the SFs takes place.
+ * Hence any access to the newly added SF through uiSearchRuleIndex is invalid.
+ * SHOULD BE STRICTLY AVOIDED.
+ */
+ /* *(PULONG)(((PUCHAR)pvBuffer)+1)=psfLocalSet->u32SFID; */
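+				/* use memcpy here: the destination is at a byte offset and may be unaligned */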
+ memcpy((((PUCHAR)pvBuffer)+1), &psfLocalSet->u32SFID, 4);
+
+ if (pstAddIndication->sfActiveSet.bValid == TRUE) {
+ if (UPLINK_DIR == pstAddIndication->u8Direction) {
+ if (!Adapter->LinkUpStatus) {
+ netif_carrier_on(Adapter->dev);
+ netif_start_queue(Adapter->dev);
+ Adapter->LinkUpStatus = 1;
+ if (netif_msg_link(Adapter))
+ pr_info(PFX "%s: link up\n", Adapter->dev->name);
+ atomic_set(&Adapter->TxPktAvail, 1);
+ wake_up(&Adapter->tx_packet_wait_queue);
+ Adapter->liTimeSinceLastNetEntry = get_seconds();
}
}
}
-
- else
- {
- Adapter->PackInfo[uiSearchRuleIndex].bActive=FALSE;
- Adapter->PackInfo[uiSearchRuleIndex].bValid=FALSE;
- Adapter->PackInfo[uiSearchRuleIndex].usVCID_Value=0;
- kfree(pstAddIndication);
- }
- }
- else
- {
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_PRINTK, 0, 0, "DSA ACK did not get valid SFID");
+ } else {
+ Adapter->PackInfo[uiSearchRuleIndex].bActive = FALSE;
+ Adapter->PackInfo[uiSearchRuleIndex].bValid = FALSE;
+ Adapter->PackInfo[uiSearchRuleIndex].usVCID_Value = 0;
kfree(pstAddIndication);
- return FALSE;
}
- }
- break;
- case DSC_REQ:
- {
- pLeader->PLength = sizeof(stLocalSFChangeIndicationAlt);
- pstChangeIndication = (stLocalSFChangeIndicationAlt*)pstAddIndication;
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "SENDING DSC RESPONSE TO MAC %d", pLeader->PLength);
-
- *((stLocalSFChangeIndicationAlt*)&(Adapter->caDsxReqResp[LEADER_SIZE])) = *pstChangeIndication;
- ((stLocalSFChangeIndicationAlt*)&(Adapter->caDsxReqResp[LEADER_SIZE]))->u8Type = DSC_RSP;
-
- CopyBufferToControlPacket(Adapter,(PVOID)Adapter->caDsxReqResp);
+ } else {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "DSA ACK did not get valid SFID");
kfree(pstAddIndication);
+ return FALSE;
}
- break;
- case DSC_RSP:
- {
- pLeader->PLength = sizeof(stLocalSFChangeIndicationAlt);
- pstChangeIndication = (stLocalSFChangeIndicationAlt*)pstAddIndication;
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "SENDING DSC ACK TO MAC %d", pLeader->PLength);
- *((stLocalSFChangeIndicationAlt*)&(Adapter->caDsxReqResp[LEADER_SIZE])) = *pstChangeIndication;
- ((stLocalSFChangeIndicationAlt*)&(Adapter->caDsxReqResp[LEADER_SIZE]))->u8Type = DSC_ACK;
- }
- case DSC_ACK:
- {
- UINT uiSearchRuleIndex=0;
+ }
+ break;
+ case DSC_REQ:
+ {
+ pLeader->PLength = sizeof(stLocalSFChangeIndicationAlt);
+ pstChangeIndication = (stLocalSFChangeIndicationAlt *)pstAddIndication;
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "SENDING DSC RESPONSE TO MAC %d", pLeader->PLength);
- pstChangeIndication = (stLocalSFChangeIndicationAlt *)pstAddIndication;
- uiSearchRuleIndex=SearchSfid(Adapter,ntohl(pstChangeIndication->sfActiveSet.u32SFID));
- if(uiSearchRuleIndex > NO_OF_QUEUES-1)
- {
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_PRINTK, 0, 0, "SF doesn't exist for which DSC_ACK is received");
- }
- if((uiSearchRuleIndex < NO_OF_QUEUES))
- {
- Adapter->PackInfo[uiSearchRuleIndex].ucDirection = pstChangeIndication->u8Direction;
- if(pstChangeIndication->sfActiveSet.bValid==TRUE)
- {
- Adapter->PackInfo[uiSearchRuleIndex].bActiveSet=TRUE;
- }
- if(pstChangeIndication->sfAuthorizedSet.bValid==TRUE)
- {
- Adapter->PackInfo[uiSearchRuleIndex].bAuthorizedSet=TRUE;
- }
- if(pstChangeIndication->sfAdmittedSet.bValid==TRUE)
- {
- Adapter->PackInfo[uiSearchRuleIndex].bAdmittedSet=TRUE;
- }
+ *((stLocalSFChangeIndicationAlt *)&(Adapter->caDsxReqResp[LEADER_SIZE])) = *pstChangeIndication;
+ ((stLocalSFChangeIndicationAlt *)&(Adapter->caDsxReqResp[LEADER_SIZE]))->u8Type = DSC_RSP;
- if(FALSE==pstChangeIndication->sfActiveSet.bValid)
- {
- Adapter->PackInfo[uiSearchRuleIndex].bActive = FALSE;
- Adapter->PackInfo[uiSearchRuleIndex].bActivateRequestSent = FALSE;
- if(pstChangeIndication->sfAdmittedSet.bValid)
- {
- psfLocalSet = &pstChangeIndication->sfAdmittedSet;
- }
- else if(pstChangeIndication->sfAuthorizedSet.bValid)
- {
- psfLocalSet = &pstChangeIndication->sfAuthorizedSet;
- }
- }
-
- else
- {
- psfLocalSet = &pstChangeIndication->sfActiveSet;
- Adapter->PackInfo[uiSearchRuleIndex].bActive=TRUE;
- }
- if(psfLocalSet->bValid && (pstChangeIndication->u8CC == 0))
- {
- Adapter->PackInfo[uiSearchRuleIndex].usVCID_Value =
- ntohs(pstChangeIndication->u16VCID);
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "CC field is %d bvalid = %d\n",
- pstChangeIndication->u8CC, psfLocalSet->bValid);
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "VCID= %d\n", ntohs(pstChangeIndication->u16VCID));
- Adapter->PackInfo[uiSearchRuleIndex].usCID =
- ntohs(pstChangeIndication->u16CID);
- CopyToAdapter(Adapter,psfLocalSet,uiSearchRuleIndex,
- DSC_ACK, pstAddIndication);
-
- *(PULONG)(((PUCHAR)pvBuffer)+1)=psfLocalSet->u32SFID;
- }
- else if(pstChangeIndication->u8CC == 6)
- {
- deleteSFBySfid(Adapter,uiSearchRuleIndex);
- kfree(pstAddIndication);
- }
+ CopyBufferToControlPacket(Adapter, (PVOID)Adapter->caDsxReqResp);
+ kfree(pstAddIndication);
+ }
+ break;
+ case DSC_RSP:
+ {
+ pLeader->PLength = sizeof(stLocalSFChangeIndicationAlt);
+ pstChangeIndication = (stLocalSFChangeIndicationAlt *)pstAddIndication;
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "SENDING DSC ACK TO MAC %d", pLeader->PLength);
+ *((stLocalSFChangeIndicationAlt *)&(Adapter->caDsxReqResp[LEADER_SIZE])) = *pstChangeIndication;
+ ((stLocalSFChangeIndicationAlt *)&(Adapter->caDsxReqResp[LEADER_SIZE]))->u8Type = DSC_ACK;
+ }
+ case DSC_ACK:
+ {
+ UINT uiSearchRuleIndex = 0;
+
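+		/* Apply the changed parameter sets with the same bookkeeping as DSA_ACK above */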
+ pstChangeIndication = (stLocalSFChangeIndicationAlt *)pstAddIndication;
+ uiSearchRuleIndex = SearchSfid(Adapter, ntohl(pstChangeIndication->sfActiveSet.u32SFID));
+ if (uiSearchRuleIndex > NO_OF_QUEUES-1)
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "SF doesn't exist for which DSC_ACK is received");
+
+		if (uiSearchRuleIndex < NO_OF_QUEUES) {
+ Adapter->PackInfo[uiSearchRuleIndex].ucDirection = pstChangeIndication->u8Direction;
+ if (pstChangeIndication->sfActiveSet.bValid == TRUE)
+ Adapter->PackInfo[uiSearchRuleIndex].bActiveSet = TRUE;
+
+ if (pstChangeIndication->sfAuthorizedSet.bValid == TRUE)
+ Adapter->PackInfo[uiSearchRuleIndex].bAuthorizedSet = TRUE;
+
+ if (pstChangeIndication->sfAdmittedSet.bValid == TRUE)
+ Adapter->PackInfo[uiSearchRuleIndex].bAdmittedSet = TRUE;
+
+ if (pstChangeIndication->sfActiveSet.bValid == FALSE) {
+ Adapter->PackInfo[uiSearchRuleIndex].bActive = FALSE;
+ Adapter->PackInfo[uiSearchRuleIndex].bActivateRequestSent = FALSE;
+
+ if (pstChangeIndication->sfAdmittedSet.bValid)
+ psfLocalSet = &pstChangeIndication->sfAdmittedSet;
+ else if (pstChangeIndication->sfAuthorizedSet.bValid)
+ psfLocalSet = &pstChangeIndication->sfAuthorizedSet;
+ } else {
+ psfLocalSet = &pstChangeIndication->sfActiveSet;
+ Adapter->PackInfo[uiSearchRuleIndex].bActive = TRUE;
}
- else
- {
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_PRINTK, 0, 0, "DSC ACK did not get valid SFID");
+
+ if (!psfLocalSet) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "No set is valid\n");
+ Adapter->PackInfo[uiSearchRuleIndex].bActive = FALSE;
+ Adapter->PackInfo[uiSearchRuleIndex].bValid = FALSE;
+ Adapter->PackInfo[uiSearchRuleIndex].usVCID_Value = 0;
+ kfree(pstAddIndication);
+ } else if (psfLocalSet->bValid && (pstChangeIndication->u8CC == 0)) {
+ Adapter->PackInfo[uiSearchRuleIndex].usVCID_Value = ntohs(pstChangeIndication->u16VCID);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "CC field is %d bvalid = %d\n",
+ pstChangeIndication->u8CC, psfLocalSet->bValid);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "VCID= %d\n", ntohs(pstChangeIndication->u16VCID));
+ Adapter->PackInfo[uiSearchRuleIndex].usCID = ntohs(pstChangeIndication->u16CID);
+ CopyToAdapter(Adapter, psfLocalSet, uiSearchRuleIndex, DSC_ACK, pstAddIndication);
+
+ *(PULONG)(((PUCHAR)pvBuffer)+1) = psfLocalSet->u32SFID;
+ } else if (pstChangeIndication->u8CC == 6) {
+ deleteSFBySfid(Adapter, uiSearchRuleIndex);
kfree(pstAddIndication);
- return FALSE;
}
+ } else {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "DSC ACK did not get valid SFID");
+ kfree(pstAddIndication);
+ return FALSE;
}
- break;
- case DSD_REQ:
- {
- UINT uiSearchRuleIndex;
- ULONG ulSFID;
-
- pLeader->PLength = sizeof(stLocalSFDeleteIndication);
- *((stLocalSFDeleteIndication*)&(Adapter->caDsxReqResp[LEADER_SIZE])) = *((stLocalSFDeleteIndication*)pstAddIndication);
+ }
+ break;
+ case DSD_REQ:
+ {
+ UINT uiSearchRuleIndex;
+ ULONG ulSFID;
- ulSFID = ntohl(((stLocalSFDeleteIndication*)pstAddIndication)->u32SFID);
- uiSearchRuleIndex=SearchSfid(Adapter,ulSFID);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "DSD - Removing connection %x",uiSearchRuleIndex);
+ pLeader->PLength = sizeof(stLocalSFDeleteIndication);
+ *((stLocalSFDeleteIndication *)&(Adapter->caDsxReqResp[LEADER_SIZE])) = *((stLocalSFDeleteIndication *)pstAddIndication);
- if(uiSearchRuleIndex < NO_OF_QUEUES)
- {
- //Delete All Classifiers Associated with this SFID
- deleteSFBySfid(Adapter,uiSearchRuleIndex);
- Adapter->u32TotalDSD++;
- }
+ ulSFID = ntohl(((stLocalSFDeleteIndication *)pstAddIndication)->u32SFID);
+ uiSearchRuleIndex = SearchSfid(Adapter, ulSFID);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "DSD - Removing connection %x", uiSearchRuleIndex);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "SENDING DSD RESPONSE TO MAC");
- ((stLocalSFDeleteIndication*)&(Adapter->caDsxReqResp[LEADER_SIZE]))->u8Type = DSD_RSP;
- CopyBufferToControlPacket(Adapter,(PVOID)Adapter->caDsxReqResp);
- }
- case DSD_RSP:
- {
- //Do nothing as SF has already got Deleted
+ if (uiSearchRuleIndex < NO_OF_QUEUES) {
+ /* Delete All Classifiers Associated with this SFID */
+ deleteSFBySfid(Adapter, uiSearchRuleIndex);
+ Adapter->u32TotalDSD++;
}
- break;
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "SENDING DSD RESPONSE TO MAC");
+ ((stLocalSFDeleteIndication *)&(Adapter->caDsxReqResp[LEADER_SIZE]))->u8Type = DSD_RSP;
+ CopyBufferToControlPacket(Adapter, (PVOID)Adapter->caDsxReqResp);
+ }
+ case DSD_RSP:
+ {
+		/* Do nothing as the SF has already been deleted */
+ }
+ break;
case DSD_ACK:
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "DSD ACK Rcd, let App handle it\n");
- break;
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "DSD ACK Rcd, let App handle it\n");
+ break;
default:
kfree(pstAddIndication);
- return FALSE ;
+ return FALSE;
}
return TRUE;
}
@@ -2280,78 +1918,67 @@ BOOLEAN CmControlResponseMessage(PMINI_ADAPTER Adapter, /**<Pointer to the Adap
int get_dsx_sf_data_to_application(PMINI_ADAPTER Adapter, UINT uiSFId, void __user *user_buffer)
{
int status = 0;
- struct _packet_info *psSfInfo=NULL;
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "status =%d",status);
+ struct _packet_info *psSfInfo = NULL;
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "status =%d", status);
status = SearchSfid(Adapter, uiSFId);
if (status >= NO_OF_QUEUES) {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "SFID %d not present in queue !!!", uiSFId );
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "SFID %d not present in queue !!!", uiSFId);
return -EINVAL;
}
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "status =%d",status);
- psSfInfo=&Adapter->PackInfo[status];
- if(psSfInfo->pstSFIndication && copy_to_user(user_buffer,
- psSfInfo->pstSFIndication, sizeof(stLocalSFAddIndicationAlt)))
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "copy to user failed SFID %d, present in queue !!!", uiSFId );
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "status =%d", status);
+ psSfInfo = &Adapter->PackInfo[status];
+ if (psSfInfo->pstSFIndication && copy_to_user(user_buffer,
+ psSfInfo->pstSFIndication, sizeof(stLocalSFAddIndicationAlt))) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "copy to user failed SFID %d, present in queue !!!", uiSFId);
status = -EFAULT;
return status;
}
return STATUS_SUCCESS;
}
-VOID OverrideServiceFlowParams(PMINI_ADAPTER Adapter,PUINT puiBuffer)
+VOID OverrideServiceFlowParams(PMINI_ADAPTER Adapter, PUINT puiBuffer)
{
- B_UINT32 u32NumofSFsinMsg = ntohl(*(puiBuffer + 1));
+ B_UINT32 u32NumofSFsinMsg = ntohl(*(puiBuffer + 1));
stIM_SFHostNotify *pHostInfo = NULL;
- UINT uiSearchRuleIndex = 0;
- ULONG ulSFID = 0;
+ UINT uiSearchRuleIndex = 0;
+ ULONG ulSFID = 0;
- puiBuffer+=2;
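+	/* skip the two header words (the second is the SF count read above) */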
+ puiBuffer += 2;
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "u32NumofSFsinMsg: 0x%x\n", u32NumofSFsinMsg);
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "u32NumofSFsinMsg: 0x%x\n",u32NumofSFsinMsg);
-
- while(u32NumofSFsinMsg != 0 && u32NumofSFsinMsg < NO_OF_QUEUES)
- {
+ while (u32NumofSFsinMsg != 0 && u32NumofSFsinMsg < NO_OF_QUEUES) {
u32NumofSFsinMsg--;
pHostInfo = (stIM_SFHostNotify *)puiBuffer;
puiBuffer = (PUINT)(pHostInfo + 1);
ulSFID = ntohl(pHostInfo->SFID);
- uiSearchRuleIndex=SearchSfid(Adapter,ulSFID);
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"SFID: 0x%lx\n",ulSFID);
+ uiSearchRuleIndex = SearchSfid(Adapter, ulSFID);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "SFID: 0x%lx\n", ulSFID);
- if(uiSearchRuleIndex >= NO_OF_QUEUES || uiSearchRuleIndex == HiPriority)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"The SFID <%lx> doesn't exist in host entry or is Invalid\n", ulSFID);
+ if (uiSearchRuleIndex >= NO_OF_QUEUES || uiSearchRuleIndex == HiPriority) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "The SFID <%lx> doesn't exist in host entry or is Invalid\n", ulSFID);
continue;
}
- if(pHostInfo->RetainSF == FALSE)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"Going to Delete SF");
- deleteSFBySfid(Adapter,uiSearchRuleIndex);
- }
- else
- {
-
+ if (pHostInfo->RetainSF == FALSE) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Going to Delete SF");
+ deleteSFBySfid(Adapter, uiSearchRuleIndex);
+ } else {
Adapter->PackInfo[uiSearchRuleIndex].usVCID_Value = ntohs(pHostInfo->VCID);
Adapter->PackInfo[uiSearchRuleIndex].usCID = ntohs(pHostInfo->newCID);
- Adapter->PackInfo[uiSearchRuleIndex].bActive=FALSE;
+ Adapter->PackInfo[uiSearchRuleIndex].bActive = FALSE;
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"pHostInfo->QoSParamSet: 0x%x\n",pHostInfo->QoSParamSet);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "pHostInfo->QoSParamSet: 0x%x\n", pHostInfo->QoSParamSet);
- if(pHostInfo->QoSParamSet & 0x1)
- Adapter->PackInfo[uiSearchRuleIndex].bAuthorizedSet =TRUE;
- if(pHostInfo->QoSParamSet & 0x2)
- Adapter->PackInfo[uiSearchRuleIndex].bAdmittedSet =TRUE;
- if(pHostInfo->QoSParamSet & 0x4)
- {
- Adapter->PackInfo[uiSearchRuleIndex].bActiveSet =TRUE;
- Adapter->PackInfo[uiSearchRuleIndex].bActive=TRUE;
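+			/* QoSParamSet bitmask: bit 0 = authorized set, bit 1 = admitted set, bit 2 = active set */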
+ if (pHostInfo->QoSParamSet & 0x1)
+ Adapter->PackInfo[uiSearchRuleIndex].bAuthorizedSet = TRUE;
+ if (pHostInfo->QoSParamSet & 0x2)
+ Adapter->PackInfo[uiSearchRuleIndex].bAdmittedSet = TRUE;
+ if (pHostInfo->QoSParamSet & 0x4) {
+ Adapter->PackInfo[uiSearchRuleIndex].bActiveSet = TRUE;
+ Adapter->PackInfo[uiSearchRuleIndex].bActive = TRUE;
}
}
}
}
-
-
-
diff --git a/drivers/staging/bcm/led_control.h b/drivers/staging/bcm/led_control.h
index 0711ac20f6f..ed8fbc09111 100644
--- a/drivers/staging/bcm/led_control.h
+++ b/drivers/staging/bcm/led_control.h
@@ -4,11 +4,11 @@
/*************************TYPE DEF**********************/
#define NUM_OF_LEDS 4
-#define DSD_START_OFFSET 0x0200
-#define EEPROM_VERSION_OFFSET 0x020E
-#define EEPROM_HW_PARAM_POINTER_ADDRESS 0x0218
-#define EEPROM_HW_PARAM_POINTER_ADDRRES_MAP5 0x0220
-#define GPIO_SECTION_START_OFFSET 0x03
+#define DSD_START_OFFSET 0x0200
+#define EEPROM_VERSION_OFFSET 0x020E
+#define EEPROM_HW_PARAM_POINTER_ADDRESS 0x0218
+#define EEPROM_HW_PARAM_POINTER_ADDRRES_MAP5 0x0220
+#define GPIO_SECTION_START_OFFSET 0x03
#define COMPATIBILITY_SECTION_LENGTH 42
#define COMPATIBILITY_SECTION_LENGTH_MAP5 84
@@ -18,27 +18,27 @@
#define EEPROM_MAP5_MINORVERSION 0
-#define MAX_NUM_OF_BLINKS 10
-#define NUM_OF_GPIO_PINS 16
+#define MAX_NUM_OF_BLINKS 10
+#define NUM_OF_GPIO_PINS 16
-#define DISABLE_GPIO_NUM 0xFF
-#define EVENT_SIGNALED 1
+#define DISABLE_GPIO_NUM 0xFF
+#define EVENT_SIGNALED 1
-#define MAX_FILE_NAME_BUFFER_SIZE 100
+#define MAX_FILE_NAME_BUFFER_SIZE 100
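+/* BitPolarity selects whether setting or clearing the GPIO bit turns the LED on */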
-#define TURN_ON_LED(GPIO, index) do{ \
+#define TURN_ON_LED(GPIO, index) do { \
UINT gpio_val = GPIO; \
(Adapter->LEDInfo.LEDState[index].BitPolarity == 1) ? \
- wrmaltWithLock(Adapter,BCM_GPIO_OUTPUT_SET_REG, &gpio_val ,sizeof(gpio_val)) : \
- wrmaltWithLock(Adapter,BCM_GPIO_OUTPUT_CLR_REG, &gpio_val, sizeof(gpio_val)); \
- }while(0);
+ wrmaltWithLock(Adapter, BCM_GPIO_OUTPUT_SET_REG, &gpio_val, sizeof(gpio_val)) : \
+ wrmaltWithLock(Adapter, BCM_GPIO_OUTPUT_CLR_REG, &gpio_val, sizeof(gpio_val)); \
+ } while (0);
#define TURN_OFF_LED(GPIO, index) do { \
UINT gpio_val = GPIO; \
(Adapter->LEDInfo.LEDState[index].BitPolarity == 1) ? \
- wrmaltWithLock(Adapter,BCM_GPIO_OUTPUT_CLR_REG,&gpio_val ,sizeof(gpio_val)) : \
- wrmaltWithLock(Adapter,BCM_GPIO_OUTPUT_SET_REG,&gpio_val ,sizeof(gpio_val)); \
- }while(0);
+ wrmaltWithLock(Adapter, BCM_GPIO_OUTPUT_CLR_REG, &gpio_val, sizeof(gpio_val)) : \
+ wrmaltWithLock(Adapter, BCM_GPIO_OUTPUT_SET_REG, &gpio_val, sizeof(gpio_val)); \
+ } while (0);
#define B_ULONG32 unsigned long
@@ -50,7 +50,7 @@ typedef enum _LEDColors{
BLUE_LED = 2,
YELLOW_LED = 3,
GREEN_LED = 4
-} LEDColors; /*Enumerated values of different LED types*/
+} LEDColors; /*Enumerated values of different LED types*/
typedef enum LedEvents {
SHUTDOWN_EXIT = 0x00,
@@ -62,43 +62,39 @@ typedef enum LedEvents {
LOWPOWER_MODE_ENTER = 0x20,
IDLEMODE_CONTINUE = 0x40,
IDLEMODE_EXIT = 0x80,
- LED_THREAD_INACTIVE = 0x100, //Makes the LED thread Inactivce. It wil be equivallent to putting the thread on hold.
- LED_THREAD_ACTIVE = 0x200 //Makes the LED Thread Active back.
-} LedEventInfo_t; /*Enumerated values of different driver states*/
-
-#define DRIVER_HALT 0xff
-
-
-/*Structure which stores the information of different LED types
- * and corresponding LED state information of driver states*/
-typedef struct LedStateInfo_t
-{
+	LED_THREAD_INACTIVE = 0x100, /* Makes the LED thread inactive. Equivalent to putting the thread on hold. */
+	LED_THREAD_ACTIVE = 0x200, /* Makes the LED thread active again. */
+ DRIVER_HALT = 0xff
+} LedEventInfo_t; /* Enumerated values of different driver states */
+
+/*
+ * Structure which stores the information of different LED types
+ * and corresponding LED state information of driver states
+ */
+typedef struct LedStateInfo_t {
UCHAR LED_Type; /* specify GPIO number - use 0xFF if not used */
UCHAR LED_On_State; /* Bits set or reset for different states */
UCHAR LED_Blink_State; /* Bits set or reset for blinking LEDs for different states */
UCHAR GPIO_Num;
- UCHAR BitPolarity; /*To represent whether H/W is normal polarity or reverse
- polarity*/
-}LEDStateInfo, *pLEDStateInfo;
+ UCHAR BitPolarity; /* To represent whether H/W is normal polarity or reverse polarity */
+} LEDStateInfo, *pLEDStateInfo;
-typedef struct _LED_INFO_STRUCT
-{
+typedef struct _LED_INFO_STRUCT {
LEDStateInfo LEDState[NUM_OF_LEDS];
- BOOLEAN bIdleMode_tx_from_host; /*Variable to notify whether driver came out
- from idlemode due to Host or target*/
+	BOOLEAN bIdleMode_tx_from_host; /* Variable to notify whether the driver came out of idle mode due to host or target */
BOOLEAN bIdle_led_off;
wait_queue_head_t notify_led_event;
wait_queue_head_t idleModeSyncEvent;
- struct task_struct *led_cntrl_threadid;
- int led_thread_running;
+ struct task_struct *led_cntrl_threadid;
+ int led_thread_running;
BOOLEAN bLedInitDone;
} LED_INFO_STRUCT, *PLED_INFO_STRUCT;
-//LED Thread state.
-#define BCM_LED_THREAD_DISABLED 0 //LED Thread is not running.
-#define BCM_LED_THREAD_RUNNING_ACTIVELY 1 //LED thread is running.
-#define BCM_LED_THREAD_RUNNING_INACTIVELY 2 //LED thread has been put on hold
+/* LED Thread state. */
+#define BCM_LED_THREAD_DISABLED 0 /* LED Thread is not running. */
+#define BCM_LED_THREAD_RUNNING_ACTIVELY 1 /* LED thread is running. */
+#define BCM_LED_THREAD_RUNNING_INACTIVELY 2 /* LED thread has been put on hold */
diff --git a/drivers/staging/comedi/Kconfig b/drivers/staging/comedi/Kconfig
index 4c77e508066..12c691d9090 100644
--- a/drivers/staging/comedi/Kconfig
+++ b/drivers/staging/comedi/Kconfig
@@ -765,8 +765,9 @@ config COMEDI_ADV_PCI_DIO
default N
---help---
Enable support for Advantech PCI DIO cards
- PCI-1730, PCI-1733, PCI-1734, PCI-1736UP, PCI-1750, PCI-1751,
- PCI-1752, PCI-1753/E, PCI-1754, PCI-1756 and PCI-1762
+ PCI-1730, PCI-1733, PCI-1734, PCI-1735U, PCI-1736UP, PCI-1739U,
+ PCI-1750, PCI-1751, PCI-1752, PCI-1753/E, PCI-1754, PCI-1756,
+ PCI-1760 and PCI-1762
To compile this driver as a module, choose M here: the module will be
called adv_pci_dio.
diff --git a/drivers/staging/comedi/drivers/adv_pci_dio.c b/drivers/staging/comedi/drivers/adv_pci_dio.c
index 537e5853427..7af068f4a74 100644
--- a/drivers/staging/comedi/drivers/adv_pci_dio.c
+++ b/drivers/staging/comedi/drivers/adv_pci_dio.c
@@ -8,16 +8,16 @@
/*
Driver: adv_pci_dio
Description: Advantech PCI-1730, PCI-1733, PCI-1734, PCI-1735U,
- PCI-1736UP, PCI-1750, PCI-1751, PCI-1752, PCI-1753/E,
- PCI-1754, PCI-1756, PCI-1762
+ PCI-1736UP, PCI-1739U, PCI-1750, PCI-1751, PCI-1752,
+ PCI-1753/E, PCI-1754, PCI-1756, PCI-1760, PCI-1762
Author: Michal Dobes <dobes@tesnet.cz>
Devices: [Advantech] PCI-1730 (adv_pci_dio), PCI-1733,
- PCI-1734, PCI-1735U, PCI-1736UP, PCI-1750,
+ PCI-1734, PCI-1735U, PCI-1736UP, PCI-1739U, PCI-1750,
PCI-1751, PCI-1752, PCI-1753,
PCI-1753+PCI-1753E, PCI-1754, PCI-1756,
PCI-1760, PCI-1762
Status: untested
-Updated: Tue, 04 May 2010 13:00:00 +0000
+Updated: Mon, 09 Jan 2012 12:40:46 +0000
This driver supports now only insn interface for DI/DO/DIO.
@@ -51,6 +51,7 @@ Configuration options:
/* hardware types of the cards */
enum hw_cards_id {
TYPE_PCI1730, TYPE_PCI1733, TYPE_PCI1734, TYPE_PCI1735, TYPE_PCI1736,
+ TYPE_PCI1739,
TYPE_PCI1750,
TYPE_PCI1751,
TYPE_PCI1752,
@@ -109,6 +110,12 @@ enum hw_io_access {
#define PCI1736_BOARDID 4 /* R: Board I/D switch for 1736UP */
#define PCI1736_MAINREG 0 /* Normal register (2) doesn't work */
+/* Advantech PCI-1739U */
+#define PCI1739_DIO 0 /* R/W: begin of 8255 registers block */
+#define PCI1739_ICR 32 /* W: Interrupt control register */
+#define PCI1739_ISR 32 /* R: Interrupt status register */
+#define PCI1739_BOARDID 8 /* R: Board I/D switch for 1739U */
+
/* Advantech PCI-1750 */
#define PCI1750_IDI 0 /* R: Isolated digital input 0-15 */
#define PCI1750_IDO 0 /* W: Isolated digital output 0-15 */
@@ -262,6 +269,7 @@ static DEFINE_PCI_DEVICE_TABLE(pci_dio_pci_table) = {
{ PCI_DEVICE(PCI_VENDOR_ID_ADVANTECH, 0x1734) },
{ PCI_DEVICE(PCI_VENDOR_ID_ADVANTECH, 0x1735) },
{ PCI_DEVICE(PCI_VENDOR_ID_ADVANTECH, 0x1736) },
+ { PCI_DEVICE(PCI_VENDOR_ID_ADVANTECH, 0x1739) },
{ PCI_DEVICE(PCI_VENDOR_ID_ADVANTECH, 0x1750) },
{ PCI_DEVICE(PCI_VENDOR_ID_ADVANTECH, 0x1751) },
{ PCI_DEVICE(PCI_VENDOR_ID_ADVANTECH, 0x1752) },
@@ -316,6 +324,14 @@ static const struct dio_boardtype boardtypes[] = {
{4, PCI1736_BOARDID, 1, SDF_INTERNAL},
{ {0, 0, 0, 0} },
IO_8b},
+ {"pci1739", PCI_VENDOR_ID_ADVANTECH, 0x1739, PCIDIO_MAINREG,
+ TYPE_PCI1739,
+ { {0, 0, 0, 0}, {0, 0, 0, 0} },
+ { {0, 0, 0, 0}, {0, 0, 0, 0} },
+ { {48, PCI1739_DIO, 2, 0}, {0, 0, 0, 0} },
+ {0, 0, 0, 0},
+ { {0, 0, 0, 0} },
+ IO_8b},
{"pci1750", PCI_VENDOR_ID_ADVANTECH, 0x1750, PCIDIO_MAINREG,
TYPE_PCI1750,
{ {0, 0, 0, 0}, {16, PCI1750_IDI, 2, 0} },
@@ -883,6 +899,11 @@ static int pci_dio_reset(struct comedi_device *dev)
outb(0, dev->iobase + PCI1736_3_INT_RF);
break;
+ case TYPE_PCI1739:
+ /* disable & clear interrupts */
+ outb(0x88, dev->iobase + PCI1739_ICR);
+ break;
+
case TYPE_PCI1750:
case TYPE_PCI1751:
/* disable & clear interrupts */
diff --git a/drivers/staging/comedi/drivers/dt2801.c b/drivers/staging/comedi/drivers/dt2801.c
index 5cce1b5f448..b85c8366a39 100644
--- a/drivers/staging/comedi/drivers/dt2801.c
+++ b/drivers/staging/comedi/drivers/dt2801.c
@@ -720,12 +720,20 @@ static int dt2801_dio_insn_config(struct comedi_device *dev,
which = 1;
/* configure */
- if (data[0]) {
+ switch (data[0]) {
+ case INSN_CONFIG_DIO_OUTPUT:
s->io_bits = 0xff;
dt2801_writecmd(dev, DT_C_SET_DIGOUT);
- } else {
+ break;
+ case INSN_CONFIG_DIO_INPUT:
s->io_bits = 0;
dt2801_writecmd(dev, DT_C_SET_DIGIN);
+ break;
+ case INSN_CONFIG_DIO_QUERY:
+ data[1] = s->io_bits ? COMEDI_OUTPUT : COMEDI_INPUT;
+ return insn->n;
+ default:
+ return -EINVAL;
}
dt2801_writedata(dev, which);
diff --git a/drivers/staging/comedi/drivers/dt9812.c b/drivers/staging/comedi/drivers/dt9812.c
index 32d9c42e965..e86ab586289 100644
--- a/drivers/staging/comedi/drivers/dt9812.c
+++ b/drivers/staging/comedi/drivers/dt9812.c
@@ -527,7 +527,7 @@ static void dt9812_configure_gain(struct usb_dt9812 *dev,
* 11x -> Gain = 0.5
*/
case DT9812_GAIN_0PT5:
- rmw->or_value = F020_MASK_ADC0CF_AMP0GN2 ||
+ rmw->or_value = F020_MASK_ADC0CF_AMP0GN2 |
F020_MASK_ADC0CF_AMP0GN1;
break;
case DT9812_GAIN_1:
@@ -540,7 +540,7 @@ static void dt9812_configure_gain(struct usb_dt9812 *dev,
rmw->or_value = F020_MASK_ADC0CF_AMP0GN1;
break;
case DT9812_GAIN_8:
- rmw->or_value = F020_MASK_ADC0CF_AMP0GN1 ||
+ rmw->or_value = F020_MASK_ADC0CF_AMP0GN1 |
F020_MASK_ADC0CF_AMP0GN0;
break;
case DT9812_GAIN_16:
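The two hunks above replace logical OR with bitwise OR: with ||, the expression
evaluates to 1 (boolean true) rather than the union of the two mask bits, so the
wrong gain was programmed. A minimal illustration (the mask values here are
invented for the example):

/* Illustration only -- these mask values are made up. */
#define GN1 0x04
#define GN0 0x02

int wrong = GN1 || GN0;	/* logical OR: yields 1 */
int right = GN1 | GN0;	/* bitwise OR: yields 0x06, both bits set */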
diff --git a/drivers/staging/comedi/drivers/me4000.c b/drivers/staging/comedi/drivers/me4000.c
index b692fea0d2b..b0bc6bb877a 100644
--- a/drivers/staging/comedi/drivers/me4000.c
+++ b/drivers/staging/comedi/drivers/me4000.c
@@ -2098,23 +2098,29 @@ static int me4000_dio_insn_config(struct comedi_device *dev,
CALL_PDEBUG("In me4000_dio_insn_config()\n");
- if (data[0] == INSN_CONFIG_DIO_QUERY) {
+ switch (data[0]) {
+ default:
+ return -EINVAL;
+ case INSN_CONFIG_DIO_QUERY:
data[1] =
(s->io_bits & (1 << chan)) ? COMEDI_OUTPUT : COMEDI_INPUT;
return insn->n;
+ case INSN_CONFIG_DIO_INPUT:
+ case INSN_CONFIG_DIO_OUTPUT:
+ break;
}
/*
* The input or output configuration of each digital line is
* configured by a special insn_config instruction. chanspec
* contains the channel to be changed, and data[0] contains the
- * value COMEDI_INPUT or COMEDI_OUTPUT.
+ * value INSN_CONFIG_DIO_INPUT or INSN_CONFIG_DIO_OUTPUT.
* On the ME-4000 it is only possible to switch port wise (8 bit)
*/
tmp = me4000_inl(dev, info->dio_context.ctrl_reg);
- if (data[0] == COMEDI_OUTPUT) {
+ if (data[0] == INSN_CONFIG_DIO_OUTPUT) {
if (chan < 8) {
s->io_bits |= 0xFF;
tmp &= ~(ME4000_DIO_CTRL_BIT_MODE_0 |
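Both the dt2801 and me4000 hunks move the drivers to the standard comedi
insn_config protocol: data[0] carries an INSN_CONFIG_DIO_* request rather than a
bare COMEDI_INPUT/COMEDI_OUTPUT value. (INSN_CONFIG_DIO_INPUT and
INSN_CONFIG_DIO_OUTPUT happen to share the numeric values 0 and 1 with
COMEDI_INPUT and COMEDI_OUTPUT, which is why the old comparisons appeared to
work; INSN_CONFIG_DIO_QUERY does not.) A sketch of the canonical per-channel
handler shape, not tied to any particular board:

switch (data[0]) {
case INSN_CONFIG_DIO_OUTPUT:
	s->io_bits |= 1 << chan;	/* mark channel as output */
	break;
case INSN_CONFIG_DIO_INPUT:
	s->io_bits &= ~(1 << chan);	/* mark channel as input */
	break;
case INSN_CONFIG_DIO_QUERY:
	data[1] = (s->io_bits & (1 << chan)) ? COMEDI_OUTPUT : COMEDI_INPUT;
	return insn->n;
default:
	return -EINVAL;
}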
diff --git a/drivers/staging/comedi/drivers/ni_pcidio.c b/drivers/staging/comedi/drivers/ni_pcidio.c
index 045a4c00f34..1df8fcbcd10 100644
--- a/drivers/staging/comedi/drivers/ni_pcidio.c
+++ b/drivers/staging/comedi/drivers/ni_pcidio.c
@@ -30,7 +30,7 @@ Status: works
Devices: [National Instruments] PCI-DIO-32HS (ni_pcidio), PXI-6533,
PCI-DIO-96, PCI-DIO-96B, PXI-6508, PCI-6503, PCI-6503B, PCI-6503X,
PXI-6503, PCI-6533, PCI-6534
-Updated: Sun, 21 Apr 2002 21:03:38 -0700
+Updated: Mon, 09 Jan 2012 14:27:23 +0000
The DIO-96 appears as four 8255 subdevices. See the 8255
driver notes for details.
@@ -42,6 +42,11 @@ supports simple digital I/O; no handshaking is supported.
DMA mostly works for the PCI-DIO32HS, but only in timed input mode.
+The PCI-DIO-32HS/PCI-6533 has a configurable external trigger. Setting
+scan_begin_arg to 0 or CR_EDGE triggers on the leading edge. Setting
+scan_begin_arg to CR_INVERT or (CR_EDGE | CR_INVERT) triggers on the
+trailing edge.
+
This driver could be easily modified to support AT-MIO32HS and
AT-MIO96.
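A minimal sketch of how a comedi command would request the trailing-edge trigger
described above (only the relevant struct comedi_cmd fields are shown):

cmd->scan_begin_src = TRIG_EXT;
cmd->scan_begin_arg = CR_EDGE | CR_INVERT;	/* trailing edge */
/* 0 or CR_EDGE in scan_begin_arg selects the leading edge instead */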
@@ -436,6 +441,7 @@ static int ni_pcidio_request_di_mite_channel(struct comedi_device *dev)
comedi_error(dev, "failed to reserve mite dma channel.");
return -EBUSY;
}
+ devpriv->di_mite_chan->dir = COMEDI_INPUT;
writeb(primary_DMAChannel_bits(devpriv->di_mite_chan->channel) |
secondary_DMAChannel_bits(devpriv->di_mite_chan->channel),
devpriv->mite->daq_io_addr + DMA_Line_Control_Group1);
@@ -482,6 +488,21 @@ void ni_pcidio_event(struct comedi_device *dev, struct comedi_subdevice *s)
comedi_event(dev, s);
}
+static int ni_pcidio_poll(struct comedi_device *dev, struct comedi_subdevice *s)
+{
+ unsigned long irq_flags;
+ int count;
+
+ spin_lock_irqsave(&dev->spinlock, irq_flags);
+ spin_lock(&devpriv->mite_channel_lock);
+ if (devpriv->di_mite_chan)
+ mite_sync_input_dma(devpriv->di_mite_chan, s->async);
+ spin_unlock(&devpriv->mite_channel_lock);
+ count = s->async->buf_write_count - s->async->buf_read_count;
+ spin_unlock_irqrestore(&dev->spinlock, irq_flags);
+ return count;
+}
+
static irqreturn_t nidio_interrupt(int irq, void *d)
{
struct comedi_device *dev = d;
@@ -497,7 +518,6 @@ static irqreturn_t nidio_interrupt(int irq, void *d)
int status;
int work = 0;
unsigned int m_status = 0;
- unsigned long irq_flags;
/* spurious interrupts */
if (dev->attached == 0) {
@@ -505,6 +525,9 @@ static irqreturn_t nidio_interrupt(int irq, void *d)
return IRQ_NONE;
}
+ /* Lock to avoid race with comedi_poll */
+ spin_lock(&dev->spinlock);
+
status = readb(devpriv->mite->daq_io_addr +
Interrupt_And_Window_Status);
flags = readb(devpriv->mite->daq_io_addr + Group_1_Flags);
@@ -518,7 +541,7 @@ static irqreturn_t nidio_interrupt(int irq, void *d)
/* printk("buf[4096]=%08x\n",
*(unsigned int *)(async->prealloc_buf+4096)); */
- spin_lock_irqsave(&devpriv->mite_channel_lock, irq_flags);
+ spin_lock(&devpriv->mite_channel_lock);
if (devpriv->di_mite_chan)
m_status = mite_get_status(devpriv->di_mite_chan);
#ifdef MITE_DEBUG
@@ -543,7 +566,7 @@ static irqreturn_t nidio_interrupt(int irq, void *d)
disable_irq(dev->irq);
}
}
- spin_unlock_irqrestore(&devpriv->mite_channel_lock, irq_flags);
+ spin_unlock(&devpriv->mite_channel_lock);
while (status & DataLeft) {
work++;
@@ -645,6 +668,8 @@ out:
Master_DMA_And_Interrupt_Control);
}
#endif
+
+ spin_unlock(&dev->spinlock);
return IRQ_HANDLED;
}
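The locking added above pairs the new ni_pcidio_poll() with the interrupt
handler: both take dev->spinlock before devpriv->mite_channel_lock, so the DMA
sync in the poll path cannot race the ISR. The resulting ordering, as a sketch:

/* Lock ordering after this change (sketch):
 *
 *   ni_pcidio_poll():   spin_lock_irqsave(&dev->spinlock, ...)
 *                         spin_lock(&devpriv->mite_channel_lock)
 *   nidio_interrupt():  spin_lock(&dev->spinlock)
 *                         spin_lock(&devpriv->mite_channel_lock)
 *
 * The ISR can use the plain spin_lock variants because it already
 * runs with interrupts disabled on the local CPU.
 */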
@@ -825,8 +850,8 @@ static int ni_pcidio_cmdtest(struct comedi_device *dev,
} else {
/* TRIG_EXT */
/* should be level/edge, hi/lo specification here */
- if (cmd->scan_begin_arg != 0) {
- cmd->scan_begin_arg = 0;
+ if ((cmd->scan_begin_arg & ~(CR_EDGE | CR_INVERT)) != 0) {
+ cmd->scan_begin_arg &= (CR_EDGE | CR_INVERT);
err++;
}
}
@@ -941,7 +966,13 @@ static int ni_pcidio_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
writeb(0, devpriv->mite->daq_io_addr + Sequence);
writeb(0x00, devpriv->mite->daq_io_addr + ReqReg);
writeb(4, devpriv->mite->daq_io_addr + BlockMode);
- writeb(0, devpriv->mite->daq_io_addr + LinePolarities);
+ if (!(cmd->scan_begin_arg & CR_INVERT)) {
+ /* Leading Edge pulse mode */
+ writeb(0, devpriv->mite->daq_io_addr + LinePolarities);
+ } else {
+ /* Trailing Edge pulse mode */
+ writeb(2, devpriv->mite->daq_io_addr + LinePolarities);
+ }
writeb(0x00, devpriv->mite->daq_io_addr + AckSer);
writel(1, devpriv->mite->daq_io_addr + StartDelay);
writeb(1, devpriv->mite->daq_io_addr + ReqDelay);
@@ -1005,17 +1036,24 @@ static int ni_pcidio_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
static int setup_mite_dma(struct comedi_device *dev, struct comedi_subdevice *s)
{
int retval;
+ unsigned long flags;
retval = ni_pcidio_request_di_mite_channel(dev);
if (retval)
return retval;
- devpriv->di_mite_chan->dir = COMEDI_INPUT;
+ /* write alloc the entire buffer */
+ comedi_buf_write_alloc(s->async, s->async->prealloc_bufsz);
- mite_prep_dma(devpriv->di_mite_chan, 32, 32);
+ spin_lock_irqsave(&devpriv->mite_channel_lock, flags);
+ if (devpriv->di_mite_chan) {
+ mite_prep_dma(devpriv->di_mite_chan, 32, 32);
+ mite_dma_arm(devpriv->di_mite_chan);
+ } else
+ retval = -EIO;
+ spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
- mite_dma_arm(devpriv->di_mite_chan);
- return 0;
+ return retval;
}
static int ni_pcidio_inttrig(struct comedi_device *dev,
@@ -1244,6 +1282,7 @@ static int nidio_attach(struct comedi_device *dev, struct comedi_devconfig *it)
s->len_chanlist = 32; /* XXX */
s->buf_change = &ni_pcidio_change;
s->async_dma_dir = DMA_BIDIRECTIONAL;
+ s->poll = &ni_pcidio_poll;
writel(0, devpriv->mite->daq_io_addr + Port_IO(0));
writel(0, devpriv->mite->daq_io_addr + Port_Pin_Directions(0));
diff --git a/drivers/staging/comedi/drivers/ni_pcimio.c b/drivers/staging/comedi/drivers/ni_pcimio.c
index 0f0d995f137..27baefa32b1 100644
--- a/drivers/staging/comedi/drivers/ni_pcimio.c
+++ b/drivers/staging/comedi/drivers/ni_pcimio.c
@@ -29,14 +29,15 @@ Devices: [National Instruments] PCI-MIO-16XE-50 (ni_pcimio),
PCI-MIO-16XE-10, PXI-6030E, PCI-MIO-16E-1, PCI-MIO-16E-4, PCI-6014, PCI-6040E,
PXI-6040E, PCI-6030E, PCI-6031E, PCI-6032E, PCI-6033E, PCI-6071E, PCI-6023E,
PCI-6024E, PCI-6025E, PXI-6025E, PCI-6034E, PCI-6035E, PCI-6052E,
- PCI-6110, PCI-6111, PCI-6220, PCI-6221, PCI-6224, PXI-6224, PCI-6225, PXI-6225,
- PCI-6229, PCI-6250, PCI-6251, PCIe-6251, PCI-6254, PCI-6259, PCIe-6259,
+ PCI-6110, PCI-6111, PCI-6220, PCI-6221, PCI-6224, PXI-6224,
+ PCI-6225, PXI-6225, PCI-6229, PCI-6250, PCI-6251, PCIe-6251, PXIe-6251,
+ PCI-6254, PCI-6259, PCIe-6259,
PCI-6280, PCI-6281, PXI-6281, PCI-6284, PCI-6289,
PCI-6711, PXI-6711, PCI-6713, PXI-6713,
PXI-6071E, PCI-6070E, PXI-6070E,
PXI-6052E, PCI-6036E, PCI-6731, PCI-6733, PXI-6733,
PCI-6143, PXI-6143
-Updated: Wed, 03 Dec 2008 10:51:47 +0000
+Updated: Mon, 09 Jan 2012 14:52:48 +0000
These boards are almost identical to the AT-MIO E series, except that
they use the PCI bus instead of ISA (i.e., AT). See the notes for
@@ -182,6 +183,7 @@ static DEFINE_PCI_DEVICE_TABLE(ni_pci_table) = {
{PCI_DEVICE(PCI_VENDOR_ID_NI, 0x717f)},
{PCI_DEVICE(PCI_VENDOR_ID_NI, 0x71bc)},
{PCI_DEVICE(PCI_VENDOR_ID_NI, 0x717d)},
+ {PCI_DEVICE(PCI_VENDOR_ID_NI, 0x72e8)},
{0}
};
@@ -1046,6 +1048,25 @@ static const struct ni_board_struct ni_boards[] = {
.has_8255 = 0,
},
{
+ .device_id = 0x72e8,
+ .name = "pxie-6251",
+ .n_adchan = 16,
+ .adbits = 16,
+ .ai_fifo_depth = 4095,
+ .gainlkup = ai_gain_628x,
+ .ai_speed = 800,
+ .n_aochan = 2,
+ .aobits = 16,
+ .ao_fifo_depth = 8191,
+ .ao_range_table = &range_ni_M_625x_ao,
+ .reg_type = ni_reg_625x,
+ .ao_unipolar = 0,
+ .ao_speed = 357,
+ .num_p0_dio_channels = 8,
+ .caldac = {caldac_none},
+ .has_8255 = 0,
+ },
+ {
.device_id = 0x70b7,
.name = "pci-6254",
.n_adchan = 32,
diff --git a/drivers/staging/comedi/drivers/unioxx5.c b/drivers/staging/comedi/drivers/unioxx5.c
index 89e62aa134b..f45824f0d86 100644
--- a/drivers/staging/comedi/drivers/unioxx5.c
+++ b/drivers/staging/comedi/drivers/unioxx5.c
@@ -306,7 +306,7 @@ static int __unioxx5_subdev_init(struct comedi_subdevice *subdev,
usp = kzalloc(sizeof(*usp), GFP_KERNEL);
if (usp == NULL) {
- printk(KERN_ERR "comedi%d: erorr! --> out of memory!\n", minor);
+ printk(KERN_ERR "comedi%d: error! --> out of memory!\n", minor);
return -1;
}
diff --git a/drivers/staging/comedi/drivers/usbduxsigma.c b/drivers/staging/comedi/drivers/usbduxsigma.c
index ca6bcf8b023..63c9b6dbc31 100644
--- a/drivers/staging/comedi/drivers/usbduxsigma.c
+++ b/drivers/staging/comedi/drivers/usbduxsigma.c
@@ -39,7 +39,7 @@ Status: testing
*
*
* Revision history:
- * 0.1: inital version
+ * 0.1: initial version
* 0.2: all basic functions implemented, digital I/O only for one port
* 0.3: proper vendor ID and driver name
* 0.4: fixed D/A voltage range
@@ -235,16 +235,16 @@ struct usbduxsub {
short int ao_cmd_running;
/* pwm is running */
short int pwm_cmd_running;
- /* continous aquisition */
- short int ai_continous;
- short int ao_continous;
+ /* continuous acquisition */
+ short int ai_continuous;
+ short int ao_continuous;
/* number of samples to acquire */
int ai_sample_count;
int ao_sample_count;
/* time between samples in units of the timer */
unsigned int ai_timer;
unsigned int ao_timer;
- /* counter between aquisitions */
+ /* counter between acquisitions */
unsigned int ai_counter;
unsigned int ao_counter;
/* interval in frames/uframes */
@@ -455,8 +455,8 @@ static void usbduxsub_ai_IsocIrq(struct urb *urb)
this_usbduxsub->ai_counter = this_usbduxsub->ai_timer;
/* test, if we transmit only a fixed number of samples */
- if (!(this_usbduxsub->ai_continous)) {
- /* not continous, fixed number of samples */
+ if (!(this_usbduxsub->ai_continuous)) {
+ /* not continuous, fixed number of samples */
this_usbduxsub->ai_sample_count--;
/* all samples received? */
if (this_usbduxsub->ai_sample_count < 0) {
@@ -607,8 +607,8 @@ static void usbduxsub_ao_IsocIrq(struct urb *urb)
/* timer zero */
this_usbduxsub->ao_counter = this_usbduxsub->ao_timer;
- /* handle non continous aquisition */
- if (!(this_usbduxsub->ao_continous)) {
+ /* handle non continuous acquisition */
+ if (!(this_usbduxsub->ao_continuous)) {
/* fixed number of samples */
this_usbduxsub->ao_sample_count--;
if (this_usbduxsub->ao_sample_count < 0) {
@@ -925,7 +925,7 @@ static int usbdux_ai_cmdtest(struct comedi_device *dev,
if (!cmd->scan_begin_src || tmp != cmd->scan_begin_src)
err++;
- /* scanning is continous */
+ /* scanning is continuous */
tmp = cmd->convert_src;
cmd->convert_src &= TRIG_NOW;
if (!cmd->convert_src || tmp != cmd->convert_src)
@@ -1193,7 +1193,7 @@ static int usbdux_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
up(&this_usbduxsub->sem);
return -EBUSY;
}
- /* set current channel of the running aquisition to zero */
+ /* set current channel of the running acquisition to zero */
s->async->cur_chan = 0;
/* first the number of channels per time step */
@@ -1261,10 +1261,10 @@ static int usbdux_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
if (cmd->stop_src == TRIG_COUNT) {
/* data arrives as one packet */
this_usbduxsub->ai_sample_count = cmd->stop_arg;
- this_usbduxsub->ai_continous = 0;
+ this_usbduxsub->ai_continuous = 0;
} else {
- /* continous aquisition */
- this_usbduxsub->ai_continous = 1;
+ /* continuous acquisition */
+ this_usbduxsub->ai_continuous = 1;
this_usbduxsub->ai_sample_count = 0;
}
@@ -1586,7 +1586,7 @@ static int usbdux_ao_cmdtest(struct comedi_device *dev,
/* just now we scan also in the high speed mode every frame */
/* this is due to ehci driver limitations */
if (0) { /* (this_usbduxsub->high_speed) */
- /* start immidiately a new scan */
+ /* start immediately a new scan */
/* the sampling rate is set by the coversion rate */
cmd->scan_begin_src &= TRIG_FOLLOW;
} else {
@@ -1596,7 +1596,7 @@ static int usbdux_ao_cmdtest(struct comedi_device *dev,
if (!cmd->scan_begin_src || tmp != cmd->scan_begin_src)
err++;
- /* scanning is continous */
+ /* scanning is continuous */
tmp = cmd->convert_src;
/* all conversion events happen simultaneously */
@@ -1710,7 +1710,7 @@ static int usbdux_ao_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
dev_dbg(&this_usbduxsub->interface->dev,
"comedi%d: %s\n", dev->minor, __func__);
- /* set current channel of the running aquisition to zero */
+ /* set current channel of the running acquisition to zero */
s->async->cur_chan = 0;
for (i = 0; i < cmd->chanlist_len; ++i) {
chan = CR_CHAN(cmd->chanlist[i]);
@@ -1759,7 +1759,7 @@ static int usbdux_ao_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
this_usbduxsub->ao_counter = this_usbduxsub->ao_timer;
if (cmd->stop_src == TRIG_COUNT) {
- /* not continous */
+ /* not continuous */
/* counter */
/* high speed also scans everything at once */
if (0) { /* (this_usbduxsub->high_speed) */
@@ -1771,10 +1771,10 @@ static int usbdux_ao_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
/* data arrives as one packet */
this_usbduxsub->ao_sample_count = cmd->stop_arg;
}
- this_usbduxsub->ao_continous = 0;
+ this_usbduxsub->ao_continuous = 0;
} else {
- /* continous aquisition */
- this_usbduxsub->ao_continous = 1;
+ /* continuous acquisition */
+ this_usbduxsub->ao_continuous = 1;
this_usbduxsub->ao_sample_count = 0;
}
diff --git a/drivers/staging/crystalhd/bc_dts_glob_lnx.h b/drivers/staging/crystalhd/bc_dts_glob_lnx.h
index bbe5119761f..fd1a6e680c8 100644
--- a/drivers/staging/crystalhd/bc_dts_glob_lnx.h
+++ b/drivers/staging/crystalhd/bc_dts_glob_lnx.h
@@ -48,8 +48,7 @@
#endif
-#include "bc_dts_defs.h"
-#include "bcm_70012_regs.h" /* Link Register defs */
+#include "crystalhd.h"
#define CRYSTALHD_API_NAME "crystalhd"
#define CRYSTALHD_API_DEV_NAME "/dev/crystalhd"
diff --git a/drivers/staging/crystalhd/bc_dts_types.h b/drivers/staging/crystalhd/bc_dts_types.h
deleted file mode 100644
index 1085a91221b..00000000000
--- a/drivers/staging/crystalhd/bc_dts_types.h
+++ /dev/null
@@ -1,40 +0,0 @@
-/********************************************************************
- * Copyright(c) 2006-2009 Broadcom Corporation.
- *
- * Name: bc_dts_types.h
- *
- * Description: Data types
- *
- * AU
- *
- * HISTORY:
- *
- ********************************************************************
- * This header is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as published
- * by the Free Software Foundation, either version 2.1 of the License.
- *
- * This header is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Lesser General Public License for more details.
- * You should have received a copy of the GNU Lesser General Public License
- * along with this header. If not, see <http://www.gnu.org/licenses/>.
- *******************************************************************/
-
-#ifndef _BC_DTS_TYPES_H_
-#define _BC_DTS_TYPES_H_
-
-#include <stdint.h>
-
-#ifndef TRUE
- #define TRUE 1
-#endif
-
-#ifndef FALSE
- #define FALSE 0
-#endif
-
-#define TEXT
-
-#endif
diff --git a/drivers/staging/crystalhd/crystalhd.h b/drivers/staging/crystalhd/crystalhd.h
new file mode 100644
index 00000000000..3f4d7951502
--- /dev/null
+++ b/drivers/staging/crystalhd/crystalhd.h
@@ -0,0 +1,14 @@
+#ifndef _CRYSTALHD_H_
+#define _CRYSTALHD_H_
+
+#include <asm/system.h>
+#include "bc_dts_defs.h"
+#include "crystalhd_misc.h"
+#include "bc_dts_glob_lnx.h"
+#include "crystalhd_hw.h"
+#include "crystalhd_cmds.h"
+#include "crystalhd_lnx.h"
+#include "bcm_70012_regs.h"
+#include "crystalhd_fw_if.h"
+
+#endif
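Several of the headers converted below (crystalhd_misc.h, for instance) include
crystalhd.h back, so this umbrella-header scheme relies on the ordinary include
guards to terminate the cycle. The pattern, sketched:

/* crystalhd_misc.h (sketch) */
#ifndef _CRYSTALHD_MISC_H_
#define _CRYSTALHD_MISC_H_
#include "crystalhd.h"	/* re-inclusion is a no-op: _CRYSTALHD_H_ is set */
/* ... */
#endif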
diff --git a/drivers/staging/crystalhd/crystalhd_cmds.c b/drivers/staging/crystalhd/crystalhd_cmds.c
index 3735ed3da4c..05fe78748df 100644
--- a/drivers/staging/crystalhd/crystalhd_cmds.c
+++ b/drivers/staging/crystalhd/crystalhd_cmds.c
@@ -24,8 +24,7 @@
* along with this driver. If not, see <http://www.gnu.org/licenses/>.
**********************************************************************/
-#include "crystalhd_cmds.h"
-#include "crystalhd_hw.h"
+#include "crystalhd.h"
static struct crystalhd_user *bc_cproc_get_uid(struct crystalhd_cmd *ctx)
{
diff --git a/drivers/staging/crystalhd/crystalhd_cmds.h b/drivers/staging/crystalhd/crystalhd_cmds.h
index f0a2796045c..4066ba393a1 100644
--- a/drivers/staging/crystalhd/crystalhd_cmds.h
+++ b/drivers/staging/crystalhd/crystalhd_cmds.h
@@ -33,8 +33,8 @@
* from _dts_glob and dts_defs etc.. which are defined for
* windows.
*/
-#include "crystalhd_misc.h"
-#include "crystalhd_hw.h"
+
+#include "crystalhd.h"
enum crystalhd_state {
BC_LINK_INVALID = 0x00,
diff --git a/drivers/staging/crystalhd/crystalhd_hw.c b/drivers/staging/crystalhd/crystalhd_hw.c
index 5acf39e7cde..e617d2fcbb1 100644
--- a/drivers/staging/crystalhd/crystalhd_hw.c
+++ b/drivers/staging/crystalhd/crystalhd_hw.c
@@ -22,10 +22,11 @@
* along with this driver. If not, see <http://www.gnu.org/licenses/>.
**********************************************************************/
+#include "crystalhd.h"
+
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/delay.h>
-#include "crystalhd_hw.h"
/* Functions internal to this file */
@@ -766,7 +767,7 @@ static enum BC_STATUS crystalhd_hw_fill_desc(struct crystalhd_dio_req *ioreq,
crystalhd_hw_dump_desc(desc, last_desc_ix, 1);
if (count != xfr_sz) {
- BCMLOG_ERR("interal error sz curr:%x exp:%x\n", count, xfr_sz);
+ BCMLOG_ERR("internal error sz curr:%x exp:%x\n", count, xfr_sz);
return BC_STS_ERROR;
}
@@ -868,8 +869,7 @@ static enum BC_STATUS crystalhd_stop_tx_dma_engine(struct crystalhd_hw *hw)
BCMLOG(BCMLOG_DBG, "Stopping TX DMA Engine..\n");
- /* FIXME: jarod: invert dma_ctrl and check bit? or are there missing parens? */
- if (!dma_cntrl & DMA_START_BIT) {
+ if (!(dma_cntrl & DMA_START_BIT)) {
BCMLOG(BCMLOG_DBG, "Already Stopped\n");
return BC_STS_SUCCESS;
}
@@ -1628,7 +1628,6 @@ enum BC_STATUS crystalhd_download_fw(struct crystalhd_adp *adp, void *buffer, ui
uint32_t fw_sig_len = 36;
uint32_t dram_offset = BC_FWIMG_ST_ADDR, sig_reg;
- BCMLOG_ENTER;
if (!adp || !buffer || !sz) {
BCMLOG_ERR("Invalid Params.\n");
@@ -1725,8 +1724,6 @@ enum BC_STATUS crystalhd_do_fw_cmd(struct crystalhd_hw *hw,
crystalhd_create_event(&fw_cmd_event);
- BCMLOG_ENTER;
-
if (!hw || !fw_cmd) {
BCMLOG_ERR("Invalid Arguments\n");
return BC_STS_INV_ARG;
diff --git a/drivers/staging/crystalhd/crystalhd_hw.h b/drivers/staging/crystalhd/crystalhd_hw.h
index 3efbf9d4ff5..2d0e6c6005e 100644
--- a/drivers/staging/crystalhd/crystalhd_hw.h
+++ b/drivers/staging/crystalhd/crystalhd_hw.h
@@ -27,8 +27,7 @@
#ifndef _CRYSTALHD_HW_H_
#define _CRYSTALHD_HW_H_
-#include "crystalhd_misc.h"
-#include "crystalhd_fw_if.h"
+#include "crystalhd.h"
/* HW constants..*/
#define DMA_ENGINE_CNT 2
diff --git a/drivers/staging/crystalhd/crystalhd_lnx.c b/drivers/staging/crystalhd/crystalhd_lnx.c
index 7e0c199f689..d9e3d618f7f 100644
--- a/drivers/staging/crystalhd/crystalhd_lnx.c
+++ b/drivers/staging/crystalhd/crystalhd_lnx.c
@@ -15,10 +15,11 @@
along with this driver. If not, see <http://www.gnu.org/licenses/>.
***************************************************************************/
+#include "crystalhd.h"
+
#include <linux/mutex.h>
#include <linux/slab.h>
-#include "crystalhd_lnx.h"
static DEFINE_MUTEX(chd_dec_mutex);
static struct class *crystalhd_class;
@@ -298,7 +299,6 @@ static int chd_dec_open(struct inode *in, struct file *fd)
enum BC_STATUS sts = BC_STS_SUCCESS;
struct crystalhd_user *uc = NULL;
- BCMLOG_ENTER;
if (!adp) {
BCMLOG_ERR("Invalid adp\n");
return -EINVAL;
@@ -327,7 +327,6 @@ static int chd_dec_close(struct inode *in, struct file *fd)
struct crystalhd_adp *adp = chd_get_adp();
struct crystalhd_user *uc;
- BCMLOG_ENTER;
if (!adp) {
BCMLOG_ERR("Invalid adp\n");
return -EINVAL;
@@ -513,8 +512,6 @@ static void __devexit chd_dec_pci_remove(struct pci_dev *pdev)
struct crystalhd_adp *pinfo;
enum BC_STATUS sts = BC_STS_SUCCESS;
- BCMLOG_ENTER;
-
pinfo = pci_get_drvdata(pdev);
if (!pinfo) {
BCMLOG_ERR("could not get adp\n");
diff --git a/drivers/staging/crystalhd/crystalhd_lnx.h b/drivers/staging/crystalhd/crystalhd_lnx.h
index a2b5a56be6d..a81f9298b0a 100644
--- a/drivers/staging/crystalhd/crystalhd_lnx.h
+++ b/drivers/staging/crystalhd/crystalhd_lnx.h
@@ -1,7 +1,7 @@
/***************************************************************************
* Copyright (c) 2005-2009, Broadcom Corporation.
*
- * Name: crystalhd_lnx . c
+ * Name: crystalhd_lnx.h
*
* Description:
* BCM70012 Linux driver
@@ -48,11 +48,10 @@
#include <asm/system.h>
#include <linux/uaccess.h>
-#include "crystalhd_cmds.h"
+#include "crystalhd.h"
#define CRYSTAL_HD_NAME "Broadcom Crystal HD Decoder (BCM70012) Driver"
-
/* OS specific PCI information structure and adapter information. */
struct crystalhd_adp {
/* Hardware board/PCI specifics */
diff --git a/drivers/staging/crystalhd/crystalhd_misc.c b/drivers/staging/crystalhd/crystalhd_misc.c
index 5fa0c6e10ce..b3a637814a1 100644
--- a/drivers/staging/crystalhd/crystalhd_misc.c
+++ b/drivers/staging/crystalhd/crystalhd_misc.c
@@ -24,10 +24,9 @@
* along with this driver. If not, see <http://www.gnu.org/licenses/>.
**********************************************************************/
-#include <linux/slab.h>
+#include "crystalhd.h"
-#include "crystalhd_misc.h"
-#include "crystalhd_lnx.h"
+#include <linux/slab.h>
uint32_t g_linklog_level;
diff --git a/drivers/staging/crystalhd/crystalhd_misc.h b/drivers/staging/crystalhd/crystalhd_misc.h
index 4d617235742..84c87938a83 100644
--- a/drivers/staging/crystalhd/crystalhd_misc.h
+++ b/drivers/staging/crystalhd/crystalhd_misc.h
@@ -28,6 +28,8 @@
#ifndef _CRYSTALHD_MISC_H_
#define _CRYSTALHD_MISC_H_
+#include "crystalhd.h"
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
@@ -35,8 +37,6 @@
#include <linux/ioctl.h>
#include <linux/dma-mapping.h>
#include <linux/sched.h>
-#include <asm/system.h>
-#include "bc_dts_glob_lnx.h"
/* Global log level variable defined in crystal_misc.c file */
extern uint32_t g_linklog_level;
@@ -200,29 +200,21 @@ enum _chd_log_levels {
BCMLOG_INFO = 0x00000001, /* Generic informational */
BCMLOG_DBG = 0x00000002, /* First level Debug info */
BCMLOG_SSTEP = 0x00000004, /* Stepping information */
- BCMLOG_ENTER_LEAVE = 0x00000008, /* stack tracking */
};
-#define BCMLOG_ENTER \
-if (g_linklog_level & BCMLOG_ENTER_LEAVE) { \
- printk(KERN_DEBUG "Entered %s\n", __func__); \
-}
-#define BCMLOG_LEAVE \
-if (g_linklog_level & BCMLOG_ENTER_LEAVE) { \
- printk(KERN_DEBUG "Leaving %s\n", __func__); \
-}
+#define BCMLOG(trace, fmt, args...) \
+do { \
+ if (g_linklog_level & trace) \
+ printk(fmt, ##args); \
+} while (0)
-#define BCMLOG(trace, fmt, args...) \
-if (g_linklog_level & trace) { \
- printk(fmt, ##args); \
-}
-#define BCMLOG_ERR(fmt, args...) \
-do { \
- if (g_linklog_level & BCMLOG_ERROR) { \
- printk(KERN_ERR "*ERR*:%s:%d: "fmt, __FILE__, __LINE__, ##args); \
- } \
-} while (0);
+#define BCMLOG_ERR(fmt, args...) \
+do { \
+ if (g_linklog_level & BCMLOG_ERROR) \
+ printk(KERN_ERR "*ERR*:%s:%d: "fmt, \
+ __FILE__, __LINE__, ##args); \
+} while (0)
#endif
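The rewritten macros wrap their bodies in do { ... } while (0) so they expand
safely inside unbraced if/else chains, and the stray semicolon that used to
follow BCMLOG_ERR's while (0) is gone. Call sites are unchanged, e.g. (taken
from elsewhere in this series):

BCMLOG(BCMLOG_DBG, "Stopping TX DMA Engine..\n");
BCMLOG_ERR("Invalid Arguments\n");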
diff --git a/drivers/staging/et131x/README b/drivers/staging/et131x/README
index 3458aa713a3..82657233c8b 100644
--- a/drivers/staging/et131x/README
+++ b/drivers/staging/et131x/README
@@ -11,6 +11,6 @@ TODO:
- Use of kmem_cache seems a bit unusual
Please send patches to:
- Greg Kroah-Hartman <gregkh@suse.de>
+ Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mark Einon <mark.einon@gmail.com>
diff --git a/drivers/staging/et131x/et131x.c b/drivers/staging/et131x/et131x.c
index 2c4069fcd98..3f919babe79 100644
--- a/drivers/staging/et131x/et131x.c
+++ b/drivers/staging/et131x/et131x.c
@@ -802,7 +802,7 @@ static int et131x_init_eeprom(struct et131x_adapter *adapter)
/* THIS IS A WORKAROUND:
* I need to call this function twice to get my card in a
* LG M1 Express Dual running. I tried also a msleep before this
- * function, because I thougth there could be some time condidions
+ * function, because I thought there could be some timing conditions
* but it didn't work. Calling the whole function twice also works.
*/
if (pci_read_config_byte(pdev, ET1310_PCI_EEPROM_STATUS, &eestatus)) {
@@ -987,7 +987,7 @@ static void et1310_config_mac_regs1(struct et131x_adapter *adapter)
writel(station1, &macregs->station_addr_1);
writel(station2, &macregs->station_addr_2);
- /* Max ethernet packet in bytes that will passed by the mac without
+ /* Max ethernet packet in bytes that will be passed by the mac without
* being truncated. Allow the MAC to pass 4 more than our max packet
* size. This is 4 for the Ethernet CRC.
*
@@ -3109,7 +3109,7 @@ static struct rfd *nic_rx_pkts(struct et131x_adapter *adapter)
skb->protocol = eth_type_trans(skb, adapter->netdev);
skb->ip_summed = CHECKSUM_NONE;
- netif_rx(skb);
+ netif_rx_ni(skb);
} else {
rfd->len = 0;
}
@@ -4413,7 +4413,7 @@ static void et131x_up(struct net_device *netdev)
/**
* et131x_down - Bring down the device
- * @netdev: device to be broght down
+ * @netdev: device to be brought down
*/
static void et131x_down(struct net_device *netdev)
{
@@ -5177,7 +5177,7 @@ static int et131x_set_mac_addr(struct net_device *netdev, void *new_mac)
/* Make sure the requested MAC is valid */
if (!is_valid_ether_addr(address->sa_data))
- return -EINVAL;
+ return -EADDRNOTAVAIL;
et131x_disable_txrx(netdev);
et131x_handle_send_interrupt(adapter);
diff --git a/drivers/staging/et131x/et131x.h b/drivers/staging/et131x/et131x.h
index 7eed3c8986f..864379b4e8d 100644
--- a/drivers/staging/et131x/et131x.h
+++ b/drivers/staging/et131x/et131x.h
@@ -596,7 +596,7 @@ struct rxdma_regs { /* Location: */
* structure for tx test reg in txmac address map
* located at address 0x3014
* 31-17: unused
- * 16: reserved1
+ * 16: reserved
* 15: txtest_en
* 14-11: unused
* 10-0: txq test pointer
@@ -1485,7 +1485,7 @@ struct address_map {
* 3: reserved
* 2: ignore_10g_fr
* 1: reserved
- * 0: preamble_supress_en
+ * 0: preamble_suppress_en
*/
/* MI Register 22: PHY Configuration Reg(0x16)
diff --git a/drivers/staging/frontier/alphatrack.c b/drivers/staging/frontier/alphatrack.c
index d8efed65744..3bf0f40e97f 100644
--- a/drivers/staging/frontier/alphatrack.c
+++ b/drivers/staging/frontier/alphatrack.c
@@ -450,7 +450,7 @@ exit:
/**
* usb_alphatrack_poll
*/
-static unsigned int usb_alphatrack_poll(struct file *file, poll_table * wait)
+static unsigned int usb_alphatrack_poll(struct file *file, poll_table *wait)
{
struct usb_alphatrack *dev;
unsigned int mask = 0;
diff --git a/drivers/staging/frontier/tranzport.c b/drivers/staging/frontier/tranzport.c
index cf47a5d191f..29e99bbcae4 100644
--- a/drivers/staging/frontier/tranzport.c
+++ b/drivers/staging/frontier/tranzport.c
@@ -471,7 +471,7 @@ exit:
/**
* usb_tranzport_poll
*/
-static unsigned int usb_tranzport_poll(struct file *file, poll_table * wait)
+static unsigned int usb_tranzport_poll(struct file *file, poll_table *wait)
{
struct usb_tranzport *dev;
unsigned int mask = 0;
diff --git a/drivers/staging/ft1000/ft1000-pcmcia/ft1000_proc.c b/drivers/staging/ft1000/ft1000-pcmcia/ft1000_proc.c
index 7faeadad1ff..71aaad31270 100644
--- a/drivers/staging/ft1000/ft1000-pcmcia/ft1000_proc.c
+++ b/drivers/staging/ft1000/ft1000-pcmcia/ft1000_proc.c
@@ -29,10 +29,10 @@
#define FT1000_PROC "ft1000"
#define MAX_FILE_LEN 255
-#define PUTM_TO_PAGE(len,page,args...) \
+#define PUTM_TO_PAGE(len, page, args...) \
len += snprintf(page+len, PAGE_SIZE - len, args)
-#define PUTX_TO_PAGE(len,page,message,size,var) \
+#define PUTX_TO_PAGE(len, page, message, size, var) \
len += snprintf(page+len, PAGE_SIZE - len, message); \
for(i = 0; i < (size - 1); i++) \
{ \
@@ -40,7 +40,7 @@
} \
len += snprintf(page+len, PAGE_SIZE - len, "%02x\n", var[i])
-#define PUTD_TO_PAGE(len,page,message,size,var) \
+#define PUTD_TO_PAGE(len, page, message, size, var) \
len += snprintf(page+len, PAGE_SIZE - len, message); \
for(i = 0; i < (size - 1); i++) \
{ \
diff --git a/drivers/staging/hv/Kconfig b/drivers/staging/hv/Kconfig
deleted file mode 100644
index 60ac479a290..00000000000
--- a/drivers/staging/hv/Kconfig
+++ /dev/null
@@ -1,5 +0,0 @@
-config HYPERV_STORAGE
- tristate "Microsoft Hyper-V virtual storage driver"
- depends on HYPERV && SCSI
- help
- Select this option to enable the Hyper-V virtual storage driver.
diff --git a/drivers/staging/hv/Makefile b/drivers/staging/hv/Makefile
deleted file mode 100644
index af95a6b7e43..00000000000
--- a/drivers/staging/hv/Makefile
+++ /dev/null
@@ -1,3 +0,0 @@
-obj-$(CONFIG_HYPERV_STORAGE) += hv_storvsc.o
-
-hv_storvsc-y := storvsc_drv.o
diff --git a/drivers/staging/hv/TODO b/drivers/staging/hv/TODO
deleted file mode 100644
index dea7d92dfdc..00000000000
--- a/drivers/staging/hv/TODO
+++ /dev/null
@@ -1,5 +0,0 @@
-TODO:
- - audit the scsi driver
-
-Please send patches for this code to Greg Kroah-Hartman <gregkh@suse.de>,
-Haiyang Zhang <haiyangz@microsoft.com>, and K. Y. Srinivasan <kys@microsoft.com>
diff --git a/drivers/staging/iio/Documentation/device.txt b/drivers/staging/iio/Documentation/device.txt
index 1abb80cb884..8926f2448cc 100644
--- a/drivers/staging/iio/Documentation/device.txt
+++ b/drivers/staging/iio/Documentation/device.txt
@@ -62,7 +62,7 @@ Then fill in the following:
An optional associated buffer.
- indio_dev->pollfunc:
Poll function related elements. This controls what occurs when a trigger
- to which this device is attached sends and event.
+ to which this device is attached sends an event.
- indio_dev->channels:
Specification of device channels. Most attributes etc are built
from this spec.
diff --git a/drivers/staging/iio/Documentation/iio_event_monitor.c b/drivers/staging/iio/Documentation/iio_event_monitor.c
new file mode 100644
index 00000000000..0d21a277305
--- /dev/null
+++ b/drivers/staging/iio/Documentation/iio_event_monitor.c
@@ -0,0 +1,241 @@
+/* Industrialio event test code.
+ *
+ * Copyright (c) 2011-2012 Lars-Peter Clausen <lars@metafoo.de>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is primarily intended as an example application.
+ * Waits on the event fd provided by the specified IIO device and
+ * pretty prints each event as it arrives.
+ *
+ * Usage:
+ * iio_event_monitor <device_name>
+ *
+ */
+
+#define _GNU_SOURCE
+
+#include <unistd.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <errno.h>
+#include <string.h>
+#include <poll.h>
+#include <fcntl.h>
+#include <sys/ioctl.h>
+#include "iio_utils.h"
+#include "../events.h"
+
+static const char * const iio_chan_type_name_spec[] = {
+ [IIO_VOLTAGE] = "voltage",
+ [IIO_CURRENT] = "current",
+ [IIO_POWER] = "power",
+ [IIO_ACCEL] = "accel",
+ [IIO_ANGL_VEL] = "anglvel",
+ [IIO_MAGN] = "magn",
+ [IIO_LIGHT] = "illuminance",
+ [IIO_INTENSITY] = "intensity",
+ [IIO_PROXIMITY] = "proximity",
+ [IIO_TEMP] = "temp",
+ [IIO_INCLI] = "incli",
+ [IIO_ROT] = "rot",
+ [IIO_ANGL] = "angl",
+ [IIO_TIMESTAMP] = "timestamp",
+ [IIO_CAPACITANCE] = "capacitance",
+};
+
+static const char * const iio_ev_type_text[] = {
+ [IIO_EV_TYPE_THRESH] = "thresh",
+ [IIO_EV_TYPE_MAG] = "mag",
+ [IIO_EV_TYPE_ROC] = "roc",
+ [IIO_EV_TYPE_THRESH_ADAPTIVE] = "thresh_adaptive",
+ [IIO_EV_TYPE_MAG_ADAPTIVE] = "mag_adaptive",
+};
+
+static const char * const iio_ev_dir_text[] = {
+ [IIO_EV_DIR_EITHER] = "either",
+ [IIO_EV_DIR_RISING] = "rising",
+ [IIO_EV_DIR_FALLING] = "falling"
+};
+
+static const char * const iio_modifier_names[] = {
+ [IIO_MOD_X] = "x",
+ [IIO_MOD_Y] = "y",
+ [IIO_MOD_Z] = "z",
+ [IIO_MOD_LIGHT_BOTH] = "both",
+ [IIO_MOD_LIGHT_IR] = "ir",
+};
+
+static bool event_is_known(struct iio_event_data *event)
+{
+ enum iio_chan_type type = IIO_EVENT_CODE_EXTRACT_CHAN_TYPE(event->id);
+ enum iio_modifier mod = IIO_EVENT_CODE_EXTRACT_MODIFIER(event->id);
+ enum iio_event_type ev_type = IIO_EVENT_CODE_EXTRACT_TYPE(event->id);
+ enum iio_event_direction dir = IIO_EVENT_CODE_EXTRACT_DIR(event->id);
+
+ switch (type) {
+ case IIO_VOLTAGE:
+ case IIO_CURRENT:
+ case IIO_POWER:
+ case IIO_ACCEL:
+ case IIO_ANGL_VEL:
+ case IIO_MAGN:
+ case IIO_LIGHT:
+ case IIO_INTENSITY:
+ case IIO_PROXIMITY:
+ case IIO_TEMP:
+ case IIO_INCLI:
+ case IIO_ROT:
+ case IIO_ANGL:
+ case IIO_TIMESTAMP:
+ case IIO_CAPACITANCE:
+ break;
+ default:
+ return false;
+ }
+
+ switch (mod) {
+ case IIO_NO_MOD:
+ case IIO_MOD_X:
+ case IIO_MOD_Y:
+ case IIO_MOD_Z:
+ case IIO_MOD_LIGHT_BOTH:
+ case IIO_MOD_LIGHT_IR:
+ break;
+ default:
+ return false;
+ }
+
+ switch (ev_type) {
+ case IIO_EV_TYPE_THRESH:
+ case IIO_EV_TYPE_MAG:
+ case IIO_EV_TYPE_ROC:
+ case IIO_EV_TYPE_THRESH_ADAPTIVE:
+ case IIO_EV_TYPE_MAG_ADAPTIVE:
+ break;
+ default:
+ return false;
+ }
+
+ switch (dir) {
+ case IIO_EV_DIR_EITHER:
+ case IIO_EV_DIR_RISING:
+ case IIO_EV_DIR_FALLING:
+ break;
+ default:
+ return false;
+ }
+
+ return true;
+}
+
+static void print_event(struct iio_event_data *event)
+{
+ enum iio_chan_type type = IIO_EVENT_CODE_EXTRACT_CHAN_TYPE(event->id);
+ enum iio_modifier mod = IIO_EVENT_CODE_EXTRACT_MODIFIER(event->id);
+ enum iio_event_type ev_type = IIO_EVENT_CODE_EXTRACT_TYPE(event->id);
+ enum iio_event_direction dir = IIO_EVENT_CODE_EXTRACT_DIR(event->id);
+ int chan = IIO_EVENT_CODE_EXTRACT_CHAN(event->id);
+ int chan2 = IIO_EVENT_CODE_EXTRACT_CHAN2(event->id);
+ bool diff = IIO_EVENT_CODE_EXTRACT_DIFF(event->id);
+
+ if (!event_is_known(event)) {
+ printf("Unknown event: time: %lld, id: %llx\n",
+ event->timestamp, event->id);
+ return;
+ }
+
+ printf("Event: time: %lld, ", event->timestamp);
+
+ if (mod != IIO_NO_MOD) {
+ printf("type: %s(%s), ",
+ iio_chan_type_name_spec[type],
+ iio_modifier_names[mod]);
+ } else {
+ printf("type: %s, ",
+ iio_chan_type_name_spec[type]);
+ }
+
+ if (diff && chan >= 0 && chan2 >= 0)
+ printf("channel: %d-%d, ", chan, chan2);
+ else if (chan >= 0)
+ printf("channel: %d, ", chan);
+
+ printf("evtype: %s, direction: %s\n",
+ iio_ev_type_text[ev_type],
+ iio_ev_dir_text[dir]);
+}
+
+int main(int argc, char **argv)
+{
+ struct iio_event_data event;
+ const char *device_name;
+ char *chrdev_name;
+ int ret;
+ int dev_num;
+ int fd, event_fd;
+
+ if (argc <= 1) {
+ printf("Usage: %s <device_name>\n", argv[0]);
+ return -1;
+ }
+
+ device_name = argv[1];
+
+ dev_num = find_type_by_name(device_name, "iio:device");
+ if (dev_num >= 0) {
+ printf("Found IIO device with name %s with device number %d\n",
+ device_name, dev_num);
+ ret = asprintf(&chrdev_name, "/dev/iio:device%d", dev_num);
+ if (ret < 0) {
+ ret = -ENOMEM;
+ goto error_ret;
+ }
+ } else {
+ /* If we can't find an IIO device by name assume device_name is an
+ IIO chrdev */
+ chrdev_name = strdup(device_name);
+ }
+
+ fd = open(chrdev_name, 0);
+ if (fd == -1) {
+ fprintf(stdout, "Failed to open %s\n", chrdev_name);
+ ret = -errno;
+ goto error_free_chrdev_name;
+ }
+
+ ret = ioctl(fd, IIO_GET_EVENT_FD_IOCTL, &event_fd);
+
+ close(fd);
+
+ if (ret == -1 || event_fd == -1) {
+ fprintf(stdout, "Failed to retrieve event fd\n");
+ ret = -errno;
+ goto error_free_chrdev_name;
+ }
+
+ while (true) {
+ ret = read(event_fd, &event, sizeof(event));
+ if (ret == -1) {
+ if (errno == EAGAIN) {
+ printf("nothing available\n");
+ continue;
+ } else {
+ perror("Failed to read event from device");
+ ret = -errno;
+ break;
+ }
+ }
+
+ print_event(&event);
+ }
+
+ close(event_fd);
+error_free_chrdev_name:
+ free(chrdev_name);
+error_ret:
+ return ret;
+}
diff --git a/drivers/staging/iio/Documentation/inkernel.txt b/drivers/staging/iio/Documentation/inkernel.txt
new file mode 100644
index 00000000000..a05823e955d
--- /dev/null
+++ b/drivers/staging/iio/Documentation/inkernel.txt
@@ -0,0 +1,58 @@
+Industrial I/O Subsystem: in-kernel consumers.
+
+The IIO subsystem can act as a layer under other elements of the kernel
+providing a means of obtaining ADC type readings or of driving DAC type
+signals. The functionality supported will grow as use cases arise.
+
+Describing the channel mapping (iio/machine.h)
+
+Channel associations are described using:
+
+struct iio_map {
+ const char *adc_channel_label;
+ const char *consumer_dev_name;
+ const char *consumer_channel;
+};
+
+adc_channel_label identifies the channel on the IIO device by being
+matched against the datasheet_name field of the iio_chan_spec.
+
+consumer_dev_name allows identification of the consumer device.
+These are then used to find the channel mapping from the consumer device (see
+below).
+
+Finally, consumer_channel is a string identifying the channel to the consumer
+(perhaps 'battery_voltage' or similar).
+
+An array of these structures is then passed to the IIO driver.
+
+Supporting in-kernel interfaces in the driver (driver.h)
+
+The driver must provide datasheet_name values for its channels and
+must pass the iio_map structures and a pointer to its own iio_dev structure
+on to the core via a call to iio_map_array_register. On removal,
+iio_map_array_unregister reverses this process.
+
+The result of this is that the IIO core now has all the information needed
+to associate a given channel with the consumer requesting it.
+
+Acting as an IIO consumer (consumer.h)
+
+The consumer first has to obtain an iio_channel structure from the core
+by calling iio_channel_get(). The correct channel is identified by:
+
+* matching dev or dev_name against consumer_dev and consumer_dev_name
+* matching consumer_channel against consumer_channel in the map
+
+There are then a number of functions that can be used to get information
+about this channel such as its current reading.
+
+e.g.
+iio_st_read_channel_raw() - get a reading
+iio_st_read_channel_type() - get the type of channel
+
+There is also provision for retrieving all of the channels associated
+with a given consumer. This is useful for generic drivers such as
+iio_hwmon where the number and naming of channels are not known by the
+consumer driver. To do this, use iio_st_channel_get_all.
+
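A minimal sketch tying the pieces above together (the names follow the
structures and functions described in this document; exact signatures and error
handling are elided):

static struct iio_map board_iio_map[] = {
	{
		.adc_channel_label = "channel_0",	/* matches datasheet_name */
		.consumer_dev_name = "iio_hwmon.0",
		.consumer_channel = "battery_voltage",
	},
	{ },	/* terminator */
};

/* ADC driver probe: */
iio_map_array_register(indio_dev, board_iio_map);

/* Consumer: */
struct iio_channel *chan = iio_channel_get(dev, "battery_voltage");
int val;
iio_st_read_channel_raw(chan, &val);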
diff --git a/drivers/staging/iio/Kconfig b/drivers/staging/iio/Kconfig
index 90162aa8b2d..fe158671888 100644
--- a/drivers/staging/iio/Kconfig
+++ b/drivers/staging/iio/Kconfig
@@ -11,6 +11,13 @@ menuconfig IIO
number of different physical interfaces (i2c, spi, etc). See
drivers/staging/iio/Documentation for more information.
if IIO
+config IIO_ST_HWMON
+ tristate "Hwmon driver that uses channels specified via iio maps"
+ depends on HWMON
+ help
+ This is a platform driver that in combination with a suitable
+ map allows IIO devices to provide basic hwmon functionality
+ for those channels specified in the map.
config IIO_BUFFER
bool "Enable buffer support within IIO"
@@ -79,7 +86,7 @@ config IIO_SIMPLE_DUMMY
help
Driver intended mainly as documentation for how to write
a driver. May also be useful for testing userspace code
- without hardward.
+ without hardware.
if IIO_SIMPLE_DUMMY
diff --git a/drivers/staging/iio/Makefile b/drivers/staging/iio/Makefile
index 1340aead18b..5075291dda7 100644
--- a/drivers/staging/iio/Makefile
+++ b/drivers/staging/iio/Makefile
@@ -3,7 +3,7 @@
#
obj-$(CONFIG_IIO) += industrialio.o
-industrialio-y := industrialio-core.o
+industrialio-y := industrialio-core.o industrialio-event.o inkern.o
industrialio-$(CONFIG_IIO_BUFFER) += industrialio-buffer.o
industrialio-$(CONFIG_IIO_TRIGGER) += industrialio-trigger.o
@@ -17,6 +17,8 @@ iio_dummy-$(CONFIG_IIO_SIMPLE_DUMMY_BUFFER) += iio_simple_dummy_buffer.o
obj-$(CONFIG_IIO_DUMMY_EVGEN) += iio_dummy_evgen.o
+obj-$(CONFIG_IIO_ST_HWMON) += iio_hwmon.o
+
obj-y += accel/
obj-y += adc/
obj-y += addac/
diff --git a/drivers/staging/iio/accel/adis16201_ring.c b/drivers/staging/iio/accel/adis16201_ring.c
index 26c610faee3..97f9e6b159d 100644
--- a/drivers/staging/iio/accel/adis16201_ring.c
+++ b/drivers/staging/iio/accel/adis16201_ring.c
@@ -115,9 +115,7 @@ int adis16201_configure_ring(struct iio_dev *indio_dev)
return ret;
}
indio_dev->buffer = ring;
- /* Effectively select the ring buffer implementation */
ring->scan_timestamp = true;
- ring->access = &ring_sw_access_funcs;
indio_dev->setup_ops = &adis16201_ring_setup_ops;
indio_dev->pollfunc = iio_alloc_pollfunc(&iio_pollfunc_store_time,
diff --git a/drivers/staging/iio/accel/adis16203_ring.c b/drivers/staging/iio/accel/adis16203_ring.c
index 064640d15e4..6a8963db4f6 100644
--- a/drivers/staging/iio/accel/adis16203_ring.c
+++ b/drivers/staging/iio/accel/adis16203_ring.c
@@ -117,9 +117,7 @@ int adis16203_configure_ring(struct iio_dev *indio_dev)
return ret;
}
indio_dev->buffer = ring;
- /* Effectively select the ring buffer implementation */
ring->scan_timestamp = true;
- ring->access = &ring_sw_access_funcs;
indio_dev->setup_ops = &adis16203_ring_setup_ops;
indio_dev->pollfunc = iio_alloc_pollfunc(&iio_pollfunc_store_time,
diff --git a/drivers/staging/iio/accel/adis16204_ring.c b/drivers/staging/iio/accel/adis16204_ring.c
index 4081179dfa5..5c8ab733886 100644
--- a/drivers/staging/iio/accel/adis16204_ring.c
+++ b/drivers/staging/iio/accel/adis16204_ring.c
@@ -112,8 +112,6 @@ int adis16204_configure_ring(struct iio_dev *indio_dev)
return ret;
}
indio_dev->buffer = ring;
- /* Effectively select the ring buffer implementation */
- ring->access = &ring_sw_access_funcs;
ring->scan_timestamp = true;
indio_dev->setup_ops = &adis16204_ring_setup_ops;
diff --git a/drivers/staging/iio/accel/adis16209_ring.c b/drivers/staging/iio/accel/adis16209_ring.c
index 2a6fd334f5f..57254b6b38b 100644
--- a/drivers/staging/iio/accel/adis16209_ring.c
+++ b/drivers/staging/iio/accel/adis16209_ring.c
@@ -113,8 +113,6 @@ int adis16209_configure_ring(struct iio_dev *indio_dev)
return ret;
}
indio_dev->buffer = ring;
- /* Effectively select the ring buffer implementation */
- ring->access = &ring_sw_access_funcs;
ring->scan_timestamp = true;
indio_dev->setup_ops = &adis16209_ring_setup_ops;
diff --git a/drivers/staging/iio/accel/adis16240_ring.c b/drivers/staging/iio/accel/adis16240_ring.c
index e23622d96f9..43ba84e993a 100644
--- a/drivers/staging/iio/accel/adis16240_ring.c
+++ b/drivers/staging/iio/accel/adis16240_ring.c
@@ -110,8 +110,6 @@ int adis16240_configure_ring(struct iio_dev *indio_dev)
return ret;
}
indio_dev->buffer = ring;
- /* Effectively select the ring buffer implementation */
- ring->access = &ring_sw_access_funcs;
ring->scan_timestamp = true;
indio_dev->setup_ops = &adis16240_ring_setup_ops;
diff --git a/drivers/staging/iio/accel/lis3l02dq.h b/drivers/staging/iio/accel/lis3l02dq.h
index 2db383fc274..ae5f225b4bb 100644
--- a/drivers/staging/iio/accel/lis3l02dq.h
+++ b/drivers/staging/iio/accel/lis3l02dq.h
@@ -187,12 +187,10 @@ void lis3l02dq_unconfigure_buffer(struct iio_dev *indio_dev);
#ifdef CONFIG_LIS3L02DQ_BUF_RING_SW
#define lis3l02dq_free_buf iio_sw_rb_free
#define lis3l02dq_alloc_buf iio_sw_rb_allocate
-#define lis3l02dq_access_funcs ring_sw_access_funcs
#endif
#ifdef CONFIG_LIS3L02DQ_BUF_KFIFO
#define lis3l02dq_free_buf iio_kfifo_free
#define lis3l02dq_alloc_buf iio_kfifo_allocate
-#define lis3l02dq_access_funcs kfifo_access_funcs
#endif
irqreturn_t lis3l02dq_data_rdy_trig_poll(int irq, void *private);
#define lis3l02dq_th lis3l02dq_data_rdy_trig_poll
diff --git a/drivers/staging/iio/accel/lis3l02dq_ring.c b/drivers/staging/iio/accel/lis3l02dq_ring.c
index 98c5c92d345..0fc3973f32a 100644
--- a/drivers/staging/iio/accel/lis3l02dq_ring.c
+++ b/drivers/staging/iio/accel/lis3l02dq_ring.c
@@ -239,7 +239,7 @@ static int lis3l02dq_data_rdy_trigger_set_state(struct iio_trigger *trig,
__lis3l02dq_write_data_ready_config(&indio_dev->dev, state);
if (state == false) {
/*
- * A possible quirk with teh handler is currently worked around
+ * A possible quirk with the handler is currently worked around
* by ensuring outstanding read events are cleared.
*/
ret = lis3l02dq_read_all(indio_dev, NULL);
@@ -406,8 +406,6 @@ int lis3l02dq_configure_buffer(struct iio_dev *indio_dev)
return -ENOMEM;
indio_dev->buffer = buffer;
- /* Effectively select the buffer implementation */
- indio_dev->buffer->access = &lis3l02dq_access_funcs;
buffer->scan_timestamp = true;
indio_dev->setup_ops = &lis3l02dq_buffer_setup_ops;
diff --git a/drivers/staging/iio/accel/sca3000.h b/drivers/staging/iio/accel/sca3000.h
index ad38dd955cd..131daac9001 100644
--- a/drivers/staging/iio/accel/sca3000.h
+++ b/drivers/staging/iio/accel/sca3000.h
@@ -136,7 +136,7 @@
#define SCA3000_INT_MASK_ACTIVE_HIGH 0x01
#define SCA3000_INT_MASK_ACTIVE_LOW 0x00
-/* Values of mulipexed registers (write to ctrl_data after select) */
+/* Values of multiplexed registers (write to ctrl_data after select) */
#define SCA3000_REG_ADDR_CTRL_DATA 0x22
/* Measurement modes available on some sca3000 series chips. Code assumes others
diff --git a/drivers/staging/iio/adc/Kconfig b/drivers/staging/iio/adc/Kconfig
index d9decea4fa6..592eabd85f3 100644
--- a/drivers/staging/iio/adc/Kconfig
+++ b/drivers/staging/iio/adc/Kconfig
@@ -193,4 +193,13 @@ config MAX1363_RING_BUFFER
Say yes here to include ring buffer support in the MAX1363
ADC driver.
+config LPC32XX_ADC
+ tristate "NXP LPC32XX ADC"
+ depends on ARCH_LPC32XX && !TOUCHSCREEN_LPC32XX
+ help
+ Say yes here to build support for the integrated ADC inside the
+ LPC32XX SoC. Note that this feature uses the same hardware as the
+ touchscreen driver, so you can only select one of the two drivers
+ (lpc32xx_adc or lpc32xx_ts). Provides direct access via sysfs.
+
endmenu
diff --git a/drivers/staging/iio/adc/Makefile b/drivers/staging/iio/adc/Makefile
index ceee7f3c306..f83ab9551d8 100644
--- a/drivers/staging/iio/adc/Makefile
+++ b/drivers/staging/iio/adc/Makefile
@@ -37,3 +37,4 @@ obj-$(CONFIG_AD7192) += ad7192.o
obj-$(CONFIG_ADT7310) += adt7310.o
obj-$(CONFIG_ADT7410) += adt7410.o
obj-$(CONFIG_AD7280) += ad7280a.o
+obj-$(CONFIG_LPC32XX_ADC) += lpc32xx_adc.o
diff --git a/drivers/staging/iio/adc/ad7192.c b/drivers/staging/iio/adc/ad7192.c
index 45f4504ed92..9fd6d63d299 100644
--- a/drivers/staging/iio/adc/ad7192.c
+++ b/drivers/staging/iio/adc/ad7192.c
@@ -561,8 +561,6 @@ static int ad7192_register_ring_funcs_and_init(struct iio_dev *indio_dev)
ret = -ENOMEM;
goto error_ret;
}
- /* Effectively select the ring buffer implementation */
- indio_dev->buffer->access = &ring_sw_access_funcs;
indio_dev->pollfunc = iio_alloc_pollfunc(&iio_pollfunc_store_time,
&ad7192_trigger_handler,
IRQF_ONESHOT,
@@ -824,25 +822,20 @@ static struct attribute *ad7192_attributes[] = {
NULL
};
-static umode_t ad7192_attr_is_visible(struct kobject *kobj,
- struct attribute *attr, int n)
-{
- struct device *dev = container_of(kobj, struct device, kobj);
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct ad7192_state *st = iio_priv(indio_dev);
-
- umode_t mode = attr->mode;
-
- if ((st->devid != ID_AD7195) &&
- (attr == &iio_dev_attr_ac_excitation_en.dev_attr.attr))
- mode = 0;
-
- return mode;
-}
-
static const struct attribute_group ad7192_attribute_group = {
.attrs = ad7192_attributes,
- .is_visible = ad7192_attr_is_visible,
+};
+
+static struct attribute *ad7195_attributes[] = {
+ &iio_dev_attr_sampling_frequency.dev_attr.attr,
+ &iio_dev_attr_in_v_m_v_scale_available.dev_attr.attr,
+ &iio_dev_attr_in_voltage_scale_available.dev_attr.attr,
+ &iio_dev_attr_bridge_switch_en.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group ad7195_attribute_group = {
+ .attrs = ad7195_attributes,
};
static int ad7192_read_raw(struct iio_dev *indio_dev,
@@ -972,6 +965,15 @@ static const struct iio_info ad7192_info = {
.driver_module = THIS_MODULE,
};
+static const struct iio_info ad7195_info = {
+ .read_raw = &ad7192_read_raw,
+ .write_raw = &ad7192_write_raw,
+ .write_raw_get_fmt = &ad7192_write_raw_get_fmt,
+ .attrs = &ad7195_attribute_group,
+ .validate_trigger = ad7192_validate_trigger,
+ .driver_module = THIS_MODULE,
+};
+
#define AD7192_CHAN_DIFF(_chan, _chan2, _name, _address, _si) \
{ .type = IIO_VOLTAGE, \
.differential = 1, \
@@ -1064,7 +1066,10 @@ static int __devinit ad7192_probe(struct spi_device *spi)
indio_dev->channels = ad7192_channels;
indio_dev->num_channels = ARRAY_SIZE(ad7192_channels);
indio_dev->available_scan_masks = st->available_scan_masks;
- indio_dev->info = &ad7192_info;
+ if (st->devid == ID_AD7195)
+ indio_dev->info = &ad7195_info;
+ else
+ indio_dev->info = &ad7192_info;
for (i = 0; i < indio_dev->num_channels; i++)
st->available_scan_masks[i] = (1 << i) | (1 <<
diff --git a/drivers/staging/iio/adc/ad7291.c b/drivers/staging/iio/adc/ad7291.c
index 0a13616e3db..81d6b6128cb 100644
--- a/drivers/staging/iio/adc/ad7291.c
+++ b/drivers/staging/iio/adc/ad7291.c
@@ -321,7 +321,7 @@ static int ad7291_read_event_value(struct iio_dev *indio_dev,
switch (IIO_EVENT_CODE_EXTRACT_CHAN_TYPE(event_code)) {
case IIO_VOLTAGE:
- reg = ad7291_limit_regs[IIO_EVENT_CODE_EXTRACT_NUM(event_code)]
+ reg = ad7291_limit_regs[IIO_EVENT_CODE_EXTRACT_CHAN(event_code)]
[!(IIO_EVENT_CODE_EXTRACT_DIR(event_code) ==
IIO_EV_DIR_RISING)];
@@ -359,7 +359,7 @@ static int ad7291_write_event_value(struct iio_dev *indio_dev,
case IIO_VOLTAGE:
if (val > AD7291_VALUE_MASK || val < 0)
return -EINVAL;
- reg = ad7291_limit_regs[IIO_EVENT_CODE_EXTRACT_NUM(event_code)]
+ reg = ad7291_limit_regs[IIO_EVENT_CODE_EXTRACT_CHAN(event_code)]
[!(IIO_EVENT_CODE_EXTRACT_DIR(event_code) ==
IIO_EV_DIR_RISING)];
return ad7291_i2c_write(chip, reg, val);
@@ -386,7 +386,7 @@ static int ad7291_read_event_config(struct iio_dev *indio_dev,
switch (IIO_EVENT_CODE_EXTRACT_CHAN_TYPE(event_code)) {
case IIO_VOLTAGE:
if (chip->c_mask &
- (1 << (15 - IIO_EVENT_CODE_EXTRACT_NUM(event_code))))
+ (1 << (15 - IIO_EVENT_CODE_EXTRACT_CHAN(event_code))))
return 1;
else
return 0;
@@ -418,12 +418,12 @@ static int ad7291_write_event_config(struct iio_dev *indio_dev,
switch (IIO_EVENT_CODE_EXTRACT_TYPE(event_code)) {
case IIO_VOLTAGE:
if ((!state) && (chip->c_mask & (1 << (15 -
- IIO_EVENT_CODE_EXTRACT_NUM(event_code)))))
- chip->c_mask &= ~(1 << (15 - IIO_EVENT_CODE_EXTRACT_NUM
+ IIO_EVENT_CODE_EXTRACT_CHAN(event_code)))))
+ chip->c_mask &= ~(1 << (15 - IIO_EVENT_CODE_EXTRACT_CHAN
(event_code)));
else if (state && (!(chip->c_mask & (1 << (15 -
- IIO_EVENT_CODE_EXTRACT_NUM(event_code))))))
- chip->c_mask |= (1 << (15 - IIO_EVENT_CODE_EXTRACT_NUM
+ IIO_EVENT_CODE_EXTRACT_CHAN(event_code))))))
+ chip->c_mask |= (1 << (15 - IIO_EVENT_CODE_EXTRACT_CHAN
(event_code)));
else
break;
diff --git a/drivers/staging/iio/adc/ad7298_ring.c b/drivers/staging/iio/adc/ad7298_ring.c
index d1a12dd015e..feeb0eeba59 100644
--- a/drivers/staging/iio/adc/ad7298_ring.c
+++ b/drivers/staging/iio/adc/ad7298_ring.c
@@ -131,9 +131,6 @@ int ad7298_register_ring_funcs_and_init(struct iio_dev *indio_dev)
ret = -ENOMEM;
goto error_ret;
}
- /* Effectively select the ring buffer implementation */
- indio_dev->buffer->access = &ring_sw_access_funcs;
-
indio_dev->pollfunc = iio_alloc_pollfunc(NULL,
&ad7298_trigger_handler,
IRQF_ONESHOT,
diff --git a/drivers/staging/iio/adc/ad7476_ring.c b/drivers/staging/iio/adc/ad7476_ring.c
index 4e298b2a05b..d6af6c05ce1 100644
--- a/drivers/staging/iio/adc/ad7476_ring.c
+++ b/drivers/staging/iio/adc/ad7476_ring.c
@@ -23,7 +23,7 @@
/**
* ad7476_ring_preenable() setup the parameters of the ring before enabling
*
- * The complex nature of the setting of the nuber of bytes per datum is due
+ * The complex nature of the setting of the number of bytes per datum is due
* to this driver currently ensuring that the timestamp is stored at an 8
* byte boundary.
**/
@@ -98,8 +98,6 @@ int ad7476_register_ring_funcs_and_init(struct iio_dev *indio_dev)
ret = -ENOMEM;
goto error_ret;
}
- /* Effectively select the ring buffer implementation */
- indio_dev->buffer->access = &ring_sw_access_funcs;
indio_dev->pollfunc
= iio_alloc_pollfunc(NULL,
&ad7476_trigger_handler,
diff --git a/drivers/staging/iio/adc/ad7606_core.c b/drivers/staging/iio/adc/ad7606_core.c
index ddb7ef92f5c..97e8d3d4471 100644
--- a/drivers/staging/iio/adc/ad7606_core.c
+++ b/drivers/staging/iio/adc/ad7606_core.c
@@ -197,7 +197,7 @@ static IIO_DEVICE_ATTR(oversampling_ratio, S_IRUGO | S_IWUSR,
ad7606_store_oversampling_ratio, 0);
static IIO_CONST_ATTR(oversampling_ratio_available, "0 2 4 8 16 32 64");
-static struct attribute *ad7606_attributes[] = {
+static struct attribute *ad7606_attributes_os_and_range[] = {
&iio_dev_attr_in_voltage_range.dev_attr.attr,
&iio_const_attr_in_voltage_range_available.dev_attr.attr,
&iio_dev_attr_oversampling_ratio.dev_attr.attr,
@@ -205,34 +205,28 @@ static struct attribute *ad7606_attributes[] = {
NULL,
};
-static umode_t ad7606_attr_is_visible(struct kobject *kobj,
- struct attribute *attr, int n)
-{
- struct device *dev = container_of(kobj, struct device, kobj);
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct ad7606_state *st = iio_priv(indio_dev);
+static const struct attribute_group ad7606_attribute_group_os_and_range = {
+ .attrs = ad7606_attributes_os_and_range,
+};
- umode_t mode = attr->mode;
-
- if (!(gpio_is_valid(st->pdata->gpio_os0) &&
- gpio_is_valid(st->pdata->gpio_os1) &&
- gpio_is_valid(st->pdata->gpio_os2)) &&
- (attr == &iio_dev_attr_oversampling_ratio.dev_attr.attr ||
- attr ==
- &iio_const_attr_oversampling_ratio_available.dev_attr.attr))
- mode = 0;
- else if (!gpio_is_valid(st->pdata->gpio_range) &&
- (attr == &iio_dev_attr_in_voltage_range.dev_attr.attr ||
- attr ==
- &iio_const_attr_in_voltage_range_available.dev_attr.attr))
- mode = 0;
-
- return mode;
-}
+static struct attribute *ad7606_attributes_os[] = {
+ &iio_dev_attr_oversampling_ratio.dev_attr.attr,
+ &iio_const_attr_oversampling_ratio_available.dev_attr.attr,
+ NULL,
+};
-static const struct attribute_group ad7606_attribute_group = {
- .attrs = ad7606_attributes,
- .is_visible = ad7606_attr_is_visible,
+static const struct attribute_group ad7606_attribute_group_os = {
+ .attrs = ad7606_attributes_os,
+};
+
+static struct attribute *ad7606_attributes_range[] = {
+ &iio_dev_attr_in_voltage_range.dev_attr.attr,
+ &iio_const_attr_in_voltage_range_available.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group ad7606_attribute_group_range = {
+ .attrs = ad7606_attributes_range,
};
#define AD7606_CHANNEL(num) \
@@ -435,10 +429,27 @@ static irqreturn_t ad7606_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
};
-static const struct iio_info ad7606_info = {
+static const struct iio_info ad7606_info_no_os_or_range = {
.driver_module = THIS_MODULE,
.read_raw = &ad7606_read_raw,
- .attrs = &ad7606_attribute_group,
+};
+
+static const struct iio_info ad7606_info_os_and_range = {
+ .driver_module = THIS_MODULE,
+ .read_raw = &ad7606_read_raw,
+ .attrs = &ad7606_attribute_group_os_and_range,
+};
+
+static const struct iio_info ad7606_info_os = {
+ .driver_module = THIS_MODULE,
+ .read_raw = &ad7606_read_raw,
+ .attrs = &ad7606_attribute_group_os,
+};
+
+static const struct iio_info ad7606_info_range = {
+ .driver_module = THIS_MODULE,
+ .read_raw = &ad7606_read_raw,
+ .attrs = &ad7606_attribute_group_range,
};
struct iio_dev *ad7606_probe(struct device *dev, int irq,
@@ -483,7 +494,19 @@ struct iio_dev *ad7606_probe(struct device *dev, int irq,
st->chip_info = &ad7606_chip_info_tbl[id];
indio_dev->dev.parent = dev;
- indio_dev->info = &ad7606_info;
+ if (gpio_is_valid(st->pdata->gpio_os0) &&
+ gpio_is_valid(st->pdata->gpio_os1) &&
+ gpio_is_valid(st->pdata->gpio_os2)) {
+ if (gpio_is_valid(st->pdata->gpio_range))
+ indio_dev->info = &ad7606_info_os_and_range;
+ else
+ indio_dev->info = &ad7606_info_os;
+ } else {
+ if (gpio_is_valid(st->pdata->gpio_range))
+ indio_dev->info = &ad7606_info_range;
+ else
+ indio_dev->info = &ad7606_info_no_os_or_range;
+ }
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->name = st->chip_info->name;
indio_dev->channels = st->chip_info->channels;
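The is_visible callback is replaced by four static iio_info variants selected once at probe time. One way to read the new selection logic as a compact helper (a sketch; ad7606_select_info() is hypothetical, the four info structs are the ones defined above):

static const struct iio_info *
ad7606_select_info(const struct ad7606_platform_data *pdata)
{
	bool has_os = gpio_is_valid(pdata->gpio_os0) &&
		      gpio_is_valid(pdata->gpio_os1) &&
		      gpio_is_valid(pdata->gpio_os2);
	bool has_range = gpio_is_valid(pdata->gpio_range);

	if (has_os && has_range)
		return &ad7606_info_os_and_range;
	if (has_os)
		return &ad7606_info_os;
	if (has_range)
		return &ad7606_info_range;
	return &ad7606_info_no_os_or_range;
}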
diff --git a/drivers/staging/iio/adc/ad7606_par.c b/drivers/staging/iio/adc/ad7606_par.c
index cff97568189..bb152a8e8c9 100644
--- a/drivers/staging/iio/adc/ad7606_par.c
+++ b/drivers/staging/iio/adc/ad7606_par.c
@@ -173,18 +173,7 @@ static struct platform_driver ad7606_driver = {
},
};
-static int __init ad7606_init(void)
-{
- return platform_driver_register(&ad7606_driver);
-}
-
-static void __exit ad7606_cleanup(void)
-{
- platform_driver_unregister(&ad7606_driver);
-}
-
-module_init(ad7606_init);
-module_exit(ad7606_cleanup);
+module_platform_driver(ad7606_driver);
MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
MODULE_DESCRIPTION("Analog Devices AD7606 ADC");
diff --git a/drivers/staging/iio/adc/ad7606_ring.c b/drivers/staging/iio/adc/ad7606_ring.c
index e8f94a18a94..1ef9fbcaf2d 100644
--- a/drivers/staging/iio/adc/ad7606_ring.c
+++ b/drivers/staging/iio/adc/ad7606_ring.c
@@ -110,8 +110,6 @@ int ad7606_register_ring_funcs_and_init(struct iio_dev *indio_dev)
goto error_ret;
}
- /* Effectively select the ring buffer implementation */
- indio_dev->buffer->access = &ring_sw_access_funcs;
indio_dev->pollfunc = iio_alloc_pollfunc(&ad7606_trigger_handler_th_bh,
&ad7606_trigger_handler_th_bh,
0,
diff --git a/drivers/staging/iio/adc/ad7793.c b/drivers/staging/iio/adc/ad7793.c
index 6a058b19c49..84ecde1ad04 100644
--- a/drivers/staging/iio/adc/ad7793.c
+++ b/drivers/staging/iio/adc/ad7793.c
@@ -427,8 +427,6 @@ static int ad7793_register_ring_funcs_and_init(struct iio_dev *indio_dev)
ret = -ENOMEM;
goto error_ret;
}
- /* Effectively select the ring buffer implementation */
- indio_dev->buffer->access = &ring_sw_access_funcs;
indio_dev->pollfunc = iio_alloc_pollfunc(&iio_pollfunc_store_time,
&ad7793_trigger_handler,
IRQF_ONESHOT,
diff --git a/drivers/staging/iio/adc/ad7887_ring.c b/drivers/staging/iio/adc/ad7887_ring.c
index 85076cd962e..d1809079b63 100644
--- a/drivers/staging/iio/adc/ad7887_ring.c
+++ b/drivers/staging/iio/adc/ad7887_ring.c
@@ -131,8 +131,6 @@ int ad7887_register_ring_funcs_and_init(struct iio_dev *indio_dev)
ret = -ENOMEM;
goto error_ret;
}
- /* Effectively select the ring buffer implementation */
- indio_dev->buffer->access = &ring_sw_access_funcs;
indio_dev->pollfunc = iio_alloc_pollfunc(&iio_pollfunc_store_time,
&ad7887_trigger_handler,
IRQF_ONESHOT,
diff --git a/drivers/staging/iio/adc/ad799x_core.c b/drivers/staging/iio/adc/ad799x_core.c
index d5b581d8bc2..a8458669350 100644
--- a/drivers/staging/iio/adc/ad799x_core.c
+++ b/drivers/staging/iio/adc/ad799x_core.c
@@ -256,7 +256,7 @@ static int ad799x_write_event_value(struct iio_dev *indio_dev,
struct ad799x_state *st = iio_priv(indio_dev);
int direction = !!(IIO_EVENT_CODE_EXTRACT_DIR(event_code) ==
IIO_EV_DIR_FALLING);
- int number = IIO_EVENT_CODE_EXTRACT_NUM(event_code);
+ int number = IIO_EVENT_CODE_EXTRACT_CHAN(event_code);
mutex_lock(&indio_dev->mlock);
ret = ad799x_i2c_write16(st,
@@ -275,7 +275,7 @@ static int ad799x_read_event_value(struct iio_dev *indio_dev,
struct ad799x_state *st = iio_priv(indio_dev);
int direction = !!(IIO_EVENT_CODE_EXTRACT_DIR(event_code) ==
IIO_EV_DIR_FALLING);
- int number = IIO_EVENT_CODE_EXTRACT_NUM(event_code);
+ int number = IIO_EVENT_CODE_EXTRACT_CHAN(event_code);
u16 valin;
mutex_lock(&indio_dev->mlock);
diff --git a/drivers/staging/iio/adc/ad799x_ring.c b/drivers/staging/iio/adc/ad799x_ring.c
index 5dded9e7820..069765cab27 100644
--- a/drivers/staging/iio/adc/ad799x_ring.c
+++ b/drivers/staging/iio/adc/ad799x_ring.c
@@ -26,7 +26,7 @@
/**
* ad799x_ring_preenable() setup the parameters of the ring before enabling
*
- * The complex nature of the setting of the nuber of bytes per datum is due
+ * The complex nature of the setting of the number of bytes per datum is due
* to this driver currently ensuring that the timestamp is stored at an 8
* byte boundary.
**/
@@ -141,8 +141,6 @@ int ad799x_register_ring_funcs_and_init(struct iio_dev *indio_dev)
ret = -ENOMEM;
goto error_ret;
}
- /* Effectively select the ring buffer implementation */
- indio_dev->buffer->access = &ring_sw_access_funcs;
indio_dev->pollfunc = iio_alloc_pollfunc(NULL,
&ad799x_trigger_handler,
IRQF_ONESHOT,
diff --git a/drivers/staging/iio/adc/adt7310.c b/drivers/staging/iio/adc/adt7310.c
index eec2f325d54..caf57c1169b 100644
--- a/drivers/staging/iio/adc/adt7310.c
+++ b/drivers/staging/iio/adc/adt7310.c
@@ -725,32 +725,19 @@ static struct attribute *adt7310_event_int_attributes[] = {
&iio_dev_attr_fault_queue.dev_attr.attr,
&iio_dev_attr_t_alarm_high.dev_attr.attr,
&iio_dev_attr_t_alarm_low.dev_attr.attr,
- &iio_dev_attr_t_hyst.dev_attr.attr,
- NULL,
-};
-
-static struct attribute *adt7310_event_ct_attributes[] = {
- &iio_dev_attr_event_mode.dev_attr.attr,
- &iio_dev_attr_available_event_modes.dev_attr.attr,
- &iio_dev_attr_fault_queue.dev_attr.attr,
&iio_dev_attr_t_crit.dev_attr.attr,
&iio_dev_attr_t_hyst.dev_attr.attr,
NULL,
};
-static struct attribute_group adt7310_event_attribute_group[ADT7310_IRQS] = {
- {
- .attrs = adt7310_event_int_attributes,
- .name = "events",
- }, {
- .attrs = adt7310_event_ct_attributes,
- .name = "events",
- }
+static struct attribute_group adt7310_event_attribute_group = {
+ .attrs = adt7310_event_int_attributes,
+ .name = "events",
};
static const struct iio_info adt7310_info = {
.attrs = &adt7310_attribute_group,
- .event_attrs = adt7310_event_attribute_group,
+ .event_attrs = &adt7310_event_attribute_group,
.driver_module = THIS_MODULE,
};
diff --git a/drivers/staging/iio/adc/adt7410.c b/drivers/staging/iio/adc/adt7410.c
index c62248ceb37..dff3e8ca2d7 100644
--- a/drivers/staging/iio/adc/adt7410.c
+++ b/drivers/staging/iio/adc/adt7410.c
@@ -693,32 +693,19 @@ static struct attribute *adt7410_event_int_attributes[] = {
&iio_dev_attr_fault_queue.dev_attr.attr,
&iio_dev_attr_t_alarm_high.dev_attr.attr,
&iio_dev_attr_t_alarm_low.dev_attr.attr,
- &iio_dev_attr_t_hyst.dev_attr.attr,
- NULL,
-};
-
-static struct attribute *adt7410_event_ct_attributes[] = {
- &iio_dev_attr_event_mode.dev_attr.attr,
- &iio_dev_attr_available_event_modes.dev_attr.attr,
- &iio_dev_attr_fault_queue.dev_attr.attr,
&iio_dev_attr_t_crit.dev_attr.attr,
&iio_dev_attr_t_hyst.dev_attr.attr,
NULL,
};
-static struct attribute_group adt7410_event_attribute_group[ADT7410_IRQS] = {
- {
- .attrs = adt7410_event_int_attributes,
- .name = "events",
- }, {
- .attrs = adt7410_event_ct_attributes,
- .name = "events",
- }
+static struct attribute_group adt7410_event_attribute_group = {
+ .attrs = adt7410_event_int_attributes,
+ .name = "events",
};
static const struct iio_info adt7410_info = {
.attrs = &adt7410_attribute_group,
- .event_attrs = adt7410_event_attribute_group,
+ .event_attrs = &adt7410_event_attribute_group,
.driver_module = THIS_MODULE,
};
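With the INT and CT attribute sets merged (both here and in adt7310 above), event_attrs goes back to being a single group rather than a per-IRQ array. A sketch of the field this now matches, assuming the declaration in iio.h:

struct iio_info {
	/* ... */
	struct attribute_group *event_attrs;	/* one group, not an array */
	/* ... */
};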
diff --git a/drivers/staging/iio/adc/lpc32xx_adc.c b/drivers/staging/iio/adc/lpc32xx_adc.c
new file mode 100644
index 00000000000..dfc9033843a
--- /dev/null
+++ b/drivers/staging/iio/adc/lpc32xx_adc.c
@@ -0,0 +1,237 @@
+/*
+ * lpc32xx_adc.c - Support for ADC in LPC32XX
+ *
+ * 3-channel, 10-bit ADC
+ *
+ * Copyright (C) 2011, 2012 Roland Stigge <stigge@antcom.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/completion.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+
+/*
+ * LPC32XX registers definitions
+ */
+#define LPC32XX_ADC_SELECT(x) ((x) + 0x04)
+#define LPC32XX_ADC_CTRL(x) ((x) + 0x08)
+#define LPC32XX_ADC_VALUE(x) ((x) + 0x48)
+
+/* Bit definitions for LPC32XX_ADC_SELECT: */
+#define AD_REFm 0x00000200 /* constant, always write this value! */
+#define AD_REFp 0x00000080 /* constant, always write this value! */
+#define AD_IN 0x00000010 /* multiple of this is the */
+ /* channel number: 0, 1, 2 */
+#define AD_INTERNAL 0x00000004 /* constant, always write this value! */
+
+/* Bit definitions for LPC32XX_ADC_CTRL: */
+#define AD_STROBE 0x00000002
+#define AD_PDN_CTRL 0x00000004
+
+/* Bit definitions for LPC32XX_ADC_VALUE: */
+#define ADC_VALUE_MASK 0x000003FF
+
+#define MOD_NAME "lpc32xx-adc"
+
+struct lpc32xx_adc_info {
+ void __iomem *adc_base;
+ struct clk *clk;
+ struct completion completion;
+
+ u32 value;
+};
+
+static int lpc32xx_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val,
+ int *val2,
+ long mask)
+{
+ struct lpc32xx_adc_info *info = iio_priv(indio_dev);
+
+ if (mask == 0) {
+ mutex_lock(&indio_dev->mlock);
+ clk_enable(info->clk);
+ /* Measurement setup */
+ __raw_writel(AD_INTERNAL | (chan->address) | AD_REFp | AD_REFm,
+ LPC32XX_ADC_SELECT(info->adc_base));
+ /* Trigger conversion */
+ __raw_writel(AD_PDN_CTRL | AD_STROBE,
+ LPC32XX_ADC_CTRL(info->adc_base));
+ wait_for_completion(&info->completion); /* set by ISR */
+ clk_disable(info->clk);
+ *val = info->value;
+ mutex_unlock(&indio_dev->mlock);
+
+ return IIO_VAL_INT;
+ }
+
+ return -EINVAL;
+}
+
+static const struct iio_info lpc32xx_adc_iio_info = {
+ .read_raw = &lpc32xx_read_raw,
+ .driver_module = THIS_MODULE,
+};
+
+#define LPC32XX_ADC_CHANNEL(_index) { \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .channel = _index, \
+ .address = AD_IN * _index, \
+ .scan_index = _index, \
+}
+
+static struct iio_chan_spec lpc32xx_adc_iio_channels[] = {
+ LPC32XX_ADC_CHANNEL(0),
+ LPC32XX_ADC_CHANNEL(1),
+ LPC32XX_ADC_CHANNEL(2),
+};
+
+static irqreturn_t lpc32xx_adc_isr(int irq, void *dev_id)
+{
+ struct lpc32xx_adc_info *info = (struct lpc32xx_adc_info *) dev_id;
+
+ /* Read value and clear irq */
+ info->value = __raw_readl(LPC32XX_ADC_VALUE(info->adc_base)) &
+ ADC_VALUE_MASK;
+ complete(&info->completion);
+
+ return IRQ_HANDLED;
+}
+
+static int __devinit lpc32xx_adc_probe(struct platform_device *pdev)
+{
+ struct lpc32xx_adc_info *info = NULL;
+ struct resource *res;
+ int retval = -ENODEV;
+ struct iio_dev *iodev = NULL;
+ int irq;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "failed to get platform I/O memory\n");
+ retval = -EBUSY;
+ goto errout1;
+ }
+
+ iodev = iio_allocate_device(sizeof(struct lpc32xx_adc_info));
+ if (!iodev) {
+ dev_err(&pdev->dev, "failed allocating iio device\n");
+ retval = -ENOMEM;
+ goto errout1;
+ }
+
+ info = iio_priv(iodev);
+
+ info->adc_base = ioremap(res->start, res->end - res->start + 1);
+ if (!info->adc_base) {
+ dev_err(&pdev->dev, "failed mapping memory\n");
+ retval = -EBUSY;
+ goto errout2;
+ }
+
+ info->clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(info->clk)) {
+ dev_err(&pdev->dev, "failed getting clock\n");
+ retval = PTR_ERR(info->clk);
+ goto errout3;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if ((irq < 0) || (irq >= NR_IRQS)) {
+ dev_err(&pdev->dev, "failed getting interrupt resource\n");
+ retval = -EINVAL;
+ goto errout4;
+ }
+
+ retval = request_irq(irq, lpc32xx_adc_isr, 0, MOD_NAME, info);
+ if (retval < 0) {
+ dev_err(&pdev->dev, "failed requesting interrupt\n");
+ goto errout4;
+ }
+
+ platform_set_drvdata(pdev, iodev);
+
+ init_completion(&info->completion);
+
+ iodev->name = MOD_NAME;
+ iodev->dev.parent = &pdev->dev;
+ iodev->info = &lpc32xx_adc_iio_info;
+ iodev->modes = INDIO_DIRECT_MODE;
+ iodev->channels = lpc32xx_adc_iio_channels;
+ iodev->num_channels = ARRAY_SIZE(lpc32xx_adc_iio_channels);
+
+ retval = iio_device_register(iodev);
+ if (retval)
+ goto errout5;
+
+ dev_info(&pdev->dev, "LPC32XX ADC driver loaded, IRQ %d\n", irq);
+
+ return 0;
+
+errout5:
+ free_irq(irq, info); /* dev_id must match request_irq() above */
+errout4:
+ clk_put(info->clk);
+errout3:
+ iounmap(info->adc_base);
+errout2:
+ iio_free_device(iodev);
+errout1:
+ return retval;
+}
+
+static int __devexit lpc32xx_adc_remove(struct platform_device *pdev)
+{
+ struct iio_dev *iodev = platform_get_drvdata(pdev);
+ struct lpc32xx_adc_info *info = iio_priv(iodev);
+ int irq = platform_get_irq(pdev, 0);
+
+ iio_device_unregister(iodev);
+ free_irq(irq, info); /* dev_id must match request_irq() in probe */
+ platform_set_drvdata(pdev, NULL);
+ clk_put(info->clk);
+ iounmap(info->adc_base);
+ iio_free_device(iodev);
+
+ return 0;
+}
+
+static struct platform_driver lpc32xx_adc_driver = {
+ .probe = lpc32xx_adc_probe,
+ .remove = __devexit_p(lpc32xx_adc_remove),
+ .driver = {
+ .name = MOD_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+module_platform_driver(lpc32xx_adc_driver);
+
+MODULE_AUTHOR("Roland Stigge <stigge@antcom.de>");
+MODULE_DESCRIPTION("LPC32XX ADC driver");
+MODULE_LICENSE("GPL");
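A board-file usage sketch for the new driver; the base address and IRQ number below are hypothetical placeholders, not taken from the LPC32xx datasheet:

static struct resource lpc32xx_adc_resources[] = {
	{
		.start	= 0x40048000,	/* hypothetical ADC base */
		.end	= 0x40048fff,
		.flags	= IORESOURCE_MEM,
	}, {
		.start	= 39,		/* hypothetical ADC IRQ */
		.end	= 39,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device lpc32xx_adc_device = {
	.name		= "lpc32xx-adc",	/* must match MOD_NAME */
	.id		= -1,
	.resource	= lpc32xx_adc_resources,
	.num_resources	= ARRAY_SIZE(lpc32xx_adc_resources),
};

Once the device is registered, each channel should appear as an in_voltageN_raw attribute under the device's sysfs iio directory.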
diff --git a/drivers/staging/iio/adc/max1363_core.c b/drivers/staging/iio/adc/max1363_core.c
index b92cb4af18c..cf3e2ca7e31 100644
--- a/drivers/staging/iio/adc/max1363_core.c
+++ b/drivers/staging/iio/adc/max1363_core.c
@@ -341,7 +341,7 @@ static struct iio_chan_spec max1361_channels[] =
static struct iio_chan_spec max1363_channels[] =
MAX1363_4X_CHANS(12, MAX1363_EV_M);
-/* Appies to max1236, max1237 */
+/* Applies to max1236, max1237 */
static const enum max1363_modes max1236_mode_list[] = {
_s0, _s1, _s2, _s3,
s0to1, s0to2, s0to3,
@@ -543,9 +543,9 @@ static int max1363_read_thresh(struct iio_dev *indio_dev,
{
struct max1363_state *st = iio_priv(indio_dev);
if (IIO_EVENT_CODE_EXTRACT_DIR(event_code) == IIO_EV_DIR_FALLING)
- *val = st->thresh_low[IIO_EVENT_CODE_EXTRACT_NUM(event_code)];
+ *val = st->thresh_low[IIO_EVENT_CODE_EXTRACT_CHAN(event_code)];
else
- *val = st->thresh_high[IIO_EVENT_CODE_EXTRACT_NUM(event_code)];
+ *val = st->thresh_high[IIO_EVENT_CODE_EXTRACT_CHAN(event_code)];
return 0;
}
@@ -568,10 +568,10 @@ static int max1363_write_thresh(struct iio_dev *indio_dev,
switch (IIO_EVENT_CODE_EXTRACT_DIR(event_code)) {
case IIO_EV_DIR_FALLING:
- st->thresh_low[IIO_EVENT_CODE_EXTRACT_NUM(event_code)] = val;
+ st->thresh_low[IIO_EVENT_CODE_EXTRACT_CHAN(event_code)] = val;
break;
case IIO_EV_DIR_RISING:
- st->thresh_high[IIO_EVENT_CODE_EXTRACT_NUM(event_code)] = val;
+ st->thresh_high[IIO_EVENT_CODE_EXTRACT_CHAN(event_code)] = val;
break;
}
@@ -622,7 +622,7 @@ static int max1363_read_event_config(struct iio_dev *indio_dev,
struct max1363_state *st = iio_priv(indio_dev);
int val;
- int number = IIO_EVENT_CODE_EXTRACT_NUM(event_code);
+ int number = IIO_EVENT_CODE_EXTRACT_CHAN(event_code);
mutex_lock(&indio_dev->mlock);
if (IIO_EVENT_CODE_EXTRACT_DIR(event_code) == IIO_EV_DIR_FALLING)
val = (1 << number) & st->mask_low;
@@ -775,7 +775,7 @@ static int max1363_write_event_config(struct iio_dev *indio_dev,
int ret = 0;
struct max1363_state *st = iio_priv(indio_dev);
u16 unifiedmask;
- int number = IIO_EVENT_CODE_EXTRACT_NUM(event_code);
+ int number = IIO_EVENT_CODE_EXTRACT_CHAN(event_code);
mutex_lock(&indio_dev->mlock);
unifiedmask = st->mask_low | st->mask_high;
@@ -1245,10 +1245,31 @@ static int max1363_initial_setup(struct max1363_state *st)
return max1363_set_scan_mode(st);
}
+static int __devinit max1363_alloc_scan_masks(struct iio_dev *indio_dev)
+{
+ struct max1363_state *st = iio_priv(indio_dev);
+ unsigned long *masks;
+ int i;
+
+ masks = kzalloc(BITS_TO_LONGS(MAX1363_MAX_CHANNELS)*sizeof(long)*
+ (st->chip_info->num_modes + 1), GFP_KERNEL);
+ if (!masks)
+ return -ENOMEM;
+
+ for (i = 0; i < st->chip_info->num_modes; i++)
+ bitmap_copy(masks + BITS_TO_LONGS(MAX1363_MAX_CHANNELS)*i,
+ max1363_mode_table[st->chip_info->mode_list[i]]
+ .modemask, MAX1363_MAX_CHANNELS);
+
+ indio_dev->available_scan_masks = masks;
+
+ return 0;
+}
+
static int __devinit max1363_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
- int ret, i;
+ int ret;
struct max1363_state *st;
struct iio_dev *indio_dev;
struct regulator *reg;
@@ -1276,19 +1297,10 @@ static int __devinit max1363_probe(struct i2c_client *client,
st->chip_info = &max1363_chip_info_tbl[id->driver_data];
st->client = client;
- indio_dev->available_scan_masks
- = kzalloc(BITS_TO_LONGS(MAX1363_MAX_CHANNELS)*sizeof(long)*
- (st->chip_info->num_modes + 1), GFP_KERNEL);
- if (!indio_dev->available_scan_masks) {
- ret = -ENOMEM;
+ ret = max1363_alloc_scan_masks(indio_dev);
+ if (ret)
goto error_free_device;
- }
- for (i = 0; i < st->chip_info->num_modes; i++)
- bitmap_copy(indio_dev->available_scan_masks +
- BITS_TO_LONGS(MAX1363_MAX_CHANNELS)*i,
- max1363_mode_table[st->chip_info->mode_list[i]]
- .modemask, MAX1363_MAX_CHANNELS);
/* Estabilish that the iio_dev is a child of the i2c device */
indio_dev->dev.parent = &client->dev;
indio_dev->name = id->name;
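max1363_alloc_scan_masks() is a straight factoring-out of the probe code, but the layout it builds is worth spelling out. A sketch, assuming each mask occupies BITS_TO_LONGS(MAX1363_MAX_CHANNELS) longs:

/*
 * available_scan_masks points at num_modes + 1 consecutive masks:
 *
 *   [mode 0 mask][mode 1 mask] ... [mode N-1 mask][all-zero mask]
 *
 * kzalloc() of num_modes + 1 entries supplies the trailing all-zero
 * mask, which terminates the list for the core's matching code.
 */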
diff --git a/drivers/staging/iio/adc/max1363_ring.c b/drivers/staging/iio/adc/max1363_ring.c
index f730b3fb971..d0a60a38293 100644
--- a/drivers/staging/iio/adc/max1363_ring.c
+++ b/drivers/staging/iio/adc/max1363_ring.c
@@ -116,8 +116,6 @@ int max1363_register_ring_funcs_and_init(struct iio_dev *indio_dev)
ret = -ENOMEM;
goto error_deallocate_sw_rb;
}
- /* Effectively select the ring buffer implementation */
- indio_dev->buffer->access = &ring_sw_access_funcs;
/* Ring buffer functions - here trigger setup related */
indio_dev->setup_ops = &max1363_ring_setup_ops;
diff --git a/drivers/staging/iio/addac/adt7316-i2c.c b/drivers/staging/iio/addac/adt7316-i2c.c
index 2c03a39220e..9e128dd7d45 100644
--- a/drivers/staging/iio/addac/adt7316-i2c.c
+++ b/drivers/staging/iio/addac/adt7316-i2c.c
@@ -125,30 +125,14 @@ static const struct i2c_device_id adt7316_i2c_id[] = {
MODULE_DEVICE_TABLE(i2c, adt7316_i2c_id);
-#ifdef CONFIG_PM
-static int adt7316_i2c_suspend(struct i2c_client *client, pm_message_t message)
-{
- return adt7316_disable(&client->dev);
-}
-
-static int adt7316_i2c_resume(struct i2c_client *client)
-{
- return adt7316_enable(&client->dev);
-}
-#else
-# define adt7316_i2c_suspend NULL
-# define adt7316_i2c_resume NULL
-#endif
-
static struct i2c_driver adt7316_driver = {
.driver = {
.name = "adt7316",
+ .pm = ADT7316_PM_OPS,
.owner = THIS_MODULE,
},
.probe = adt7316_i2c_probe,
.remove = __devexit_p(adt7316_i2c_remove),
- .suspend = adt7316_i2c_suspend,
- .resume = adt7316_i2c_resume,
.id_table = adt7316_i2c_id,
};
module_i2c_driver(adt7316_driver);
diff --git a/drivers/staging/iio/addac/adt7316-spi.c b/drivers/staging/iio/addac/adt7316-spi.c
index 1ea3cd06299..985f7d8a6eb 100644
--- a/drivers/staging/iio/addac/adt7316-spi.c
+++ b/drivers/staging/iio/addac/adt7316-spi.c
@@ -133,30 +133,14 @@ static const struct spi_device_id adt7316_spi_id[] = {
MODULE_DEVICE_TABLE(spi, adt7316_spi_id);
-#ifdef CONFIG_PM
-static int adt7316_spi_suspend(struct spi_device *spi_dev, pm_message_t message)
-{
- return adt7316_disable(&spi_dev->dev);
-}
-
-static int adt7316_spi_resume(struct spi_device *spi_dev)
-{
- return adt7316_enable(&spi_dev->dev);
-}
-#else
-# define adt7316_spi_suspend NULL
-# define adt7316_spi_resume NULL
-#endif
-
static struct spi_driver adt7316_driver = {
.driver = {
.name = "adt7316",
+ .pm = ADT7316_PM_OPS,
.owner = THIS_MODULE,
},
.probe = adt7316_spi_probe,
.remove = __devexit_p(adt7316_spi_remove),
- .suspend = adt7316_spi_suspend,
- .resume = adt7316_spi_resume,
.id_table = adt7316_spi_id,
};
module_spi_driver(adt7316_driver);
diff --git a/drivers/staging/iio/addac/adt7316.c b/drivers/staging/iio/addac/adt7316.c
index 13c39292d3f..fd6a4544405 100644
--- a/drivers/staging/iio/addac/adt7316.c
+++ b/drivers/staging/iio/addac/adt7316.c
@@ -2089,24 +2089,25 @@ static struct attribute_group adt7516_event_attribute_group = {
.name = "events",
};
-#ifdef CONFIG_PM
-int adt7316_disable(struct device *dev)
+#ifdef CONFIG_PM_SLEEP
+static int adt7316_disable(struct device *dev)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
struct adt7316_chip_info *chip = iio_priv(dev_info);
return _adt7316_store_enabled(chip, 0);
}
-EXPORT_SYMBOL(adt7316_disable);
-int adt7316_enable(struct device *dev)
+static int adt7316_enable(struct device *dev)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
struct adt7316_chip_info *chip = iio_priv(dev_info);
return _adt7316_store_enabled(chip, 1);
}
-EXPORT_SYMBOL(adt7316_enable);
+
+SIMPLE_DEV_PM_OPS(adt7316_pm_ops, adt7316_disable, adt7316_enable);
+EXPORT_SYMBOL_GPL(adt7316_pm_ops);
#endif
static const struct iio_info adt7316_info = {
diff --git a/drivers/staging/iio/addac/adt7316.h b/drivers/staging/iio/addac/adt7316.h
index d34bd679bb4..4d3efff46ae 100644
--- a/drivers/staging/iio/addac/adt7316.h
+++ b/drivers/staging/iio/addac/adt7316.h
@@ -10,6 +10,7 @@
#define _ADT7316_H_
#include <linux/types.h>
+#include <linux/pm.h>
#define ADT7316_REG_MAX_ADDR 0x3F
@@ -23,9 +24,11 @@ struct adt7316_bus {
int (*multi_write) (void *client, u8 first_reg, u8 count, u8 *data);
};
-#ifdef CONFIG_PM
-int adt7316_disable(struct device *dev);
-int adt7316_enable(struct device *dev);
+#ifdef CONFIG_PM_SLEEP
+extern const struct dev_pm_ops adt7316_pm_ops;
+#define ADT7316_PM_OPS (&adt7316_pm_ops)
+#else
+#define ADT7316_PM_OPS NULL
#endif
int adt7316_probe(struct device *dev, struct adt7316_bus *bus, const char *name);
int adt7316_remove(struct device *dev);
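The ADT7316_PM_OPS indirection lets the I2C and SPI glue above share one set of sleep callbacks that take a plain struct device. SIMPLE_DEV_PM_OPS roughly expands to (a sketch of the <linux/pm.h> helper of this era):

#define SIMPLE_DEV_PM_OPS(name, suspend_fn, resume_fn) \
const struct dev_pm_ops name = { \
	.suspend = suspend_fn, \
	.resume = resume_fn, \
	.freeze = suspend_fn, \
	.thaw = resume_fn, \
	.poweroff = suspend_fn, \
	.restore = resume_fn, \
}

The max517 conversion below follows the identical pattern, only with a static, file-local dev_pm_ops.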
diff --git a/drivers/staging/iio/buffer.h b/drivers/staging/iio/buffer.h
index 6fb6e64181a..df2046dcb62 100644
--- a/drivers/staging/iio/buffer.h
+++ b/drivers/staging/iio/buffer.h
@@ -91,8 +91,6 @@ struct iio_buffer {
**/
void iio_buffer_init(struct iio_buffer *buffer);
-void iio_buffer_deinit(struct iio_buffer *buffer);
-
/**
* __iio_update_buffer() - update common elements of buffers
* @buffer: buffer that is the event source
diff --git a/drivers/staging/iio/cdc/ad7150.c b/drivers/staging/iio/cdc/ad7150.c
index b73007dcf4b..e4a08dc9b6f 100644
--- a/drivers/staging/iio/cdc/ad7150.c
+++ b/drivers/staging/iio/cdc/ad7150.c
@@ -167,7 +167,7 @@ static int ad7150_write_event_params(struct iio_dev *indio_dev, u64 event_code)
u16 value;
u8 sens, timeout;
struct ad7150_chip_info *chip = iio_priv(indio_dev);
- int chan = IIO_EVENT_CODE_EXTRACT_NUM(event_code);
+ int chan = IIO_EVENT_CODE_EXTRACT_CHAN(event_code);
int rising = !!(IIO_EVENT_CODE_EXTRACT_DIR(event_code) ==
IIO_EV_DIR_RISING);
@@ -279,7 +279,7 @@ static int ad7150_read_event_value(struct iio_dev *indio_dev,
u64 event_code,
int *val)
{
- int chan = IIO_EVENT_CODE_EXTRACT_NUM(event_code);
+ int chan = IIO_EVENT_CODE_EXTRACT_CHAN(event_code);
struct ad7150_chip_info *chip = iio_priv(indio_dev);
int rising = !!(IIO_EVENT_CODE_EXTRACT_DIR(event_code) ==
IIO_EV_DIR_RISING);
@@ -309,7 +309,7 @@ static int ad7150_write_event_value(struct iio_dev *indio_dev,
{
int ret;
struct ad7150_chip_info *chip = iio_priv(indio_dev);
- int chan = IIO_EVENT_CODE_EXTRACT_NUM(event_code);
+ int chan = IIO_EVENT_CODE_EXTRACT_CHAN(event_code);
int rising = !!(IIO_EVENT_CODE_EXTRACT_DIR(event_code) ==
IIO_EV_DIR_RISING);
@@ -347,7 +347,7 @@ static ssize_t ad7150_show_timeout(struct device *dev,
u8 value;
/* use the event code for consistency reasons */
- int chan = IIO_EVENT_CODE_EXTRACT_NUM(this_attr->address);
+ int chan = IIO_EVENT_CODE_EXTRACT_CHAN(this_attr->address);
int rising = !!(IIO_EVENT_CODE_EXTRACT_DIR(this_attr->address)
== IIO_EV_DIR_RISING);
@@ -373,7 +373,7 @@ static ssize_t ad7150_store_timeout(struct device *dev,
struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct ad7150_chip_info *chip = iio_priv(indio_dev);
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
- int chan = IIO_EVENT_CODE_EXTRACT_NUM(this_attr->address);
+ int chan = IIO_EVENT_CODE_EXTRACT_CHAN(this_attr->address);
int rising = !!(IIO_EVENT_CODE_EXTRACT_DIR(this_attr->address) ==
IIO_EV_DIR_RISING);
u8 data;
diff --git a/drivers/staging/iio/consumer.h b/drivers/staging/iio/consumer.h
new file mode 100644
index 00000000000..36a060cd3a2
--- /dev/null
+++ b/drivers/staging/iio/consumer.h
@@ -0,0 +1,96 @@
+/*
+ * Industrial I/O in kernel consumer interface
+ *
+ * Copyright (c) 2011 Jonathan Cameron
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+#ifndef _IIO_INKERN_CONSUMER_H_
+#define _IIO_INKERN_CONSUMER_H_
+#include "types.h"
+
+struct iio_dev;
+struct iio_chan_spec;
+
+/**
+ * struct iio_channel - everything needed for a consumer to use a channel
+ * @indio_dev: Device on which the channel exists.
+ * @channel: Full description of the channel.
+ */
+struct iio_channel {
+ struct iio_dev *indio_dev;
+ const struct iio_chan_spec *channel;
+};
+
+/**
+ * iio_st_channel_get() - get description of all that is needed to access channel.
+ * @name: Unique name of the device as provided in the iio_map
+ * with which the desired provider to consumer mapping
+ * was registered.
+ * @consumer_channel: Unique name to identify the channel on the consumer
+ * side. This typically describes the channels use within
+ * the consumer. E.g. 'battery_voltage'
+ */
+struct iio_channel *iio_st_channel_get(const char *name,
+ const char *consumer_channel);
+
+/**
+ * iio_st_channel_release() - release channels obtained via iio_st_channel_get
+ * @chan: The channel to be released.
+ */
+void iio_st_channel_release(struct iio_channel *chan);
+
+/**
+ * iio_st_channel_get_all() - get all channels associated with a client
+ * @name: name of consumer device.
+ *
+ * Returns an array of iio_channel structures terminated with one with
+ * null iio_dev pointer.
+ * This function is used by fairly generic consumers to get all the
+ * channels registered as having this consumer.
+ */
+struct iio_channel *iio_st_channel_get_all(const char *name);
+
+/**
+ * iio_st_channel_release_all() - reverse iio_st_channel_get_all()
+ * @chan: Array of channels to be released.
+ */
+void iio_st_channel_release_all(struct iio_channel *chan);
+
+/**
+ * iio_st_read_channel_raw() - read from a given channel
+ * @channel: The channel being queried.
+ * @val: Value read back.
+ *
+ * Note raw reads from iio channels are in adc counts and hence
+ * scale will need to be applied if standard units required.
+ */
+int iio_st_read_channel_raw(struct iio_channel *chan,
+ int *val);
+
+/**
+ * iio_st_get_channel_type() - get the type of a channel
+ * @channel: The channel being queried.
+ * @type: The type of the channel.
+ *
+ * returns the enum iio_chan_type of the channel
+ */
+int iio_st_get_channel_type(struct iio_channel *channel,
+ enum iio_chan_type *type);
+
+/**
+ * iio_st_read_channel_scale() - read the scale value for a channel
+ * @channel: The channel being queried.
+ * @val: First part of value read back.
+ * @val2: Second part of value read back.
+ *
+ * Note this returns a description of what is in val and val2, such
+ * as IIO_VAL_INT_PLUS_MICRO telling us we have a value of val
+ * + val2/1e6
+ */
+int iio_st_read_channel_scale(struct iio_channel *chan, int *val,
+ int *val2);
+
+#endif
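A sketch of the consumer side in use; "some_adc" and "battery_voltage" are hypothetical names that a board file would have registered via an iio_map, and an ERR_PTR-style return from iio_st_channel_get() is assumed:

static int read_battery_uv(int *uv)
{
	struct iio_channel *chan;
	int raw, val, val2, ret;

	chan = iio_st_channel_get("some_adc", "battery_voltage");
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	ret = iio_st_read_channel_raw(chan, &raw);
	if (ret < 0)
		goto out;

	ret = iio_st_read_channel_scale(chan, &val, &val2);
	if (ret < 0)
		goto out;

	/* assuming IIO_VAL_INT_PLUS_MICRO: scale is val + val2 / 1e6 */
	*uv = raw * val + (int)((s64)raw * val2 / 1000000);
	ret = 0;
out:
	iio_st_channel_release(chan);
	return ret;
}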
diff --git a/drivers/staging/iio/dac/Kconfig b/drivers/staging/iio/dac/Kconfig
index 13e27979df2..a57803a5d1a 100644
--- a/drivers/staging/iio/dac/Kconfig
+++ b/drivers/staging/iio/dac/Kconfig
@@ -4,11 +4,12 @@
menu "Digital to analog converters"
config AD5064
- tristate "Analog Devices AD5064/64-1/44/24 DAC driver"
+ tristate "Analog Devices AD5064/64-1/65/44/45/24/25, AD5628/48/66/68 DAC driver"
depends on SPI
help
- Say yes here to build support for Analog Devices AD5064, AD5064-1,
- AD5044, AD5024 Digital to Analog Converter.
+ Say yes here to build support for Analog Devices AD5024, AD5025, AD5044,
+ AD5045, AD5064, AD5064-1, AD5065, AD5628, AD5648, AD5666, AD5668 Digital
+ to Analog Converter.
To compile this driver as a module, choose M here: the
module will be called ad5064.
diff --git a/drivers/staging/iio/dac/ad5064.c b/drivers/staging/iio/dac/ad5064.c
index 049a855039c..06b162745a3 100644
--- a/drivers/staging/iio/dac/ad5064.c
+++ b/drivers/staging/iio/dac/ad5064.c
@@ -1,5 +1,6 @@
/*
- * AD5064, AD5064-1, AD5044, AD5024 Digital to analog converters driver
+ * AD5024, AD5025, AD5044, AD5045, AD5064, AD5064-1, AD5065, AD5628, AD5648,
+ * AD5666, AD5668 Digital to analog converters driver
*
* Copyright 2011 Analog Devices Inc.
*
@@ -19,7 +20,8 @@
#include "../sysfs.h"
#include "dac.h"
-#define AD5064_DAC_CHANNELS 4
+#define AD5064_MAX_DAC_CHANNELS 8
+#define AD5064_MAX_VREFS 4
#define AD5064_ADDR(x) ((x) << 20)
#define AD5064_CMD(x) ((x) << 24)
@@ -35,7 +37,10 @@
#define AD5064_CMD_CLEAR 0x5
#define AD5064_CMD_LDAC_MASK 0x6
#define AD5064_CMD_RESET 0x7
-#define AD5064_CMD_DAISY_CHAIN_ENABLE 0x8
+#define AD5064_CMD_CONFIG 0x8
+
+#define AD5064_CONFIG_DAISY_CHAIN_ENABLE BIT(1)
+#define AD5064_CONFIG_INT_VREF_ENABLE BIT(0)
#define AD5064_LDAC_PWRDN_NONE 0x0
#define AD5064_LDAC_PWRDN_1K 0x1
@@ -45,12 +50,17 @@
/**
* struct ad5064_chip_info - chip specific information
* @shared_vref: whether the vref supply is shared between channels
+ * @internal_vref: internal reference voltage. 0 if the chip has no internal
+ * vref.
* @channel: channel specification
-*/
+ * @num_channels: number of channels
+ */
struct ad5064_chip_info {
bool shared_vref;
- struct iio_chan_spec channel[AD5064_DAC_CHANNELS];
+ unsigned long internal_vref;
+ const struct iio_chan_spec *channels;
+ unsigned int num_channels;
};
/**
@@ -61,16 +71,19 @@ struct ad5064_chip_info {
* @pwr_down: whether channel is powered down
* @pwr_down_mode: channel's current power down mode
* @dac_cache: current DAC raw value (chip does not support readback)
+ * @use_internal_vref: set to true if the internal reference voltage should be
+ * used.
* @data: spi transfer buffers
*/
struct ad5064_state {
struct spi_device *spi;
const struct ad5064_chip_info *chip_info;
- struct regulator_bulk_data vref_reg[AD5064_DAC_CHANNELS];
- bool pwr_down[AD5064_DAC_CHANNELS];
- u8 pwr_down_mode[AD5064_DAC_CHANNELS];
- unsigned int dac_cache[AD5064_DAC_CHANNELS];
+ struct regulator_bulk_data vref_reg[AD5064_MAX_VREFS];
+ bool pwr_down[AD5064_MAX_DAC_CHANNELS];
+ u8 pwr_down_mode[AD5064_MAX_DAC_CHANNELS];
+ unsigned int dac_cache[AD5064_MAX_DAC_CHANNELS];
+ bool use_internal_vref;
/*
* DMA (thus cache coherency maintenance) requires the
@@ -81,50 +94,20 @@ struct ad5064_state {
enum ad5064_type {
ID_AD5024,
+ ID_AD5025,
ID_AD5044,
+ ID_AD5045,
ID_AD5064,
ID_AD5064_1,
-};
-
-#define AD5064_CHANNEL(chan, bits) { \
- .type = IIO_VOLTAGE, \
- .indexed = 1, \
- .output = 1, \
- .channel = (chan), \
- .info_mask = IIO_CHAN_INFO_SCALE_SEPARATE_BIT, \
- .address = AD5064_ADDR_DAC(chan), \
- .scan_type = IIO_ST('u', (bits), 16, 20 - (bits)) \
-}
-
-static const struct ad5064_chip_info ad5064_chip_info_tbl[] = {
- [ID_AD5024] = {
- .shared_vref = false,
- .channel[0] = AD5064_CHANNEL(0, 12),
- .channel[1] = AD5064_CHANNEL(1, 12),
- .channel[2] = AD5064_CHANNEL(2, 12),
- .channel[3] = AD5064_CHANNEL(3, 12),
- },
- [ID_AD5044] = {
- .shared_vref = false,
- .channel[0] = AD5064_CHANNEL(0, 14),
- .channel[1] = AD5064_CHANNEL(1, 14),
- .channel[2] = AD5064_CHANNEL(2, 14),
- .channel[3] = AD5064_CHANNEL(3, 14),
- },
- [ID_AD5064] = {
- .shared_vref = false,
- .channel[0] = AD5064_CHANNEL(0, 16),
- .channel[1] = AD5064_CHANNEL(1, 16),
- .channel[2] = AD5064_CHANNEL(2, 16),
- .channel[3] = AD5064_CHANNEL(3, 16),
- },
- [ID_AD5064_1] = {
- .shared_vref = true,
- .channel[0] = AD5064_CHANNEL(0, 16),
- .channel[1] = AD5064_CHANNEL(1, 16),
- .channel[2] = AD5064_CHANNEL(2, 16),
- .channel[3] = AD5064_CHANNEL(3, 16),
- },
+ ID_AD5065,
+ ID_AD5628_1,
+ ID_AD5628_2,
+ ID_AD5648_1,
+ ID_AD5648_2,
+ ID_AD5666_1,
+ ID_AD5666_2,
+ ID_AD5668_1,
+ ID_AD5668_2,
};
static int ad5064_spi_write(struct ad5064_state *st, unsigned int cmd,
@@ -160,22 +143,25 @@ static const char ad5064_powerdown_modes[][15] = {
[AD5064_LDAC_PWRDN_3STATE] = "three_state",
};
-static ssize_t ad5064_read_powerdown_mode(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t ad5064_read_powerdown_mode_available(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, char *buf)
+{
+ return sprintf(buf, "%s %s %s\n", ad5064_powerdown_modes[1],
+ ad5064_powerdown_modes[2], ad5064_powerdown_modes[3]);
+}
+
+static ssize_t ad5064_read_powerdown_mode(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, char *buf)
{
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct ad5064_state *st = iio_priv(indio_dev);
return sprintf(buf, "%s\n",
- ad5064_powerdown_modes[st->pwr_down_mode[this_attr->address]]);
+ ad5064_powerdown_modes[st->pwr_down_mode[chan->channel]]);
}
-static ssize_t ad5064_write_powerdown_mode(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t len)
+static ssize_t ad5064_write_powerdown_mode(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, const char *buf, size_t len)
{
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct ad5064_state *st = iio_priv(indio_dev);
unsigned int mode, i;
int ret;
@@ -192,31 +178,26 @@ static ssize_t ad5064_write_powerdown_mode(struct device *dev,
return -EINVAL;
mutex_lock(&indio_dev->mlock);
- st->pwr_down_mode[this_attr->address] = mode;
+ st->pwr_down_mode[chan->channel] = mode;
- ret = ad5064_sync_powerdown_mode(st, this_attr->address);
+ ret = ad5064_sync_powerdown_mode(st, chan->channel);
mutex_unlock(&indio_dev->mlock);
return ret ? ret : len;
}
-static ssize_t ad5064_read_dac_powerdown(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t ad5064_read_dac_powerdown(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, char *buf)
{
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct ad5064_state *st = iio_priv(indio_dev);
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
- return sprintf(buf, "%d\n", st->pwr_down[this_attr->address]);
+ return sprintf(buf, "%d\n", st->pwr_down[chan->channel]);
}
-static ssize_t ad5064_write_dac_powerdown(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t len)
+static ssize_t ad5064_write_dac_powerdown(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, const char *buf, size_t len)
{
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct ad5064_state *st = iio_priv(indio_dev);
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
bool pwr_down;
int ret;
@@ -225,53 +206,24 @@ static ssize_t ad5064_write_dac_powerdown(struct device *dev,
return ret;
mutex_lock(&indio_dev->mlock);
- st->pwr_down[this_attr->address] = pwr_down;
+ st->pwr_down[chan->channel] = pwr_down;
- ret = ad5064_sync_powerdown_mode(st, this_attr->address);
+ ret = ad5064_sync_powerdown_mode(st, chan->channel);
mutex_unlock(&indio_dev->mlock);
return ret ? ret : len;
}
-static IIO_CONST_ATTR(out_voltage_powerdown_mode_available,
- "1kohm_to_gnd 100kohm_to_gnd three_state");
-
-#define IIO_DEV_ATTR_DAC_POWERDOWN_MODE(_chan) \
- IIO_DEVICE_ATTR(out_voltage##_chan##_powerdown_mode, \
- S_IRUGO | S_IWUSR, \
- ad5064_read_powerdown_mode, \
- ad5064_write_powerdown_mode, _chan);
-
-#define IIO_DEV_ATTR_DAC_POWERDOWN(_chan) \
- IIO_DEVICE_ATTR(out_voltage##_chan##_powerdown, \
- S_IRUGO | S_IWUSR, \
- ad5064_read_dac_powerdown, \
- ad5064_write_dac_powerdown, _chan)
-
-static IIO_DEV_ATTR_DAC_POWERDOWN(0);
-static IIO_DEV_ATTR_DAC_POWERDOWN_MODE(0);
-static IIO_DEV_ATTR_DAC_POWERDOWN(1);
-static IIO_DEV_ATTR_DAC_POWERDOWN_MODE(1);
-static IIO_DEV_ATTR_DAC_POWERDOWN(2);
-static IIO_DEV_ATTR_DAC_POWERDOWN_MODE(2);
-static IIO_DEV_ATTR_DAC_POWERDOWN(3);
-static IIO_DEV_ATTR_DAC_POWERDOWN_MODE(3);
-
-static struct attribute *ad5064_attributes[] = {
- &iio_dev_attr_out_voltage0_powerdown.dev_attr.attr,
- &iio_dev_attr_out_voltage1_powerdown.dev_attr.attr,
- &iio_dev_attr_out_voltage2_powerdown.dev_attr.attr,
- &iio_dev_attr_out_voltage3_powerdown.dev_attr.attr,
- &iio_dev_attr_out_voltage0_powerdown_mode.dev_attr.attr,
- &iio_dev_attr_out_voltage1_powerdown_mode.dev_attr.attr,
- &iio_dev_attr_out_voltage2_powerdown_mode.dev_attr.attr,
- &iio_dev_attr_out_voltage3_powerdown_mode.dev_attr.attr,
- &iio_const_attr_out_voltage_powerdown_mode_available.dev_attr.attr,
- NULL,
-};
+static int ad5064_get_vref(struct ad5064_state *st,
+ struct iio_chan_spec const *chan)
+{
+ unsigned int i;
-static const struct attribute_group ad5064_attribute_group = {
- .attrs = ad5064_attributes,
-};
+ if (st->use_internal_vref)
+ return st->chip_info->internal_vref;
+
+ i = st->chip_info->shared_vref ? 0 : chan->channel;
+ return regulator_get_voltage(st->vref_reg[i].consumer);
+}
static int ad5064_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
@@ -280,7 +232,6 @@ static int ad5064_read_raw(struct iio_dev *indio_dev,
long m)
{
struct ad5064_state *st = iio_priv(indio_dev);
- unsigned int vref;
int scale_uv;
switch (m) {
@@ -288,8 +239,7 @@ static int ad5064_read_raw(struct iio_dev *indio_dev,
*val = st->dac_cache[chan->channel];
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
- vref = st->chip_info->shared_vref ? 0 : chan->channel;
- scale_uv = regulator_get_voltage(st->vref_reg[vref].consumer);
+ scale_uv = ad5064_get_vref(st, chan);
if (scale_uv < 0)
return scale_uv;
@@ -331,13 +281,144 @@ static int ad5064_write_raw(struct iio_dev *indio_dev,
static const struct iio_info ad5064_info = {
.read_raw = ad5064_read_raw,
.write_raw = ad5064_write_raw,
- .attrs = &ad5064_attribute_group,
.driver_module = THIS_MODULE,
};
+static struct iio_chan_spec_ext_info ad5064_ext_info[] = {
+ {
+ .name = "powerdown",
+ .read = ad5064_read_dac_powerdown,
+ .write = ad5064_write_dac_powerdown,
+ },
+ {
+ .name = "powerdown_mode",
+ .read = ad5064_read_powerdown_mode,
+ .write = ad5064_write_powerdown_mode,
+ },
+ {
+ .name = "powerdown_mode_available",
+ .shared = true,
+ .read = ad5064_read_powerdown_mode_available,
+ },
+ { },
+};
+
+#define AD5064_CHANNEL(chan, bits) { \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .output = 1, \
+ .channel = (chan), \
+ .info_mask = IIO_CHAN_INFO_SCALE_SEPARATE_BIT, \
+ .address = AD5064_ADDR_DAC(chan), \
+ .scan_type = IIO_ST('u', (bits), 16, 20 - (bits)), \
+ .ext_info = ad5064_ext_info, \
+}
+
+#define DECLARE_AD5064_CHANNELS(name, bits) \
+const struct iio_chan_spec name[] = { \
+ AD5064_CHANNEL(0, bits), \
+ AD5064_CHANNEL(1, bits), \
+ AD5064_CHANNEL(2, bits), \
+ AD5064_CHANNEL(3, bits), \
+ AD5064_CHANNEL(4, bits), \
+ AD5064_CHANNEL(5, bits), \
+ AD5064_CHANNEL(6, bits), \
+ AD5064_CHANNEL(7, bits), \
+}
+
+static DECLARE_AD5064_CHANNELS(ad5024_channels, 12);
+static DECLARE_AD5064_CHANNELS(ad5044_channels, 14);
+static DECLARE_AD5064_CHANNELS(ad5064_channels, 16);
+
+static const struct ad5064_chip_info ad5064_chip_info_tbl[] = {
+ [ID_AD5024] = {
+ .shared_vref = false,
+ .channels = ad5024_channels,
+ .num_channels = 4,
+ },
+ [ID_AD5025] = {
+ .shared_vref = false,
+ .channels = ad5024_channels,
+ .num_channels = 2,
+ },
+ [ID_AD5044] = {
+ .shared_vref = false,
+ .channels = ad5044_channels,
+ .num_channels = 4,
+ },
+ [ID_AD5045] = {
+ .shared_vref = false,
+ .channels = ad5044_channels,
+ .num_channels = 2,
+ },
+ [ID_AD5064] = {
+ .shared_vref = false,
+ .channels = ad5064_channels,
+ .num_channels = 4,
+ },
+ [ID_AD5064_1] = {
+ .shared_vref = true,
+ .channels = ad5064_channels,
+ .num_channels = 4,
+ },
+ [ID_AD5065] = {
+ .shared_vref = false,
+ .channels = ad5064_channels,
+ .num_channels = 2,
+ },
+ [ID_AD5628_1] = {
+ .shared_vref = true,
+ .internal_vref = 2500000,
+ .channels = ad5024_channels,
+ .num_channels = 8,
+ },
+ [ID_AD5628_2] = {
+ .shared_vref = true,
+ .internal_vref = 5000000,
+ .channels = ad5024_channels,
+ .num_channels = 8,
+ },
+ [ID_AD5648_1] = {
+ .shared_vref = true,
+ .internal_vref = 2500000,
+ .channels = ad5044_channels,
+ .num_channels = 8,
+ },
+ [ID_AD5648_2] = {
+ .shared_vref = true,
+ .internal_vref = 5000000,
+ .channels = ad5044_channels,
+ .num_channels = 8,
+ },
+ [ID_AD5666_1] = {
+ .shared_vref = true,
+ .internal_vref = 2500000,
+ .channels = ad5064_channels,
+ .num_channels = 4,
+ },
+ [ID_AD5666_2] = {
+ .shared_vref = true,
+ .internal_vref = 5000000,
+ .channels = ad5064_channels,
+ .num_channels = 4,
+ },
+ [ID_AD5668_1] = {
+ .shared_vref = true,
+ .internal_vref = 2500000,
+ .channels = ad5064_channels,
+ .num_channels = 8,
+ },
+ [ID_AD5668_2] = {
+ .shared_vref = true,
+ .internal_vref = 5000000,
+ .channels = ad5064_channels,
+ .num_channels = 8,
+ },
+};
+
static inline unsigned int ad5064_num_vref(struct ad5064_state *st)
{
- return st->chip_info->shared_vref ? 1 : AD5064_DAC_CHANNELS;
+ return st->chip_info->shared_vref ? 1 : st->chip_info->num_channels;
}
static const char * const ad5064_vref_names[] = {
@@ -376,14 +457,24 @@ static int __devinit ad5064_probe(struct spi_device *spi)
ret = regulator_bulk_get(&st->spi->dev, ad5064_num_vref(st),
st->vref_reg);
- if (ret)
- goto error_free;
-
- ret = regulator_bulk_enable(ad5064_num_vref(st), st->vref_reg);
- if (ret)
- goto error_free_reg;
+ if (ret) {
+ if (!st->chip_info->internal_vref)
+ goto error_free;
+ st->use_internal_vref = true;
+ ret = ad5064_spi_write(st, AD5064_CMD_CONFIG, 0,
+ AD5064_CONFIG_INT_VREF_ENABLE, 0);
+ if (ret) {
+ dev_err(&spi->dev, "Failed to enable internal vref: %d\n",
+ ret);
+ goto error_free;
+ }
+ } else {
+ ret = regulator_bulk_enable(ad5064_num_vref(st), st->vref_reg);
+ if (ret)
+ goto error_free_reg;
+ }
- for (i = 0; i < AD5064_DAC_CHANNELS; ++i) {
+ for (i = 0; i < st->chip_info->num_channels; ++i) {
st->pwr_down_mode[i] = AD5064_LDAC_PWRDN_1K;
st->dac_cache[i] = 0x8000;
}
@@ -392,8 +483,8 @@ static int __devinit ad5064_probe(struct spi_device *spi)
indio_dev->name = spi_get_device_id(spi)->name;
indio_dev->info = &ad5064_info;
indio_dev->modes = INDIO_DIRECT_MODE;
- indio_dev->channels = st->chip_info->channel;
- indio_dev->num_channels = AD5064_DAC_CHANNELS;
+ indio_dev->channels = st->chip_info->channels;
+ indio_dev->num_channels = st->chip_info->num_channels;
ret = iio_device_register(indio_dev);
if (ret)
@@ -402,9 +493,11 @@ static int __devinit ad5064_probe(struct spi_device *spi)
return 0;
error_disable_reg:
- regulator_bulk_disable(ad5064_num_vref(st), st->vref_reg);
+ if (!st->use_internal_vref)
+ regulator_bulk_disable(ad5064_num_vref(st), st->vref_reg);
error_free_reg:
- regulator_bulk_free(ad5064_num_vref(st), st->vref_reg);
+ if (!st->use_internal_vref)
+ regulator_bulk_free(ad5064_num_vref(st), st->vref_reg);
error_free:
iio_free_device(indio_dev);
@@ -419,8 +512,10 @@ static int __devexit ad5064_remove(struct spi_device *spi)
iio_device_unregister(indio_dev);
- regulator_bulk_disable(ad5064_num_vref(st), st->vref_reg);
- regulator_bulk_free(ad5064_num_vref(st), st->vref_reg);
+ if (!st->use_internal_vref) {
+ regulator_bulk_disable(ad5064_num_vref(st), st->vref_reg);
+ regulator_bulk_free(ad5064_num_vref(st), st->vref_reg);
+ }
iio_free_device(indio_dev);
@@ -429,9 +524,21 @@ static int __devexit ad5064_remove(struct spi_device *spi)
static const struct spi_device_id ad5064_id[] = {
{"ad5024", ID_AD5024},
+ {"ad5025", ID_AD5025},
{"ad5044", ID_AD5044},
+ {"ad5045", ID_AD5045},
{"ad5064", ID_AD5064},
{"ad5064-1", ID_AD5064_1},
+ {"ad5065", ID_AD5065},
+ {"ad5628-1", ID_AD5628_1},
+ {"ad5628-2", ID_AD5628_2},
+ {"ad5648-1", ID_AD5648_1},
+ {"ad5648-2", ID_AD5648_2},
+ {"ad5666-1", ID_AD5666_1},
+ {"ad5666-2", ID_AD5666_2},
+ {"ad5668-1", ID_AD5668_1},
+ {"ad5668-2", ID_AD5668_2},
+ {"ad5668-3", ID_AD5668_2}, /* similar enough to ad5668-2 */
{}
};
MODULE_DEVICE_TABLE(spi, ad5064_id);
@@ -448,5 +555,5 @@ static struct spi_driver ad5064_driver = {
module_spi_driver(ad5064_driver);
MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
-MODULE_DESCRIPTION("Analog Devices AD5064/64-1/44/24 DAC");
+MODULE_DESCRIPTION("Analog Devices AD5024/25/44/45/64/64-1/65, AD5628/48/66/68 DAC");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/iio/dac/ad5360.c b/drivers/staging/iio/dac/ad5360.c
index 710b256affc..cec3693b50a 100644
--- a/drivers/staging/iio/dac/ad5360.c
+++ b/drivers/staging/iio/dac/ad5360.c
@@ -439,8 +439,8 @@ static int __devinit ad5360_alloc_channels(struct iio_dev *indio_dev)
struct iio_chan_spec *channels;
unsigned int i;
- channels = kcalloc(sizeof(struct iio_chan_spec),
- st->chip_info->num_channels, GFP_KERNEL);
+ channels = kcalloc(st->chip_info->num_channels,
+ sizeof(struct iio_chan_spec), GFP_KERNEL);
if (!channels)
return -ENOMEM;
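Both this hunk and the matching one in ad5380.c below put kcalloc's arguments in their documented order. The signature, from <linux/slab.h>:

void *kcalloc(size_t n, size_t size, gfp_t flags);

The n * size product is the same either way, so this is not a behavioural fix, but passing the element count first matches the overflow-checked contract and what static checkers expect.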
diff --git a/drivers/staging/iio/dac/ad5380.c b/drivers/staging/iio/dac/ad5380.c
index eff97ae05c4..4c50716fa80 100644
--- a/drivers/staging/iio/dac/ad5380.c
+++ b/drivers/staging/iio/dac/ad5380.c
@@ -363,8 +363,8 @@ static int __devinit ad5380_alloc_channels(struct iio_dev *indio_dev)
struct iio_chan_spec *channels;
unsigned int i;
- channels = kcalloc(sizeof(struct iio_chan_spec),
- st->chip_info->num_channels, GFP_KERNEL);
+ channels = kcalloc(st->chip_info->num_channels,
+ sizeof(struct iio_chan_spec), GFP_KERNEL);
if (!channels)
return -ENOMEM;
diff --git a/drivers/staging/iio/dac/ad5421.c b/drivers/staging/iio/dac/ad5421.c
index 71ee8682476..0b040b20469 100644
--- a/drivers/staging/iio/dac/ad5421.c
+++ b/drivers/staging/iio/dac/ad5421.c
@@ -536,18 +536,7 @@ static struct spi_driver ad5421_driver = {
.probe = ad5421_probe,
.remove = __devexit_p(ad5421_remove),
};
-
-static __init int ad5421_init(void)
-{
- return spi_register_driver(&ad5421_driver);
-}
-module_init(ad5421_init);
-
-static __exit void ad5421_exit(void)
-{
- spi_unregister_driver(&ad5421_driver);
-}
-module_exit(ad5421_exit);
+module_spi_driver(ad5421_driver);
MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_DESCRIPTION("Analog Devices AD5421 DAC");
diff --git a/drivers/staging/iio/dac/ad5446.c b/drivers/staging/iio/dac/ad5446.c
index 693e7482524..633ffbb2181 100644
--- a/drivers/staging/iio/dac/ad5446.c
+++ b/drivers/staging/iio/dac/ad5446.c
@@ -149,30 +149,8 @@ static struct attribute *ad5446_attributes[] = {
NULL,
};
-static umode_t ad5446_attr_is_visible(struct kobject *kobj,
- struct attribute *attr, int n)
-{
- struct device *dev = container_of(kobj, struct device, kobj);
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct ad5446_state *st = iio_priv(indio_dev);
-
- umode_t mode = attr->mode;
-
- if (!st->chip_info->store_pwr_down &&
- (attr == &iio_dev_attr_out_voltage0_powerdown.dev_attr.attr ||
- attr == &iio_dev_attr_out_voltage_powerdown_mode.
- dev_attr.attr ||
- attr ==
- &iio_const_attr_out_voltage_powerdown_mode_available.
- dev_attr.attr))
- mode = 0;
-
- return mode;
-}
-
static const struct attribute_group ad5446_attribute_group = {
.attrs = ad5446_attributes,
- .is_visible = ad5446_attr_is_visible,
};
#define AD5446_CHANNEL(bits, storage, shift) { \
@@ -321,6 +299,12 @@ static const struct iio_info ad5446_info = {
.driver_module = THIS_MODULE,
};
+static const struct iio_info ad5446_info_no_pwr_down = {
+ .read_raw = ad5446_read_raw,
+ .write_raw = ad5446_write_raw,
+ .driver_module = THIS_MODULE,
+};
+
static int __devinit ad5446_probe(struct spi_device *spi)
{
struct ad5446_state *st;
@@ -350,10 +334,13 @@ static int __devinit ad5446_probe(struct spi_device *spi)
st->reg = reg;
st->spi = spi;
- /* Estabilish that the iio_dev is a child of the spi device */
+ /* Establish that the iio_dev is a child of the spi device */
indio_dev->dev.parent = &spi->dev;
indio_dev->name = spi_get_device_id(spi)->name;
- indio_dev->info = &ad5446_info;
+ if (st->chip_info->store_pwr_down)
+ indio_dev->info = &ad5446_info;
+ else
+ indio_dev->info = &ad5446_info_no_pwr_down;
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->channels = &st->chip_info->channel;
indio_dev->num_channels = 1;
diff --git a/drivers/staging/iio/dac/ad5764.c b/drivers/staging/iio/dac/ad5764.c
index ff91480ae65..f73a7307949 100644
--- a/drivers/staging/iio/dac/ad5764.c
+++ b/drivers/staging/iio/dac/ad5764.c
@@ -375,18 +375,7 @@ static struct spi_driver ad5764_driver = {
.remove = __devexit_p(ad5764_remove),
.id_table = ad5764_ids,
};
-
-static int __init ad5764_spi_init(void)
-{
- return spi_register_driver(&ad5764_driver);
-}
-module_init(ad5764_spi_init);
-
-static void __exit ad5764_spi_exit(void)
-{
- spi_unregister_driver(&ad5764_driver);
-}
-module_exit(ad5764_spi_exit);
+module_spi_driver(ad5764_driver);
MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_DESCRIPTION("Analog Devices AD5744/AD5744R/AD5764/AD5764R DAC");
diff --git a/drivers/staging/iio/dac/max517.c b/drivers/staging/iio/dac/max517.c
index a4df6d7443c..41483c72cec 100644
--- a/drivers/staging/iio/dac/max517.c
+++ b/drivers/staging/iio/dac/max517.c
@@ -179,20 +179,27 @@ static struct attribute_group max518_attribute_group = {
.attrs = max518_attributes,
};
-static int max517_suspend(struct i2c_client *client, pm_message_t mesg)
+#ifdef CONFIG_PM_SLEEP
+static int max517_suspend(struct device *dev)
{
u8 outbuf = COMMAND_PD;
- return i2c_master_send(client, &outbuf, 1);
+ return i2c_master_send(to_i2c_client(dev), &outbuf, 1);
}
-static int max517_resume(struct i2c_client *client)
+static int max517_resume(struct device *dev)
{
u8 outbuf = 0;
- return i2c_master_send(client, &outbuf, 1);
+ return i2c_master_send(to_i2c_client(dev), &outbuf, 1);
}
+static SIMPLE_DEV_PM_OPS(max517_pm_ops, max517_suspend, max517_resume);
+#define MAX517_PM_OPS (&max517_pm_ops)
+#else
+#define MAX517_PM_OPS NULL
+#endif
+
static const struct iio_info max517_info = {
.attrs = &max517_attribute_group,
.driver_module = THIS_MODULE,
@@ -273,11 +280,10 @@ MODULE_DEVICE_TABLE(i2c, max517_id);
static struct i2c_driver max517_driver = {
.driver = {
.name = MAX517_DRV_NAME,
+ .pm = MAX517_PM_OPS,
},
.probe = max517_probe,
.remove = max517_remove,
- .suspend = max517_suspend,
- .resume = max517_resume,
.id_table = max517_id,
};
module_i2c_driver(max517_driver);
diff --git a/drivers/staging/iio/dds/ad9834.c b/drivers/staging/iio/dds/ad9834.c
index 5e67104fea1..38a2de08626 100644
--- a/drivers/staging/iio/dds/ad9834.c
+++ b/drivers/staging/iio/dds/ad9834.c
@@ -281,29 +281,27 @@ static struct attribute *ad9834_attributes[] = {
NULL,
};
-static umode_t ad9834_attr_is_visible(struct kobject *kobj,
- struct attribute *attr, int n)
-{
- struct device *dev = container_of(kobj, struct device, kobj);
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct ad9834_state *st = iio_priv(indio_dev);
-
- umode_t mode = attr->mode;
-
- if (((st->devid == ID_AD9833) || (st->devid == ID_AD9837)) &&
- ((attr == &iio_dev_attr_dds0_out1_enable.dev_attr.attr) ||
- (attr == &iio_dev_attr_dds0_out1_wavetype.dev_attr.attr) ||
- (attr ==
- &iio_dev_attr_dds0_out1_wavetype_available.dev_attr.attr) ||
- (attr == &iio_dev_attr_dds0_pincontrol_en.dev_attr.attr)))
- mode = 0;
-
- return mode;
-}
+static struct attribute *ad9833_attributes[] = {
+ &iio_dev_attr_dds0_freq0.dev_attr.attr,
+ &iio_dev_attr_dds0_freq1.dev_attr.attr,
+ &iio_const_attr_dds0_freq_scale.dev_attr.attr,
+ &iio_dev_attr_dds0_phase0.dev_attr.attr,
+ &iio_dev_attr_dds0_phase1.dev_attr.attr,
+ &iio_const_attr_dds0_phase_scale.dev_attr.attr,
+ &iio_dev_attr_dds0_freqsymbol.dev_attr.attr,
+ &iio_dev_attr_dds0_phasesymbol.dev_attr.attr,
+ &iio_dev_attr_dds0_out_enable.dev_attr.attr,
+ &iio_dev_attr_dds0_out0_wavetype.dev_attr.attr,
+ &iio_dev_attr_dds0_out0_wavetype_available.dev_attr.attr,
+ NULL,
+};
static const struct attribute_group ad9834_attribute_group = {
.attrs = ad9834_attributes,
- .is_visible = ad9834_attr_is_visible,
+};
+
+static const struct attribute_group ad9833_attribute_group = {
+ .attrs = ad9833_attributes,
};
static const struct iio_info ad9834_info = {
@@ -311,6 +309,11 @@ static const struct iio_info ad9834_info = {
.driver_module = THIS_MODULE,
};
+static const struct iio_info ad9833_info = {
+ .attrs = &ad9833_attribute_group,
+ .driver_module = THIS_MODULE,
+};
+
static int __devinit ad9834_probe(struct spi_device *spi)
{
struct ad9834_platform_data *pdata = spi->dev.platform_data;
@@ -344,7 +347,15 @@ static int __devinit ad9834_probe(struct spi_device *spi)
st->reg = reg;
indio_dev->dev.parent = &spi->dev;
indio_dev->name = spi_get_device_id(spi)->name;
- indio_dev->info = &ad9834_info;
+ switch (st->devid) {
+ case ID_AD9833:
+ case ID_AD9837:
+ indio_dev->info = &ad9833_info;
+ break;
+ default:
+ indio_dev->info = &ad9834_info;
+ break;
+ }
indio_dev->modes = INDIO_DIRECT_MODE;
/* Setup default messages */
diff --git a/drivers/staging/iio/driver.h b/drivers/staging/iio/driver.h
new file mode 100644
index 00000000000..a4f8b2e05af
--- /dev/null
+++ b/drivers/staging/iio/driver.h
@@ -0,0 +1,34 @@
+/*
+ * Industrial I/O in kernel access map interface.
+ *
+ * Copyright (c) 2011 Jonathan Cameron
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef _IIO_INKERN_H_
+#define _IIO_INKERN_H_
+
+struct iio_map;
+
+/**
+ * iio_map_array_register() - tell the core about inkernel consumers
+ * @indio_dev: provider device
+ * @map: array of mappings specifying association of channel with client
+ */
+int iio_map_array_register(struct iio_dev *indio_dev,
+ struct iio_map *map);
+
+/**
+ * iio_map_array_unregister() - tell the core to remove consumer mappings
+ * @indio_dev: provider device
+ * @map: array of mappings to remove. Note these must have same memory
+ * addresses as those originally added not just equal parameter
+ * values.
+ */
+int iio_map_array_unregister(struct iio_dev *indio_dev,
+ struct iio_map *map);
+
+#endif
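This is the provider-side half of the consumer.h interface above. A registration sketch with hypothetical names, assuming struct iio_map carries the ADC channel label plus the consumer's device and channel names:

static struct iio_map board_adc_maps[] = {
	{
		.adc_channel_label = "channel_0",	/* hypothetical */
		.consumer_dev_name = "some-consumer",	/* hypothetical */
		.consumer_channel = "battery_voltage",
	},
	{ },	/* terminator */
};

/* in the provider's probe(), after iio_device_register(): */
ret = iio_map_array_register(indio_dev, board_adc_maps);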
diff --git a/drivers/staging/iio/events.h b/drivers/staging/iio/events.h
index bfb63400fa6..c25f0e3c92e 100644
--- a/drivers/staging/iio/events.h
+++ b/drivers/staging/iio/events.h
@@ -96,8 +96,10 @@ enum iio_event_direction {
/* Event code number extraction depends on which type of event we have.
* Perhaps review this function in the future*/
-#define IIO_EVENT_CODE_EXTRACT_NUM(mask) ((__s16)(mask & 0xFFFF))
+#define IIO_EVENT_CODE_EXTRACT_CHAN(mask) ((__s16)(mask & 0xFFFF))
+#define IIO_EVENT_CODE_EXTRACT_CHAN2(mask) ((__s16)(((mask) >> 16) & 0xFFFF))
#define IIO_EVENT_CODE_EXTRACT_MODIFIER(mask) ((mask >> 40) & 0xFF)
+#define IIO_EVENT_CODE_EXTRACT_DIFF(mask) (((mask) >> 55) & 0x1)
#endif
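A decoding sketch for the widened macro set, assuming the layout implied above: channel in bits 0..15, the second channel of a differential pair in bits 16..31, modifier in bits 40..47 and the differential flag in bit 55:

static void describe_event(u64 code)
{
	int chan = IIO_EVENT_CODE_EXTRACT_CHAN(code);
	int chan2 = IIO_EVENT_CODE_EXTRACT_CHAN2(code);

	if (IIO_EVENT_CODE_EXTRACT_DIFF(code))
		pr_info("differential event on channels %d-%d\n",
			chan, chan2);
	else
		pr_info("event on channel %d\n", chan);
}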
diff --git a/drivers/staging/iio/gyro/adis16260_ring.c b/drivers/staging/iio/gyro/adis16260_ring.c
index 699a6152c40..711f15122a0 100644
--- a/drivers/staging/iio/gyro/adis16260_ring.c
+++ b/drivers/staging/iio/gyro/adis16260_ring.c
@@ -115,8 +115,6 @@ int adis16260_configure_ring(struct iio_dev *indio_dev)
return ret;
}
indio_dev->buffer = ring;
- /* Effectively select the ring buffer implementation */
- ring->access = &ring_sw_access_funcs;
ring->scan_timestamp = true;
indio_dev->setup_ops = &adis16260_ring_setup_ops;
diff --git a/drivers/staging/iio/iio.h b/drivers/staging/iio/iio.h
index be6ced31f65..b9cd454f69e 100644
--- a/drivers/staging/iio/iio.h
+++ b/drivers/staging/iio/iio.h
@@ -26,7 +26,7 @@ enum iio_data_type {
/* Could add the raw attributes as well - allowing buffer only devices */
enum iio_chan_info_enum {
- /* 0 is reserverd for raw attributes */
+ /* 0 is reserved for raw attributes */
IIO_CHAN_INFO_SCALE = 1,
IIO_CHAN_INFO_OFFSET,
IIO_CHAN_INFO_CALIBSCALE,
@@ -88,10 +88,29 @@ enum iio_endian {
IIO_LE,
};
+struct iio_chan_spec;
+struct iio_dev;
+
+/**
+ * struct iio_chan_spec_ext_info - Extended channel info attribute
+ * @name: Info attribute name
+ * @shared: Whether this attribute is shared between all channels.
+ * @read: Read callback for this info attribute, may be NULL.
+ * @write: Write callback for this info attribute, may be NULL.
+ */
+struct iio_chan_spec_ext_info {
+ const char *name;
+ bool shared;
+ ssize_t (*read)(struct iio_dev *, struct iio_chan_spec const *,
+ char *buf);
+ ssize_t (*write)(struct iio_dev *, struct iio_chan_spec const *,
+ const char *buf, size_t len);
+};
+
/**
* struct iio_chan_spec - specification of a single channel
* @type: What type of measurement is the channel making.
- * @channel: What number or name do we wish to asign the channel.
+ * @channel: What number or name do we wish to assign the channel.
* @channel2: If there is a second number for a differential
* channel then this is it. If modified is set then the
* value here specifies the modifier.
@@ -107,11 +126,14 @@ enum iio_endian {
* @info_mask: What information is to be exported about this channel.
* This includes calibbias, scale etc.
* @event_mask: What events can this channel produce.
+ * @ext_info: Array of extended info attributes for this channel.
+ * The array is NULL terminated; the last element should
+ * have its name field set to NULL.
* @extend_name: Allows labeling of channel attributes with an
* informative name. Note this has no effect codes etc,
* unlike modifiers.
 * @datasheet_name: A name used in the in-kernel mapping of channels. It should
- * corrspond to the first name that the channel is referred
+ * correspond to the first name that the channel is referred
* to by in the datasheet (e.g. IND), or the nearest
* possible compound name (e.g. IND-INC).
* @processed_val: Flag to specify the data access attribute should be
@@ -141,6 +163,7 @@ struct iio_chan_spec {
} scan_type;
long info_mask;
long event_mask;
+ const struct iio_chan_spec_ext_info *ext_info;
char *extend_name;
const char *datasheet_name;
unsigned processed_val:1;
@@ -197,12 +220,6 @@ static inline s64 iio_get_time_ns(void)
#define INDIO_ALL_BUFFER_MODES \
(INDIO_BUFFER_TRIGGERED | INDIO_BUFFER_HARDWARE)
-/* Vast majority of this is set by the industrialio subsystem on a
- * call to iio_device_register. */
-#define IIO_VAL_INT 1
-#define IIO_VAL_INT_PLUS_MICRO 2
-#define IIO_VAL_INT_PLUS_NANO 3
-
struct iio_trigger; /* forward declaration */
struct iio_dev;
@@ -226,7 +243,7 @@ struct iio_dev;
* @write_event_config: set if the event is enabled.
* @read_event_value: read a value associated with the event. Meaning
* is event dependant. event_code specifies which event.
- * @write_event_value: write the value associate with the event.
+ * @write_event_value: write the value associated with the event.
* Meaning is event dependent.
* @validate_trigger: function to validate the trigger when the
* current trigger gets changed.
@@ -269,6 +286,9 @@ struct iio_info {
struct iio_trigger *trig);
int (*update_scan_mode)(struct iio_dev *indio_dev,
const unsigned long *scan_mask);
+ int (*debugfs_reg_access)(struct iio_dev *indio_dev,
+ unsigned reg, unsigned writeval,
+ unsigned *readval);
};
/**
@@ -310,11 +330,14 @@ struct iio_buffer_setup_ops {
* @chan_attr_group: [INTERN] group for all attrs in base directory
* @name: [DRIVER] name of the device.
* @info: [DRIVER] callbacks and constant info from driver
+ * @info_exist_lock: [INTERN] lock to prevent use during removal
* @chrdev: [INTERN] associated character device
* @groups: [INTERN] attribute groups
* @groupcounter: [INTERN] index of next attribute group
* @flags: [INTERN] file ops related flags including busy flag.
- **/
+ * @debugfs_dentry: [INTERN] device specific debugfs dentry.
+ * @cached_reg_addr: [INTERN] cached register address for debugfs reads.
+ */
struct iio_dev {
int id;
@@ -327,9 +350,9 @@ struct iio_dev {
struct iio_buffer *buffer;
struct mutex mlock;
- unsigned long *available_scan_masks;
+ const unsigned long *available_scan_masks;
unsigned masklength;
- unsigned long *active_scan_mask;
+ const unsigned long *active_scan_mask;
struct iio_trigger *trig;
struct iio_poll_func *pollfunc;
@@ -340,6 +363,7 @@ struct iio_dev {
struct attribute_group chan_attr_group;
const char *name;
const struct iio_info *info;
+ struct mutex info_exist_lock;
const struct iio_buffer_setup_ops *setup_ops;
struct cdev chrdev;
#define IIO_MAX_GROUPS 6
@@ -347,6 +371,10 @@ struct iio_dev {
int groupcounter;
unsigned long flags;
+#if defined(CONFIG_DEBUG_FS)
+ struct dentry *debugfs_dentry;
+ unsigned cached_reg_addr;
+#endif
};
/**
@@ -424,4 +452,20 @@ static inline bool iio_buffer_enabled(struct iio_dev *indio_dev)
& (INDIO_BUFFER_TRIGGERED | INDIO_BUFFER_HARDWARE);
};
+/**
+ * iio_get_debugfs_dentry() - helper function to get the debugfs_dentry
+ * @indio_dev: IIO device info structure for device
+ **/
+#if defined(CONFIG_DEBUG_FS)
+static inline struct dentry *iio_get_debugfs_dentry(struct iio_dev *indio_dev)
+{
+ return indio_dev->debugfs_dentry;
+};
+#else
+static inline struct dentry *iio_get_debugfs_dentry(struct iio_dev *indio_dev)
+{
+ return NULL;
+};
+#endif
+
#endif /* _INDUSTRIAL_IO_H_ */
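
To illustrate the new ext_info mechanism, a hedged driver-side sketch; the
"wavetype" attribute, its callback and the channel layout are invented for
illustration rather than taken from any driver in this patch:

    static ssize_t example_wavetype_read(struct iio_dev *indio_dev,
                                         struct iio_chan_spec const *chan,
                                         char *buf)
    {
        return sprintf(buf, "sine\n");
    }

    static const struct iio_chan_spec_ext_info example_ext_info[] = {
        {
            .name = "wavetype",
            .shared = true,     /* one attribute shared by all channels */
            .read = example_wavetype_read,
            /* .write left NULL: attribute is read-only */
        },
        { },    /* array terminated by an entry with a NULL name */
    };

    static const struct iio_chan_spec example_channels[] = {
        {
            .type = IIO_VOLTAGE,
            .channel = 0,
            .indexed = 1,
            .ext_info = example_ext_info,
        },
    };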
diff --git a/drivers/staging/iio/iio_core.h b/drivers/staging/iio/iio_core.h
index 107cfb1cbb0..c9dfcba0bac 100644
--- a/drivers/staging/iio/iio_core.h
+++ b/drivers/staging/iio/iio_core.h
@@ -49,4 +49,8 @@ ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
#endif
+int iio_device_register_eventset(struct iio_dev *indio_dev);
+void iio_device_unregister_eventset(struct iio_dev *indio_dev);
+int iio_event_getfd(struct iio_dev *indio_dev);
+
#endif
diff --git a/drivers/staging/iio/iio_dummy_evgen.c b/drivers/staging/iio/iio_dummy_evgen.c
index cdbf289bfe2..f39f346bf04 100644
--- a/drivers/staging/iio/iio_dummy_evgen.c
+++ b/drivers/staging/iio/iio_dummy_evgen.c
@@ -32,7 +32,7 @@
* @chip: irq chip we are faking
* @base: base of irq range
* @enabled: mask of which irqs are enabled
- * @inuse: mask of which irqs actually have anyone connected
+ * @inuse: mask of which irqs are connected
* @lock: protect the evgen state
*/
struct iio_dummy_eventgen {
diff --git a/drivers/staging/iio/iio_hwmon.c b/drivers/staging/iio/iio_hwmon.c
new file mode 100644
index 00000000000..a603a5f51f9
--- /dev/null
+++ b/drivers/staging/iio/iio_hwmon.c
@@ -0,0 +1,232 @@
+/* Hwmon client for industrial I/O devices
+ *
+ * Copyright (c) 2011 Jonathan Cameron
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include "consumer.h"
+#include "types.h"
+
+/**
+ * struct iio_hwmon_state - device instance state
+ * @channels: filled with array of channels from iio
+ * @num_channels: number of channels in channels (saves counting twice)
+ * @hwmon_dev: associated hwmon device
+ * @attr_group: the group of attributes
+ * @attrs: null terminated array of attribute pointers.
+ */
+struct iio_hwmon_state {
+ struct iio_channel *channels;
+ int num_channels;
+ struct device *hwmon_dev;
+ struct attribute_group attr_group;
+ struct attribute **attrs;
+};
+
+/*
+ * Assumes that IIO and hwmon operate in the same base units.
+ * This is supposed to be true, but needs verification for
+ * new channel types.
+ */
+static ssize_t iio_hwmon_read_val(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ long result;
+ int val, ret, scaleint, scalepart;
+ struct sensor_device_attribute *sattr = to_sensor_dev_attr(attr);
+ struct iio_hwmon_state *state = dev_get_drvdata(dev);
+
+ /*
+ * No locking between this pair, so theoretically possible
+ * the scale has changed.
+ */
+ ret = iio_st_read_channel_raw(&state->channels[sattr->index],
+ &val);
+ if (ret < 0)
+ return ret;
+
+ ret = iio_st_read_channel_scale(&state->channels[sattr->index],
+ &scaleint, &scalepart);
+ if (ret < 0)
+ return ret;
+ switch (ret) {
+ case IIO_VAL_INT:
+ result = val * scaleint;
+ break;
+ case IIO_VAL_INT_PLUS_MICRO:
+ result = (s64)val * (s64)scaleint +
+ div_s64((s64)val * (s64)scalepart, 1000000LL);
+ break;
+ case IIO_VAL_INT_PLUS_NANO:
+ result = (s64)val * (s64)scaleint +
+ div_s64((s64)val * (s64)scalepart, 1000000000LL);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return sprintf(buf, "%ld\n", result);
+}
+
+static void iio_hwmon_free_attrs(struct iio_hwmon_state *st)
+{
+ int i;
+ struct sensor_device_attribute *a;
+ for (i = 0; i < st->num_channels; i++)
+ if (st->attrs[i]) {
+ a = to_sensor_dev_attr(
+ container_of(st->attrs[i],
+ struct device_attribute,
+ attr));
+ kfree(a);
+ }
+}
+
+static int __devinit iio_hwmon_probe(struct platform_device *pdev)
+{
+ struct iio_hwmon_state *st;
+ struct sensor_device_attribute *a;
+ int ret, i;
+ int in_i = 1, temp_i = 1, curr_i = 1;
+ enum iio_chan_type type;
+
+ st = kzalloc(sizeof(*st), GFP_KERNEL);
+ if (st == NULL) {
+ ret = -ENOMEM;
+ goto error_ret;
+ }
+
+ st->channels = iio_st_channel_get_all(dev_name(&pdev->dev));
+ if (IS_ERR(st->channels)) {
+ ret = PTR_ERR(st->channels);
+ goto error_free_state;
+ }
+
+ /* count how many attributes we have */
+ while (st->channels[st->num_channels].indio_dev)
+ st->num_channels++;
+
+ st->attrs = kzalloc(sizeof(*st->attrs) * (st->num_channels + 1),
+ GFP_KERNEL);
+ if (st->attrs == NULL) {
+ ret = -ENOMEM;
+ goto error_release_channels;
+ }
+ for (i = 0; i < st->num_channels; i++) {
+ a = kzalloc(sizeof(*a), GFP_KERNEL);
+ if (a == NULL) {
+ ret = -ENOMEM;
+ goto error_free_attrs;
+ }
+
+ sysfs_attr_init(&a->dev_attr.attr);
+ ret = iio_st_get_channel_type(&st->channels[i], &type);
+ if (ret < 0) {
+ kfree(a);
+ goto error_free_attrs;
+ }
+ switch (type) {
+ case IIO_VOLTAGE:
+ a->dev_attr.attr.name = kasprintf(GFP_KERNEL,
+ "in%d_input",
+ in_i++);
+ break;
+ case IIO_TEMP:
+ a->dev_attr.attr.name = kasprintf(GFP_KERNEL,
+ "temp%d_input",
+ temp_i++);
+ break;
+ case IIO_CURRENT:
+ a->dev_attr.attr.name = kasprintf(GFP_KERNEL,
+ "curr%d_input",
+ curr_i++);
+ break;
+ default:
+ ret = -EINVAL;
+ kfree(a);
+ goto error_free_attrs;
+ }
+ if (a->dev_attr.attr.name == NULL) {
+ kfree(a);
+ ret = -ENOMEM;
+ goto error_free_attrs;
+ }
+ a->dev_attr.show = iio_hwmon_read_val;
+ a->dev_attr.attr.mode = S_IRUGO;
+ a->index = i;
+ st->attrs[i] = &a->dev_attr.attr;
+ }
+
+ st->attr_group.attrs = st->attrs;
+ platform_set_drvdata(pdev, st);
+ ret = sysfs_create_group(&pdev->dev.kobj, &st->attr_group);
+ if (ret < 0)
+ goto error_free_attrs;
+
+ st->hwmon_dev = hwmon_device_register(&pdev->dev);
+ if (IS_ERR(st->hwmon_dev)) {
+ ret = PTR_ERR(st->hwmon_dev);
+ goto error_remove_group;
+ }
+ return 0;
+
+error_remove_group:
+ sysfs_remove_group(&pdev->dev.kobj, &st->attr_group);
+error_free_attrs:
+ iio_hwmon_free_attrs(st);
+ kfree(st->attrs);
+error_release_channels:
+ iio_st_channel_release_all(st->channels);
+error_free_state:
+ kfree(st);
+error_ret:
+ return ret;
+}
+
+static int __devexit iio_hwmon_remove(struct platform_device *pdev)
+{
+ struct iio_hwmon_state *st = platform_get_drvdata(pdev);
+
+ hwmon_device_unregister(st->hwmon_dev);
+ sysfs_remove_group(&pdev->dev.kobj, &st->attr_group);
+ iio_hwmon_free_attrs(st);
+ kfree(st->attrs);
+ iio_st_channel_release_all(st->channels);
+
+ return 0;
+}
+
+static struct platform_driver __refdata iio_hwmon_driver = {
+ .driver = {
+ .name = "iio_hwmon",
+ .owner = THIS_MODULE,
+ },
+ .probe = iio_hwmon_probe,
+ .remove = __devexit_p(iio_hwmon_remove),
+};
+
+static int iio_inkern_init(void)
+{
+ return platform_driver_register(&iio_hwmon_driver);
+}
+module_init(iio_inkern_init);
+
+static void iio_inkern_exit(void)
+{
+ platform_driver_unregister(&iio_hwmon_driver);
+}
+module_exit(iio_inkern_exit);
+
+MODULE_AUTHOR("Jonathan Cameron <jic23@cam.ac.uk>");
+MODULE_DESCRIPTION("IIO to hwmon driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/iio/iio_simple_dummy_buffer.c b/drivers/staging/iio/iio_simple_dummy_buffer.c
index d6a1c0e82a5..bb4daf74436 100644
--- a/drivers/staging/iio/iio_simple_dummy_buffer.c
+++ b/drivers/staging/iio/iio_simple_dummy_buffer.c
@@ -142,8 +142,6 @@ int iio_simple_dummy_configure_buffer(struct iio_dev *indio_dev)
}
indio_dev->buffer = buffer;
- /* Tell the core how to access the buffer */
- buffer->access = &kfifo_access_funcs;
/* Enable timestamps by default */
buffer->scan_timestamp = true;
diff --git a/drivers/staging/iio/impedance-analyzer/ad5933.c b/drivers/staging/iio/impedance-analyzer/ad5933.c
index 9a2ca55625f..cd82b56d58a 100644
--- a/drivers/staging/iio/impedance-analyzer/ad5933.c
+++ b/drivers/staging/iio/impedance-analyzer/ad5933.c
@@ -607,9 +607,6 @@ static int ad5933_register_ring_funcs_and_init(struct iio_dev *indio_dev)
if (!indio_dev->buffer)
return -ENOMEM;
- /* Effectively select the ring buffer implementation */
- indio_dev->buffer->access = &ring_sw_access_funcs;
-
/* Ring buffer functions - here trigger setup related */
indio_dev->setup_ops = &ad5933_ring_setup_ops;
diff --git a/drivers/staging/iio/imu/adis16400_ring.c b/drivers/staging/iio/imu/adis16400_ring.c
index ac22de573f3..8daa038b23e 100644
--- a/drivers/staging/iio/imu/adis16400_ring.c
+++ b/drivers/staging/iio/imu/adis16400_ring.c
@@ -187,8 +187,6 @@ int adis16400_configure_ring(struct iio_dev *indio_dev)
return ret;
}
indio_dev->buffer = ring;
- /* Effectively select the ring buffer implementation */
- ring->access = &ring_sw_access_funcs;
ring->scan_timestamp = true;
indio_dev->setup_ops = &adis16400_ring_setup_ops;
diff --git a/drivers/staging/iio/industrialio-buffer.c b/drivers/staging/iio/industrialio-buffer.c
index d7b1e9e435a..386ba760f3f 100644
--- a/drivers/staging/iio/industrialio-buffer.c
+++ b/drivers/staging/iio/industrialio-buffer.c
@@ -489,9 +489,9 @@ ssize_t iio_buffer_show_enable(struct device *dev,
EXPORT_SYMBOL(iio_buffer_show_enable);
/* note NULL used as error indicator as it doesn't make sense. */
-static unsigned long *iio_scan_mask_match(unsigned long *av_masks,
+static const unsigned long *iio_scan_mask_match(const unsigned long *av_masks,
unsigned int masklength,
- unsigned long *mask)
+ const unsigned long *mask)
{
if (bitmap_empty(mask, masklength))
return NULL;
@@ -554,7 +554,7 @@ EXPORT_SYMBOL(iio_sw_buffer_preenable);
int iio_scan_mask_set(struct iio_dev *indio_dev,
struct iio_buffer *buffer, int bit)
{
- unsigned long *mask;
+ const unsigned long *mask;
unsigned long *trialmask;
trialmask = kmalloc(sizeof(*trialmask)*
diff --git a/drivers/staging/iio/industrialio-core.c b/drivers/staging/iio/industrialio-core.c
index 19f897f3c85..d303bfbff27 100644
--- a/drivers/staging/iio/industrialio-core.c
+++ b/drivers/staging/iio/industrialio-core.c
@@ -22,6 +22,7 @@
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/anon_inodes.h>
+#include <linux/debugfs.h>
#include "iio.h"
#include "iio_core.h"
#include "iio_core_trigger.h"
@@ -39,6 +40,8 @@ struct bus_type iio_bus_type = {
};
EXPORT_SYMBOL(iio_bus_type);
+static struct dentry *iio_debugfs_dentry;
+
static const char * const iio_data_type_name[] = {
[IIO_RAW] = "raw",
[IIO_PROCESSED] = "input",
@@ -100,71 +103,6 @@ const struct iio_chan_spec
return NULL;
}
-/**
- * struct iio_detected_event_list - list element for events that have occurred
- * @list: linked list header
- * @ev: the event itself
- */
-struct iio_detected_event_list {
- struct list_head list;
- struct iio_event_data ev;
-};
-
-/**
- * struct iio_event_interface - chrdev interface for an event line
- * @dev: device assocated with event interface
- * @wait: wait queue to allow blocking reads of events
- * @event_list_lock: mutex to protect the list of detected events
- * @det_events: list of detected events
- * @max_events: maximum number of events before new ones are dropped
- * @current_events: number of events in detected list
- * @flags: file operations related flags including busy flag.
- */
-struct iio_event_interface {
- wait_queue_head_t wait;
- struct mutex event_list_lock;
- struct list_head det_events;
- int max_events;
- int current_events;
- struct list_head dev_attr_list;
- unsigned long flags;
- struct attribute_group group;
-};
-
-int iio_push_event(struct iio_dev *indio_dev, u64 ev_code, s64 timestamp)
-{
- struct iio_event_interface *ev_int = indio_dev->event_interface;
- struct iio_detected_event_list *ev;
- int ret = 0;
-
- /* Does anyone care? */
- mutex_lock(&ev_int->event_list_lock);
- if (test_bit(IIO_BUSY_BIT_POS, &ev_int->flags)) {
- if (ev_int->current_events == ev_int->max_events) {
- mutex_unlock(&ev_int->event_list_lock);
- return 0;
- }
- ev = kmalloc(sizeof(*ev), GFP_KERNEL);
- if (ev == NULL) {
- ret = -ENOMEM;
- mutex_unlock(&ev_int->event_list_lock);
- goto error_ret;
- }
- ev->ev.id = ev_code;
- ev->ev.timestamp = timestamp;
-
- list_add_tail(&ev->list, &ev_int->det_events);
- ev_int->current_events++;
- mutex_unlock(&ev_int->event_list_lock);
- wake_up_interruptible(&ev_int->wait);
- } else
- mutex_unlock(&ev_int->event_list_lock);
-
-error_ret:
- return ret;
-}
-EXPORT_SYMBOL(iio_push_event);
-
/* This turns up an awful lot */
ssize_t iio_read_const_attr(struct device *dev,
struct device_attribute *attr,
@@ -174,143 +112,189 @@ ssize_t iio_read_const_attr(struct device *dev,
}
EXPORT_SYMBOL(iio_read_const_attr);
-static ssize_t iio_event_chrdev_read(struct file *filep,
- char __user *buf,
- size_t count,
- loff_t *f_ps)
+static int __init iio_init(void)
{
- struct iio_event_interface *ev_int = filep->private_data;
- struct iio_detected_event_list *el;
- size_t len = sizeof(el->ev);
int ret;
- if (count < len)
- return -EINVAL;
-
- mutex_lock(&ev_int->event_list_lock);
- if (list_empty(&ev_int->det_events)) {
- if (filep->f_flags & O_NONBLOCK) {
- ret = -EAGAIN;
- goto error_mutex_unlock;
- }
- mutex_unlock(&ev_int->event_list_lock);
- /* Blocking on device; waiting for something to be there */
- ret = wait_event_interruptible(ev_int->wait,
- !list_empty(&ev_int
- ->det_events));
- if (ret)
- goto error_ret;
- /* Single access device so no one else can get the data */
- mutex_lock(&ev_int->event_list_lock);
+ /* Register sysfs bus */
+ ret = bus_register(&iio_bus_type);
+ if (ret < 0) {
+ printk(KERN_ERR
+ "%s could not register bus type\n",
+ __FILE__);
+ goto error_nothing;
}
- el = list_first_entry(&ev_int->det_events,
- struct iio_detected_event_list,
- list);
- if (copy_to_user(buf, &(el->ev), len)) {
- ret = -EFAULT;
- goto error_mutex_unlock;
+ ret = alloc_chrdev_region(&iio_devt, 0, IIO_DEV_MAX, "iio");
+ if (ret < 0) {
+ printk(KERN_ERR "%s: failed to allocate char dev region\n",
+ __FILE__);
+ goto error_unregister_bus_type;
}
- list_del(&el->list);
- ev_int->current_events--;
- mutex_unlock(&ev_int->event_list_lock);
- kfree(el);
- return len;
+ iio_debugfs_dentry = debugfs_create_dir("iio", NULL);
-error_mutex_unlock:
- mutex_unlock(&ev_int->event_list_lock);
-error_ret:
+ return 0;
+error_unregister_bus_type:
+ bus_unregister(&iio_bus_type);
+error_nothing:
return ret;
}
-static int iio_event_chrdev_release(struct inode *inode, struct file *filep)
+static void __exit iio_exit(void)
{
- struct iio_event_interface *ev_int = filep->private_data;
- struct iio_detected_event_list *el, *t;
+ if (iio_devt)
+ unregister_chrdev_region(iio_devt, IIO_DEV_MAX);
+ bus_unregister(&iio_bus_type);
+ debugfs_remove(iio_debugfs_dentry);
+}
- mutex_lock(&ev_int->event_list_lock);
- clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags);
- /*
- * In order to maintain a clean state for reopening,
- * clear out any awaiting events. The mask will prevent
- * any new __iio_push_event calls running.
- */
- list_for_each_entry_safe(el, t, &ev_int->det_events, list) {
- list_del(&el->list);
- kfree(el);
- }
- ev_int->current_events = 0;
- mutex_unlock(&ev_int->event_list_lock);
+#if defined(CONFIG_DEBUG_FS)
+static int iio_debugfs_open(struct inode *inode, struct file *file)
+{
+ if (inode->i_private)
+ file->private_data = inode->i_private;
return 0;
}
-static const struct file_operations iio_event_chrdev_fileops = {
- .read = iio_event_chrdev_read,
- .release = iio_event_chrdev_release,
- .owner = THIS_MODULE,
- .llseek = noop_llseek,
-};
-
-static int iio_event_getfd(struct iio_dev *indio_dev)
+static ssize_t iio_debugfs_read_reg(struct file *file, char __user *userbuf,
+ size_t count, loff_t *ppos)
{
- struct iio_event_interface *ev_int = indio_dev->event_interface;
- int fd;
+ struct iio_dev *indio_dev = file->private_data;
+ char buf[20];
+ unsigned val = 0;
+ ssize_t len;
+ int ret;
- if (ev_int == NULL)
- return -ENODEV;
+ ret = indio_dev->info->debugfs_reg_access(indio_dev,
+ indio_dev->cached_reg_addr,
+ 0, &val);
+ if (ret)
+ dev_err(indio_dev->dev.parent, "%s: read failed\n", __func__);
- mutex_lock(&ev_int->event_list_lock);
- if (test_and_set_bit(IIO_BUSY_BIT_POS, &ev_int->flags)) {
- mutex_unlock(&ev_int->event_list_lock);
- return -EBUSY;
- }
- mutex_unlock(&ev_int->event_list_lock);
- fd = anon_inode_getfd("iio:event",
- &iio_event_chrdev_fileops, ev_int, O_RDONLY);
- if (fd < 0) {
- mutex_lock(&ev_int->event_list_lock);
- clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags);
- mutex_unlock(&ev_int->event_list_lock);
- }
- return fd;
+ len = snprintf(buf, sizeof(buf), "0x%X\n", val);
+
+ return simple_read_from_buffer(userbuf, count, ppos, buf, len);
}
-static int __init iio_init(void)
+static ssize_t iio_debugfs_write_reg(struct file *file,
+ const char __user *userbuf, size_t count, loff_t *ppos)
{
+ struct iio_dev *indio_dev = file->private_data;
+ unsigned reg, val;
+ char buf[80];
int ret;
- /* Register sysfs bus */
- ret = bus_register(&iio_bus_type);
- if (ret < 0) {
- printk(KERN_ERR
- "%s could not register bus type\n",
- __FILE__);
- goto error_nothing;
+ count = min_t(size_t, count, (sizeof(buf)-1));
+ if (copy_from_user(buf, userbuf, count))
+ return -EFAULT;
+
+ buf[count] = 0;
+
+ ret = sscanf(buf, "%i %i", &reg, &val);
+
+ switch (ret) {
+ case 1:
+ indio_dev->cached_reg_addr = reg;
+ break;
+ case 2:
+ indio_dev->cached_reg_addr = reg;
+ ret = indio_dev->info->debugfs_reg_access(indio_dev, reg,
+ val, NULL);
+ if (ret) {
+ dev_err(indio_dev->dev.parent, "%s: write failed\n",
+ __func__);
+ return ret;
+ }
+ break;
+ default:
+ return -EINVAL;
}
- ret = alloc_chrdev_region(&iio_devt, 0, IIO_DEV_MAX, "iio");
- if (ret < 0) {
- printk(KERN_ERR "%s: failed to allocate char dev region\n",
- __FILE__);
- goto error_unregister_bus_type;
+ return count;
+}
+
+static const struct file_operations iio_debugfs_reg_fops = {
+ .open = iio_debugfs_open,
+ .read = iio_debugfs_read_reg,
+ .write = iio_debugfs_write_reg,
+};
+
+static void iio_device_unregister_debugfs(struct iio_dev *indio_dev)
+{
+ debugfs_remove_recursive(indio_dev->debugfs_dentry);
+}
+
+static int iio_device_register_debugfs(struct iio_dev *indio_dev)
+{
+ struct dentry *d;
+
+ if (indio_dev->info->debugfs_reg_access == NULL)
+ return 0;
+
+ if (IS_ERR(iio_debugfs_dentry))
+ return 0;
+
+ indio_dev->debugfs_dentry =
+ debugfs_create_dir(dev_name(&indio_dev->dev),
+ iio_debugfs_dentry);
+ if (IS_ERR(indio_dev->debugfs_dentry))
+ return PTR_ERR(indio_dev->debugfs_dentry);
+
+ if (indio_dev->debugfs_dentry == NULL) {
+ dev_warn(indio_dev->dev.parent,
+ "Failed to create debugfs directory\n");
+ return -EFAULT;
+ }
+
+ d = debugfs_create_file("direct_reg_access", 0644,
+ indio_dev->debugfs_dentry,
+ indio_dev, &iio_debugfs_reg_fops);
+ if (!d) {
+ iio_device_unregister_debugfs(indio_dev);
+ return -ENOMEM;
}
return 0;
+}
+#else
+static int iio_device_register_debugfs(struct iio_dev *indio_dev)
+{
+ return 0;
+}
-error_unregister_bus_type:
- bus_unregister(&iio_bus_type);
-error_nothing:
- return ret;
+static void iio_device_unregister_debugfs(struct iio_dev *indio_dev)
+{
}
+#endif /* CONFIG_DEBUG_FS */
-static void __exit iio_exit(void)
+static ssize_t iio_read_channel_ext_info(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
- if (iio_devt)
- unregister_chrdev_region(iio_devt, IIO_DEV_MAX);
- bus_unregister(&iio_bus_type);
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+ const struct iio_chan_spec_ext_info *ext_info;
+
+ ext_info = &this_attr->c->ext_info[this_attr->address];
+
+ return ext_info->read(indio_dev, this_attr->c, buf);
+}
+
+static ssize_t iio_write_channel_ext_info(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+ const struct iio_chan_spec_ext_info *ext_info;
+
+ ext_info = &this_attr->c->ext_info[this_attr->address];
+
+ return ext_info->write(indio_dev, this_attr->c, buf, len);
}
static ssize_t iio_read_channel_info(struct device *dev,
@@ -455,7 +439,7 @@ int __iio_device_attr_init(struct device_attribute *dev_attr,
goto error_ret;
}
- if (chan->differential) { /* Differential can not have modifier */
+ if (chan->differential) { /* Differential cannot have a modifier */
if (generic)
name_format
= kasprintf(GFP_KERNEL, "%s_%s-%s_%s",
@@ -592,6 +576,7 @@ static int iio_device_add_channel_sysfs(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan)
{
int ret, i, attrcount = 0;
+ const struct iio_chan_spec_ext_info *ext_info;
if (chan->channel < 0)
return 0;
@@ -626,6 +611,31 @@ static int iio_device_add_channel_sysfs(struct iio_dev *indio_dev,
goto error_ret;
attrcount++;
}
+
+ if (chan->ext_info) {
+ unsigned int i = 0;
+ for (ext_info = chan->ext_info; ext_info->name; ext_info++) {
+ ret = __iio_add_chan_devattr(ext_info->name,
+ chan,
+ ext_info->read ?
+ &iio_read_channel_ext_info : NULL,
+ ext_info->write ?
+ &iio_write_channel_ext_info : NULL,
+ i,
+ ext_info->shared,
+ &indio_dev->dev,
+ &indio_dev->channel_attr_list);
+ i++;
+ if (ret == -EBUSY && ext_info->shared)
+ continue;
+
+ if (ret)
+ goto error_ret;
+
+ attrcount++;
+ }
+ }
+
ret = attrcount;
error_ret:
return ret;
@@ -663,7 +673,7 @@ static int iio_device_register_sysfs(struct iio_dev *indio_dev)
attrcount = attrcount_orig;
/*
* New channel registration method - relies on the fact a group does
- * not need to be initialized if it is name is NULL.
+ * not need to be initialized if its name is NULL.
*/
INIT_LIST_HEAD(&indio_dev->channel_attr_list);
if (indio_dev->channels)
@@ -726,295 +736,6 @@ static void iio_device_unregister_sysfs(struct iio_dev *indio_dev)
kfree(indio_dev->chan_attr_group.attrs);
}
-static const char * const iio_ev_type_text[] = {
- [IIO_EV_TYPE_THRESH] = "thresh",
- [IIO_EV_TYPE_MAG] = "mag",
- [IIO_EV_TYPE_ROC] = "roc",
- [IIO_EV_TYPE_THRESH_ADAPTIVE] = "thresh_adaptive",
- [IIO_EV_TYPE_MAG_ADAPTIVE] = "mag_adaptive",
-};
-
-static const char * const iio_ev_dir_text[] = {
- [IIO_EV_DIR_EITHER] = "either",
- [IIO_EV_DIR_RISING] = "rising",
- [IIO_EV_DIR_FALLING] = "falling"
-};
-
-static ssize_t iio_ev_state_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
- int ret;
- bool val;
-
- ret = strtobool(buf, &val);
- if (ret < 0)
- return ret;
-
- ret = indio_dev->info->write_event_config(indio_dev,
- this_attr->address,
- val);
- return (ret < 0) ? ret : len;
-}
-
-static ssize_t iio_ev_state_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
- int val = indio_dev->info->read_event_config(indio_dev,
- this_attr->address);
-
- if (val < 0)
- return val;
- else
- return sprintf(buf, "%d\n", val);
-}
-
-static ssize_t iio_ev_value_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
- int val, ret;
-
- ret = indio_dev->info->read_event_value(indio_dev,
- this_attr->address, &val);
- if (ret < 0)
- return ret;
-
- return sprintf(buf, "%d\n", val);
-}
-
-static ssize_t iio_ev_value_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
- unsigned long val;
- int ret;
-
- if (!indio_dev->info->write_event_value)
- return -EINVAL;
-
- ret = strict_strtoul(buf, 10, &val);
- if (ret)
- return ret;
-
- ret = indio_dev->info->write_event_value(indio_dev, this_attr->address,
- val);
- if (ret < 0)
- return ret;
-
- return len;
-}
-
-static int iio_device_add_event_sysfs(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan)
-{
- int ret = 0, i, attrcount = 0;
- u64 mask = 0;
- char *postfix;
- if (!chan->event_mask)
- return 0;
-
- for_each_set_bit(i, &chan->event_mask, sizeof(chan->event_mask)*8) {
- postfix = kasprintf(GFP_KERNEL, "%s_%s_en",
- iio_ev_type_text[i/IIO_EV_DIR_MAX],
- iio_ev_dir_text[i%IIO_EV_DIR_MAX]);
- if (postfix == NULL) {
- ret = -ENOMEM;
- goto error_ret;
- }
- if (chan->modified)
- mask = IIO_MOD_EVENT_CODE(chan->type, 0, chan->channel,
- i/IIO_EV_DIR_MAX,
- i%IIO_EV_DIR_MAX);
- else if (chan->differential)
- mask = IIO_EVENT_CODE(chan->type,
- 0, 0,
- i%IIO_EV_DIR_MAX,
- i/IIO_EV_DIR_MAX,
- 0,
- chan->channel,
- chan->channel2);
- else
- mask = IIO_UNMOD_EVENT_CODE(chan->type,
- chan->channel,
- i/IIO_EV_DIR_MAX,
- i%IIO_EV_DIR_MAX);
-
- ret = __iio_add_chan_devattr(postfix,
- chan,
- &iio_ev_state_show,
- iio_ev_state_store,
- mask,
- 0,
- &indio_dev->dev,
- &indio_dev->event_interface->
- dev_attr_list);
- kfree(postfix);
- if (ret)
- goto error_ret;
- attrcount++;
- postfix = kasprintf(GFP_KERNEL, "%s_%s_value",
- iio_ev_type_text[i/IIO_EV_DIR_MAX],
- iio_ev_dir_text[i%IIO_EV_DIR_MAX]);
- if (postfix == NULL) {
- ret = -ENOMEM;
- goto error_ret;
- }
- ret = __iio_add_chan_devattr(postfix, chan,
- iio_ev_value_show,
- iio_ev_value_store,
- mask,
- 0,
- &indio_dev->dev,
- &indio_dev->event_interface->
- dev_attr_list);
- kfree(postfix);
- if (ret)
- goto error_ret;
- attrcount++;
- }
- ret = attrcount;
-error_ret:
- return ret;
-}
-
-static inline void __iio_remove_event_config_attrs(struct iio_dev *indio_dev)
-{
- struct iio_dev_attr *p, *n;
- list_for_each_entry_safe(p, n,
- &indio_dev->event_interface->
- dev_attr_list, l) {
- kfree(p->dev_attr.attr.name);
- kfree(p);
- }
-}
-
-static inline int __iio_add_event_config_attrs(struct iio_dev *indio_dev)
-{
- int j, ret, attrcount = 0;
-
- INIT_LIST_HEAD(&indio_dev->event_interface->dev_attr_list);
- /* Dynically created from the channels array */
- for (j = 0; j < indio_dev->num_channels; j++) {
- ret = iio_device_add_event_sysfs(indio_dev,
- &indio_dev->channels[j]);
- if (ret < 0)
- goto error_clear_attrs;
- attrcount += ret;
- }
- return attrcount;
-
-error_clear_attrs:
- __iio_remove_event_config_attrs(indio_dev);
-
- return ret;
-}
-
-static bool iio_check_for_dynamic_events(struct iio_dev *indio_dev)
-{
- int j;
-
- for (j = 0; j < indio_dev->num_channels; j++)
- if (indio_dev->channels[j].event_mask != 0)
- return true;
- return false;
-}
-
-static void iio_setup_ev_int(struct iio_event_interface *ev_int)
-{
- mutex_init(&ev_int->event_list_lock);
- /* discussion point - make this variable? */
- ev_int->max_events = 10;
- ev_int->current_events = 0;
- INIT_LIST_HEAD(&ev_int->det_events);
- init_waitqueue_head(&ev_int->wait);
-}
-
-static const char *iio_event_group_name = "events";
-static int iio_device_register_eventset(struct iio_dev *indio_dev)
-{
- struct iio_dev_attr *p;
- int ret = 0, attrcount_orig = 0, attrcount, attrn;
- struct attribute **attr;
-
- if (!(indio_dev->info->event_attrs ||
- iio_check_for_dynamic_events(indio_dev)))
- return 0;
-
- indio_dev->event_interface =
- kzalloc(sizeof(struct iio_event_interface), GFP_KERNEL);
- if (indio_dev->event_interface == NULL) {
- ret = -ENOMEM;
- goto error_ret;
- }
-
- iio_setup_ev_int(indio_dev->event_interface);
- if (indio_dev->info->event_attrs != NULL) {
- attr = indio_dev->info->event_attrs->attrs;
- while (*attr++ != NULL)
- attrcount_orig++;
- }
- attrcount = attrcount_orig;
- if (indio_dev->channels) {
- ret = __iio_add_event_config_attrs(indio_dev);
- if (ret < 0)
- goto error_free_setup_event_lines;
- attrcount += ret;
- }
-
- indio_dev->event_interface->group.name = iio_event_group_name;
- indio_dev->event_interface->group.attrs = kcalloc(attrcount + 1,
- sizeof(indio_dev->event_interface->group.attrs[0]),
- GFP_KERNEL);
- if (indio_dev->event_interface->group.attrs == NULL) {
- ret = -ENOMEM;
- goto error_free_setup_event_lines;
- }
- if (indio_dev->info->event_attrs)
- memcpy(indio_dev->event_interface->group.attrs,
- indio_dev->info->event_attrs->attrs,
- sizeof(indio_dev->event_interface->group.attrs[0])
- *attrcount_orig);
- attrn = attrcount_orig;
- /* Add all elements from the list. */
- list_for_each_entry(p,
- &indio_dev->event_interface->dev_attr_list,
- l)
- indio_dev->event_interface->group.attrs[attrn++] =
- &p->dev_attr.attr;
- indio_dev->groups[indio_dev->groupcounter++] =
- &indio_dev->event_interface->group;
-
- return 0;
-
-error_free_setup_event_lines:
- __iio_remove_event_config_attrs(indio_dev);
- kfree(indio_dev->event_interface);
-error_ret:
-
- return ret;
-}
-
-static void iio_device_unregister_eventset(struct iio_dev *indio_dev)
-{
- if (indio_dev->event_interface == NULL)
- return;
- __iio_remove_event_config_attrs(indio_dev);
- kfree(indio_dev->event_interface->group.attrs);
- kfree(indio_dev->event_interface);
-}
-
static void iio_dev_release(struct device *device)
{
struct iio_dev *indio_dev = container_of(device, struct iio_dev, dev);
@@ -1023,6 +744,7 @@ static void iio_dev_release(struct device *device)
iio_device_unregister_trigger_consumer(indio_dev);
iio_device_unregister_eventset(indio_dev);
iio_device_unregister_sysfs(indio_dev);
+ iio_device_unregister_debugfs(indio_dev);
}
static struct device_type iio_dev_type = {
@@ -1052,6 +774,7 @@ struct iio_dev *iio_allocate_device(int sizeof_priv)
device_initialize(&dev->dev);
dev_set_drvdata(&dev->dev, (void *)dev);
mutex_init(&dev->mlock);
+ mutex_init(&dev->info_exist_lock);
dev->id = ida_simple_get(&iio_ida, 0, 0, GFP_KERNEL);
if (dev->id < 0) {
@@ -1131,6 +854,8 @@ static const struct file_operations iio_buffer_fileops = {
.compat_ioctl = iio_ioctl,
};
+static const struct iio_buffer_setup_ops noop_ring_setup_ops;
+
int iio_device_register(struct iio_dev *indio_dev)
{
int ret;
@@ -1138,11 +863,17 @@ int iio_device_register(struct iio_dev *indio_dev)
/* configure elements for the chrdev */
indio_dev->dev.devt = MKDEV(MAJOR(iio_devt), indio_dev->id);
+ ret = iio_device_register_debugfs(indio_dev);
+ if (ret) {
+ dev_err(indio_dev->dev.parent,
+ "Failed to register debugfs interfaces\n");
+ goto error_ret;
+ }
ret = iio_device_register_sysfs(indio_dev);
if (ret) {
dev_err(indio_dev->dev.parent,
"Failed to register sysfs interfaces\n");
- goto error_ret;
+ goto error_unreg_debugfs;
}
ret = iio_device_register_eventset(indio_dev);
if (ret) {
@@ -1153,6 +884,10 @@ int iio_device_register(struct iio_dev *indio_dev)
if (indio_dev->modes & INDIO_BUFFER_TRIGGERED)
iio_device_register_trigger_consumer(indio_dev);
+ if ((indio_dev->modes & INDIO_ALL_BUFFER_MODES) &&
+ indio_dev->setup_ops == NULL)
+ indio_dev->setup_ops = &noop_ring_setup_ops;
+
ret = device_add(&indio_dev->dev);
if (ret < 0)
goto error_unreg_eventset;
@@ -1169,6 +904,8 @@ error_unreg_eventset:
iio_device_unregister_eventset(indio_dev);
error_free_sysfs:
iio_device_unregister_sysfs(indio_dev);
+error_unreg_debugfs:
+ iio_device_unregister_debugfs(indio_dev);
error_ret:
return ret;
}
@@ -1176,6 +913,9 @@ EXPORT_SYMBOL(iio_device_register);
void iio_device_unregister(struct iio_dev *indio_dev)
{
+ mutex_lock(&indio_dev->info_exist_lock);
+ indio_dev->info = NULL;
+ mutex_unlock(&indio_dev->info_exist_lock);
device_unregister(&indio_dev->dev);
}
EXPORT_SYMBOL(iio_device_unregister);
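
A sketch of the driver-side counterpart to the new direct_reg_access file;
struct example_state, example_read_reg() and example_write_reg() are
invented helpers, but the callback signature and the readval == NULL
convention for writes match the core code above:

    static int example_debugfs_reg_access(struct iio_dev *indio_dev,
                                          unsigned reg, unsigned writeval,
                                          unsigned *readval)
    {
        struct example_state *st = iio_priv(indio_dev);

        if (readval == NULL)    /* write request from the core */
            return example_write_reg(st, reg, writeval);

        return example_read_reg(st, reg, readval);  /* read request */
    }

    static const struct iio_info example_info = {
        .driver_module = THIS_MODULE,
        .debugfs_reg_access = example_debugfs_reg_access,
    };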
diff --git a/drivers/staging/iio/industrialio-event.c b/drivers/staging/iio/industrialio-event.c
new file mode 100644
index 00000000000..5fdf739e38f
--- /dev/null
+++ b/drivers/staging/iio/industrialio-event.c
@@ -0,0 +1,453 @@
+/* Industrial I/O event handling
+ *
+ * Copyright (c) 2008 Jonathan Cameron
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * Based on elements of hwmon and input subsystems.
+ */
+
+#include <linux/anon_inodes.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/kfifo.h>
+#include <linux/module.h>
+#include <linux/poll.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/wait.h>
+#include "iio.h"
+#include "iio_core.h"
+#include "sysfs.h"
+#include "events.h"
+
+/**
+ * struct iio_event_interface - chrdev interface for an event line
+ * @wait: wait queue to allow blocking reads of events
+ * @det_events: list of detected events
+ * @dev_attr_list: list of event interface sysfs attributes
+ * @flags: file operations related flags including busy flag.
+ * @group: event interface sysfs attribute group
+ */
+struct iio_event_interface {
+ wait_queue_head_t wait;
+ DECLARE_KFIFO(det_events, struct iio_event_data, 16);
+
+ struct list_head dev_attr_list;
+ unsigned long flags;
+ struct attribute_group group;
+};
+
+int iio_push_event(struct iio_dev *indio_dev, u64 ev_code, s64 timestamp)
+{
+ struct iio_event_interface *ev_int = indio_dev->event_interface;
+ struct iio_event_data ev;
+ int copied;
+
+ /* Does anyone care? */
+ spin_lock(&ev_int->wait.lock);
+ if (test_bit(IIO_BUSY_BIT_POS, &ev_int->flags)) {
+
+ ev.id = ev_code;
+ ev.timestamp = timestamp;
+
+ copied = kfifo_put(&ev_int->det_events, &ev);
+ if (copied != 0)
+ wake_up_locked_poll(&ev_int->wait, POLLIN);
+ }
+ spin_unlock(&ev_int->wait.lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(iio_push_event);
+
+/**
+ * iio_event_poll() - poll the event queue to find out if it has data
+ */
+static unsigned int iio_event_poll(struct file *filep,
+ struct poll_table_struct *wait)
+{
+ struct iio_event_interface *ev_int = filep->private_data;
+ unsigned int events = 0;
+
+ poll_wait(filep, &ev_int->wait, wait);
+
+ spin_lock(&ev_int->wait.lock);
+ if (!kfifo_is_empty(&ev_int->det_events))
+ events = POLLIN | POLLRDNORM;
+ spin_unlock(&ev_int->wait.lock);
+
+ return events;
+}
+
+static ssize_t iio_event_chrdev_read(struct file *filep,
+ char __user *buf,
+ size_t count,
+ loff_t *f_ps)
+{
+ struct iio_event_interface *ev_int = filep->private_data;
+ unsigned int copied;
+ int ret;
+
+ if (count < sizeof(struct iio_event_data))
+ return -EINVAL;
+
+ spin_lock(&ev_int->wait.lock);
+ if (kfifo_is_empty(&ev_int->det_events)) {
+ if (filep->f_flags & O_NONBLOCK) {
+ ret = -EAGAIN;
+ goto error_unlock;
+ }
+ /* Blocking on device; waiting for something to be there */
+ ret = wait_event_interruptible_locked(ev_int->wait,
+ !kfifo_is_empty(&ev_int->det_events));
+ if (ret)
+ goto error_unlock;
+ /* Single access device so no one else can get the data */
+ }
+
+ ret = kfifo_to_user(&ev_int->det_events, buf, count, &copied);
+
+error_unlock:
+ spin_unlock(&ev_int->wait.lock);
+
+ return ret ? ret : copied;
+}
+
+static int iio_event_chrdev_release(struct inode *inode, struct file *filep)
+{
+ struct iio_event_interface *ev_int = filep->private_data;
+
+ spin_lock(&ev_int->wait.lock);
+ __clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags);
+ /*
+ * In order to maintain a clean state for reopening,
+ * clear out any awaiting events. The mask will prevent
+ * any new __iio_push_event calls running.
+ */
+ kfifo_reset_out(&ev_int->det_events);
+ spin_unlock(&ev_int->wait.lock);
+
+ return 0;
+}
+
+static const struct file_operations iio_event_chrdev_fileops = {
+ .read = iio_event_chrdev_read,
+ .poll = iio_event_poll,
+ .release = iio_event_chrdev_release,
+ .owner = THIS_MODULE,
+ .llseek = noop_llseek,
+};
+
+int iio_event_getfd(struct iio_dev *indio_dev)
+{
+ struct iio_event_interface *ev_int = indio_dev->event_interface;
+ int fd;
+
+ if (ev_int == NULL)
+ return -ENODEV;
+
+ spin_lock(&ev_int->wait.lock);
+ if (__test_and_set_bit(IIO_BUSY_BIT_POS, &ev_int->flags)) {
+ spin_unlock(&ev_int->wait.lock);
+ return -EBUSY;
+ }
+ spin_unlock(&ev_int->wait.lock);
+ fd = anon_inode_getfd("iio:event",
+ &iio_event_chrdev_fileops, ev_int, O_RDONLY);
+ if (fd < 0) {
+ spin_lock(&ev_int->wait.lock);
+ __clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags);
+ spin_unlock(&ev_int->wait.lock);
+ }
+ return fd;
+}
+
+static const char * const iio_ev_type_text[] = {
+ [IIO_EV_TYPE_THRESH] = "thresh",
+ [IIO_EV_TYPE_MAG] = "mag",
+ [IIO_EV_TYPE_ROC] = "roc",
+ [IIO_EV_TYPE_THRESH_ADAPTIVE] = "thresh_adaptive",
+ [IIO_EV_TYPE_MAG_ADAPTIVE] = "mag_adaptive",
+};
+
+static const char * const iio_ev_dir_text[] = {
+ [IIO_EV_DIR_EITHER] = "either",
+ [IIO_EV_DIR_RISING] = "rising",
+ [IIO_EV_DIR_FALLING] = "falling"
+};
+
+static ssize_t iio_ev_state_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+ int ret;
+ bool val;
+
+ ret = strtobool(buf, &val);
+ if (ret < 0)
+ return ret;
+
+ ret = indio_dev->info->write_event_config(indio_dev,
+ this_attr->address,
+ val);
+ return (ret < 0) ? ret : len;
+}
+
+static ssize_t iio_ev_state_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+ int val = indio_dev->info->read_event_config(indio_dev,
+ this_attr->address);
+
+ if (val < 0)
+ return val;
+ else
+ return sprintf(buf, "%d\n", val);
+}
+
+static ssize_t iio_ev_value_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+ int val, ret;
+
+ ret = indio_dev->info->read_event_value(indio_dev,
+ this_attr->address, &val);
+ if (ret < 0)
+ return ret;
+
+ return sprintf(buf, "%d\n", val);
+}
+
+static ssize_t iio_ev_value_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+ unsigned long val;
+ int ret;
+
+ if (!indio_dev->info->write_event_value)
+ return -EINVAL;
+
+ ret = strict_strtoul(buf, 10, &val);
+ if (ret)
+ return ret;
+
+ ret = indio_dev->info->write_event_value(indio_dev, this_attr->address,
+ val);
+ if (ret < 0)
+ return ret;
+
+ return len;
+}
+
+static int iio_device_add_event_sysfs(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan)
+{
+ int ret = 0, i, attrcount = 0;
+ u64 mask = 0;
+ char *postfix;
+ if (!chan->event_mask)
+ return 0;
+
+ for_each_set_bit(i, &chan->event_mask, sizeof(chan->event_mask)*8) {
+ postfix = kasprintf(GFP_KERNEL, "%s_%s_en",
+ iio_ev_type_text[i/IIO_EV_DIR_MAX],
+ iio_ev_dir_text[i%IIO_EV_DIR_MAX]);
+ if (postfix == NULL) {
+ ret = -ENOMEM;
+ goto error_ret;
+ }
+ if (chan->modified)
+ mask = IIO_MOD_EVENT_CODE(chan->type, 0, chan->channel,
+ i/IIO_EV_DIR_MAX,
+ i%IIO_EV_DIR_MAX);
+ else if (chan->differential)
+ mask = IIO_EVENT_CODE(chan->type,
+ 0, 0,
+ i%IIO_EV_DIR_MAX,
+ i/IIO_EV_DIR_MAX,
+ 0,
+ chan->channel,
+ chan->channel2);
+ else
+ mask = IIO_UNMOD_EVENT_CODE(chan->type,
+ chan->channel,
+ i/IIO_EV_DIR_MAX,
+ i%IIO_EV_DIR_MAX);
+
+ ret = __iio_add_chan_devattr(postfix,
+ chan,
+ &iio_ev_state_show,
+ iio_ev_state_store,
+ mask,
+ 0,
+ &indio_dev->dev,
+ &indio_dev->event_interface->
+ dev_attr_list);
+ kfree(postfix);
+ if (ret)
+ goto error_ret;
+ attrcount++;
+ postfix = kasprintf(GFP_KERNEL, "%s_%s_value",
+ iio_ev_type_text[i/IIO_EV_DIR_MAX],
+ iio_ev_dir_text[i%IIO_EV_DIR_MAX]);
+ if (postfix == NULL) {
+ ret = -ENOMEM;
+ goto error_ret;
+ }
+ ret = __iio_add_chan_devattr(postfix, chan,
+ iio_ev_value_show,
+ iio_ev_value_store,
+ mask,
+ 0,
+ &indio_dev->dev,
+ &indio_dev->event_interface->
+ dev_attr_list);
+ kfree(postfix);
+ if (ret)
+ goto error_ret;
+ attrcount++;
+ }
+ ret = attrcount;
+error_ret:
+ return ret;
+}
+
+static inline void __iio_remove_event_config_attrs(struct iio_dev *indio_dev)
+{
+ struct iio_dev_attr *p, *n;
+ list_for_each_entry_safe(p, n,
+ &indio_dev->event_interface->
+ dev_attr_list, l) {
+ kfree(p->dev_attr.attr.name);
+ kfree(p);
+ }
+}
+
+static inline int __iio_add_event_config_attrs(struct iio_dev *indio_dev)
+{
+ int j, ret, attrcount = 0;
+
+ INIT_LIST_HEAD(&indio_dev->event_interface->dev_attr_list);
+ /* Dynamically created from the channels array */
+ for (j = 0; j < indio_dev->num_channels; j++) {
+ ret = iio_device_add_event_sysfs(indio_dev,
+ &indio_dev->channels[j]);
+ if (ret < 0)
+ goto error_clear_attrs;
+ attrcount += ret;
+ }
+ return attrcount;
+
+error_clear_attrs:
+ __iio_remove_event_config_attrs(indio_dev);
+
+ return ret;
+}
+
+static bool iio_check_for_dynamic_events(struct iio_dev *indio_dev)
+{
+ int j;
+
+ for (j = 0; j < indio_dev->num_channels; j++)
+ if (indio_dev->channels[j].event_mask != 0)
+ return true;
+ return false;
+}
+
+static void iio_setup_ev_int(struct iio_event_interface *ev_int)
+{
+ INIT_KFIFO(ev_int->det_events);
+ init_waitqueue_head(&ev_int->wait);
+}
+
+static const char *iio_event_group_name = "events";
+int iio_device_register_eventset(struct iio_dev *indio_dev)
+{
+ struct iio_dev_attr *p;
+ int ret = 0, attrcount_orig = 0, attrcount, attrn;
+ struct attribute **attr;
+
+ if (!(indio_dev->info->event_attrs ||
+ iio_check_for_dynamic_events(indio_dev)))
+ return 0;
+
+ indio_dev->event_interface =
+ kzalloc(sizeof(struct iio_event_interface), GFP_KERNEL);
+ if (indio_dev->event_interface == NULL) {
+ ret = -ENOMEM;
+ goto error_ret;
+ }
+
+ iio_setup_ev_int(indio_dev->event_interface);
+ if (indio_dev->info->event_attrs != NULL) {
+ attr = indio_dev->info->event_attrs->attrs;
+ while (*attr++ != NULL)
+ attrcount_orig++;
+ }
+ attrcount = attrcount_orig;
+ if (indio_dev->channels) {
+ ret = __iio_add_event_config_attrs(indio_dev);
+ if (ret < 0)
+ goto error_free_setup_event_lines;
+ attrcount += ret;
+ }
+
+ indio_dev->event_interface->group.name = iio_event_group_name;
+ indio_dev->event_interface->group.attrs = kcalloc(attrcount + 1,
+ sizeof(indio_dev->event_interface->group.attrs[0]),
+ GFP_KERNEL);
+ if (indio_dev->event_interface->group.attrs == NULL) {
+ ret = -ENOMEM;
+ goto error_free_setup_event_lines;
+ }
+ if (indio_dev->info->event_attrs)
+ memcpy(indio_dev->event_interface->group.attrs,
+ indio_dev->info->event_attrs->attrs,
+ sizeof(indio_dev->event_interface->group.attrs[0])
+ *attrcount_orig);
+ attrn = attrcount_orig;
+ /* Add all elements from the list. */
+ list_for_each_entry(p,
+ &indio_dev->event_interface->dev_attr_list,
+ l)
+ indio_dev->event_interface->group.attrs[attrn++] =
+ &p->dev_attr.attr;
+ indio_dev->groups[indio_dev->groupcounter++] =
+ &indio_dev->event_interface->group;
+
+ return 0;
+
+error_free_setup_event_lines:
+ __iio_remove_event_config_attrs(indio_dev);
+ kfree(indio_dev->event_interface);
+error_ret:
+
+ return ret;
+}
+
+void iio_device_unregister_eventset(struct iio_dev *indio_dev)
+{
+ if (indio_dev->event_interface == NULL)
+ return;
+ __iio_remove_event_config_attrs(indio_dev);
+ kfree(indio_dev->event_interface->group.attrs);
+ kfree(indio_dev->event_interface);
+}
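
From userspace, the event fd is obtained with an ioctl on the iio character
device and then read for struct iio_event_data records; a minimal sketch,
assuming the events.h header from this tree (which defines
IIO_GET_EVENT_FD_IOCTL) is available to the build and that device0 is the
device of interest:

    #include <stdio.h>
    #include <unistd.h>
    #include <fcntl.h>
    #include <sys/ioctl.h>
    #include "events.h"

    int main(void)
    {
        struct iio_event_data ev;
        int fd, event_fd;

        fd = open("/dev/iio:device0", O_RDONLY);
        if (fd < 0)
            return 1;
        if (ioctl(fd, IIO_GET_EVENT_FD_IOCTL, &event_fd) < 0)
            return 1;
        close(fd);  /* only the event fd is needed from here on */

        /* blocks until an event is pushed, then prints it */
        while (read(event_fd, &ev, sizeof(ev)) == sizeof(ev))
            printf("event id 0x%llx at %lld\n",
                   (unsigned long long)ev.id,
                   (long long)ev.timestamp);
        return 0;
    }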
diff --git a/drivers/staging/iio/inkern.c b/drivers/staging/iio/inkern.c
new file mode 100644
index 00000000000..de2c8ea6496
--- /dev/null
+++ b/drivers/staging/iio/inkern.c
@@ -0,0 +1,292 @@
+/* The industrial I/O core, in-kernel channel mapping
+ *
+ * Copyright (c) 2011 Jonathan Cameron
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+
+#include "iio.h"
+#include "iio_core.h"
+#include "machine.h"
+#include "driver.h"
+#include "consumer.h"
+
+struct iio_map_internal {
+ struct iio_dev *indio_dev;
+ struct iio_map *map;
+ struct list_head l;
+};
+
+static LIST_HEAD(iio_map_list);
+static DEFINE_MUTEX(iio_map_list_lock);
+
+int iio_map_array_register(struct iio_dev *indio_dev, struct iio_map *maps)
+{
+ int i = 0, ret = 0;
+ struct iio_map_internal *mapi;
+
+ if (maps == NULL)
+ return 0;
+
+ mutex_lock(&iio_map_list_lock);
+ while (maps[i].consumer_dev_name != NULL) {
+ mapi = kzalloc(sizeof(*mapi), GFP_KERNEL);
+ if (mapi == NULL) {
+ ret = -ENOMEM;
+ goto error_ret;
+ }
+ mapi->map = &maps[i];
+ mapi->indio_dev = indio_dev;
+ list_add(&mapi->l, &iio_map_list);
+ i++;
+ }
+error_ret:
+ mutex_unlock(&iio_map_list_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(iio_map_array_register);
+
+
+/* Assumes the exact same array (i.e. the same memory locations) is
+ * used at unregistration as at registration, rather than any more
+ * complex checking of contents.
+ */
+int iio_map_array_unregister(struct iio_dev *indio_dev,
+ struct iio_map *maps)
+{
+ int i = 0, ret = 0;
+ bool found_it;
+ struct iio_map_internal *mapi;
+
+ if (maps == NULL)
+ return 0;
+
+ mutex_lock(&iio_map_list_lock);
+ while (maps[i].consumer_dev_name != NULL) {
+ found_it = false;
+ list_for_each_entry(mapi, &iio_map_list, l)
+ if (&maps[i] == mapi->map) {
+ list_del(&mapi->l);
+ kfree(mapi);
+ found_it = true;
+ break;
+ }
+ if (found_it == false) {
+ ret = -ENODEV;
+ goto error_ret;
+ }
+ }
+error_ret:
+ mutex_unlock(&iio_map_list_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(iio_map_array_unregister);
+
+static const struct iio_chan_spec
+*iio_chan_spec_from_name(const struct iio_dev *indio_dev,
+ const char *name)
+{
+ int i;
+ const struct iio_chan_spec *chan = NULL;
+
+ for (i = 0; i < indio_dev->num_channels; i++)
+ if (indio_dev->channels[i].datasheet_name &&
+ strcmp(name, indio_dev->channels[i].datasheet_name) == 0) {
+ chan = &indio_dev->channels[i];
+ break;
+ }
+ return chan;
+}
+
+
+struct iio_channel *iio_st_channel_get(const char *name,
+ const char *channel_name)
+{
+ struct iio_map_internal *c_i = NULL, *c = NULL;
+ struct iio_channel *channel;
+
+ if (name == NULL && channel_name == NULL)
+ return ERR_PTR(-ENODEV);
+
+ /* first find a matching entry in the channel map */
+ mutex_lock(&iio_map_list_lock);
+ list_for_each_entry(c_i, &iio_map_list, l) {
+ if ((name && strcmp(name, c_i->map->consumer_dev_name) != 0) ||
+ (channel_name &&
+ strcmp(channel_name, c_i->map->consumer_channel) != 0))
+ continue;
+ c = c_i;
+ get_device(&c->indio_dev->dev);
+ break;
+ }
+ mutex_unlock(&iio_map_list_lock);
+ if (c == NULL)
+ return ERR_PTR(-ENODEV);
+
+ channel = kmalloc(sizeof(*channel), GFP_KERNEL);
+ if (channel == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ channel->indio_dev = c->indio_dev;
+
+ if (c->map->adc_channel_label)
+ channel->channel =
+ iio_chan_spec_from_name(channel->indio_dev,
+ c->map->adc_channel_label);
+
+ return channel;
+}
+EXPORT_SYMBOL_GPL(iio_st_channel_get);
+
+void iio_st_channel_release(struct iio_channel *channel)
+{
+ put_device(&channel->indio_dev->dev);
+ kfree(channel);
+}
+EXPORT_SYMBOL_GPL(iio_st_channel_release);
+
+struct iio_channel *iio_st_channel_get_all(const char *name)
+{
+ struct iio_channel *chans;
+ struct iio_map_internal *c = NULL;
+ int nummaps = 0;
+ int mapind = 0;
+ int i, ret;
+
+ if (name == NULL)
+ return ERR_PTR(-EINVAL);
+
+ mutex_lock(&iio_map_list_lock);
+ /* first count the matching maps */
+ list_for_each_entry(c, &iio_map_list, l)
+ if (name && strcmp(name, c->map->consumer_dev_name) != 0)
+ continue;
+ else
+ nummaps++;
+
+ if (nummaps == 0) {
+ ret = -ENODEV;
+ goto error_ret;
+ }
+
+ /* NULL terminated array to save passing size */
+ chans = kzalloc(sizeof(*chans)*(nummaps + 1), GFP_KERNEL);
+ if (chans == NULL) {
+ ret = -ENOMEM;
+ goto error_ret;
+ }
+
+ /* for each map fill in the chans element */
+ list_for_each_entry(c, &iio_map_list, l) {
+ if (name && strcmp(name, c->map->consumer_dev_name) != 0)
+ continue;
+ chans[mapind].indio_dev = c->indio_dev;
+ chans[mapind].channel =
+ iio_chan_spec_from_name(chans[mapind].indio_dev,
+ c->map->adc_channel_label);
+ if (chans[mapind].channel == NULL) {
+ ret = -EINVAL;
+ put_device(&chans[mapind].indio_dev->dev);
+ goto error_free_chans;
+ }
+ get_device(&chans[mapind].indio_dev->dev);
+ mapind++;
+ }
+ mutex_unlock(&iio_map_list_lock);
+ if (mapind == 0) {
+ ret = -ENODEV;
+ goto error_free_chans;
+ }
+ return chans;
+
+error_free_chans:
+ for (i = 0; i < nummaps; i++)
+ if (chans[i].indio_dev)
+ put_device(&chans[i].indio_dev->dev);
+ kfree(chans);
+error_ret:
+ mutex_unlock(&iio_map_list_lock);
+
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(iio_st_channel_get_all);
+
+void iio_st_channel_release_all(struct iio_channel *channels)
+{
+ struct iio_channel *chan = &channels[0];
+
+ while (chan->indio_dev) {
+ put_device(&chan->indio_dev->dev);
+ chan++;
+ }
+ kfree(channels);
+}
+EXPORT_SYMBOL_GPL(iio_st_channel_release_all);
+
+int iio_st_read_channel_raw(struct iio_channel *chan, int *val)
+{
+ int val2, ret;
+
+ mutex_lock(&chan->indio_dev->info_exist_lock);
+ if (chan->indio_dev->info == NULL) {
+ ret = -ENODEV;
+ goto err_unlock;
+ }
+
+ ret = chan->indio_dev->info->read_raw(chan->indio_dev, chan->channel,
+ val, &val2, 0);
+err_unlock:
+ mutex_unlock(&chan->indio_dev->info_exist_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(iio_st_read_channel_raw);
+
+int iio_st_read_channel_scale(struct iio_channel *chan, int *val, int *val2)
+{
+ int ret;
+
+ mutex_lock(&chan->indio_dev->info_exist_lock);
+ if (chan->indio_dev->info == NULL) {
+ ret = -ENODEV;
+ goto err_unlock;
+ }
+
+ ret = chan->indio_dev->info->read_raw(chan->indio_dev,
+ chan->channel,
+ val, val2,
+ IIO_CHAN_INFO_SCALE);
+err_unlock:
+ mutex_unlock(&chan->indio_dev->info_exist_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(iio_st_read_channel_scale);
+
+int iio_st_get_channel_type(struct iio_channel *chan,
+ enum iio_chan_type *type)
+{
+ int ret = 0;
+ /* Need to verify underlying driver has not gone away */
+
+ mutex_lock(&chan->indio_dev->info_exist_lock);
+ if (chan->indio_dev->info == NULL) {
+ ret = -ENODEV;
+ goto err_unlock;
+ }
+
+ *type = chan->channel->type;
+err_unlock:
+ mutex_unlock(&chan->indio_dev->info_exist_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(iio_st_get_channel_type);
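
A consumer-side sketch tying the new API together; the device and channel
names are the illustrative ones from the mapping sketch above, and error
handling is trimmed for brevity:

    static int example_read_consumer(void)
    {
        struct iio_channel *chan;
        int val, ret;

        chan = iio_st_channel_get("iio_hwmon.0", "vcc");
        if (IS_ERR(chan))
            return PTR_ERR(chan);

        ret = iio_st_read_channel_raw(chan, &val);
        if (ret >= 0)
            pr_info("raw value: %d\n", val);

        iio_st_channel_release(chan);
        return ret;
    }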
diff --git a/drivers/staging/iio/kfifo_buf.c b/drivers/staging/iio/kfifo_buf.c
index e1e9c06cde4..9f3bd59c0e7 100644
--- a/drivers/staging/iio/kfifo_buf.c
+++ b/drivers/staging/iio/kfifo_buf.c
@@ -59,21 +59,6 @@ static struct attribute_group iio_kfifo_attribute_group = {
.name = "buffer",
};
-struct iio_buffer *iio_kfifo_allocate(struct iio_dev *indio_dev)
-{
- struct iio_kfifo *kf;
-
- kf = kzalloc(sizeof *kf, GFP_KERNEL);
- if (!kf)
- return NULL;
- kf->update_needed = true;
- iio_buffer_init(&kf->buffer);
- kf->buffer.attrs = &iio_kfifo_attribute_group;
-
- return &kf->buffer;
-}
-EXPORT_SYMBOL(iio_kfifo_allocate);
-
static int iio_get_bytes_per_datum_kfifo(struct iio_buffer *r)
{
return r->bytes_per_datum;
@@ -104,12 +89,6 @@ static int iio_set_length_kfifo(struct iio_buffer *r, int length)
return 0;
}
-void iio_kfifo_free(struct iio_buffer *r)
-{
- kfree(iio_to_kfifo(r));
-}
-EXPORT_SYMBOL(iio_kfifo_free);
-
static int iio_store_to_kfifo(struct iio_buffer *r,
u8 *data,
s64 timestamp)
@@ -137,7 +116,7 @@ static int iio_read_first_n_kfifo(struct iio_buffer *r,
return copied;
}
-const struct iio_buffer_access_funcs kfifo_access_funcs = {
+static const struct iio_buffer_access_funcs kfifo_access_funcs = {
.store_to = &iio_store_to_kfifo,
.read_first_n = &iio_read_first_n_kfifo,
.request_update = &iio_request_update_kfifo,
@@ -146,6 +125,27 @@ const struct iio_buffer_access_funcs kfifo_access_funcs = {
.get_length = &iio_get_length_kfifo,
.set_length = &iio_set_length_kfifo,
};
-EXPORT_SYMBOL(kfifo_access_funcs);
+
+struct iio_buffer *iio_kfifo_allocate(struct iio_dev *indio_dev)
+{
+ struct iio_kfifo *kf;
+
+ kf = kzalloc(sizeof *kf, GFP_KERNEL);
+ if (!kf)
+ return NULL;
+ kf->update_needed = true;
+ iio_buffer_init(&kf->buffer);
+ kf->buffer.attrs = &iio_kfifo_attribute_group;
+ kf->buffer.access = &kfifo_access_funcs;
+
+ return &kf->buffer;
+}
+EXPORT_SYMBOL(iio_kfifo_allocate);
+
+void iio_kfifo_free(struct iio_buffer *r)
+{
+ kfree(iio_to_kfifo(r));
+}
+EXPORT_SYMBOL(iio_kfifo_free);
MODULE_LICENSE("GPL");
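
Since kfifo_access_funcs is now static and assigned inside iio_kfifo_allocate(), driver buffer setup loses one step; a sketch of the resulting pattern, with the surrounding driver context assumed:

/* Sketch of buffer setup/teardown against the new allocator. */
static int example_kfifo_setup(struct iio_dev *indio_dev)
{
	indio_dev->buffer = iio_kfifo_allocate(indio_dev);
	if (!indio_dev->buffer)
		return -ENOMEM;
	/* No explicit access assignment needed any more -- the allocator
	 * sets buffer->access = &kfifo_access_funcs itself. */
	return 0;
}

static void example_kfifo_teardown(struct iio_dev *indio_dev)
{
	iio_kfifo_free(indio_dev->buffer);
}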
diff --git a/drivers/staging/iio/kfifo_buf.h b/drivers/staging/iio/kfifo_buf.h
index cc2bd9a1ccf..9f7da016af0 100644
--- a/drivers/staging/iio/kfifo_buf.h
+++ b/drivers/staging/iio/kfifo_buf.h
@@ -3,8 +3,6 @@
#include "iio.h"
#include "buffer.h"
-extern const struct iio_buffer_access_funcs kfifo_access_funcs;
-
struct iio_buffer *iio_kfifo_allocate(struct iio_dev *indio_dev);
void iio_kfifo_free(struct iio_buffer *r);
diff --git a/drivers/staging/iio/light/isl29018.c b/drivers/staging/iio/light/isl29018.c
index 849d6a564af..38ec52b65df 100644
--- a/drivers/staging/iio/light/isl29018.c
+++ b/drivers/staging/iio/light/isl29018.c
@@ -592,11 +592,18 @@ static const struct i2c_device_id isl29018_id[] = {
MODULE_DEVICE_TABLE(i2c, isl29018_id);
+static const struct of_device_id isl29018_of_match[] = {
+ { .compatible = "invn,isl29018", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, isl29018_of_match);
+
static struct i2c_driver isl29018_driver = {
.class = I2C_CLASS_HWMON,
.driver = {
.name = "isl29018",
.owner = THIS_MODULE,
+ .of_match_table = isl29018_of_match,
},
.probe = isl29018_probe,
.remove = __devexit_p(isl29018_remove),
diff --git a/drivers/staging/iio/light/tsl2563.c b/drivers/staging/iio/light/tsl2563.c
index ffca85e81ef..546c95a4ea9 100644
--- a/drivers/staging/iio/light/tsl2563.c
+++ b/drivers/staging/iio/light/tsl2563.c
@@ -118,7 +118,7 @@ struct tsl2563_chip {
struct delayed_work poweroff_work;
/* Remember state for suspend and resume functions */
- pm_message_t state;
+ bool suspended;
struct tsl2563_gainlevel_coeff const *gainlevel;
@@ -315,7 +315,7 @@ static int tsl2563_get_adc(struct tsl2563_chip *chip)
int retry = 1;
int ret = 0;
- if (chip->state.event != PM_EVENT_ON)
+ if (chip->suspended)
goto out;
if (!chip->int_enabled) {
@@ -708,7 +708,6 @@ static int __devinit tsl2563_probe(struct i2c_client *client,
struct tsl2563_chip *chip;
struct tsl2563_platform_data *pdata = client->dev.platform_data;
int err = 0;
- int ret;
u8 id = 0;
indio_dev = iio_allocate_device(sizeof(*chip));
@@ -722,13 +721,15 @@ static int __devinit tsl2563_probe(struct i2c_client *client,
err = tsl2563_detect(chip);
if (err) {
- dev_err(&client->dev, "device not found, error %d\n", -err);
+ dev_err(&client->dev, "detect error %d\n", -err);
goto fail1;
}
err = tsl2563_read_id(chip, &id);
- if (err)
+ if (err) {
+ dev_err(&client->dev, "read id error %d\n", -err);
goto fail1;
+ }
mutex_init(&chip->lock);
@@ -751,40 +752,52 @@ static int __devinit tsl2563_probe(struct i2c_client *client,
indio_dev->num_channels = ARRAY_SIZE(tsl2563_channels);
indio_dev->dev.parent = &client->dev;
indio_dev->modes = INDIO_DIRECT_MODE;
+
if (client->irq)
indio_dev->info = &tsl2563_info;
else
indio_dev->info = &tsl2563_info_no_irq;
+
if (client->irq) {
- ret = request_threaded_irq(client->irq,
+ err = request_threaded_irq(client->irq,
NULL,
&tsl2563_event_handler,
IRQF_TRIGGER_RISING | IRQF_ONESHOT,
"tsl2563_event",
indio_dev);
- if (ret)
- goto fail2;
+ if (err) {
+ dev_err(&client->dev, "irq request error %d\n", -err);
+ goto fail1;
+ }
}
+
err = tsl2563_configure(chip);
- if (err)
- goto fail3;
+ if (err) {
+ dev_err(&client->dev, "configure error %d\n", -err);
+ goto fail2;
+ }
INIT_DELAYED_WORK(&chip->poweroff_work, tsl2563_poweroff_work);
+
/* The interrupt cannot yet be enabled so this is fine without lock */
schedule_delayed_work(&chip->poweroff_work, 5 * HZ);
- ret = iio_device_register(indio_dev);
- if (ret)
+ err = iio_device_register(indio_dev);
+ if (err) {
+ dev_err(&client->dev, "iio registration error %d\n", -err);
goto fail3;
+ }
return 0;
+
fail3:
+ cancel_delayed_work(&chip->poweroff_work);
+ flush_scheduled_work();
+fail2:
if (client->irq)
free_irq(client->irq, indio_dev);
-fail2:
- iio_free_device(indio_dev);
fail1:
- kfree(chip);
+ iio_free_device(indio_dev);
return err;
}
@@ -810,9 +823,10 @@ static int tsl2563_remove(struct i2c_client *client)
return 0;
}
-static int tsl2563_suspend(struct i2c_client *client, pm_message_t state)
+#ifdef CONFIG_PM_SLEEP
+static int tsl2563_suspend(struct device *dev)
{
- struct tsl2563_chip *chip = i2c_get_clientdata(client);
+ struct tsl2563_chip *chip = i2c_get_clientdata(to_i2c_client(dev));
int ret;
mutex_lock(&chip->lock);
@@ -821,16 +835,16 @@ static int tsl2563_suspend(struct i2c_client *client, pm_message_t state)
if (ret)
goto out;
- chip->state = state;
+ chip->suspended = true;
out:
mutex_unlock(&chip->lock);
return ret;
}
-static int tsl2563_resume(struct i2c_client *client)
+static int tsl2563_resume(struct device *dev)
{
- struct tsl2563_chip *chip = i2c_get_clientdata(client);
+ struct tsl2563_chip *chip = i2c_get_clientdata(to_i2c_client(dev));
int ret;
mutex_lock(&chip->lock);
@@ -843,13 +857,19 @@ static int tsl2563_resume(struct i2c_client *client)
if (ret)
goto out;
- chip->state.event = PM_EVENT_ON;
+ chip->suspended = false;
out:
mutex_unlock(&chip->lock);
return ret;
}
+static SIMPLE_DEV_PM_OPS(tsl2563_pm_ops, tsl2563_suspend, tsl2563_resume);
+#define TSL2563_PM_OPS (&tsl2563_pm_ops)
+#else
+#define TSL2563_PM_OPS NULL
+#endif
+
static const struct i2c_device_id tsl2563_id[] = {
{ "tsl2560", 0 },
{ "tsl2561", 1 },
@@ -862,9 +882,8 @@ MODULE_DEVICE_TABLE(i2c, tsl2563_id);
static struct i2c_driver tsl2563_i2c_driver = {
.driver = {
.name = "tsl2563",
+ .pm = TSL2563_PM_OPS,
},
- .suspend = tsl2563_suspend,
- .resume = tsl2563_resume,
.probe = tsl2563_probe,
.remove = __devexit_p(tsl2563_remove),
.id_table = tsl2563_id,
diff --git a/drivers/staging/iio/light/tsl2583.c b/drivers/staging/iio/light/tsl2583.c
index 5b6455a238d..8671d98e044 100644
--- a/drivers/staging/iio/light/tsl2583.c
+++ b/drivers/staging/iio/light/tsl2583.c
@@ -113,7 +113,7 @@ struct taos_lux {
/* This structure is intentionally large to accommodate updates via sysfs. */
/* Sized to 11 = max 10 segments + 1 termination segment */
-/* Assumption is is one and only one type of glass used */
+/* Assumption is one and only one type of glass used */
static struct taos_lux taos_device_lux[11] = {
{ 9830, 8520, 15729 },
{ 12452, 10807, 23344 },
@@ -884,9 +884,10 @@ fail2:
return ret;
}
-static int taos_suspend(struct i2c_client *client, pm_message_t state)
+#ifdef CONFIG_PM_SLEEP
+static int taos_suspend(struct device *dev)
{
- struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
struct tsl2583_chip *chip = iio_priv(indio_dev);
int ret = 0;
@@ -901,9 +902,9 @@ static int taos_suspend(struct i2c_client *client, pm_message_t state)
return ret;
}
-static int taos_resume(struct i2c_client *client)
+static int taos_resume(struct device *dev)
{
- struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
struct tsl2583_chip *chip = iio_priv(indio_dev);
int ret = 0;
@@ -916,6 +917,11 @@ static int taos_resume(struct i2c_client *client)
return ret;
}
+static SIMPLE_DEV_PM_OPS(taos_pm_ops, taos_suspend, taos_resume);
+#define TAOS_PM_OPS (&taos_pm_ops)
+#else
+#define TAOS_PM_OPS NULL
+#endif
static int __devexit taos_remove(struct i2c_client *client)
{
@@ -937,10 +943,9 @@ MODULE_DEVICE_TABLE(i2c, taos_idtable);
static struct i2c_driver taos_driver = {
.driver = {
.name = "tsl2583",
+ .pm = TAOS_PM_OPS,
},
.id_table = taos_idtable,
- .suspend = taos_suspend,
- .resume = taos_resume,
.probe = taos_probe,
.remove = __devexit_p(taos_remove),
};
diff --git a/drivers/staging/iio/machine.h b/drivers/staging/iio/machine.h
new file mode 100644
index 00000000000..0b1f19bfdc4
--- /dev/null
+++ b/drivers/staging/iio/machine.h
@@ -0,0 +1,24 @@
+/*
+ * Industrial I/O in-kernel access map definitions for board files.
+ *
+ * Copyright (c) 2011 Jonathan Cameron
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+/**
+ * struct iio_map - description of link between consumer and device channels
+ * @adc_channel_label: Label used to identify the channel on the provider.
+ * This is matched against the datasheet_name element
+ * of struct iio_chan_spec.
+ * @consumer_dev_name: Name to uniquely identify the consumer device.
+ * @consumer_channel:	Unique name used to identify the channel on the
+ * consumer side.
+ */
+struct iio_map {
+ const char *adc_channel_label;
+ const char *consumer_dev_name;
+ const char *consumer_channel;
+};
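
A hedged sketch of what a board file using struct iio_map might look like; the channel labels, consumer device names, and consumer channel names below are invented for illustration, and registering the array with the IIO core is assumed to be done by the map-registration helper added elsewhere in this series:

/* Hypothetical board file: route two provider channels to consumers. */
static struct iio_map board_adc_maps[] = {
	{
		.adc_channel_label = "channel_0",	/* datasheet_name on provider */
		.consumer_dev_name = "example-hwmon.0",
		.consumer_channel  = "supply_voltage",
	}, {
		.adc_channel_label = "channel_1",
		.consumer_dev_name = "example-battery.0",
		.consumer_channel  = "battery_voltage",
	},
	{ },	/* terminating entry */
};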
diff --git a/drivers/staging/iio/magnetometer/ak8975.c b/drivers/staging/iio/magnetometer/ak8975.c
index 3158f12cb05..d5ddac3d883 100644
--- a/drivers/staging/iio/magnetometer/ak8975.c
+++ b/drivers/staging/iio/magnetometer/ak8975.c
@@ -564,9 +564,17 @@ static const struct i2c_device_id ak8975_id[] = {
MODULE_DEVICE_TABLE(i2c, ak8975_id);
+static const struct of_device_id ak8975_of_match[] = {
+ { .compatible = "asahi-kasei,ak8975", },
+ { .compatible = "ak8975", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ak8975_of_match);
+
static struct i2c_driver ak8975_driver = {
.driver = {
.name = "ak8975",
+ .of_match_table = ak8975_of_match,
},
.probe = ak8975_probe,
.remove = __devexit_p(ak8975_remove),
diff --git a/drivers/staging/iio/magnetometer/hmc5843.c b/drivers/staging/iio/magnetometer/hmc5843.c
index f2e85a9cf19..91dd3da70cb 100644
--- a/drivers/staging/iio/magnetometer/hmc5843.c
+++ b/drivers/staging/iio/magnetometer/hmc5843.c
@@ -86,7 +86,7 @@
#define RATE_NOT_USED 0x07
/*
- * Device Configutration
+ * Device Configuration
*/
#define CONF_NORMAL 0x00
#define CONF_POSITIVE_BIAS 0x01
@@ -142,7 +142,7 @@ static s32 hmc5843_configure(struct i2c_client *client,
(operating_mode & 0x03));
}
-/* Return the measurement value from the specified channel */
+/* Return the measurement value from the specified channel */
static int hmc5843_read_measurement(struct iio_dev *indio_dev,
int address,
int *val)
@@ -169,7 +169,7 @@ static int hmc5843_read_measurement(struct iio_dev *indio_dev,
/*
* From the datasheet
* 0 - Continuous-Conversion Mode: In continuous-conversion mode, the
- * device continuously performs conversions an places the result in the
+ * device continuously performs conversions and places the result in the
* data register.
*
* 1 - Single-Conversion Mode : device performs a single measurement,
@@ -588,19 +588,26 @@ static int hmc5843_remove(struct i2c_client *client)
return 0;
}
-static int hmc5843_suspend(struct i2c_client *client, pm_message_t mesg)
+#ifdef CONFIG_PM_SLEEP
+static int hmc5843_suspend(struct device *dev)
{
- hmc5843_configure(client, MODE_SLEEP);
+ hmc5843_configure(to_i2c_client(dev), MODE_SLEEP);
return 0;
}
-static int hmc5843_resume(struct i2c_client *client)
+static int hmc5843_resume(struct device *dev)
{
- struct hmc5843_data *data = i2c_get_clientdata(client);
- hmc5843_configure(client, data->operating_mode);
+ struct hmc5843_data *data = i2c_get_clientdata(to_i2c_client(dev));
+ hmc5843_configure(to_i2c_client(dev), data->operating_mode);
return 0;
}
+static SIMPLE_DEV_PM_OPS(hmc5843_pm_ops, hmc5843_suspend, hmc5843_resume);
+#define HMC5843_PM_OPS (&hmc5843_pm_ops)
+#else
+#define HMC5843_PM_OPS NULL
+#endif
+
static const struct i2c_device_id hmc5843_id[] = {
{ "hmc5843", 0 },
{ }
@@ -610,14 +617,13 @@ MODULE_DEVICE_TABLE(i2c, hmc5843_id);
static struct i2c_driver hmc5843_driver = {
.driver = {
.name = "hmc5843",
+ .pm = HMC5843_PM_OPS,
},
.id_table = hmc5843_id,
.probe = hmc5843_probe,
.remove = hmc5843_remove,
.detect = hmc5843_detect,
.address_list = normal_i2c,
- .suspend = hmc5843_suspend,
- .resume = hmc5843_resume,
};
module_i2c_driver(hmc5843_driver);
diff --git a/drivers/staging/iio/meter/ade7758_ring.c b/drivers/staging/iio/meter/ade7758_ring.c
index f29f2b278fe..c45b23bb122 100644
--- a/drivers/staging/iio/meter/ade7758_ring.c
+++ b/drivers/staging/iio/meter/ade7758_ring.c
@@ -85,7 +85,7 @@ static irqreturn_t ade7758_trigger_handler(int irq, void *p)
/**
* ade7758_ring_preenable() setup the parameters of the ring before enabling
*
- * The complex nature of the setting of the nuber of bytes per datum is due
+ * The complex nature of the setting of the number of bytes per datum is due
* to this driver currently ensuring that the timestamp is stored at an 8
* byte boundary.
**/
@@ -144,8 +144,6 @@ int ade7758_configure_ring(struct iio_dev *indio_dev)
return ret;
}
- /* Effectively select the ring buffer implementation */
- indio_dev->buffer->access = &ring_sw_access_funcs;
indio_dev->setup_ops = &ade7758_ring_setup_ops;
indio_dev->pollfunc = iio_alloc_pollfunc(&iio_pollfunc_store_time,
diff --git a/drivers/staging/iio/meter/meter.h b/drivers/staging/iio/meter/meter.h
index 142c50d71fd..6a3db142363 100644
--- a/drivers/staging/iio/meter/meter.h
+++ b/drivers/staging/iio/meter/meter.h
@@ -362,7 +362,7 @@
#define IIO_EVENT_ATTR_CYCEND(_evlist, _show, _store, _mask) \
IIO_EVENT_ATTR_SH(cycend, _evlist, _show, _store, _mask)
-/* on the rising and falling edge of the the voltage waveform */
+/* on the rising and falling edge of the voltage waveform */
#define IIO_EVENT_ATTR_ZERO_CROSS(_evlist, _show, _store, _mask) \
IIO_EVENT_ATTR_SH(zero_cross, _evlist, _show, _store, _mask)
diff --git a/drivers/staging/iio/ring_sw.c b/drivers/staging/iio/ring_sw.c
index 3e24ec45585..b9945ec44fa 100644
--- a/drivers/staging/iio/ring_sw.c
+++ b/drivers/staging/iio/ring_sw.c
@@ -147,7 +147,7 @@ static int iio_read_first_n_sw_rb(struct iio_buffer *r,
size_t data_available, buffer_size;
/* A userspace program has probably made an error if it tries to
- * read something that is not a whole number of bpds.
+ * read something that is not a whole number of bpds.
* Return an error.
*/
if (n % ring->buf.bytes_per_datum) {
@@ -229,7 +229,7 @@ static int iio_read_first_n_sw_rb(struct iio_buffer *r,
/* setup the next read position */
/* Beware, this may fail due to concurrency fun and games.
- * Possible that sufficient fill commands have run to push the read
+ * Possible that sufficient fill commands have run to push the read
* pointer past where we would be after the rip. If this occurs, leave
* it be.
*/
@@ -329,6 +329,16 @@ static struct attribute_group iio_ring_attribute_group = {
.name = "buffer",
};
+static const struct iio_buffer_access_funcs ring_sw_access_funcs = {
+ .store_to = &iio_store_to_sw_rb,
+ .read_first_n = &iio_read_first_n_sw_rb,
+ .request_update = &iio_request_update_sw_rb,
+ .get_bytes_per_datum = &iio_get_bytes_per_datum_sw_rb,
+ .set_bytes_per_datum = &iio_set_bytes_per_datum_sw_rb,
+ .get_length = &iio_get_length_sw_rb,
+ .set_length = &iio_set_length_sw_rb,
+};
+
struct iio_buffer *iio_sw_rb_allocate(struct iio_dev *indio_dev)
{
struct iio_buffer *buf;
@@ -341,6 +351,7 @@ struct iio_buffer *iio_sw_rb_allocate(struct iio_dev *indio_dev)
buf = &ring->buf;
iio_buffer_init(buf);
buf->attrs = &iio_ring_attribute_group;
+ buf->access = &ring_sw_access_funcs;
return buf;
}
@@ -352,16 +363,5 @@ void iio_sw_rb_free(struct iio_buffer *r)
}
EXPORT_SYMBOL(iio_sw_rb_free);
-const struct iio_buffer_access_funcs ring_sw_access_funcs = {
- .store_to = &iio_store_to_sw_rb,
- .read_first_n = &iio_read_first_n_sw_rb,
- .request_update = &iio_request_update_sw_rb,
- .get_bytes_per_datum = &iio_get_bytes_per_datum_sw_rb,
- .set_bytes_per_datum = &iio_set_bytes_per_datum_sw_rb,
- .get_length = &iio_get_length_sw_rb,
- .set_length = &iio_set_length_sw_rb,
-};
-EXPORT_SYMBOL(ring_sw_access_funcs);
-
MODULE_DESCRIPTION("Industrialio I/O software ring buffer");
MODULE_LICENSE("GPL");
diff --git a/drivers/staging/iio/ring_sw.h b/drivers/staging/iio/ring_sw.h
index e6a6e2c4096..7556e212236 100644
--- a/drivers/staging/iio/ring_sw.h
+++ b/drivers/staging/iio/ring_sw.h
@@ -25,11 +25,6 @@
#define _IIO_RING_SW_H_
#include "buffer.h"
-/**
- * ring_sw_access_funcs - access functions for a software ring buffer
- **/
-extern const struct iio_buffer_access_funcs ring_sw_access_funcs;
-
struct iio_buffer *iio_sw_rb_allocate(struct iio_dev *indio_dev);
void iio_sw_rb_free(struct iio_buffer *ring);
#endif /* _IIO_RING_SW_H_ */
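
The same refactor as in kfifo_buf.c above: ring_sw_access_funcs becomes private and iio_sw_rb_allocate() sets buffer->access itself, which is why the explicit assignment could be dropped from ade7758_ring.c earlier in this patch. Illustratively:

static int example_sw_rb_setup(struct iio_dev *indio_dev)
{
	indio_dev->buffer = iio_sw_rb_allocate(indio_dev);
	if (!indio_dev->buffer)
		return -ENOMEM;
	/* buffer->access already points at ring_sw_access_funcs. */
	return 0;
}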
diff --git a/drivers/staging/iio/trigger/iio-trig-bfin-timer.c b/drivers/staging/iio/trigger/iio-trig-bfin-timer.c
index 1cbb25dff8b..665653d79f0 100644
--- a/drivers/staging/iio/trigger/iio-trig-bfin-timer.c
+++ b/drivers/staging/iio/trigger/iio-trig-bfin-timer.c
@@ -232,17 +232,7 @@ static struct platform_driver iio_bfin_tmr_trigger_driver = {
.remove = __devexit_p(iio_bfin_tmr_trigger_remove),
};
-static int __init iio_bfin_tmr_trig_init(void)
-{
- return platform_driver_register(&iio_bfin_tmr_trigger_driver);
-}
-module_init(iio_bfin_tmr_trig_init);
-
-static void __exit iio_bfin_tmr_trig_exit(void)
-{
- platform_driver_unregister(&iio_bfin_tmr_trigger_driver);
-}
-module_exit(iio_bfin_tmr_trig_exit);
+module_platform_driver(iio_bfin_tmr_trigger_driver);
MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
MODULE_DESCRIPTION("Blackfin system timer based trigger for the iio subsystem");
diff --git a/drivers/staging/iio/trigger/iio-trig-gpio.c b/drivers/staging/iio/trigger/iio-trig-gpio.c
index f2a65598162..a3465947235 100644
--- a/drivers/staging/iio/trigger/iio-trig-gpio.c
+++ b/drivers/staging/iio/trigger/iio-trig-gpio.c
@@ -160,17 +160,7 @@ static struct platform_driver iio_gpio_trigger_driver = {
},
};
-static int __init iio_gpio_trig_init(void)
-{
- return platform_driver_register(&iio_gpio_trigger_driver);
-}
-module_init(iio_gpio_trig_init);
-
-static void __exit iio_gpio_trig_exit(void)
-{
- platform_driver_unregister(&iio_gpio_trigger_driver);
-}
-module_exit(iio_gpio_trig_exit);
+module_platform_driver(iio_gpio_trigger_driver);
MODULE_AUTHOR("Jonathan Cameron <jic23@cam.ac.uk>");
MODULE_DESCRIPTION("Example gpio trigger for the iio subsystem");
diff --git a/drivers/staging/iio/trigger/iio-trig-periodic-rtc.c b/drivers/staging/iio/trigger/iio-trig-periodic-rtc.c
index bd7416b2c56..a80cf67bf84 100644
--- a/drivers/staging/iio/trigger/iio-trig-periodic-rtc.c
+++ b/drivers/staging/iio/trigger/iio-trig-periodic-rtc.c
@@ -195,18 +195,8 @@ static struct platform_driver iio_trig_periodic_rtc_driver = {
},
};
-static int __init iio_trig_periodic_rtc_init(void)
-{
- return platform_driver_register(&iio_trig_periodic_rtc_driver);
-}
-
-static void __exit iio_trig_periodic_rtc_exit(void)
-{
- return platform_driver_unregister(&iio_trig_periodic_rtc_driver);
-}
+module_platform_driver(iio_trig_periodic_rtc_driver);
-module_init(iio_trig_periodic_rtc_init);
-module_exit(iio_trig_periodic_rtc_exit);
MODULE_AUTHOR("Jonathan Cameron <jic23@cam.ac.uk>");
MODULE_DESCRIPTION("Periodic realtime clock trigger for the iio subsystem");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/iio/types.h b/drivers/staging/iio/types.h
index b7d26474ad0..0c321366690 100644
--- a/drivers/staging/iio/types.h
+++ b/drivers/staging/iio/types.h
@@ -46,4 +46,8 @@ enum iio_modifier {
IIO_MOD_LIGHT_IR,
};
+#define IIO_VAL_INT 1
+#define IIO_VAL_INT_PLUS_MICRO 2
+#define IIO_VAL_INT_PLUS_NANO 3
+
#endif /* _IIO_TYPES_H_ */
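
These constants tell a consumer how to combine the (val, val2) pair filled in by read_raw(); a sketch of the usual IIO interpretation, hedged since the convention is defined by the core rather than by this hunk:

/* Hypothetical consumer-side interpretation of a read result. */
static void example_interpret(int ret, int val, int val2)
{
	switch (ret) {
	case IIO_VAL_INT:
		/* value = val (val2 unused) */
		break;
	case IIO_VAL_INT_PLUS_MICRO:
		/* value = val + val2 * 10^-6 */
		break;
	case IIO_VAL_INT_PLUS_NANO:
		/* value = val + val2 * 10^-9 */
		break;
	}
}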
diff --git a/drivers/staging/keucr/TODO b/drivers/staging/keucr/TODO
index 1c48e40e2b2..d6da656eee1 100644
--- a/drivers/staging/keucr/TODO
+++ b/drivers/staging/keucr/TODO
@@ -9,4 +9,4 @@ TODO:
- smcommon.h & smilsub.c: use kernel hweight8(), hweight16()
Please send any patches for this driver to Al Cho <acho@novell.com> and
-Greg Kroah-Hartman <gregkh@suse.de>.
+Greg Kroah-Hartman <gregkh@linuxfoundation.org>.
diff --git a/drivers/staging/line6/capture.c b/drivers/staging/line6/capture.c
index 127f9524774..c85c5b6bffb 100644
--- a/drivers/staging/line6/capture.c
+++ b/drivers/staging/line6/capture.c
@@ -107,7 +107,7 @@ void line6_unlink_audio_in_urbs(struct snd_line6_pcm *line6pcm)
Wait until unlinking of all currently active capture URBs has been
finished.
*/
-static void wait_clear_audio_in_urbs(struct snd_line6_pcm *line6pcm)
+void line6_wait_clear_audio_in_urbs(struct snd_line6_pcm *line6pcm)
{
int timeout = HZ;
unsigned int i;
@@ -134,7 +134,7 @@ static void wait_clear_audio_in_urbs(struct snd_line6_pcm *line6pcm)
void line6_unlink_wait_clear_audio_in_urbs(struct snd_line6_pcm *line6pcm)
{
line6_unlink_audio_in_urbs(line6pcm);
- wait_clear_audio_in_urbs(line6pcm);
+ line6_wait_clear_audio_in_urbs(line6pcm);
}
/*
@@ -193,25 +193,6 @@ void line6_capture_check_period(struct snd_line6_pcm *line6pcm, int length)
}
}
-int line6_alloc_capture_buffer(struct snd_line6_pcm *line6pcm)
-{
- /* We may be invoked multiple times in a row so allocate once only */
- if (line6pcm->buffer_in)
- return 0;
-
- line6pcm->buffer_in =
- kmalloc(LINE6_ISO_BUFFERS * LINE6_ISO_PACKETS *
- line6pcm->max_packet_size, GFP_KERNEL);
-
- if (!line6pcm->buffer_in) {
- dev_err(line6pcm->line6->ifcdev,
- "cannot malloc capture buffer\n");
- return -ENOMEM;
- }
-
- return 0;
-}
-
void line6_free_capture_buffer(struct snd_line6_pcm *line6pcm)
{
kfree(line6pcm->buffer_in);
@@ -273,9 +254,9 @@ static void audio_in_callback(struct urb *urb)
line6pcm->prev_fsize = fsize;
#ifdef CONFIG_LINE6_USB_IMPULSE_RESPONSE
- if (!(line6pcm->flags & MASK_PCM_IMPULSE))
+ if (!(line6pcm->flags & LINE6_BITS_PCM_IMPULSE))
#endif
- if (test_bit(BIT_PCM_ALSA_CAPTURE, &line6pcm->flags)
+ if (test_bit(LINE6_INDEX_PCM_ALSA_CAPTURE_STREAM, &line6pcm->flags)
&& (fsize > 0))
line6_capture_copy(line6pcm, fbuf, fsize);
}
@@ -291,9 +272,9 @@ static void audio_in_callback(struct urb *urb)
submit_audio_in_urb(line6pcm);
#ifdef CONFIG_LINE6_USB_IMPULSE_RESPONSE
- if (!(line6pcm->flags & MASK_PCM_IMPULSE))
+ if (!(line6pcm->flags & LINE6_BITS_PCM_IMPULSE))
#endif
- if (test_bit(BIT_PCM_ALSA_CAPTURE, &line6pcm->flags))
+ if (test_bit(LINE6_INDEX_PCM_ALSA_CAPTURE_STREAM, &line6pcm->flags))
line6_capture_check_period(line6pcm, length);
}
}
@@ -341,17 +322,17 @@ static int snd_line6_capture_hw_params(struct snd_pcm_substream *substream,
}
/* -- [FD] end */
- if ((line6pcm->flags & MASK_CAPTURE) == 0) {
- ret = line6_alloc_capture_buffer(line6pcm);
+ ret = line6_pcm_acquire(line6pcm, LINE6_BIT_PCM_ALSA_CAPTURE_BUFFER);
- if (ret < 0)
- return ret;
- }
+ if (ret < 0)
+ return ret;
ret = snd_pcm_lib_malloc_pages(substream,
params_buffer_bytes(hw_params));
- if (ret < 0)
+ if (ret < 0) {
+ line6_pcm_release(line6pcm, LINE6_BIT_PCM_ALSA_CAPTURE_BUFFER);
return ret;
+ }
line6pcm->period_in = params_period_bytes(hw_params);
return 0;
@@ -361,12 +342,7 @@ static int snd_line6_capture_hw_params(struct snd_pcm_substream *substream,
static int snd_line6_capture_hw_free(struct snd_pcm_substream *substream)
{
struct snd_line6_pcm *line6pcm = snd_pcm_substream_chip(substream);
-
- if ((line6pcm->flags & MASK_CAPTURE) == 0) {
- line6_unlink_wait_clear_audio_in_urbs(line6pcm);
- line6_free_capture_buffer(line6pcm);
- }
-
+ line6_pcm_release(line6pcm, LINE6_BIT_PCM_ALSA_CAPTURE_BUFFER);
return snd_pcm_lib_free_pages(substream);
}
@@ -380,7 +356,7 @@ int snd_line6_capture_trigger(struct snd_line6_pcm *line6pcm, int cmd)
#ifdef CONFIG_PM
case SNDRV_PCM_TRIGGER_RESUME:
#endif
- err = line6_pcm_start(line6pcm, MASK_PCM_ALSA_CAPTURE);
+ err = line6_pcm_acquire(line6pcm, LINE6_BIT_PCM_ALSA_CAPTURE_STREAM);
if (err < 0)
return err;
@@ -391,7 +367,7 @@ int snd_line6_capture_trigger(struct snd_line6_pcm *line6pcm, int cmd)
#ifdef CONFIG_PM
case SNDRV_PCM_TRIGGER_SUSPEND:
#endif
- err = line6_pcm_stop(line6pcm, MASK_PCM_ALSA_CAPTURE);
+ err = line6_pcm_release(line6pcm, LINE6_BIT_PCM_ALSA_CAPTURE_STREAM);
if (err < 0)
return err;
diff --git a/drivers/staging/line6/capture.h b/drivers/staging/line6/capture.h
index 366cbaa7c88..4157bcb598a 100644
--- a/drivers/staging/line6/capture.h
+++ b/drivers/staging/line6/capture.h
@@ -19,7 +19,6 @@
extern struct snd_pcm_ops snd_line6_capture_ops;
-extern int line6_alloc_capture_buffer(struct snd_line6_pcm *line6pcm);
extern void line6_capture_copy(struct snd_line6_pcm *line6pcm, char *fbuf,
int fsize);
extern void line6_capture_check_period(struct snd_line6_pcm *line6pcm,
@@ -30,6 +29,7 @@ extern int line6_submit_audio_in_all_urbs(struct snd_line6_pcm *line6pcm);
extern void line6_unlink_audio_in_urbs(struct snd_line6_pcm *line6pcm);
extern void line6_unlink_wait_clear_audio_in_urbs(struct snd_line6_pcm
*line6pcm);
+extern void line6_wait_clear_audio_in_urbs(struct snd_line6_pcm *line6pcm);
extern int snd_line6_capture_trigger(struct snd_line6_pcm *line6pcm, int cmd);
#endif
diff --git a/drivers/staging/line6/driver.c b/drivers/staging/line6/driver.c
index 6a1959e16e0..e8023afd365 100644
--- a/drivers/staging/line6/driver.c
+++ b/drivers/staging/line6/driver.c
@@ -1346,7 +1346,7 @@ static void __exit line6_exit(void)
if (line6pcm == NULL)
continue;
- line6_pcm_stop(line6pcm, ~0);
+ line6_pcm_release(line6pcm, ~0);
}
usb_deregister(&line6_driver);
diff --git a/drivers/staging/line6/pcm.c b/drivers/staging/line6/pcm.c
index 37675e66da8..90d2d4475cb 100644
--- a/drivers/staging/line6/pcm.c
+++ b/drivers/staging/line6/pcm.c
@@ -52,9 +52,9 @@ static ssize_t pcm_set_impulse_volume(struct device *dev,
line6pcm->impulse_volume = value;
if (value > 0)
- line6_pcm_start(line6pcm, MASK_PCM_IMPULSE);
+ line6_pcm_acquire(line6pcm, LINE6_BITS_PCM_IMPULSE);
else
- line6_pcm_stop(line6pcm, MASK_PCM_IMPULSE);
+ line6_pcm_release(line6pcm, LINE6_BITS_PCM_IMPULSE);
return count;
}
@@ -92,29 +92,43 @@ static bool test_flags(unsigned long flags0, unsigned long flags1,
return ((flags0 & mask) == 0) && ((flags1 & mask) != 0);
}
-int line6_pcm_start(struct snd_line6_pcm *line6pcm, int channels)
+int line6_pcm_acquire(struct snd_line6_pcm *line6pcm, int channels)
{
unsigned long flags_old =
__sync_fetch_and_or(&line6pcm->flags, channels);
unsigned long flags_new = flags_old | channels;
+ unsigned long flags_final = flags_old;
int err = 0;
line6pcm->prev_fbuf = NULL;
- if (test_flags(flags_old, flags_new, MASK_CAPTURE)) {
+ if (test_flags(flags_old, flags_new, LINE6_BITS_CAPTURE_BUFFER)) {
+ /* We may be invoked multiple times in a row so allocate once only */
+ if (!line6pcm->buffer_in) {
+ line6pcm->buffer_in =
+ kmalloc(LINE6_ISO_BUFFERS * LINE6_ISO_PACKETS *
+ line6pcm->max_packet_size, GFP_KERNEL);
+
+ if (!line6pcm->buffer_in) {
+ dev_err(line6pcm->line6->ifcdev,
+ "cannot malloc capture buffer\n");
+ err = -ENOMEM;
+ goto pcm_acquire_error;
+ }
+
+ flags_final |= channels & LINE6_BITS_CAPTURE_BUFFER;
+ }
+ }
+
+ if (test_flags(flags_old, flags_new, LINE6_BITS_CAPTURE_STREAM)) {
/*
Waiting for completion of active URBs in the stop handler is
a bug, we therefore report an error if capturing is restarted
too soon.
*/
- if (line6pcm->active_urb_in | line6pcm->unlink_urb_in)
+ if (line6pcm->active_urb_in | line6pcm->unlink_urb_in) {
+ dev_err(line6pcm->line6->ifcdev, "Device not yet ready\n");
return -EBUSY;
-
- if (!(flags_new & MASK_PCM_ALSA_CAPTURE)) {
- err = line6_alloc_capture_buffer(line6pcm);
-
- if (err < 0)
- goto pcm_start_error;
}
line6pcm->count_in = 0;
@@ -122,55 +136,78 @@ int line6_pcm_start(struct snd_line6_pcm *line6pcm, int channels)
err = line6_submit_audio_in_all_urbs(line6pcm);
if (err < 0)
- goto pcm_start_error;
+ goto pcm_acquire_error;
+
+ flags_final |= channels & LINE6_BITS_CAPTURE_STREAM;
}
- if (test_flags(flags_old, flags_new, MASK_PLAYBACK)) {
- /*
- See comment above regarding PCM restart.
- */
- if (line6pcm->active_urb_out | line6pcm->unlink_urb_out)
- return -EBUSY;
+ if (test_flags(flags_old, flags_new, LINE6_BITS_PLAYBACK_BUFFER)) {
+ /* We may be invoked multiple times in a row so allocate once only */
+ if (!line6pcm->buffer_out) {
+ line6pcm->buffer_out =
+ kmalloc(LINE6_ISO_BUFFERS * LINE6_ISO_PACKETS *
+ line6pcm->max_packet_size, GFP_KERNEL);
+
+ if (!line6pcm->buffer_out) {
+ dev_err(line6pcm->line6->ifcdev,
+ "cannot malloc playback buffer\n");
+ err = -ENOMEM;
+ goto pcm_acquire_error;
+ }
- if (!(flags_new & MASK_PCM_ALSA_PLAYBACK)) {
- err = line6_alloc_playback_buffer(line6pcm);
+ flags_final |= channels & LINE6_BITS_PLAYBACK_BUFFER;
+ }
+ }
- if (err < 0)
- goto pcm_start_error;
+ if (test_flags(flags_old, flags_new, LINE6_BITS_PLAYBACK_STREAM)) {
+ /*
+ See comment above regarding PCM restart.
+ */
+ if (line6pcm->active_urb_out | line6pcm->unlink_urb_out) {
+ dev_err(line6pcm->line6->ifcdev, "Device not yet ready\n");
+ return -EBUSY;
}
line6pcm->count_out = 0;
err = line6_submit_audio_out_all_urbs(line6pcm);
if (err < 0)
- goto pcm_start_error;
+ goto pcm_acquire_error;
+
+ flags_final |= channels & LINE6_BITS_PLAYBACK_STREAM;
}
return 0;
-pcm_start_error:
- __sync_fetch_and_and(&line6pcm->flags, ~channels);
+pcm_acquire_error:
+ /*
+ If not all requested resources/streams could be obtained, release
+ those which were successfully obtained (if any).
+ */
+ line6_pcm_release(line6pcm, flags_final & channels);
return err;
}
-int line6_pcm_stop(struct snd_line6_pcm *line6pcm, int channels)
+int line6_pcm_release(struct snd_line6_pcm *line6pcm, int channels)
{
unsigned long flags_old =
__sync_fetch_and_and(&line6pcm->flags, ~channels);
unsigned long flags_new = flags_old & ~channels;
- if (test_flags(flags_new, flags_old, MASK_CAPTURE)) {
+ if (test_flags(flags_new, flags_old, LINE6_BITS_CAPTURE_STREAM))
line6_unlink_audio_in_urbs(line6pcm);
- if (!(flags_old & MASK_PCM_ALSA_CAPTURE))
- line6_free_capture_buffer(line6pcm);
+ if (test_flags(flags_new, flags_old, LINE6_BITS_CAPTURE_BUFFER)) {
+ line6_wait_clear_audio_in_urbs(line6pcm);
+ line6_free_capture_buffer(line6pcm);
}
- if (test_flags(flags_new, flags_old, MASK_PLAYBACK)) {
+ if (test_flags(flags_new, flags_old, LINE6_BITS_PLAYBACK_STREAM))
line6_unlink_audio_out_urbs(line6pcm);
- if (!(flags_old & MASK_PCM_ALSA_PLAYBACK))
- line6_free_playback_buffer(line6pcm);
+ if (test_flags(flags_new, flags_old, LINE6_BITS_PLAYBACK_BUFFER)) {
+ line6_wait_clear_audio_out_urbs(line6pcm);
+ line6_free_playback_buffer(line6pcm);
}
return 0;
@@ -185,7 +222,7 @@ int snd_line6_trigger(struct snd_pcm_substream *substream, int cmd)
unsigned long flags;
spin_lock_irqsave(&line6pcm->lock_trigger, flags);
- clear_bit(BIT_PREPARED, &line6pcm->flags);
+ clear_bit(LINE6_INDEX_PREPARED, &line6pcm->flags);
snd_pcm_group_for_each_entry(s, substream) {
switch (s->stream) {
@@ -498,13 +535,13 @@ int snd_line6_prepare(struct snd_pcm_substream *substream)
switch (substream->stream) {
case SNDRV_PCM_STREAM_PLAYBACK:
- if ((line6pcm->flags & MASK_PLAYBACK) == 0)
+ if ((line6pcm->flags & LINE6_BITS_PLAYBACK_STREAM) == 0)
line6_unlink_wait_clear_audio_out_urbs(line6pcm);
break;
case SNDRV_PCM_STREAM_CAPTURE:
- if ((line6pcm->flags & MASK_CAPTURE) == 0)
+ if ((line6pcm->flags & LINE6_BITS_CAPTURE_STREAM) == 0)
line6_unlink_wait_clear_audio_in_urbs(line6pcm);
break;
@@ -513,7 +550,7 @@ int snd_line6_prepare(struct snd_pcm_substream *substream)
MISSING_CASE;
}
- if (!test_and_set_bit(BIT_PREPARED, &line6pcm->flags)) {
+ if (!test_and_set_bit(LINE6_INDEX_PREPARED, &line6pcm->flags)) {
line6pcm->count_out = 0;
line6pcm->pos_out = 0;
line6pcm->pos_out_done = 0;
diff --git a/drivers/staging/line6/pcm.h b/drivers/staging/line6/pcm.h
index 55d8297dd3d..5210ec8dbe1 100644
--- a/drivers/staging/line6/pcm.h
+++ b/drivers/staging/line6/pcm.h
@@ -46,57 +46,131 @@
(line6pcm->pcm->streams[stream].substream)
/*
- PCM mode bits and masks.
- "ALSA": operations triggered by applications via ALSA
- "MONITOR": software monitoring
- "IMPULSE": optional impulse response operation
+ PCM mode bits.
+
+ There are several features of the Line6 USB driver which require PCM
+ data to be exchanged with the device:
+ *) PCM playback and capture via ALSA
+ *) software monitoring (for devices without hardware monitoring)
+ *) optional impulse response measurement
+ However, from the device's point of view, there is just a single
+ capture and playback stream, which must be shared between these
+ subsystems. It is therefore necessary to maintain the state of the
+ subsystems with respect to PCM usage. We define several constants of
+ the form LINE6_BIT_PCM_<subsystem>_<direction>_<resource> with the
+ following meanings:
+ *) <subsystem> is one of
+ -) ALSA: PCM playback and capture via ALSA
+ -) MONITOR: software monitoring
+ -) IMPULSE: optional impulse response measurement
+ *) <direction> is one of
+ -) PLAYBACK: audio output (from host to device)
+ -) CAPTURE: audio input (from device to host)
+ *) <resource> is one of
+ -) BUFFER: buffer required by PCM data stream
+ -) STREAM: actual PCM data stream
+
+ The subsystems call line6_pcm_acquire() to acquire the (shared)
+ resources needed for a particular operation (e.g., allocate the buffer
+ for ALSA playback or start the capture stream for software monitoring).
+ When a resource is no longer needed, it is released by calling
+ line6_pcm_release(). Buffer allocation and stream startup are handled
+ separately to allow the ALSA kernel driver to perform them at
+ appropriate places (since the callback which starts a PCM stream is not
+ allowed to sleep).
*/
enum {
- /* individual bits: */
- BIT_PCM_ALSA_PLAYBACK,
- BIT_PCM_ALSA_CAPTURE,
- BIT_PCM_MONITOR_PLAYBACK,
- BIT_PCM_MONITOR_CAPTURE,
+ /* individual bit indices: */
+ LINE6_INDEX_PCM_ALSA_PLAYBACK_BUFFER,
+ LINE6_INDEX_PCM_ALSA_PLAYBACK_STREAM,
+ LINE6_INDEX_PCM_ALSA_CAPTURE_BUFFER,
+ LINE6_INDEX_PCM_ALSA_CAPTURE_STREAM,
+ LINE6_INDEX_PCM_MONITOR_PLAYBACK_BUFFER,
+ LINE6_INDEX_PCM_MONITOR_PLAYBACK_STREAM,
+ LINE6_INDEX_PCM_MONITOR_CAPTURE_BUFFER,
+ LINE6_INDEX_PCM_MONITOR_CAPTURE_STREAM,
#ifdef CONFIG_LINE6_USB_IMPULSE_RESPONSE
- BIT_PCM_IMPULSE_PLAYBACK,
- BIT_PCM_IMPULSE_CAPTURE,
+ LINE6_INDEX_PCM_IMPULSE_PLAYBACK_BUFFER,
+ LINE6_INDEX_PCM_IMPULSE_PLAYBACK_STREAM,
+ LINE6_INDEX_PCM_IMPULSE_CAPTURE_BUFFER,
+ LINE6_INDEX_PCM_IMPULSE_CAPTURE_STREAM,
#endif
- BIT_PAUSE_PLAYBACK,
- BIT_PREPARED,
-
- /* individual masks: */
-/* *INDENT-OFF* */
- MASK_PCM_ALSA_PLAYBACK = 1 << BIT_PCM_ALSA_PLAYBACK,
- MASK_PCM_ALSA_CAPTURE = 1 << BIT_PCM_ALSA_CAPTURE,
- MASK_PCM_MONITOR_PLAYBACK = 1 << BIT_PCM_MONITOR_PLAYBACK,
- MASK_PCM_MONITOR_CAPTURE = 1 << BIT_PCM_MONITOR_CAPTURE,
+ LINE6_INDEX_PAUSE_PLAYBACK,
+ LINE6_INDEX_PREPARED,
+
+ /* individual bit masks: */
+ LINE6_BIT(PCM_ALSA_PLAYBACK_BUFFER),
+ LINE6_BIT(PCM_ALSA_PLAYBACK_STREAM),
+ LINE6_BIT(PCM_ALSA_CAPTURE_BUFFER),
+ LINE6_BIT(PCM_ALSA_CAPTURE_STREAM),
+ LINE6_BIT(PCM_MONITOR_PLAYBACK_BUFFER),
+ LINE6_BIT(PCM_MONITOR_PLAYBACK_STREAM),
+ LINE6_BIT(PCM_MONITOR_CAPTURE_BUFFER),
+ LINE6_BIT(PCM_MONITOR_CAPTURE_STREAM),
#ifdef CONFIG_LINE6_USB_IMPULSE_RESPONSE
- MASK_PCM_IMPULSE_PLAYBACK = 1 << BIT_PCM_IMPULSE_PLAYBACK,
- MASK_PCM_IMPULSE_CAPTURE = 1 << BIT_PCM_IMPULSE_CAPTURE,
+ LINE6_BIT(PCM_IMPULSE_PLAYBACK_BUFFER),
+ LINE6_BIT(PCM_IMPULSE_PLAYBACK_STREAM),
+ LINE6_BIT(PCM_IMPULSE_CAPTURE_BUFFER),
+ LINE6_BIT(PCM_IMPULSE_CAPTURE_STREAM),
#endif
- MASK_PAUSE_PLAYBACK = 1 << BIT_PAUSE_PLAYBACK,
- MASK_PREPARED = 1 << BIT_PREPARED,
-/* *INDENT-ON* */
+ LINE6_BIT(PAUSE_PLAYBACK),
+ LINE6_BIT(PREPARED),
- /* combined masks (by operation): */
- MASK_PCM_ALSA = MASK_PCM_ALSA_PLAYBACK | MASK_PCM_ALSA_CAPTURE,
- MASK_PCM_MONITOR = MASK_PCM_MONITOR_PLAYBACK | MASK_PCM_MONITOR_CAPTURE,
+ /* combined bit masks (by operation): */
+ LINE6_BITS_PCM_ALSA_BUFFER =
+ LINE6_BIT_PCM_ALSA_PLAYBACK_BUFFER |
+ LINE6_BIT_PCM_ALSA_CAPTURE_BUFFER,
+
+ LINE6_BITS_PCM_ALSA_STREAM =
+ LINE6_BIT_PCM_ALSA_PLAYBACK_STREAM |
+ LINE6_BIT_PCM_ALSA_CAPTURE_STREAM,
+
+ LINE6_BITS_PCM_MONITOR =
+ LINE6_BIT_PCM_MONITOR_PLAYBACK_BUFFER |
+ LINE6_BIT_PCM_MONITOR_PLAYBACK_STREAM |
+ LINE6_BIT_PCM_MONITOR_CAPTURE_BUFFER |
+ LINE6_BIT_PCM_MONITOR_CAPTURE_STREAM,
+
+#ifdef CONFIG_LINE6_USB_IMPULSE_RESPONSE
+ LINE6_BITS_PCM_IMPULSE =
+ LINE6_BIT_PCM_IMPULSE_PLAYBACK_BUFFER |
+ LINE6_BIT_PCM_IMPULSE_PLAYBACK_STREAM |
+ LINE6_BIT_PCM_IMPULSE_CAPTURE_BUFFER |
+ LINE6_BIT_PCM_IMPULSE_CAPTURE_STREAM,
+#endif
+
+ /* combined bit masks (by direction): */
+ LINE6_BITS_PLAYBACK_BUFFER =
+#ifdef CONFIG_LINE6_USB_IMPULSE_RESPONSE
+ LINE6_BIT_PCM_IMPULSE_PLAYBACK_BUFFER |
+#endif
+ LINE6_BIT_PCM_ALSA_PLAYBACK_BUFFER |
+ LINE6_BIT_PCM_MONITOR_PLAYBACK_BUFFER,
+
+ LINE6_BITS_PLAYBACK_STREAM =
+#ifdef CONFIG_LINE6_USB_IMPULSE_RESPONSE
+ LINE6_BIT_PCM_IMPULSE_PLAYBACK_STREAM |
+#endif
+ LINE6_BIT_PCM_ALSA_PLAYBACK_STREAM |
+ LINE6_BIT_PCM_MONITOR_PLAYBACK_STREAM,
+
+ LINE6_BITS_CAPTURE_BUFFER =
#ifdef CONFIG_LINE6_USB_IMPULSE_RESPONSE
- MASK_PCM_IMPULSE = MASK_PCM_IMPULSE_PLAYBACK | MASK_PCM_IMPULSE_CAPTURE,
+ LINE6_BIT_PCM_IMPULSE_CAPTURE_BUFFER |
#endif
+ LINE6_BIT_PCM_ALSA_CAPTURE_BUFFER |
+ LINE6_BIT_PCM_MONITOR_CAPTURE_BUFFER,
- /* combined masks (by direction): */
+ LINE6_BITS_CAPTURE_STREAM =
#ifdef CONFIG_LINE6_USB_IMPULSE_RESPONSE
- MASK_PLAYBACK =
- MASK_PCM_ALSA_PLAYBACK | MASK_PCM_MONITOR_PLAYBACK |
- MASK_PCM_IMPULSE_PLAYBACK,
- MASK_CAPTURE =
- MASK_PCM_ALSA_CAPTURE | MASK_PCM_MONITOR_CAPTURE |
- MASK_PCM_IMPULSE_CAPTURE
-#else
- MASK_PLAYBACK = MASK_PCM_ALSA_PLAYBACK | MASK_PCM_MONITOR_PLAYBACK,
- MASK_CAPTURE = MASK_PCM_ALSA_CAPTURE | MASK_PCM_MONITOR_CAPTURE
+ LINE6_BIT_PCM_IMPULSE_CAPTURE_STREAM |
#endif
+ LINE6_BIT_PCM_ALSA_CAPTURE_STREAM |
+ LINE6_BIT_PCM_MONITOR_CAPTURE_STREAM,
+
+ LINE6_BITS_STREAM =
+ LINE6_BITS_PLAYBACK_STREAM |
+ LINE6_BITS_CAPTURE_STREAM
};
struct line6_pcm_properties {
@@ -290,7 +364,7 @@ struct snd_line6_pcm {
#endif
/**
- Several status bits (see BIT_*).
+ Several status bits (see LINE6_BIT_*).
*/
unsigned long flags;
@@ -302,16 +376,7 @@ extern int line6_init_pcm(struct usb_line6 *line6,
extern int snd_line6_trigger(struct snd_pcm_substream *substream, int cmd);
extern int snd_line6_prepare(struct snd_pcm_substream *substream);
extern void line6_pcm_disconnect(struct snd_line6_pcm *line6pcm);
-extern int line6_pcm_start(struct snd_line6_pcm *line6pcm, int channels);
-extern int line6_pcm_stop(struct snd_line6_pcm *line6pcm, int channels);
-
-#define PRINT_FRAME_DIFF(op) { \
- static int diff_prev = 1000; \
- int diff = line6pcm->last_frame_out - line6pcm->last_frame_in; \
- if ((diff != diff_prev) && (abs(diff) < 100)) { \
- printk(KERN_INFO "%s frame diff = %d\n", op, diff); \
- diff_prev = diff; \
- } \
-}
+extern int line6_pcm_acquire(struct snd_line6_pcm *line6pcm, int channels);
+extern int line6_pcm_release(struct snd_line6_pcm *line6pcm, int channels);
#endif
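
The comment block above separates each subsystem's BUFFER resource from its STREAM resource so that allocation can sleep while stream start cannot; the resulting call pattern, as exercised by the ALSA callbacks elsewhere in this patch (sketch only, function names illustrative):

/* hw_params callback: may sleep, so the buffer is acquired here. */
static int example_capture_hw_params(struct snd_line6_pcm *line6pcm)
{
	return line6_pcm_acquire(line6pcm, LINE6_BIT_PCM_ALSA_CAPTURE_BUFFER);
}

/* trigger callback: atomic context, so only the stream is started here. */
static int example_capture_start(struct snd_line6_pcm *line6pcm)
{
	return line6_pcm_acquire(line6pcm, LINE6_BIT_PCM_ALSA_CAPTURE_STREAM);
}

/* Release happens at the same granularity; releasing the buffer also
 * waits for the URBs to clear, per line6_pcm_release() above. */
static int example_capture_stop(struct snd_line6_pcm *line6pcm)
{
	line6_pcm_release(line6pcm, LINE6_BIT_PCM_ALSA_CAPTURE_STREAM);
	return line6_pcm_release(line6pcm, LINE6_BIT_PCM_ALSA_CAPTURE_BUFFER);
}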
diff --git a/drivers/staging/line6/playback.c b/drivers/staging/line6/playback.c
index 4152db2328b..a0ab9d0493f 100644
--- a/drivers/staging/line6/playback.c
+++ b/drivers/staging/line6/playback.c
@@ -166,7 +166,7 @@ static int submit_audio_out_urb(struct snd_line6_pcm *line6pcm)
struct usb_iso_packet_descriptor *fout =
&urb_out->iso_frame_desc[i];
- if (line6pcm->flags & MASK_CAPTURE)
+ if (line6pcm->flags & LINE6_BITS_CAPTURE_STREAM)
fsize = line6pcm->prev_fsize;
if (fsize == 0) {
@@ -196,8 +196,8 @@ static int submit_audio_out_urb(struct snd_line6_pcm *line6pcm)
urb_out->transfer_buffer_length = urb_size;
urb_out->context = line6pcm;
- if (test_bit(BIT_PCM_ALSA_PLAYBACK, &line6pcm->flags) &&
- !test_bit(BIT_PAUSE_PLAYBACK, &line6pcm->flags)) {
+ if (test_bit(LINE6_INDEX_PCM_ALSA_PLAYBACK_STREAM, &line6pcm->flags) &&
+ !test_bit(LINE6_INDEX_PAUSE_PLAYBACK, &line6pcm->flags)) {
struct snd_pcm_runtime *runtime =
get_substream(line6pcm, SNDRV_PCM_STREAM_PLAYBACK)->runtime;
@@ -238,10 +238,10 @@ static int submit_audio_out_urb(struct snd_line6_pcm *line6pcm)
if (line6pcm->prev_fbuf != NULL) {
#ifdef CONFIG_LINE6_USB_IMPULSE_RESPONSE
- if (line6pcm->flags & MASK_PCM_IMPULSE) {
+ if (line6pcm->flags & LINE6_BITS_PCM_IMPULSE) {
create_impulse_test_signal(line6pcm, urb_out,
bytes_per_frame);
- if (line6pcm->flags & MASK_PCM_ALSA_CAPTURE) {
+ if (line6pcm->flags & LINE6_BIT_PCM_ALSA_CAPTURE_STREAM) {
line6_capture_copy(line6pcm,
urb_out->transfer_buffer,
urb_out->
@@ -254,8 +254,8 @@ static int submit_audio_out_urb(struct snd_line6_pcm *line6pcm)
if (!
(line6pcm->line6->
properties->capabilities & LINE6_BIT_HWMON)
-&& (line6pcm->flags & MASK_PLAYBACK)
-&& (line6pcm->flags & MASK_CAPTURE))
+ && (line6pcm->flags & LINE6_BITS_PLAYBACK_STREAM)
+ && (line6pcm->flags & LINE6_BITS_CAPTURE_STREAM))
add_monitor_signal(urb_out, line6pcm->prev_fbuf,
line6pcm->volume_monitor,
bytes_per_frame);
@@ -321,7 +321,7 @@ void line6_unlink_audio_out_urbs(struct snd_line6_pcm *line6pcm)
/*
Wait until unlinking of all currently active playback URBs has been finished.
*/
-static void wait_clear_audio_out_urbs(struct snd_line6_pcm *line6pcm)
+void line6_wait_clear_audio_out_urbs(struct snd_line6_pcm *line6pcm)
{
int timeout = HZ;
unsigned int i;
@@ -348,26 +348,7 @@ static void wait_clear_audio_out_urbs(struct snd_line6_pcm *line6pcm)
void line6_unlink_wait_clear_audio_out_urbs(struct snd_line6_pcm *line6pcm)
{
line6_unlink_audio_out_urbs(line6pcm);
- wait_clear_audio_out_urbs(line6pcm);
-}
-
-int line6_alloc_playback_buffer(struct snd_line6_pcm *line6pcm)
-{
- /* We may be invoked multiple times in a row so allocate once only */
- if (line6pcm->buffer_out)
- return 0;
-
- line6pcm->buffer_out =
- kmalloc(LINE6_ISO_BUFFERS * LINE6_ISO_PACKETS *
- line6pcm->max_packet_size, GFP_KERNEL);
-
- if (!line6pcm->buffer_out) {
- dev_err(line6pcm->line6->ifcdev,
- "cannot malloc playback buffer\n");
- return -ENOMEM;
- }
-
- return 0;
+ line6_wait_clear_audio_out_urbs(line6pcm);
}
void line6_free_playback_buffer(struct snd_line6_pcm *line6pcm)
@@ -407,7 +388,7 @@ static void audio_out_callback(struct urb *urb)
spin_lock_irqsave(&line6pcm->lock_audio_out, flags);
- if (test_bit(BIT_PCM_ALSA_PLAYBACK, &line6pcm->flags)) {
+ if (test_bit(LINE6_INDEX_PCM_ALSA_PLAYBACK_STREAM, &line6pcm->flags)) {
struct snd_pcm_runtime *runtime = substream->runtime;
line6pcm->pos_out_done +=
length / line6pcm->properties->bytes_per_frame;
@@ -432,7 +413,7 @@ static void audio_out_callback(struct urb *urb)
if (!shutdown) {
submit_audio_out_urb(line6pcm);
- if (test_bit(BIT_PCM_ALSA_PLAYBACK, &line6pcm->flags)) {
+ if (test_bit(LINE6_INDEX_PCM_ALSA_PLAYBACK_STREAM, &line6pcm->flags)) {
line6pcm->bytes_out += length;
if (line6pcm->bytes_out >= line6pcm->period_out) {
line6pcm->bytes_out %= line6pcm->period_out;
@@ -484,17 +465,17 @@ static int snd_line6_playback_hw_params(struct snd_pcm_substream *substream,
}
/* -- [FD] end */
- if ((line6pcm->flags & MASK_PLAYBACK) == 0) {
- ret = line6_alloc_playback_buffer(line6pcm);
+ ret = line6_pcm_acquire(line6pcm, LINE6_BIT_PCM_ALSA_PLAYBACK_BUFFER);
- if (ret < 0)
- return ret;
- }
+ if (ret < 0)
+ return ret;
ret = snd_pcm_lib_malloc_pages(substream,
params_buffer_bytes(hw_params));
- if (ret < 0)
+ if (ret < 0) {
+ line6_pcm_release(line6pcm, LINE6_BIT_PCM_ALSA_PLAYBACK_BUFFER);
return ret;
+ }
line6pcm->period_out = params_period_bytes(hw_params);
return 0;
@@ -504,12 +485,7 @@ static int snd_line6_playback_hw_params(struct snd_pcm_substream *substream,
static int snd_line6_playback_hw_free(struct snd_pcm_substream *substream)
{
struct snd_line6_pcm *line6pcm = snd_pcm_substream_chip(substream);
-
- if ((line6pcm->flags & MASK_PLAYBACK) == 0) {
- line6_unlink_wait_clear_audio_out_urbs(line6pcm);
- line6_free_playback_buffer(line6pcm);
- }
-
+ line6_pcm_release(line6pcm, LINE6_BIT_PCM_ALSA_PLAYBACK_BUFFER);
return snd_pcm_lib_free_pages(substream);
}
@@ -523,7 +499,7 @@ int snd_line6_playback_trigger(struct snd_line6_pcm *line6pcm, int cmd)
#ifdef CONFIG_PM
case SNDRV_PCM_TRIGGER_RESUME:
#endif
- err = line6_pcm_start(line6pcm, MASK_PCM_ALSA_PLAYBACK);
+ err = line6_pcm_acquire(line6pcm, LINE6_BIT_PCM_ALSA_PLAYBACK_STREAM);
if (err < 0)
return err;
@@ -534,7 +510,7 @@ int snd_line6_playback_trigger(struct snd_line6_pcm *line6pcm, int cmd)
#ifdef CONFIG_PM
case SNDRV_PCM_TRIGGER_SUSPEND:
#endif
- err = line6_pcm_stop(line6pcm, MASK_PCM_ALSA_PLAYBACK);
+ err = line6_pcm_release(line6pcm, LINE6_BIT_PCM_ALSA_PLAYBACK_STREAM);
if (err < 0)
return err;
@@ -542,11 +518,11 @@ int snd_line6_playback_trigger(struct snd_line6_pcm *line6pcm, int cmd)
break;
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
- set_bit(BIT_PAUSE_PLAYBACK, &line6pcm->flags);
+ set_bit(LINE6_INDEX_PAUSE_PLAYBACK, &line6pcm->flags);
break;
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
- clear_bit(BIT_PAUSE_PLAYBACK, &line6pcm->flags);
+ clear_bit(LINE6_INDEX_PAUSE_PLAYBACK, &line6pcm->flags);
break;
default:
diff --git a/drivers/staging/line6/playback.h b/drivers/staging/line6/playback.h
index 02487ff2453..743bd6f74c5 100644
--- a/drivers/staging/line6/playback.h
+++ b/drivers/staging/line6/playback.h
@@ -29,13 +29,13 @@
extern struct snd_pcm_ops snd_line6_playback_ops;
-extern int line6_alloc_playback_buffer(struct snd_line6_pcm *line6pcm);
extern int line6_create_audio_out_urbs(struct snd_line6_pcm *line6pcm);
extern void line6_free_playback_buffer(struct snd_line6_pcm *line6pcm);
extern int line6_submit_audio_out_all_urbs(struct snd_line6_pcm *line6pcm);
extern void line6_unlink_audio_out_urbs(struct snd_line6_pcm *line6pcm);
extern void line6_unlink_wait_clear_audio_out_urbs(struct snd_line6_pcm
*line6pcm);
+extern void line6_wait_clear_audio_out_urbs(struct snd_line6_pcm *line6pcm);
extern int snd_line6_playback_trigger(struct snd_line6_pcm *line6pcm, int cmd);
#endif
diff --git a/drivers/staging/line6/toneport.c b/drivers/staging/line6/toneport.c
index f31057830db..b754f69a29c 100644
--- a/drivers/staging/line6/toneport.c
+++ b/drivers/staging/line6/toneport.c
@@ -207,9 +207,9 @@ static int snd_toneport_monitor_put(struct snd_kcontrol *kcontrol,
line6pcm->volume_monitor = ucontrol->value.integer.value[0];
if (line6pcm->volume_monitor > 0)
- line6_pcm_start(line6pcm, MASK_PCM_MONITOR);
+ line6_pcm_acquire(line6pcm, LINE6_BITS_PCM_MONITOR);
else
- line6_pcm_stop(line6pcm, MASK_PCM_MONITOR);
+ line6_pcm_release(line6pcm, LINE6_BITS_PCM_MONITOR);
return 1;
}
@@ -264,7 +264,7 @@ static void toneport_start_pcm(unsigned long arg)
{
struct usb_line6_toneport *toneport = (struct usb_line6_toneport *)arg;
struct usb_line6 *line6 = &toneport->line6;
- line6_pcm_start(line6->line6pcm, MASK_PCM_MONITOR);
+ line6_pcm_acquire(line6->line6pcm, LINE6_BITS_PCM_MONITOR);
}
/* control definition */
@@ -320,7 +320,9 @@ static void toneport_setup(struct usb_line6_toneport *toneport)
/* initialize source select: */
switch (usbdev->descriptor.idProduct) {
case LINE6_DEVID_TONEPORT_UX1:
+ case LINE6_DEVID_TONEPORT_UX2:
case LINE6_DEVID_PODSTUDIO_UX1:
+ case LINE6_DEVID_PODSTUDIO_UX2:
toneport_send_cmd(usbdev,
toneport_source_info[toneport->source].code,
0x0000);
@@ -363,7 +365,9 @@ static int toneport_try_init(struct usb_interface *interface,
/* register source select control: */
switch (usbdev->descriptor.idProduct) {
case LINE6_DEVID_TONEPORT_UX1:
+ case LINE6_DEVID_TONEPORT_UX2:
case LINE6_DEVID_PODSTUDIO_UX1:
+ case LINE6_DEVID_PODSTUDIO_UX2:
err =
snd_ctl_add(line6->card,
snd_ctl_new1(&toneport_control_source,
@@ -442,7 +446,7 @@ void line6_toneport_disconnect(struct usb_interface *interface)
struct snd_line6_pcm *line6pcm = toneport->line6.line6pcm;
if (line6pcm != NULL) {
- line6_pcm_stop(line6pcm, MASK_PCM_MONITOR);
+ line6_pcm_release(line6pcm, LINE6_BITS_PCM_MONITOR);
line6_pcm_disconnect(line6pcm);
}
}
diff --git a/drivers/staging/line6/usbdefs.h b/drivers/staging/line6/usbdefs.h
index aff9e5caea4..353d59d77b0 100644
--- a/drivers/staging/line6/usbdefs.h
+++ b/drivers/staging/line6/usbdefs.h
@@ -39,31 +39,29 @@
#define LINE6_DEVID_TONEPORT_UX2 0x4142
#define LINE6_DEVID_VARIAX 0x534d
-enum {
- LINE6_ID_BASSPODXT,
- LINE6_ID_BASSPODXTLIVE,
- LINE6_ID_BASSPODXTPRO,
- LINE6_ID_GUITARPORT,
- LINE6_ID_POCKETPOD,
- LINE6_ID_PODHD300,
- LINE6_ID_PODHD500,
- LINE6_ID_PODSTUDIO_GX,
- LINE6_ID_PODSTUDIO_UX1,
- LINE6_ID_PODSTUDIO_UX2,
- LINE6_ID_PODX3,
- LINE6_ID_PODX3LIVE,
- LINE6_ID_PODXT,
- LINE6_ID_PODXTLIVE,
- LINE6_ID_PODXTPRO,
- LINE6_ID_TONEPORT_GX,
- LINE6_ID_TONEPORT_UX1,
- LINE6_ID_TONEPORT_UX2,
- LINE6_ID_VARIAX
-};
-
-#define LINE6_BIT(x) LINE6_BIT_ ## x = 1 << LINE6_ID_ ## x
+#define LINE6_BIT(x) LINE6_BIT_ ## x = 1 << LINE6_INDEX_ ## x
enum {
+ LINE6_INDEX_BASSPODXT,
+ LINE6_INDEX_BASSPODXTLIVE,
+ LINE6_INDEX_BASSPODXTPRO,
+ LINE6_INDEX_GUITARPORT,
+ LINE6_INDEX_POCKETPOD,
+ LINE6_INDEX_PODHD300,
+ LINE6_INDEX_PODHD500,
+ LINE6_INDEX_PODSTUDIO_GX,
+ LINE6_INDEX_PODSTUDIO_UX1,
+ LINE6_INDEX_PODSTUDIO_UX2,
+ LINE6_INDEX_PODX3,
+ LINE6_INDEX_PODX3LIVE,
+ LINE6_INDEX_PODXT,
+ LINE6_INDEX_PODXTLIVE,
+ LINE6_INDEX_PODXTPRO,
+ LINE6_INDEX_TONEPORT_GX,
+ LINE6_INDEX_TONEPORT_UX1,
+ LINE6_INDEX_TONEPORT_UX2,
+ LINE6_INDEX_VARIAX,
+
LINE6_BIT(BASSPODXT),
LINE6_BIT(BASSPODXTLIVE),
LINE6_BIT(BASSPODXTPRO),
diff --git a/drivers/staging/media/easycap/easycap_main.c b/drivers/staging/media/easycap/easycap_main.c
index 8ff5f38ea19..3d439b790cc 100644
--- a/drivers/staging/media/easycap/easycap_main.c
+++ b/drivers/staging/media/easycap/easycap_main.c
@@ -3825,6 +3825,7 @@ static int easycap_usb_probe(struct usb_interface *intf,
/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
pdata_urb = kzalloc(sizeof(struct data_urb), GFP_KERNEL);
if (!pdata_urb) {
+ usb_free_urb(purb);
SAM("ERROR: Could not allocate struct data_urb.\n");
return -ENOMEM;
}
diff --git a/drivers/staging/media/lirc/lirc_sasem.c b/drivers/staging/media/lirc/lirc_sasem.c
index 7855baa18e7..74421043b95 100644
--- a/drivers/staging/media/lirc/lirc_sasem.c
+++ b/drivers/staging/media/lirc/lirc_sasem.c
@@ -90,11 +90,11 @@ static void __exit sasem_exit(void);
struct sasem_context {
struct usb_device *dev;
- int vfd_isopen; /* VFD port has been opened */
- unsigned int vfd_contrast; /* VFD contrast */
- int ir_isopen; /* IR port has been opened */
- int dev_present; /* USB device presence */
- struct mutex ctx_lock; /* to lock this object */
+ int vfd_isopen; /* VFD port has been opened */
+ unsigned int vfd_contrast; /* VFD contrast */
+ int ir_isopen; /* IR port has been opened */
+ int dev_present; /* USB device presence */
+ struct mutex ctx_lock; /* to lock this object */
wait_queue_head_t remove_ok; /* For unexpected USB disconnects */
struct lirc_driver *driver;
@@ -106,10 +106,11 @@ struct sasem_context {
unsigned char usb_tx_buf[8];
struct tx_t {
- unsigned char data_buf[SASEM_DATA_BUF_SZ]; /* user data buffer */
+ unsigned char data_buf[SASEM_DATA_BUF_SZ]; /* user data
+ * buffer */
struct completion finished; /* wait for write to finish */
- atomic_t busy; /* write in progress */
- int status; /* status of tx completion */
+ atomic_t busy; /* write in progress */
+ int status; /* status of tx completion */
} tx;
/* for dealing with repeat codes (wish there was a toggle bit!) */
diff --git a/drivers/staging/mei/TODO b/drivers/staging/mei/TODO
index 7d9a13b0f2d..fc266018355 100644
--- a/drivers/staging/mei/TODO
+++ b/drivers/staging/mei/TODO
@@ -3,5 +3,8 @@ TODO:
Upon Unstaging:
- move mei.h to include/linux/mei.h
- Documentation/ioctl/ioctl-number.txt
+ - move mei.txt under Documentation/mei/
+ - move mei-amt-version.c under Documentation/mei
+ - add hostprogs-y for mei-amt-version.c
- drop mei_version.h
- Updated MAINTAINERS
diff --git a/drivers/staging/mei/hw.h b/drivers/staging/mei/hw.h
index 9b9008cb693..24c4c962819 100644
--- a/drivers/staging/mei/hw.h
+++ b/drivers/staging/mei/hw.h
@@ -1,7 +1,7 @@
/*
*
* Intel Management Engine Interface (Intel MEI) Linux driver
- * Copyright (c) 2003-2011, Intel Corporation.
+ * Copyright (c) 2003-2012, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -141,6 +141,11 @@ access to ME_CBD */
#define HBM_MAJOR_VERSION 1
#define HBM_TIMEOUT 1 /* 1 second */
+/* Host bus message command opcode */
+#define MEI_HBM_CMD_OP_MSK 0x7f
+/* Host bus message command RESPONSE */
+#define MEI_HBM_CMD_RES_MSK 0x80
+
/*
* MEI Bus Message Command IDs
*/
@@ -164,7 +169,7 @@ access to ME_CBD */
#define CLIENT_DISCONNECT_REQ_CMD 0x07
#define CLIENT_DISCONNECT_RES_CMD 0x87
-#define MEI_FLOW_CONTROL_CMD 0x08
+#define MEI_FLOW_CONTROL_CMD 0x08
/*
* MEI Stop Reason
@@ -213,15 +218,9 @@ struct mei_msg_hdr {
} __packed;
-struct hbm_cmd {
- u8 cmd:7;
- u8 is_response:1;
-} __packed;
-
-
struct mei_bus_message {
- struct hbm_cmd cmd;
- u8 command_specific_data[];
+ u8 hbm_cmd;
+ u8 data[0];
} __packed;
struct hbm_version {
@@ -230,41 +229,41 @@ struct hbm_version {
} __packed;
struct hbm_host_version_request {
- struct hbm_cmd cmd;
+ u8 hbm_cmd;
u8 reserved;
struct hbm_version host_version;
} __packed;
struct hbm_host_version_response {
- struct hbm_cmd cmd;
- int host_version_supported;
+ u8 hbm_cmd;
+ u8 host_version_supported;
struct hbm_version me_max_version;
} __packed;
struct hbm_host_stop_request {
- struct hbm_cmd cmd;
+ u8 hbm_cmd;
u8 reason;
u8 reserved[2];
} __packed;
struct hbm_host_stop_response {
- struct hbm_cmd cmd;
+ u8 hbm_cmd;
u8 reserved[3];
} __packed;
struct hbm_me_stop_request {
- struct hbm_cmd cmd;
+ u8 hbm_cmd;
u8 reason;
u8 reserved[2];
} __packed;
struct hbm_host_enum_request {
- struct hbm_cmd cmd;
+ u8 hbm_cmd;
u8 reserved[3];
} __packed;
struct hbm_host_enum_response {
- struct hbm_cmd cmd;
+ u8 hbm_cmd;
u8 reserved[3];
u8 valid_addresses[32];
} __packed;
@@ -279,14 +278,14 @@ struct mei_client_properties {
} __packed;
struct hbm_props_request {
- struct hbm_cmd cmd;
+ u8 hbm_cmd;
u8 address;
u8 reserved[2];
} __packed;
struct hbm_props_response {
- struct hbm_cmd cmd;
+ u8 hbm_cmd;
u8 address;
u8 status;
u8 reserved[1];
@@ -294,21 +293,21 @@ struct hbm_props_response {
} __packed;
struct hbm_client_connect_request {
- struct hbm_cmd cmd;
+ u8 hbm_cmd;
u8 me_addr;
u8 host_addr;
u8 reserved;
} __packed;
struct hbm_client_connect_response {
- struct hbm_cmd cmd;
+ u8 hbm_cmd;
u8 me_addr;
u8 host_addr;
u8 status;
} __packed;
struct hbm_client_disconnect_request {
- struct hbm_cmd cmd;
+ u8 hbm_cmd;
u8 me_addr;
u8 host_addr;
u8 reserved[1];
@@ -317,7 +316,7 @@ struct hbm_client_disconnect_request {
#define MEI_FC_MESSAGE_RESERVED_LENGTH 5
struct hbm_flow_control {
- struct hbm_cmd cmd;
+ u8 hbm_cmd;
u8 me_addr;
u8 host_addr;
u8 reserved[MEI_FC_MESSAGE_RESERVED_LENGTH];
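
With struct hbm_cmd's bitfields replaced by a plain u8, decoding uses the two masks added above; the response command IDs already encode this split (e.g. CLIENT_DISCONNECT_RES_CMD 0x87 = 0x07 | 0x80). A minimal sketch, with the helper names being illustrative only:

/* Hypothetical helpers for decoding a host bus message command byte. */
static inline u8 mei_hbm_cmd_opcode(u8 hbm_cmd)
{
	return hbm_cmd & MEI_HBM_CMD_OP_MSK;	/* low 7 bits: opcode */
}

static inline int mei_hbm_cmd_is_response(u8 hbm_cmd)
{
	return (hbm_cmd & MEI_HBM_CMD_RES_MSK) != 0;	/* bit 7: response */
}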
diff --git a/drivers/staging/mei/init.c b/drivers/staging/mei/init.c
index 4ac3696883c..eab711fb5fc 100644
--- a/drivers/staging/mei/init.c
+++ b/drivers/staging/mei/init.c
@@ -1,7 +1,7 @@
/*
*
* Intel Management Engine Interface (Intel MEI) Linux driver
- * Copyright (c) 2003-2011, Intel Corporation.
+ * Copyright (c) 2003-2012, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -362,11 +362,11 @@ void mei_host_start_message(struct mei_device *dev)
host_start_req =
(struct hbm_host_version_request *) &dev->wr_msg_buf[1];
memset(host_start_req, 0, sizeof(struct hbm_host_version_request));
- host_start_req->cmd.cmd = HOST_START_REQ_CMD;
+ host_start_req->hbm_cmd = HOST_START_REQ_CMD;
host_start_req->host_version.major_version = HBM_MAJOR_VERSION;
host_start_req->host_version.minor_version = HBM_MINOR_VERSION;
dev->recvd_msg = false;
- if (!mei_write_message(dev, mei_hdr, (unsigned char *)host_start_req,
+ if (mei_write_message(dev, mei_hdr, (unsigned char *)host_start_req,
mei_hdr->length)) {
dev_dbg(&dev->pdev->dev, "write send version message to FW fail.\n");
dev->mei_state = MEI_RESETING;
@@ -398,8 +398,8 @@ void mei_host_enum_clients_message(struct mei_device *dev)
host_enum_req = (struct hbm_host_enum_request *) &dev->wr_msg_buf[1];
memset(host_enum_req, 0, sizeof(struct hbm_host_enum_request));
- host_enum_req->cmd.cmd = HOST_ENUM_REQ_CMD;
- if (!mei_write_message(dev, mei_hdr, (unsigned char *)host_enum_req,
+ host_enum_req->hbm_cmd = HOST_ENUM_REQ_CMD;
+ if (mei_write_message(dev, mei_hdr, (unsigned char *)host_enum_req,
mei_hdr->length)) {
dev->mei_state = MEI_RESETING;
dev_dbg(&dev->pdev->dev, "write send enumeration request message to FW fail.\n");
@@ -407,7 +407,7 @@ void mei_host_enum_clients_message(struct mei_device *dev)
}
dev->init_clients_state = MEI_ENUM_CLIENTS_MESSAGE;
dev->init_clients_timer = INIT_CLIENTS_TIMEOUT;
- return ;
+ return;
}
@@ -482,10 +482,10 @@ int mei_host_client_properties(struct mei_device *dev)
memset(host_cli_req, 0, sizeof(struct hbm_props_request));
- host_cli_req->cmd.cmd = HOST_CLIENT_PROPERTIES_REQ_CMD;
+ host_cli_req->hbm_cmd = HOST_CLIENT_PROPERTIES_REQ_CMD;
host_cli_req->address = b;
- if (!mei_write_message(dev, mei_header,
+ if (mei_write_message(dev, mei_header,
(unsigned char *)host_cli_req,
mei_header->length)) {
dev->mei_state = MEI_RESETING;
@@ -608,7 +608,7 @@ void mei_host_init_iamthif(struct mei_device *dev)
dev->iamthif_msg_buf = msg_buf;
- if (!mei_connect(dev, &dev->iamthif_cl)) {
+ if (mei_connect(dev, &dev->iamthif_cl)) {
dev_dbg(&dev->pdev->dev, "Failed to connect to AMTHI client\n");
dev->iamthif_cl.state = MEI_FILE_DISCONNECTED;
dev->iamthif_cl.host_client_id = 0;
@@ -670,14 +670,12 @@ int mei_disconnect_host_client(struct mei_device *dev, struct mei_cl *cl)
if (dev->mei_host_buffer_is_empty) {
dev->mei_host_buffer_is_empty = false;
if (mei_disconnect(dev, cl)) {
- mdelay(10); /* Wait for hardware disconnection ready */
- list_add_tail(&cb->cb_list,
- &dev->ctrl_rd_list.mei_cb.cb_list);
- } else {
rets = -ENODEV;
dev_dbg(&dev->pdev->dev, "failed to call mei_disconnect.\n");
goto free;
}
+ mdelay(10); /* Wait for hardware disconnection ready */
+ list_add_tail(&cb->cb_list, &dev->ctrl_rd_list.mei_cb.cb_list);
} else {
dev_dbg(&dev->pdev->dev, "add disconnect cb to control write list\n");
list_add_tail(&cb->cb_list,
diff --git a/drivers/staging/mei/interface.c b/drivers/staging/mei/interface.c
index eb5df7fc226..9a2cfafc52a 100644
--- a/drivers/staging/mei/interface.c
+++ b/drivers/staging/mei/interface.c
@@ -1,7 +1,7 @@
/*
*
* Intel Management Engine Interface (Intel MEI) Linux driver
- * Copyright (c) 2003-2011, Intel Corporation.
+ * Copyright (c) 2003-2012, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -125,7 +125,7 @@ int mei_count_empty_write_slots(struct mei_device *dev)
* @write_buffer: message buffer will be written
* @write_length: message size will be written
*
- * returns 1 if success, 0 - otherwise.
+ * This function returns -EIO if write has failed
*/
int mei_write_message(struct mei_device *dev,
struct mei_msg_hdr *header,
@@ -157,7 +157,7 @@ int mei_write_message(struct mei_device *dev,
dw_to_write = ((write_length + 3) / 4);
if (dw_to_write > empty_slots)
- return 0;
+ return -EIO;
mei_reg_write(dev, H_CB_WW, *((u32 *) header));
@@ -177,9 +177,9 @@ int mei_write_message(struct mei_device *dev,
mei_hcsr_set(dev);
dev->me_hw_state = mei_mecsr_read(dev);
if ((dev->me_hw_state & ME_RDY_HRA) != ME_RDY_HRA)
- return 0;
+ return -EIO;
- return 1;
+ return 0;
}
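With this hunk mei_write_message follows the usual kernel convention (0 on success, negative errno on failure), which is why every caller in the rest of the series drops the ! from its error check. The call-site pattern, as a minimal before/after sketch:

	/* before: returned 1 on success, 0 on failure */
	if (!mei_write_message(dev, mei_hdr, buf, len))
		goto err;

	/* after: returns 0 on success, -EIO on failure */
	if (mei_write_message(dev, mei_hdr, buf, len))
		goto err;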
/**
@@ -215,26 +215,17 @@ int mei_count_full_read_slots(struct mei_device *dev)
* @buffer: message buffer will be written
* @buffer_length: message size will be read
*/
-void mei_read_slots(struct mei_device *dev,
- unsigned char *buffer, unsigned long buffer_length)
+void mei_read_slots(struct mei_device *dev, unsigned char *buffer,
+ unsigned long buffer_length)
{
- u32 i = 0;
- unsigned char temp_buf[sizeof(u32)];
-
- while (buffer_length >= sizeof(u32)) {
- ((u32 *) buffer)[i] = mei_mecbrw_read(dev);
+ u32 *reg_buf = (u32 *)buffer;
- dev_dbg(&dev->pdev->dev,
- "buffer[%d]= %d\n",
- i, ((u32 *) buffer)[i]);
-
- i++;
- buffer_length -= sizeof(u32);
- }
+ for (; buffer_length >= sizeof(u32); buffer_length -= sizeof(u32))
+ *reg_buf++ = mei_mecbrw_read(dev);
if (buffer_length > 0) {
- *((u32 *) &temp_buf) = mei_mecbrw_read(dev);
- memcpy(&buffer[i * 4], temp_buf, buffer_length);
+ u32 reg = mei_mecbrw_read(dev);
+ memcpy(reg_buf, &reg, buffer_length);
}
dev->host_hw_state |= H_IG;
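The rewritten mei_read_slots drains whole 32-bit reads from the circular-buffer register and handles any unaligned tail with a single extra read. As a worked example (values illustrative): a 10-byte message takes two full u32 reads for bytes 0-7, then one more register read of which only the first 2 bytes are kept:

	unsigned long remaining = 10;
	u32 *dst = (u32 *)buffer;

	for (; remaining >= sizeof(u32); remaining -= sizeof(u32))
		*dst++ = mei_mecbrw_read(dev);	/* bytes 0-3, then 4-7 */
	if (remaining) {			/* 2 bytes left over */
		u32 reg = mei_mecbrw_read(dev);
		memcpy(dst, &reg, remaining);	/* copy only the tail */
	}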
@@ -284,7 +275,7 @@ int mei_flow_ctrl_creds(struct mei_device *dev, struct mei_cl *cl)
* @returns
* 0 on success
* -ENOENT when me client is not found
- * -EINVAL wehn ctrl credits are <= 0
+ * -EINVAL when ctrl credits are <= 0
*/
int mei_flow_ctrl_reduce(struct mei_device *dev, struct mei_cl *cl)
{
@@ -317,7 +308,7 @@ int mei_flow_ctrl_reduce(struct mei_device *dev, struct mei_cl *cl)
* @dev: the device structure
* @cl: private data of the file object
*
- * returns 1 if success, 0 - otherwise.
+ * This function returns -EIO on write failure
*/
int mei_send_flow_control(struct mei_device *dev, struct mei_cl *cl)
{
@@ -335,18 +326,15 @@ int mei_send_flow_control(struct mei_device *dev, struct mei_cl *cl)
memset(mei_flow_control, 0, sizeof(*mei_flow_control));
mei_flow_control->host_addr = cl->host_client_id;
mei_flow_control->me_addr = cl->me_client_id;
- mei_flow_control->cmd.cmd = MEI_FLOW_CONTROL_CMD;
+ mei_flow_control->hbm_cmd = MEI_FLOW_CONTROL_CMD;
memset(mei_flow_control->reserved, 0,
sizeof(mei_flow_control->reserved));
dev_dbg(&dev->pdev->dev, "sending flow control host client = %d, ME client = %d\n",
- cl->host_client_id, cl->me_client_id);
- if (!mei_write_message(dev, mei_hdr,
- (unsigned char *) mei_flow_control,
- sizeof(struct hbm_flow_control)))
- return 0;
-
- return 1;
+ cl->host_client_id, cl->me_client_id);
+ return mei_write_message(dev, mei_hdr,
+ (unsigned char *) mei_flow_control,
+ sizeof(struct hbm_flow_control));
}
/**
@@ -380,7 +368,7 @@ int mei_other_client_is_connecting(struct mei_device *dev,
* @dev: the device structure
* @cl: private data of the file object
*
- * returns 1 if success, 0 - otherwise.
+ * This function returns -EIO on write failure
*/
int mei_disconnect(struct mei_device *dev, struct mei_cl *cl)
{
@@ -399,15 +387,12 @@ int mei_disconnect(struct mei_device *dev, struct mei_cl *cl)
memset(mei_cli_disconnect, 0, sizeof(*mei_cli_disconnect));
mei_cli_disconnect->host_addr = cl->host_client_id;
mei_cli_disconnect->me_addr = cl->me_client_id;
- mei_cli_disconnect->cmd.cmd = CLIENT_DISCONNECT_REQ_CMD;
+ mei_cli_disconnect->hbm_cmd = CLIENT_DISCONNECT_REQ_CMD;
mei_cli_disconnect->reserved[0] = 0;
- if (!mei_write_message(dev, mei_hdr,
+ return mei_write_message(dev, mei_hdr,
(unsigned char *) mei_cli_disconnect,
- sizeof(struct hbm_client_disconnect_request)))
- return 0;
-
- return 1;
+ sizeof(struct hbm_client_disconnect_request));
}
/**
@@ -416,7 +401,7 @@ int mei_disconnect(struct mei_device *dev, struct mei_cl *cl)
* @dev: the device structure
* @cl: private data of the file object
*
- * returns 1 if success, 0 - otherwise.
+ * This function returns -EIO on write failure
*/
int mei_connect(struct mei_device *dev, struct mei_cl *cl)
{
@@ -434,13 +419,10 @@ int mei_connect(struct mei_device *dev, struct mei_cl *cl)
(struct hbm_client_connect_request *) &dev->wr_msg_buf[1];
mei_cli_connect->host_addr = cl->host_client_id;
mei_cli_connect->me_addr = cl->me_client_id;
- mei_cli_connect->cmd.cmd = CLIENT_CONNECT_REQ_CMD;
+ mei_cli_connect->hbm_cmd = CLIENT_CONNECT_REQ_CMD;
mei_cli_connect->reserved = 0;
- if (!mei_write_message(dev, mei_hdr,
+ return mei_write_message(dev, mei_hdr,
(unsigned char *) mei_cli_connect,
- sizeof(struct hbm_client_connect_request)))
- return 0;
-
- return 1;
+ sizeof(struct hbm_client_connect_request));
}
diff --git a/drivers/staging/mei/interface.h b/drivers/staging/mei/interface.h
index aeae511419c..fb90c6f8a75 100644
--- a/drivers/staging/mei/interface.h
+++ b/drivers/staging/mei/interface.h
@@ -1,7 +1,7 @@
/*
*
* Intel Management Engine Interface (Intel MEI) Linux driver
- * Copyright (c) 2003-2011, Intel Corporation.
+ * Copyright (c) 2003-2012, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -33,7 +33,8 @@
void mei_read_slots(struct mei_device *dev,
- unsigned char *buffer, unsigned long buffer_length);
+ unsigned char *buffer,
+ unsigned long buffer_length);
int mei_write_message(struct mei_device *dev,
struct mei_msg_hdr *header,
@@ -59,7 +60,7 @@ void mei_wd_set_start_timeout(struct mei_device *dev, u16 timeout);
*/
void mei_watchdog_register(struct mei_device *dev);
/*
- * mei_watchdog_unregister - Uegistering watchdog interface
+ * mei_watchdog_unregister - Unregistering watchdog interface
* @dev - mei device
*/
void mei_watchdog_unregister(struct mei_device *dev);
diff --git a/drivers/staging/mei/interrupt.c b/drivers/staging/mei/interrupt.c
index 3544fee34e4..2007d2447b1 100644
--- a/drivers/staging/mei/interrupt.c
+++ b/drivers/staging/mei/interrupt.c
@@ -1,7 +1,7 @@
/*
*
* Intel Management Engine Interface (Intel MEI) Linux driver
- * Copyright (c) 2003-2011, Intel Corporation.
+ * Copyright (c) 2003-2012, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -123,8 +123,7 @@ static int mei_irq_thread_read_amthi_message(struct mei_io_list *complete_list,
BUG_ON(mei_hdr->me_addr != dev->iamthif_cl.me_client_id);
BUG_ON(dev->iamthif_state != MEI_IAMTHIF_READING);
- buffer = (unsigned char *) (dev->iamthif_msg_buf +
- dev->iamthif_msg_buf_index);
+ buffer = dev->iamthif_msg_buf + dev->iamthif_msg_buf_index;
BUG_ON(dev->iamthif_mtu < dev->iamthif_msg_buf_index + mei_hdr->length);
mei_read_slots(dev, buffer, mei_hdr->length);
@@ -206,9 +205,7 @@ static int mei_irq_thread_read_client_message(struct mei_io_list *complete_list,
cl = (struct mei_cl *)cb_pos->file_private;
if (cl && _mei_irq_thread_state_ok(cl, mei_hdr)) {
cl->reading_state = MEI_READING;
- buffer = (unsigned char *)
- (cb_pos->response_buffer.data +
- cb_pos->information);
+ buffer = cb_pos->response_buffer.data + cb_pos->information;
if (cb_pos->response_buffer.size <
mei_hdr->length + cb_pos->information) {
@@ -247,8 +244,7 @@ static int mei_irq_thread_read_client_message(struct mei_io_list *complete_list,
quit:
dev_dbg(&dev->pdev->dev, "message read\n");
if (!buffer) {
- mei_read_slots(dev, (unsigned char *) dev->rd_msg_buf,
- mei_hdr->length);
+ mei_read_slots(dev, dev->rd_msg_buf, mei_hdr->length);
dev_dbg(&dev->pdev->dev, "discarding message, header =%08x.\n",
*(u32 *) dev->rd_msg_buf);
}
@@ -267,26 +263,25 @@ quit:
static int _mei_irq_thread_iamthif_read(struct mei_device *dev, s32 *slots)
{
- if (((*slots) * sizeof(u32)) >= (sizeof(struct mei_msg_hdr)
+ if (((*slots) * sizeof(u32)) < (sizeof(struct mei_msg_hdr)
+ sizeof(struct hbm_flow_control))) {
- *slots -= (sizeof(struct mei_msg_hdr) +
- sizeof(struct hbm_flow_control) + 3) / 4;
- if (!mei_send_flow_control(dev, &dev->iamthif_cl)) {
- dev_dbg(&dev->pdev->dev, "iamthif flow control failed\n");
- } else {
- dev_dbg(&dev->pdev->dev, "iamthif flow control success\n");
- dev->iamthif_state = MEI_IAMTHIF_READING;
- dev->iamthif_flow_control_pending = false;
- dev->iamthif_msg_buf_index = 0;
- dev->iamthif_msg_buf_size = 0;
- dev->iamthif_stall_timer = IAMTHIF_STALL_TIMER;
- dev->mei_host_buffer_is_empty =
- mei_host_buffer_is_empty(dev);
- }
- return 0;
- } else {
return -EMSGSIZE;
}
+ *slots -= (sizeof(struct mei_msg_hdr) +
+ sizeof(struct hbm_flow_control) + 3) / 4;
+ if (mei_send_flow_control(dev, &dev->iamthif_cl)) {
+ dev_dbg(&dev->pdev->dev, "iamthif flow control failed\n");
+ return -EIO;
+ }
+
+ dev_dbg(&dev->pdev->dev, "iamthif flow control success\n");
+ dev->iamthif_state = MEI_IAMTHIF_READING;
+ dev->iamthif_flow_control_pending = false;
+ dev->iamthif_msg_buf_index = 0;
+ dev->iamthif_msg_buf_size = 0;
+ dev->iamthif_stall_timer = IAMTHIF_STALL_TIMER;
+ dev->mei_host_buffer_is_empty = mei_host_buffer_is_empty(dev);
+ return 0;
}
/**
@@ -310,7 +305,7 @@ static int _mei_irq_thread_close(struct mei_device *dev, s32 *slots,
*slots -= (sizeof(struct mei_msg_hdr) +
sizeof(struct hbm_client_disconnect_request) + 3) / 4;
- if (!mei_disconnect(dev, cl)) {
+ if (mei_disconnect(dev, cl)) {
cl->status = 0;
cb_pos->information = 0;
list_move_tail(&cb_pos->cb_list,
@@ -601,8 +596,7 @@ static void mei_client_disconnect_request(struct mei_device *dev,
&dev->ext_msg_buf[1];
disconnect_res->host_addr = cl_pos->host_client_id;
disconnect_res->me_addr = cl_pos->me_client_id;
- *(u8 *) (&disconnect_res->cmd) =
- CLIENT_DISCONNECT_RES_CMD;
+ disconnect_res->hbm_cmd = CLIENT_DISCONNECT_RES_CMD;
disconnect_res->status = 0;
dev->extra_write_index = 2;
break;
@@ -632,15 +626,13 @@ static void mei_irq_thread_read_bus_message(struct mei_device *dev,
struct hbm_host_stop_request *host_stop_req;
int res;
- unsigned char *buffer;
/* read the message to our buffer */
- buffer = (unsigned char *) dev->rd_msg_buf;
BUG_ON(mei_hdr->length >= sizeof(dev->rd_msg_buf));
- mei_read_slots(dev, buffer, mei_hdr->length);
- mei_msg = (struct mei_bus_message *) buffer;
+ mei_read_slots(dev, dev->rd_msg_buf, mei_hdr->length);
+ mei_msg = (struct mei_bus_message *)dev->rd_msg_buf;
- switch (*(u8 *) mei_msg) {
+ switch (mei_msg->hbm_cmd) {
case HOST_START_RES_CMD:
version_res = (struct hbm_host_version_response *) mei_msg;
if (version_res->host_version_supported) {
@@ -659,6 +651,7 @@ static void mei_irq_thread_read_bus_message(struct mei_device *dev,
} else {
dev->version = version_res->me_max_version;
/* send stop message */
+ mei_hdr = (struct mei_msg_hdr *)&dev->wr_msg_buf[0];
mei_hdr->host_addr = 0;
mei_hdr->me_addr = 0;
mei_hdr->length = sizeof(struct hbm_host_stop_request);
@@ -671,7 +664,7 @@ static void mei_irq_thread_read_bus_message(struct mei_device *dev,
memset(host_stop_req,
0,
sizeof(struct hbm_host_stop_request));
- host_stop_req->cmd.cmd = HOST_STOP_REQ_CMD;
+ host_stop_req->hbm_cmd = HOST_STOP_REQ_CMD;
host_stop_req->reason = DRIVER_STOP_REQUEST;
mei_write_message(dev, mei_hdr,
(unsigned char *) (host_stop_req),
@@ -725,7 +718,7 @@ static void mei_irq_thread_read_bus_message(struct mei_device *dev,
dev->me_client_index++;
dev->me_client_presentation_num++;
- /** Send Client Propeties request **/
+ /** Send Client Properties request **/
res = mei_host_client_properties(dev);
if (res < 0) {
dev_dbg(&dev->pdev->dev, "mei_host_client_properties() failed");
@@ -811,7 +804,7 @@ static void mei_irq_thread_read_bus_message(struct mei_device *dev,
host_stop_req =
(struct hbm_host_stop_request *) &dev->ext_msg_buf[1];
memset(host_stop_req, 0, sizeof(struct hbm_host_stop_request));
- host_stop_req->cmd.cmd = HOST_STOP_REQ_CMD;
+ host_stop_req->hbm_cmd = HOST_STOP_REQ_CMD;
host_stop_req->reason = DRIVER_STOP_REQUEST;
host_stop_req->reserved[0] = 0;
host_stop_req->reserved[1] = 0;
@@ -844,24 +837,21 @@ static int _mei_irq_thread_read(struct mei_device *dev, s32 *slots,
{
if ((*slots * sizeof(u32)) >= (sizeof(struct mei_msg_hdr) +
sizeof(struct hbm_flow_control))) {
- *slots -= (sizeof(struct mei_msg_hdr) +
- sizeof(struct hbm_flow_control) + 3) / 4;
- if (!mei_send_flow_control(dev, cl)) {
- cl->status = -ENODEV;
- cb_pos->information = 0;
- list_move_tail(&cb_pos->cb_list,
- &cmpl_list->mei_cb.cb_list);
- return -ENODEV;
- } else {
- list_move_tail(&cb_pos->cb_list,
- &dev->read_list.mei_cb.cb_list);
- }
- } else {
/* return the cancel routine */
list_del(&cb_pos->cb_list);
return -EBADMSG;
}
+ *slots -= (sizeof(struct mei_msg_hdr) +
+ sizeof(struct hbm_flow_control) + 3) / 4;
+ if (mei_send_flow_control(dev, cl)) {
+ cl->status = -ENODEV;
+ cb_pos->information = 0;
+ list_move_tail(&cb_pos->cb_list, &cmpl_list->mei_cb.cb_list);
+ return -ENODEV;
+ }
+ list_move_tail(&cb_pos->cb_list, &dev->read_list.mei_cb.cb_list);
+
return 0;
}
@@ -887,7 +877,7 @@ static int _mei_irq_thread_ioctl(struct mei_device *dev, s32 *slots,
cl->state = MEI_FILE_CONNECTING;
*slots -= (sizeof(struct mei_msg_hdr) +
sizeof(struct hbm_client_connect_request) + 3) / 4;
- if (!mei_connect(dev, cl)) {
+ if (mei_connect(dev, cl)) {
cl->status = -ENODEV;
cb_pos->information = 0;
list_del(&cb_pos->cb_list);
@@ -944,7 +934,7 @@ static int _mei_irq_thread_cmpl(struct mei_device *dev, s32 *slots,
mei_hdr->length);
*slots -= (sizeof(struct mei_msg_hdr) +
mei_hdr->length + 3) / 4;
- if (!mei_write_message(dev, mei_hdr,
+ if (mei_write_message(dev, mei_hdr,
(unsigned char *)
(cb_pos->request_buffer.data +
cb_pos->information),
@@ -973,7 +963,7 @@ static int _mei_irq_thread_cmpl(struct mei_device *dev, s32 *slots,
(*slots) -= (sizeof(struct mei_msg_hdr) +
mei_hdr->length + 3) / 4;
- if (!mei_write_message(dev, mei_hdr,
+ if (mei_write_message(dev, mei_hdr,
(unsigned char *)
(cb_pos->request_buffer.data +
cb_pos->information),
@@ -1034,7 +1024,7 @@ static int _mei_irq_thread_cmpl_iamthif(struct mei_device *dev, s32 *slots,
*slots -= (sizeof(struct mei_msg_hdr) +
mei_hdr->length + 3) / 4;
- if (!mei_write_message(dev, mei_hdr,
+ if (mei_write_message(dev, mei_hdr,
(dev->iamthif_msg_buf +
dev->iamthif_msg_buf_index),
mei_hdr->length)) {
@@ -1069,7 +1059,7 @@ static int _mei_irq_thread_cmpl_iamthif(struct mei_device *dev, s32 *slots,
*slots -= (sizeof(struct mei_msg_hdr) +
mei_hdr->length + 3) / 4;
- if (!mei_write_message(dev, mei_hdr,
+ if (mei_write_message(dev, mei_hdr,
(dev->iamthif_msg_buf +
dev->iamthif_msg_buf_index),
mei_hdr->length)) {
@@ -1286,7 +1276,7 @@ static int mei_irq_thread_write_handler(struct mei_io_list *cmpl_list,
}
}
if (dev->stop)
- return ~ENODEV;
+ return -ENODEV;
/* complete control write list CB */
dev_dbg(&dev->pdev->dev, "complete control write list cb.\n");
@@ -1423,7 +1413,7 @@ void mei_timer(struct work_struct *work)
if (dev->iamthif_stall_timer) {
if (--dev->iamthif_stall_timer == 0) {
- dev_dbg(&dev->pdev->dev, "reseting because of hang to amthi.\n");
+ dev_dbg(&dev->pdev->dev, "resetting because of hang to amthi.\n");
mei_reset(dev, 1);
dev->iamthif_msg_buf_size = 0;
dev->iamthif_msg_buf_index = 0;
@@ -1513,7 +1503,7 @@ irqreturn_t mei_interrupt_thread_handler(int irq, void *dev_id)
dev->host_hw_state = mei_hcsr_read(dev);
/* Ack the interrupt here
- * In case of MSI we don't go throuhg the quick handler */
+ * In case of MSI we don't go through the quick handler */
if (pci_dev_msi_enabled(dev->pdev))
mei_reg_write(dev, H_CSR, dev->host_hw_state);
@@ -1549,7 +1539,7 @@ irqreturn_t mei_interrupt_thread_handler(int irq, void *dev_id)
return IRQ_HANDLED;
}
}
- /* check slots avalable for reading */
+ /* check slots available for reading */
slots = mei_count_full_read_slots(dev);
dev_dbg(&dev->pdev->dev, "slots =%08x extra_write_index =%08x.\n",
slots, dev->extra_write_index);
diff --git a/drivers/staging/mei/iorw.c b/drivers/staging/mei/iorw.c
index 0752ead4269..0a80dc4e62f 100644
--- a/drivers/staging/mei/iorw.c
+++ b/drivers/staging/mei/iorw.c
@@ -1,7 +1,7 @@
/*
*
* Intel Management Engine Interface (Intel MEI) Linux driver
- * Copyright (c) 2003-2011, Intel Corporation.
+ * Copyright (c) 2003-2012, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -37,7 +37,6 @@
#include "hw.h"
#include "mei.h"
#include "interface.h"
-#include "mei_version.h"
@@ -109,8 +108,8 @@ int mei_ioctl_connect_client(struct file *file,
dev_dbg(&dev->pdev->dev, "FW Client - Max Msg Len = %d\n",
dev->me_clients[i].props.max_msg_length);
- /* if we're connecting to amthi client so we will use the exist
- * connection
+ /* if we're connecting to amthi client then we will use the
+ * existing connection
*/
if (uuid_le_cmp(data->in_client_uuid, mei_amthi_guid) == 0) {
dev_dbg(&dev->pdev->dev, "FW Client is amthi\n");
@@ -162,7 +161,7 @@ int mei_ioctl_connect_client(struct file *file,
&& !mei_other_client_is_connecting(dev, cl)) {
dev_dbg(&dev->pdev->dev, "Sending Connect Message\n");
dev->mei_host_buffer_is_empty = false;
- if (!mei_connect(dev, cl)) {
+ if (mei_connect(dev, cl)) {
dev_dbg(&dev->pdev->dev, "Sending connect message - failed\n");
rets = -ENODEV;
goto end;
@@ -434,13 +433,11 @@ int mei_start_read(struct mei_device *dev, struct mei_cl *cl)
cl->read_cb = cb;
if (dev->mei_host_buffer_is_empty) {
dev->mei_host_buffer_is_empty = false;
- if (!mei_send_flow_control(dev, cl)) {
+ if (mei_send_flow_control(dev, cl)) {
rets = -ENODEV;
goto unlock;
- } else {
- list_add_tail(&cb->cb_list,
- &dev->read_list.mei_cb.cb_list);
}
+ list_add_tail(&cb->cb_list, &dev->read_list.mei_cb.cb_list);
} else {
list_add_tail(&cb->cb_list, &dev->ctrl_wr_list.mei_cb.cb_list);
}
@@ -500,7 +497,7 @@ int amthi_write(struct mei_device *dev, struct mei_cl_cb *cb)
mei_hdr.me_addr = dev->iamthif_cl.me_client_id;
mei_hdr.reserved = 0;
dev->iamthif_msg_buf_index += mei_hdr.length;
- if (!mei_write_message(dev, &mei_hdr,
+ if (mei_write_message(dev, &mei_hdr,
(unsigned char *)(dev->iamthif_msg_buf),
mei_hdr.length))
return -ENODEV;
diff --git a/drivers/staging/mei/main.c b/drivers/staging/mei/main.c
index 1e1a9f996e7..7c9321fa7bb 100644
--- a/drivers/staging/mei/main.c
+++ b/drivers/staging/mei/main.c
@@ -1,7 +1,7 @@
/*
*
* Intel Management Engine Interface (Intel MEI) Linux driver
- * Copyright (c) 2003-2011, Intel Corporation.
+ * Copyright (c) 2003-2012, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -38,7 +38,6 @@
#include "mei_dev.h"
#include "mei.h"
#include "interface.h"
-#include "mei_version.h"
#define MEI_READ_TIMEOUT 45
@@ -50,7 +49,6 @@
*/
static char mei_driver_name[] = MEI_DRIVER_NAME;
static const char mei_driver_string[] = "Intel(R) Management Engine Interface";
-static const char mei_driver_version[] = MEI_DRIVER_VERSION;
/* The device pointer */
/* Currently this driver works as long as there is only a single AMT device. */
@@ -430,7 +428,7 @@ static ssize_t mei_read(struct file *file, char __user *ubuf,
goto free;
} else if ((!cl->read_cb || !cl->read_cb->information) &&
*offset > 0) {
- /*Offset needs to be cleaned for contingous reads*/
+ /*Offset needs to be cleaned for contiguous reads*/
*offset = 0;
rets = 0;
goto out;
@@ -493,7 +491,7 @@ copy_buffer:
goto free;
}
- /* length is being turncated to PAGE_SIZE, however, */
+ /* length is being truncated to PAGE_SIZE, however, */
/* information size may be longer */
length = min_t(size_t, length, (cb->information - *offset));
@@ -740,7 +738,7 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf,
mei_hdr.reserved = 0;
dev_dbg(&dev->pdev->dev, "call mei_write_message header=%08x.\n",
*((u32 *) &mei_hdr));
- if (!mei_write_message(dev, &mei_hdr,
+ if (mei_write_message(dev, &mei_hdr,
(unsigned char *) (write_cb->request_buffer.data),
mei_hdr.length)) {
rets = -ENODEV;
@@ -1206,8 +1204,7 @@ static int __init mei_init_module(void)
{
int ret;
- pr_debug("mei: %s - version %s\n",
- mei_driver_string, mei_driver_version);
+ pr_debug("mei: %s\n", mei_driver_string);
/* init pci module */
ret = pci_register_driver(&mei_driver);
if (ret < 0)
@@ -1238,4 +1235,3 @@ module_exit(mei_exit_module);
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Intel(R) Management Engine Interface");
MODULE_LICENSE("GPL v2");
-MODULE_VERSION(MEI_DRIVER_VERSION);
diff --git a/drivers/staging/mei/mei-amt-version.c b/drivers/staging/mei/mei-amt-version.c
new file mode 100644
index 00000000000..ac2a507be25
--- /dev/null
+++ b/drivers/staging/mei/mei-amt-version.c
@@ -0,0 +1,481 @@
+/******************************************************************************
+ * Intel Management Engine Interface (Intel MEI) Linux driver
+ * Intel MEI Interface Header
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Corporation.
+ * linux-mei@linux.intel.com
+ * http://www.intel.com
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <fcntl.h>
+#include <sys/ioctl.h>
+#include <unistd.h>
+#include <errno.h>
+#include <stdint.h>
+#include <stdbool.h>
+#include <bits/wordsize.h>
+#include "mei.h"
+
+/*****************************************************************************
+ * Intel Management Engine Interface
+ *****************************************************************************/
+
+#define mei_msg(_me, fmt, ARGS...) do { \
+ if (_me->verbose) \
+ fprintf(stderr, fmt, ##ARGS); \
+} while (0)
+
+#define mei_err(_me, fmt, ARGS...) do { \
+ fprintf(stderr, "Error: " fmt, ##ARGS); \
+} while (0)
+
+struct mei {
+ uuid_le guid;
+ bool initialized;
+ bool verbose;
+ unsigned int buf_size;
+ unsigned char prot_ver;
+ int fd;
+};
+
+static void mei_deinit(struct mei *cl)
+{
+ if (cl->fd != -1)
+ close(cl->fd);
+ cl->fd = -1;
+ cl->buf_size = 0;
+ cl->prot_ver = 0;
+ cl->initialized = false;
+}
+
+static bool mei_init(struct mei *me, const uuid_le *guid,
+ unsigned char req_protocol_version, bool verbose)
+{
+ int result;
+ struct mei_client *cl;
+ struct mei_connect_client_data data;
+
+ mei_deinit(me);
+
+ me->verbose = verbose;
+
+ me->fd = open("/dev/mei", O_RDWR);
+ if (me->fd == -1) {
+ mei_err(me, "Cannot establish a handle to the Intel MEI driver\n");
+ goto err;
+ }
+ memcpy(&me->guid, guid, sizeof(*guid));
+ memset(&data, 0, sizeof(data));
+ me->initialized = true;
+
+ memcpy(&data.in_client_uuid, &me->guid, sizeof(me->guid));
+ result = ioctl(me->fd, IOCTL_MEI_CONNECT_CLIENT, &data);
+ if (result) {
+ mei_err(me, "IOCTL_MEI_CONNECT_CLIENT receive message. err=%d\n", result);
+ goto err;
+ }
+ cl = &data.out_client_properties;
+ mei_msg(me, "max_message_length %d\n", cl->max_msg_length);
+ mei_msg(me, "protocol_version %d\n", cl->protocol_version);
+
+ if ((req_protocol_version > 0) &&
+ (cl->protocol_version != req_protocol_version)) {
+ mei_err(me, "Intel MEI protocol version not supported\n");
+ goto err;
+ }
+
+ me->buf_size = cl->max_msg_length;
+ me->prot_ver = cl->protocol_version;
+
+ return true;
+err:
+ mei_deinit(me);
+ return false;
+}
+
+static ssize_t mei_recv_msg(struct mei *me, unsigned char *buffer,
+ ssize_t len, unsigned long timeout)
+{
+ ssize_t rc;
+
+ mei_msg(me, "call read length = %zd\n", len);
+
+ rc = read(me->fd, buffer, len);
+ if (rc < 0) {
+ mei_err(me, "read failed with status %zd %s\n",
+ rc, strerror(errno));
+ mei_deinit(me);
+ } else {
+ mei_msg(me, "read succeeded with result %zd\n", rc);
+ }
+ return rc;
+}
+
+static ssize_t mei_send_msg(struct mei *me, const unsigned char *buffer,
+ ssize_t len, unsigned long timeout)
+{
+ struct timeval tv;
+ ssize_t written;
+ ssize_t rc;
+ fd_set set;
+
+ tv.tv_sec = timeout / 1000;
+ tv.tv_usec = (timeout % 1000) * 1000; /* ms remainder to microseconds */
+
+ mei_msg(me, "call write length = %zd\n", len);
+
+ written = write(me->fd, buffer, len);
+ if (written < 0) {
+ rc = -errno;
+ mei_err(me, "write failed with status %zd %s\n",
+ written, strerror(errno));
+ goto out;
+ }
+
+ FD_ZERO(&set);
+ FD_SET(me->fd, &set);
+ rc = select(me->fd + 1, &set, NULL, NULL, &tv);
+ if (rc > 0 && FD_ISSET(me->fd, &set)) {
+ mei_msg(me, "write success\n");
+ } else if (rc == 0) {
+ mei_err(me, "write failed on timeout with status\n");
+ goto out;
+ } else { /* rc < 0 */
+ mei_err(me, "write failed on select with status %zd\n", rc);
+ goto out;
+ }
+
+ rc = written;
+out:
+ if (rc < 0)
+ mei_deinit(me);
+
+ return rc;
+}
+
+/***************************************************************************
+ * Intel Advanced Management Technology ME Client
+ ***************************************************************************/
+
+#define AMT_MAJOR_VERSION 1
+#define AMT_MINOR_VERSION 1
+
+#define AMT_STATUS_SUCCESS 0x0
+#define AMT_STATUS_INTERNAL_ERROR 0x1
+#define AMT_STATUS_NOT_READY 0x2
+#define AMT_STATUS_INVALID_AMT_MODE 0x3
+#define AMT_STATUS_INVALID_MESSAGE_LENGTH 0x4
+
+#define AMT_STATUS_HOST_IF_EMPTY_RESPONSE 0x4000
+#define AMT_STATUS_SDK_RESOURCES 0x1004
+
+
+#define AMT_BIOS_VERSION_LEN 65
+#define AMT_VERSIONS_NUMBER 50
+#define AMT_UNICODE_STRING_LEN 20
+
+struct amt_unicode_string {
+ uint16_t length;
+ char string[AMT_UNICODE_STRING_LEN];
+} __attribute__((packed));
+
+struct amt_version_type {
+ struct amt_unicode_string description;
+ struct amt_unicode_string version;
+} __attribute__((packed));
+
+struct amt_version {
+ uint8_t major;
+ uint8_t minor;
+} __attribute__((packed));
+
+struct amt_code_versions {
+ uint8_t bios[AMT_BIOS_VERSION_LEN];
+ uint32_t count;
+ struct amt_version_type versions[AMT_VERSIONS_NUMBER];
+} __attribute__((packed));
+
+/***************************************************************************
+ * Intel Advanced Management Technology Host Interface
+ ***************************************************************************/
+
+struct amt_host_if_msg_header {
+ struct amt_version version;
+ uint16_t _reserved;
+ uint32_t command;
+ uint32_t length;
+} __attribute__((packed));
+
+struct amt_host_if_resp_header {
+ struct amt_host_if_msg_header header;
+ uint32_t status;
+ unsigned char data[0];
+} __attribute__((packed));
+
+const uuid_le MEI_IAMTHIF = UUID_LE(0x12f80028, 0xb4b7, 0x4b2d, \
+ 0xac, 0xa8, 0x46, 0xe0, 0xff, 0x65, 0x81, 0x4c);
+
+#define AMT_HOST_IF_CODE_VERSIONS_REQUEST 0x0400001A
+#define AMT_HOST_IF_CODE_VERSIONS_RESPONSE 0x0480001A
+
+const struct amt_host_if_msg_header CODE_VERSION_REQ = {
+ .version = {AMT_MAJOR_VERSION, AMT_MINOR_VERSION},
+ ._reserved = 0,
+ .command = AMT_HOST_IF_CODE_VERSIONS_REQUEST,
+ .length = 0
+};
+
+
+struct amt_host_if {
+ struct mei mei_cl;
+ unsigned long send_timeout;
+ bool initialized;
+};
+
+
+static bool amt_host_if_init(struct amt_host_if *acmd,
+ unsigned long send_timeout, bool verbose)
+{
+ acmd->send_timeout = (send_timeout) ? send_timeout : 20000;
+ acmd->initialized = mei_init(&acmd->mei_cl, &MEI_IAMTHIF, 0, verbose);
+ return acmd->initialized;
+}
+
+static void amt_host_if_deinit(struct amt_host_if *acmd)
+{
+ mei_deinit(&acmd->mei_cl);
+ acmd->initialized = false;
+}
+
+static uint32_t amt_verify_code_versions(const struct amt_host_if_resp_header *resp)
+{
+ uint32_t status = AMT_STATUS_SUCCESS;
+ struct amt_code_versions *code_ver;
+ size_t code_ver_len;
+ uint32_t ver_type_cnt;
+ uint32_t len;
+ uint32_t i;
+
+ code_ver = (struct amt_code_versions *)resp->data;
+ /* length - sizeof(status) */
+ code_ver_len = resp->header.length - sizeof(uint32_t);
+ ver_type_cnt = code_ver_len -
+ sizeof(code_ver->bios) -
+ sizeof(code_ver->count);
+ if (code_ver->count != ver_type_cnt / sizeof(struct amt_version_type)) {
+ status = AMT_STATUS_INTERNAL_ERROR;
+ goto out;
+ }
+
+ for (i = 0; i < code_ver->count; i++) {
+ len = code_ver->versions[i].description.length;
+
+ if (len > AMT_UNICODE_STRING_LEN) {
+ status = AMT_STATUS_INTERNAL_ERROR;
+ goto out;
+ }
+
+ len = code_ver->versions[i].version.length;
+ if (code_ver->versions[i].version.string[len] != '\0' ||
+ len != strlen(code_ver->versions[i].version.string)) {
+ status = AMT_STATUS_INTERNAL_ERROR;
+ goto out;
+ }
+ }
+out:
+ return status;
+}
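For reference when reading the length checks above: struct amt_version_type is two amt_unicode_string members of 22 bytes each (a 2-byte length plus a 20-byte string), i.e. 44 bytes, so a well-formed response carrying N version entries has header.length equal to 4 (status) + 65 (bios) + 4 (count) + 44*N, and the count consistency check divides the remainder by 44.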
+
+static uint32_t amt_verify_response_header(uint32_t command,
+ const struct amt_host_if_msg_header *resp_hdr,
+ uint32_t response_size)
+{
+ if (response_size < sizeof(struct amt_host_if_resp_header)) {
+ return AMT_STATUS_INTERNAL_ERROR;
+ } else if (response_size != (resp_hdr->length +
+ sizeof(struct amt_host_if_msg_header))) {
+ return AMT_STATUS_INTERNAL_ERROR;
+ } else if (resp_hdr->command != command) {
+ return AMT_STATUS_INTERNAL_ERROR;
+ } else if (resp_hdr->_reserved != 0) {
+ return AMT_STATUS_INTERNAL_ERROR;
+ } else if (resp_hdr->version.major != AMT_MAJOR_VERSION ||
+ resp_hdr->version.minor < AMT_MINOR_VERSION) {
+ return AMT_STATUS_INTERNAL_ERROR;
+ }
+ return AMT_STATUS_SUCCESS;
+}
+
+static uint32_t amt_host_if_call(struct amt_host_if *acmd,
+ const unsigned char *command, ssize_t command_sz,
+ uint8_t **read_buf, uint32_t rcmd,
+ unsigned int expected_sz)
+{
+ uint32_t in_buf_sz;
+ ssize_t out_buf_sz; /* signed: mei_recv_msg() may return a negative error */
+ ssize_t written;
+ uint32_t status;
+ struct amt_host_if_resp_header *msg_hdr;
+
+ in_buf_sz = acmd->mei_cl.buf_size;
+ *read_buf = (uint8_t *)malloc(sizeof(uint8_t) * in_buf_sz);
+ if (*read_buf == NULL)
+ return AMT_STATUS_SDK_RESOURCES;
+ memset(*read_buf, 0, in_buf_sz);
+ msg_hdr = (struct amt_host_if_resp_header *)*read_buf;
+
+ written = mei_send_msg(&acmd->mei_cl,
+ command, command_sz, acmd->send_timeout);
+ if (written != command_sz)
+ return AMT_STATUS_INTERNAL_ERROR;
+
+ out_buf_sz = mei_recv_msg(&acmd->mei_cl, *read_buf, in_buf_sz, 2000);
+ if (out_buf_sz <= 0)
+ return AMT_STATUS_HOST_IF_EMPTY_RESPONSE;
+
+ status = msg_hdr->status;
+ if (status != AMT_STATUS_SUCCESS)
+ return status;
+
+ status = amt_verify_response_header(rcmd,
+ &msg_hdr->header, out_buf_sz);
+ if (status != AMT_STATUS_SUCCESS)
+ return status;
+
+ if (expected_sz && expected_sz != out_buf_sz)
+ return AMT_STATUS_INTERNAL_ERROR;
+
+ return AMT_STATUS_SUCCESS;
+}
+
+
+static uint32_t amt_get_code_versions(struct amt_host_if *cmd,
+ struct amt_code_versions *versions)
+{
+ struct amt_host_if_resp_header *response = NULL;
+ uint32_t status;
+
+ status = amt_host_if_call(cmd,
+ (const unsigned char *)&CODE_VERSION_REQ,
+ sizeof(CODE_VERSION_REQ),
+ (uint8_t **)&response,
+ AMT_HOST_IF_CODE_VERSIONS_RESPONSE, 0);
+
+ if (status != AMT_STATUS_SUCCESS)
+ goto out;
+
+ status = amt_verify_code_versions(response);
+ if (status != AMT_STATUS_SUCCESS)
+ goto out;
+
+ memcpy(versions, response->data, sizeof(struct amt_code_versions));
+out:
+ if (response != NULL)
+ free(response);
+
+ return status;
+}
+
+/************************** end of amt_host_if_command ***********************/
+int main(int argc, char **argv)
+{
+ struct amt_code_versions ver;
+ struct amt_host_if acmd;
+ unsigned int i;
+ uint32_t status;
+ int ret;
+ bool verbose;
+
+ verbose = (argc > 1 && strcmp(argv[1], "-v") == 0);
+
+ if (!amt_host_if_init(&acmd, 5000, verbose)) {
+ ret = 1;
+ goto out;
+ }
+
+ status = amt_get_code_versions(&acmd, &ver);
+
+ amt_host_if_deinit(&acmd);
+
+ switch (status) {
+ case AMT_STATUS_HOST_IF_EMPTY_RESPONSE:
+ printf("Intel AMT: DISABLED\n");
+ ret = 0;
+ break;
+ case AMT_STATUS_SUCCESS:
+ printf("Intel AMT: ENABLED\n");
+ for (i = 0; i < ver.count; i++) {
+ printf("%s:\t%s\n", ver.versions[i].description.string,
+ ver.versions[i].version.string);
+ }
+ ret = 0;
+ break;
+ default:
+ printf("An error has occurred\n");
+ ret = 1;
+ break;
+ }
+
+out:
+ return ret;
+}
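The sample builds standalone against the exported header (for instance gcc -o mei-amt-version mei-amt-version.c with mei.h on the include path) and, run as root on AMT-capable hardware, prints either "Intel AMT: DISABLED" or "Intel AMT: ENABLED" followed by one description/version pair per entry; pass -v for a verbose send/receive trace.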
diff --git a/drivers/staging/mei/mei.h b/drivers/staging/mei/mei.h
index 6da7c4f33f9..bc0d8b69c49 100644
--- a/drivers/staging/mei/mei.h
+++ b/drivers/staging/mei/mei.h
@@ -1,63 +1,68 @@
-/*
-
- Intel Management Engine Interface (Intel MEI) Linux driver
- Intel MEI Interface Header
-
- This file is provided under a dual BSD/GPLv2 license. When using or
- redistributing this file, you may do so under either license.
-
- GPL LICENSE SUMMARY
-
- Copyright(c) 2003-2011 Intel Corporation. All rights reserved.
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- Contact Information:
- Intel Corporation.
- linux-mei@linux.intel.com
- http://www.intel.com
-
-
- BSD LICENSE
-
- Copyright(c) 2003-2011 Intel Corporation. All rights reserved.
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-*/
-
+/******************************************************************************
+ * Intel Management Engine Interface (Intel MEI) Linux driver
+ * Intel MEI Interface Header
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Corporation.
+ * linux-mei@linux.intel.com
+ * http://www.intel.com
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
#ifndef _LINUX_MEI_H
#define _LINUX_MEI_H
@@ -72,7 +77,7 @@
* Only in close() (file_operation release()) the communication between
* the clients is disconnected
*
- * The IOCTL argument is a struct with a union the contains
+ * The IOCTL argument is a struct with a union that contains
* the input parameter and the output parameter for this IOCTL.
*
* The input parameter is UUID of the FW Client.
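The connect flow this comment describes is exactly what the new mei-amt-version.c sample above exercises; as a minimal userspace sketch (error handling omitted, some_client_uuid is a placeholder for a real FW client UUID):

	struct mei_connect_client_data data = { 0 };
	int fd = open("/dev/mei", O_RDWR);

	memcpy(&data.in_client_uuid, &some_client_uuid, sizeof(uuid_le));
	if (ioctl(fd, IOCTL_MEI_CONNECT_CLIENT, &data) == 0)
		printf("max msg %d, protocol %d\n",
		       data.out_client_properties.max_msg_length,
		       data.out_client_properties.protocol_version);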
diff --git a/drivers/staging/mei/mei.txt b/drivers/staging/mei/mei.txt
index 516bfe7319a..2785697da59 100644
--- a/drivers/staging/mei/mei.txt
+++ b/drivers/staging/mei/mei.txt
@@ -4,7 +4,7 @@ Intel(R) Management Engine Interface (Intel(R) MEI)
Introduction
=======================
-The Intel Management Engine (Intel ME) is an isolated andprotected computing
+The Intel Management Engine (Intel ME) is an isolated and protected computing
resource (Co-processor) residing inside certain Intel chipsets. The Intel ME
provides support for computer/IT management features. The feature set
depends on the Intel chipset SKU.
@@ -176,8 +176,8 @@ Intel AMT OS Health Watchdog:
=============================
The Intel AMT Watchdog is an OS Health (Hang/Crash) watchdog.
Whenever the OS hangs or crashes, Intel AMT will send an event
-to any subsciber to this event. This mechanism means that
-IT knows when a platform crashes even when there is a hard failureon the host.
+to any subscriber to this event. This mechanism means that
+IT knows when a platform crashes even when there is a hard failure on the host.
The Intel AMT Watchdog is composed of two parts:
1) Firmware feature - receives the heartbeats
diff --git a/drivers/staging/mei/mei_dev.h b/drivers/staging/mei/mei_dev.h
index 82bacfc624c..10b1b4e2f8a 100644
--- a/drivers/staging/mei/mei_dev.h
+++ b/drivers/staging/mei/mei_dev.h
@@ -1,7 +1,7 @@
/*
*
* Intel Management Engine Interface (Intel MEI) Linux driver
- * Copyright (c) 2003-2011, Intel Corporation.
+ * Copyright (c) 2003-2012, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -30,6 +30,8 @@
#define MEI_WD_PARAMS_SIZE 4
#define MEI_WD_STATE_INDEPENDENCE_MSG_SENT (1 << 0)
+#define MEI_RD_MSG_BUF_SIZE (128 * sizeof(u32))
+
/*
* MEI PCI Device object
*/
@@ -87,7 +89,7 @@ enum mei_states {
MEI_POWER_UP
};
-/* init clients states*/
+/* init clients states */
enum mei_init_clients_states {
MEI_START_MESSAGE = 0,
MEI_ENUM_CLIENTS_MESSAGE,
@@ -125,7 +127,7 @@ enum mei_cb_major_types {
*/
struct mei_message_data {
u32 size;
- char *data;
+ unsigned char *data;
} __packed;
@@ -219,7 +221,7 @@ struct mei_device {
bool need_reset;
u32 extra_write_index;
- u32 rd_msg_buf[128]; /* used for control messages */
+ unsigned char rd_msg_buf[MEI_RD_MSG_BUF_SIZE]; /* control messages */
u32 wr_msg_buf[128]; /* used for control messages */
u32 ext_msg_buf[8]; /* for control responses */
u32 rd_msg_hdr;
diff --git a/drivers/staging/mei/mei_version.h b/drivers/staging/mei/mei_version.h
deleted file mode 100644
index 075bad8f0bf..00000000000
--- a/drivers/staging/mei/mei_version.h
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- *
- * Intel Management Engine Interface (Intel MEI) Linux driver
- * Copyright (c) 2003-2011, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- */
-
-
-#ifndef MEI_VERSION_H
-#define MEI_VERSION_H
-
-#define MAJOR_VERSION 7
-#define MINOR_VERSION 1
-#define QUICK_FIX_NUMBER 20
-#define VER_BUILD 1
-
-#define MEI_DRV_VER1 __stringify(MAJOR_VERSION) "." __stringify(MINOR_VERSION)
-#define MEI_DRV_VER2 __stringify(QUICK_FIX_NUMBER) "." __stringify(VER_BUILD)
-
-#define MEI_DRIVER_VERSION MEI_DRV_VER1 "." MEI_DRV_VER2
-
-#endif
diff --git a/drivers/staging/mei/wd.c b/drivers/staging/mei/wd.c
index 8094941a98f..a6910da78a6 100644
--- a/drivers/staging/mei/wd.c
+++ b/drivers/staging/mei/wd.c
@@ -1,7 +1,7 @@
/*
*
* Intel Management Engine Interface (Intel MEI) Linux driver
- * Copyright (c) 2003-2011, Intel Corporation.
+ * Copyright (c) 2003-2012, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -74,7 +74,7 @@ bool mei_wd_host_init(struct mei_device *dev)
dev_dbg(&dev->pdev->dev, "check wd_cl\n");
if (MEI_FILE_CONNECTING == dev->wd_cl.state) {
- if (!mei_connect(dev, &dev->wd_cl)) {
+ if (mei_connect(dev, &dev->wd_cl)) {
dev_dbg(&dev->pdev->dev, "Failed to connect to WD client\n");
dev->wd_cl.state = MEI_FILE_DISCONNECTED;
dev->wd_cl.host_client_id = 0;
@@ -119,9 +119,7 @@ int mei_wd_send(struct mei_device *dev)
else
return -EINVAL;
- if (mei_write_message(dev, mei_hdr, dev->wd_data, mei_hdr->length))
- return 0;
- return -EIO;
+ return mei_write_message(dev, mei_hdr, dev->wd_data, mei_hdr->length);
}
/**
diff --git a/drivers/staging/nvec/Kconfig b/drivers/staging/nvec/Kconfig
index 86a8b8c418c..731301f524a 100644
--- a/drivers/staging/nvec/Kconfig
+++ b/drivers/staging/nvec/Kconfig
@@ -7,21 +7,21 @@ config MFD_NVEC
config KEYBOARD_NVEC
bool "Keyboard on nVidia compliant EC"
- depends on MFD_NVEC && INPUT=y
+ depends on MFD_NVEC && INPUT
help
Say Y here to enable support for a keyboard connected to
a nVidia compliant embedded controller.
config SERIO_NVEC_PS2
bool "PS2 on nVidia EC"
- depends on MFD_NVEC && MOUSE_PS2
+ depends on MFD_NVEC && SERIO
help
Say Y here to enable support for a Touchpad / Mouse connected
to a nVidia compliant embedded controller.
config NVEC_POWER
bool "NVEC charger and battery"
- depends on MFD_NVEC && POWER_SUPPLY=y
+ depends on MFD_NVEC && POWER_SUPPLY
help
Say Y to enable support for battery and charger interface for
nVidia compliant embedded controllers.
diff --git a/drivers/staging/nvec/nvec.c b/drivers/staging/nvec/nvec.c
index fafdfa25e13..3c60088871e 100644
--- a/drivers/staging/nvec/nvec.c
+++ b/drivers/staging/nvec/nvec.c
@@ -49,7 +49,7 @@
#define I2C_CNFG_DEBOUNCE_CNT_SHIFT 12
#define I2C_SL_CNFG 0x20
-#define I2C_SL_NEWL (1<<2)
+#define I2C_SL_NEWSL (1<<2)
#define I2C_SL_NACK (1<<1)
#define I2C_SL_RESP (1<<0)
#define I2C_SL_IRQ (1<<3)
@@ -687,7 +687,7 @@ static void tegra_init_i2c_slave(struct nvec_chip *nvec)
clk_set_rate(nvec->i2c_clk, 8 * 80000);
- writel(I2C_SL_NEWL, nvec->base + I2C_SL_CNFG);
+ writel(I2C_SL_NEWSL, nvec->base + I2C_SL_CNFG);
writel(0x1E, nvec->base + I2C_SL_DELAY_COUNT);
writel(nvec->i2c_addr>>1, nvec->base + I2C_SL_ADDR1);
@@ -701,7 +701,7 @@ static void tegra_init_i2c_slave(struct nvec_chip *nvec)
static void nvec_disable_i2c_slave(struct nvec_chip *nvec)
{
disable_irq(nvec->irq);
- writel(I2C_SL_NEWL | I2C_SL_NACK, nvec->base + I2C_SL_CNFG);
+ writel(I2C_SL_NEWSL | I2C_SL_NACK, nvec->base + I2C_SL_CNFG);
clk_disable(nvec->i2c_clk);
}
@@ -784,11 +784,6 @@ static int __devinit tegra_nvec_probe(struct platform_device *pdev)
nvec->i2c_clk = i2c_clk;
nvec->rx = &nvec->msg_pool[0];
- /* Set the gpio to low when we've got something to say */
- err = gpio_request(nvec->gpio, "nvec gpio");
- if (err < 0)
- dev_err(nvec->dev, "couldn't request gpio\n");
-
ATOMIC_INIT_NOTIFIER_HEAD(&nvec->notifier_list);
init_completion(&nvec->sync_write);
@@ -802,6 +797,12 @@ static int __devinit tegra_nvec_probe(struct platform_device *pdev)
INIT_WORK(&nvec->tx_work, nvec_request_master);
nvec->wq = alloc_workqueue("nvec", WQ_NON_REENTRANT, 2);
+ err = gpio_request_one(nvec->gpio, GPIOF_OUT_INIT_HIGH, "nvec gpio");
+ if (err < 0) {
+ dev_err(nvec->dev, "couldn't request gpio\n");
+ goto failed;
+ }
+
err = request_irq(nvec->irq, nvec_interrupt, 0, "nvec", nvec);
if (err) {
dev_err(nvec->dev, "couldn't request irq\n");
@@ -813,8 +814,6 @@ static int __devinit tegra_nvec_probe(struct platform_device *pdev)
clk_enable(i2c_clk);
- gpio_direction_output(nvec->gpio, 1);
- gpio_set_value(nvec->gpio, 1);
/* enable event reporting */
nvec_write_async(nvec, EC_ENABLE_EVENT_REPORTING,
diff --git a/drivers/staging/nvec/nvec_ps2.c b/drivers/staging/nvec/nvec_ps2.c
index 742f5ccfe76..14a6f687cf7 100644
--- a/drivers/staging/nvec/nvec_ps2.c
+++ b/drivers/staging/nvec/nvec_ps2.c
@@ -21,10 +21,18 @@
#include "nvec.h"
-#define START_STREAMING {'\x06', '\x03', '\x04'}
+#define START_STREAMING {'\x06', '\x03', '\x06'}
#define STOP_STREAMING {'\x06', '\x04'}
#define SEND_COMMAND {'\x06', '\x01', '\xf4', '\x01'}
+#ifdef NVEC_PS2_DEBUG
+#define NVEC_PHD(str, buf, len) \
+ print_hex_dump(KERN_DEBUG, str, DUMP_PREFIX_NONE, \
+ 16, 1, buf, len, false)
+#else
+#define NVEC_PHD(str, buf, len)
+#endif
+
static const unsigned char MOUSE_RESET[] = {'\x06', '\x01', '\xff', '\x03'};
struct nvec_ps2 {
@@ -67,18 +75,18 @@ static int nvec_ps2_notifier(struct notifier_block *nb,
case NVEC_PS2_EVT:
for (i = 0; i < msg[1]; i++)
serio_interrupt(ps2_dev.ser_dev, msg[2 + i], 0);
+ NVEC_PHD("ps/2 mouse event: ", &msg[2], msg[1]);
return NOTIFY_STOP;
case NVEC_PS2:
- if (msg[2] == 1)
+ if (msg[2] == 1) {
for (i = 0; i < (msg[1] - 2); i++)
serio_interrupt(ps2_dev.ser_dev, msg[i + 4], 0);
- else if (msg[1] != 2) { /* !ack */
- print_hex_dump(KERN_WARNING, "unhandled mouse event: ",
- DUMP_PREFIX_NONE, 16, 1,
- msg, msg[1] + 2, true);
+ NVEC_PHD("ps/2 mouse reply: ", &msg[4], msg[1] - 2);
}
+ else if (msg[1] != 2) /* !ack */
+ NVEC_PHD("unhandled mouse event: ", msg, msg[1] + 2);
return NOTIFY_STOP;
}
@@ -90,10 +98,10 @@ static int __devinit nvec_mouse_probe(struct platform_device *pdev)
struct nvec_chip *nvec = dev_get_drvdata(pdev->dev.parent);
struct serio *ser_dev = kzalloc(sizeof(struct serio), GFP_KERNEL);
- ser_dev->id.type = SERIO_8042;
+ ser_dev->id.type = SERIO_PS_PSTHRU;
ser_dev->write = ps2_sendcommand;
- ser_dev->open = ps2_startstreaming;
- ser_dev->close = ps2_stopstreaming;
+ ser_dev->start = ps2_startstreaming;
+ ser_dev->stop = ps2_stopstreaming;
strlcpy(ser_dev->name, "nvec mouse", sizeof(ser_dev->name));
strlcpy(ser_dev->phys, "nvec", sizeof(ser_dev->phys));
@@ -111,8 +119,35 @@ static int __devinit nvec_mouse_probe(struct platform_device *pdev)
return 0;
}
+static int nvec_mouse_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct nvec_chip *nvec = dev_get_drvdata(pdev->dev.parent);
+
+ /* disable mouse */
+ nvec_write_async(nvec, "\x06\xf4", 2);
+
+ /* send cancel autoreceive */
+ nvec_write_async(nvec, "\x06\x04", 2);
+
+ return 0;
+}
+
+static int nvec_mouse_resume(struct platform_device *pdev)
+{
+ struct nvec_chip *nvec = dev_get_drvdata(pdev->dev.parent);
+
+ ps2_startstreaming(ps2_dev.ser_dev);
+
+ /* enable mouse */
+ nvec_write_async(nvec, "\x06\xf5", 2);
+
+ return 0;
+}
+
static struct platform_driver nvec_mouse_driver = {
.probe = nvec_mouse_probe,
+ .suspend = nvec_mouse_suspend,
+ .resume = nvec_mouse_resume,
.driver = {
.name = "nvec-mouse",
.owner = THIS_MODULE,
diff --git a/drivers/staging/octeon/ethernet-mdio.c b/drivers/staging/octeon/ethernet-mdio.c
index 63800ba71d0..e31949c9c87 100644
--- a/drivers/staging/octeon/ethernet-mdio.c
+++ b/drivers/staging/octeon/ethernet-mdio.c
@@ -164,9 +164,9 @@ int cvm_oct_phy_setup_device(struct net_device *dev)
int phy_addr = cvmx_helper_board_get_mii_address(priv->port);
if (phy_addr != -1) {
- char phy_id[20];
+ char phy_id[MII_BUS_ID_SIZE + 3];
- snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT, "0", phy_addr);
+ snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT, "mdio-octeon-0", phy_addr);
priv->phydev = phy_connect(dev, phy_id, cvm_oct_adjust_link, 0,
PHY_INTERFACE_MODE_GMII);
diff --git a/drivers/staging/omapdrm/omap_crtc.c b/drivers/staging/omapdrm/omap_crtc.c
index 17ca163e589..490a7f15604 100644
--- a/drivers/staging/omapdrm/omap_crtc.c
+++ b/drivers/staging/omapdrm/omap_crtc.c
@@ -118,29 +118,35 @@ static void omap_crtc_load_lut(struct drm_crtc *crtc)
{
}
-static void page_flip_cb(void *arg)
+static void vblank_cb(void *arg)
{
+ static uint32_t sequence = 0;
struct drm_crtc *crtc = arg;
struct drm_device *dev = crtc->dev;
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
struct drm_pending_vblank_event *event = omap_crtc->event;
- struct drm_framebuffer *old_fb = omap_crtc->old_fb;
- struct timeval now;
unsigned long flags;
+ struct timeval now;
WARN_ON(!event);
omap_crtc->event = NULL;
- omap_crtc->old_fb = NULL;
-
- omap_crtc_mode_set_base(crtc, crtc->x, crtc->y, old_fb);
/* wakeup userspace */
- /* TODO: this should happen *after* flip in vsync IRQ handler */
if (event) {
+ do_gettimeofday(&now);
+
spin_lock_irqsave(&dev->event_lock, flags);
+ /* TODO: we can't yet use the vblank time accounting,
+ * because omapdss lower layer is the one that knows
+ * the irq # and registers the handler, which more or
+ * less defeats how drm_irq works.. for now just fake
+ * the sequence number and use gettimeofday..
+ *
event->event.sequence = drm_vblank_count_and_time(
dev, omap_crtc->id, &now);
+ */
+ event->event.sequence = sequence++;
event->event.tv_sec = now.tv_sec;
event->event.tv_usec = now.tv_usec;
list_add_tail(&event->base.link,
@@ -150,6 +156,23 @@ static void page_flip_cb(void *arg)
}
}
+static void page_flip_cb(void *arg)
+{
+ struct drm_crtc *crtc = arg;
+ struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+ struct drm_framebuffer *old_fb = omap_crtc->old_fb;
+
+ omap_crtc->old_fb = NULL;
+
+ omap_crtc_mode_set_base(crtc, crtc->x, crtc->y, old_fb);
+
+ /* really we'd like to setup the callback atomically w/ setting the
+ * new scanout buffer to avoid getting stuck waiting an extra vblank
+ * cycle.. for now go for correctness and later figure out speed..
+ */
+ omap_plane_on_endwin(omap_crtc->plane, vblank_cb, crtc);
+}
+
static int omap_crtc_page_flip_locked(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_pending_vblank_event *event)
diff --git a/drivers/staging/omapdrm/omap_debugfs.c b/drivers/staging/omapdrm/omap_debugfs.c
index da920dfdc59..2f122e00b51 100644
--- a/drivers/staging/omapdrm/omap_debugfs.c
+++ b/drivers/staging/omapdrm/omap_debugfs.c
@@ -20,23 +20,118 @@
#include "omap_drv.h"
#include "omap_dmm_tiler.h"
+#include "drm_fb_helper.h"
+
+
#ifdef CONFIG_DEBUG_FS
+static int gem_show(struct seq_file *m, void *arg)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct omap_drm_private *priv = dev->dev_private;
+ int ret;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ seq_printf(m, "All Objects:\n");
+ omap_gem_describe_objects(&priv->obj_list, m);
+
+ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
+}
+
+static int mm_show(struct seq_file *m, void *arg)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ return drm_mm_dump_table(m, dev->mm_private);
+}
+
+static int fb_show(struct seq_file *m, void *arg)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct omap_drm_private *priv = dev->dev_private;
+ struct drm_framebuffer *fb;
+ int ret;
+
+ ret = mutex_lock_interruptible(&dev->mode_config.mutex);
+ if (ret)
+ return ret;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret) {
+ mutex_unlock(&dev->mode_config.mutex);
+ return ret;
+ }
+
+ seq_printf(m, "fbcon ");
+ omap_framebuffer_describe(priv->fbdev->fb, m);
+
+ list_for_each_entry(fb, &dev->mode_config.fb_list, head) {
+ if (fb == priv->fbdev->fb)
+ continue;
+
+ seq_printf(m, "user ");
+ omap_framebuffer_describe(fb, m);
+ }
+
+ mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&dev->mode_config.mutex);
+
+ return 0;
+}
+
+/* list of debugfs files that are applicable to all devices */
static struct drm_info_list omap_debugfs_list[] = {
+ {"gem", gem_show, 0},
+ {"mm", mm_show, 0},
+ {"fb", fb_show, 0},
+};
+
+/* list of debugfs files that are specific to devices with dmm/tiler */
+static struct drm_info_list omap_dmm_debugfs_list[] = {
{"tiler_map", tiler_map_show, 0},
};
int omap_debugfs_init(struct drm_minor *minor)
{
- return drm_debugfs_create_files(omap_debugfs_list,
+ struct drm_device *dev = minor->dev;
+ int ret;
+
+ ret = drm_debugfs_create_files(omap_debugfs_list,
ARRAY_SIZE(omap_debugfs_list),
minor->debugfs_root, minor);
+
+ if (ret) {
+ dev_err(dev->dev, "could not install omap_debugfs_list\n");
+ return ret;
+ }
+
+ if (dmm_is_available())
+ ret = drm_debugfs_create_files(omap_dmm_debugfs_list,
+ ARRAY_SIZE(omap_dmm_debugfs_list),
+ minor->debugfs_root, minor);
+
+ if (ret) {
+ dev_err(dev->dev, "could not install omap_dmm_debugfs_list\n");
+ return ret;
+ }
+
+ return ret;
}
void omap_debugfs_cleanup(struct drm_minor *minor)
{
drm_debugfs_remove_files(omap_debugfs_list,
ARRAY_SIZE(omap_debugfs_list), minor);
+ if (dmm_is_available())
+ drm_debugfs_remove_files(omap_dmm_debugfs_list,
+ ARRAY_SIZE(omap_dmm_debugfs_list), minor);
}
#endif
diff --git a/drivers/staging/omapdrm/omap_dmm_tiler.c b/drivers/staging/omapdrm/omap_dmm_tiler.c
index 852d9440f72..1ecb6a73d79 100644
--- a/drivers/staging/omapdrm/omap_dmm_tiler.c
+++ b/drivers/staging/omapdrm/omap_dmm_tiler.c
@@ -34,6 +34,8 @@
#include "omap_dmm_tiler.h"
#include "omap_dmm_priv.h"
+#define DMM_DRIVER_NAME "dmm"
+
/* mappings for associating views to luts */
static struct tcm *containers[TILFMT_NFORMATS];
static struct dmm *omap_dmm;
@@ -465,7 +467,12 @@ size_t tiler_vsize(enum tiler_fmt fmt, uint16_t w, uint16_t h)
return round_up(geom[fmt].cpp * w, PAGE_SIZE) * h;
}
-int omap_dmm_remove(void)
+bool dmm_is_initialized(void)
+{
+ return omap_dmm ? true : false;
+}
+
+static int omap_dmm_remove(struct platform_device *dev)
{
struct tiler_block *block, *_block;
int i;
@@ -499,40 +506,49 @@ int omap_dmm_remove(void)
if (omap_dmm->irq != -1)
free_irq(omap_dmm->irq, omap_dmm);
+ iounmap(omap_dmm->base);
kfree(omap_dmm);
+ omap_dmm = NULL;
}
return 0;
}
-int omap_dmm_init(struct drm_device *dev)
+static int omap_dmm_probe(struct platform_device *dev)
{
int ret = -EFAULT, i;
struct tcm_area area = {0};
u32 hwinfo, pat_geom, lut_table_size;
- struct omap_drm_platform_data *pdata = dev->dev->platform_data;
-
- if (!pdata || !pdata->dmm_pdata) {
- dev_err(dev->dev, "dmm platform data not present, skipping\n");
- return ret;
- }
+ struct resource *mem;
omap_dmm = kzalloc(sizeof(*omap_dmm), GFP_KERNEL);
if (!omap_dmm) {
- dev_err(dev->dev, "failed to allocate driver data section\n");
+ dev_err(&dev->dev, "failed to allocate driver data section\n");
goto fail;
}
/* lookup hwmod data - base address and irq */
- omap_dmm->base = pdata->dmm_pdata->base;
- omap_dmm->irq = pdata->dmm_pdata->irq;
- omap_dmm->dev = dev->dev;
+ mem = platform_get_resource(dev, IORESOURCE_MEM, 0);
+ if (!mem) {
+ dev_err(&dev->dev, "failed to get base address resource\n");
+ goto fail;
+ }
+
+ omap_dmm->base = ioremap(mem->start, SZ_2K);
if (!omap_dmm->base) {
- dev_err(dev->dev, "failed to get dmm base address\n");
+ dev_err(&dev->dev, "failed to get dmm base address\n");
+ goto fail;
+ }
+
+ omap_dmm->irq = platform_get_irq(dev, 0);
+ if (omap_dmm->irq < 0) {
+ dev_err(&dev->dev, "failed to get IRQ resource\n");
goto fail;
}
+ omap_dmm->dev = &dev->dev;
+
hwinfo = readl(omap_dmm->base + DMM_PAT_HWINFO);
omap_dmm->num_engines = (hwinfo >> 24) & 0x1F;
omap_dmm->num_lut = (hwinfo >> 16) & 0x1F;
@@ -556,7 +572,7 @@ int omap_dmm_init(struct drm_device *dev)
"omap_dmm_irq_handler", omap_dmm);
if (ret) {
- dev_err(dev->dev, "couldn't register IRQ %d, error %d\n",
+ dev_err(&dev->dev, "couldn't register IRQ %d, error %d\n",
omap_dmm->irq, ret);
omap_dmm->irq = -1;
goto fail;
@@ -575,25 +591,30 @@ int omap_dmm_init(struct drm_device *dev)
omap_dmm->lut = vmalloc(lut_table_size * sizeof(*omap_dmm->lut));
if (!omap_dmm->lut) {
- dev_err(dev->dev, "could not allocate lut table\n");
+ dev_err(&dev->dev, "could not allocate lut table\n");
ret = -ENOMEM;
goto fail;
}
omap_dmm->dummy_page = alloc_page(GFP_KERNEL | __GFP_DMA32);
if (!omap_dmm->dummy_page) {
- dev_err(dev->dev, "could not allocate dummy page\n");
+ dev_err(&dev->dev, "could not allocate dummy page\n");
ret = -ENOMEM;
goto fail;
}
+
+ /* set dma mask for device */
+ /* NOTE: this is a workaround for the hwmod not initializing properly */
+ dev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+
omap_dmm->dummy_pa = page_to_phys(omap_dmm->dummy_page);
/* alloc refill memory */
- omap_dmm->refill_va = dma_alloc_coherent(dev->dev,
+ omap_dmm->refill_va = dma_alloc_coherent(&dev->dev,
REFILL_BUFFER_SIZE * omap_dmm->num_engines,
&omap_dmm->refill_pa, GFP_KERNEL);
if (!omap_dmm->refill_va) {
- dev_err(dev->dev, "could not allocate refill memory\n");
+ dev_err(&dev->dev, "could not allocate refill memory\n");
goto fail;
}
@@ -602,7 +623,7 @@ int omap_dmm_init(struct drm_device *dev)
omap_dmm->num_engines * sizeof(struct refill_engine),
GFP_KERNEL);
if (!omap_dmm->engines) {
- dev_err(dev->dev, "could not allocate engines\n");
+ dev_err(&dev->dev, "could not allocate engines\n");
ret = -ENOMEM;
goto fail;
}
@@ -624,7 +645,7 @@ int omap_dmm_init(struct drm_device *dev)
omap_dmm->tcm = kzalloc(omap_dmm->num_lut * sizeof(*omap_dmm->tcm),
GFP_KERNEL);
if (!omap_dmm->tcm) {
- dev_err(dev->dev, "failed to allocate lut ptrs\n");
+ dev_err(&dev->dev, "failed to allocate lut ptrs\n");
ret = -ENOMEM;
goto fail;
}
@@ -636,7 +657,7 @@ int omap_dmm_init(struct drm_device *dev)
NULL);
if (!omap_dmm->tcm[i]) {
- dev_err(dev->dev, "failed to allocate container\n");
+ dev_err(&dev->dev, "failed to allocate container\n");
ret = -ENOMEM;
goto fail;
}
@@ -676,7 +697,7 @@ int omap_dmm_init(struct drm_device *dev)
return 0;
fail:
- omap_dmm_remove();
+ omap_dmm_remove(dev);
return ret;
}
@@ -766,10 +787,18 @@ int tiler_map_show(struct seq_file *s, void *arg)
const char *a2d = special;
const char *m2dp = m2d, *a2dp = a2d;
char nice[128];
- int h_adj = omap_dmm->lut_height / ydiv;
- int w_adj = omap_dmm->lut_width / xdiv;
+ int h_adj;
+ int w_adj;
unsigned long flags;
+ if (!omap_dmm) {
+ /* early return if dmm/tiler device is not initialized */
+ return 0;
+ }
+
+ h_adj = omap_dmm->lut_height / ydiv;
+ w_adj = omap_dmm->lut_width / xdiv;
+
map = kzalloc(h_adj * sizeof(*map), GFP_KERNEL);
global_map = kzalloc((w_adj + 1) * h_adj, GFP_KERNEL);
@@ -828,3 +857,17 @@ error:
return 0;
}
#endif
+
+struct platform_driver omap_dmm_driver = {
+ .probe = omap_dmm_probe,
+ .remove = omap_dmm_remove,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = DMM_DRIVER_NAME,
+ },
+};
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Andy Gross <andy.gross@ti.com>");
+MODULE_DESCRIPTION("OMAP DMM/Tiler Driver");
+MODULE_ALIAS("platform:" DMM_DRIVER_NAME);
diff --git a/drivers/staging/omapdrm/omap_dmm_tiler.h b/drivers/staging/omapdrm/omap_dmm_tiler.h
index f87cb657d68..7b1052a329e 100644
--- a/drivers/staging/omapdrm/omap_dmm_tiler.h
+++ b/drivers/staging/omapdrm/omap_dmm_tiler.h
@@ -16,6 +16,7 @@
#ifndef OMAP_DMM_TILER_H
#define OMAP_DMM_TILER_H
+#include <plat/cpu.h>
#include "omap_drv.h"
#include "tcm.h"
@@ -72,10 +73,6 @@ struct tiler_block {
#define TIL_ADDR(x, orient, a)\
((u32) (x) | (orient) | ((a) << SHIFT_ACC_MODE))
-/* externally accessible functions */
-int omap_dmm_init(struct drm_device *dev);
-int omap_dmm_remove(void);
-
#ifdef CONFIG_DEBUG_FS
int tiler_map_show(struct seq_file *s, void *arg);
#endif
@@ -97,7 +94,9 @@ uint32_t tiler_stride(enum tiler_fmt fmt);
size_t tiler_size(enum tiler_fmt fmt, uint16_t w, uint16_t h);
size_t tiler_vsize(enum tiler_fmt fmt, uint16_t w, uint16_t h);
void tiler_align(enum tiler_fmt fmt, uint16_t *w, uint16_t *h);
+bool dmm_is_initialized(void);
+extern struct platform_driver omap_dmm_driver;
/* GEM bo flags -> tiler fmt */
static inline enum tiler_fmt gem2fmt(uint32_t flags)
@@ -127,9 +126,9 @@ static inline bool validfmt(enum tiler_fmt fmt)
}
}
-struct omap_dmm_platform_data {
- void __iomem *base;
- int irq;
-};
+static inline int dmm_is_available(void)
+{
+ return cpu_is_omap44xx();
+}
#endif
diff --git a/drivers/staging/omapdrm/omap_drv.c b/drivers/staging/omapdrm/omap_drv.c
index 3bbea9aac40..3df5b4c58ec 100644
--- a/drivers/staging/omapdrm/omap_drv.c
+++ b/drivers/staging/omapdrm/omap_drv.c
@@ -21,6 +21,7 @@
#include "drm_crtc_helper.h"
#include "drm_fb_helper.h"
+#include "omap_dmm_tiler.h"
#define DRIVER_NAME MODULE_NAME
#define DRIVER_DESC "OMAP DRM"
@@ -570,6 +571,11 @@ static int dev_load(struct drm_device *dev, unsigned long flags)
dev->dev_private = priv;
+ priv->wq = alloc_workqueue("omapdrm",
+ WQ_UNBOUND | WQ_NON_REENTRANT, 1);
+
+ INIT_LIST_HEAD(&priv->obj_list);
+
omap_gem_init(dev);
ret = omap_modeset_init(dev);
@@ -598,6 +604,8 @@ static int dev_load(struct drm_device *dev, unsigned long flags)
static int dev_unload(struct drm_device *dev)
{
+ struct omap_drm_private *priv = dev->dev_private;
+
DBG("unload: dev=%p", dev);
drm_vblank_cleanup(dev);
@@ -607,6 +615,9 @@ static int dev_unload(struct drm_device *dev)
omap_modeset_free(dev);
omap_gem_deinit(dev);
+ flush_workqueue(priv->wq);
+ destroy_workqueue(priv->wq);
+
kfree(dev->dev_private);
dev->dev_private = NULL;
@@ -792,6 +803,9 @@ static void pdev_shutdown(struct platform_device *device)
static int pdev_probe(struct platform_device *device)
{
DBG("%s", device->name);
+ if (platform_driver_register(&omap_dmm_driver))
+ dev_err(&device->dev, "DMM registration failed\n");
+
return drm_platform_init(&omap_drm_driver, device);
}
@@ -799,6 +813,8 @@ static int pdev_remove(struct platform_device *device)
{
DBG("");
drm_platform_exit(&omap_drm_driver, device);
+
+ platform_driver_unregister(&omap_dmm_driver);
return 0;
}
diff --git a/drivers/staging/omapdrm/omap_drv.h b/drivers/staging/omapdrm/omap_drv.h
index 61fe022dda5..b7e0f077300 100644
--- a/drivers/staging/omapdrm/omap_drv.h
+++ b/drivers/staging/omapdrm/omap_drv.h
@@ -42,21 +42,31 @@
struct omap_drm_private {
unsigned int num_crtcs;
struct drm_crtc *crtcs[8];
+
unsigned int num_planes;
struct drm_plane *planes[8];
+
unsigned int num_encoders;
struct drm_encoder *encoders[8];
+
unsigned int num_connectors;
struct drm_connector *connectors[8];
struct drm_fb_helper *fbdev;
+ struct workqueue_struct *wq;
+
+ struct list_head obj_list;
+
bool has_dmm;
};
#ifdef CONFIG_DEBUG_FS
int omap_debugfs_init(struct drm_minor *minor);
void omap_debugfs_cleanup(struct drm_minor *minor);
+void omap_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m);
+void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m);
+void omap_gem_describe_objects(struct list_head *list, struct seq_file *m);
#endif
struct drm_fb_helper *omap_fbdev_init(struct drm_device *dev);
@@ -75,6 +85,8 @@ int omap_plane_mode_set(struct drm_plane *plane,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t src_x, uint32_t src_y,
uint32_t src_w, uint32_t src_h);
+void omap_plane_on_endwin(struct drm_plane *plane,
+ void (*fxn)(void *), void *arg);
struct drm_encoder *omap_encoder_init(struct drm_device *dev,
struct omap_overlay_manager *mgr);
@@ -92,13 +104,16 @@ void omap_connector_mode_set(struct drm_connector *connector,
void omap_connector_flush(struct drm_connector *connector,
int x, int y, int w, int h);
+uint32_t omap_framebuffer_get_formats(uint32_t *pixel_formats,
+ uint32_t max_formats, enum omap_color_mode supported_modes);
struct drm_framebuffer *omap_framebuffer_create(struct drm_device *dev,
struct drm_file *file, struct drm_mode_fb_cmd2 *mode_cmd);
struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos);
struct drm_gem_object *omap_framebuffer_bo(struct drm_framebuffer *fb, int p);
-int omap_framebuffer_pin(struct drm_framebuffer *fb);
-void omap_framebuffer_unpin(struct drm_framebuffer *fb);
+int omap_framebuffer_replace(struct drm_framebuffer *a,
+ struct drm_framebuffer *b, void *arg,
+ void (*unpin)(void *arg, struct drm_gem_object *bo));
void omap_framebuffer_update_scanout(struct drm_framebuffer *fb, int x, int y,
struct omap_overlay_info *info);
struct drm_connector *omap_framebuffer_get_next_connector(
diff --git a/drivers/staging/omapdrm/omap_fb.c b/drivers/staging/omapdrm/omap_fb.c
index d021a7ec58d..04b235b6724 100644
--- a/drivers/staging/omapdrm/omap_fb.c
+++ b/drivers/staging/omapdrm/omap_fb.c
@@ -59,6 +59,20 @@ static const struct format formats[] = {
{ OMAP_DSS_COLOR_UYVY, DRM_FORMAT_UYVY, {{2, 1}}, true },
};
+/* convert from overlay's pixel formats bitmask to an array of fourcc's */
+uint32_t omap_framebuffer_get_formats(uint32_t *pixel_formats,
+ uint32_t max_formats, enum omap_color_mode supported_modes)
+{
+ uint32_t nformats = 0;
+ int i = 0;
+
+ for (i = 0; i < ARRAY_SIZE(formats) && nformats < max_formats; i++)
+ if (formats[i].dss_format & supported_modes)
+ pixel_formats[nformats++] = formats[i].pixel_format;
+
+ return nformats;
+}
+
/* per-plane info for the fb: */
struct plane {
struct drm_gem_object *bo;
@@ -87,7 +101,7 @@ static int omap_framebuffer_create_handle(struct drm_framebuffer *fb,
static void omap_framebuffer_destroy(struct drm_framebuffer *fb)
{
struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
- int i, n = drm_format_num_planes(omap_fb->format->pixel_format);
+ int i, n = drm_format_num_planes(fb->pixel_format);
DBG("destroy: FB ID: %d (%p)", fb->base.id, fb);
@@ -123,41 +137,6 @@ static const struct drm_framebuffer_funcs omap_framebuffer_funcs = {
.dirty = omap_framebuffer_dirty,
};
-/* pins buffer in preparation for scanout */
-int omap_framebuffer_pin(struct drm_framebuffer *fb)
-{
- struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
- int ret, i, n = drm_format_num_planes(omap_fb->format->pixel_format);
-
- for (i = 0; i < n; i++) {
- struct plane *plane = &omap_fb->planes[i];
- ret = omap_gem_get_paddr(plane->bo, &plane->paddr, true);
- if (ret)
- goto fail;
- }
-
- return 0;
-
-fail:
- while (--i > 0) {
- struct plane *plane = &omap_fb->planes[i];
- omap_gem_put_paddr(plane->bo);
- }
- return ret;
-}
-
-/* releases buffer when done with scanout */
-void omap_framebuffer_unpin(struct drm_framebuffer *fb)
-{
- struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
- int i, n = drm_format_num_planes(omap_fb->format->pixel_format);
-
- for (i = 0; i < n; i++) {
- struct plane *plane = &omap_fb->planes[i];
- omap_gem_put_paddr(plane->bo);
- }
-}
-
/* update ovl info for scanout, handles cases of multi-planar fb's, etc.
*/
void omap_framebuffer_update_scanout(struct drm_framebuffer *fb, int x, int y,
@@ -187,10 +166,59 @@ void omap_framebuffer_update_scanout(struct drm_framebuffer *fb, int x, int y,
}
}
+/* Call to unpin 'a' (if not NULL) and pin 'b' (if not NULL). Buffers to
+ * unpin are just pushed to the unpin fifo so that the caller can defer
+ * the unpin until vblank.
+ *
+ * Note that if this fails (i.e. something went very wrong!), all buffers
+ * are unpinned, and the caller disables the overlay. We could have tried
+ * to revert back to the previous set of pinned buffers, but if things are
+ * hosed there is no guarantee that would succeed.
+ */
+int omap_framebuffer_replace(struct drm_framebuffer *a,
+ struct drm_framebuffer *b, void *arg,
+ void (*unpin)(void *arg, struct drm_gem_object *bo))
+{
+ int ret = 0, i, na, nb;
+ struct omap_framebuffer *ofba = to_omap_framebuffer(a);
+ struct omap_framebuffer *ofbb = to_omap_framebuffer(b);
+
+ na = a ? drm_format_num_planes(a->pixel_format) : 0;
+ nb = b ? drm_format_num_planes(b->pixel_format) : 0;
+
+ for (i = 0; i < max(na, nb); i++) {
+ struct plane *pa, *pb;
+
+ pa = (i < na) ? &ofba->planes[i] : NULL;
+ pb = (i < nb) ? &ofbb->planes[i] : NULL;
+
+ if (pa) {
+ unpin(arg, pa->bo);
+ pa->paddr = 0;
+ }
+
+ if (pb && !ret)
+ ret = omap_gem_get_paddr(pb->bo, &pb->paddr, true);
+ }
+
+ if (ret) {
+ /* something went wrong.. unpin what has been pinned */
+ for (i = 0; i < nb; i++) {
+ struct plane *pb = &ofbb->planes[i];
+ if (pb->paddr) {
+ unpin(arg, pb->bo);
+ pb->paddr = 0;
+ }
+ }
+ }
+
+ return ret;
+}
+
struct drm_gem_object *omap_framebuffer_bo(struct drm_framebuffer *fb, int p)
{
struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
- if (p >= drm_format_num_planes(omap_fb->format->pixel_format))
+ if (p >= drm_format_num_planes(fb->pixel_format))
return NULL;
return omap_fb->planes[p].bo;
}
@@ -249,6 +277,24 @@ void omap_framebuffer_flush(struct drm_framebuffer *fb,
}
}
+#ifdef CONFIG_DEBUG_FS
+void omap_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m)
+{
+ struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
+ int i, n = drm_format_num_planes(fb->pixel_format);
+
+ seq_printf(m, "fb: %dx%d@%4.4s\n", fb->width, fb->height,
+ (char *)&fb->pixel_format);
+
+ for (i = 0; i < n; i++) {
+ struct plane *plane = &omap_fb->planes[i];
+ seq_printf(m, " %d: offset=%d pitch=%d, obj: ",
+ i, plane->offset, plane->pitch);
+ omap_gem_describe(plane->bo, m);
+ }
+}
+#endif
+
struct drm_framebuffer *omap_framebuffer_create(struct drm_device *dev,
struct drm_file *file, struct drm_mode_fb_cmd2 *mode_cmd)
{
@@ -337,8 +383,8 @@ struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
plane->bo = bos[i];
plane->offset = mode_cmd->offsets[i];
- plane->pitch = mode_cmd->pitches[i];
- plane->paddr = pitch;
+ plane->pitch = pitch;
+ plane->paddr = 0;
}
drm_helper_mode_fill_fb_struct(fb, mode_cmd);
diff --git a/drivers/staging/omapdrm/omap_fbdev.c b/drivers/staging/omapdrm/omap_fbdev.c
index 96940bbfc6f..11acd4c35ed 100644
--- a/drivers/staging/omapdrm/omap_fbdev.c
+++ b/drivers/staging/omapdrm/omap_fbdev.c
@@ -37,6 +37,9 @@ struct omap_fbdev {
struct drm_framebuffer *fb;
struct drm_gem_object *bo;
bool ywrap_enabled;
+
+ /* for deferred dmm roll when getting called in atomic ctx */
+ struct work_struct work;
};
static void omap_fbdev_flush(struct fb_info *fbi, int x, int y, int w, int h);
@@ -75,12 +78,22 @@ static void omap_fbdev_imageblit(struct fb_info *fbi,
image->width, image->height);
}
+static void pan_worker(struct work_struct *work)
+{
+ struct omap_fbdev *fbdev = container_of(work, struct omap_fbdev, work);
+ struct fb_info *fbi = fbdev->base.fbdev;
+ int npages;
+
+ /* DMM roll shifts in 4K pages: */
+ npages = fbi->fix.line_length >> PAGE_SHIFT;
+ omap_gem_roll(fbdev->bo, fbi->var.yoffset * npages);
+}
+
static int omap_fbdev_pan_display(struct fb_var_screeninfo *var,
struct fb_info *fbi)
{
struct drm_fb_helper *helper = get_fb(fbi);
struct omap_fbdev *fbdev = to_omap_fbdev(helper);
- int npages;
if (!helper)
goto fallback;
@@ -88,9 +101,12 @@ static int omap_fbdev_pan_display(struct fb_var_screeninfo *var,
if (!fbdev->ywrap_enabled)
goto fallback;
- /* DMM roll shifts in 4K pages: */
- npages = fbi->fix.line_length >> PAGE_SHIFT;
- omap_gem_roll(fbdev->bo, var->yoffset * npages);
+ if (drm_can_sleep()) {
+ pan_worker(&fbdev->work);
+ } else {
+ struct omap_drm_private *priv = helper->dev->dev_private;
+ queue_work(priv->wq, &fbdev->work);
+ }
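+ /*
+ * Editorial note: fbcon may call pan_display from atomic context
+ * (e.g. under console locks), so when sleeping is not allowed the
+ * DMM roll is deferred to the driver workqueue rather than being
+ * silently skipped, as the removed in_atomic() check in
+ * omap_gem_roll() used to do.
+ */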
return 0;
@@ -336,6 +352,8 @@ struct drm_fb_helper *omap_fbdev_init(struct drm_device *dev)
goto fail;
}
+ INIT_WORK(&fbdev->work, pan_worker);
+
helper = &fbdev->base;
helper->funcs = &omap_fb_helper_funcs;
diff --git a/drivers/staging/omapdrm/omap_gem.c b/drivers/staging/omapdrm/omap_gem.c
index b7d6f886c5c..921f058cc6a 100644
--- a/drivers/staging/omapdrm/omap_gem.c
+++ b/drivers/staging/omapdrm/omap_gem.c
@@ -45,6 +45,8 @@ int _drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size);
struct omap_gem_object {
struct drm_gem_object base;
+ struct list_head mm_list;
+
uint32_t flags;
/** width/height for tiled formats (rounded up to slot boundaries) */
@@ -151,10 +153,23 @@ static void evict_entry(struct drm_gem_object *obj,
enum tiler_fmt fmt, struct usergart_entry *entry)
{
if (obj->dev->dev_mapping) {
- size_t size = PAGE_SIZE * usergart[fmt].height;
+ struct omap_gem_object *omap_obj = to_omap_bo(obj);
+ int n = usergart[fmt].height;
+ size_t size = PAGE_SIZE * n;
loff_t off = mmap_offset(obj) +
(entry->obj_pgoff << PAGE_SHIFT);
- unmap_mapping_range(obj->dev->dev_mapping, off, size, 1);
+ const int m = 1 + ((omap_obj->width << fmt) / PAGE_SIZE);
+ if (m > 1) {
+ int i;
+ /* if stride > PAGE_SIZE then sparse mapping: */
+ for (i = n; i > 0; i--) {
+ unmap_mapping_range(obj->dev->dev_mapping,
+ off, PAGE_SIZE, 1);
+ off += PAGE_SIZE * m;
+ }
+ } else {
+ unmap_mapping_range(obj->dev->dev_mapping, off, size, 1);
+ }
}
entry->obj = NULL;
@@ -254,13 +269,17 @@ static void omap_gem_detach_pages(struct drm_gem_object *obj)
/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
+ struct drm_device *dev = obj->dev;
+
+ WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
if (!obj->map_list.map) {
/* Make it mmapable */
size_t size = omap_gem_mmap_size(obj);
int ret = _drm_gem_create_mmap_offset_size(obj, size);
if (ret) {
- dev_err(obj->dev->dev, "could not allocate mmap offset");
+ dev_err(dev->dev, "could not allocate mmap offset\n");
return 0;
}
}
@@ -336,26 +355,39 @@ static int fault_2d(struct drm_gem_object *obj,
void __user *vaddr;
int i, ret, slots;
- if (!usergart)
- return -EFAULT;
-
- /* TODO: this fxn might need a bit tweaking to deal w/ tiled buffers
- * that are wider than 4kb
+ /*
+ * Note the height of the slot is also equal to the number of pages
+ * that need to be mapped in to fill a 4kb-wide CPU page. If the slot
+ * height is 64, then 64 pages fill a 4kb wide by 64 row region.
+ */
+ const int n = usergart[fmt].height;
+ const int n_shift = usergart[fmt].height_shift;
+
+ /*
+ * If buffer width in bytes > PAGE_SIZE then the virtual stride is
+ * rounded up to the next multiple of PAGE_SIZE.. this needs to be
+ * taken into account in some of the math, so figure out the virtual
+ * stride in pages.
*/
+ const int m = 1 + ((omap_obj->width << fmt) / PAGE_SIZE);
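+ /*
+ * Editorial example (assumed values): a 5000-pixel-wide
+ * TILFMT_16BIT buffer has 10000-byte rows, so
+ * m = 1 + (10000 / 4096) = 3 pages of virtual stride; rows that
+ * fit within one 4kb page always give m = 1.
+ */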
/* We don't use vmf->pgoff since that has the fake offset: */
pgoff = ((unsigned long)vmf->virtual_address -
vma->vm_start) >> PAGE_SHIFT;
- /* actual address we start mapping at is rounded down to previous slot
+ /*
+ * Actual address we start mapping at is rounded down to previous slot
* boundary in the y direction:
*/
- base_pgoff = round_down(pgoff, usergart[fmt].height);
- vaddr = vmf->virtual_address - ((pgoff - base_pgoff) << PAGE_SHIFT);
- entry = &usergart[fmt].entry[usergart[fmt].last];
+ base_pgoff = round_down(pgoff, m << n_shift);
+ /* figure out buffer width in slots */
slots = omap_obj->width >> usergart[fmt].slot_shift;
+ vaddr = vmf->virtual_address - ((pgoff - base_pgoff) << PAGE_SHIFT);
+
+ entry = &usergart[fmt].entry[usergart[fmt].last];
+
/* evict previous buffer using this usergart entry, if any: */
if (entry->obj)
evict_entry(entry->obj, fmt, entry);
@@ -363,23 +395,30 @@ static int fault_2d(struct drm_gem_object *obj,
entry->obj = obj;
entry->obj_pgoff = base_pgoff;
- /* now convert base_pgoff to phys offset from virt offset:
- */
- base_pgoff = (base_pgoff >> usergart[fmt].height_shift) * slots;
-
- /* map in pages. Note the height of the slot is also equal to the
- * number of pages that need to be mapped in to fill 4kb wide CPU page.
- * If the height is 64, then 64 pages fill a 4kb wide by 64 row region.
- * Beyond the valid pixel part of the buffer, we set pages[i] to NULL to
- * get a dummy page mapped in.. if someone reads/writes it they will get
- * random/undefined content, but at least it won't be corrupting
- * whatever other random page used to be mapped in, or other undefined
- * behavior.
+ /* now convert base_pgoff to phys offset from virt offset: */
+ base_pgoff = (base_pgoff >> n_shift) * slots;
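+ /*
+ * Editorial example (assumed n = 64, so n_shift = 6, and slots = 4):
+ * virtual page offset 128 lies two slot-rows down, so it becomes
+ * index (128 >> 6) * 4 = 8 into the pages[] array.
+ */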
+
+ /* for wider-than 4k.. figure out which part of the slot-row we want: */
+ if (m > 1) {
+ int off = pgoff % m;
+ entry->obj_pgoff += off;
+ base_pgoff /= m;
+ slots = min(slots - (off << n_shift), n);
+ base_pgoff += off << n_shift;
+ vaddr += off << PAGE_SHIFT;
+ }
+
+ /*
+ * Map in pages. Beyond the valid pixel part of the buffer, we set
+ * pages[i] to NULL to get a dummy page mapped in.. if someone
+ * reads/writes it they will get random/undefined content, but at
+ * least it won't be corrupting whatever other random page used to
+ * be mapped in, or other undefined behavior.
*/
memcpy(pages, &omap_obj->pages[base_pgoff],
sizeof(struct page *) * slots);
memset(pages + slots, 0,
- sizeof(struct page *) * (usergart[fmt].height - slots));
+ sizeof(struct page *) * (n - slots));
ret = tiler_pin(entry->block, pages, ARRAY_SIZE(pages), 0, true);
if (ret) {
@@ -387,16 +426,15 @@ static int fault_2d(struct drm_gem_object *obj,
return ret;
}
- i = usergart[fmt].height;
pfn = entry->paddr >> PAGE_SHIFT;
VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
pfn, pfn << PAGE_SHIFT);
- while (i--) {
+ for (i = n; i > 0; i--) {
vm_insert_mixed(vma, (unsigned long)vaddr, pfn);
pfn += usergart[fmt].stride_pfn;
- vaddr += PAGE_SIZE;
+ vaddr += PAGE_SIZE * m;
}
/* simple round-robin: */
@@ -566,6 +604,8 @@ fail:
/* Set scrolling position. This allows us to implement fast scrolling
* for console.
+ *
+ * Call only from non-atomic contexts.
*/
int omap_gem_roll(struct drm_gem_object *obj, uint32_t roll)
{
@@ -580,18 +620,6 @@ int omap_gem_roll(struct drm_gem_object *obj, uint32_t roll)
omap_obj->roll = roll;
- if (in_atomic() || mutex_is_locked(&obj->dev->struct_mutex)) {
- /* this can get called from fbcon in atomic context.. so
- * just ignore it and wait for next time called from
- * interruptible context to update the PAT.. the result
- * may be that user sees wrap-around instead of scrolling
- * momentarily on the screen. If we wanted to be fancier
- * we could perhaps schedule some workqueue work at this
- * point.
- */
- return 0;
- }
-
mutex_lock(&obj->dev->struct_mutex);
/* if we aren't mapped yet, we don't need to do anything */
@@ -774,6 +802,56 @@ void *omap_gem_vaddr(struct drm_gem_object *obj)
return omap_obj->vaddr;
}
+#ifdef CONFIG_DEBUG_FS
+void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
+{
+ struct drm_device *dev = obj->dev;
+ struct omap_gem_object *omap_obj = to_omap_bo(obj);
+ uint64_t off = 0;
+
+ WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
+ if (obj->map_list.map)
+ off = (uint64_t)obj->map_list.hash.key;
+
+ seq_printf(m, "%08x: %2d (%2d) %08llx %08Zx (%2d) %p %4d",
+ omap_obj->flags, obj->name, obj->refcount.refcount.counter,
+ off, omap_obj->paddr, omap_obj->paddr_cnt,
+ omap_obj->vaddr, omap_obj->roll);
+
+ if (omap_obj->flags & OMAP_BO_TILED) {
+ seq_printf(m, " %dx%d", omap_obj->width, omap_obj->height);
+ if (omap_obj->block) {
+ struct tcm_area *area = &omap_obj->block->area;
+ seq_printf(m, " (%dx%d, %dx%d)",
+ area->p0.x, area->p0.y,
+ area->p1.x, area->p1.y);
+ }
+ } else {
+ seq_printf(m, " %d", obj->size);
+ }
+
+ seq_printf(m, "\n");
+}
+
+void omap_gem_describe_objects(struct list_head *list, struct seq_file *m)
+{
+ struct omap_gem_object *omap_obj;
+ int count = 0;
+ size_t size = 0;
+
+ list_for_each_entry(omap_obj, list, mm_list) {
+ struct drm_gem_object *obj = &omap_obj->base;
+ seq_printf(m, " ");
+ omap_gem_describe(obj, m);
+ count++;
+ size += obj->size;
+ }
+
+ seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
+}
+#endif
+
/* Buffer Synchronization:
*/
@@ -1040,6 +1118,10 @@ void omap_gem_free_object(struct drm_gem_object *obj)
evict(obj);
+ WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
+ list_del(&omap_obj->mm_list);
+
if (obj->map_list.map) {
drm_gem_free_mmap_offset(obj);
}
@@ -1140,6 +1222,8 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev,
goto fail;
}
+ list_add(&omap_obj->mm_list, &priv->obj_list);
+
obj = &omap_obj->base;
if ((flags & OMAP_BO_SCANOUT) && !priv->has_dmm) {
@@ -1186,12 +1270,11 @@ void omap_gem_init(struct drm_device *dev)
const enum tiler_fmt fmts[] = {
TILFMT_8BIT, TILFMT_16BIT, TILFMT_32BIT
};
- int i, j, ret;
+ int i, j;
- ret = omap_dmm_init(dev);
- if (ret) {
+ if (!dmm_is_initialized()) {
/* DMM only supported on OMAP4 and later, so this isn't fatal */
- dev_warn(dev->dev, "omap_dmm_init failed, disabling DMM\n");
+ dev_warn(dev->dev, "DMM not available, disable DMM support\n");
return;
}
@@ -1241,6 +1324,5 @@ void omap_gem_deinit(struct drm_device *dev)
/* I believe we can rely on there being no more outstanding GEM
* objects which could depend on usergart/dmm at this point.
*/
- omap_dmm_remove();
kfree(usergart);
}
diff --git a/drivers/staging/omapdrm/omap_gem_helpers.c b/drivers/staging/omapdrm/omap_gem_helpers.c
index 29275c7209e..f895363a5e5 100644
--- a/drivers/staging/omapdrm/omap_gem_helpers.c
+++ b/drivers/staging/omapdrm/omap_gem_helpers.c
@@ -84,7 +84,7 @@ fail:
page_cache_release(pages[i]);
}
drm_free_large(pages);
- return ERR_PTR(PTR_ERR(p));
+ return ERR_CAST(p);
}
/**
diff --git a/drivers/staging/omapdrm/omap_plane.c b/drivers/staging/omapdrm/omap_plane.c
index 97909124a1f..7997be74010 100644
--- a/drivers/staging/omapdrm/omap_plane.c
+++ b/drivers/staging/omapdrm/omap_plane.c
@@ -17,6 +17,8 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/kfifo.h>
+
#include "omap_drv.h"
/* some hackery because omapdss has an 'enum omap_plane' (which would be
@@ -29,6 +31,11 @@
* plane funcs
*/
+struct callback {
+ void (*fxn)(void *);
+ void *arg;
+};
+
#define to_omap_plane(x) container_of(x, struct omap_plane, base)
struct omap_plane {
@@ -43,8 +50,84 @@ struct omap_plane {
/* last fb that we pinned: */
struct drm_framebuffer *pinned_fb;
+
+ uint32_t nformats;
+ uint32_t formats[32];
+
+ /* for synchronizing access to unpins fifo */
+ struct mutex unpin_mutex;
+
+ /* set of bo's pending unpin until next END_WIN irq */
+ DECLARE_KFIFO_PTR(unpin_fifo, struct drm_gem_object *);
+ int num_unpins, pending_num_unpins;
+
+ /* for deferred unpin when we need to wait for scanout complete irq */
+ struct work_struct work;
+
+ /* callback on next endwin irq */
+ struct callback endwin;
};
+/* map from ovl->id to the irq we are interested in for scanout-done */
+static const uint32_t id2irq[] = {
+ [OMAP_DSS_GFX] = DISPC_IRQ_GFX_END_WIN,
+ [OMAP_DSS_VIDEO1] = DISPC_IRQ_VID1_END_WIN,
+ [OMAP_DSS_VIDEO2] = DISPC_IRQ_VID2_END_WIN,
+ [OMAP_DSS_VIDEO3] = DISPC_IRQ_VID3_END_WIN,
+};
+
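+/*
+ * Editorial note: one-shot scanout-done handler; it unregisters itself
+ * before queueing the work so it fires at most once per END_WIN, and it
+ * defers the actual unpin to process context, where sleeping (e.g. in
+ * omap_gem_put_paddr()) is allowed.
+ */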
+static void dispc_isr(void *arg, uint32_t mask)
+{
+ struct drm_plane *plane = arg;
+ struct omap_plane *omap_plane = to_omap_plane(plane);
+ struct omap_drm_private *priv = plane->dev->dev_private;
+
+ omap_dispc_unregister_isr(dispc_isr, plane,
+ id2irq[omap_plane->ovl->id]);
+
+ queue_work(priv->wq, &omap_plane->work);
+}
+
+static void unpin_worker(struct work_struct *work)
+{
+ struct omap_plane *omap_plane =
+ container_of(work, struct omap_plane, work);
+ struct callback endwin;
+
+ mutex_lock(&omap_plane->unpin_mutex);
+ DBG("unpinning %d of %d", omap_plane->num_unpins,
+ omap_plane->num_unpins + omap_plane->pending_num_unpins);
+ while (omap_plane->num_unpins > 0) {
+ struct drm_gem_object *bo = NULL;
+ int ret = kfifo_get(&omap_plane->unpin_fifo, &bo);
+ WARN_ON(!ret);
+ omap_gem_put_paddr(bo);
+ drm_gem_object_unreference_unlocked(bo);
+ omap_plane->num_unpins--;
+ }
+ endwin = omap_plane->endwin;
+ omap_plane->endwin.fxn = NULL;
+ mutex_unlock(&omap_plane->unpin_mutex);
+
+ if (endwin.fxn)
+ endwin.fxn(endwin.arg);
+}
+
+static void install_irq(struct drm_plane *plane)
+{
+ struct omap_plane *omap_plane = to_omap_plane(plane);
+ struct omap_overlay *ovl = omap_plane->ovl;
+ int ret;
+
+ ret = omap_dispc_register_isr(dispc_isr, plane, id2irq[ovl->id]);
+
+ /*
+ * omapdss has upper limit on # of registered irq handlers,
+ * which we shouldn't hit.. but if we do the limit should
+ * be raised or bad things happen:
+ */
+ WARN_ON(ret == -EBUSY);
+}
/* push changes down to dss2 */
static int commit(struct drm_plane *plane)
@@ -71,6 +154,11 @@ static int commit(struct drm_plane *plane)
return ret;
}
+ mutex_lock(&omap_plane->unpin_mutex);
+ omap_plane->num_unpins += omap_plane->pending_num_unpins;
+ omap_plane->pending_num_unpins = 0;
+ mutex_unlock(&omap_plane->unpin_mutex);
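+ /*
+ * Editorial note: buffers queued by unpin() stay "pending" until
+ * this commit makes their replacements visible; promoting them to
+ * num_unpins here means unpin_worker() only releases buffers whose
+ * scanout has actually been superseded.
+ */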
+
/* our encoder doesn't necessarily get a commit() after this, in
* particular in the dpms() and mode_set_base() cases, so force the
* manager to update:
@@ -83,8 +171,20 @@ static int commit(struct drm_plane *plane)
dev_err(dev->dev, "could not apply settings\n");
return ret;
}
+
+ /*
+ * NOTE: really this should be atomic w/ mgr->apply() but
+ * omapdss does not expose such an API
+ */
+ if (omap_plane->num_unpins > 0)
+ install_irq(plane);
+
+ } else {
+ struct omap_drm_private *priv = dev->dev_private;
+ queue_work(priv->wq, &omap_plane->work);
}
+
if (ovl->is_enabled(ovl)) {
omap_framebuffer_flush(plane->fb, info->pos_x, info->pos_y,
info->out_width, info->out_height);
@@ -137,21 +237,48 @@ static void update_manager(struct drm_plane *plane)
}
}
+static void unpin(void *arg, struct drm_gem_object *bo)
+{
+ struct drm_plane *plane = arg;
+ struct omap_plane *omap_plane = to_omap_plane(plane);
+
+ if (kfifo_put(&omap_plane->unpin_fifo,
+ (const struct drm_gem_object **)&bo)) {
+ omap_plane->pending_num_unpins++;
+ /* also hold a ref so it isn't free'd while pinned */
+ drm_gem_object_reference(bo);
+ } else {
+ dev_err(plane->dev->dev, "unpin fifo full!\n");
+ omap_gem_put_paddr(bo);
+ }
+}
+
/* update which fb (if any) is pinned for scanout */
static int update_pin(struct drm_plane *plane, struct drm_framebuffer *fb)
{
struct omap_plane *omap_plane = to_omap_plane(plane);
- int ret = 0;
+ struct drm_framebuffer *pinned_fb = omap_plane->pinned_fb;
+
+ if (pinned_fb != fb) {
+ int ret;
+
+ DBG("%p -> %p", pinned_fb, fb);
+
+ mutex_lock(&omap_plane->unpin_mutex);
+ ret = omap_framebuffer_replace(pinned_fb, fb, plane, unpin);
+ mutex_unlock(&omap_plane->unpin_mutex);
+
+ if (ret) {
+ dev_err(plane->dev->dev, "could not swap %p -> %p\n",
+ omap_plane->pinned_fb, fb);
+ omap_plane->pinned_fb = NULL;
+ return ret;
+ }
- if (omap_plane->pinned_fb != fb) {
- if (omap_plane->pinned_fb)
- omap_framebuffer_unpin(omap_plane->pinned_fb);
omap_plane->pinned_fb = fb;
- if (fb)
- ret = omap_framebuffer_pin(fb);
}
- return ret;
+ return 0;
}
/* update parameters that are dependent on the framebuffer dimensions and
@@ -241,6 +368,8 @@ static void omap_plane_destroy(struct drm_plane *plane)
DBG("%s", omap_plane->ovl->name);
omap_plane_disable(plane);
drm_plane_cleanup(plane);
+ WARN_ON(omap_plane->pending_num_unpins + omap_plane->num_unpins > 0);
+ kfifo_free(&omap_plane->unpin_fifo);
kfree(omap_plane);
}
@@ -258,37 +387,34 @@ int omap_plane_dpms(struct drm_plane *plane, int mode)
if (!r)
r = ovl->enable(ovl);
} else {
+ struct omap_drm_private *priv = plane->dev->dev_private;
r = ovl->disable(ovl);
update_pin(plane, NULL);
+ queue_work(priv->wq, &omap_plane->work);
}
return r;
}
+void omap_plane_on_endwin(struct drm_plane *plane,
+ void (*fxn)(void *), void *arg)
+{
+ struct omap_plane *omap_plane = to_omap_plane(plane);
+
+ mutex_lock(&omap_plane->unpin_mutex);
+ omap_plane->endwin.fxn = fxn;
+ omap_plane->endwin.arg = arg;
+ mutex_unlock(&omap_plane->unpin_mutex);
+
+ install_irq(plane);
+}
+
static const struct drm_plane_funcs omap_plane_funcs = {
.update_plane = omap_plane_update,
.disable_plane = omap_plane_disable,
.destroy = omap_plane_destroy,
};
-static const uint32_t formats[] = {
- DRM_FORMAT_RGB565,
- DRM_FORMAT_RGBX4444,
- DRM_FORMAT_XRGB4444,
- DRM_FORMAT_RGBA4444,
- DRM_FORMAT_ABGR4444,
- DRM_FORMAT_XRGB1555,
- DRM_FORMAT_ARGB1555,
- DRM_FORMAT_RGB888,
- DRM_FORMAT_RGBX8888,
- DRM_FORMAT_XRGB8888,
- DRM_FORMAT_RGBA8888,
- DRM_FORMAT_ARGB8888,
- DRM_FORMAT_NV12,
- DRM_FORMAT_YUYV,
- DRM_FORMAT_UYVY,
-};
-
/* initialize plane */
struct drm_plane *omap_plane_init(struct drm_device *dev,
struct omap_overlay *ovl, unsigned int possible_crtcs,
@@ -296,21 +422,38 @@ struct drm_plane *omap_plane_init(struct drm_device *dev,
{
struct drm_plane *plane = NULL;
struct omap_plane *omap_plane;
+ int ret;
DBG("%s: possible_crtcs=%08x, priv=%d", ovl->name,
possible_crtcs, priv);
+ /* friendly reminder to update table for future hw: */
+ WARN_ON(ovl->id >= ARRAY_SIZE(id2irq));
+
omap_plane = kzalloc(sizeof(*omap_plane), GFP_KERNEL);
if (!omap_plane) {
dev_err(dev->dev, "could not allocate plane\n");
goto fail;
}
+ mutex_init(&omap_plane->unpin_mutex);
+
+ ret = kfifo_alloc(&omap_plane->unpin_fifo, 16, GFP_KERNEL);
+ if (ret) {
+ dev_err(dev->dev, "could not allocate unpin FIFO\n");
+ goto fail;
+ }
+
+ INIT_WORK(&omap_plane->work, unpin_worker);
+
+ omap_plane->nformats = omap_framebuffer_get_formats(
+ omap_plane->formats, ARRAY_SIZE(omap_plane->formats),
+ ovl->supported_modes);
omap_plane->ovl = ovl;
plane = &omap_plane->base;
drm_plane_init(dev, plane, possible_crtcs, &omap_plane_funcs,
- formats, ARRAY_SIZE(formats), priv);
+ omap_plane->formats, omap_plane->nformats, priv);
/* get our starting configuration, set defaults for parameters
* we don't currently use, etc:
@@ -330,7 +473,7 @@ struct drm_plane *omap_plane_init(struct drm_device *dev,
if (priv)
omap_plane->info.zorder = 0;
else
- omap_plane->info.zorder = 1;
+ omap_plane->info.zorder = ovl->id;
update_manager(plane);
diff --git a/drivers/staging/ozwpan/Kbuild b/drivers/staging/ozwpan/Kbuild
new file mode 100644
index 00000000000..6cc84cb3f0a
--- /dev/null
+++ b/drivers/staging/ozwpan/Kbuild
@@ -0,0 +1,19 @@
+# -----------------------------------------------------------------------------
+# Copyright (c) 2011 Ozmo Inc
+# Released under the GNU General Public License Version 2 (GPLv2).
+# -----------------------------------------------------------------------------
+obj-$(CONFIG_USB_WPAN_HCD) += ozwpan.o
+ozwpan-y := \
+ ozmain.o \
+ ozpd.o \
+ ozusbsvc.o \
+ ozusbsvc1.o \
+ ozhcd.o \
+ ozeltbuf.o \
+ ozproto.o \
+ ozcdev.o \
+ ozurbparanoia.o \
+ oztrace.o \
+ ozevent.o
+
+
diff --git a/drivers/staging/ozwpan/Kconfig b/drivers/staging/ozwpan/Kconfig
new file mode 100644
index 00000000000..7904caec546
--- /dev/null
+++ b/drivers/staging/ozwpan/Kconfig
@@ -0,0 +1,9 @@
+config USB_WPAN_HCD
+ tristate "USB over WiFi Host Controller"
+ depends on USB && NET
+ help
+ A driver for USB Host Controllers that are compatible with
+ Ozmo Devices USB over WiFi technology.
+
+ To compile this driver as a module, choose M here: the module
+ will be called "ozwpan".
diff --git a/drivers/staging/ozwpan/README b/drivers/staging/ozwpan/README
new file mode 100644
index 00000000000..bb1a69b9454
--- /dev/null
+++ b/drivers/staging/ozwpan/README
@@ -0,0 +1,25 @@
+OZWPAN USB Host Controller Driver
+---------------------------------
+This driver is a USB HCD driver that does not have an associated physical
+device but instead uses Wi-Fi to communicate with the wireless peripheral.
+The USB requests are converted into a layer 2 network protocol and transmitted
+on the network using an ethertype (0x892e) registered to Ozmo Devices Inc.
+This driver is compatible with existing wireless devices that use Ozmo Devices
+technology.
+
+To operate, the driver must be bound to a suitable network interface. This can
+be done when the module is loaded (by specifying the name of the network
+interface as a parameter - e.g. 'insmod ozwpan g_net_dev=go0') or after loading
+using an ioctl call. See the ozappif.h file and the ioctls
+OZ_IOCTL_ADD_BINDING and OZ_IOCTL_REMOVE_BINDING.
+
+The devices that connect to the host use Wi-Fi Direct, so a network card that
+supports Wi-Fi Direct is required. A recent version (0.8.x or later) of
+wpa_supplicant can be used to set up the network interface to create a
+persistent autonomous group (for older pre-WFD peripherals) or to put it in a
+listen state to allow group negotiation to occur for more recent devices that
+support WFD.
+
+The protocol used over the network does not directly mimic the USB bus
+transactions, as this would be rather busy and inefficient. Instead, the
+chapter 9 requests are converted into a request/response pair of messages.
+(See ozprotocol.h for the data structures used in the protocol.)
diff --git a/drivers/staging/ozwpan/TODO b/drivers/staging/ozwpan/TODO
new file mode 100644
index 00000000000..f7a9c122f59
--- /dev/null
+++ b/drivers/staging/ozwpan/TODO
@@ -0,0 +1,12 @@
+TODO:
+ - review user mode interface and determine if ioctls can be replaced
+ with something better. Correctly export data structures to user mode
+ if ioctls are still required and allocate ioctl numbers from
+ ioctl-number.txt.
+ - check that the USB HCD implementation is complete and correct.
+ - remove any debug and trace code.
+ - code review by USB developer community.
+ - testing with as many devices as possible.
+
+Please send any patches for this driver to Chris Kelly <ckelly@ozmodevices.com>
+and Greg Kroah-Hartman <gregkh@linuxfoundation.org>.
diff --git a/drivers/staging/ozwpan/ozappif.h b/drivers/staging/ozwpan/ozappif.h
new file mode 100644
index 00000000000..af027329387
--- /dev/null
+++ b/drivers/staging/ozwpan/ozappif.h
@@ -0,0 +1,46 @@
+/* -----------------------------------------------------------------------------
+ * Copyright (c) 2011 Ozmo Inc
+ * Released under the GNU General Public License Version 2 (GPLv2).
+ * -----------------------------------------------------------------------------
+ */
+#ifndef _OZAPPIF_H
+#define _OZAPPIF_H
+
+#include "ozeventdef.h"
+
+#define OZ_IOCTL_MAGIC 0xf4
+
+struct oz_mac_addr {
+ unsigned char a[6];
+};
+
+#define OZ_MAX_PDS 8
+
+struct oz_pd_list {
+ int count;
+ struct oz_mac_addr addr[OZ_MAX_PDS];
+};
+
+#define OZ_MAX_BINDING_LEN 32
+
+struct oz_binding_info {
+ char name[OZ_MAX_BINDING_LEN];
+};
+
+struct oz_test {
+ int action;
+};
+
+#define OZ_IOCTL_GET_PD_LIST _IOR(OZ_IOCTL_MAGIC, 0, struct oz_pd_list)
+#define OZ_IOCTL_SET_ACTIVE_PD _IOW(OZ_IOCTL_MAGIC, 1, struct oz_mac_addr)
+#define OZ_IOCTL_GET_ACTIVE_PD _IOR(OZ_IOCTL_MAGIC, 2, struct oz_mac_addr)
+#define OZ_IOCTL_CLEAR_EVENTS _IO(OZ_IOCTL_MAGIC, 3)
+#define OZ_IOCTL_GET_EVENTS _IOR(OZ_IOCTL_MAGIC, 4, struct oz_evtlist)
+#define OZ_IOCTL_ADD_BINDING _IOW(OZ_IOCTL_MAGIC, 5, struct oz_binding_info)
+#define OZ_IOCTL_TEST _IOWR(OZ_IOCTL_MAGIC, 6, struct oz_test)
+#define OZ_IOCTL_SET_EVENT_MASK _IOW(OZ_IOCTL_MAGIC, 7, unsigned long)
+#define OZ_IOCTL_REMOVE_BINDING _IOW(OZ_IOCTL_MAGIC, 8, struct oz_binding_info)
+#define OZ_IOCTL_MAX 9
+
+
+#endif /* _OZAPPIF_H */
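Editorial aside: a minimal userspace sketch (not part of the patch) of driving
the binding ioctls defined above. The device node path /dev/ozwpan is an
assumption, and "go0" matches the example interface name from the README:

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include "ozappif.h"

int main(void)
{
	struct oz_binding_info b;
	int fd = open("/dev/ozwpan", O_RDWR);	/* assumed node path */

	if (fd < 0)
		return 1;
	memset(&b, 0, sizeof(b));
	strncpy(b.name, "go0", OZ_MAX_BINDING_LEN - 1);
	if (ioctl(fd, OZ_IOCTL_ADD_BINDING, &b) < 0)	/* bind to interface */
		return 1;
	return 0;
}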
diff --git a/drivers/staging/ozwpan/ozcdev.c b/drivers/staging/ozwpan/ozcdev.c
new file mode 100644
index 00000000000..1c380d68796
--- /dev/null
+++ b/drivers/staging/ozwpan/ozcdev.c
@@ -0,0 +1,521 @@
+/* -----------------------------------------------------------------------------
+ * Copyright (c) 2011 Ozmo Inc
+ * Released under the GNU General Public License Version 2 (GPLv2).
+ * -----------------------------------------------------------------------------
+ */
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/cdev.h>
+#include <linux/uaccess.h>
+#include <linux/netdevice.h>
+#include <linux/poll.h>
+#include <linux/sched.h>
+#include "ozconfig.h"
+#include "ozprotocol.h"
+#include "oztrace.h"
+#include "ozappif.h"
+#include "ozeltbuf.h"
+#include "ozpd.h"
+#include "ozproto.h"
+#include "ozevent.h"
+/*------------------------------------------------------------------------------
+ */
+#define OZ_RD_BUF_SZ 256
+struct oz_cdev {
+ dev_t devnum;
+ struct cdev cdev;
+ wait_queue_head_t rdq;
+ spinlock_t lock;
+ u8 active_addr[ETH_ALEN];
+ struct oz_pd *active_pd;
+};
+
+/* Per PD context for the serial service stored in the PD. */
+struct oz_serial_ctx {
+ atomic_t ref_count;
+ u8 tx_seq_num;
+ u8 rx_seq_num;
+ u8 rd_buf[OZ_RD_BUF_SZ];
+ int rd_in;
+ int rd_out;
+};
+/*------------------------------------------------------------------------------
+ */
+int g_taction;
+/*------------------------------------------------------------------------------
+ */
+static struct oz_cdev g_cdev;
+/*------------------------------------------------------------------------------
+ * Context: process and softirq
+ */
+static struct oz_serial_ctx *oz_cdev_claim_ctx(struct oz_pd *pd)
+{
+ struct oz_serial_ctx *ctx;
+ spin_lock_bh(&pd->app_lock[OZ_APPID_SERIAL-1]);
+ ctx = (struct oz_serial_ctx *)pd->app_ctx[OZ_APPID_SERIAL-1];
+ if (ctx)
+ atomic_inc(&ctx->ref_count);
+ spin_unlock_bh(&pd->app_lock[OZ_APPID_SERIAL-1]);
+ return ctx;
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq or process
+ */
+static void oz_cdev_release_ctx(struct oz_serial_ctx *ctx)
+{
+ if (atomic_dec_and_test(&ctx->ref_count)) {
+ oz_trace("Dealloc serial context.\n");
+ kfree(ctx);
+ }
+}
+/*------------------------------------------------------------------------------
+ * Context: process
+ */
+int oz_cdev_open(struct inode *inode, struct file *filp)
+{
+ struct oz_cdev *dev;
+ oz_trace("oz_cdev_open()\n");
+ oz_trace("major = %d minor = %d\n", imajor(inode), iminor(inode));
+ dev = container_of(inode->i_cdev, struct oz_cdev, cdev);
+ filp->private_data = dev;
+ return 0;
+}
+/*------------------------------------------------------------------------------
+ * Context: process
+ */
+int oz_cdev_release(struct inode *inode, struct file *filp)
+{
+ oz_trace("oz_cdev_release()\n");
+ return 0;
+}
+/*------------------------------------------------------------------------------
+ * Context: process
+ */
+ssize_t oz_cdev_read(struct file *filp, char __user *buf, size_t count,
+ loff_t *fpos)
+{
+ int n;
+ int ix;
+
+ struct oz_pd *pd;
+ struct oz_serial_ctx *ctx = 0;
+
+ spin_lock_bh(&g_cdev.lock);
+ pd = g_cdev.active_pd;
+ if (pd)
+ oz_pd_get(pd);
+ spin_unlock_bh(&g_cdev.lock);
+ if (pd == 0)
+ return -1;
+ ctx = oz_cdev_claim_ctx(pd);
+ if (ctx == 0)
+ goto out2;
+ n = ctx->rd_in - ctx->rd_out;
+ if (n < 0)
+ n += OZ_RD_BUF_SZ;
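+ /*
+ * Editorial example: rd_in = 4, rd_out = 250 gives n = -246,
+ * i.e. 256 - 246 = 10 bytes available after the wrap.
+ */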
+ if (count > n)
+ count = n;
+ ix = ctx->rd_out;
+ n = OZ_RD_BUF_SZ - ix;
+ if (n > count)
+ n = count;
+ if (copy_to_user(buf, &ctx->rd_buf[ix], n)) {
+ count = 0;
+ goto out1;
+ }
+ ix += n;
+ if (ix == OZ_RD_BUF_SZ)
+ ix = 0;
+ if (n < count) {
+ if (copy_to_user(&buf[n], ctx->rd_buf, count-n)) {
+ count = 0;
+ goto out1;
+ }
+ ix = count-n;
+ }
+ ctx->rd_out = ix;
+out1:
+ oz_cdev_release_ctx(ctx);
+out2:
+ oz_pd_put(pd);
+ return count;
+}
+/*------------------------------------------------------------------------------
+ * Context: process
+ */
+ssize_t oz_cdev_write(struct file *filp, const char __user *buf, size_t count,
+ loff_t *fpos)
+{
+ struct oz_pd *pd;
+ struct oz_elt_buf *eb;
+ struct oz_elt_info *ei = 0;
+ struct oz_elt *elt;
+ struct oz_app_hdr *app_hdr;
+ struct oz_serial_ctx *ctx;
+
+ spin_lock_bh(&g_cdev.lock);
+ pd = g_cdev.active_pd;
+ if (pd)
+ oz_pd_get(pd);
+ spin_unlock_bh(&g_cdev.lock);
+ if (pd == 0)
+ return -1;
+ eb = &pd->elt_buff;
+ ei = oz_elt_info_alloc(eb);
+ if (ei == 0) {
+ count = 0;
+ goto out;
+ }
+ elt = (struct oz_elt *)ei->data;
+ app_hdr = (struct oz_app_hdr *)(elt+1);
+ elt->length = sizeof(struct oz_app_hdr) + count;
+ elt->type = OZ_ELT_APP_DATA;
+ ei->app_id = OZ_APPID_SERIAL;
+ ei->length = elt->length + sizeof(struct oz_elt);
+ app_hdr->app_id = OZ_APPID_SERIAL;
+ if (copy_from_user(app_hdr+1, buf, count))
+ goto out;
+ spin_lock_bh(&pd->app_lock[OZ_APPID_USB-1]);
+ ctx = (struct oz_serial_ctx *)pd->app_ctx[OZ_APPID_SERIAL-1];
+ if (ctx) {
+ app_hdr->elt_seq_num = ctx->tx_seq_num++;
+ if (ctx->tx_seq_num == 0)
+ ctx->tx_seq_num = 1;
+ spin_lock(&eb->lock);
+ if (oz_queue_elt_info(eb, 0, 0, ei) == 0)
+ ei = 0;
+ spin_unlock(&eb->lock);
+ }
+ spin_unlock_bh(&pd->app_lock[OZ_APPID_USB-1]);
+out:
+ if (ei) {
+ count = 0;
+ spin_lock_bh(&eb->lock);
+ oz_elt_info_free(eb, ei);
+ spin_unlock_bh(&eb->lock);
+ }
+ oz_pd_put(pd);
+ return count;
+}
+/*------------------------------------------------------------------------------
+ * Context: process
+ */
+static int oz_set_active_pd(u8 *addr)
+{
+ int rc = 0;
+ struct oz_pd *pd;
+ struct oz_pd *old_pd;
+ pd = oz_pd_find(addr);
+ if (pd) {
+ spin_lock_bh(&g_cdev.lock);
+ memcpy(g_cdev.active_addr, addr, ETH_ALEN);
+ old_pd = g_cdev.active_pd;
+ g_cdev.active_pd = pd;
+ spin_unlock_bh(&g_cdev.lock);
+ if (old_pd)
+ oz_pd_put(old_pd);
+ } else {
+ if (!memcmp(addr, "\0\0\0\0\0\0", sizeof(addr))) {
+ spin_lock_bh(&g_cdev.lock);
+ pd = g_cdev.active_pd;
+ g_cdev.active_pd = 0;
+ memset(g_cdev.active_addr, 0,
+ sizeof(g_cdev.active_addr));
+ spin_unlock_bh(&g_cdev.lock);
+ if (pd)
+ oz_pd_put(pd);
+ } else {
+ rc = -1;
+ }
+ }
+ return rc;
+}
+/*------------------------------------------------------------------------------
+ * Context: process
+ */
+long oz_cdev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ int rc = 0;
+ if (_IOC_TYPE(cmd) != OZ_IOCTL_MAGIC)
+ return -ENOTTY;
+ if (_IOC_NR(cmd) > OZ_IOCTL_MAX)
+ return -ENOTTY;
+ if (_IOC_DIR(cmd) & _IOC_READ)
+ rc = !access_ok(VERIFY_WRITE, (void __user *)arg,
+ _IOC_SIZE(cmd));
+ else if (_IOC_DIR(cmd) & _IOC_WRITE)
+ rc = !access_ok(VERIFY_READ, (void __user *)arg,
+ _IOC_SIZE(cmd));
+ if (rc)
+ return -EFAULT;
+ switch (cmd) {
+ case OZ_IOCTL_GET_PD_LIST: {
+ struct oz_pd_list list;
+ oz_trace("OZ_IOCTL_GET_PD_LIST\n");
+ list.count = oz_get_pd_list(list.addr, OZ_MAX_PDS);
+ if (copy_to_user((void __user *)arg, &list,
+ sizeof(list)))
+ return -EFAULT;
+ }
+ break;
+ case OZ_IOCTL_SET_ACTIVE_PD: {
+ u8 addr[ETH_ALEN];
+ oz_trace("OZ_IOCTL_SET_ACTIVE_PD\n");
+ if (copy_from_user(addr, (void __user *)arg, ETH_ALEN))
+ return -EFAULT;
+ rc = oz_set_active_pd(addr);
+ }
+ break;
+ case OZ_IOCTL_GET_ACTIVE_PD: {
+ u8 addr[ETH_ALEN];
+ oz_trace("OZ_IOCTL_GET_ACTIVE_PD\n");
+ spin_lock_bh(&g_cdev.lock);
+ memcpy(addr, g_cdev.active_addr, ETH_ALEN);
+ spin_unlock_bh(&g_cdev.lock);
+ if (copy_to_user((void __user *)arg, addr, ETH_ALEN))
+ return -EFAULT;
+ }
+ break;
+#ifdef WANT_EVENT_TRACE
+ case OZ_IOCTL_CLEAR_EVENTS:
+ oz_events_clear();
+ break;
+ case OZ_IOCTL_GET_EVENTS:
+ rc = oz_events_copy((void __user *)arg);
+ break;
+ case OZ_IOCTL_SET_EVENT_MASK:
+ if (copy_from_user(&g_evt_mask, (void __user *)arg,
+ sizeof(unsigned long))) {
+ return -EFAULT;
+ }
+ break;
+#endif /* WANT_EVENT_TRACE */
+ case OZ_IOCTL_ADD_BINDING:
+ case OZ_IOCTL_REMOVE_BINDING: {
+ struct oz_binding_info b;
+ if (copy_from_user(&b, (void __user *)arg,
+ sizeof(struct oz_binding_info))) {
+ return -EFAULT;
+ }
+ /* Make sure name is null terminated. */
+ b.name[OZ_MAX_BINDING_LEN-1] = 0;
+ if (cmd == OZ_IOCTL_ADD_BINDING)
+ oz_binding_add(b.name);
+ else
+ oz_binding_remove(b.name);
+ }
+ break;
+ }
+ return rc;
+}
+/*------------------------------------------------------------------------------
+ * Context: process
+ */
+unsigned int oz_cdev_poll(struct file *filp, poll_table *wait)
+{
+ unsigned int ret = 0;
+ struct oz_cdev *dev = filp->private_data;
+ oz_trace("Poll called wait = %p\n", wait);
+ spin_lock_bh(&dev->lock);
+ if (dev->active_pd) {
+ struct oz_serial_ctx *ctx = oz_cdev_claim_ctx(dev->active_pd);
+ if (ctx) {
+ if (ctx->rd_in != ctx->rd_out)
+ ret |= POLLIN | POLLRDNORM;
+ oz_cdev_release_ctx(ctx);
+ }
+ }
+ spin_unlock_bh(&dev->lock);
+ if (wait)
+ poll_wait(filp, &dev->rdq, wait);
+ return ret;
+}
+/*------------------------------------------------------------------------------
+ */
+const struct file_operations oz_fops = {
+ .owner = THIS_MODULE,
+ .open = oz_cdev_open,
+ .release = oz_cdev_release,
+ .read = oz_cdev_read,
+ .write = oz_cdev_write,
+ .unlocked_ioctl = oz_cdev_ioctl,
+ .poll = oz_cdev_poll
+};
+/*------------------------------------------------------------------------------
+ * Context: process
+ */
+int oz_cdev_register(void)
+{
+ int err;
+ memset(&g_cdev, 0, sizeof(g_cdev));
+ err = alloc_chrdev_region(&g_cdev.devnum, 0, 1, "ozwpan");
+ if (err < 0)
+ return err;
+ oz_trace("Alloc dev number %d:%d\n", MAJOR(g_cdev.devnum),
+ MINOR(g_cdev.devnum));
+ cdev_init(&g_cdev.cdev, &oz_fops);
+ g_cdev.cdev.owner = THIS_MODULE;
+ g_cdev.cdev.ops = &oz_fops;
+ spin_lock_init(&g_cdev.lock);
+ init_waitqueue_head(&g_cdev.rdq);
+ err = cdev_add(&g_cdev.cdev, g_cdev.devnum, 1);
+ return 0;
+}
+/*------------------------------------------------------------------------------
+ * Context: process
+ */
+int oz_cdev_deregister(void)
+{
+ cdev_del(&g_cdev.cdev);
+ unregister_chrdev_region(g_cdev.devnum, 1);
+ return 0;
+}
+/*------------------------------------------------------------------------------
+ * Context: process
+ */
+int oz_cdev_init(void)
+{
+ oz_event_log(OZ_EVT_SERVICE, 1, OZ_APPID_SERIAL, 0, 0);
+ oz_app_enable(OZ_APPID_SERIAL, 1);
+ return 0;
+}
+/*------------------------------------------------------------------------------
+ * Context: process
+ */
+void oz_cdev_term(void)
+{
+ oz_event_log(OZ_EVT_SERVICE, 2, OZ_APPID_SERIAL, 0, 0);
+ oz_app_enable(OZ_APPID_SERIAL, 0);
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq-serialized
+ */
+int oz_cdev_start(struct oz_pd *pd, int resume)
+{
+ struct oz_serial_ctx *ctx;
+ struct oz_serial_ctx *old_ctx = 0;
+ oz_event_log(OZ_EVT_SERVICE, 3, OZ_APPID_SERIAL, 0, resume);
+ if (resume) {
+ oz_trace("Serial service resumed.\n");
+ return 0;
+ }
+ ctx = kzalloc(sizeof(struct oz_serial_ctx), GFP_ATOMIC);
+ if (ctx == 0)
+ return -ENOMEM;
+ atomic_set(&ctx->ref_count, 1);
+ ctx->tx_seq_num = 1;
+ spin_lock_bh(&pd->app_lock[OZ_APPID_SERIAL-1]);
+ old_ctx = pd->app_ctx[OZ_APPID_SERIAL-1];
+ if (old_ctx) {
+ spin_unlock_bh(&pd->app_lock[OZ_APPID_SERIAL-1]);
+ kfree(ctx);
+ } else {
+ pd->app_ctx[OZ_APPID_SERIAL-1] = ctx;
+ spin_unlock_bh(&pd->app_lock[OZ_APPID_SERIAL-1]);
+ }
+ spin_lock(&g_cdev.lock);
+ if ((g_cdev.active_pd == 0) &&
+ (memcmp(pd->mac_addr, g_cdev.active_addr, ETH_ALEN) == 0)) {
+ oz_pd_get(pd);
+ g_cdev.active_pd = pd;
+ oz_trace("Active PD arrived.\n");
+ }
+ spin_unlock(&g_cdev.lock);
+ oz_trace("Serial service started.\n");
+ return 0;
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq or process
+ */
+void oz_cdev_stop(struct oz_pd *pd, int pause)
+{
+ struct oz_serial_ctx *ctx;
+ oz_event_log(OZ_EVT_SERVICE, 4, OZ_APPID_SERIAL, 0, pause);
+ if (pause) {
+ oz_trace("Serial service paused.\n");
+ return;
+ }
+ spin_lock_bh(&pd->app_lock[OZ_APPID_SERIAL-1]);
+ ctx = (struct oz_serial_ctx *)pd->app_ctx[OZ_APPID_SERIAL-1];
+ pd->app_ctx[OZ_APPID_SERIAL-1] = 0;
+ spin_unlock_bh(&pd->app_lock[OZ_APPID_SERIAL-1]);
+ if (ctx)
+ oz_cdev_release_ctx(ctx);
+ spin_lock(&g_cdev.lock);
+ if (pd == g_cdev.active_pd)
+ g_cdev.active_pd = 0;
+ else
+ pd = 0;
+ spin_unlock(&g_cdev.lock);
+ if (pd) {
+ oz_pd_put(pd);
+ oz_trace("Active PD departed.\n");
+ }
+ oz_trace("Serial service stopped.\n");
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq-serialized
+ */
+void oz_cdev_rx(struct oz_pd *pd, struct oz_elt *elt)
+{
+ struct oz_serial_ctx *ctx;
+ struct oz_app_hdr *app_hdr;
+ u8 *data;
+ int len;
+ int space;
+ int copy_sz;
+ int ix;
+
+ ctx = oz_cdev_claim_ctx(pd);
+ if (ctx == 0) {
+ oz_trace("Cannot claim serial context.\n");
+ return;
+ }
+
+ app_hdr = (struct oz_app_hdr *)(elt+1);
+ /* If sequence number is non-zero then check it is not a duplicate.
+ */
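+ /*
+ * Editorial note: the subtraction is modulo 256; bit 7 of the
+ * result is set only when elt_seq_num is ahead of rx_seq_num
+ * (within a half-window of 128). E.g. with rx_seq_num = 5, a
+ * replayed elt_seq_num = 5 gives (5 - 5) & 0x80 == 0 and is
+ * rejected, while elt_seq_num = 6 gives 0x80 and is accepted.
+ */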
+ if (app_hdr->elt_seq_num != 0) {
+ if (((ctx->rx_seq_num - app_hdr->elt_seq_num) & 0x80) == 0) {
+ /* Reject duplicate element. */
+ oz_trace("Duplicate element:%02x %02x\n",
+ app_hdr->elt_seq_num, ctx->rx_seq_num);
+ goto out;
+ }
+ }
+ ctx->rx_seq_num = app_hdr->elt_seq_num;
+ len = elt->length - sizeof(struct oz_app_hdr);
+ data = ((u8 *)(elt+1)) + sizeof(struct oz_app_hdr);
+ if (len <= 0)
+ goto out;
+ space = ctx->rd_out - ctx->rd_in - 1;
+ if (space < 0)
+ space += OZ_RD_BUF_SZ;
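+ /*
+ * Editorial note: one slot is kept empty so rd_in == rd_out always
+ * means "empty", never "full"; the maximum space is therefore
+ * OZ_RD_BUF_SZ - 1.
+ */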
+ if (len > space) {
+ oz_trace("Not enough space:%d %d\n", len, space);
+ len = space;
+ }
+ ix = ctx->rd_in;
+ copy_sz = OZ_RD_BUF_SZ - ix;
+ if (copy_sz > len)
+ copy_sz = len;
+ memcpy(&ctx->rd_buf[ix], data, copy_sz);
+ len -= copy_sz;
+ ix += copy_sz;
+ if (ix == OZ_RD_BUF_SZ)
+ ix = 0;
+ if (len) {
+ memcpy(ctx->rd_buf, data+copy_sz, len);
+ ix = len;
+ }
+ ctx->rd_in = ix;
+ wake_up(&g_cdev.rdq);
+out:
+ oz_cdev_release_ctx(ctx);
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq
+ */
+void oz_cdev_heartbeat(struct oz_pd *pd)
+{
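+ /* The serial service has no periodic work to do. */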
+}
diff --git a/drivers/staging/ozwpan/ozcdev.h b/drivers/staging/ozwpan/ozcdev.h
new file mode 100644
index 00000000000..698014bb8d7
--- /dev/null
+++ b/drivers/staging/ozwpan/ozcdev.h
@@ -0,0 +1,18 @@
+/* -----------------------------------------------------------------------------
+ * Copyright (c) 2011 Ozmo Inc
+ * Released under the GNU General Public License Version 2 (GPLv2).
+ * -----------------------------------------------------------------------------
+ */
+#ifndef _OZCDEV_H
+#define _OZCDEV_H
+
+int oz_cdev_register(void);
+int oz_cdev_deregister(void);
+int oz_cdev_init(void);
+void oz_cdev_term(void);
+int oz_cdev_start(struct oz_pd *pd, int resume);
+void oz_cdev_stop(struct oz_pd *pd, int pause);
+void oz_cdev_rx(struct oz_pd *pd, struct oz_elt *elt);
+void oz_cdev_heartbeat(struct oz_pd *pd);
+
+#endif /* _OZCDEV_H */
diff --git a/drivers/staging/ozwpan/ozconfig.h b/drivers/staging/ozwpan/ozconfig.h
new file mode 100644
index 00000000000..43e6373a009
--- /dev/null
+++ b/drivers/staging/ozwpan/ozconfig.h
@@ -0,0 +1,27 @@
+/* -----------------------------------------------------------------------------
+ * Copyright (c) 2011 Ozmo Inc
+ * Released under the GNU General Public License Version 2 (GPLv2).
+ * ---------------------------------------------------------------------------*/
+#ifndef _OZCONFIG_H
+#define _OZCONFIG_H
+
+/* #define WANT_TRACE */
+#ifdef WANT_TRACE
+#define WANT_VERBOSE_TRACE
+#endif /* #ifdef WANT_TRACE */
+/* #define WANT_URB_PARANOIA */
+
+/* #define WANT_PRE_2_6_39 */
+#define WANT_EVENT_TRACE
+
+/* These defines determine what verbose trace is displayed. */
+#ifdef WANT_VERBOSE_TRACE
+/* #define WANT_TRACE_STREAM */
+/* #define WANT_TRACE_URB */
+/* #define WANT_TRACE_CTRL_DETAIL */
+#define WANT_TRACE_HUB
+/* #define WANT_TRACE_RX_FRAMES */
+/* #define WANT_TRACE_TX_FRAMES */
+#endif /* WANT_VERBOSE_TRACE */
+
+#endif /* _OZCONFIG_H */
diff --git a/drivers/staging/ozwpan/ozeltbuf.c b/drivers/staging/ozwpan/ozeltbuf.c
new file mode 100644
index 00000000000..988f522475d
--- /dev/null
+++ b/drivers/staging/ozwpan/ozeltbuf.c
@@ -0,0 +1,339 @@
+/* -----------------------------------------------------------------------------
+ * Copyright (c) 2011 Ozmo Inc
+ * Released under the GNU General Public License Version 2 (GPLv2).
+ * -----------------------------------------------------------------------------
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include "ozconfig.h"
+#include "ozprotocol.h"
+#include "ozeltbuf.h"
+#include "ozpd.h"
+#include "oztrace.h"
+/*------------------------------------------------------------------------------
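+ * Magic values stamped into each element info so that double frees and
+ * use of stale elements can be detected in oz_elt_info_alloc() and
+ * oz_elt_info_free().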
+ */
+#define OZ_ELT_INFO_MAGIC_USED 0x35791057
+#define OZ_ELT_INFO_MAGIC_FREE 0x78940102
+/*------------------------------------------------------------------------------
+ * Context: softirq-serialized
+ */
+int oz_elt_buf_init(struct oz_elt_buf *buf)
+{
+ memset(buf, 0, sizeof(struct oz_elt_buf));
+ INIT_LIST_HEAD(&buf->stream_list);
+ INIT_LIST_HEAD(&buf->order_list);
+ INIT_LIST_HEAD(&buf->isoc_list);
+ buf->max_free_elts = 32;
+ spin_lock_init(&buf->lock);
+ return 0;
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq or process
+ */
+void oz_elt_buf_term(struct oz_elt_buf *buf)
+{
+ struct list_head *e;
+ int i;
+ /* Free any elements in the order or isoc lists. */
+ for (i = 0; i < 2; i++) {
+ struct list_head *list;
+ if (i)
+ list = &buf->order_list;
+ else
+ list = &buf->isoc_list;
+ e = list->next;
+ while (e != list) {
+ struct oz_elt_info *ei =
+ container_of(e, struct oz_elt_info, link_order);
+ e = e->next;
+ kfree(ei);
+ }
+ }
+ /* Free any elements in the pool. */
+ while (buf->elt_pool) {
+ struct oz_elt_info *ei =
+ container_of(buf->elt_pool, struct oz_elt_info, link);
+ buf->elt_pool = buf->elt_pool->next;
+ kfree(ei);
+ }
+ buf->free_elts = 0;
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq or process
+ */
+struct oz_elt_info *oz_elt_info_alloc(struct oz_elt_buf *buf)
+{
+ struct oz_elt_info *ei = 0;
+ spin_lock_bh(&buf->lock);
+ if (buf->free_elts && buf->elt_pool) {
+ ei = container_of(buf->elt_pool, struct oz_elt_info, link);
+ buf->elt_pool = ei->link.next;
+ buf->free_elts--;
+ spin_unlock_bh(&buf->lock);
+ if (ei->magic != OZ_ELT_INFO_MAGIC_FREE) {
+ oz_trace("oz_elt_info_alloc: ei with bad magic: 0x%x\n",
+ ei->magic);
+ }
+ } else {
+ spin_unlock_bh(&buf->lock);
+ ei = kmalloc(sizeof(struct oz_elt_info), GFP_ATOMIC);
+ }
+ if (ei) {
+ ei->flags = 0;
+ ei->app_id = 0;
+ ei->callback = 0;
+ ei->context = 0;
+ ei->stream = 0;
+ ei->magic = OZ_ELT_INFO_MAGIC_USED;
+ INIT_LIST_HEAD(&ei->link);
+ INIT_LIST_HEAD(&ei->link_order);
+ }
+ return ei;
+}
+/*------------------------------------------------------------------------------
+ * Precondition: oz_elt_buf.lock must be held.
+ * Context: softirq or process
+ */
+void oz_elt_info_free(struct oz_elt_buf *buf, struct oz_elt_info *ei)
+{
+ if (ei) {
+ if (ei->magic == OZ_ELT_INFO_MAGIC_USED) {
+ buf->free_elts++;
+ ei->link.next = buf->elt_pool;
+ buf->elt_pool = &ei->link;
+ ei->magic = OZ_ELT_INFO_MAGIC_FREE;
+ } else {
+ oz_trace("oz_elt_info_free: bad magic ei: %p"
+ " magic: 0x%x\n",
+ ei, ei->magic);
+ }
+ }
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq
+ */
+void oz_elt_info_free_chain(struct oz_elt_buf *buf, struct list_head *list)
+{
+ struct list_head *e;
+ e = list->next;
+ spin_lock_bh(&buf->lock);
+ while (e != list) {
+ struct oz_elt_info *ei;
+ ei = container_of(e, struct oz_elt_info, link);
+ e = e->next;
+ oz_elt_info_free(buf, ei);
+ }
+ spin_unlock_bh(&buf->lock);
+}
+/*------------------------------------------------------------------------------
+ */
+int oz_elt_stream_create(struct oz_elt_buf *buf, u8 id, int max_buf_count)
+{
+ struct oz_elt_stream *st;
+
+ oz_trace("oz_elt_stream_create(0x%x)\n", id);
+
+ st = kzalloc(sizeof(struct oz_elt_stream), GFP_ATOMIC);
+ if (st == 0)
+ return -ENOMEM;
+ atomic_set(&st->ref_count, 1);
+ st->id = id;
+ st->max_buf_count = max_buf_count;
+ INIT_LIST_HEAD(&st->elt_list);
+ spin_lock_bh(&buf->lock);
+ list_add_tail(&st->link, &buf->stream_list);
+ spin_unlock_bh(&buf->lock);
+ return 0;
+}
+/*------------------------------------------------------------------------------
+ */
+int oz_elt_stream_delete(struct oz_elt_buf *buf, u8 id)
+{
+ struct list_head *e;
+ struct oz_elt_stream *st;
+ oz_trace("oz_elt_stream_delete(0x%x)\n", id);
+ spin_lock_bh(&buf->lock);
+ e = buf->stream_list.next;
+ while (e != &buf->stream_list) {
+ st = container_of(e, struct oz_elt_stream, link);
+ if (st->id == id) {
+ list_del(e);
+ break;
+ }
+ st = 0;
+ }
+ if (!st) {
+ spin_unlock_bh(&buf->lock);
+ return -1;
+ }
+ e = st->elt_list.next;
+ while (e != &st->elt_list) {
+ struct oz_elt_info *ei =
+ container_of(e, struct oz_elt_info, link);
+ e = e->next;
+ list_del_init(&ei->link);
+ list_del_init(&ei->link_order);
+ st->buf_count -= ei->length;
+ oz_trace2(OZ_TRACE_STREAM, "Stream down: %d %d %d\n",
+ st->buf_count,
+ ei->length, atomic_read(&st->ref_count));
+ oz_elt_stream_put(st);
+ oz_elt_info_free(buf, ei);
+ }
+ spin_unlock_bh(&buf->lock);
+ oz_elt_stream_put(st);
+ return 0;
+}
+/*------------------------------------------------------------------------------
+ */
+void oz_elt_stream_get(struct oz_elt_stream *st)
+{
+ atomic_inc(&st->ref_count);
+}
+/*------------------------------------------------------------------------------
+ */
+void oz_elt_stream_put(struct oz_elt_stream *st)
+{
+ if (atomic_dec_and_test(&st->ref_count)) {
+ oz_trace("Stream destroyed\n");
+ kfree(st);
+ }
+}
+/*------------------------------------------------------------------------------
+ * Precondition: Element buffer lock must be held.
+ * If this function fails the caller is responsible for deallocating the elt
+ * info structure.
+ */
+int oz_queue_elt_info(struct oz_elt_buf *buf, u8 isoc, u8 id,
+ struct oz_elt_info *ei)
+{
+ struct oz_elt_stream *st = 0;
+ struct list_head *e;
+ if (id) {
+ list_for_each(e, &buf->stream_list) {
+ st = container_of(e, struct oz_elt_stream, link);
+ if (st->id == id)
+ break;
+ }
+ if (e == &buf->stream_list) {
+ /* Stream specified but stream not known so fail.
+ * Caller deallocates element info. */
+ return -1;
+ }
+ }
+ if (st) {
+ /* If this is an ISOC fixed element that needs a frame number
+ * then insert that now. Earlier we stored the unit count in
+ * this field.
+ */
+ struct oz_isoc_fixed *body = (struct oz_isoc_fixed *)
+ &ei->data[sizeof(struct oz_elt)];
+ if ((body->app_id == OZ_APPID_USB) && (body->type
+ == OZ_USB_ENDPOINT_DATA) &&
+ (body->format == OZ_DATA_F_ISOC_FIXED)) {
+ u8 unit_count = body->frame_number;
+ body->frame_number = st->frame_number;
+ st->frame_number += unit_count;
+ }
+ /* Claim stream and update accounts */
+ oz_elt_stream_get(st);
+ ei->stream = st;
+ st->buf_count += ei->length;
+ /* Add to list in stream. */
+ list_add_tail(&ei->link, &st->elt_list);
+ oz_trace2(OZ_TRACE_STREAM, "Stream up: %d %d\n",
+ st->buf_count, ei->length);
+ /* Check if we have too much buffered for this stream. If so
+ * start dropping elements until we are back in bounds.
+ */
+ while ((st->buf_count > st->max_buf_count) &&
+ !list_empty(&st->elt_list)) {
+ struct oz_elt_info *ei2 =
+ list_first_entry(&st->elt_list,
+ struct oz_elt_info, link);
+ list_del_init(&ei2->link);
+ list_del_init(&ei2->link_order);
+ st->buf_count -= ei2->length;
+ oz_elt_info_free(buf, ei2);
+ oz_elt_stream_put(st);
+ }
+ }
+ list_add_tail(&ei->link_order, isoc ?
+ &buf->isoc_list : &buf->order_list);
+ return 0;
+}
+/*------------------------------------------------------------------------------
+ */
+int oz_select_elts_for_tx(struct oz_elt_buf *buf, u8 isoc, unsigned *len,
+ unsigned max_len, struct list_head *list)
+{
+ int count = 0;
+ struct list_head *e;
+ struct list_head *el;
+ struct oz_elt_info *ei;
+ spin_lock_bh(&buf->lock);
+ if (isoc)
+ el = &buf->isoc_list;
+ else
+ el = &buf->order_list;
+ e = el->next;
+ while (e != el) {
+ struct oz_app_hdr *app_hdr;
+ ei = container_of(e, struct oz_elt_info, link_order);
+ e = e->next;
+ if ((*len + ei->length) <= max_len) {
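+ /* Stamp the element with the per-application TX sequence
+ * number, skipping zero on wrap because a zero sequence
+ * number disables the duplicate check on the receive side.
+ */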
+ app_hdr = (struct oz_app_hdr *)
+ &ei->data[sizeof(struct oz_elt)];
+ app_hdr->elt_seq_num = buf->tx_seq_num[ei->app_id]++;
+ if (buf->tx_seq_num[ei->app_id] == 0)
+ buf->tx_seq_num[ei->app_id] = 1;
+ *len += ei->length;
+ list_del(&ei->link);
+ list_del(&ei->link_order);
+ if (ei->stream) {
+ ei->stream->buf_count -= ei->length;
+ oz_trace2(OZ_TRACE_STREAM,
+ "Stream down: %d %d\n",
+ ei->stream->buf_count, ei->length);
+ oz_elt_stream_put(ei->stream);
+ ei->stream = 0;
+ }
+ INIT_LIST_HEAD(&ei->link_order);
+ list_add_tail(&ei->link, list);
+ count++;
+ } else {
+ break;
+ }
+ }
+ spin_unlock_bh(&buf->lock);
+ return count;
+}
+/*------------------------------------------------------------------------------
+ */
+int oz_are_elts_available(struct oz_elt_buf *buf)
+{
+ return buf->order_list.next != &buf->order_list;
+}
+/*------------------------------------------------------------------------------
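+ * Releases surplus free elements back to the heap: they are unlinked from
+ * the pool under the buffer lock and freed after the lock is dropped.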
+ */
+void oz_trim_elt_pool(struct oz_elt_buf *buf)
+{
+ struct list_head *free = 0;
+ struct list_head *e;
+ spin_lock_bh(&buf->lock);
+ while (buf->free_elts > buf->max_free_elts) {
+ e = buf->elt_pool;
+ buf->elt_pool = e->next;
+ e->next = free;
+ free = e;
+ buf->free_elts--;
+ }
+ spin_unlock_bh(&buf->lock);
+ while (free) {
+ struct oz_elt_info *ei =
+ container_of(free, struct oz_elt_info, link);
+ free = free->next;
+ kfree(ei);
+ }
+}
diff --git a/drivers/staging/ozwpan/ozeltbuf.h b/drivers/staging/ozwpan/ozeltbuf.h
new file mode 100644
index 00000000000..03c12f57b9b
--- /dev/null
+++ b/drivers/staging/ozwpan/ozeltbuf.h
@@ -0,0 +1,70 @@
+/* -----------------------------------------------------------------------------
+ * Copyright (c) 2011 Ozmo Inc
+ * Released under the GNU General Public License Version 2 (GPLv2).
+ * -----------------------------------------------------------------------------
+ */
+#ifndef _OZELTBUF_H
+#define _OZELTBUF_H
+
+#include "ozprotocol.h"
+
+/*-----------------------------------------------------------------------------
+ */
+struct oz_pd;
+typedef void (*oz_elt_callback_t)(struct oz_pd *pd, long context);
+
+struct oz_elt_stream {
+ struct list_head link;
+ struct list_head elt_list;
+ atomic_t ref_count;
+ unsigned buf_count;
+ unsigned max_buf_count;
+ u8 frame_number;
+ u8 id;
+};
+
+#define OZ_MAX_ELT_PAYLOAD 255
+struct oz_elt_info {
+ struct list_head link;
+ struct list_head link_order;
+ u8 flags;
+ u8 app_id;
+ oz_elt_callback_t callback;
+ long context;
+ struct oz_elt_stream *stream;
+ u8 data[sizeof(struct oz_elt) + OZ_MAX_ELT_PAYLOAD];
+ int length;
+ unsigned magic;
+};
+/* Flags values */
+#define OZ_EI_F_MARKED 0x1
+
+struct oz_elt_buf {
+ spinlock_t lock;
+ struct list_head stream_list;
+ struct list_head order_list;
+ struct list_head isoc_list;
+ struct list_head *elt_pool;
+ int free_elts;
+ int max_free_elts;
+ u8 tx_seq_num[OZ_NB_APPS];
+};
+
+int oz_elt_buf_init(struct oz_elt_buf *buf);
+void oz_elt_buf_term(struct oz_elt_buf *buf);
+struct oz_elt_info *oz_elt_info_alloc(struct oz_elt_buf *buf);
+void oz_elt_info_free(struct oz_elt_buf *buf, struct oz_elt_info *ei);
+void oz_elt_info_free_chain(struct oz_elt_buf *buf, struct list_head *list);
+int oz_elt_stream_create(struct oz_elt_buf *buf, u8 id, int max_buf_count);
+int oz_elt_stream_delete(struct oz_elt_buf *buf, u8 id);
+void oz_elt_stream_get(struct oz_elt_stream *st);
+void oz_elt_stream_put(struct oz_elt_stream *st);
+int oz_queue_elt_info(struct oz_elt_buf *buf, u8 isoc, u8 id,
+ struct oz_elt_info *ei);
+int oz_select_elts_for_tx(struct oz_elt_buf *buf, u8 isoc, unsigned *len,
+ unsigned max_len, struct list_head *list);
+int oz_are_elts_available(struct oz_elt_buf *buf);
+void oz_trim_elt_pool(struct oz_elt_buf *buf);
+
+#endif /* _OZELTBUF_H */
+
diff --git a/drivers/staging/ozwpan/ozevent.c b/drivers/staging/ozwpan/ozevent.c
new file mode 100644
index 00000000000..73703d3e96b
--- /dev/null
+++ b/drivers/staging/ozwpan/ozevent.c
@@ -0,0 +1,116 @@
+/* -----------------------------------------------------------------------------
+ * Copyright (c) 2011 Ozmo Inc
+ * Released under the GNU General Public License Version 2 (GPLv2).
+ * -----------------------------------------------------------------------------
+ */
+#include "ozconfig.h"
+#ifdef WANT_EVENT_TRACE
+#include <linux/jiffies.h>
+#include <linux/uaccess.h>
+#include "oztrace.h"
+#include "ozevent.h"
+/*------------------------------------------------------------------------------
+ */
+unsigned long g_evt_mask = 0xffffffff;
+/*------------------------------------------------------------------------------
+ */
+#define OZ_MAX_EVTS 2048 /* Must be power of 2 */
+DEFINE_SPINLOCK(g_eventlock);
+static int g_evt_in;
+static int g_evt_out;
+static int g_missed_events;
+static struct oz_event g_events[OZ_MAX_EVTS];
+/*------------------------------------------------------------------------------
+ * Context: process
+ */
+void oz_event_init(void)
+{
+ oz_trace("Event tracing initialized\n");
+ g_evt_in = g_evt_out = 0;
+ g_missed_events = 0;
+}
+/*------------------------------------------------------------------------------
+ * Context: process
+ */
+void oz_event_term(void)
+{
+ oz_trace("Event tracing terminated\n");
+}
+/*------------------------------------------------------------------------------
+ * Context: any
+ */
+void oz_event_log2(u8 evt, u8 ctx1, u16 ctx2, void *ctx3, unsigned ctx4)
+{
+ unsigned long irqstate;
+ int ix;
+ spin_lock_irqsave(&g_eventlock, irqstate);
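+ /* The event list is a circular buffer with one slot kept free; if
+ * advancing the in index would reach the out index the event is
+ * dropped and counted as missed.
+ */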
+ ix = (g_evt_in + 1) & (OZ_MAX_EVTS - 1);
+ if (ix != g_evt_out) {
+ struct oz_event *e = &g_events[g_evt_in];
+ e->jiffies = jiffies;
+ e->evt = evt;
+ e->ctx1 = ctx1;
+ e->ctx2 = ctx2;
+ e->ctx3 = ctx3;
+ e->ctx4 = ctx4;
+ g_evt_in = ix;
+ } else {
+ g_missed_events++;
+ }
+ spin_unlock_irqrestore(&g_eventlock, irqstate);
+}
+/*------------------------------------------------------------------------------
+ * Context: process
+ */
+int oz_events_copy(struct oz_evtlist __user *lst)
+{
+ int first;
+ int ix;
+ struct hdr {
+ int count;
+ int missed;
+ } hdr;
+ ix = g_evt_out;
+ hdr.count = g_evt_in - ix;
+ if (hdr.count < 0)
+ hdr.count += OZ_MAX_EVTS;
+ if (hdr.count > OZ_EVT_LIST_SZ)
+ hdr.count = OZ_EVT_LIST_SZ;
+ hdr.missed = g_missed_events;
+ g_missed_events = 0;
+ if (copy_to_user((void __user *)lst, &hdr, sizeof(hdr)))
+ return -EFAULT;
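+ /* The pending events may wrap around the end of the circular buffer,
+ * so copy them to user space in at most two contiguous chunks.
+ */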
+ first = OZ_MAX_EVTS - ix;
+ if (first > hdr.count)
+ first = hdr.count;
+ if (first) {
+ int sz = first*sizeof(struct oz_event);
+ void __user *p = (void __user *)lst->evts;
+ if (copy_to_user(p, &g_events[ix], sz))
+ return -EFAULT;
+ if (hdr.count > first) {
+ p = (void __user *)&lst->evts[first];
+ sz = (hdr.count-first)*sizeof(struct oz_event);
+ if (copy_to_user(p, g_events, sz))
+ return -EFAULT;
+ }
+ }
+ ix += hdr.count;
+ if (ix >= OZ_MAX_EVTS)
+ ix -= OZ_MAX_EVTS;
+ g_evt_out = ix;
+ return 0;
+}
+/*------------------------------------------------------------------------------
+ * Context: process
+ */
+void oz_events_clear(void)
+{
+ unsigned long irqstate;
+ spin_lock_irqsave(&g_eventlock, irqstate);
+ g_evt_in = g_evt_out = 0;
+ g_missed_events = 0;
+ spin_unlock_irqrestore(&g_eventlock, irqstate);
+}
+#endif /* WANT_EVENT_TRACE */
+
diff --git a/drivers/staging/ozwpan/ozevent.h b/drivers/staging/ozwpan/ozevent.h
new file mode 100644
index 00000000000..f033d014c6f
--- /dev/null
+++ b/drivers/staging/ozwpan/ozevent.h
@@ -0,0 +1,31 @@
+/* -----------------------------------------------------------------------------
+ * Copyright (c) 2011 Ozmo Inc
+ * Released under the GNU General Public License Version 2 (GPLv2).
+ * -----------------------------------------------------------------------------
+ */
+#ifndef _OZEVENT_H
+#define _OZEVENT_H
+#include "ozconfig.h"
+#include "ozeventdef.h"
+
+#ifdef WANT_EVENT_TRACE
+extern unsigned long g_evt_mask;
+void oz_event_init(void);
+void oz_event_term(void);
+void oz_event_log2(u8 evt, u8 ctx1, u16 ctx2, void *ctx3, unsigned ctx4);
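+/* Log an event only when its type is enabled in the global event mask. */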
+#define oz_event_log(__evt, __ctx1, __ctx2, __ctx3, __ctx4) \
+ do { \
+ if ((1<<(__evt)) & g_evt_mask) \
+ oz_event_log2(__evt, __ctx1, __ctx2, __ctx3, __ctx4); \
+ } while (0)
+int oz_events_copy(struct oz_evtlist __user *lst);
+void oz_events_clear(void);
+#else
+#define oz_event_init()
+#define oz_event_term()
+#define oz_event_log(__evt, __ctx1, __ctx2, __ctx3, __ctx4)
+#define oz_events_copy(__lst)
+#define oz_events_clear()
+#endif /* WANT_EVENT_TRACE */
+
+#endif /* _OZEVENT_H */
diff --git a/drivers/staging/ozwpan/ozeventdef.h b/drivers/staging/ozwpan/ozeventdef.h
new file mode 100644
index 00000000000..a880288bab1
--- /dev/null
+++ b/drivers/staging/ozwpan/ozeventdef.h
@@ -0,0 +1,47 @@
+/* -----------------------------------------------------------------------------
+ * Copyright (c) 2011 Ozmo Inc
+ * Released under the GNU General Public License Version 2 (GPLv2).
+ * -----------------------------------------------------------------------------
+ */
+#ifndef _OZEVENTDEF_H
+#define _OZEVENTDEF_H
+
+#define OZ_EVT_RX_FRAME 0
+#define OZ_EVT_RX_PROCESS 1
+#define OZ_EVT_TX_FRAME 2
+#define OZ_EVT_TX_ISOC 3
+#define OZ_EVT_URB_SUBMIT 4
+#define OZ_EVT_URB_DONE 5
+#define OZ_EVT_URB_CANCEL 6
+#define OZ_EVT_CTRL_REQ 7
+#define OZ_EVT_CTRL_CNF 8
+#define OZ_EVT_CTRL_LOCAL 9
+#define OZ_EVT_CONNECT_REQ 10
+#define OZ_EVT_CONNECT_RSP 11
+#define OZ_EVT_EP_CREDIT 12
+#define OZ_EVT_EP_BUFFERING 13
+#define OZ_EVT_TX_ISOC_DONE 14
+#define OZ_EVT_TX_ISOC_DROP 15
+#define OZ_EVT_TIMER_CTRL 16
+#define OZ_EVT_TIMER 17
+#define OZ_EVT_PD_STATE 18
+#define OZ_EVT_SERVICE 19
+#define OZ_EVT_DEBUG 20
+
+struct oz_event {
+ unsigned long jiffies;
+ unsigned char evt;
+ unsigned char ctx1;
+ unsigned short ctx2;
+ void *ctx3;
+ unsigned ctx4;
+};
+
+#define OZ_EVT_LIST_SZ 64
+struct oz_evtlist {
+ int count;
+ int missed;
+ struct oz_event evts[OZ_EVT_LIST_SZ];
+};
+
+#endif /* _OZEVENTDEF_H */
diff --git a/drivers/staging/ozwpan/ozhcd.c b/drivers/staging/ozwpan/ozhcd.c
new file mode 100644
index 00000000000..750b14eb505
--- /dev/null
+++ b/drivers/staging/ozwpan/ozhcd.c
@@ -0,0 +1,2256 @@
+/* -----------------------------------------------------------------------------
+ * Copyright (c) 2011 Ozmo Inc
+ * Released under the GNU General Public License Version 2 (GPLv2).
+ *
+ * This file provides the implementation of a USB host controller device that
+ * does not have any associated hardware. Instead the virtual device is
+ * connected to the WiFi network and emulates the operation of a USB HCD by
+ * receiving and sending network frames.
+ * Note:
+ * We take great pains to reduce the amount of code where interrupts need to be
+ * disabled and in this respect we are different from standard HCDs. In
+ * particular we don't want in_irq() code bleeding over to the protocol side of
+ * the driver.
+ * The troublesome functions are the urb enqueue and dequeue functions, both of
+ * which can be called in_irq(). So for these functions we put the urbs into a
+ * queue and request a tasklet to process them. This means that a spinlock with
+ * interrupts disabled must be held for insertion and removal but most code
+ * is in tasklet or soft irq context. The lock that protects this list is called
+ * the tasklet lock and serves the purpose of the 'HCD lock' which must be held
+ * when calling the following functions.
+ * usb_hcd_link_urb_to_ep()
+ * usb_hcd_unlink_urb_from_ep()
+ * usb_hcd_flush_endpoint()
+ * usb_hcd_check_unlink_urb()
+ * -----------------------------------------------------------------------------
+ */
+#include <linux/platform_device.h>
+#include <linux/usb.h>
+#include <linux/jiffies.h>
+#include <linux/slab.h>
+#include <linux/export.h>
+#include "linux/usb/hcd.h"
+#include <asm/unaligned.h>
+#include "ozconfig.h"
+#include "ozusbif.h"
+#include "oztrace.h"
+#include "ozurbparanoia.h"
+#include "ozevent.h"
+/*------------------------------------------------------------------------------
+ * Number of units of buffering to capture for an isochronous IN endpoint before
+ * allowing data to be indicated up.
+ */
+#define OZ_IN_BUFFERING_UNITS 50
+/* Name of our platform device.
+ */
+#define OZ_PLAT_DEV_NAME "ozwpan"
+/* Maximum number of free urb links that can be kept in the pool.
+ */
+#define OZ_MAX_LINK_POOL_SIZE 16
+/* Get endpoint object from the containing link.
+ */
+#define ep_from_link(__e) container_of((__e), struct oz_endpoint, link)
+/*------------------------------------------------------------------------------
+ * Used to link urbs together and also store some status information for each
+ * urb.
+ * A cache of these is kept in a pool to reduce the number of calls to kmalloc.
+ */
+struct oz_urb_link {
+ struct list_head link;
+ struct urb *urb;
+ struct oz_port *port;
+ u8 req_id;
+ u8 ep_num;
+ unsigned long submit_jiffies;
+};
+
+/* Holds state information about a USB endpoint.
+ */
+struct oz_endpoint {
+ struct list_head urb_list; /* List of oz_urb_link items. */
+ struct list_head link; /* For isoc ep, links in to isoc
+ lists of oz_port. */
+ unsigned long last_jiffies;
+ int credit;
+ int credit_ceiling;
+ u8 ep_num;
+ u8 attrib;
+ u8 *buffer;
+ int buffer_size;
+ int in_ix;
+ int out_ix;
+ int buffered_units;
+ unsigned flags;
+ int start_frame;
+};
+/* Bits in the flags field. */
+#define OZ_F_EP_BUFFERING 0x1
+#define OZ_F_EP_HAVE_STREAM 0x2
+
+/* Holds state information about a USB interface.
+ */
+struct oz_interface {
+ unsigned ep_mask;
+ u8 alt;
+};
+
+/* Holds state information about an hcd port.
+ */
+#define OZ_NB_ENDPOINTS 16
+struct oz_port {
+ unsigned flags;
+ unsigned status;
+ void *hpd;
+ struct oz_hcd *ozhcd;
+ spinlock_t port_lock;
+ u8 bus_addr;
+ u8 next_req_id;
+ u8 config_num;
+ int num_iface;
+ struct oz_interface *iface;
+ struct oz_endpoint *out_ep[OZ_NB_ENDPOINTS];
+ struct oz_endpoint *in_ep[OZ_NB_ENDPOINTS];
+ struct list_head isoc_out_ep;
+ struct list_head isoc_in_ep;
+};
+#define OZ_PORT_F_PRESENT 0x1
+#define OZ_PORT_F_CHANGED 0x2
+#define OZ_PORT_F_DYING 0x4
+
+/* Data structure in the private context area of struct usb_hcd.
+ */
+#define OZ_NB_PORTS 8
+struct oz_hcd {
+ spinlock_t hcd_lock;
+ struct list_head urb_pending_list;
+ struct list_head urb_cancel_list;
+ struct list_head orphanage;
+ int conn_port; /* Port that is currently connecting, -1 if none.*/
+ struct oz_port ports[OZ_NB_PORTS];
+ uint flags;
+ struct usb_hcd *hcd;
+};
+/* Bits in flags field.
+ */
+#define OZ_HDC_F_SUSPENDED 0x1
+
+/*------------------------------------------------------------------------------
+ * Static function prototypes.
+ */
+static int oz_hcd_start(struct usb_hcd *hcd);
+static void oz_hcd_stop(struct usb_hcd *hcd);
+static void oz_hcd_shutdown(struct usb_hcd *hcd);
+static int oz_hcd_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
+ gfp_t mem_flags);
+static int oz_hcd_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status);
+static void oz_hcd_endpoint_disable(struct usb_hcd *hcd,
+ struct usb_host_endpoint *ep);
+static void oz_hcd_endpoint_reset(struct usb_hcd *hcd,
+ struct usb_host_endpoint *ep);
+static int oz_hcd_get_frame_number(struct usb_hcd *hcd);
+static int oz_hcd_hub_status_data(struct usb_hcd *hcd, char *buf);
+static int oz_hcd_hub_control(struct usb_hcd *hcd, u16 req_type, u16 wvalue,
+ u16 windex, char *buf, u16 wlength);
+static int oz_hcd_bus_suspend(struct usb_hcd *hcd);
+static int oz_hcd_bus_resume(struct usb_hcd *hcd);
+static int oz_plat_probe(struct platform_device *dev);
+static int oz_plat_remove(struct platform_device *dev);
+static void oz_plat_shutdown(struct platform_device *dev);
+static int oz_plat_suspend(struct platform_device *dev, pm_message_t msg);
+static int oz_plat_resume(struct platform_device *dev);
+static void oz_urb_process_tasklet(unsigned long unused);
+static int oz_build_endpoints_for_config(struct usb_hcd *hcd,
+ struct oz_port *port, struct usb_host_config *config,
+ gfp_t mem_flags);
+static void oz_clean_endpoints_for_config(struct usb_hcd *hcd,
+ struct oz_port *port);
+static int oz_build_endpoints_for_interface(struct usb_hcd *hcd,
+ struct oz_port *port,
+ struct usb_host_interface *intf, gfp_t mem_flags);
+static void oz_clean_endpoints_for_interface(struct usb_hcd *hcd,
+ struct oz_port *port, int if_ix);
+static void oz_process_ep0_urb(struct oz_hcd *ozhcd, struct urb *urb,
+ gfp_t mem_flags);
+static struct oz_urb_link *oz_remove_urb(struct oz_endpoint *ep,
+ struct urb *urb);
+static void oz_hcd_clear_orphanage(struct oz_hcd *ozhcd, int status);
+/*------------------------------------------------------------------------------
+ * Static external variables.
+ */
+static struct platform_device *g_plat_dev;
+static struct oz_hcd *g_ozhcd;
+static DEFINE_SPINLOCK(g_hcdlock); /* Guards g_ozhcd. */
+static const char g_hcd_name[] = "Ozmo WPAN";
+static struct list_head *g_link_pool;
+static int g_link_pool_size;
+static DEFINE_SPINLOCK(g_link_lock);
+static DEFINE_SPINLOCK(g_tasklet_lock);
+static struct tasklet_struct g_urb_process_tasklet;
+static struct tasklet_struct g_urb_cancel_tasklet;
+static atomic_t g_pending_urbs = ATOMIC_INIT(0);
+static const struct hc_driver g_oz_hc_drv = {
+ .description = g_hcd_name,
+ .product_desc = "Ozmo Devices WPAN",
+ .hcd_priv_size = sizeof(struct oz_hcd),
+ .flags = HCD_USB11,
+ .start = oz_hcd_start,
+ .stop = oz_hcd_stop,
+ .shutdown = oz_hcd_shutdown,
+ .urb_enqueue = oz_hcd_urb_enqueue,
+ .urb_dequeue = oz_hcd_urb_dequeue,
+ .endpoint_disable = oz_hcd_endpoint_disable,
+ .endpoint_reset = oz_hcd_endpoint_reset,
+ .get_frame_number = oz_hcd_get_frame_number,
+ .hub_status_data = oz_hcd_hub_status_data,
+ .hub_control = oz_hcd_hub_control,
+ .bus_suspend = oz_hcd_bus_suspend,
+ .bus_resume = oz_hcd_bus_resume,
+};
+
+static struct platform_driver g_oz_plat_drv = {
+ .probe = oz_plat_probe,
+ .remove = oz_plat_remove,
+ .shutdown = oz_plat_shutdown,
+ .suspend = oz_plat_suspend,
+ .resume = oz_plat_resume,
+ .driver = {
+ .name = OZ_PLAT_DEV_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+/*------------------------------------------------------------------------------
+ * Gets our private context area (which is of type struct oz_hcd) from the
+ * usb_hcd structure.
+ * Context: any
+ */
+static inline struct oz_hcd *oz_hcd_private(struct usb_hcd *hcd)
+{
+ return (struct oz_hcd *)hcd->hcd_priv;
+}
+/*------------------------------------------------------------------------------
+ * Searches list of ports to find the index of the one with a specified USB
+ * bus address. If none of the ports has the bus address then the connection
+ * port is returned if there is one, or -1 otherwise.
+ * Context: any
+ */
+static int oz_get_port_from_addr(struct oz_hcd *ozhcd, u8 bus_addr)
+{
+ int i;
+ for (i = 0; i < OZ_NB_PORTS; i++) {
+ if (ozhcd->ports[i].bus_addr == bus_addr)
+ return i;
+ }
+ return ozhcd->conn_port;
+}
+/*------------------------------------------------------------------------------
+ * Allocates an urb link, first trying the pool but going to the heap if empty.
+ * Context: any
+ */
+static struct oz_urb_link *oz_alloc_urb_link(void)
+{
+ struct oz_urb_link *urbl = 0;
+ unsigned long irq_state;
+ spin_lock_irqsave(&g_link_lock, irq_state);
+ if (g_link_pool) {
+ urbl = container_of(g_link_pool, struct oz_urb_link, link);
+ g_link_pool = urbl->link.next;
+ --g_link_pool_size;
+ }
+ spin_unlock_irqrestore(&g_link_lock, irq_state);
+ if (urbl == 0)
+ urbl = kmalloc(sizeof(struct oz_urb_link), GFP_ATOMIC);
+ return urbl;
+}
+/*------------------------------------------------------------------------------
+ * Frees an urb link by putting it in the pool if there is enough space or
+ * deallocating it to the heap otherwise.
+ * Context: any
+ */
+static void oz_free_urb_link(struct oz_urb_link *urbl)
+{
+ if (urbl) {
+ unsigned long irq_state;
+ spin_lock_irqsave(&g_link_lock, irq_state);
+ if (g_link_pool_size < OZ_MAX_LINK_POOL_SIZE) {
+ urbl->link.next = g_link_pool;
+ g_link_pool = &urbl->link;
+ urbl = 0;
+ g_link_pool_size++;
+ }
+ spin_unlock_irqrestore(&g_link_lock, irq_state);
+ if (urbl)
+ kfree(urbl);
+ }
+}
+/*------------------------------------------------------------------------------
+ * Deallocates all the urb links in the pool.
+ * Context: unknown
+ */
+static void oz_empty_link_pool(void)
+{
+ struct list_head *e;
+ unsigned long irq_state;
+ spin_lock_irqsave(&g_link_lock, irq_state);
+ e = g_link_pool;
+ g_link_pool = 0;
+ g_link_pool_size = 0;
+ spin_unlock_irqrestore(&g_link_lock, irq_state);
+ while (e) {
+ struct oz_urb_link *urbl =
+ container_of(e, struct oz_urb_link, link);
+ e = e->next;
+ kfree(urbl);
+ }
+}
+/*------------------------------------------------------------------------------
+ * Allocates endpoint structure and optionally a buffer. If a buffer is
+ * allocated it immediately follows the endpoint structure.
+ * Context: softirq
+ */
+static struct oz_endpoint *oz_ep_alloc(gfp_t mem_flags, int buffer_size)
+{
+ struct oz_endpoint *ep =
+ kzalloc(sizeof(struct oz_endpoint)+buffer_size, mem_flags);
+ if (ep) {
+ INIT_LIST_HEAD(&ep->urb_list);
+ INIT_LIST_HEAD(&ep->link);
+ ep->credit = -1;
+ if (buffer_size) {
+ ep->buffer_size = buffer_size;
+ ep->buffer = (u8 *)(ep+1);
+ }
+ }
+ return ep;
+}
+/*------------------------------------------------------------------------------
+ * Pre-condition: Must be called with g_tasklet_lock held and interrupts
+ * disabled.
+ * Context: softirq or process
+ */
+struct oz_urb_link *oz_uncancel_urb(struct oz_hcd *ozhcd, struct urb *urb)
+{
+ struct oz_urb_link *urbl;
+ struct list_head *e;
+ list_for_each(e, &ozhcd->urb_cancel_list) {
+ urbl = container_of(e, struct oz_urb_link, link);
+ if (urb == urbl->urb) {
+ list_del_init(e);
+ return urbl;
+ }
+ }
+ return 0;
+}
+/*------------------------------------------------------------------------------
+ * This is called when we have finished processing an urb. It unlinks it from
+ * the ep and returns it to the core.
+ * Context: softirq or process
+ */
+static void oz_complete_urb(struct usb_hcd *hcd, struct urb *urb,
+ int status, unsigned long submit_jiffies)
+{
+ struct oz_hcd *ozhcd = oz_hcd_private(hcd);
+ unsigned long irq_state;
+ struct oz_urb_link *cancel_urbl = 0;
+ spin_lock_irqsave(&g_tasklet_lock, irq_state);
+ usb_hcd_unlink_urb_from_ep(hcd, urb);
+ /* Clear hcpriv which will prevent it from being put in the cancel list
+ * in the event that an attempt is made to cancel it.
+ */
+ urb->hcpriv = 0;
+ /* Walk the cancel list in case the urb is already sitting there.
+ * Since we process the cancel list in a tasklet rather than in
+ * the dequeue function this could happen.
+ */
+ cancel_urbl = oz_uncancel_urb(ozhcd, urb);
+ /* Note: we release lock but do not enable local irqs.
+ * It appears that usb_hcd_giveback_urb() expects irqs to be disabled,
+ * or at least other host controllers disable interrupts at this point
+ * so we do the same. We must, however, release the lock otherwise a
+ * deadlock will occur if an urb is submitted to our driver in the urb
+ * completion function. Because we disable interrupts it is possible
+ * that the urb_enqueue function can be called with them disabled.
+ */
+ spin_unlock(&g_tasklet_lock);
+ if (oz_forget_urb(urb)) {
+ oz_trace("OZWPAN: ERROR Unknown URB %p\n", urb);
+ } else {
+ static unsigned long last_time;
+ atomic_dec(&g_pending_urbs);
+ oz_trace2(OZ_TRACE_URB,
+ "%lu: giveback_urb(%p,%x) %lu %lu pending:%d\n",
+ jiffies, urb, status, jiffies-submit_jiffies,
+ jiffies-last_time, atomic_read(&g_pending_urbs));
+ last_time = jiffies;
+ oz_event_log(OZ_EVT_URB_DONE, 0, 0, urb, status);
+ usb_hcd_giveback_urb(hcd, urb, status);
+ }
+ spin_lock(&g_tasklet_lock);
+ spin_unlock_irqrestore(&g_tasklet_lock, irq_state);
+ if (cancel_urbl)
+ oz_free_urb_link(cancel_urbl);
+}
+/*------------------------------------------------------------------------------
+ * Deallocates an endpoint including deallocating any associated stream and
+ * returning any queued urbs to the core.
+ * Context: softirq
+ */
+static void oz_ep_free(struct oz_port *port, struct oz_endpoint *ep)
+{
+ oz_trace("oz_ep_free()\n");
+ if (port) {
+ struct list_head list;
+ struct oz_hcd *ozhcd = port->ozhcd;
+ INIT_LIST_HEAD(&list);
+ if (ep->flags & OZ_F_EP_HAVE_STREAM)
+ oz_usb_stream_delete(port->hpd, ep->ep_num);
+ /* Transfer URBs to the orphanage while we hold the lock. */
+ spin_lock_bh(&ozhcd->hcd_lock);
+ /* Note: this works even if ep->urb_list is empty.*/
+ list_replace_init(&ep->urb_list, &list);
+ /* Put the URBs in the orphanage. */
+ list_splice_tail(&list, &ozhcd->orphanage);
+ spin_unlock_bh(&ozhcd->hcd_lock);
+ }
+ oz_trace("Freeing endpoint memory\n");
+ kfree(ep);
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq
+ */
+static int oz_enqueue_ep_urb(struct oz_port *port, u8 ep_addr, int in_dir,
+ struct urb *urb, u8 req_id)
+{
+ struct oz_urb_link *urbl;
+ struct oz_endpoint *ep;
+ int err = 0;
+ if (ep_addr >= OZ_NB_ENDPOINTS) {
+ oz_trace("Invalid endpoint number in oz_enqueue_ep_urb().\n");
+ return -EINVAL;
+ }
+ urbl = oz_alloc_urb_link();
+ if (!urbl)
+ return -ENOMEM;
+ urbl->submit_jiffies = jiffies;
+ urbl->urb = urb;
+ urbl->req_id = req_id;
+ urbl->ep_num = ep_addr;
+ /* Hold lock while we insert the URB into the list within the
+ * endpoint structure.
+ */
+ spin_lock_bh(&port->ozhcd->hcd_lock);
+ /* If the urb has been unlinked while out of any list then
+ * complete it now.
+ */
+ if (urb->unlinked) {
+ spin_unlock_bh(&port->ozhcd->hcd_lock);
+ oz_trace("urb %p unlinked so complete immediately\n", urb);
+ oz_complete_urb(port->ozhcd->hcd, urb, 0, 0);
+ oz_free_urb_link(urbl);
+ return 0;
+ }
+ if (in_dir)
+ ep = port->in_ep[ep_addr];
+ else
+ ep = port->out_ep[ep_addr];
+ if (ep && port->hpd) {
+ list_add_tail(&urbl->link, &ep->urb_list);
+ if (!in_dir && ep_addr && (ep->credit < 0)) {
+ ep->last_jiffies = jiffies;
+ ep->credit = 0;
+ oz_event_log(OZ_EVT_EP_CREDIT, ep->ep_num,
+ 0, 0, ep->credit);
+ }
+ } else {
+ err = -EPIPE;
+ }
+ spin_unlock_bh(&port->ozhcd->hcd_lock);
+ if (err)
+ oz_free_urb_link(urbl);
+ return err;
+}
+/*------------------------------------------------------------------------------
+ * Removes an urb from the queue in the endpoint.
+ * Returns 0 if it is found and -EIDRM otherwise.
+ * Context: softirq
+ */
+static int oz_dequeue_ep_urb(struct oz_port *port, u8 ep_addr, int in_dir,
+ struct urb *urb)
+{
+ struct oz_urb_link *urbl = 0;
+ struct oz_endpoint *ep;
+ spin_lock_bh(&port->ozhcd->hcd_lock);
+ if (in_dir)
+ ep = port->in_ep[ep_addr];
+ else
+ ep = port->out_ep[ep_addr];
+ if (ep) {
+ struct list_head *e;
+ list_for_each(e, &ep->urb_list) {
+ urbl = container_of(e, struct oz_urb_link, link);
+ if (urbl->urb == urb) {
+ list_del_init(e);
+ break;
+ }
+ urbl = 0;
+ }
+ }
+ spin_unlock_bh(&port->ozhcd->hcd_lock);
+ if (urbl)
+ oz_free_urb_link(urbl);
+ return urbl ? 0 : -EIDRM;
+}
+/*------------------------------------------------------------------------------
+ * Finds an urb given its request id.
+ * Context: softirq
+ */
+static struct urb *oz_find_urb_by_id(struct oz_port *port, int ep_ix,
+ u8 req_id)
+{
+ struct oz_hcd *ozhcd = port->ozhcd;
+ struct urb *urb = 0;
+ struct oz_urb_link *urbl = 0;
+ struct oz_endpoint *ep;
+
+ spin_lock_bh(&ozhcd->hcd_lock);
+ ep = port->out_ep[ep_ix];
+ if (ep) {
+ struct list_head *e;
+ list_for_each(e, &ep->urb_list) {
+ urbl = container_of(e, struct oz_urb_link, link);
+ if (urbl->req_id == req_id) {
+ urb = urbl->urb;
+ list_del_init(e);
+ break;
+ }
+ }
+ }
+ spin_unlock_bh(&ozhcd->hcd_lock);
+ /* If urb is non-zero then we must have an urb link to delete.
+ */
+ if (urb)
+ oz_free_urb_link(urbl);
+ return urb;
+}
+/*------------------------------------------------------------------------------
+ * Pre-condition: Port lock must be held.
+ * Context: softirq
+ */
+static void oz_acquire_port(struct oz_port *port, void *hpd)
+{
+ INIT_LIST_HEAD(&port->isoc_out_ep);
+ INIT_LIST_HEAD(&port->isoc_in_ep);
+ port->flags |= OZ_PORT_F_PRESENT | OZ_PORT_F_CHANGED;
+ port->status |= USB_PORT_STAT_CONNECTION |
+ (USB_PORT_STAT_C_CONNECTION << 16);
+ oz_usb_get(hpd);
+ port->hpd = hpd;
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq
+ */
+static struct oz_hcd *oz_hcd_claim(void)
+{
+ struct oz_hcd *ozhcd;
+ spin_lock_bh(&g_hcdlock);
+ ozhcd = g_ozhcd;
+ if (ozhcd)
+ usb_get_hcd(ozhcd->hcd);
+ spin_unlock_bh(&g_hcdlock);
+ return ozhcd;
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq
+ */
+static inline void oz_hcd_put(struct oz_hcd *ozhcd)
+{
+ if (ozhcd)
+ usb_put_hcd(ozhcd->hcd);
+}
+/*------------------------------------------------------------------------------
+ * This is called by the protocol handler to notify that a PD has arrived.
+ * We allocate a port to associate with the PD and create a structure for
+ * endpoint 0. This port is made the connection port.
+ * In the event that one of the other ports is already the connection port
+ * then we fail.
+ * TODO We should be able to do better than fail and should be able to remember
+ * that this port needs configuring and make it the connection port once the
+ * current connection port has been assigned an address. Collisions here are
+ * probably very rare indeed.
+ * Context: softirq
+ */
+void *oz_hcd_pd_arrived(void *hpd)
+{
+ int i;
+ void *hport = 0;
+ struct oz_hcd *ozhcd = 0;
+ struct oz_endpoint *ep;
+ oz_trace("oz_hcd_pd_arrived()\n");
+ ozhcd = oz_hcd_claim();
+ if (ozhcd == 0)
+ return 0;
+ /* Allocate an endpoint object in advance (before holding hcd lock) to
+ * use for out endpoint 0.
+ */
+ ep = oz_ep_alloc(GFP_ATOMIC, 0);
+ spin_lock_bh(&ozhcd->hcd_lock);
+ if (ozhcd->conn_port >= 0) {
+ spin_unlock_bh(&ozhcd->hcd_lock);
+ oz_trace("conn_port >= 0\n");
+ goto out;
+ }
+ for (i = 0; i < OZ_NB_PORTS; i++) {
+ struct oz_port *port = &ozhcd->ports[i];
+ spin_lock(&port->port_lock);
+ if ((port->flags & OZ_PORT_F_PRESENT) == 0) {
+ oz_acquire_port(port, hpd);
+ spin_unlock(&port->port_lock);
+ break;
+ }
+ spin_unlock(&port->port_lock);
+ }
+ if (i < OZ_NB_PORTS) {
+ oz_trace("Setting conn_port = %d\n", i);
+ ozhcd->conn_port = i;
+ /* Attach out endpoint 0.
+ */
+ ozhcd->ports[i].out_ep[0] = ep;
+ ep = 0;
+ hport = &ozhcd->ports[i];
+ spin_unlock_bh(&ozhcd->hcd_lock);
+ if (ozhcd->flags & OZ_HDC_F_SUSPENDED) {
+ oz_trace("Resuming root hub\n");
+ usb_hcd_resume_root_hub(ozhcd->hcd);
+ }
+ usb_hcd_poll_rh_status(ozhcd->hcd);
+ } else {
+ spin_unlock_bh(&ozhcd->hcd_lock);
+ }
+out:
+ if (ep) /* ep is non-null if not used. */
+ oz_ep_free(0, ep);
+ oz_hcd_put(ozhcd);
+ return hport;
+}
+/*------------------------------------------------------------------------------
+ * This is called by the protocol handler to notify that the PD has gone away.
+ * We need to deallocate all resources and then request that the root hub is
+ * polled. We release the reference we hold on the PD.
+ * Context: softirq
+ */
+void oz_hcd_pd_departed(void *hport)
+{
+ struct oz_port *port = (struct oz_port *)hport;
+ struct oz_hcd *ozhcd;
+ void *hpd;
+ struct oz_endpoint *ep = 0;
+
+ oz_trace("oz_hcd_pd_departed()\n");
+ if (port == 0) {
+ oz_trace("oz_hcd_pd_departed() port = 0\n");
+ return;
+ }
+ ozhcd = port->ozhcd;
+ if (ozhcd == 0)
+ return;
+ /* Check if this is the connection port - if so clear it.
+ */
+ spin_lock_bh(&ozhcd->hcd_lock);
+ if ((ozhcd->conn_port >= 0) &&
+ (port == &ozhcd->ports[ozhcd->conn_port])) {
+ oz_trace("Clearing conn_port\n");
+ ozhcd->conn_port = -1;
+ }
+ spin_lock(&port->port_lock);
+ port->flags |= OZ_PORT_F_DYING;
+ spin_unlock(&port->port_lock);
+ spin_unlock_bh(&ozhcd->hcd_lock);
+
+ oz_clean_endpoints_for_config(ozhcd->hcd, port);
+ spin_lock_bh(&port->port_lock);
+ hpd = port->hpd;
+ port->hpd = 0;
+ port->bus_addr = 0xff;
+ port->flags &= ~(OZ_PORT_F_PRESENT | OZ_PORT_F_DYING);
+ port->flags |= OZ_PORT_F_CHANGED;
+ port->status &= ~USB_PORT_STAT_CONNECTION;
+ port->status |= (USB_PORT_STAT_C_CONNECTION << 16);
+ /* If there is an endpoint 0 then clear the pointer while we hold
+ * the spinlock but deallocate it after releasing the lock.
+ */
+ if (port->out_ep[0]) {
+ ep = port->out_ep[0];
+ port->out_ep[0] = 0;
+ }
+ spin_unlock_bh(&port->port_lock);
+ if (ep)
+ oz_ep_free(port, ep);
+ usb_hcd_poll_rh_status(ozhcd->hcd);
+ oz_usb_put(hpd);
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq
+ */
+void oz_hcd_pd_reset(void *hpd, void *hport)
+{
+ /* Cleanup the current configuration and report reset to the core.
+ */
+ struct oz_port *port = (struct oz_port *)hport;
+ struct oz_hcd *ozhcd = port->ozhcd;
+ oz_trace("PD Reset\n");
+ spin_lock_bh(&port->port_lock);
+ port->flags |= OZ_PORT_F_CHANGED;
+ port->status |= USB_PORT_STAT_RESET;
+ port->status |= (USB_PORT_STAT_C_RESET << 16);
+ spin_unlock_bh(&port->port_lock);
+ oz_clean_endpoints_for_config(ozhcd->hcd, port);
+ usb_hcd_poll_rh_status(ozhcd->hcd);
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq
+ */
+void oz_hcd_get_desc_cnf(void *hport, u8 req_id, int status, u8 *desc,
+ int length, int offset, int total_size)
+{
+ struct oz_port *port = (struct oz_port *)hport;
+ struct urb *urb;
+ int err = 0;
+
+ oz_event_log(OZ_EVT_CTRL_CNF, 0, req_id, 0, status);
+ oz_trace("oz_hcd_get_desc_cnf length = %d offs = %d tot_size = %d\n",
+ length, offset, total_size);
+ urb = oz_find_urb_by_id(port, 0, req_id);
+ if (!urb)
+ return;
+ if (status == 0) {
+ int copy_len;
+ int required_size = urb->transfer_buffer_length;
+ if (required_size > total_size)
+ required_size = total_size;
+ copy_len = required_size-offset;
+ if (length <= copy_len)
+ copy_len = length;
+ memcpy(urb->transfer_buffer+offset, desc, copy_len);
+ offset += copy_len;
+ if (offset < required_size) {
+ struct usb_ctrlrequest *setup =
+ (struct usb_ctrlrequest *)urb->setup_packet;
+ unsigned wvalue = le16_to_cpu(setup->wValue);
+ if (oz_enqueue_ep_urb(port, 0, 0, urb, req_id))
+ err = -ENOMEM;
+ else if (oz_usb_get_desc_req(port->hpd, req_id,
+ setup->bRequestType, (u8)(wvalue>>8),
+ (u8)wvalue, setup->wIndex, offset,
+ required_size-offset)) {
+ oz_dequeue_ep_urb(port, 0, 0, urb);
+ err = -ENOMEM;
+ }
+ if (err == 0)
+ return;
+ }
+ }
+ urb->actual_length = total_size;
+ oz_complete_urb(port->ozhcd->hcd, urb, 0, 0);
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq
+ */
+#ifdef WANT_TRACE
+static void oz_display_conf_type(u8 t)
+{
+ switch (t) {
+ case USB_REQ_GET_STATUS:
+ oz_trace("USB_REQ_GET_STATUS - cnf\n");
+ break;
+ case USB_REQ_CLEAR_FEATURE:
+ oz_trace("USB_REQ_CLEAR_FEATURE - cnf\n");
+ break;
+ case USB_REQ_SET_FEATURE:
+ oz_trace("USB_REQ_SET_FEATURE - cnf\n");
+ break;
+ case USB_REQ_SET_ADDRESS:
+ oz_trace("USB_REQ_SET_ADDRESS - cnf\n");
+ break;
+ case USB_REQ_GET_DESCRIPTOR:
+ oz_trace("USB_REQ_GET_DESCRIPTOR - cnf\n");
+ break;
+ case USB_REQ_SET_DESCRIPTOR:
+ oz_trace("USB_REQ_SET_DESCRIPTOR - cnf\n");
+ break;
+ case USB_REQ_GET_CONFIGURATION:
+ oz_trace("USB_REQ_GET_CONFIGURATION - cnf\n");
+ break;
+ case USB_REQ_SET_CONFIGURATION:
+ oz_trace("USB_REQ_SET_CONFIGURATION - cnf\n");
+ break;
+ case USB_REQ_GET_INTERFACE:
+ oz_trace("USB_REQ_GET_INTERFACE - cnf\n");
+ break;
+ case USB_REQ_SET_INTERFACE:
+ oz_trace("USB_REQ_SET_INTERFACE - cnf\n");
+ break;
+ case USB_REQ_SYNCH_FRAME:
+ oz_trace("USB_REQ_SYNCH_FRAME - cnf\n");
+ break;
+ }
+}
+#else
+#define oz_display_conf_type(__x)
+#endif /* WANT_TRACE */
+/*------------------------------------------------------------------------------
+ * Context: softirq
+ */
+static void oz_hcd_complete_set_config(struct oz_port *port, struct urb *urb,
+ u8 rcode, u8 config_num)
+{
+ int rc = 0;
+ struct usb_hcd *hcd = port->ozhcd->hcd;
+ if (rcode == 0) {
+ port->config_num = config_num;
+ oz_clean_endpoints_for_config(hcd, port);
+ if (oz_build_endpoints_for_config(hcd, port,
+ &urb->dev->config[port->config_num-1], GFP_ATOMIC)) {
+ rc = -ENOMEM;
+ }
+ } else {
+ rc = -ENOMEM;
+ }
+ oz_complete_urb(hcd, urb, rc, 0);
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq
+ */
+static void oz_hcd_complete_set_interface(struct oz_port *port, struct urb *urb,
+ u8 rcode, u8 if_num, u8 alt)
+{
+ struct usb_hcd *hcd = port->ozhcd->hcd;
+ int rc = 0;
+ if (rcode == 0) {
+ struct usb_host_config *config;
+ struct usb_host_interface *intf;
+ oz_trace("Set interface %d alt %d\n", if_num, alt);
+ oz_clean_endpoints_for_interface(hcd, port, if_num);
+ config = &urb->dev->config[port->config_num-1];
+ intf = &config->intf_cache[if_num]->altsetting[alt];
+ if (oz_build_endpoints_for_interface(hcd, port, intf,
+ GFP_ATOMIC))
+ rc = -ENOMEM;
+ else
+ port->iface[if_num].alt = alt;
+ } else {
+ rc = -ENOMEM;
+ }
+ oz_complete_urb(hcd, urb, rc, 0);
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq
+ */
+void oz_hcd_control_cnf(void *hport, u8 req_id, u8 rcode, u8 *data,
+ int data_len)
+{
+ struct oz_port *port = (struct oz_port *)hport;
+ struct urb *urb;
+ struct usb_ctrlrequest *setup;
+ struct usb_hcd *hcd = port->ozhcd->hcd;
+ unsigned windex;
+ unsigned wvalue;
+
+ oz_event_log(OZ_EVT_CTRL_CNF, 0, req_id, 0, rcode);
+ oz_trace("oz_hcd_control_cnf rcode=%u len=%d\n", rcode, data_len);
+ urb = oz_find_urb_by_id(port, 0, req_id);
+ if (!urb) {
+ oz_trace("URB not found\n");
+ return;
+ }
+ setup = (struct usb_ctrlrequest *)urb->setup_packet;
+ windex = le16_to_cpu(setup->wIndex);
+ wvalue = le16_to_cpu(setup->wValue);
+ if ((setup->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
+ /* Standard requests */
+ oz_display_conf_type(setup->bRequest);
+ switch (setup->bRequest) {
+ case USB_REQ_SET_CONFIGURATION:
+ oz_hcd_complete_set_config(port, urb, rcode,
+ (u8)wvalue);
+ break;
+ case USB_REQ_SET_INTERFACE:
+ oz_hcd_complete_set_interface(port, urb, rcode,
+ (u8)windex, (u8)wvalue);
+ break;
+ default:
+ oz_complete_urb(hcd, urb, 0, 0);
+ }
+
+ } else {
+ int copy_len;
+ oz_trace("VENDOR-CLASS - cnf\n");
+ if (data_len <= urb->transfer_buffer_length)
+ copy_len = data_len;
+ else
+ copy_len = urb->transfer_buffer_length;
+ if (copy_len)
+ memcpy(urb->transfer_buffer, data, copy_len);
+ urb->actual_length = copy_len;
+ oz_complete_urb(hcd, urb, 0, 0);
+ }
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq-serialized
+ */
+static int oz_hcd_buffer_data(struct oz_endpoint *ep, u8 *data, int data_len)
+{
+ int space;
+ int copy_len;
+ if (!ep->buffer)
+ return -1;
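+ /* Each unit is stored as a single length byte followed by its payload,
+ * wrapping around the end of the circular buffer; one slot is kept free
+ * to distinguish full from empty.
+ */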
+ space = ep->out_ix-ep->in_ix-1;
+ if (space < 0)
+ space += ep->buffer_size;
+ if (space < (data_len+1)) {
+ oz_trace("Buffer full\n");
+ return -1;
+ }
+ ep->buffer[ep->in_ix] = (u8)data_len;
+ if (++ep->in_ix == ep->buffer_size)
+ ep->in_ix = 0;
+ copy_len = ep->buffer_size - ep->in_ix;
+ if (copy_len > data_len)
+ copy_len = data_len;
+ memcpy(&ep->buffer[ep->in_ix], data, copy_len);
+
+ if (copy_len < data_len) {
+ memcpy(ep->buffer, data+copy_len, data_len-copy_len);
+ ep->in_ix = data_len-copy_len;
+ } else {
+ ep->in_ix += copy_len;
+ }
+ if (ep->in_ix == ep->buffer_size)
+ ep->in_ix = 0;
+ ep->buffered_units++;
+ return 0;
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq-serialized
+ */
+void oz_hcd_data_ind(void *hport, u8 endpoint, u8 *data, int data_len)
+{
+ struct oz_port *port = (struct oz_port *)hport;
+ struct oz_endpoint *ep;
+ struct oz_hcd *ozhcd = port->ozhcd;
+ spin_lock_bh(&ozhcd->hcd_lock);
+ ep = port->in_ep[endpoint & USB_ENDPOINT_NUMBER_MASK];
+ if (ep == 0)
+ goto done;
+ switch (ep->attrib & USB_ENDPOINT_XFERTYPE_MASK) {
+ case USB_ENDPOINT_XFER_INT:
+ case USB_ENDPOINT_XFER_BULK:
+ if (!list_empty(&ep->urb_list)) {
+ struct oz_urb_link *urbl =
+ list_first_entry(&ep->urb_list,
+ struct oz_urb_link, link);
+ struct urb *urb;
+ int copy_len;
+ list_del_init(&urbl->link);
+ spin_unlock_bh(&ozhcd->hcd_lock);
+ urb = urbl->urb;
+ oz_free_urb_link(urbl);
+ if (data_len <= urb->transfer_buffer_length)
+ copy_len = data_len;
+ else
+ copy_len = urb->transfer_buffer_length;
+ memcpy(urb->transfer_buffer, data, copy_len);
+ urb->actual_length = copy_len;
+ oz_complete_urb(port->ozhcd->hcd, urb, 0, 0);
+ return;
+ }
+ break;
+ case USB_ENDPOINT_XFER_ISOC:
+ oz_hcd_buffer_data(ep, data, data_len);
+ break;
+ }
+done:
+ spin_unlock_bh(&ozhcd->hcd_lock);
+}
+/*------------------------------------------------------------------------------
+ * Context: unknown
+ */
+static inline int oz_usb_get_frame_number(void)
+{
+ return jiffies_to_msecs(get_jiffies_64());
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq
+ */
+int oz_hcd_heartbeat(void *hport)
+{
+ int rc = 0;
+ struct oz_port *port = (struct oz_port *)hport;
+ struct oz_hcd *ozhcd = port->ozhcd;
+ struct oz_urb_link *urbl;
+ struct list_head xfr_list;
+ struct list_head *e;
+ struct list_head *n;
+ struct urb *urb;
+ struct oz_endpoint *ep;
+ unsigned long now = jiffies;
+ INIT_LIST_HEAD(&xfr_list);
+ /* Check the OUT isoc endpoints to see if any URB data can be sent.
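+ * Credit accrues at one unit per elapsed jiffy up to a per-endpoint
+ * ceiling; a URB is released once the credit covers its packet count.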
+ */
+ spin_lock_bh(&ozhcd->hcd_lock);
+ list_for_each(e, &port->isoc_out_ep) {
+ ep = ep_from_link(e);
+ if (ep->credit < 0)
+ continue;
+ ep->credit += (now - ep->last_jiffies);
+ if (ep->credit > ep->credit_ceiling)
+ ep->credit = ep->credit_ceiling;
+ oz_event_log(OZ_EVT_EP_CREDIT, ep->ep_num, 0, 0, ep->credit);
+ ep->last_jiffies = now;
+ while (ep->credit && !list_empty(&ep->urb_list)) {
+ urbl = list_first_entry(&ep->urb_list,
+ struct oz_urb_link, link);
+ urb = urbl->urb;
+ if (ep->credit < urb->number_of_packets)
+ break;
+ ep->credit -= urb->number_of_packets;
+ oz_event_log(OZ_EVT_EP_CREDIT, ep->ep_num, 0, 0,
+ ep->credit);
+ list_del(&urbl->link);
+ list_add_tail(&urbl->link, &xfr_list);
+ }
+ }
+ spin_unlock_bh(&ozhcd->hcd_lock);
+ /* Send to PD and complete URBs.
+ */
+ list_for_each_safe(e, n, &xfr_list) {
+ unsigned long t;
+ urbl = container_of(e, struct oz_urb_link, link);
+ urb = urbl->urb;
+ t = urbl->submit_jiffies;
+ list_del_init(e);
+ urb->error_count = 0;
+ urb->start_frame = oz_usb_get_frame_number();
+ oz_usb_send_isoc(port->hpd, urbl->ep_num, urb);
+ oz_free_urb_link(urbl);
+ oz_complete_urb(port->ozhcd->hcd, urb, 0, t);
+ }
+ /* Check the IN isoc endpoints to see if any URBs can be completed.
+ */
+ spin_lock_bh(&ozhcd->hcd_lock);
+ list_for_each(e, &port->isoc_in_ep) {
+ struct oz_endpoint *ep = ep_from_link(e);
+ if (ep->flags & OZ_F_EP_BUFFERING) {
+ if (ep->buffered_units >= OZ_IN_BUFFERING_UNITS) {
+ ep->flags &= ~OZ_F_EP_BUFFERING;
+ ep->credit = 0;
+ oz_event_log(OZ_EVT_EP_CREDIT,
+ ep->ep_num | USB_DIR_IN,
+ 0, 0, ep->credit);
+ ep->last_jiffies = now;
+ ep->start_frame = 0;
+ oz_event_log(OZ_EVT_EP_BUFFERING,
+ ep->ep_num | USB_DIR_IN, 0, 0, 0);
+ }
+ continue;
+ }
+ ep->credit += (now - ep->last_jiffies);
+ oz_event_log(OZ_EVT_EP_CREDIT, ep->ep_num | USB_DIR_IN,
+ 0, 0, ep->credit);
+ ep->last_jiffies = now;
+ while (!list_empty(&ep->urb_list)) {
+ struct oz_urb_link *urbl =
+ list_first_entry(&ep->urb_list,
+ struct oz_urb_link, link);
+ struct urb *urb = urbl->urb;
+ int len = 0;
+ int copy_len;
+ int i;
+ if (ep->credit < urb->number_of_packets)
+ break;
+ if (ep->buffered_units < urb->number_of_packets)
+ break;
+ urb->actual_length = 0;
+ for (i = 0; i < urb->number_of_packets; i++) {
+ len = ep->buffer[ep->out_ix];
+ if (++ep->out_ix == ep->buffer_size)
+ ep->out_ix = 0;
+ copy_len = ep->buffer_size - ep->out_ix;
+ if (copy_len > len)
+ copy_len = len;
+ memcpy(urb->transfer_buffer,
+ &ep->buffer[ep->out_ix], copy_len);
+ if (copy_len < len) {
+ memcpy(urb->transfer_buffer+copy_len,
+ ep->buffer, len-copy_len);
+ ep->out_ix = len-copy_len;
+ } else
+ ep->out_ix += copy_len;
+ if (ep->out_ix == ep->buffer_size)
+ ep->out_ix = 0;
+ urb->iso_frame_desc[i].offset =
+ urb->actual_length;
+ urb->actual_length += len;
+ urb->iso_frame_desc[i].actual_length = len;
+ urb->iso_frame_desc[i].status = 0;
+ }
+ ep->buffered_units -= urb->number_of_packets;
+ urb->error_count = 0;
+ urb->start_frame = ep->start_frame;
+ ep->start_frame += urb->number_of_packets;
+ list_del(&urbl->link);
+ list_add_tail(&urbl->link, &xfr_list);
+ ep->credit -= urb->number_of_packets;
+ oz_event_log(OZ_EVT_EP_CREDIT, ep->ep_num | USB_DIR_IN,
+ 0, 0, ep->credit);
+ }
+ }
+ if (!list_empty(&port->isoc_out_ep) || !list_empty(&port->isoc_in_ep))
+ rc = 1;
+ spin_unlock_bh(&ozhcd->hcd_lock);
+ /* Complete the filled URBs.
+ */
+ list_for_each_safe(e, n, &xfr_list) {
+ urbl = container_of(e, struct oz_urb_link, link);
+ urb = urbl->urb;
+ list_del_init(e);
+ oz_free_urb_link(urbl);
+ oz_complete_urb(port->ozhcd->hcd, urb, 0, 0);
+ }
+ /* Check if there are any ep0 requests that have timed out.
+ * If so resend them to the PD.
+ */
+ ep = port->out_ep[0];
+ if (ep) {
+ struct list_head *e;
+ struct list_head *n;
+ spin_lock_bh(&ozhcd->hcd_lock);
+ list_for_each_safe(e, n, &ep->urb_list) {
+ urbl = container_of(e, struct oz_urb_link, link);
+ if (time_after(now, urbl->submit_jiffies+HZ/2)) {
+ oz_trace("%ld: Request 0x%p timeout\n",
+ now, urbl->urb);
+ urbl->submit_jiffies = now;
+ list_del(e);
+ list_add_tail(e, &xfr_list);
+ }
+ }
+ if (!list_empty(&ep->urb_list))
+ rc = 1;
+ spin_unlock_bh(&ozhcd->hcd_lock);
+ e = xfr_list.next;
+ while (e != &xfr_list) {
+ urbl = container_of(e, struct oz_urb_link, link);
+ e = e->next;
+ oz_trace("Resending request to PD.\n");
+ oz_process_ep0_urb(ozhcd, urbl->urb, GFP_ATOMIC);
+ oz_free_urb_link(urbl);
+ }
+ }
+ return rc;
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq
+ */
+static int oz_build_endpoints_for_interface(struct usb_hcd *hcd,
+ struct oz_port *port,
+ struct usb_host_interface *intf, gfp_t mem_flags)
+{
+ struct oz_hcd *ozhcd = port->ozhcd;
+ int i;
+ int if_ix = intf->desc.bInterfaceNumber;
+ int request_heartbeat = 0;
+ oz_trace("interface[%d] = %p\n", if_ix, intf);
+ for (i = 0; i < intf->desc.bNumEndpoints; i++) {
+ struct usb_host_endpoint *hep = &intf->endpoint[i];
+ u8 ep_addr = hep->desc.bEndpointAddress;
+ u8 ep_num = ep_addr & USB_ENDPOINT_NUMBER_MASK;
+ struct oz_endpoint *ep;
+ int buffer_size = 0;
+
+ oz_trace("%d bEndpointAddress = %x\n", i, ep_addr);
+ if ((ep_addr & USB_ENDPOINT_DIR_MASK) &&
+ ((hep->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
+ == USB_ENDPOINT_XFER_ISOC)) {
+ buffer_size = 24*1024;
+ }
+
+ ep = oz_ep_alloc(mem_flags, buffer_size);
+ if (!ep) {
+ oz_clean_endpoints_for_interface(hcd, port, if_ix);
+ return -ENOMEM;
+ }
+ ep->attrib = hep->desc.bmAttributes;
+ ep->ep_num = ep_num;
+ if ((ep->attrib & USB_ENDPOINT_XFERTYPE_MASK)
+ == USB_ENDPOINT_XFER_ISOC) {
+ oz_trace("wMaxPacketSize = %d\n",
+ hep->desc.wMaxPacketSize);
+ ep->credit_ceiling = 200;
+ if (ep_addr & USB_ENDPOINT_DIR_MASK) {
+ ep->flags |= OZ_F_EP_BUFFERING;
+ oz_event_log(OZ_EVT_EP_BUFFERING,
+ ep->ep_num | USB_DIR_IN, 1, 0, 0);
+ } else {
+ ep->flags |= OZ_F_EP_HAVE_STREAM;
+ if (oz_usb_stream_create(port->hpd, ep_num))
+ ep->flags &= ~OZ_F_EP_HAVE_STREAM;
+ }
+ }
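+		/* The interface endpoint mask records OUT endpoints in the
+		 * low OZ_NB_ENDPOINTS bits and IN endpoints in the bits
+		 * above them.
+		 */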
+ spin_lock_bh(&ozhcd->hcd_lock);
+ if (ep_addr & USB_ENDPOINT_DIR_MASK) {
+ port->in_ep[ep_num] = ep;
+ port->iface[if_ix].ep_mask |=
+ (1<<(ep_num+OZ_NB_ENDPOINTS));
+ if ((ep->attrib & USB_ENDPOINT_XFERTYPE_MASK)
+ == USB_ENDPOINT_XFER_ISOC) {
+ list_add_tail(&ep->link, &port->isoc_in_ep);
+ request_heartbeat = 1;
+ }
+ } else {
+ port->out_ep[ep_num] = ep;
+ port->iface[if_ix].ep_mask |= (1<<ep_num);
+ if ((ep->attrib & USB_ENDPOINT_XFERTYPE_MASK)
+ == USB_ENDPOINT_XFER_ISOC) {
+ list_add_tail(&ep->link, &port->isoc_out_ep);
+ request_heartbeat = 1;
+ }
+ }
+ spin_unlock_bh(&ozhcd->hcd_lock);
+ if (request_heartbeat && port->hpd)
+ oz_usb_request_heartbeat(port->hpd);
+ }
+ return 0;
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq
+ */
+static void oz_clean_endpoints_for_interface(struct usb_hcd *hcd,
+ struct oz_port *port, int if_ix)
+{
+ struct oz_hcd *ozhcd = port->ozhcd;
+ unsigned mask;
+ int i;
+ struct list_head ep_list;
+
+ oz_trace("Deleting endpoints for interface %d\n", if_ix);
+ if (if_ix >= port->num_iface)
+ return;
+ INIT_LIST_HEAD(&ep_list);
+ spin_lock_bh(&ozhcd->hcd_lock);
+ mask = port->iface[if_ix].ep_mask;
+ port->iface[if_ix].ep_mask = 0;
+ for (i = 0; i < OZ_NB_ENDPOINTS; i++) {
+ struct list_head *e;
+ /* Gather OUT endpoints.
+ */
+ if ((mask & (1<<i)) && port->out_ep[i]) {
+ e = &port->out_ep[i]->link;
+ port->out_ep[i] = 0;
+ /* Remove from isoc list if present.
+ */
+ list_del(e);
+ list_add_tail(e, &ep_list);
+ }
+ /* Gather IN endpoints.
+ */
+ if ((mask & (1<<(i+OZ_NB_ENDPOINTS))) && port->in_ep[i]) {
+ e = &port->in_ep[i]->link;
+ port->in_ep[i] = 0;
+ list_del(e);
+ list_add_tail(e, &ep_list);
+ }
+ }
+ spin_unlock_bh(&ozhcd->hcd_lock);
+ while (!list_empty(&ep_list)) {
+ struct oz_endpoint *ep =
+ list_first_entry(&ep_list, struct oz_endpoint, link);
+ list_del_init(&ep->link);
+ oz_ep_free(port, ep);
+ }
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq
+ */
+static int oz_build_endpoints_for_config(struct usb_hcd *hcd,
+ struct oz_port *port, struct usb_host_config *config,
+ gfp_t mem_flags)
+{
+ struct oz_hcd *ozhcd = port->ozhcd;
+ int i;
+ int num_iface = config->desc.bNumInterfaces;
+ if (num_iface) {
+ struct oz_interface *iface;
+
+ iface = kmalloc(num_iface*sizeof(struct oz_interface),
+ mem_flags | __GFP_ZERO);
+ if (!iface)
+ return -ENOMEM;
+ spin_lock_bh(&ozhcd->hcd_lock);
+ port->iface = iface;
+ port->num_iface = num_iface;
+ spin_unlock_bh(&ozhcd->hcd_lock);
+ }
+ for (i = 0; i < num_iface; i++) {
+ struct usb_host_interface *intf =
+ &config->intf_cache[i]->altsetting[0];
+ if (oz_build_endpoints_for_interface(hcd, port, intf,
+ mem_flags))
+ goto fail;
+ }
+ return 0;
+fail:
+ oz_clean_endpoints_for_config(hcd, port);
+ return -1;
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq
+ */
+static void oz_clean_endpoints_for_config(struct usb_hcd *hcd,
+ struct oz_port *port)
+{
+ struct oz_hcd *ozhcd = port->ozhcd;
+ int i;
+ oz_trace("Deleting endpoints for configuration.\n");
+ for (i = 0; i < port->num_iface; i++)
+ oz_clean_endpoints_for_interface(hcd, port, i);
+ spin_lock_bh(&ozhcd->hcd_lock);
+ if (port->iface) {
+ oz_trace("Freeing interfaces object.\n");
+ kfree(port->iface);
+ port->iface = 0;
+ }
+ port->num_iface = 0;
+ spin_unlock_bh(&ozhcd->hcd_lock);
+}
+/*------------------------------------------------------------------------------
+ * Context: tasklet
+ */
+static void *oz_claim_hpd(struct oz_port *port)
+{
+ void *hpd = 0;
+ struct oz_hcd *ozhcd = port->ozhcd;
+ spin_lock_bh(&ozhcd->hcd_lock);
+ hpd = port->hpd;
+ if (hpd)
+ oz_usb_get(hpd);
+ spin_unlock_bh(&ozhcd->hcd_lock);
+ return hpd;
+}
+/*------------------------------------------------------------------------------
+ * Context: tasklet
+ */
+static void oz_process_ep0_urb(struct oz_hcd *ozhcd, struct urb *urb,
+ gfp_t mem_flags)
+{
+ struct usb_ctrlrequest *setup;
+ unsigned windex;
+ unsigned wvalue;
+ unsigned wlength;
+ void *hpd = 0;
+ u8 req_id;
+ int rc = 0;
+ unsigned complete = 0;
+
+ int port_ix = -1;
+ struct oz_port *port = 0;
+
+ oz_trace2(OZ_TRACE_URB, "%lu: oz_process_ep0_urb(%p)\n", jiffies, urb);
+ port_ix = oz_get_port_from_addr(ozhcd, urb->dev->devnum);
+ if (port_ix < 0) {
+ rc = -EPIPE;
+ goto out;
+ }
+ port = &ozhcd->ports[port_ix];
+ if (((port->flags & OZ_PORT_F_PRESENT) == 0)
+ || (port->flags & OZ_PORT_F_DYING)) {
+ oz_trace("Refusing URB port_ix = %d devnum = %d\n",
+ port_ix, urb->dev->devnum);
+ rc = -EPIPE;
+ goto out;
+ }
+ /* Store port in private context data.
+ */
+ urb->hcpriv = port;
+ setup = (struct usb_ctrlrequest *)urb->setup_packet;
+ windex = le16_to_cpu(setup->wIndex);
+ wvalue = le16_to_cpu(setup->wValue);
+ wlength = le16_to_cpu(setup->wLength);
+ oz_trace2(OZ_TRACE_CTRL_DETAIL, "bRequestType = %x\n",
+ setup->bRequestType);
+ oz_trace2(OZ_TRACE_CTRL_DETAIL, "bRequest = %x\n", setup->bRequest);
+ oz_trace2(OZ_TRACE_CTRL_DETAIL, "wValue = %x\n", wvalue);
+ oz_trace2(OZ_TRACE_CTRL_DETAIL, "wIndex = %x\n", windex);
+ oz_trace2(OZ_TRACE_CTRL_DETAIL, "wLength = %x\n", wlength);
+
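+	/* Allocate a request id so the response from the PD can later be
+	 * matched back to this urb.
+	 */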
+ req_id = port->next_req_id++;
+ hpd = oz_claim_hpd(port);
+ if (hpd == 0) {
+ oz_trace("Cannot claim port\n");
+ rc = -EPIPE;
+ goto out;
+ }
+
+ if ((setup->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
+ /* Standard requests
+ */
+ switch (setup->bRequest) {
+ case USB_REQ_GET_DESCRIPTOR:
+ oz_trace("USB_REQ_GET_DESCRIPTOR - req\n");
+ break;
+ case USB_REQ_SET_ADDRESS:
+ oz_event_log(OZ_EVT_CTRL_LOCAL, setup->bRequest,
+ 0, 0, setup->bRequestType);
+ oz_trace("USB_REQ_SET_ADDRESS - req\n");
+ oz_trace("Port %d address is 0x%x\n", ozhcd->conn_port,
+ (u8)le16_to_cpu(setup->wValue));
+ spin_lock_bh(&ozhcd->hcd_lock);
+ if (ozhcd->conn_port >= 0) {
+ ozhcd->ports[ozhcd->conn_port].bus_addr =
+ (u8)le16_to_cpu(setup->wValue);
+ oz_trace("Clearing conn_port\n");
+ ozhcd->conn_port = -1;
+ }
+ spin_unlock_bh(&ozhcd->hcd_lock);
+ complete = 1;
+ break;
+ case USB_REQ_SET_CONFIGURATION:
+ oz_trace("USB_REQ_SET_CONFIGURATION - req\n");
+ break;
+ case USB_REQ_GET_CONFIGURATION:
+			/* We short-circuit this case and reply directly since
+ * we have the selected configuration number cached.
+ */
+ oz_event_log(OZ_EVT_CTRL_LOCAL, setup->bRequest, 0, 0,
+ setup->bRequestType);
+ oz_trace("USB_REQ_GET_CONFIGURATION - reply now\n");
+ if (urb->transfer_buffer_length >= 1) {
+ urb->actual_length = 1;
+ *((u8 *)urb->transfer_buffer) =
+ port->config_num;
+ complete = 1;
+ } else {
+ rc = -EPIPE;
+ }
+ break;
+ case USB_REQ_GET_INTERFACE:
+			/* We short-circuit this case and reply directly since
+ * we have the selected interface alternative cached.
+ */
+ oz_event_log(OZ_EVT_CTRL_LOCAL, setup->bRequest, 0, 0,
+ setup->bRequestType);
+ oz_trace("USB_REQ_GET_INTERFACE - reply now\n");
+ if (urb->transfer_buffer_length >= 1) {
+ urb->actual_length = 1;
+ *((u8 *)urb->transfer_buffer) =
+ port->iface[(u8)windex].alt;
+ oz_trace("interface = %d alt = %d\n",
+ windex, port->iface[(u8)windex].alt);
+ complete = 1;
+ } else {
+ rc = -EPIPE;
+ }
+ break;
+ case USB_REQ_SET_INTERFACE:
+ oz_trace("USB_REQ_SET_INTERFACE - req\n");
+ break;
+ }
+ }
+ if (!rc && !complete) {
+ int data_len = 0;
+ if ((setup->bRequestType & USB_DIR_IN) == 0)
+ data_len = wlength;
+ if (oz_usb_control_req(port->hpd, req_id, setup,
+ urb->transfer_buffer, data_len)) {
+ rc = -ENOMEM;
+ } else {
+ /* Note: we are queuing the request after we have
+			 * submitted it to be transmitted. If the request were
+ * to complete before we queued it then it would not
+ * be found in the queue. It seems impossible for
+ * this to happen but if it did the request would
+ * be resubmitted so the problem would hopefully
+ * resolve itself. Putting the request into the
+ * queue before it has been sent is worse since the
+ * urb could be cancelled while we are using it
+ * to build the request.
+ */
+ if (oz_enqueue_ep_urb(port, 0, 0, urb, req_id))
+ rc = -ENOMEM;
+ }
+ }
+ oz_usb_put(hpd);
+out:
+ if (rc || complete) {
+ oz_trace("Completing request locally\n");
+ oz_complete_urb(ozhcd->hcd, urb, rc, 0);
+ } else {
+ oz_usb_request_heartbeat(port->hpd);
+ }
+}
+/*------------------------------------------------------------------------------
+ * Context: tasklet
+ */
+static int oz_urb_process(struct oz_hcd *ozhcd, struct urb *urb)
+{
+ int rc = 0;
+ struct oz_port *port = urb->hcpriv;
+ u8 ep_addr;
+ /* When we are paranoid we keep a list of urbs which we check against
+ * before handing one back. This is just for debugging during
+ * development and should be turned off in the released driver.
+ */
+ oz_remember_urb(urb);
+ /* Check buffer is valid.
+ */
+ if (!urb->transfer_buffer && urb->transfer_buffer_length)
+ return -EINVAL;
+ /* Check if there is a device at the port - refuse if not.
+ */
+ if ((port->flags & OZ_PORT_F_PRESENT) == 0)
+ return -EPIPE;
+ ep_addr = usb_pipeendpoint(urb->pipe);
+ if (ep_addr) {
+ /* If the request is not for EP0 then queue it.
+ */
+ if (oz_enqueue_ep_urb(port, ep_addr, usb_pipein(urb->pipe),
+ urb, 0))
+ rc = -EPIPE;
+ } else {
+ oz_process_ep0_urb(ozhcd, urb, GFP_ATOMIC);
+ }
+ return rc;
+}
+/*------------------------------------------------------------------------------
+ * Context: tasklet
+ */
+static void oz_urb_process_tasklet(unsigned long unused)
+{
+ unsigned long irq_state;
+ struct urb *urb;
+ struct oz_hcd *ozhcd = oz_hcd_claim();
+ int rc = 0;
+ if (ozhcd == 0)
+ return;
+	/* This is called from a tasklet, so we are in softirq context, but
+	 * the urb list is filled from any context, so we must lock
+	 * appropriately while removing urbs.
+ */
+ spin_lock_irqsave(&g_tasklet_lock, irq_state);
+ while (!list_empty(&ozhcd->urb_pending_list)) {
+ struct oz_urb_link *urbl =
+ list_first_entry(&ozhcd->urb_pending_list,
+ struct oz_urb_link, link);
+ list_del_init(&urbl->link);
+ spin_unlock_irqrestore(&g_tasklet_lock, irq_state);
+ urb = urbl->urb;
+ oz_free_urb_link(urbl);
+ rc = oz_urb_process(ozhcd, urb);
+ if (rc)
+ oz_complete_urb(ozhcd->hcd, urb, rc, 0);
+ spin_lock_irqsave(&g_tasklet_lock, irq_state);
+ }
+ spin_unlock_irqrestore(&g_tasklet_lock, irq_state);
+ oz_hcd_put(ozhcd);
+}
+/*------------------------------------------------------------------------------
+ * This function searches for the urb in any of the lists it could be in.
+ * If it is found it is removed from the list and completed. If the urb is
+ * being processed then it won't be in a list so won't be found. However, the
+ * call to usb_hcd_check_unlink_urb() will set the value of the unlinked field
+ * to a non-zero value. When an attempt is made to put the urb back in a list
+ * the unlinked field will be checked and the urb will then be completed.
+ * Context: tasklet
+ */
+static void oz_urb_cancel(struct oz_port *port, u8 ep_num, struct urb *urb)
+{
+ struct oz_urb_link *urbl = 0;
+ struct list_head *e;
+ struct oz_hcd *ozhcd;
+ unsigned long irq_state;
+ u8 ix;
+ if (port == 0) {
+ oz_trace("ERRORERROR: oz_urb_cancel(%p) port is null\n", urb);
+ return;
+ }
+ ozhcd = port->ozhcd;
+ if (ozhcd == 0) {
+ oz_trace("ERRORERROR: oz_urb_cancel(%p) ozhcd is null\n", urb);
+ return;
+ }
+
+ /* Look in the tasklet queue.
+ */
+ spin_lock_irqsave(&g_tasklet_lock, irq_state);
+ list_for_each(e, &ozhcd->urb_cancel_list) {
+ urbl = container_of(e, struct oz_urb_link, link);
+ if (urb == urbl->urb) {
+ list_del_init(e);
+ spin_unlock_irqrestore(&g_tasklet_lock, irq_state);
+ goto out2;
+ }
+ }
+ spin_unlock_irqrestore(&g_tasklet_lock, irq_state);
+ urbl = 0;
+
+ /* Look in the orphanage.
+ */
+ spin_lock_irqsave(&ozhcd->hcd_lock, irq_state);
+ list_for_each(e, &ozhcd->orphanage) {
+ urbl = container_of(e, struct oz_urb_link, link);
+ if (urbl->urb == urb) {
+ list_del(e);
+ oz_trace("Found urb in orphanage\n");
+ goto out;
+ }
+ }
+ ix = (ep_num & 0xf);
+ urbl = 0;
+ if ((ep_num & USB_DIR_IN) && ix)
+ urbl = oz_remove_urb(port->in_ep[ix], urb);
+ else
+ urbl = oz_remove_urb(port->out_ep[ix], urb);
+out:
+ spin_unlock_irqrestore(&ozhcd->hcd_lock, irq_state);
+out2:
+ if (urbl) {
+ urb->actual_length = 0;
+ oz_free_urb_link(urbl);
+ oz_complete_urb(ozhcd->hcd, urb, -EPIPE, 0);
+ }
+}
+/*------------------------------------------------------------------------------
+ * Context: tasklet
+ */
+static void oz_urb_cancel_tasklet(unsigned long unused)
+{
+ unsigned long irq_state;
+ struct urb *urb;
+ struct oz_hcd *ozhcd = oz_hcd_claim();
+ if (ozhcd == 0)
+ return;
+ spin_lock_irqsave(&g_tasklet_lock, irq_state);
+ while (!list_empty(&ozhcd->urb_cancel_list)) {
+ struct oz_urb_link *urbl =
+ list_first_entry(&ozhcd->urb_cancel_list,
+ struct oz_urb_link, link);
+ list_del_init(&urbl->link);
+ spin_unlock_irqrestore(&g_tasklet_lock, irq_state);
+ urb = urbl->urb;
+ if (urb->unlinked)
+ oz_urb_cancel(urbl->port, urbl->ep_num, urb);
+ oz_free_urb_link(urbl);
+ spin_lock_irqsave(&g_tasklet_lock, irq_state);
+ }
+ spin_unlock_irqrestore(&g_tasklet_lock, irq_state);
+ oz_hcd_put(ozhcd);
+}
+/*------------------------------------------------------------------------------
+ * Context: unknown
+ */
+static void oz_hcd_clear_orphanage(struct oz_hcd *ozhcd, int status)
+{
+ if (ozhcd) {
+ struct oz_urb_link *urbl;
+ while (!list_empty(&ozhcd->orphanage)) {
+ urbl = list_first_entry(&ozhcd->orphanage,
+ struct oz_urb_link, link);
+ list_del(&urbl->link);
+ oz_complete_urb(ozhcd->hcd, urbl->urb, status, 0);
+ oz_free_urb_link(urbl);
+ }
+ }
+}
+/*------------------------------------------------------------------------------
+ * Context: unknown
+ */
+static int oz_hcd_start(struct usb_hcd *hcd)
+{
+ oz_trace("oz_hcd_start()\n");
+ hcd->power_budget = 200;
+ hcd->state = HC_STATE_RUNNING;
+ hcd->uses_new_polling = 1;
+ return 0;
+}
+/*------------------------------------------------------------------------------
+ * Context: unknown
+ */
+static void oz_hcd_stop(struct usb_hcd *hcd)
+{
+ oz_trace("oz_hcd_stop()\n");
+}
+/*------------------------------------------------------------------------------
+ * Context: unknown
+ */
+static void oz_hcd_shutdown(struct usb_hcd *hcd)
+{
+ oz_trace("oz_hcd_shutdown()\n");
+}
+/*------------------------------------------------------------------------------
+ * Context: any
+ */
+#ifdef WANT_EVENT_TRACE
+static u8 oz_get_irq_ctx(void)
+{
+ u8 irq_info = 0;
+ if (in_interrupt())
+ irq_info |= 1;
+ if (in_irq())
+ irq_info |= 2;
+ return irq_info;
+}
+#endif /* WANT_EVENT_TRACE */
+/*------------------------------------------------------------------------------
+ * Called to queue an urb for the device.
+ * This function should return a non-zero error code if it fails the urb but
+ * should not call usb_hcd_giveback_urb().
+ * Context: any
+ */
+static int oz_hcd_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
+ gfp_t mem_flags)
+{
+ struct oz_hcd *ozhcd = oz_hcd_private(hcd);
+ int rc = 0;
+ int port_ix;
+ struct oz_port *port;
+ unsigned long irq_state;
+ struct oz_urb_link *urbl;
+ oz_trace2(OZ_TRACE_URB, "%lu: oz_hcd_urb_enqueue(%p)\n",
+ jiffies, urb);
+ oz_event_log(OZ_EVT_URB_SUBMIT, oz_get_irq_ctx(),
+ (u16)urb->number_of_packets, urb, urb->pipe);
+ if (unlikely(ozhcd == 0)) {
+ oz_trace2(OZ_TRACE_URB, "%lu: Refused urb(%p) not ozhcd.\n",
+ jiffies, urb);
+ return -EPIPE;
+ }
+ if (unlikely(hcd->state != HC_STATE_RUNNING)) {
+ oz_trace2(OZ_TRACE_URB, "%lu: Refused urb(%p) not running.\n",
+ jiffies, urb);
+ return -EPIPE;
+ }
+ port_ix = oz_get_port_from_addr(ozhcd, urb->dev->devnum);
+ if (port_ix < 0)
+ return -EPIPE;
+ port = &ozhcd->ports[port_ix];
+ if (port == 0)
+ return -EPIPE;
+ if ((port->flags & OZ_PORT_F_PRESENT) == 0) {
+ oz_trace("Refusing URB port_ix = %d devnum = %d\n",
+ port_ix, urb->dev->devnum);
+ return -EPIPE;
+ }
+ urb->hcpriv = port;
+ /* Put request in queue for processing by tasklet.
+ */
+ urbl = oz_alloc_urb_link();
+ if (unlikely(urbl == 0))
+ return -ENOMEM;
+ urbl->urb = urb;
+ spin_lock_irqsave(&g_tasklet_lock, irq_state);
+ rc = usb_hcd_link_urb_to_ep(hcd, urb);
+ if (unlikely(rc)) {
+ spin_unlock_irqrestore(&g_tasklet_lock, irq_state);
+ oz_free_urb_link(urbl);
+ return rc;
+ }
+ list_add_tail(&urbl->link, &ozhcd->urb_pending_list);
+ spin_unlock_irqrestore(&g_tasklet_lock, irq_state);
+ tasklet_schedule(&g_urb_process_tasklet);
+ atomic_inc(&g_pending_urbs);
+ return 0;
+}
+/*------------------------------------------------------------------------------
+ * Context: tasklet
+ */
+static struct oz_urb_link *oz_remove_urb(struct oz_endpoint *ep,
+ struct urb *urb)
+{
+ struct oz_urb_link *urbl = 0;
+ struct list_head *e;
+ if (unlikely(ep == 0))
+ return 0;
+ list_for_each(e, &ep->urb_list) {
+ urbl = container_of(e, struct oz_urb_link, link);
+ if (urbl->urb == urb) {
+ list_del_init(e);
+ if (usb_pipeisoc(urb->pipe)) {
+ ep->credit -= urb->number_of_packets;
+ if (ep->credit < 0)
+ ep->credit = 0;
+ oz_event_log(OZ_EVT_EP_CREDIT,
+ usb_pipein(urb->pipe) ?
+ (ep->ep_num | USB_DIR_IN) : ep->ep_num,
+ 0, 0, ep->credit);
+ }
+ return urbl;
+ }
+ }
+ return 0;
+}
+/*------------------------------------------------------------------------------
+ * Called to dequeue a previously submitted urb for the device.
+ * Context: any
+ */
+static int oz_hcd_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
+{
+ struct oz_hcd *ozhcd = oz_hcd_private(hcd);
+ struct oz_urb_link *urbl = 0;
+ int rc;
+ unsigned long irq_state;
+ oz_trace2(OZ_TRACE_URB, "%lu: oz_hcd_urb_dequeue(%p)\n", jiffies, urb);
+ urbl = oz_alloc_urb_link();
+ if (unlikely(urbl == 0))
+ return -ENOMEM;
+ spin_lock_irqsave(&g_tasklet_lock, irq_state);
+ /* The following function checks the urb is still in the queue
+ * maintained by the core and that the unlinked field is zero.
+ * If both are true the function sets the unlinked field and returns
+ * zero. Otherwise it returns an error.
+ */
+ rc = usb_hcd_check_unlink_urb(hcd, urb, status);
+ /* We have to check we haven't completed the urb or are about
+ * to complete it. When we do we set hcpriv to 0 so if this has
+ * already happened we don't put the urb in the cancel queue.
+ */
+ if ((rc == 0) && urb->hcpriv) {
+ urbl->urb = urb;
+ urbl->port = (struct oz_port *)urb->hcpriv;
+ urbl->ep_num = usb_pipeendpoint(urb->pipe);
+ if (usb_pipein(urb->pipe))
+ urbl->ep_num |= USB_DIR_IN;
+ list_add_tail(&urbl->link, &ozhcd->urb_cancel_list);
+ spin_unlock_irqrestore(&g_tasklet_lock, irq_state);
+ tasklet_schedule(&g_urb_cancel_tasklet);
+ } else {
+ spin_unlock_irqrestore(&g_tasklet_lock, irq_state);
+ oz_free_urb_link(urbl);
+ }
+ return rc;
+}
+/*------------------------------------------------------------------------------
+ * Context: unknown
+ */
+static void oz_hcd_endpoint_disable(struct usb_hcd *hcd,
+ struct usb_host_endpoint *ep)
+{
+ oz_trace("oz_hcd_endpoint_disable\n");
+}
+/*------------------------------------------------------------------------------
+ * Context: unknown
+ */
+static void oz_hcd_endpoint_reset(struct usb_hcd *hcd,
+ struct usb_host_endpoint *ep)
+{
+ oz_trace("oz_hcd_endpoint_reset\n");
+}
+/*------------------------------------------------------------------------------
+ * Context: unknown
+ */
+static int oz_hcd_get_frame_number(struct usb_hcd *hcd)
+{
+ oz_trace("oz_hcd_get_frame_number\n");
+ return oz_usb_get_frame_number();
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq
+ * This is called as a consequence of our calling usb_hcd_poll_rh_status(),
+ * which we always do in softirq context.
+ */
+static int oz_hcd_hub_status_data(struct usb_hcd *hcd, char *buf)
+{
+ struct oz_hcd *ozhcd = oz_hcd_private(hcd);
+ int i;
+
+ oz_trace2(OZ_TRACE_HUB, "oz_hcd_hub_status_data()\n");
+ buf[0] = 0;
+
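+	/* Bit 0 of the hub status bitmap refers to the hub itself, so
+	 * port i reports its change on bit i+1.
+	 */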
+ spin_lock_bh(&ozhcd->hcd_lock);
+ for (i = 0; i < OZ_NB_PORTS; i++) {
+ if (ozhcd->ports[i].flags & OZ_PORT_F_CHANGED) {
+ oz_trace2(OZ_TRACE_HUB, "Port %d changed\n", i);
+ ozhcd->ports[i].flags &= ~OZ_PORT_F_CHANGED;
+ buf[0] |= 1<<(i+1);
+ }
+ }
+ spin_unlock_bh(&ozhcd->hcd_lock);
+ return buf[0] ? 1 : 0;
+}
+/*------------------------------------------------------------------------------
+ * Context: process
+ */
+static void oz_get_hub_descriptor(struct usb_hcd *hcd,
+ struct usb_hub_descriptor *desc)
+{
+ oz_trace2(OZ_TRACE_HUB, "GetHubDescriptor\n");
+ memset(desc, 0, sizeof(*desc));
+ desc->bDescriptorType = 0x29;
+ desc->bDescLength = 9;
+ desc->wHubCharacteristics = (__force __u16)
+ __constant_cpu_to_le16(0x0001);
+ desc->bNbrPorts = OZ_NB_PORTS;
+}
+/*------------------------------------------------------------------------------
+ * Context: process
+ */
+static int oz_set_port_feature(struct usb_hcd *hcd, u16 wvalue, u16 windex)
+{
+ struct oz_port *port;
+ int err = 0;
+ u8 port_id = (u8)windex;
+ struct oz_hcd *ozhcd = oz_hcd_private(hcd);
+ unsigned set_bits = 0;
+ unsigned clear_bits = 0;
+ oz_trace2(OZ_TRACE_HUB, "SetPortFeature\n");
+ if ((port_id < 1) || (port_id > OZ_NB_PORTS))
+ return -EPIPE;
+ port = &ozhcd->ports[port_id-1];
+ switch (wvalue) {
+ case USB_PORT_FEAT_CONNECTION:
+ oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_CONNECTION\n");
+ break;
+ case USB_PORT_FEAT_ENABLE:
+ oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_ENABLE\n");
+ break;
+ case USB_PORT_FEAT_SUSPEND:
+ oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_SUSPEND\n");
+ break;
+ case USB_PORT_FEAT_OVER_CURRENT:
+ oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_OVER_CURRENT\n");
+ break;
+ case USB_PORT_FEAT_RESET:
+ oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_RESET\n");
+ set_bits = USB_PORT_STAT_ENABLE | (USB_PORT_STAT_C_RESET<<16);
+ clear_bits = USB_PORT_STAT_RESET;
+ ozhcd->ports[port_id-1].bus_addr = 0;
+ break;
+ case USB_PORT_FEAT_POWER:
+ oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_POWER\n");
+ set_bits |= USB_PORT_STAT_POWER;
+ break;
+ case USB_PORT_FEAT_LOWSPEED:
+ oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_LOWSPEED\n");
+ break;
+ case USB_PORT_FEAT_C_CONNECTION:
+ oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_C_CONNECTION\n");
+ break;
+ case USB_PORT_FEAT_C_ENABLE:
+ oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_C_ENABLE\n");
+ break;
+ case USB_PORT_FEAT_C_SUSPEND:
+ oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_C_SUSPEND\n");
+ break;
+ case USB_PORT_FEAT_C_OVER_CURRENT:
+ oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_C_OVER_CURRENT\n");
+ break;
+ case USB_PORT_FEAT_C_RESET:
+ oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_C_RESET\n");
+ break;
+ case USB_PORT_FEAT_TEST:
+ oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_TEST\n");
+ break;
+ case USB_PORT_FEAT_INDICATOR:
+ oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_INDICATOR\n");
+ break;
+ default:
+ oz_trace2(OZ_TRACE_HUB, "Other %d\n", wvalue);
+ break;
+ }
+ if (set_bits || clear_bits) {
+ spin_lock_bh(&port->port_lock);
+ port->status &= ~clear_bits;
+ port->status |= set_bits;
+ spin_unlock_bh(&port->port_lock);
+ }
+ oz_trace2(OZ_TRACE_HUB, "Port[%d] status = 0x%x\n", port_id,
+ port->status);
+ return err;
+}
+/*------------------------------------------------------------------------------
+ * Context: process
+ */
+static int oz_clear_port_feature(struct usb_hcd *hcd, u16 wvalue, u16 windex)
+{
+ struct oz_port *port;
+ int err = 0;
+ u8 port_id = (u8)windex;
+ struct oz_hcd *ozhcd = oz_hcd_private(hcd);
+ unsigned clear_bits = 0;
+ oz_trace2(OZ_TRACE_HUB, "ClearPortFeature\n");
+ if ((port_id < 1) || (port_id > OZ_NB_PORTS))
+ return -EPIPE;
+ port = &ozhcd->ports[port_id-1];
+ switch (wvalue) {
+ case USB_PORT_FEAT_CONNECTION:
+ oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_CONNECTION\n");
+ break;
+ case USB_PORT_FEAT_ENABLE:
+ oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_ENABLE\n");
+ clear_bits = USB_PORT_STAT_ENABLE;
+ break;
+ case USB_PORT_FEAT_SUSPEND:
+ oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_SUSPEND\n");
+ break;
+ case USB_PORT_FEAT_OVER_CURRENT:
+ oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_OVER_CURRENT\n");
+ break;
+ case USB_PORT_FEAT_RESET:
+ oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_RESET\n");
+ break;
+ case USB_PORT_FEAT_POWER:
+ oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_POWER\n");
+ clear_bits |= USB_PORT_STAT_POWER;
+ break;
+ case USB_PORT_FEAT_LOWSPEED:
+ oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_LOWSPEED\n");
+ break;
+ case USB_PORT_FEAT_C_CONNECTION:
+ oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_C_CONNECTION\n");
+ clear_bits = (USB_PORT_STAT_C_CONNECTION << 16);
+ break;
+ case USB_PORT_FEAT_C_ENABLE:
+ oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_C_ENABLE\n");
+ clear_bits = (USB_PORT_STAT_C_ENABLE << 16);
+ break;
+ case USB_PORT_FEAT_C_SUSPEND:
+ oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_C_SUSPEND\n");
+ break;
+ case USB_PORT_FEAT_C_OVER_CURRENT:
+ oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_C_OVER_CURRENT\n");
+ break;
+ case USB_PORT_FEAT_C_RESET:
+ oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_C_RESET\n");
+		clear_bits = (USB_PORT_STAT_C_RESET << 16);
+ break;
+ case USB_PORT_FEAT_TEST:
+ oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_TEST\n");
+ break;
+ case USB_PORT_FEAT_INDICATOR:
+ oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_INDICATOR\n");
+ break;
+ default:
+ oz_trace2(OZ_TRACE_HUB, "Other %d\n", wvalue);
+ break;
+ }
+ if (clear_bits) {
+ spin_lock_bh(&port->port_lock);
+ port->status &= ~clear_bits;
+ spin_unlock_bh(&port->port_lock);
+ }
+ oz_trace2(OZ_TRACE_HUB, "Port[%d] status = 0x%x\n", port_id,
+ ozhcd->ports[port_id-1].status);
+ return err;
+}
+/*------------------------------------------------------------------------------
+ * Context: process
+ */
+static int oz_get_port_status(struct usb_hcd *hcd, u16 windex, char *buf)
+{
+ struct oz_hcd *ozhcd;
+ u32 status = 0;
+ if ((windex < 1) || (windex > OZ_NB_PORTS))
+ return -EPIPE;
+ ozhcd = oz_hcd_private(hcd);
+ oz_trace2(OZ_TRACE_HUB, "GetPortStatus windex = %d\n", windex);
+ status = ozhcd->ports[windex-1].status;
+ put_unaligned(cpu_to_le32(status), (__le32 *)buf);
+ oz_trace2(OZ_TRACE_HUB, "Port[%d] status = %x\n", windex, status);
+ return 0;
+}
+/*------------------------------------------------------------------------------
+ * Context: process
+ */
+static int oz_hcd_hub_control(struct usb_hcd *hcd, u16 req_type, u16 wvalue,
+ u16 windex, char *buf, u16 wlength)
+{
+ int err = 0;
+ oz_trace2(OZ_TRACE_HUB, "oz_hcd_hub_control()\n");
+ switch (req_type) {
+ case ClearHubFeature:
+ oz_trace2(OZ_TRACE_HUB, "ClearHubFeature: %d\n", req_type);
+ break;
+ case ClearPortFeature:
+ err = oz_clear_port_feature(hcd, wvalue, windex);
+ break;
+ case GetHubDescriptor:
+ oz_get_hub_descriptor(hcd, (struct usb_hub_descriptor *)buf);
+ break;
+ case GetHubStatus:
+ oz_trace2(OZ_TRACE_HUB, "GetHubStatus: req_type = 0x%x\n",
+ req_type);
+ put_unaligned(__constant_cpu_to_le32(0), (__le32 *)buf);
+ break;
+ case GetPortStatus:
+ err = oz_get_port_status(hcd, windex, buf);
+ break;
+ case SetHubFeature:
+ oz_trace2(OZ_TRACE_HUB, "SetHubFeature: %d\n", req_type);
+ break;
+ case SetPortFeature:
+ err = oz_set_port_feature(hcd, wvalue, windex);
+ break;
+ default:
+ oz_trace2(OZ_TRACE_HUB, "Other: %d\n", req_type);
+ break;
+ }
+ return err;
+}
+/*------------------------------------------------------------------------------
+ * Context: process
+ */
+static int oz_hcd_bus_suspend(struct usb_hcd *hcd)
+{
+ struct oz_hcd *ozhcd;
+ oz_trace2(OZ_TRACE_HUB, "oz_hcd_hub_suspend()\n");
+ ozhcd = oz_hcd_private(hcd);
+ spin_lock_bh(&ozhcd->hcd_lock);
+ hcd->state = HC_STATE_SUSPENDED;
+ ozhcd->flags |= OZ_HDC_F_SUSPENDED;
+ spin_unlock_bh(&ozhcd->hcd_lock);
+ return 0;
+}
+/*------------------------------------------------------------------------------
+ * Context: process
+ */
+static int oz_hcd_bus_resume(struct usb_hcd *hcd)
+{
+ struct oz_hcd *ozhcd;
+ oz_trace2(OZ_TRACE_HUB, "oz_hcd_hub_resume()\n");
+ ozhcd = oz_hcd_private(hcd);
+ spin_lock_bh(&ozhcd->hcd_lock);
+ ozhcd->flags &= ~OZ_HDC_F_SUSPENDED;
+ hcd->state = HC_STATE_RUNNING;
+ spin_unlock_bh(&ozhcd->hcd_lock);
+ return 0;
+}
+/*------------------------------------------------------------------------------
+ */
+static void oz_plat_shutdown(struct platform_device *dev)
+{
+ oz_trace("oz_plat_shutdown()\n");
+}
+/*------------------------------------------------------------------------------
+ * Context: process
+ */
+static int oz_plat_probe(struct platform_device *dev)
+{
+ int i;
+ int err;
+ struct usb_hcd *hcd;
+ struct oz_hcd *ozhcd;
+ oz_trace("oz_plat_probe()\n");
+ hcd = usb_create_hcd(&g_oz_hc_drv, &dev->dev, dev_name(&dev->dev));
+ if (hcd == 0) {
+ oz_trace("Failed to created hcd object OK\n");
+ return -ENOMEM;
+ }
+ ozhcd = oz_hcd_private(hcd);
+ memset(ozhcd, 0, sizeof(*ozhcd));
+ INIT_LIST_HEAD(&ozhcd->urb_pending_list);
+ INIT_LIST_HEAD(&ozhcd->urb_cancel_list);
+ INIT_LIST_HEAD(&ozhcd->orphanage);
+ ozhcd->hcd = hcd;
+ ozhcd->conn_port = -1;
+ spin_lock_init(&ozhcd->hcd_lock);
+ for (i = 0; i < OZ_NB_PORTS; i++) {
+ struct oz_port *port = &ozhcd->ports[i];
+ port->ozhcd = ozhcd;
+ port->flags = 0;
+ port->status = 0;
+ port->bus_addr = 0xff;
+ spin_lock_init(&port->port_lock);
+ }
+ err = usb_add_hcd(hcd, 0, 0);
+ if (err) {
+ oz_trace("Failed to add hcd object OK\n");
+ usb_put_hcd(hcd);
+ return -1;
+ }
+ spin_lock_bh(&g_hcdlock);
+ g_ozhcd = ozhcd;
+ spin_unlock_bh(&g_hcdlock);
+ return 0;
+}
+/*------------------------------------------------------------------------------
+ * Context: unknown
+ */
+static int oz_plat_remove(struct platform_device *dev)
+{
+ struct usb_hcd *hcd = platform_get_drvdata(dev);
+ struct oz_hcd *ozhcd;
+ oz_trace("oz_plat_remove()\n");
+ if (hcd == 0)
+ return -1;
+ ozhcd = oz_hcd_private(hcd);
+ spin_lock_bh(&g_hcdlock);
+ if (ozhcd == g_ozhcd)
+ g_ozhcd = 0;
+ spin_unlock_bh(&g_hcdlock);
+ oz_trace("Clearing orphanage\n");
+ oz_hcd_clear_orphanage(ozhcd, -EPIPE);
+ oz_trace("Removing hcd\n");
+ usb_remove_hcd(hcd);
+ usb_put_hcd(hcd);
+ oz_empty_link_pool();
+ return 0;
+}
+/*------------------------------------------------------------------------------
+ * Context: unknown
+ */
+static int oz_plat_suspend(struct platform_device *dev, pm_message_t msg)
+{
+ oz_trace("oz_plat_suspend()\n");
+ return 0;
+}
+/*------------------------------------------------------------------------------
+ * Context: unknown
+ */
+static int oz_plat_resume(struct platform_device *dev)
+{
+ oz_trace("oz_plat_resume()\n");
+ return 0;
+}
+/*------------------------------------------------------------------------------
+ * Context: process
+ */
+int oz_hcd_init(void)
+{
+ int err;
+ if (usb_disabled())
+ return -ENODEV;
+ tasklet_init(&g_urb_process_tasklet, oz_urb_process_tasklet, 0);
+ tasklet_init(&g_urb_cancel_tasklet, oz_urb_cancel_tasklet, 0);
+ err = platform_driver_register(&g_oz_plat_drv);
+ oz_trace("platform_driver_register() returned %d\n", err);
+ if (err)
+ goto error;
+ g_plat_dev = platform_device_alloc(OZ_PLAT_DEV_NAME, -1);
+ if (g_plat_dev == 0) {
+ err = -ENOMEM;
+ goto error1;
+ }
+ oz_trace("platform_device_alloc() succeeded\n");
+ err = platform_device_add(g_plat_dev);
+ if (err)
+ goto error2;
+ oz_trace("platform_device_add() succeeded\n");
+ return 0;
+error2:
+ platform_device_put(g_plat_dev);
+error1:
+ platform_driver_unregister(&g_oz_plat_drv);
+error:
+ tasklet_disable(&g_urb_process_tasklet);
+ tasklet_disable(&g_urb_cancel_tasklet);
+ oz_trace("oz_hcd_init() failed %d\n", err);
+ return err;
+}
+/*------------------------------------------------------------------------------
+ * Context: process
+ */
+void oz_hcd_term(void)
+{
+ tasklet_disable(&g_urb_process_tasklet);
+ tasklet_disable(&g_urb_cancel_tasklet);
+ platform_device_unregister(g_plat_dev);
+ platform_driver_unregister(&g_oz_plat_drv);
+ oz_trace("Pending urbs:%d\n", atomic_read(&g_pending_urbs));
+}
diff --git a/drivers/staging/ozwpan/ozhcd.h b/drivers/staging/ozwpan/ozhcd.h
new file mode 100644
index 00000000000..9b30dfd0997
--- /dev/null
+++ b/drivers/staging/ozwpan/ozhcd.h
@@ -0,0 +1,15 @@
+/* -----------------------------------------------------------------------------
+ * Copyright (c) 2011 Ozmo Inc
+ * Released under the GNU General Public License Version 2 (GPLv2).
+ * ---------------------------------------------------------------------------*/
+#ifndef _OZHCD_H
+#define _OZHCD_H
+
+int oz_hcd_init(void);
+void oz_hcd_term(void);
+void *oz_hcd_pd_arrived(void *ctx);
+void oz_hcd_pd_departed(void *ctx);
+void oz_hcd_pd_reset(void *hpd, void *hport);
+
+#endif /* _OZHCD_H */
+
diff --git a/drivers/staging/ozwpan/ozmain.c b/drivers/staging/ozwpan/ozmain.c
new file mode 100644
index 00000000000..aaf2ccc0bcf
--- /dev/null
+++ b/drivers/staging/ozwpan/ozmain.c
@@ -0,0 +1,58 @@
+/* -----------------------------------------------------------------------------
+ * Copyright (c) 2011 Ozmo Inc
+ * Released under the GNU General Public License Version 2 (GPLv2).
+ * -----------------------------------------------------------------------------
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/timer.h>
+#include <linux/sched.h>
+#include <linux/netdevice.h>
+#include <linux/errno.h>
+#include <linux/ieee80211.h>
+#include "ozconfig.h"
+#include "ozpd.h"
+#include "ozproto.h"
+#include "ozcdev.h"
+#include "oztrace.h"
+#include "ozevent.h"
+/*------------------------------------------------------------------------------
+ * The name of the 802.11 mac device. Empty string is the default value but a
+ * value can be supplied as a parameter to the module. An empty string means
+ * bind to nothing. '*' means bind to all netcards - this includes non-802.11
+ * netcards. Bindings can be added later using an IOCTL.
+ */
+char *g_net_dev = "";
+/*------------------------------------------------------------------------------
+ * Context: process
+ */
+static int __init ozwpan_init(void)
+{
+ oz_event_init();
+ oz_cdev_register();
+ oz_protocol_init(g_net_dev);
+ oz_app_enable(OZ_APPID_USB, 1);
+ oz_apps_init();
+ return 0;
+}
+/*------------------------------------------------------------------------------
+ * Context: process
+ */
+static void __exit ozwpan_exit(void)
+{
+ oz_protocol_term();
+ oz_apps_term();
+ oz_cdev_deregister();
+ oz_event_term();
+}
+/*------------------------------------------------------------------------------
+ */
+module_param(g_net_dev, charp, S_IRUGO);
+module_init(ozwpan_init);
+module_exit(ozwpan_exit);
+
+MODULE_AUTHOR("Chris Kelly");
+MODULE_DESCRIPTION("Ozmo Devices USB over WiFi hcd driver");
+MODULE_VERSION("1.0.8");
+MODULE_LICENSE("GPL");
+
diff --git a/drivers/staging/ozwpan/ozpd.c b/drivers/staging/ozwpan/ozpd.c
new file mode 100644
index 00000000000..2b45d3d1800
--- /dev/null
+++ b/drivers/staging/ozwpan/ozpd.c
@@ -0,0 +1,832 @@
+/* -----------------------------------------------------------------------------
+ * Copyright (c) 2011 Ozmo Inc
+ * Released under the GNU General Public License Version 2 (GPLv2).
+ * -----------------------------------------------------------------------------
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/timer.h>
+#include <linux/sched.h>
+#include <linux/netdevice.h>
+#include <linux/errno.h>
+#include "ozconfig.h"
+#include "ozprotocol.h"
+#include "ozeltbuf.h"
+#include "ozpd.h"
+#include "ozproto.h"
+#include "oztrace.h"
+#include "ozevent.h"
+#include "ozcdev.h"
+#include "ozusbsvc.h"
+#include <asm/unaligned.h>
+#include <linux/uaccess.h>
+#include <net/psnap.h>
+/*------------------------------------------------------------------------------
+ */
+#define OZ_MAX_TX_POOL_SIZE 6
+/* Maximum number of uncompleted isoc frames that can be pending.
+ */
+#define OZ_MAX_SUBMITTED_ISOC 16
+/*------------------------------------------------------------------------------
+ */
+static struct oz_tx_frame *oz_tx_frame_alloc(struct oz_pd *pd);
+static void oz_tx_frame_free(struct oz_pd *pd, struct oz_tx_frame *f);
+static struct sk_buff *oz_build_frame(struct oz_pd *pd, struct oz_tx_frame *f);
+static int oz_send_isoc_frame(struct oz_pd *pd);
+static void oz_retire_frame(struct oz_pd *pd, struct oz_tx_frame *f);
+static void oz_isoc_stream_free(struct oz_isoc_stream *st);
+static int oz_send_next_queued_frame(struct oz_pd *pd, int *more_data);
+static void oz_isoc_destructor(struct sk_buff *skb);
+static int oz_def_app_init(void);
+static void oz_def_app_term(void);
+static int oz_def_app_start(struct oz_pd *pd, int resume);
+static void oz_def_app_stop(struct oz_pd *pd, int pause);
+static void oz_def_app_rx(struct oz_pd *pd, struct oz_elt *elt);
+/*------------------------------------------------------------------------------
+ * Counts the uncompleted isoc frames submitted to the netcard.
+ */
+static atomic_t g_submitted_isoc = ATOMIC_INIT(0);
+/* Application handler functions.
+ */
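+/* Entries are indexed by application id minus one, so the order here must
+ * match the OZ_APPID_* numbering.
+ */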
+static struct oz_app_if g_app_if[OZ_APPID_MAX] = {
+ {oz_usb_init,
+ oz_usb_term,
+ oz_usb_start,
+ oz_usb_stop,
+ oz_usb_rx,
+ oz_usb_heartbeat,
+ oz_usb_farewell,
+ OZ_APPID_USB},
+
+ {oz_def_app_init,
+ oz_def_app_term,
+ oz_def_app_start,
+ oz_def_app_stop,
+ oz_def_app_rx,
+ 0,
+ 0,
+ OZ_APPID_UNUSED1},
+
+ {oz_def_app_init,
+ oz_def_app_term,
+ oz_def_app_start,
+ oz_def_app_stop,
+ oz_def_app_rx,
+ 0,
+ 0,
+ OZ_APPID_UNUSED2},
+
+ {oz_cdev_init,
+ oz_cdev_term,
+ oz_cdev_start,
+ oz_cdev_stop,
+ oz_cdev_rx,
+ 0,
+ 0,
+ OZ_APPID_SERIAL},
+};
+/*------------------------------------------------------------------------------
+ * Context: process
+ */
+static int oz_def_app_init(void)
+{
+ return 0;
+}
+/*------------------------------------------------------------------------------
+ * Context: process
+ */
+static void oz_def_app_term(void)
+{
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq
+ */
+static int oz_def_app_start(struct oz_pd *pd, int resume)
+{
+ return 0;
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq
+ */
+static void oz_def_app_stop(struct oz_pd *pd, int pause)
+{
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq
+ */
+static void oz_def_app_rx(struct oz_pd *pd, struct oz_elt *elt)
+{
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq or process
+ */
+void oz_pd_set_state(struct oz_pd *pd, unsigned state)
+{
+ pd->state = state;
+ oz_event_log(OZ_EVT_PD_STATE, 0, 0, 0, state);
+#ifdef WANT_TRACE
+ switch (state) {
+ case OZ_PD_S_IDLE:
+ oz_trace("PD State: OZ_PD_S_IDLE\n");
+ break;
+ case OZ_PD_S_CONNECTED:
+ oz_trace("PD State: OZ_PD_S_CONNECTED\n");
+ break;
+ case OZ_PD_S_STOPPED:
+ oz_trace("PD State: OZ_PD_S_STOPPED\n");
+ break;
+ case OZ_PD_S_SLEEP:
+ oz_trace("PD State: OZ_PD_S_SLEEP\n");
+ break;
+ }
+#endif /* WANT_TRACE */
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq or process
+ */
+void oz_pd_get(struct oz_pd *pd)
+{
+ atomic_inc(&pd->ref_count);
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq or process
+ */
+void oz_pd_put(struct oz_pd *pd)
+{
+ if (atomic_dec_and_test(&pd->ref_count))
+ oz_pd_destroy(pd);
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq-serialized
+ */
+struct oz_pd *oz_pd_alloc(u8 *mac_addr)
+{
+ struct oz_pd *pd = kzalloc(sizeof(struct oz_pd), GFP_ATOMIC);
+ if (pd) {
+ int i;
+ atomic_set(&pd->ref_count, 2);
+ for (i = 0; i < OZ_APPID_MAX; i++)
+ spin_lock_init(&pd->app_lock[i]);
+ pd->last_rx_pkt_num = 0xffffffff;
+ oz_pd_set_state(pd, OZ_PD_S_IDLE);
+ pd->max_tx_size = OZ_MAX_TX_SIZE;
+ memcpy(pd->mac_addr, mac_addr, ETH_ALEN);
+ if (0 != oz_elt_buf_init(&pd->elt_buff)) {
+ kfree(pd);
+ pd = 0;
+ }
+ spin_lock_init(&pd->tx_frame_lock);
+ INIT_LIST_HEAD(&pd->tx_queue);
+ INIT_LIST_HEAD(&pd->farewell_list);
+ pd->last_sent_frame = &pd->tx_queue;
+ spin_lock_init(&pd->stream_lock);
+ INIT_LIST_HEAD(&pd->stream_list);
+ }
+ return pd;
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq or process
+ */
+void oz_pd_destroy(struct oz_pd *pd)
+{
+ struct list_head *e;
+ struct oz_tx_frame *f;
+ struct oz_isoc_stream *st;
+ struct oz_farewell *fwell;
+ oz_trace("Destroying PD\n");
+ /* Delete any streams.
+ */
+ e = pd->stream_list.next;
+ while (e != &pd->stream_list) {
+ st = container_of(e, struct oz_isoc_stream, link);
+ e = e->next;
+ oz_isoc_stream_free(st);
+ }
+ /* Free any queued tx frames.
+ */
+ e = pd->tx_queue.next;
+ while (e != &pd->tx_queue) {
+ f = container_of(e, struct oz_tx_frame, link);
+ e = e->next;
+ oz_retire_frame(pd, f);
+ }
+ oz_elt_buf_term(&pd->elt_buff);
+ /* Free any farewells.
+ */
+ e = pd->farewell_list.next;
+ while (e != &pd->farewell_list) {
+ fwell = container_of(e, struct oz_farewell, link);
+ e = e->next;
+ kfree(fwell);
+ }
+ /* Deallocate all frames in tx pool.
+ */
+ while (pd->tx_pool) {
+ e = pd->tx_pool;
+ pd->tx_pool = e->next;
+ kfree(container_of(e, struct oz_tx_frame, link));
+ }
+ if (pd->net_dev)
+ dev_put(pd->net_dev);
+ kfree(pd);
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq-serialized
+ */
+int oz_services_start(struct oz_pd *pd, u16 apps, int resume)
+{
+ struct oz_app_if *ai;
+ int rc = 0;
+ oz_trace("oz_services_start(0x%x) resume(%d)\n", apps, resume);
+ for (ai = g_app_if; ai < &g_app_if[OZ_APPID_MAX]; ai++) {
+ if (apps & (1<<ai->app_id)) {
+ if (ai->start(pd, resume)) {
+ rc = -1;
+ oz_trace("Unabled to start service %d\n",
+ ai->app_id);
+ break;
+ }
+ oz_polling_lock_bh();
+ pd->total_apps |= (1<<ai->app_id);
+ if (resume)
+ pd->paused_apps &= ~(1<<ai->app_id);
+ oz_polling_unlock_bh();
+ }
+ }
+ return rc;
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq or process
+ */
+void oz_services_stop(struct oz_pd *pd, u16 apps, int pause)
+{
+ struct oz_app_if *ai;
+ oz_trace("oz_stop_services(0x%x) pause(%d)\n", apps, pause);
+ for (ai = g_app_if; ai < &g_app_if[OZ_APPID_MAX]; ai++) {
+ if (apps & (1<<ai->app_id)) {
+ oz_polling_lock_bh();
+ if (pause) {
+ pd->paused_apps |= (1<<ai->app_id);
+ } else {
+ pd->total_apps &= ~(1<<ai->app_id);
+ pd->paused_apps &= ~(1<<ai->app_id);
+ }
+ oz_polling_unlock_bh();
+ ai->stop(pd, pause);
+ }
+ }
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq
+ */
+void oz_pd_heartbeat(struct oz_pd *pd, u16 apps)
+{
+ struct oz_app_if *ai;
+ int more = 0;
+ for (ai = g_app_if; ai < &g_app_if[OZ_APPID_MAX]; ai++) {
+ if (ai->heartbeat && (apps & (1<<ai->app_id))) {
+ if (ai->heartbeat(pd))
+ more = 1;
+ }
+ }
+ if (more)
+ oz_pd_request_heartbeat(pd);
+ if (pd->mode & OZ_F_ISOC_ANYTIME) {
+ int count = 8;
+ while (count-- && (oz_send_isoc_frame(pd) >= 0))
+ ;
+ }
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq or process
+ */
+void oz_pd_stop(struct oz_pd *pd)
+{
+ u16 stop_apps = 0;
+ oz_trace("oz_pd_stop() State = 0x%x\n", pd->state);
+ oz_pd_indicate_farewells(pd);
+ oz_polling_lock_bh();
+ stop_apps = pd->total_apps;
+ pd->total_apps = 0;
+ pd->paused_apps = 0;
+ oz_polling_unlock_bh();
+ oz_services_stop(pd, stop_apps, 0);
+ oz_polling_lock_bh();
+ oz_pd_set_state(pd, OZ_PD_S_STOPPED);
+ /* Remove from PD list.*/
+ list_del(&pd->link);
+ oz_polling_unlock_bh();
+ oz_trace("pd ref count = %d\n", atomic_read(&pd->ref_count));
+ oz_timer_delete(pd, 0);
+ oz_pd_put(pd);
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq
+ */
+int oz_pd_sleep(struct oz_pd *pd)
+{
+ int do_stop = 0;
+ u16 stop_apps = 0;
+ oz_polling_lock_bh();
+ if (pd->state & (OZ_PD_S_SLEEP | OZ_PD_S_STOPPED)) {
+ oz_polling_unlock_bh();
+ return 0;
+ }
+ if (pd->keep_alive_j && pd->session_id) {
+ oz_pd_set_state(pd, OZ_PD_S_SLEEP);
+ pd->pulse_time_j = jiffies + pd->keep_alive_j;
+ oz_trace("Sleep Now %lu until %lu\n",
+ jiffies, pd->pulse_time_j);
+ } else {
+ do_stop = 1;
+ }
+ stop_apps = pd->total_apps;
+ oz_polling_unlock_bh();
+ if (do_stop) {
+ oz_pd_stop(pd);
+ } else {
+ oz_services_stop(pd, stop_apps, 1);
+ oz_timer_add(pd, OZ_TIMER_STOP, jiffies + pd->keep_alive_j, 1);
+ }
+ return do_stop;
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq
+ */
+static struct oz_tx_frame *oz_tx_frame_alloc(struct oz_pd *pd)
+{
+ struct oz_tx_frame *f = 0;
+ spin_lock_bh(&pd->tx_frame_lock);
+ if (pd->tx_pool) {
+ f = container_of(pd->tx_pool, struct oz_tx_frame, link);
+ pd->tx_pool = pd->tx_pool->next;
+ pd->tx_pool_count--;
+ }
+ spin_unlock_bh(&pd->tx_frame_lock);
+ if (f == 0)
+ f = kmalloc(sizeof(struct oz_tx_frame), GFP_ATOMIC);
+ if (f) {
+ f->total_size = sizeof(struct oz_hdr);
+ INIT_LIST_HEAD(&f->link);
+ INIT_LIST_HEAD(&f->elt_list);
+ }
+ return f;
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq or process
+ */
+static void oz_tx_frame_free(struct oz_pd *pd, struct oz_tx_frame *f)
+{
+ spin_lock_bh(&pd->tx_frame_lock);
+ if (pd->tx_pool_count < OZ_MAX_TX_POOL_SIZE) {
+ f->link.next = pd->tx_pool;
+ pd->tx_pool = &f->link;
+ pd->tx_pool_count++;
+ f = 0;
+ } else {
+ kfree(f);
+ }
+ spin_unlock_bh(&pd->tx_frame_lock);
+ if (f)
+ kfree(f);
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq
+ */
+int oz_prepare_frame(struct oz_pd *pd, int empty)
+{
+ struct oz_tx_frame *f;
+ if ((pd->mode & OZ_MODE_MASK) != OZ_MODE_TRIGGERED)
+ return -1;
+ if (pd->nb_queued_frames >= OZ_MAX_QUEUED_FRAMES)
+ return -1;
+ if (!empty && !oz_are_elts_available(&pd->elt_buff))
+ return -1;
+ f = oz_tx_frame_alloc(pd);
+ if (f == 0)
+ return -1;
+ f->hdr.control =
+ (OZ_PROTOCOL_VERSION<<OZ_VERSION_SHIFT) | OZ_F_ACK_REQUESTED;
+ ++pd->last_tx_pkt_num;
+ put_unaligned(cpu_to_le32(pd->last_tx_pkt_num), &f->hdr.pkt_num);
+ if (empty == 0) {
+ oz_select_elts_for_tx(&pd->elt_buff, 0, &f->total_size,
+ pd->max_tx_size, &f->elt_list);
+ }
+ spin_lock(&pd->tx_frame_lock);
+ list_add_tail(&f->link, &pd->tx_queue);
+ pd->nb_queued_frames++;
+ spin_unlock(&pd->tx_frame_lock);
+ return 0;
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq-serialized
+ */
+static struct sk_buff *oz_build_frame(struct oz_pd *pd, struct oz_tx_frame *f)
+{
+ struct sk_buff *skb = 0;
+ struct net_device *dev = pd->net_dev;
+ struct oz_hdr *oz_hdr;
+ struct oz_elt *elt;
+ struct list_head *e;
+ /* Allocate skb with enough space for the lower layers as well
+ * as the space we need.
+ */
+ skb = alloc_skb(f->total_size + OZ_ALLOCATED_SPACE(dev), GFP_ATOMIC);
+ if (skb == 0)
+ return 0;
+ /* Reserve the head room for lower layers.
+ */
+ skb_reserve(skb, LL_RESERVED_SPACE(dev));
+ skb_reset_network_header(skb);
+ skb->dev = dev;
+ skb->protocol = htons(OZ_ETHERTYPE);
+ if (dev_hard_header(skb, dev, OZ_ETHERTYPE, pd->mac_addr,
+ dev->dev_addr, skb->len) < 0)
+ goto fail;
+ /* Push the tail to the end of the area we are going to copy to.
+ */
+ oz_hdr = (struct oz_hdr *)skb_put(skb, f->total_size);
+ f->hdr.last_pkt_num = pd->trigger_pkt_num & OZ_LAST_PN_MASK;
+ memcpy(oz_hdr, &f->hdr, sizeof(struct oz_hdr));
+ /* Copy the elements into the frame body.
+ */
+ elt = (struct oz_elt *)(oz_hdr+1);
+ for (e = f->elt_list.next; e != &f->elt_list; e = e->next) {
+ struct oz_elt_info *ei;
+ ei = container_of(e, struct oz_elt_info, link);
+ memcpy(elt, ei->data, ei->length);
+ elt = oz_next_elt(elt);
+ }
+ return skb;
+fail:
+ kfree_skb(skb);
+ return 0;
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq or process
+ */
+static void oz_retire_frame(struct oz_pd *pd, struct oz_tx_frame *f)
+{
+ struct list_head *e;
+ struct oz_elt_info *ei;
+ e = f->elt_list.next;
+ while (e != &f->elt_list) {
+ ei = container_of(e, struct oz_elt_info, link);
+ e = e->next;
+ list_del_init(&ei->link);
+ if (ei->callback)
+ ei->callback(pd, ei->context);
+ spin_lock_bh(&pd->elt_buff.lock);
+ oz_elt_info_free(&pd->elt_buff, ei);
+ spin_unlock_bh(&pd->elt_buff.lock);
+ }
+ oz_tx_frame_free(pd, f);
+ if (pd->elt_buff.free_elts > pd->elt_buff.max_free_elts)
+ oz_trim_elt_pool(&pd->elt_buff);
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq-serialized
+ */
+static int oz_send_next_queued_frame(struct oz_pd *pd, int *more_data)
+{
+ struct sk_buff *skb;
+ struct oz_tx_frame *f;
+ struct list_head *e;
+ *more_data = 0;
+ spin_lock(&pd->tx_frame_lock);
+ e = pd->last_sent_frame->next;
+ if (e == &pd->tx_queue) {
+ spin_unlock(&pd->tx_frame_lock);
+ return -1;
+ }
+ pd->last_sent_frame = e;
+ if (e->next != &pd->tx_queue)
+ *more_data = 1;
+ f = container_of(e, struct oz_tx_frame, link);
+ skb = oz_build_frame(pd, f);
+ spin_unlock(&pd->tx_frame_lock);
+ oz_trace2(OZ_TRACE_TX_FRAMES, "TX frame PN=0x%x\n", f->hdr.pkt_num);
+ if (skb) {
+ oz_event_log(OZ_EVT_TX_FRAME,
+ 0,
+ (((u16)f->hdr.control)<<8)|f->hdr.last_pkt_num,
+ 0, f->hdr.pkt_num);
+ if (dev_queue_xmit(skb) < 0)
+ return -1;
+ }
+ return 0;
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq-serialized
+ */
+void oz_send_queued_frames(struct oz_pd *pd, int backlog)
+{
+ int more;
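+	/* Send frames while more data is queued. If nothing is queued, send
+	 * an empty frame; it still carries last_pkt_num and so acknowledges
+	 * the packets received from the PD.
+	 */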
+ if (backlog < OZ_MAX_QUEUED_FRAMES) {
+ if (oz_send_next_queued_frame(pd, &more) >= 0) {
+ while (more && oz_send_next_queued_frame(pd, &more))
+ ;
+ } else {
+ if (((pd->mode & OZ_F_ISOC_ANYTIME) == 0)
+ || (pd->isoc_sent == 0)) {
+ if (oz_prepare_frame(pd, 1) >= 0)
+ oz_send_next_queued_frame(pd, &more);
+ }
+ }
+ } else {
+ oz_send_next_queued_frame(pd, &more);
+ }
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq
+ */
+static int oz_send_isoc_frame(struct oz_pd *pd)
+{
+ struct sk_buff *skb = 0;
+ struct net_device *dev = pd->net_dev;
+ struct oz_hdr *oz_hdr;
+ struct oz_elt *elt;
+ struct list_head *e;
+ struct list_head list;
+ int total_size = sizeof(struct oz_hdr);
+ INIT_LIST_HEAD(&list);
+
+ oz_select_elts_for_tx(&pd->elt_buff, 1, &total_size,
+ pd->max_tx_size, &list);
+ if (list.next == &list)
+ return 0;
+ skb = alloc_skb(total_size + OZ_ALLOCATED_SPACE(dev), GFP_ATOMIC);
+ if (skb == 0) {
+ oz_trace("Cannot alloc skb\n");
+ oz_elt_info_free_chain(&pd->elt_buff, &list);
+ return -1;
+ }
+ skb_reserve(skb, LL_RESERVED_SPACE(dev));
+ skb_reset_network_header(skb);
+ skb->dev = dev;
+ skb->protocol = htons(OZ_ETHERTYPE);
+ if (dev_hard_header(skb, dev, OZ_ETHERTYPE, pd->mac_addr,
+ dev->dev_addr, skb->len) < 0) {
+ kfree_skb(skb);
+ return -1;
+ }
+ oz_hdr = (struct oz_hdr *)skb_put(skb, total_size);
+ oz_hdr->control = (OZ_PROTOCOL_VERSION<<OZ_VERSION_SHIFT) | OZ_F_ISOC;
+ oz_hdr->last_pkt_num = pd->trigger_pkt_num & OZ_LAST_PN_MASK;
+ elt = (struct oz_elt *)(oz_hdr+1);
+
+ for (e = list.next; e != &list; e = e->next) {
+ struct oz_elt_info *ei;
+ ei = container_of(e, struct oz_elt_info, link);
+ memcpy(elt, ei->data, ei->length);
+ elt = oz_next_elt(elt);
+ }
+ oz_event_log(OZ_EVT_TX_ISOC, 0, 0, 0, 0);
+ dev_queue_xmit(skb);
+ oz_elt_info_free_chain(&pd->elt_buff, &list);
+ return 0;
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq-serialized
+ */
+void oz_retire_tx_frames(struct oz_pd *pd, u8 lpn)
+{
+ struct list_head *e;
+ struct oz_tx_frame *f;
+ struct list_head *first = 0;
+ struct list_head *last = 0;
+ u8 diff;
+ u32 pkt_num;
+
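+	/* Detach every queued frame whose packet number is no more than half
+	 * a cycle behind the acknowledged number lpn; those frames have been
+	 * delivered and can be retired.
+	 */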
+ spin_lock(&pd->tx_frame_lock);
+ e = pd->tx_queue.next;
+ while (e != &pd->tx_queue) {
+ f = container_of(e, struct oz_tx_frame, link);
+ pkt_num = le32_to_cpu(get_unaligned(&f->hdr.pkt_num));
+ diff = (lpn - (pkt_num & OZ_LAST_PN_MASK)) & OZ_LAST_PN_MASK;
+ if (diff > OZ_LAST_PN_HALF_CYCLE)
+ break;
+ if (first == 0)
+ first = e;
+ last = e;
+ e = e->next;
+ pd->nb_queued_frames--;
+ }
+ if (first) {
+ last->next->prev = &pd->tx_queue;
+ pd->tx_queue.next = last->next;
+ last->next = 0;
+ }
+ pd->last_sent_frame = &pd->tx_queue;
+ spin_unlock(&pd->tx_frame_lock);
+ while (first) {
+ f = container_of(first, struct oz_tx_frame, link);
+ first = first->next;
+ oz_retire_frame(pd, f);
+ }
+}
+/*------------------------------------------------------------------------------
+ * Precondition: stream_lock must be held.
+ * Context: softirq
+ */
+static struct oz_isoc_stream *pd_stream_find(struct oz_pd *pd, u8 ep_num)
+{
+ struct list_head *e;
+ struct oz_isoc_stream *st;
+ list_for_each(e, &pd->stream_list) {
+ st = container_of(e, struct oz_isoc_stream, link);
+ if (st->ep_num == ep_num)
+ return st;
+ }
+ return 0;
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq
+ */
+int oz_isoc_stream_create(struct oz_pd *pd, u8 ep_num)
+{
+ struct oz_isoc_stream *st =
+ kzalloc(sizeof(struct oz_isoc_stream), GFP_ATOMIC);
+ if (!st)
+ return -ENOMEM;
+ st->ep_num = ep_num;
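+	/* Allocate before taking the lock; if another context created the
+	 * stream first, the spare allocation is freed below.
+	 */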
+ spin_lock_bh(&pd->stream_lock);
+ if (!pd_stream_find(pd, ep_num)) {
+ list_add(&st->link, &pd->stream_list);
+ st = 0;
+ }
+ spin_unlock_bh(&pd->stream_lock);
+ if (st)
+ kfree(st);
+ return 0;
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq or process
+ */
+static void oz_isoc_stream_free(struct oz_isoc_stream *st)
+{
+ if (st->skb)
+ kfree_skb(st->skb);
+ kfree(st);
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq
+ */
+int oz_isoc_stream_delete(struct oz_pd *pd, u8 ep_num)
+{
+ struct oz_isoc_stream *st;
+ spin_lock_bh(&pd->stream_lock);
+ st = pd_stream_find(pd, ep_num);
+ if (st)
+ list_del(&st->link);
+ spin_unlock_bh(&pd->stream_lock);
+ if (st)
+ oz_isoc_stream_free(st);
+ return 0;
+}
+/*------------------------------------------------------------------------------
+ * Context: any
+ */
+static void oz_isoc_destructor(struct sk_buff *skb)
+{
+ atomic_dec(&g_submitted_isoc);
+ oz_event_log(OZ_EVT_TX_ISOC_DONE, atomic_read(&g_submitted_isoc),
+ 0, skb, 0);
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq
+ */
+int oz_send_isoc_unit(struct oz_pd *pd, u8 ep_num, u8 *data, int len)
+{
+ struct net_device *dev = pd->net_dev;
+ struct oz_isoc_stream *st;
+ u8 nb_units = 0;
+ struct sk_buff *skb = 0;
+ struct oz_hdr *oz_hdr = 0;
+ int size = 0;
+ spin_lock_bh(&pd->stream_lock);
+ st = pd_stream_find(pd, ep_num);
+ if (st) {
+ skb = st->skb;
+ st->skb = 0;
+ nb_units = st->nb_units;
+ st->nb_units = 0;
+ oz_hdr = st->oz_hdr;
+ size = st->size;
+ }
+ spin_unlock_bh(&pd->stream_lock);
+ if (!st)
+ return 0;
+ if (!skb) {
+ /* Allocate enough space for max size frame. */
+ skb = alloc_skb(pd->max_tx_size + OZ_ALLOCATED_SPACE(dev),
+ GFP_ATOMIC);
+ if (skb == 0)
+ return 0;
+ /* Reserve the head room for lower layers. */
+ skb_reserve(skb, LL_RESERVED_SPACE(dev));
+ skb_reset_network_header(skb);
+ skb->dev = dev;
+ skb->protocol = htons(OZ_ETHERTYPE);
+ size = sizeof(struct oz_hdr) + sizeof(struct oz_isoc_large);
+ oz_hdr = (struct oz_hdr *)skb_put(skb, size);
+ }
+ memcpy(skb_put(skb, len), data, len);
+ size += len;
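+	/* Accumulate units until the frame holds ms_per_isoc milliseconds of
+	 * data, then fill in the headers and transmit the whole frame.
+	 */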
+ if (++nb_units < pd->ms_per_isoc) {
+ spin_lock_bh(&pd->stream_lock);
+ st->skb = skb;
+ st->nb_units = nb_units;
+ st->oz_hdr = oz_hdr;
+ st->size = size;
+ spin_unlock_bh(&pd->stream_lock);
+ } else {
+ struct oz_hdr oz;
+ struct oz_isoc_large iso;
+ spin_lock_bh(&pd->stream_lock);
+ iso.frame_number = st->frame_num;
+ st->frame_num += nb_units;
+ spin_unlock_bh(&pd->stream_lock);
+ oz.control =
+ (OZ_PROTOCOL_VERSION<<OZ_VERSION_SHIFT) | OZ_F_ISOC;
+ oz.last_pkt_num = pd->trigger_pkt_num & OZ_LAST_PN_MASK;
+ oz.pkt_num = 0;
+ iso.endpoint = ep_num;
+ iso.format = OZ_DATA_F_ISOC_LARGE;
+ iso.ms_data = nb_units;
+ memcpy(oz_hdr, &oz, sizeof(oz));
+ memcpy(oz_hdr+1, &iso, sizeof(iso));
+ if (dev_hard_header(skb, dev, OZ_ETHERTYPE, pd->mac_addr,
+ dev->dev_addr, skb->len) < 0) {
+ kfree_skb(skb);
+ return -1;
+ }
+ if (atomic_read(&g_submitted_isoc) < OZ_MAX_SUBMITTED_ISOC) {
+ skb->destructor = oz_isoc_destructor;
+ atomic_inc(&g_submitted_isoc);
+ oz_event_log(OZ_EVT_TX_ISOC, nb_units, iso.frame_number,
+ skb, atomic_read(&g_submitted_isoc));
+ if (dev_queue_xmit(skb) < 0)
+ return -1;
+ } else {
+ oz_event_log(OZ_EVT_TX_ISOC_DROP, 0, 0, 0, 0);
+ kfree_skb(skb);
+ }
+ }
+ return 0;
+}
+/*------------------------------------------------------------------------------
+ * Context: process
+ */
+void oz_apps_init(void)
+{
+ int i;
+ for (i = 0; i < OZ_APPID_MAX; i++)
+ if (g_app_if[i].init)
+ g_app_if[i].init();
+}
+/*------------------------------------------------------------------------------
+ * Context: process
+ */
+void oz_apps_term(void)
+{
+ int i;
+ /* Terminate all the apps. */
+ for (i = 0; i < OZ_APPID_MAX; i++)
+ if (g_app_if[i].term)
+ g_app_if[i].term();
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq-serialized
+ */
+void oz_handle_app_elt(struct oz_pd *pd, u8 app_id, struct oz_elt *elt)
+{
+ struct oz_app_if *ai;
+ if (app_id == 0 || app_id > OZ_APPID_MAX)
+ return;
+ ai = &g_app_if[app_id-1];
+ ai->rx(pd, elt);
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq or process
+ */
+void oz_pd_indicate_farewells(struct oz_pd *pd)
+{
+ struct oz_farewell *f;
+ struct oz_app_if *ai = &g_app_if[OZ_APPID_USB-1];
+ while (1) {
+ oz_polling_lock_bh();
+ if (list_empty(&pd->farewell_list)) {
+ oz_polling_unlock_bh();
+ break;
+ }
+ f = list_first_entry(&pd->farewell_list,
+ struct oz_farewell, link);
+ list_del(&f->link);
+ oz_polling_unlock_bh();
+ if (ai->farewell)
+ ai->farewell(pd, f->ep_num, f->report, f->len);
+ kfree(f);
+ }
+}
diff --git a/drivers/staging/ozwpan/ozpd.h b/drivers/staging/ozwpan/ozpd.h
new file mode 100644
index 00000000000..afc77f0260f
--- /dev/null
+++ b/drivers/staging/ozwpan/ozpd.h
@@ -0,0 +1,121 @@
+/* -----------------------------------------------------------------------------
+ * Copyright (c) 2011 Ozmo Inc
+ * Released under the GNU General Public License Version 2 (GPLv2).
+ * -----------------------------------------------------------------------------
+ */
+#ifndef _OZPD_H_
+#define _OZPD_H_
+
+#include "ozeltbuf.h"
+
+/* PD state
+ */
+#define OZ_PD_S_IDLE 0x1
+#define OZ_PD_S_CONNECTED 0x2
+#define OZ_PD_S_SLEEP 0x4
+#define OZ_PD_S_STOPPED 0x8
+
+/* Timer event types.
+ */
+#define OZ_TIMER_TOUT 1
+#define OZ_TIMER_HEARTBEAT 2
+#define OZ_TIMER_STOP 3
+
+/* Data structure that holds information on a frame for transmission. This is
+ * built when the frame is first transmitted and is used to rebuild the frame
+ * if a re-transmission is required.
+ */
+struct oz_tx_frame {
+ struct list_head link;
+ struct list_head elt_list;
+ struct oz_hdr hdr;
+ int total_size;
+};
+
+struct oz_isoc_stream {
+ struct list_head link;
+ u8 ep_num;
+ u8 frame_num;
+ u8 nb_units;
+ int size;
+ struct sk_buff *skb;
+ struct oz_hdr *oz_hdr;
+};
+
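+/* A farewell report is variable length. The structure is allocated with
+ * extra space for the report, so report[] must remain the last member.
+ */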
+struct oz_farewell {
+	struct list_head link;
+	u8 ep_num;
+	u8 index;
+	u8 len;
+	u8 report[1];
+};
+
+/* Data structure that holds information on a specific peripheral device (PD).
+ */
+struct oz_pd {
+ struct list_head link;
+ atomic_t ref_count;
+ u8 mac_addr[ETH_ALEN];
+ unsigned state;
+ unsigned state_flags;
+ unsigned send_flags;
+ u16 total_apps;
+ u16 paused_apps;
+ u8 session_id;
+ u8 param_rsp_status;
+ u8 pd_info;
+ u8 isoc_sent;
+ u32 last_rx_pkt_num;
+ u32 last_tx_pkt_num;
+ u32 trigger_pkt_num;
+ unsigned long pulse_time_j;
+ unsigned long timeout_time_j;
+ unsigned long pulse_period_j;
+ unsigned long presleep_j;
+ unsigned long keep_alive_j;
+ unsigned long last_rx_time_j;
+ struct oz_elt_buf elt_buff;
+ void *app_ctx[OZ_APPID_MAX];
+ spinlock_t app_lock[OZ_APPID_MAX];
+ int max_tx_size;
+ u8 heartbeat_requested;
+ u8 mode;
+ u8 ms_per_isoc;
+ unsigned max_stream_buffering;
+ int nb_queued_frames;
+ struct list_head *tx_pool;
+ int tx_pool_count;
+ spinlock_t tx_frame_lock;
+ struct list_head *last_sent_frame;
+ struct list_head tx_queue;
+ struct list_head farewell_list;
+ spinlock_t stream_lock;
+ struct list_head stream_list;
+ struct net_device *net_dev;
+};
+
+#define OZ_MAX_QUEUED_FRAMES 4
+
+struct oz_pd *oz_pd_alloc(u8 *mac_addr);
+void oz_pd_destroy(struct oz_pd *pd);
+void oz_pd_get(struct oz_pd *pd);
+void oz_pd_put(struct oz_pd *pd);
+void oz_pd_set_state(struct oz_pd *pd, unsigned state);
+void oz_pd_indicate_farewells(struct oz_pd *pd);
+int oz_pd_sleep(struct oz_pd *pd);
+void oz_pd_stop(struct oz_pd *pd);
+void oz_pd_heartbeat(struct oz_pd *pd, u16 apps);
+int oz_services_start(struct oz_pd *pd, u16 apps, int resume);
+void oz_services_stop(struct oz_pd *pd, u16 apps, int pause);
+int oz_prepare_frame(struct oz_pd *pd, int empty);
+void oz_send_queued_frames(struct oz_pd *pd, int backlog);
+void oz_retire_tx_frames(struct oz_pd *pd, u8 lpn);
+int oz_isoc_stream_create(struct oz_pd *pd, u8 ep_num);
+int oz_isoc_stream_delete(struct oz_pd *pd, u8 ep_num);
+int oz_send_isoc_unit(struct oz_pd *pd, u8 ep_num, u8 *data, int len);
+void oz_handle_app_elt(struct oz_pd *pd, u8 app_id, struct oz_elt *elt);
+void oz_apps_init(void);
+void oz_apps_term(void);
+
+#endif /* _OZPD_H_ */
+
diff --git a/drivers/staging/ozwpan/ozproto.c b/drivers/staging/ozwpan/ozproto.c
new file mode 100644
index 00000000000..ad857eeabbb
--- /dev/null
+++ b/drivers/staging/ozwpan/ozproto.c
@@ -0,0 +1,957 @@
+/* -----------------------------------------------------------------------------
+ * Copyright (c) 2011 Ozmo Inc
+ * Released under the GNU General Public License Version 2 (GPLv2).
+ * -----------------------------------------------------------------------------
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/timer.h>
+#include <linux/sched.h>
+#include <linux/netdevice.h>
+#include <linux/errno.h>
+#include <linux/ieee80211.h>
+#include "ozconfig.h"
+#include "ozprotocol.h"
+#include "ozeltbuf.h"
+#include "ozpd.h"
+#include "ozproto.h"
+#include "ozusbsvc.h"
+#include "oztrace.h"
+#include "ozappif.h"
+#include "ozevent.h"
+#include <asm/unaligned.h>
+#include <linux/uaccess.h>
+#include <net/psnap.h>
+/*------------------------------------------------------------------------------
+ */
+#define OZ_CF_CONN_SUCCESS 1
+#define OZ_CF_CONN_FAILURE 2
+
+#define OZ_DO_STOP 1
+#define OZ_DO_SLEEP 2
+
+/* States of the timer.
+ */
+#define OZ_TIMER_IDLE 0
+#define OZ_TIMER_SET 1
+#define OZ_TIMER_IN_HANDLER 2
+
+#define OZ_MAX_TIMER_POOL_SIZE 16
+
+/*------------------------------------------------------------------------------
+ */
+struct oz_binding {
+ struct packet_type ptype;
+ char name[OZ_MAX_BINDING_LEN];
+ struct oz_binding *next;
+};
+
+struct oz_timer {
+ struct list_head link;
+ struct oz_pd *pd;
+ unsigned long due_time;
+ int type;
+};
+/*------------------------------------------------------------------------------
+ * Static external variables.
+ */
+static DEFINE_SPINLOCK(g_polling_lock);
+static LIST_HEAD(g_pd_list);
+static struct oz_binding *g_binding;
+static DEFINE_SPINLOCK(g_binding_lock);
+static struct sk_buff_head g_rx_queue;
+static u8 g_session_id;
+static u16 g_apps = 0x1;
+static int g_processing_rx;
+static struct timer_list g_timer;
+static struct oz_timer *g_cur_timer;
+static struct list_head *g_timer_pool;
+static int g_timer_pool_count;
+static int g_timer_state = OZ_TIMER_IDLE;
+static LIST_HEAD(g_timer_list);
+/*------------------------------------------------------------------------------
+ */
+static void oz_protocol_timer_start(void);
+/*------------------------------------------------------------------------------
+ * Context: softirq-serialized
+ */
+static u8 oz_get_new_session_id(u8 exclude)
+{
+ if (++g_session_id == 0)
+ g_session_id = 1;
+ if (g_session_id == exclude) {
+ if (++g_session_id == 0)
+ g_session_id = 1;
+ }
+ return g_session_id;
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq-serialized
+ */
+static void oz_send_conn_rsp(struct oz_pd *pd, u8 status)
+{
+ struct sk_buff *skb;
+ struct net_device *dev = pd->net_dev;
+ struct oz_hdr *oz_hdr;
+ struct oz_elt *elt;
+ struct oz_elt_connect_rsp *body;
+ int sz = sizeof(struct oz_hdr) + sizeof(struct oz_elt) +
+ sizeof(struct oz_elt_connect_rsp);
+ skb = alloc_skb(sz + OZ_ALLOCATED_SPACE(dev), GFP_ATOMIC);
+ if (skb == 0)
+ return;
+ skb_reserve(skb, LL_RESERVED_SPACE(dev));
+ skb_reset_network_header(skb);
+ oz_hdr = (struct oz_hdr *)skb_put(skb, sz);
+ elt = (struct oz_elt *)(oz_hdr+1);
+ body = (struct oz_elt_connect_rsp *)(elt+1);
+ skb->dev = dev;
+ skb->protocol = htons(OZ_ETHERTYPE);
+ /* Fill in device header */
+ if (dev_hard_header(skb, dev, OZ_ETHERTYPE, pd->mac_addr,
+ dev->dev_addr, skb->len) < 0) {
+ kfree_skb(skb);
+ return;
+ }
+ oz_hdr->control = (OZ_PROTOCOL_VERSION<<OZ_VERSION_SHIFT);
+ oz_hdr->last_pkt_num = 0;
+ put_unaligned(0, &oz_hdr->pkt_num);
+ oz_event_log(OZ_EVT_CONNECT_RSP, 0, 0, 0, 0);
+ elt->type = OZ_ELT_CONNECT_RSP;
+ elt->length = sizeof(struct oz_elt_connect_rsp);
+ memset(body, 0, sizeof(struct oz_elt_connect_rsp));
+ body->status = status;
+ if (status == 0) {
+ body->mode = pd->mode;
+ body->session_id = pd->session_id;
+ put_unaligned(cpu_to_le16(pd->total_apps), &body->apps);
+ }
+ oz_trace("TX: OZ_ELT_CONNECT_RSP %d", status);
+ dev_queue_xmit(skb);
+ return;
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq-serialized
+ */
+static void pd_set_keepalive(struct oz_pd *pd, u8 kalive)
+{
+ unsigned long keep_alive = kalive & OZ_KALIVE_VALUE_MASK;
+
+ switch (kalive & OZ_KALIVE_TYPE_MASK) {
+ case OZ_KALIVE_SPECIAL:
+ pd->keep_alive_j =
+ oz_ms_to_jiffies(keep_alive * 1000*60*60*24*20);
+ break;
+ case OZ_KALIVE_SECS:
+ pd->keep_alive_j = oz_ms_to_jiffies(keep_alive*1000);
+ break;
+ case OZ_KALIVE_MINS:
+ pd->keep_alive_j = oz_ms_to_jiffies(keep_alive*1000*60);
+ break;
+ case OZ_KALIVE_HOURS:
+ pd->keep_alive_j = oz_ms_to_jiffies(keep_alive*1000*60*60);
+ break;
+ default:
+ pd->keep_alive_j = 0;
+ }
+ oz_trace("Keepalive = %lu jiffies\n", pd->keep_alive_j);
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq-serialized
+ */
+static void pd_set_presleep(struct oz_pd *pd, u8 presleep)
+{
+ if (presleep)
+ pd->presleep_j = oz_ms_to_jiffies(presleep*100);
+ else
+ pd->presleep_j = OZ_PRESLEEP_TOUT_J;
+ oz_trace("Presleep time = %lu jiffies\n", pd->presleep_j);
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq-serialized
+ */
+static struct oz_pd *oz_connect_req(struct oz_pd *cur_pd, struct oz_elt *elt,
+ u8 *pd_addr, struct net_device *net_dev)
+{
+ struct oz_pd *pd;
+ struct oz_elt_connect_req *body =
+ (struct oz_elt_connect_req *)(elt+1);
+ u8 rsp_status = OZ_STATUS_SUCCESS;
+ u8 stop_needed = 0;
+ u16 new_apps = g_apps;
+ struct net_device *old_net_dev = 0;
+ struct oz_pd *free_pd = 0;
+ if (cur_pd) {
+ pd = cur_pd;
+ spin_lock_bh(&g_polling_lock);
+ } else {
+ struct oz_pd *pd2 = 0;
+ struct list_head *e;
+ pd = oz_pd_alloc(pd_addr);
+ if (pd == 0)
+ return 0;
+ pd->last_rx_time_j = jiffies;
+ spin_lock_bh(&g_polling_lock);
+ list_for_each(e, &g_pd_list) {
+ pd2 = container_of(e, struct oz_pd, link);
+ if (memcmp(pd2->mac_addr, pd_addr, ETH_ALEN) == 0) {
+ free_pd = pd;
+ pd = pd2;
+ break;
+ }
+ }
+ if (pd != pd2)
+ list_add_tail(&pd->link, &g_pd_list);
+ }
+ if (pd == 0) {
+ spin_unlock_bh(&g_polling_lock);
+ return 0;
+ }
+ if (pd->net_dev != net_dev) {
+ old_net_dev = pd->net_dev;
+ dev_hold(net_dev);
+ pd->net_dev = net_dev;
+ }
+ oz_trace("Host vendor: %d\n", body->host_vendor);
+ pd->max_tx_size = OZ_MAX_TX_SIZE;
+ pd->mode = body->mode;
+ pd->pd_info = body->pd_info;
+ if (pd->mode & OZ_F_ISOC_NO_ELTS) {
+ pd->mode |= OZ_F_ISOC_ANYTIME;
+ pd->ms_per_isoc = body->ms_per_isoc;
+ if (!pd->ms_per_isoc)
+ pd->ms_per_isoc = 4;
+ }
+ if (body->max_len_div16)
+ pd->max_tx_size = ((u16)body->max_len_div16)<<4;
+ oz_trace("Max frame:%u Ms per isoc:%u\n",
+ pd->max_tx_size, pd->ms_per_isoc);
+ pd->max_stream_buffering = 3*1024;
+ pd->timeout_time_j = jiffies + OZ_CONNECTION_TOUT_J;
+ pd->pulse_period_j = OZ_QUANTUM_J;
+ pd_set_presleep(pd, body->presleep);
+ pd_set_keepalive(pd, body->keep_alive);
+
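+	/* Bit 0 of the apps field, together with the session id, decides
+	 * whether this is a resume of an existing session or a new session.
+	 */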
+ new_apps &= le16_to_cpu(get_unaligned(&body->apps));
+ if ((new_apps & 0x1) && (body->session_id)) {
+ if (pd->session_id) {
+ if (pd->session_id != body->session_id) {
+ rsp_status = OZ_STATUS_SESSION_MISMATCH;
+ goto done;
+ }
+ } else {
+ new_apps &= ~0x1; /* Resume not permitted */
+ pd->session_id =
+ oz_get_new_session_id(body->session_id);
+ }
+ } else {
+ if (pd->session_id && !body->session_id) {
+ rsp_status = OZ_STATUS_SESSION_TEARDOWN;
+ stop_needed = 1;
+ } else {
+ new_apps &= ~0x1; /* Resume not permitted */
+ pd->session_id =
+ oz_get_new_session_id(body->session_id);
+ }
+ }
+done:
+ if (rsp_status == OZ_STATUS_SUCCESS) {
+ u16 start_apps = new_apps & ~pd->total_apps & ~0x1;
+ u16 stop_apps = pd->total_apps & ~new_apps & ~0x1;
+ u16 resume_apps = new_apps & pd->paused_apps & ~0x1;
+ spin_unlock_bh(&g_polling_lock);
+ oz_pd_set_state(pd, OZ_PD_S_CONNECTED);
+ oz_timer_delete(pd, OZ_TIMER_STOP);
+ oz_trace("new_apps=0x%x total_apps=0x%x paused_apps=0x%x\n",
+ new_apps, pd->total_apps, pd->paused_apps);
+ if (start_apps) {
+ if (oz_services_start(pd, start_apps, 0))
+ rsp_status = OZ_STATUS_TOO_MANY_PDS;
+ }
+ if (resume_apps)
+ if (oz_services_start(pd, resume_apps, 1))
+ rsp_status = OZ_STATUS_TOO_MANY_PDS;
+ if (stop_apps)
+ oz_services_stop(pd, stop_apps, 0);
+ oz_pd_request_heartbeat(pd);
+ } else {
+ spin_unlock_bh(&g_polling_lock);
+ }
+ oz_send_conn_rsp(pd, rsp_status);
+ if (rsp_status != OZ_STATUS_SUCCESS) {
+ if (stop_needed)
+ oz_pd_stop(pd);
+ oz_pd_put(pd);
+ pd = 0;
+ }
+ if (old_net_dev)
+ dev_put(old_net_dev);
+ if (free_pd)
+ oz_pd_destroy(free_pd);
+ return pd;
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq-serialized
+ */
+static void oz_add_farewell(struct oz_pd *pd, u8 ep_num, u8 index,
+ u8 *report, u8 len)
+{
+ struct oz_farewell *f;
+ struct oz_farewell *f2;
+ int found = 0;
+ f = kmalloc(sizeof(struct oz_farewell) + len - 1, GFP_ATOMIC);
+ if (!f)
+ return;
+	f->ep_num = ep_num;
+	f->index = index;
+	f->len = len;
+	memcpy(f->report, report, len);
+ oz_trace("RX: Adding farewell report\n");
+ spin_lock(&g_polling_lock);
+ list_for_each_entry(f2, &pd->farewell_list, link) {
+ if ((f2->ep_num == ep_num) && (f2->index == index)) {
+ found = 1;
+ list_del(&f2->link);
+ break;
+ }
+ }
+ list_add_tail(&f->link, &pd->farewell_list);
+ spin_unlock(&g_polling_lock);
+ if (found)
+ kfree(f2);
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq-serialized
+ */
+static void oz_rx_frame(struct sk_buff *skb)
+{
+ u8 *mac_hdr;
+ u8 *src_addr;
+ struct oz_elt *elt;
+ int length;
+ struct oz_pd *pd = 0;
+ struct oz_hdr *oz_hdr = (struct oz_hdr *)skb_network_header(skb);
+ int dup = 0;
+ u32 pkt_num;
+
+ oz_event_log(OZ_EVT_RX_PROCESS, 0,
+ (((u16)oz_hdr->control)<<8)|oz_hdr->last_pkt_num,
+ 0, oz_hdr->pkt_num);
+ oz_trace2(OZ_TRACE_RX_FRAMES,
+ "RX frame PN=0x%x LPN=0x%x control=0x%x\n",
+ oz_hdr->pkt_num, oz_hdr->last_pkt_num, oz_hdr->control);
+ mac_hdr = skb_mac_header(skb);
+	src_addr = &mac_hdr[ETH_ALEN];
+ length = skb->len;
+
+ /* Check the version field */
+ if (oz_get_prot_ver(oz_hdr->control) != OZ_PROTOCOL_VERSION) {
+ oz_trace("Incorrect protocol version: %d\n",
+ oz_get_prot_ver(oz_hdr->control));
+ goto done;
+ }
+
+ pkt_num = le32_to_cpu(get_unaligned(&oz_hdr->pkt_num));
+
+ pd = oz_pd_find(src_addr);
+ if (pd) {
+ pd->last_rx_time_j = jiffies;
+ oz_timer_add(pd, OZ_TIMER_TOUT,
+ pd->last_rx_time_j + pd->presleep_j, 1);
+ if (pkt_num != pd->last_rx_pkt_num) {
+ pd->last_rx_pkt_num = pkt_num;
+ } else {
+ dup = 1;
+ oz_trace("Duplicate frame\n");
+ }
+ }
+
+ if (pd && !dup && ((pd->mode & OZ_MODE_MASK) == OZ_MODE_TRIGGERED)) {
+ pd->last_sent_frame = &pd->tx_queue;
+ if (oz_hdr->control & OZ_F_ACK) {
+ /* Retire completed frames */
+ oz_retire_tx_frames(pd, oz_hdr->last_pkt_num);
+ }
+ if ((oz_hdr->control & OZ_F_ACK_REQUESTED) &&
+ (pd->state == OZ_PD_S_CONNECTED)) {
+ int backlog = pd->nb_queued_frames;
+ pd->trigger_pkt_num = pkt_num;
+ /* Send queued frames */
+ while (oz_prepare_frame(pd, 0) >= 0)
+ ;
+ oz_send_queued_frames(pd, backlog);
+ }
+ }
+
+ length -= sizeof(struct oz_hdr);
+ elt = (struct oz_elt *)((u8 *)oz_hdr + sizeof(struct oz_hdr));
+
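+	/* Walk the chain of elements in the frame. The running length check
+	 * stops the walk before an element that would extend past the end of
+	 * the frame.
+	 */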
+ while (length >= sizeof(struct oz_elt)) {
+ length -= sizeof(struct oz_elt) + elt->length;
+ if (length < 0)
+ break;
+ switch (elt->type) {
+ case OZ_ELT_CONNECT_REQ:
+ oz_event_log(OZ_EVT_CONNECT_REQ, 0, 0, 0, 0);
+ oz_trace("RX: OZ_ELT_CONNECT_REQ\n");
+ pd = oz_connect_req(pd, elt, src_addr, skb->dev);
+ break;
+ case OZ_ELT_DISCONNECT:
+ oz_trace("RX: OZ_ELT_DISCONNECT\n");
+ if (pd)
+ oz_pd_sleep(pd);
+ break;
+ case OZ_ELT_UPDATE_PARAM_REQ: {
+ struct oz_elt_update_param *body =
+ (struct oz_elt_update_param *)(elt + 1);
+ oz_trace("RX: OZ_ELT_UPDATE_PARAM_REQ\n");
+ if (pd && (pd->state & OZ_PD_S_CONNECTED)) {
+ spin_lock(&g_polling_lock);
+ pd_set_keepalive(pd, body->keepalive);
+ pd_set_presleep(pd, body->presleep);
+ spin_unlock(&g_polling_lock);
+ }
+ }
+ break;
+ case OZ_ELT_FAREWELL_REQ: {
+ struct oz_elt_farewell *body =
+ (struct oz_elt_farewell *)(elt + 1);
+ oz_trace("RX: OZ_ELT_FAREWELL_REQ\n");
+ oz_add_farewell(pd, body->ep_num,
+ body->index, body->report,
+ elt->length + 1 - sizeof(*body));
+ }
+ break;
+ case OZ_ELT_APP_DATA:
+ if (pd && (pd->state & OZ_PD_S_CONNECTED)) {
+ struct oz_app_hdr *app_hdr =
+ (struct oz_app_hdr *)(elt+1);
+ if (dup)
+ break;
+ oz_handle_app_elt(pd, app_hdr->app_id, elt);
+ }
+ break;
+ default:
+ oz_trace("RX: Unknown elt %02x\n", elt->type);
+ }
+ elt = oz_next_elt(elt);
+ }
+done:
+ if (pd)
+ oz_pd_put(pd);
+ consume_skb(skb);
+}
+/*------------------------------------------------------------------------------
+ * Context: process
+ */
+void oz_protocol_term(void)
+{
+ struct list_head *chain = 0;
+ del_timer_sync(&g_timer);
+ /* Walk the list of bindings and remove each one.
+ */
+ spin_lock_bh(&g_binding_lock);
+ while (g_binding) {
+ struct oz_binding *b = g_binding;
+ g_binding = b->next;
+ spin_unlock_bh(&g_binding_lock);
+ dev_remove_pack(&b->ptype);
+ if (b->ptype.dev)
+ dev_put(b->ptype.dev);
+ kfree(b);
+ spin_lock_bh(&g_binding_lock);
+ }
+ spin_unlock_bh(&g_binding_lock);
+ /* Walk the list of PDs and stop each one. This causes the PD to be
+ * removed from the list so we can just pull each one from the head
+ * of the list.
+ */
+ spin_lock_bh(&g_polling_lock);
+ while (!list_empty(&g_pd_list)) {
+ struct oz_pd *pd =
+ list_first_entry(&g_pd_list, struct oz_pd, link);
+ oz_pd_get(pd);
+ spin_unlock_bh(&g_polling_lock);
+ oz_pd_stop(pd);
+ oz_pd_put(pd);
+ spin_lock_bh(&g_polling_lock);
+ }
+ chain = g_timer_pool;
+ g_timer_pool = 0;
+ spin_unlock_bh(&g_polling_lock);
+ while (chain) {
+ struct oz_timer *t = container_of(chain, struct oz_timer, link);
+ chain = chain->next;
+ kfree(t);
+ }
+ oz_trace("Protocol stopped\n");
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq
+ */
+static void oz_pd_handle_timer(struct oz_pd *pd, int type)
+{
+ switch (type) {
+ case OZ_TIMER_TOUT:
+ oz_pd_sleep(pd);
+ break;
+ case OZ_TIMER_STOP:
+ oz_pd_stop(pd);
+ break;
+ case OZ_TIMER_HEARTBEAT: {
+ u16 apps = 0;
+ spin_lock_bh(&g_polling_lock);
+ pd->heartbeat_requested = 0;
+ if (pd->state & OZ_PD_S_CONNECTED)
+ apps = pd->total_apps;
+ spin_unlock_bh(&g_polling_lock);
+ if (apps)
+ oz_pd_heartbeat(pd, apps);
+ }
+ break;
+ }
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq
+ */
+static void oz_protocol_timer(unsigned long arg)
+{
+ struct oz_timer *t;
+ struct oz_timer *t2;
+ struct oz_pd *pd;
+ spin_lock_bh(&g_polling_lock);
+ if (!g_cur_timer) {
+ /* This happens if we remove the current timer but can't stop
+ * the timer from firing. In this case just get out.
+ */
+ oz_event_log(OZ_EVT_TIMER, 0, 0, 0, 0);
+ spin_unlock_bh(&g_polling_lock);
+ return;
+ }
+ g_timer_state = OZ_TIMER_IN_HANDLER;
+ t = g_cur_timer;
+ g_cur_timer = 0;
+ list_del(&t->link);
+ spin_unlock_bh(&g_polling_lock);
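+	/* Handle the timer that fired, then keep draining any queued timers
+	 * that are already due, recycling oz_timer structures through the
+	 * pool where possible.
+	 */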
+ do {
+ pd = t->pd;
+ oz_event_log(OZ_EVT_TIMER, 0, t->type, 0, 0);
+ oz_pd_handle_timer(pd, t->type);
+ spin_lock_bh(&g_polling_lock);
+ if (g_timer_pool_count < OZ_MAX_TIMER_POOL_SIZE) {
+ t->link.next = g_timer_pool;
+ g_timer_pool = &t->link;
+ g_timer_pool_count++;
+ t = 0;
+ }
+ if (!list_empty(&g_timer_list)) {
+ t2 = container_of(g_timer_list.next,
+ struct oz_timer, link);
+ if (time_before_eq(t2->due_time, jiffies))
+ list_del(&t2->link);
+ else
+ t2 = 0;
+ } else {
+ t2 = 0;
+ }
+ spin_unlock_bh(&g_polling_lock);
+ oz_pd_put(pd);
+ if (t)
+ kfree(t);
+ t = t2;
+ } while (t);
+ g_timer_state = OZ_TIMER_IDLE;
+ oz_protocol_timer_start();
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq
+ */
+static void oz_protocol_timer_start(void)
+{
+ spin_lock_bh(&g_polling_lock);
+ if (!list_empty(&g_timer_list)) {
+ g_cur_timer =
+ container_of(g_timer_list.next, struct oz_timer, link);
+ if (g_timer_state == OZ_TIMER_SET) {
+ oz_event_log(OZ_EVT_TIMER_CTRL, 3,
+ (u16)g_cur_timer->type, 0,
+ (unsigned)g_cur_timer->due_time);
+ mod_timer(&g_timer, g_cur_timer->due_time);
+ } else {
+ oz_event_log(OZ_EVT_TIMER_CTRL, 4,
+ (u16)g_cur_timer->type, 0,
+ (unsigned)g_cur_timer->due_time);
+ g_timer.expires = g_cur_timer->due_time;
+ g_timer.function = oz_protocol_timer;
+ g_timer.data = 0;
+ add_timer(&g_timer);
+ }
+ g_timer_state = OZ_TIMER_SET;
+ } else {
+ oz_trace("No queued timers\n");
+ }
+ spin_unlock_bh(&g_polling_lock);
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq or process
+ */
+void oz_timer_add(struct oz_pd *pd, int type, unsigned long due_time,
+ int remove)
+{
+ struct list_head *e;
+ struct oz_timer *t = 0;
+ int restart_needed = 0;
+ oz_event_log(OZ_EVT_TIMER_CTRL, 1, (u16)type, 0, (unsigned)due_time);
+ spin_lock(&g_polling_lock);
+ if (remove) {
+ list_for_each(e, &g_timer_list) {
+ t = container_of(e, struct oz_timer, link);
+ if ((t->pd == pd) && (t->type == type)) {
+ if (g_cur_timer == t) {
+ restart_needed = 1;
+ g_cur_timer = 0;
+ }
+ list_del(e);
+ break;
+ }
+ t = 0;
+ }
+ }
+ if (!t) {
+ if (g_timer_pool) {
+ t = container_of(g_timer_pool, struct oz_timer, link);
+ g_timer_pool = g_timer_pool->next;
+ g_timer_pool_count--;
+ } else {
+ t = kmalloc(sizeof(struct oz_timer), GFP_ATOMIC);
+ }
+ if (t) {
+ t->pd = pd;
+ t->type = type;
+ oz_pd_get(pd);
+ }
+ }
+ if (t) {
+ struct oz_timer *t2;
+ t->due_time = due_time;
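+		/* Keep the list sorted by due time. If the new timer displaces
+		 * the one currently armed, the kernel timer must be restarted.
+		 */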
+ list_for_each(e, &g_timer_list) {
+ t2 = container_of(e, struct oz_timer, link);
+ if (time_before(due_time, t2->due_time)) {
+ if (t2 == g_cur_timer) {
+ g_cur_timer = 0;
+ restart_needed = 1;
+ }
+ break;
+ }
+ }
+ list_add_tail(&t->link, e);
+ }
+ if (g_timer_state == OZ_TIMER_IDLE)
+ restart_needed = 1;
+ else if (g_timer_state == OZ_TIMER_IN_HANDLER)
+ restart_needed = 0;
+ spin_unlock(&g_polling_lock);
+ if (restart_needed)
+ oz_protocol_timer_start();
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq or process
+ */
+void oz_timer_delete(struct oz_pd *pd, int type)
+{
+ struct list_head *chain = 0;
+ struct oz_timer *t;
+ struct oz_timer *n;
+ int restart_needed = 0;
+ int release = 0;
+ oz_event_log(OZ_EVT_TIMER_CTRL, 2, (u16)type, 0, 0);
+ spin_lock(&g_polling_lock);
+ list_for_each_entry_safe(t, n, &g_timer_list, link) {
+ if ((t->pd == pd) && ((type == 0) || (t->type == type))) {
+ if (g_cur_timer == t) {
+ restart_needed = 1;
+ g_cur_timer = 0;
+ del_timer(&g_timer);
+ }
+ list_del(&t->link);
+ release++;
+ if (g_timer_pool_count < OZ_MAX_TIMER_POOL_SIZE) {
+ t->link.next = g_timer_pool;
+ g_timer_pool = &t->link;
+ g_timer_pool_count++;
+ } else {
+ t->link.next = chain;
+ chain = &t->link;
+ }
+ if (type)
+ break;
+ }
+ }
+ if (g_timer_state == OZ_TIMER_IN_HANDLER)
+ restart_needed = 0;
+ else if (restart_needed)
+ g_timer_state = OZ_TIMER_IDLE;
+ spin_unlock(&g_polling_lock);
+ if (restart_needed)
+ oz_protocol_timer_start();
+ while (release--)
+ oz_pd_put(pd);
+ while (chain) {
+ t = container_of(chain, struct oz_timer, link);
+ chain = chain->next;
+ kfree(t);
+ }
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq or process
+ */
+void oz_pd_request_heartbeat(struct oz_pd *pd)
+{
+ unsigned long now = jiffies;
+ unsigned long t;
+ spin_lock(&g_polling_lock);
+ if (pd->heartbeat_requested) {
+ spin_unlock(&g_polling_lock);
+ return;
+ }
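+	/* Round up to the next multiple of the pulse period, presumably so
+	 * heartbeats stay on a regular schedule.
+	 */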
+ if (pd->pulse_period_j)
+ t = ((now / pd->pulse_period_j) + 1) * pd->pulse_period_j;
+ else
+ t = now + 1;
+ pd->heartbeat_requested = 1;
+ spin_unlock(&g_polling_lock);
+ oz_timer_add(pd, OZ_TIMER_HEARTBEAT, t, 0);
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq or process
+ */
+struct oz_pd *oz_pd_find(u8 *mac_addr)
+{
+ struct oz_pd *pd;
+ struct list_head *e;
+ spin_lock_bh(&g_polling_lock);
+ list_for_each(e, &g_pd_list) {
+ pd = container_of(e, struct oz_pd, link);
+ if (memcmp(pd->mac_addr, mac_addr, ETH_ALEN) == 0) {
+ atomic_inc(&pd->ref_count);
+ spin_unlock_bh(&g_polling_lock);
+ return pd;
+ }
+ }
+ spin_unlock_bh(&g_polling_lock);
+ return 0;
+}
+/*------------------------------------------------------------------------------
+ * Context: process
+ */
+void oz_app_enable(int app_id, int enable)
+{
+ if (app_id <= OZ_APPID_MAX) {
+ spin_lock_bh(&g_polling_lock);
+ if (enable)
+ g_apps |= (1<<app_id);
+ else
+ g_apps &= ~(1<<app_id);
+ spin_unlock_bh(&g_polling_lock);
+ }
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq
+ */
+static int oz_pkt_recv(struct sk_buff *skb, struct net_device *dev,
+ struct packet_type *pt, struct net_device *orig_dev)
+{
+ oz_event_log(OZ_EVT_RX_FRAME, 0, 0, 0, 0);
+ skb = skb_share_check(skb, GFP_ATOMIC);
+ if (skb == 0)
+ return 0;
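+	/* Only one context processes received frames at a time: the first
+	 * arrival drains the queue while concurrent arrivals just enqueue.
+	 */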
+ spin_lock_bh(&g_rx_queue.lock);
+ if (g_processing_rx) {
+ /* We already hold the lock so use __ variant.
+ */
+ __skb_queue_head(&g_rx_queue, skb);
+ spin_unlock_bh(&g_rx_queue.lock);
+ } else {
+ g_processing_rx = 1;
+ do {
+ spin_unlock_bh(&g_rx_queue.lock);
+ oz_rx_frame(skb);
+ spin_lock_bh(&g_rx_queue.lock);
+ if (skb_queue_empty(&g_rx_queue)) {
+ g_processing_rx = 0;
+ spin_unlock_bh(&g_rx_queue.lock);
+ break;
+ }
+ /* We already hold the lock so use __ variant.
+ */
+ skb = __skb_dequeue(&g_rx_queue);
+ } while (1);
+ }
+ return 0;
+}
+/*------------------------------------------------------------------------------
+ * Context: process
+ */
+void oz_binding_add(char *net_dev)
+{
+ struct oz_binding *binding;
+
+ binding = kmalloc(sizeof(struct oz_binding), GFP_ATOMIC);
+ if (binding) {
+ binding->ptype.type = __constant_htons(OZ_ETHERTYPE);
+ binding->ptype.func = oz_pkt_recv;
+		if (net_dev && *net_dev) {
+			memcpy(binding->name, net_dev, OZ_MAX_BINDING_LEN);
+			oz_trace("Adding binding: %s\n", net_dev);
+			binding->ptype.dev =
+				dev_get_by_name(&init_net, net_dev);
+			if (binding->ptype.dev == 0) {
+				oz_trace("Netdev %s not found\n", net_dev);
+				kfree(binding);
+				binding = 0;
+			}
+		} else {
+			/* A null or empty name means bind to all netcards;
+			 * don't copy from a null pointer.
+			 */
+			binding->name[0] = '\0';
+			oz_trace("Binding to all netcards\n");
+			binding->ptype.dev = 0;
+ }
+ if (binding) {
+ dev_add_pack(&binding->ptype);
+ spin_lock_bh(&g_binding_lock);
+ binding->next = g_binding;
+ g_binding = binding;
+ spin_unlock_bh(&g_binding_lock);
+ }
+ }
+}
+/*------------------------------------------------------------------------------
+ * Context: process
+ */
+static int compare_binding_name(char *s1, char *s2)
+{
+ int i;
+ for (i = 0; i < OZ_MAX_BINDING_LEN; i++) {
+ if (*s1 != *s2)
+ return 0;
+ if (!*s1++)
+ return 1;
+ s2++;
+ }
+ return 1;
+}
+/*------------------------------------------------------------------------------
+ * Context: process
+ */
+static void pd_stop_all_for_device(struct net_device *net_dev)
+{
+ struct list_head h;
+ struct oz_pd *pd;
+ struct oz_pd *n;
+ INIT_LIST_HEAD(&h);
+ spin_lock_bh(&g_polling_lock);
+ list_for_each_entry_safe(pd, n, &g_pd_list, link) {
+ if (pd->net_dev == net_dev) {
+ list_move(&pd->link, &h);
+ oz_pd_get(pd);
+ }
+ }
+ spin_unlock_bh(&g_polling_lock);
+ while (!list_empty(&h)) {
+ pd = list_first_entry(&h, struct oz_pd, link);
+ oz_pd_stop(pd);
+ oz_pd_put(pd);
+ }
+}
+/*------------------------------------------------------------------------------
+ * Context: process
+ */
+void oz_binding_remove(char *net_dev)
+{
+ struct oz_binding *binding = 0;
+ struct oz_binding **link;
+ oz_trace("Removing binding: %s\n", net_dev);
+ spin_lock_bh(&g_binding_lock);
+ binding = g_binding;
+ link = &g_binding;
+ while (binding) {
+ if (compare_binding_name(binding->name, net_dev)) {
+ oz_trace("Binding '%s' found\n", net_dev);
+ *link = binding->next;
+ break;
+ } else {
+			link = &binding->next;
+ binding = binding->next;
+ }
+ }
+ spin_unlock_bh(&g_binding_lock);
+ if (binding) {
+ dev_remove_pack(&binding->ptype);
+ if (binding->ptype.dev) {
+ dev_put(binding->ptype.dev);
+ pd_stop_all_for_device(binding->ptype.dev);
+ }
+ kfree(binding);
+ }
+}
+/*------------------------------------------------------------------------------
+ * Context: process
+ */
+static char *oz_get_next_device_name(char *s, char *dname, int max_size)
+{
+ while (*s == ',')
+ s++;
+ while (*s && (*s != ',') && max_size > 1) {
+ *dname++ = *s++;
+ max_size--;
+ }
+ *dname = 0;
+ return s;
+}
+/*------------------------------------------------------------------------------
+ * Context: process
+ */
+int oz_protocol_init(char *devs)
+{
+ skb_queue_head_init(&g_rx_queue);
+ if (devs && (devs[0] == '*')) {
+ oz_binding_add(0);
+ } else {
+ char d[32];
+ while (*devs) {
+ devs = oz_get_next_device_name(devs, d, sizeof(d));
+ if (d[0])
+ oz_binding_add(d);
+ }
+ }
+ init_timer(&g_timer);
+ return 0;
+}
+/*------------------------------------------------------------------------------
+ * Context: process
+ */
+int oz_get_pd_list(struct oz_mac_addr *addr, int max_count)
+{
+ struct oz_pd *pd;
+ struct list_head *e;
+ int count = 0;
+ spin_lock_bh(&g_polling_lock);
+ list_for_each(e, &g_pd_list) {
+ if (count >= max_count)
+ break;
+ pd = container_of(e, struct oz_pd, link);
+ memcpy(&addr[count++], pd->mac_addr, ETH_ALEN);
+ }
+ spin_unlock_bh(&g_polling_lock);
+ return count;
+}
+/*------------------------------------------------------------------------------
+*/
+void oz_polling_lock_bh(void)
+{
+ spin_lock_bh(&g_polling_lock);
+}
+/*------------------------------------------------------------------------------
+*/
+void oz_polling_unlock_bh(void)
+{
+ spin_unlock_bh(&g_polling_lock);
+}
diff --git a/drivers/staging/ozwpan/ozproto.h b/drivers/staging/ozwpan/ozproto.h
new file mode 100644
index 00000000000..89aea28bd8d
--- /dev/null
+++ b/drivers/staging/ozwpan/ozproto.h
@@ -0,0 +1,69 @@
+/* -----------------------------------------------------------------------------
+ * Copyright (c) 2011 Ozmo Inc
+ * Released under the GNU General Public License Version 2 (GPLv2).
+ * -----------------------------------------------------------------------------
+ */
+#ifndef _OZPROTO_H
+#define _OZPROTO_H
+
+#include <asm/byteorder.h>
+#include "ozconfig.h"
+#include "ozappif.h"
+
+#define OZ_ALLOCATED_SPACE(__x) (LL_RESERVED_SPACE(__x)+(__x)->needed_tailroom)
+
+/* Converts milliseconds to jiffies.
+ */
+#define oz_ms_to_jiffies(__x) (((__x)*HZ)/1000)
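+/* For example, with HZ=250 a quantum of 8 ms maps to (8*250)/1000 = 2
+ * jiffies.
+ */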
+
+/* Quantum milliseconds.
+ */
+#define OZ_QUANTUM_MS 8
+/* Quantum jiffies
+ */
+#define OZ_QUANTUM_J (oz_ms_to_jiffies(OZ_QUANTUM_MS))
+/* Default timeouts.
+ */
+#define OZ_CONNECTION_TOUT_J (2*HZ)
+#define OZ_PRESLEEP_TOUT_J (11*HZ)
+
+/* Maximum size of tx frames. */
+#define OZ_MAX_TX_SIZE 1514
+
+/* Application handler functions.
+ */
+typedef int (*oz_app_init_fn_t)(void);
+typedef void (*oz_app_term_fn_t)(void);
+typedef int (*oz_app_start_fn_t)(struct oz_pd *pd, int resume);
+typedef void (*oz_app_stop_fn_t)(struct oz_pd *pd, int pause);
+typedef void (*oz_app_rx_fn_t)(struct oz_pd *pd, struct oz_elt *elt);
+typedef int (*oz_app_heartbeat_fn_t)(struct oz_pd *pd);
+typedef void (*oz_app_farewell_fn_t)(struct oz_pd *pd, u8 ep_num,
+ u8 *data, u8 len);
+
+struct oz_app_if {
+ oz_app_init_fn_t init;
+ oz_app_term_fn_t term;
+ oz_app_start_fn_t start;
+ oz_app_stop_fn_t stop;
+ oz_app_rx_fn_t rx;
+	oz_app_heartbeat_fn_t heartbeat;
+ oz_app_farewell_fn_t farewell;
+ int app_id;
+};
+
+int oz_protocol_init(char *devs);
+void oz_protocol_term(void);
+int oz_get_pd_list(struct oz_mac_addr *addr, int max_count);
+void oz_app_enable(int app_id, int enable);
+struct oz_pd *oz_pd_find(u8 *mac_addr);
+void oz_binding_add(char *net_dev);
+void oz_binding_remove(char *net_dev);
+void oz_timer_add(struct oz_pd *pd, int type, unsigned long due_time,
+ int remove);
+void oz_timer_delete(struct oz_pd *pd, int type);
+void oz_pd_request_heartbeat(struct oz_pd *pd);
+void oz_polling_lock_bh(void);
+void oz_polling_unlock_bh(void);
+
+#endif /* _OZPROTO_H */
diff --git a/drivers/staging/ozwpan/ozprotocol.h b/drivers/staging/ozwpan/ozprotocol.h
new file mode 100644
index 00000000000..b3e7d77f3ff
--- /dev/null
+++ b/drivers/staging/ozwpan/ozprotocol.h
@@ -0,0 +1,372 @@
+/* -----------------------------------------------------------------------------
+ * Copyright (c) 2011 Ozmo Inc
+ * Released under the GNU General Public License Version 2 (GPLv2).
+ * -----------------------------------------------------------------------------
+ */
+#ifndef _OZPROTOCOL_H
+#define _OZPROTOCOL_H
+
+#define PACKED __packed
+
+#define OZ_ETHERTYPE 0x892e
+
+/* Status codes
+ */
+#define OZ_STATUS_SUCCESS 0
+#define OZ_STATUS_INVALID_PARAM 1
+#define OZ_STATUS_TOO_MANY_PDS 2
+#define OZ_STATUS_NOT_ALLOWED 4
+#define OZ_STATUS_SESSION_MISMATCH 5
+#define OZ_STATUS_SESSION_TEARDOWN 6
+
+/* This is the generic element header.
+ * Every element starts with this.
+ */
+struct oz_elt {
+ u8 type;
+ u8 length;
+} PACKED;
+
+#define oz_next_elt(__elt) \
+ (struct oz_elt *)((u8 *)((__elt) + 1) + (__elt)->length)
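+/* The length field counts only the element body, so the next element
+ * header starts length bytes beyond the end of the current one.
+ */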
+
+/* Protocol element IDs.
+ */
+#define OZ_ELT_CONNECT_REQ 0x06
+#define OZ_ELT_CONNECT_RSP 0x07
+#define OZ_ELT_DISCONNECT 0x08
+#define OZ_ELT_UPDATE_PARAM_REQ 0x11
+#define OZ_ELT_FAREWELL_REQ 0x12
+#define OZ_ELT_APP_DATA 0x31
+
+/* This is the Ozmo header which is the first Ozmo specific part
+ * of a frame and comes after the MAC header.
+ */
+struct oz_hdr {
+ u8 control;
+ u8 last_pkt_num;
+ u32 pkt_num;
+} PACKED;
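+/* The pkt_num field is little-endian on the wire and the header is not
+ * necessarily aligned, so it is read with get_unaligned() and le32_to_cpu().
+ */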
+
+#define OZ_PROTOCOL_VERSION 0x1
+/* Bits in the control field. */
+#define OZ_VERSION_MASK 0xc
+#define OZ_VERSION_SHIFT 2
+#define OZ_F_ACK 0x10
+#define OZ_F_ISOC 0x20
+#define OZ_F_MORE_DATA 0x40
+#define OZ_F_ACK_REQUESTED 0x80
+
+#define oz_get_prot_ver(__x) (((__x) & OZ_VERSION_MASK) >> OZ_VERSION_SHIFT)
+
+/* Used to select the bits of packet number to put in the last_pkt_num.
+ */
+#define OZ_LAST_PN_MASK 0x00ff
+
+#define OZ_LAST_PN_HALF_CYCLE 127
+
+/* Connect request data structure.
+ */
+struct oz_elt_connect_req {
+ u8 mode;
+ u8 resv1[16];
+ u8 pd_info;
+ u8 session_id;
+ u8 presleep;
+ u8 resv2;
+ u8 host_vendor;
+ u8 keep_alive;
+ u16 apps;
+ u8 max_len_div16;
+ u8 ms_per_isoc;
+ u8 resv3[2];
+} PACKED;
+
+/* mode field bits.
+ */
+#define OZ_MODE_POLLED 0x0
+#define OZ_MODE_TRIGGERED 0x1
+#define OZ_MODE_MASK 0xf
+#define OZ_F_ISOC_NO_ELTS 0x40
+#define OZ_F_ISOC_ANYTIME 0x80
+
+/* Keep alive field.
+ */
+#define OZ_KALIVE_TYPE_MASK 0xc0
+#define OZ_KALIVE_VALUE_MASK 0x3f
+#define OZ_KALIVE_SPECIAL 0x00
+#define OZ_KALIVE_SECS 0x40
+#define OZ_KALIVE_MINS 0x80
+#define OZ_KALIVE_HOURS 0xc0
+
+/* Connect response data structure.
+ */
+struct oz_elt_connect_rsp {
+ u8 mode;
+ u8 status;
+ u8 resv1[3];
+ u8 session_id;
+ u16 apps;
+ u32 resv2;
+} PACKED;
+
+struct oz_elt_farewell {
+ u8 ep_num;
+ u8 index;
+ u8 report[1];
+} PACKED;
+
+struct oz_elt_update_param {
+ u8 resv1[16];
+ u8 presleep;
+ u8 resv2;
+ u8 host_vendor;
+ u8 keepalive;
+} PACKED;
+
+/* Header common to all application elements.
+ */
+struct oz_app_hdr {
+ u8 app_id;
+ u8 elt_seq_num;
+} PACKED;
+
+/* Values for app_id.
+ */
+#define OZ_APPID_USB 0x1
+#define OZ_APPID_UNUSED1 0x2
+#define OZ_APPID_UNUSED2 0x3
+#define OZ_APPID_SERIAL 0x4
+#define OZ_APPID_MAX OZ_APPID_SERIAL
+#define OZ_NB_APPS (OZ_APPID_MAX+1)
+
+/* USB header common to all elements for the USB application.
+ * This header extends the oz_app_hdr and comes directly after
+ * the element header in a USB application.
+ */
+struct oz_usb_hdr {
+ u8 app_id;
+ u8 elt_seq_num;
+ u8 type;
+} PACKED;
+
+
+
+/* USB requests element subtypes (type field of hs_usb_hdr).
+ */
+#define OZ_GET_DESC_REQ 1
+#define OZ_GET_DESC_RSP 2
+#define OZ_SET_CONFIG_REQ 3
+#define OZ_SET_CONFIG_RSP 4
+#define OZ_SET_INTERFACE_REQ 5
+#define OZ_SET_INTERFACE_RSP 6
+#define OZ_VENDOR_CLASS_REQ 7
+#define OZ_VENDOR_CLASS_RSP 8
+#define OZ_GET_STATUS_REQ 9
+#define OZ_GET_STATUS_RSP 10
+#define OZ_CLEAR_FEATURE_REQ 11
+#define OZ_CLEAR_FEATURE_RSP 12
+#define OZ_SET_FEATURE_REQ 13
+#define OZ_SET_FEATURE_RSP 14
+#define OZ_GET_CONFIGURATION_REQ 15
+#define OZ_GET_CONFIGURATION_RSP 16
+#define OZ_GET_INTERFACE_REQ 17
+#define OZ_GET_INTERFACE_RSP 18
+#define OZ_SYNCH_FRAME_REQ 19
+#define OZ_SYNCH_FRAME_RSP 20
+#define OZ_USB_ENDPOINT_DATA 23
+
+#define OZ_REQD_D2H 0x80
+
+struct oz_get_desc_req {
+ u8 app_id;
+ u8 elt_seq_num;
+ u8 type;
+ u8 req_id;
+ u16 offset;
+ u16 size;
+ u8 req_type;
+ u8 desc_type;
+ u16 w_index;
+ u8 index;
+} PACKED;
+
+/* Values for desc_type field.
+*/
+#define OZ_DESC_DEVICE 0x01
+#define OZ_DESC_CONFIG 0x02
+#define OZ_DESC_STRING 0x03
+
+/* Values for req_type field.
+ */
+#define OZ_RECP_MASK 0x1F
+#define OZ_RECP_DEVICE 0x00
+#define OZ_RECP_INTERFACE 0x01
+#define OZ_RECP_ENDPOINT 0x02
+
+#define OZ_REQT_MASK 0x60
+#define OZ_REQT_STD 0x00
+#define OZ_REQT_CLASS 0x20
+#define OZ_REQT_VENDOR 0x40
+
+struct oz_get_desc_rsp {
+ u8 app_id;
+ u8 elt_seq_num;
+ u8 type;
+ u8 req_id;
+ u16 offset;
+ u16 total_size;
+ u8 rcode;
+ u8 data[1];
+} PACKED;
+
+struct oz_feature_req {
+ u8 app_id;
+ u8 elt_seq_num;
+ u8 type;
+ u8 req_id;
+ u8 recipient;
+ u8 index;
+ u16 feature;
+} PACKED;
+
+struct oz_feature_rsp {
+ u8 app_id;
+ u8 elt_seq_num;
+ u8 type;
+ u8 req_id;
+ u8 rcode;
+} PACKED;
+
+struct oz_set_config_req {
+ u8 app_id;
+ u8 elt_seq_num;
+ u8 type;
+ u8 req_id;
+ u8 index;
+} PACKED;
+
+struct oz_set_config_rsp {
+ u8 app_id;
+ u8 elt_seq_num;
+ u8 type;
+ u8 req_id;
+ u8 rcode;
+} PACKED;
+
+struct oz_set_interface_req {
+ u8 app_id;
+ u8 elt_seq_num;
+ u8 type;
+ u8 req_id;
+ u8 index;
+ u8 alternative;
+} PACKED;
+
+struct oz_set_interface_rsp {
+ u8 app_id;
+ u8 elt_seq_num;
+ u8 type;
+ u8 req_id;
+ u8 rcode;
+} PACKED;
+
+struct oz_get_interface_req {
+ u8 app_id;
+ u8 elt_seq_num;
+ u8 type;
+ u8 req_id;
+ u8 index;
+} PACKED;
+
+struct oz_get_interface_rsp {
+ u8 app_id;
+ u8 elt_seq_num;
+ u8 type;
+ u8 req_id;
+ u8 rcode;
+ u8 alternative;
+} PACKED;
+
+struct oz_vendor_class_req {
+ u8 app_id;
+ u8 elt_seq_num;
+ u8 type;
+ u8 req_id;
+ u8 req_type;
+ u8 request;
+ u16 value;
+ u16 index;
+ u8 data[1];
+} PACKED;
+
+struct oz_vendor_class_rsp {
+ u8 app_id;
+ u8 elt_seq_num;
+ u8 type;
+ u8 req_id;
+ u8 rcode;
+ u8 data[1];
+} PACKED;
+
+struct oz_data {
+ u8 app_id;
+ u8 elt_seq_num;
+ u8 type;
+ u8 endpoint;
+ u8 format;
+} PACKED;
+
+struct oz_isoc_fixed {
+ u8 app_id;
+ u8 elt_seq_num;
+ u8 type;
+ u8 endpoint;
+ u8 format;
+ u8 unit_size;
+ u8 frame_number;
+ u8 data[1];
+} PACKED;
+
+struct oz_multiple_fixed {
+ u8 app_id;
+ u8 elt_seq_num;
+ u8 type;
+ u8 endpoint;
+ u8 format;
+ u8 unit_size;
+ u8 data[1];
+} PACKED;
+
+struct oz_fragmented {
+ u8 app_id;
+ u8 elt_seq_num;
+ u8 type;
+ u8 endpoint;
+ u8 format;
+ u16 total_size;
+ u16 offset;
+ u8 data[1];
+} PACKED;
+
+/* Note: the following does not get packaged in an element in the same way
+ * that other data formats are packaged. Instead the data is put in a frame
+ * directly after the oz_hdr and is the only permitted data in such a
+ * frame. The length of the data is directly determined from the frame size.
+ */
+struct oz_isoc_large {
+ u8 endpoint;
+ u8 format;
+ u8 ms_data;
+ u8 frame_number;
+} PACKED;
+
+#define OZ_DATA_F_TYPE_MASK 0xF
+#define OZ_DATA_F_MULTIPLE_FIXED 0x1
+#define OZ_DATA_F_MULTIPLE_VAR 0x2
+#define OZ_DATA_F_ISOC_FIXED 0x3
+#define OZ_DATA_F_ISOC_VAR 0x4
+#define OZ_DATA_F_FRAGMENTED 0x5
+#define OZ_DATA_F_ISOC_LARGE 0x7
+
+#endif /* _OZPROTOCOL_H */
diff --git a/drivers/staging/ozwpan/oztrace.c b/drivers/staging/ozwpan/oztrace.c
new file mode 100644
index 00000000000..353ead24fd7
--- /dev/null
+++ b/drivers/staging/ozwpan/oztrace.c
@@ -0,0 +1,36 @@
+/* -----------------------------------------------------------------------------
+ * Copyright (c) 2011 Ozmo Inc
+ * Released under the GNU General Public License Version 2 (GPLv2).
+ * -----------------------------------------------------------------------------
+ */
+#include "ozconfig.h"
+#include "oztrace.h"
+
+#ifdef WANT_VERBOSE_TRACE
+unsigned long trace_flags =
+ 0
+#ifdef WANT_TRACE_STREAM
+ | OZ_TRACE_STREAM
+#endif /* WANT_TRACE_STREAM */
+#ifdef WANT_TRACE_URB
+ | OZ_TRACE_URB
+#endif /* WANT_TRACE_URB */
+
+#ifdef WANT_TRACE_CTRL_DETAIL
+ | OZ_TRACE_CTRL_DETAIL
+#endif /* WANT_TRACE_CTRL_DETAIL */
+
+#ifdef WANT_TRACE_HUB
+ | OZ_TRACE_HUB
+#endif /* WANT_TRACE_HUB */
+
+#ifdef WANT_TRACE_RX_FRAMES
+ | OZ_TRACE_RX_FRAMES
+#endif /* WANT_TRACE_RX_FRAMES */
+
+#ifdef WANT_TRACE_TX_FRAMES
+ | OZ_TRACE_TX_FRAMES
+#endif /* WANT_TRACE_TX_FRAMES */
+ ;
+#endif /* WANT_VERBOSE_TRACE */
+
diff --git a/drivers/staging/ozwpan/oztrace.h b/drivers/staging/ozwpan/oztrace.h
new file mode 100644
index 00000000000..8293b24c5a7
--- /dev/null
+++ b/drivers/staging/ozwpan/oztrace.h
@@ -0,0 +1,35 @@
+/* -----------------------------------------------------------------------------
+ * Copyright (c) 2011 Ozmo Inc
+ * Released under the GNU General Public License Version 2 (GPLv2).
+ * -----------------------------------------------------------------------------
+ */
+#ifndef _OZTRACE_H_
+#define _OZTRACE_H_
+#include "ozconfig.h"
+
+#define TRACE_PREFIX KERN_ALERT "OZWPAN: "
+
+#ifdef WANT_TRACE
+#define oz_trace(...) printk(TRACE_PREFIX __VA_ARGS__)
+#ifdef WANT_VERBOSE_TRACE
+extern unsigned long trace_flags;
+#define oz_trace2(_flag, ...) \
+ do { if (trace_flags & _flag) printk(TRACE_PREFIX __VA_ARGS__); \
+ } while (0)
+#else
+#define oz_trace2(...)
+#endif /* #ifdef WANT_VERBOSE_TRACE */
+#else
+#define oz_trace(...)
+#define oz_trace2(...)
+#endif /* #ifdef WANT_TRACE */
+
+#define OZ_TRACE_STREAM 0x1
+#define OZ_TRACE_URB 0x2
+#define OZ_TRACE_CTRL_DETAIL 0x4
+#define OZ_TRACE_HUB 0x8
+#define OZ_TRACE_RX_FRAMES 0x10
+#define OZ_TRACE_TX_FRAMES 0x20
+
+#endif /* _OZTRACE_H_ */
+
diff --git a/drivers/staging/ozwpan/ozurbparanoia.c b/drivers/staging/ozwpan/ozurbparanoia.c
new file mode 100644
index 00000000000..55b9afbbe47
--- /dev/null
+++ b/drivers/staging/ozwpan/ozurbparanoia.c
@@ -0,0 +1,53 @@
+/* -----------------------------------------------------------------------------
+ * Copyright (c) 2011 Ozmo Inc
+ * Released under the GNU General Public License Version 2 (GPLv2).
+ * -----------------------------------------------------------------------------
+ */
+#include <linux/usb.h>
+#include "ozconfig.h"
+#ifdef WANT_URB_PARANOIA
+#include "ozurbparanoia.h"
+#include "oztrace.h"
+/*-----------------------------------------------------------------------------
+ */
+#define OZ_MAX_URBS 1000
+struct urb *g_urb_memory[OZ_MAX_URBS];
+int g_nb_urbs;
+DEFINE_SPINLOCK(g_urb_mem_lock);
+/*-----------------------------------------------------------------------------
+ */
+void oz_remember_urb(struct urb *urb)
+{
+ unsigned long irq_state;
+ spin_lock_irqsave(&g_urb_mem_lock, irq_state);
+ if (g_nb_urbs < OZ_MAX_URBS) {
+ g_urb_memory[g_nb_urbs++] = urb;
+ oz_trace("%lu: urb up = %d %p\n", jiffies, g_nb_urbs, urb);
+ } else {
+ oz_trace("ERROR urb buffer full\n");
+ }
+ spin_unlock_irqrestore(&g_urb_mem_lock, irq_state);
+}
+/*------------------------------------------------------------------------------
+ */
+int oz_forget_urb(struct urb *urb)
+{
+ unsigned long irq_state;
+ int i;
+ int rc = -1;
+ spin_lock_irqsave(&g_urb_mem_lock, irq_state);
+ for (i = 0; i < g_nb_urbs; i++) {
+ if (g_urb_memory[i] == urb) {
+ rc = 0;
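+			/* Close the gap left by the departing urb; source
+			 * and destination overlap, hence memmove.
+			 */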
+ if (--g_nb_urbs > i)
+				memmove(&g_urb_memory[i], &g_urb_memory[i+1],
+					(g_nb_urbs - i) * sizeof(struct urb *));
+ oz_trace("%lu: urb down = %d %p\n",
+ jiffies, g_nb_urbs, urb);
+ }
+ }
+ spin_unlock_irqrestore(&g_urb_mem_lock, irq_state);
+ return rc;
+}
+#endif /* #ifdef WANT_URB_PARANOIA */
+
diff --git a/drivers/staging/ozwpan/ozurbparanoia.h b/drivers/staging/ozwpan/ozurbparanoia.h
new file mode 100644
index 00000000000..00f5a3a81bc
--- /dev/null
+++ b/drivers/staging/ozwpan/ozurbparanoia.h
@@ -0,0 +1,19 @@
+#ifndef _OZURBPARANOIA_H
+#define _OZURBPARANOIA_H
+/* -----------------------------------------------------------------------------
+ * Released under the GNU General Public License Version 2 (GPLv2).
+ * Copyright (c) 2011 Ozmo Inc
+ * -----------------------------------------------------------------------------
+ */
+
+#ifdef WANT_URB_PARANOIA
+void oz_remember_urb(struct urb *urb);
+int oz_forget_urb(struct urb *urb);
+#else
+#define oz_remember_urb(__x)
+#define oz_forget_urb(__x) 0
+#endif /* WANT_URB_PARANOIA */
+
+
+#endif /* _OZURBPARANOIA_H */
+
diff --git a/drivers/staging/ozwpan/ozusbif.h b/drivers/staging/ozwpan/ozusbif.h
new file mode 100644
index 00000000000..3acf5980d7c
--- /dev/null
+++ b/drivers/staging/ozwpan/ozusbif.h
@@ -0,0 +1,43 @@
+/* -----------------------------------------------------------------------------
+ * Copyright (c) 2011 Ozmo Inc
+ * Released under the GNU General Public License Version 2 (GPLv2).
+ * -----------------------------------------------------------------------------
+ */
+#ifndef _OZUSBIF_H
+#define _OZUSBIF_H
+
+#include <linux/usb.h>
+
+/* Reference counting functions.
+ */
+void oz_usb_get(void *hpd);
+void oz_usb_put(void *hpd);
+
+/* Stream functions.
+ */
+int oz_usb_stream_create(void *hpd, u8 ep_num);
+int oz_usb_stream_delete(void *hpd, u8 ep_num);
+
+/* Request functions.
+ */
+int oz_usb_control_req(void *hpd, u8 req_id, struct usb_ctrlrequest *setup,
+ u8 *data, int data_len);
+int oz_usb_get_desc_req(void *hpd, u8 req_id, u8 req_type, u8 desc_type,
+ u8 index, u16 windex, int offset, int len);
+int oz_usb_send_isoc(void *hpd, u8 ep_num, struct urb *urb);
+void oz_usb_request_heartbeat(void *hpd);
+
+/* Confirmation functions.
+ */
+void oz_hcd_get_desc_cnf(void *hport, u8 req_id, int status,
+ u8 *desc, int length, int offset, int total_size);
+void oz_hcd_control_cnf(void *hport, u8 req_id, u8 rcode,
+ u8 *data, int data_len);
+
+/* Indication functions.
+ */
+void oz_hcd_data_ind(void *hport, u8 endpoint, u8 *data, int data_len);
+
+int oz_hcd_heartbeat(void *hport);
+
+#endif /* _OZUSBIF_H */
diff --git a/drivers/staging/ozwpan/ozusbsvc.c b/drivers/staging/ozwpan/ozusbsvc.c
new file mode 100644
index 00000000000..9e74f960238
--- /dev/null
+++ b/drivers/staging/ozwpan/ozusbsvc.c
@@ -0,0 +1,245 @@
+/* -----------------------------------------------------------------------------
+ * Copyright (c) 2011 Ozmo Inc
+ * Released under the GNU General Public License Version 2 (GPLv2).
+ *
+ * This file provides the protocol-independent part of the implementation of
+ * the USB service for a PD.
+ * The implementation of this service is split into two parts, the first of
+ * which is protocol independent and the second of which contains the
+ * protocol-specific details. This split is to allow alternative protocols to
+ * be defined. The implementation of this service uses ozhcd.c to implement a
+ * USB HCD.
+ * -----------------------------------------------------------------------------
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/timer.h>
+#include <linux/sched.h>
+#include <linux/netdevice.h>
+#include <linux/errno.h>
+#include <linux/input.h>
+#include <asm/unaligned.h>
+#include "ozconfig.h"
+#include "ozprotocol.h"
+#include "ozeltbuf.h"
+#include "ozpd.h"
+#include "ozproto.h"
+#include "ozusbif.h"
+#include "ozhcd.h"
+#include "oztrace.h"
+#include "ozusbsvc.h"
+#include "ozevent.h"
+/*------------------------------------------------------------------------------
+ * This is called once when the driver is loaded to initialise the USB service.
+ * Context: process
+ */
+int oz_usb_init(void)
+{
+ oz_event_log(OZ_EVT_SERVICE, 1, OZ_APPID_USB, 0, 0);
+ return oz_hcd_init();
+}
+/*------------------------------------------------------------------------------
+ * This is called once when the driver is unloaded to terminate the USB service.
+ * Context: process
+ */
+void oz_usb_term(void)
+{
+ oz_event_log(OZ_EVT_SERVICE, 2, OZ_APPID_USB, 0, 0);
+ oz_hcd_term();
+}
+/*------------------------------------------------------------------------------
+ * This is called when the USB service is started or resumed for a PD.
+ * Context: softirq
+ */
+int oz_usb_start(struct oz_pd *pd, int resume)
+{
+ int rc = 0;
+ struct oz_usb_ctx *usb_ctx;
+ struct oz_usb_ctx *old_ctx = 0;
+ oz_event_log(OZ_EVT_SERVICE, 3, OZ_APPID_USB, 0, resume);
+ if (resume) {
+ oz_trace("USB service resumed.\n");
+ return 0;
+ }
+ oz_trace("USB service started.\n");
+ /* Create a USB context in case we need one. If we find the PD already
+ * has a USB context then we will destroy it.
+ */
+ usb_ctx = kzalloc(sizeof(struct oz_usb_ctx), GFP_ATOMIC);
+ if (usb_ctx == 0)
+ return -ENOMEM;
+ atomic_set(&usb_ctx->ref_count, 1);
+ usb_ctx->pd = pd;
+ usb_ctx->stopped = 0;
+ /* Install the USB context if the PD doesn't already have one.
+ * If it does already have one then destroy the one we have just
+ * created.
+ */
+ spin_lock_bh(&pd->app_lock[OZ_APPID_USB-1]);
+ old_ctx = pd->app_ctx[OZ_APPID_USB-1];
+ if (old_ctx == 0)
+ pd->app_ctx[OZ_APPID_USB-1] = usb_ctx;
+ oz_usb_get(pd->app_ctx[OZ_APPID_USB-1]);
+ spin_unlock_bh(&pd->app_lock[OZ_APPID_USB-1]);
+ if (old_ctx) {
+ oz_trace("Already have USB context.\n");
+ kfree(usb_ctx);
+ usb_ctx = old_ctx;
+ } else if (usb_ctx) {
+ /* Take a reference to the PD. This will be released when
+ * the USB context is destroyed.
+ */
+ oz_pd_get(pd);
+ }
+ /* If we already had a USB context and had obtained a port from
+ * the USB HCD then just reset the port. If we didn't have a port
+ * then report the arrival to the USB HCD so we get one.
+ */
+ if (usb_ctx->hport) {
+ oz_hcd_pd_reset(usb_ctx, usb_ctx->hport);
+ } else {
+ usb_ctx->hport = oz_hcd_pd_arrived(usb_ctx);
+ if (usb_ctx->hport == 0) {
+ oz_trace("USB hub returned null port.\n");
+ spin_lock_bh(&pd->app_lock[OZ_APPID_USB-1]);
+ pd->app_ctx[OZ_APPID_USB-1] = 0;
+ spin_unlock_bh(&pd->app_lock[OZ_APPID_USB-1]);
+ oz_usb_put(usb_ctx);
+ rc = -1;
+ }
+ }
+ oz_usb_put(usb_ctx);
+ return rc;
+}
+/*------------------------------------------------------------------------------
+ * This is called when the USB service is stopped or paused for a PD.
+ * Context: softirq or process
+ */
+void oz_usb_stop(struct oz_pd *pd, int pause)
+{
+ struct oz_usb_ctx *usb_ctx;
+ oz_event_log(OZ_EVT_SERVICE, 4, OZ_APPID_USB, 0, pause);
+ if (pause) {
+ oz_trace("USB service paused.\n");
+ return;
+ }
+ spin_lock_bh(&pd->app_lock[OZ_APPID_USB-1]);
+ usb_ctx = (struct oz_usb_ctx *)pd->app_ctx[OZ_APPID_USB-1];
+ pd->app_ctx[OZ_APPID_USB-1] = 0;
+ spin_unlock_bh(&pd->app_lock[OZ_APPID_USB-1]);
+ if (usb_ctx) {
+ unsigned long tout = jiffies + HZ;
+ oz_trace("USB service stopping...\n");
+ usb_ctx->stopped = 1;
+ /* At this point the reference count on the usb context should
+ * be 2 - one from when we created it and one from the hcd
+ * which claims a reference. Since stopped = 1 no one else
+ * should get in but someone may already be in. So wait
+ * until they leave but timeout after 1 second.
+ */
+ while ((atomic_read(&usb_ctx->ref_count) > 2) &&
+ time_before(jiffies, tout))
+ ;
+ oz_trace("USB service stopped.\n");
+ oz_hcd_pd_departed(usb_ctx->hport);
+ /* Release the reference taken in oz_usb_start.
+ */
+ oz_usb_put(usb_ctx);
+ }
+}
+/*------------------------------------------------------------------------------
+ * This increments the reference count of the context area for a specific PD.
+ * This ensures this context area does not disappear while still in use.
+ * Context: softirq
+ */
+void oz_usb_get(void *hpd)
+{
+ struct oz_usb_ctx *usb_ctx = (struct oz_usb_ctx *)hpd;
+ atomic_inc(&usb_ctx->ref_count);
+}
+/*------------------------------------------------------------------------------
+ * This decrements the reference count of the context area for a specific PD
+ * and destroys the context area if the reference count becomes zero.
+ * Context: softirq or process
+ */
+void oz_usb_put(void *hpd)
+{
+ struct oz_usb_ctx *usb_ctx = (struct oz_usb_ctx *)hpd;
+ if (atomic_dec_and_test(&usb_ctx->ref_count)) {
+ oz_trace("Dealloc USB context.\n");
+ oz_pd_put(usb_ctx->pd);
+ kfree(usb_ctx);
+ }
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq
+ */
+int oz_usb_heartbeat(struct oz_pd *pd)
+{
+ struct oz_usb_ctx *usb_ctx;
+ int rc = 0;
+ spin_lock_bh(&pd->app_lock[OZ_APPID_USB-1]);
+ usb_ctx = (struct oz_usb_ctx *)pd->app_ctx[OZ_APPID_USB-1];
+ if (usb_ctx)
+ oz_usb_get(usb_ctx);
+ spin_unlock_bh(&pd->app_lock[OZ_APPID_USB-1]);
+ if (usb_ctx == 0)
+ return rc;
+ if (usb_ctx->stopped)
+ goto done;
+ if (usb_ctx->hport)
+ if (oz_hcd_heartbeat(usb_ctx->hport))
+ rc = 1;
+done:
+ oz_usb_put(usb_ctx);
+ return rc;
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq
+ */
+int oz_usb_stream_create(void *hpd, u8 ep_num)
+{
+ struct oz_usb_ctx *usb_ctx = (struct oz_usb_ctx *)hpd;
+ struct oz_pd *pd = usb_ctx->pd;
+ oz_trace("oz_usb_stream_create(0x%x)\n", ep_num);
+ if (pd->mode & OZ_F_ISOC_NO_ELTS) {
+ oz_isoc_stream_create(pd, ep_num);
+ } else {
+ oz_pd_get(pd);
+ if (oz_elt_stream_create(&pd->elt_buff, ep_num,
+ 4*pd->max_tx_size)) {
+ oz_pd_put(pd);
+ return -1;
+ }
+ }
+ return 0;
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq
+ */
+int oz_usb_stream_delete(void *hpd, u8 ep_num)
+{
+ struct oz_usb_ctx *usb_ctx = (struct oz_usb_ctx *)hpd;
+ if (usb_ctx) {
+ struct oz_pd *pd = usb_ctx->pd;
+ if (pd) {
+ oz_trace("oz_usb_stream_delete(0x%x)\n", ep_num);
+ if (pd->mode & OZ_F_ISOC_NO_ELTS) {
+ oz_isoc_stream_delete(pd, ep_num);
+ } else {
+ if (oz_elt_stream_delete(&pd->elt_buff, ep_num))
+ return -1;
+ oz_pd_put(pd);
+ }
+ }
+ }
+ return 0;
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq or process
+ */
+void oz_usb_request_heartbeat(void *hpd)
+{
+ struct oz_usb_ctx *usb_ctx = (struct oz_usb_ctx *)hpd;
+ if (usb_ctx && usb_ctx->pd)
+ oz_pd_request_heartbeat(usb_ctx->pd);
+}
diff --git a/drivers/staging/ozwpan/ozusbsvc.h b/drivers/staging/ozwpan/ozusbsvc.h
new file mode 100644
index 00000000000..58e05a59be3
--- /dev/null
+++ b/drivers/staging/ozwpan/ozusbsvc.h
@@ -0,0 +1,32 @@
+/* -----------------------------------------------------------------------------
+ * Copyright (c) 2011 Ozmo Inc
+ * Released under the GNU General Public License Version 2 (GPLv2).
+ * -----------------------------------------------------------------------------
+ */
+#ifndef _OZUSBSVC_H
+#define _OZUSBSVC_H
+
+/*------------------------------------------------------------------------------
+ * Per PD context info stored in application context area of PD.
+ * This object is reference counted to ensure it doesn't disappear while
+ * still in use.
+ */
+struct oz_usb_ctx {
+ atomic_t ref_count;
+ u8 tx_seq_num;
+ u8 rx_seq_num;
+ struct oz_pd *pd;
+ void *hport;
+ int stopped;
+};
+
+int oz_usb_init(void);
+void oz_usb_term(void);
+int oz_usb_start(struct oz_pd *pd, int resume);
+void oz_usb_stop(struct oz_pd *pd, int pause);
+void oz_usb_rx(struct oz_pd *pd, struct oz_elt *elt);
+int oz_usb_heartbeat(struct oz_pd *pd);
+void oz_usb_farewell(struct oz_pd *pd, u8 ep_num, u8 *data, u8 len);
+
+#endif /* _OZUSBSVC_H */
+
diff --git a/drivers/staging/ozwpan/ozusbsvc1.c b/drivers/staging/ozwpan/ozusbsvc1.c
new file mode 100644
index 00000000000..66bd576bb5e
--- /dev/null
+++ b/drivers/staging/ozwpan/ozusbsvc1.c
@@ -0,0 +1,437 @@
+/* -----------------------------------------------------------------------------
+ * Copyright (c) 2011 Ozmo Inc
+ * Released under the GNU General Public License Version 2 (GPLv2).
+ *
+ * This file implements the protocol-specific parts of the USB service for a PD.
+ * -----------------------------------------------------------------------------
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/timer.h>
+#include <linux/sched.h>
+#include <linux/netdevice.h>
+#include <linux/errno.h>
+#include <linux/input.h>
+#include <asm/unaligned.h>
+#include "ozconfig.h"
+#include "ozprotocol.h"
+#include "ozeltbuf.h"
+#include "ozpd.h"
+#include "ozproto.h"
+#include "ozusbif.h"
+#include "ozhcd.h"
+#include "oztrace.h"
+#include "ozusbsvc.h"
+#include "ozevent.h"
+/*------------------------------------------------------------------------------
+ */
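+/* Payload bytes available for ISO data in one element; 253 appears to be
+ * the largest element body the protocol allows for this element type.
+ */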
+#define MAX_ISOC_FIXED_DATA (253-sizeof(struct oz_isoc_fixed))
+/*------------------------------------------------------------------------------
+ * Context: softirq
+ */
+static int oz_usb_submit_elt(struct oz_elt_buf *eb, struct oz_elt_info *ei,
+ struct oz_usb_ctx *usb_ctx, u8 strid, u8 isoc)
+{
+ int ret;
+ struct oz_elt *elt = (struct oz_elt *)ei->data;
+ struct oz_app_hdr *app_hdr = (struct oz_app_hdr *)(elt+1);
+ elt->type = OZ_ELT_APP_DATA;
+ ei->app_id = OZ_APPID_USB;
+ ei->length = elt->length + sizeof(struct oz_elt);
+ app_hdr->app_id = OZ_APPID_USB;
+ spin_lock_bh(&eb->lock);
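+ /* Non-isoc elements carry a sequence number. Zero is skipped on wrap
+ * because receivers accept a zero sequence number unconditionally
+ * (see the duplicate check in oz_usb_rx).
+ */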
+ if (!isoc) {
+ app_hdr->elt_seq_num = usb_ctx->tx_seq_num++;
+ if (usb_ctx->tx_seq_num == 0)
+ usb_ctx->tx_seq_num = 1;
+ }
+ ret = oz_queue_elt_info(eb, isoc, strid, ei);
+ if (ret)
+ oz_elt_info_free(eb, ei);
+ spin_unlock_bh(&eb->lock);
+ return ret;
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq
+ */
+int oz_usb_get_desc_req(void *hpd, u8 req_id, u8 req_type, u8 desc_type,
+ u8 index, u16 windex, int offset, int len)
+{
+ struct oz_usb_ctx *usb_ctx = (struct oz_usb_ctx *)hpd;
+ struct oz_pd *pd = usb_ctx->pd;
+ struct oz_elt *elt;
+ struct oz_get_desc_req *body;
+ struct oz_elt_buf *eb = &pd->elt_buff;
+ struct oz_elt_info *ei = oz_elt_info_alloc(&pd->elt_buff);
+ oz_trace(" req_type = 0x%x\n", req_type);
+ oz_trace(" desc_type = 0x%x\n", desc_type);
+ oz_trace(" index = 0x%x\n", index);
+ oz_trace(" windex = 0x%x\n", windex);
+ oz_trace(" offset = 0x%x\n", offset);
+ oz_trace(" len = 0x%x\n", len);
+ if (len > 200)
+ len = 200;
+ if (!ei)
+ return -1;
+ elt = (struct oz_elt *)ei->data;
+ elt->length = sizeof(struct oz_get_desc_req);
+ body = (struct oz_get_desc_req *)(elt+1);
+ body->type = OZ_GET_DESC_REQ;
+ body->req_id = req_id;
+ put_unaligned(cpu_to_le16(offset), &body->offset);
+ put_unaligned(cpu_to_le16(len), &body->size);
+ body->req_type = req_type;
+ body->desc_type = desc_type;
+ body->w_index = windex;
+ body->index = index;
+ return oz_usb_submit_elt(eb, ei, usb_ctx, 0, 0);
+}
+/*------------------------------------------------------------------------------
+ * Context: tasklet
+ */
+static int oz_usb_set_config_req(void *hpd, u8 req_id, u8 index)
+{
+ struct oz_usb_ctx *usb_ctx = (struct oz_usb_ctx *)hpd;
+ struct oz_pd *pd = usb_ctx->pd;
+ struct oz_elt *elt;
+ struct oz_elt_buf *eb = &pd->elt_buff;
+ struct oz_elt_info *ei = oz_elt_info_alloc(&pd->elt_buff);
+ struct oz_set_config_req *body;
+ if (!ei)
+ return -1;
+ elt = (struct oz_elt *)ei->data;
+ elt->length = sizeof(struct oz_set_config_req);
+ body = (struct oz_set_config_req *)(elt+1);
+ body->type = OZ_SET_CONFIG_REQ;
+ body->req_id = req_id;
+ body->index = index;
+ return oz_usb_submit_elt(eb, ei, usb_ctx, 0, 0);
+}
+/*------------------------------------------------------------------------------
+ * Context: tasklet
+ */
+static int oz_usb_set_interface_req(void *hpd, u8 req_id, u8 index, u8 alt)
+{
+ struct oz_usb_ctx *usb_ctx = (struct oz_usb_ctx *)hpd;
+ struct oz_pd *pd = usb_ctx->pd;
+ struct oz_elt *elt;
+ struct oz_elt_buf *eb = &pd->elt_buff;
+ struct oz_elt_info *ei = oz_elt_info_alloc(&pd->elt_buff);
+ struct oz_set_interface_req *body;
+ if (!ei)
+ return -1;
+ elt = (struct oz_elt *)ei->data;
+ elt->length = sizeof(struct oz_set_interface_req);
+ body = (struct oz_set_interface_req *)(elt+1);
+ body->type = OZ_SET_INTERFACE_REQ;
+ body->req_id = req_id;
+ body->index = index;
+ body->alternative = alt;
+ return oz_usb_submit_elt(eb, ei, usb_ctx, 0, 0);
+}
+/*------------------------------------------------------------------------------
+ * Context: tasklet
+ */
+static int oz_usb_set_clear_feature_req(void *hpd, u8 req_id, u8 type,
+ u8 recipient, u8 index, __le16 feature)
+{
+ struct oz_usb_ctx *usb_ctx = (struct oz_usb_ctx *)hpd;
+ struct oz_pd *pd = usb_ctx->pd;
+ struct oz_elt *elt;
+ struct oz_elt_buf *eb = &pd->elt_buff;
+ struct oz_elt_info *ei = oz_elt_info_alloc(&pd->elt_buff);
+ struct oz_feature_req *body;
+ if (!ei)
+ return -1;
+ elt = (struct oz_elt *)ei->data;
+ elt->length = sizeof(struct oz_feature_req);
+ body = (struct oz_feature_req *)(elt+1);
+ body->type = type;
+ body->req_id = req_id;
+ body->recipient = recipient;
+ body->index = index;
+ put_unaligned(feature, &body->feature);
+ return oz_usb_submit_elt(eb, ei, usb_ctx, 0, 0);
+}
+/*------------------------------------------------------------------------------
+ * Context: tasklet
+ */
+static int oz_usb_vendor_class_req(void *hpd, u8 req_id, u8 req_type,
+ u8 request, __le16 value, __le16 index, u8 *data, int data_len)
+{
+ struct oz_usb_ctx *usb_ctx = (struct oz_usb_ctx *)hpd;
+ struct oz_pd *pd = usb_ctx->pd;
+ struct oz_elt *elt;
+ struct oz_elt_buf *eb = &pd->elt_buff;
+ struct oz_elt_info *ei = oz_elt_info_alloc(&pd->elt_buff);
+ struct oz_vendor_class_req *body;
+ if (!ei)
+ return -1;
+ elt = (struct oz_elt *)ei->data;
+ elt->length = sizeof(struct oz_vendor_class_req) - 1 + data_len;
+ body = (struct oz_vendor_class_req *)(elt+1);
+ body->type = OZ_VENDOR_CLASS_REQ;
+ body->req_id = req_id;
+ body->req_type = req_type;
+ body->request = request;
+ put_unaligned(value, &body->value);
+ put_unaligned(index, &body->index);
+ if (data_len)
+ memcpy(body->data, data, data_len);
+ return oz_usb_submit_elt(eb, ei, usb_ctx, 0, 0);
+}
+/*------------------------------------------------------------------------------
+ * Context: tasklet
+ */
+int oz_usb_control_req(void *hpd, u8 req_id, struct usb_ctrlrequest *setup,
+ u8 *data, int data_len)
+{
+ unsigned wvalue = le16_to_cpu(setup->wValue);
+ unsigned windex = le16_to_cpu(setup->wIndex);
+ unsigned wlength = le16_to_cpu(setup->wLength);
+ int rc = 0;
+ oz_event_log(OZ_EVT_CTRL_REQ, setup->bRequest, req_id,
+ (void *)(((unsigned long)(setup->wValue))<<16 |
+ ((unsigned long)setup->wIndex)),
+ setup->bRequestType);
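+ /* Standard requests map onto dedicated element types; class and vendor
+ * requests are forwarded as a generic vendor/class request element.
+ */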
+ if ((setup->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
+ switch (setup->bRequest) {
+ case USB_REQ_GET_DESCRIPTOR:
+ rc = oz_usb_get_desc_req(hpd, req_id,
+ setup->bRequestType, (u8)(wvalue>>8),
+ (u8)wvalue, setup->wIndex, 0, wlength);
+ break;
+ case USB_REQ_SET_CONFIGURATION:
+ rc = oz_usb_set_config_req(hpd, req_id, (u8)wvalue);
+ break;
+ case USB_REQ_SET_INTERFACE: {
+ u8 if_num = (u8)windex;
+ u8 alt = (u8)wvalue;
+ rc = oz_usb_set_interface_req(hpd, req_id,
+ if_num, alt);
+ }
+ break;
+ case USB_REQ_SET_FEATURE:
+ rc = oz_usb_set_clear_feature_req(hpd, req_id,
+ OZ_SET_FEATURE_REQ,
+ setup->bRequestType & 0xf, (u8)windex,
+ setup->wValue);
+ break;
+ case USB_REQ_CLEAR_FEATURE:
+ rc = oz_usb_set_clear_feature_req(hpd, req_id,
+ OZ_CLEAR_FEATURE_REQ,
+ setup->bRequestType & 0xf,
+ (u8)windex, setup->wValue);
+ break;
+ }
+ } else {
+ rc = oz_usb_vendor_class_req(hpd, req_id, setup->bRequestType,
+ setup->bRequest, setup->wValue, setup->wIndex,
+ data, data_len);
+ }
+ return rc;
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq
+ */
+int oz_usb_send_isoc(void *hpd, u8 ep_num, struct urb *urb)
+{
+ struct oz_usb_ctx *usb_ctx = (struct oz_usb_ctx *)hpd;
+ struct oz_pd *pd = usb_ctx->pd;
+ struct oz_elt_buf *eb;
+ int i;
+ int hdr_size;
+ u8 *data;
+ struct usb_iso_packet_descriptor *desc;
+
+ if (pd->mode & OZ_F_ISOC_NO_ELTS) {
+ for (i = 0; i < urb->number_of_packets; i++) {
+ desc = &urb->iso_frame_desc[i];
+ data = ((u8 *)urb->transfer_buffer)+desc->offset;
+ oz_send_isoc_unit(pd, ep_num, data, desc->length);
+ }
+ return 0;
+ }
+
+ hdr_size = sizeof(struct oz_isoc_fixed) - 1;
+ eb = &pd->elt_buff;
+ i = 0;
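+ /* Pack as many consecutive ISO packets of equal size as will fit into
+ * one fixed-format element; a size change or a full element starts a
+ * new element.
+ */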
+ while (i < urb->number_of_packets) {
+ struct oz_elt_info *ei = oz_elt_info_alloc(eb);
+ struct oz_elt *elt;
+ struct oz_isoc_fixed *body;
+ int unit_count;
+ int unit_size;
+ int rem;
+ if (!ei)
+ return -1;
+ rem = MAX_ISOC_FIXED_DATA;
+ elt = (struct oz_elt *)ei->data;
+ body = (struct oz_isoc_fixed *)(elt + 1);
+ body->type = OZ_USB_ENDPOINT_DATA;
+ body->endpoint = ep_num;
+ body->format = OZ_DATA_F_ISOC_FIXED;
+ unit_size = urb->iso_frame_desc[i].length;
+ body->unit_size = (u8)unit_size;
+ data = ((u8 *)(elt+1)) + hdr_size;
+ unit_count = 0;
+ while (i < urb->number_of_packets) {
+ desc = &urb->iso_frame_desc[i];
+ if ((unit_size == desc->length) &&
+ (desc->length <= rem)) {
+ memcpy(data, ((u8 *)urb->transfer_buffer) +
+ desc->offset, unit_size);
+ data += unit_size;
+ rem -= unit_size;
+ unit_count++;
+ desc->status = 0;
+ desc->actual_length = desc->length;
+ i++;
+ } else {
+ break;
+ }
+ }
+ elt->length = hdr_size + MAX_ISOC_FIXED_DATA - rem;
+ /* Store the number of units in body->frame_number for the
+ * moment. This field will be correctly determined before
+ * the element is sent. */
+ body->frame_number = (u8)unit_count;
+ oz_usb_submit_elt(eb, ei, usb_ctx, ep_num,
+ pd->mode & OZ_F_ISOC_ANYTIME);
+ }
+ return 0;
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq-serialized
+ */
+void oz_usb_handle_ep_data(struct oz_usb_ctx *usb_ctx,
+ struct oz_usb_hdr *usb_hdr, int len)
+{
+ struct oz_data *data_hdr = (struct oz_data *)usb_hdr;
+ switch (data_hdr->format) {
+ case OZ_DATA_F_MULTIPLE_FIXED: {
+ struct oz_multiple_fixed *body =
+ (struct oz_multiple_fixed *)data_hdr;
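+ /* The "+1" below offsets what is presumably a one-byte data[]
+ * placeholder counted by sizeof(), matching the hdr_size convention
+ * used in oz_usb_send_isoc.
+ */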
+ u8 *data = body->data;
+ int n;
+ if (!body->unit_size)
+ break;
+ n = (len - sizeof(struct oz_multiple_fixed) + 1)
+ / body->unit_size;
+ while (n--) {
+ oz_hcd_data_ind(usb_ctx->hport, body->endpoint,
+ data, body->unit_size);
+ data += body->unit_size;
+ }
+ }
+ break;
+ case OZ_DATA_F_ISOC_FIXED: {
+ struct oz_isoc_fixed *body =
+ (struct oz_isoc_fixed *)data_hdr;
+ int data_len = len-sizeof(struct oz_isoc_fixed)+1;
+ int unit_size = body->unit_size;
+ u8 *data = body->data;
+ int count;
+ int i;
+ if (!unit_size)
+ break;
+ count = data_len/unit_size;
+ for (i = 0; i < count; i++) {
+ oz_hcd_data_ind(usb_ctx->hport,
+ body->endpoint, data, unit_size);
+ data += unit_size;
+ }
+ }
+ break;
+ }
+}
+/*------------------------------------------------------------------------------
+ * This is called when the PD has received a USB element. The element type
+ * is determined and the element is then passed to the appropriate handler
+ * function.
+ * Context: softirq-serialized
+ */
+void oz_usb_rx(struct oz_pd *pd, struct oz_elt *elt)
+{
+ struct oz_usb_hdr *usb_hdr = (struct oz_usb_hdr *)(elt + 1);
+ struct oz_usb_ctx *usb_ctx;
+
+ spin_lock_bh(&pd->app_lock[OZ_APPID_USB-1]);
+ usb_ctx = (struct oz_usb_ctx *)pd->app_ctx[OZ_APPID_USB-1];
+ if (usb_ctx)
+ oz_usb_get(usb_ctx);
+ spin_unlock_bh(&pd->app_lock[OZ_APPID_USB-1]);
+ if (!usb_ctx)
+ return; /* Context has gone so nothing to do. */
+ if (usb_ctx->stopped)
+ goto done;
+ /* If sequence number is non-zero then check it is not a duplicate.
+ * Zero sequence numbers are always accepted.
+ */
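+ /* The subtraction below is an 8-bit window test: the element is
+ * rejected when rx_seq_num is at or ahead of elt_seq_num. For example
+ * rx_seq_num = 250, elt_seq_num = 3 gives (250 - 3) & 0x80 = 0x80,
+ * so the element counts as newer despite the wrap and is accepted.
+ */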
+ if (usb_hdr->elt_seq_num != 0) {
+ if (((usb_ctx->rx_seq_num - usb_hdr->elt_seq_num) & 0x80) == 0)
+ /* Reject duplicate element. */
+ goto done;
+ }
+ usb_ctx->rx_seq_num = usb_hdr->elt_seq_num;
+ switch (usb_hdr->type) {
+ case OZ_GET_DESC_RSP: {
+ struct oz_get_desc_rsp *body =
+ (struct oz_get_desc_rsp *)usb_hdr;
+ int data_len = elt->length -
+ sizeof(struct oz_get_desc_rsp) + 1;
+ u16 offs = le16_to_cpu(get_unaligned(&body->offset));
+ u16 total_size =
+ le16_to_cpu(get_unaligned(&body->total_size));
+ oz_trace("USB_REQ_GET_DESCRIPTOR - cnf\n");
+ oz_hcd_get_desc_cnf(usb_ctx->hport, body->req_id,
+ body->rcode, body->data,
+ data_len, offs, total_size);
+ }
+ break;
+ case OZ_SET_CONFIG_RSP: {
+ struct oz_set_config_rsp *body =
+ (struct oz_set_config_rsp *)usb_hdr;
+ oz_hcd_control_cnf(usb_ctx->hport, body->req_id,
+ body->rcode, 0, 0);
+ }
+ break;
+ case OZ_SET_INTERFACE_RSP: {
+ struct oz_set_interface_rsp *body =
+ (struct oz_set_interface_rsp *)usb_hdr;
+ oz_hcd_control_cnf(usb_ctx->hport,
+ body->req_id, body->rcode, 0, 0);
+ }
+ break;
+ case OZ_VENDOR_CLASS_RSP: {
+ struct oz_vendor_class_rsp *body =
+ (struct oz_vendor_class_rsp *)usb_hdr;
+ oz_hcd_control_cnf(usb_ctx->hport, body->req_id,
+ body->rcode, body->data, elt->length-
+ sizeof(struct oz_vendor_class_rsp)+1);
+ }
+ break;
+ case OZ_USB_ENDPOINT_DATA:
+ oz_usb_handle_ep_data(usb_ctx, usb_hdr, elt->length);
+ break;
+ }
+done:
+ oz_usb_put(usb_ctx);
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq, process
+ */
+void oz_usb_farewell(struct oz_pd *pd, u8 ep_num, u8 *data, u8 len)
+{
+ struct oz_usb_ctx *usb_ctx;
+ spin_lock_bh(&pd->app_lock[OZ_APPID_USB-1]);
+ usb_ctx = (struct oz_usb_ctx *)pd->app_ctx[OZ_APPID_USB-1];
+ if (usb_ctx)
+ oz_usb_get(usb_ctx);
+ spin_unlock_bh(&pd->app_lock[OZ_APPID_USB-1]);
+ if (!usb_ctx)
+ return; /* Context has gone so nothing to do. */
+ if (!usb_ctx->stopped) {
+ oz_trace("Farewell indicated ep = 0x%x\n", ep_num);
+ oz_hcd_data_ind(usb_ctx->hport, ep_num, data, len);
+ }
+ oz_usb_put(usb_ctx);
+}
diff --git a/drivers/staging/quickstart/quickstart.c b/drivers/staging/quickstart/quickstart.c
index c60911c6ab3..cac32073814 100644
--- a/drivers/staging/quickstart/quickstart.c
+++ b/drivers/staging/quickstart/quickstart.c
@@ -4,7 +4,7 @@
*
* Copyright (C) 2007-2010 Angelo Arrifano <miknix@gmail.com>
*
- * Information gathered from disassebled dsdt and from here:
+ * Information gathered from disassembled dsdt and from here:
* <http://www.microsoft.com/whdc/system/platform/firmware/DirAppLaunch.mspx>
*
* This program is free software; you can redistribute it and/or modify
@@ -23,7 +23,9 @@
*
*/
-#define QUICKSTART_VERSION "1.03"
+#define QUICKSTART_VERSION "1.04"
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
@@ -37,118 +39,73 @@ MODULE_AUTHOR("Angelo Arrifano");
MODULE_DESCRIPTION("ACPI Direct App Launch driver");
MODULE_LICENSE("GPL");
-#define QUICKSTART_ACPI_DEVICE_NAME "quickstart"
-#define QUICKSTART_ACPI_CLASS "quickstart"
-#define QUICKSTART_ACPI_HID "PNP0C32"
-
-#define QUICKSTART_PF_DRIVER_NAME "quickstart"
-#define QUICKSTART_PF_DEVICE_NAME "quickstart"
-#define QUICKSTART_PF_DEVATTR_NAME "pressed_button"
+#define QUICKSTART_ACPI_DEVICE_NAME "quickstart"
+#define QUICKSTART_ACPI_CLASS "quickstart"
+#define QUICKSTART_ACPI_HID "PNP0C32"
-#define QUICKSTART_MAX_BTN_NAME_LEN 16
+#define QUICKSTART_PF_DRIVER_NAME "quickstart"
+#define QUICKSTART_PF_DEVICE_NAME "quickstart"
-/* There will be two events:
- * 0x02 - A hot button was pressed while device was off/sleeping.
- * 0x80 - A hot button was pressed while device was up. */
-#define QUICKSTART_EVENT_WAKE 0x02
-#define QUICKSTART_EVENT_RUNTIME 0x80
+/*
+ * There will be two events:
+ * 0x02 - A hot button was pressed while device was off/sleeping.
+ * 0x80 - A hot button was pressed while device was up.
+ */
+#define QUICKSTART_EVENT_WAKE 0x02
+#define QUICKSTART_EVENT_RUNTIME 0x80
-struct quickstart_btn {
+struct quickstart_button {
char *name;
unsigned int id;
- struct quickstart_btn *next;
+ struct list_head list;
};
-static struct quickstart_driver_data {
- struct quickstart_btn *btn_lst;
- struct quickstart_btn *pressed;
-} quickstart_data;
-
-/* ACPI driver Structs */
struct quickstart_acpi {
struct acpi_device *device;
- struct quickstart_btn *btn;
-};
-static int quickstart_acpi_add(struct acpi_device *device);
-static int quickstart_acpi_remove(struct acpi_device *device, int type);
-static const struct acpi_device_id quickstart_device_ids[] = {
- {QUICKSTART_ACPI_HID, 0},
- {"", 0},
+ struct quickstart_button *button;
};
-static struct acpi_driver quickstart_acpi_driver = {
- .name = "quickstart",
- .class = QUICKSTART_ACPI_CLASS,
- .ids = quickstart_device_ids,
- .ops = {
- .add = quickstart_acpi_add,
- .remove = quickstart_acpi_remove,
- },
-};
+static LIST_HEAD(buttons);
+static struct quickstart_button *pressed;
-/* Input device structs */
-struct input_dev *quickstart_input;
+static struct input_dev *quickstart_input;
-/* Platform driver structs */
-static ssize_t buttons_show(struct device *dev,
- struct device_attribute *attr,
- char *buf);
-static ssize_t pressed_button_show(struct device *dev,
+/* Platform driver functions */
+static ssize_t quickstart_buttons_show(struct device *dev,
struct device_attribute *attr,
- char *buf);
-static ssize_t pressed_button_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count);
-static DEVICE_ATTR(pressed_button, 0666, pressed_button_show,
- pressed_button_store);
-static DEVICE_ATTR(buttons, 0444, buttons_show, NULL);
-static struct platform_device *pf_device;
-static struct platform_driver pf_driver = {
- .driver = {
- .name = QUICKSTART_PF_DRIVER_NAME,
- .owner = THIS_MODULE,
- }
-};
-
-/*
- * Platform driver functions
- */
-static ssize_t buttons_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+ char *buf)
{
int count = 0;
- struct quickstart_btn *ptr = quickstart_data.btn_lst;
+ struct quickstart_button *b;
- if (!ptr)
+ if (list_empty(&buttons))
return snprintf(buf, PAGE_SIZE, "none");
- while (ptr && (count < PAGE_SIZE)) {
- if (ptr->name) {
- count += snprintf(buf + count,
- PAGE_SIZE - count,
- "%d\t%s\n", ptr->id, ptr->name);
+ list_for_each_entry(b, &buttons, list) {
+ count += snprintf(buf + count, PAGE_SIZE - count, "%u\t%s\n",
+ b->id, b->name);
+
+ if (count >= PAGE_SIZE) {
+ count = PAGE_SIZE;
+ break;
}
- ptr = ptr->next;
}
return count;
}
-static ssize_t pressed_button_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t quickstart_pressed_button_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
- return snprintf(buf, PAGE_SIZE, "%s\n",
- (quickstart_data.pressed ?
- quickstart_data.pressed->name : "none"));
+ return scnprintf(buf, PAGE_SIZE, "%s\n",
+ (pressed ? pressed->name : "none"));
}
-static ssize_t pressed_button_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t quickstart_pressed_button_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
if (count < 2)
return -EINVAL;
@@ -156,60 +113,40 @@ static ssize_t pressed_button_store(struct device *dev,
if (strncasecmp(buf, "none", 4) != 0)
return -EINVAL;
- quickstart_data.pressed = NULL;
+ pressed = NULL;
return count;
}
-/* Hotstart Helper functions */
-static int quickstart_btnlst_add(struct quickstart_btn **data)
+/* Helper functions */
+static struct quickstart_button *quickstart_buttons_add(void)
{
- struct quickstart_btn **ptr = &quickstart_data.btn_lst;
+ struct quickstart_button *b;
- while (*ptr)
- ptr = &((*ptr)->next);
+ b = kzalloc(sizeof(*b), GFP_KERNEL);
+ if (!b)
+ return NULL;
- *ptr = kzalloc(sizeof(struct quickstart_btn), GFP_KERNEL);
- if (!*ptr) {
- *data = NULL;
- return -ENOMEM;
- }
- *data = *ptr;
+ list_add_tail(&b->list, &buttons);
- return 0;
+ return b;
}
-static void quickstart_btnlst_del(struct quickstart_btn *data)
+static void quickstart_button_del(struct quickstart_button *data)
{
- struct quickstart_btn **ptr = &quickstart_data.btn_lst;
-
if (!data)
return;
- while (*ptr) {
- if (*ptr == data) {
- *ptr = (*ptr)->next;
- kfree(data);
- return;
- }
- ptr = &((*ptr)->next);
- }
-
- return;
+ list_del(&data->list);
+ kfree(data->name);
+ kfree(data);
}
-static void quickstart_btnlst_free(void)
+static void quickstart_buttons_free(void)
{
- struct quickstart_btn *ptr = quickstart_data.btn_lst;
- struct quickstart_btn *lptr = NULL;
-
- while (ptr) {
- lptr = ptr;
- ptr = ptr->next;
- kfree(lptr->name);
- kfree(lptr);
- }
+ struct quickstart_button *b, *n;
- return;
+ list_for_each_entry_safe(b, n, &buttons, list)
+ quickstart_button_del(b);
}
/* ACPI Driver functions */
@@ -220,107 +157,137 @@ static void quickstart_acpi_notify(acpi_handle handle, u32 event, void *data)
if (!quickstart)
return;
- if (event == QUICKSTART_EVENT_WAKE)
- quickstart_data.pressed = quickstart->btn;
- else if (event == QUICKSTART_EVENT_RUNTIME) {
- input_report_key(quickstart_input, quickstart->btn->id, 1);
+ switch (event) {
+ case QUICKSTART_EVENT_WAKE:
+ pressed = quickstart->button;
+ break;
+ case QUICKSTART_EVENT_RUNTIME:
+ input_report_key(quickstart_input, quickstart->button->id, 1);
input_sync(quickstart_input);
- input_report_key(quickstart_input, quickstart->btn->id, 0);
+ input_report_key(quickstart_input, quickstart->button->id, 0);
input_sync(quickstart_input);
+ break;
+ default:
+ pr_err("Unexpected ACPI event notify (%u)\n", event);
+ break;
}
- return;
}
-static void quickstart_acpi_ghid(struct quickstart_acpi *quickstart)
+static int quickstart_acpi_ghid(struct quickstart_acpi *quickstart)
{
acpi_status status;
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
- uint32_t usageid = 0;
-
- if (!quickstart)
- return;
+ int ret = 0;
- /* This returns a buffer telling the button usage ID,
- * and triggers pending notify events (The ones before booting). */
- status = acpi_evaluate_object(quickstart->device->handle,
- "GHID", NULL, &buffer);
- if (ACPI_FAILURE(status) || !buffer.pointer) {
- printk(KERN_ERR "quickstart: %s GHID method failed.\n",
- quickstart->btn->name);
- return;
+ /*
+ * This returns a buffer telling the button usage ID,
+ * and triggers pending notify events (The ones before booting).
+ */
+ status = acpi_evaluate_object(quickstart->device->handle, "GHID", NULL,
+ &buffer);
+ if (ACPI_FAILURE(status)) {
+ pr_err("%s GHID method failed\n", quickstart->button->name);
+ return -EINVAL;
}
- if (buffer.length < 8)
- return;
-
- /* <<The GHID method can return a BYTE, WORD, or DWORD.
+ /*
+ * <<The GHID method can return a BYTE, WORD, or DWORD.
* The value must be encoded in little-endian byte
- * order (least significant byte first).>> */
- usageid = *((uint32_t *)(buffer.pointer + (buffer.length - 8)));
- quickstart->btn->id = usageid;
+ * order (least significant byte first).>>
+ */
+ switch (buffer.length) {
+ case 1:
+ quickstart->button->id = *(uint8_t *)buffer.pointer;
+ break;
+ case 2:
+ quickstart->button->id = *(uint16_t *)buffer.pointer;
+ break;
+ case 4:
+ quickstart->button->id = *(uint32_t *)buffer.pointer;
+ break;
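+ /* Note: an 8-byte result is also accepted here, one case beyond the
+ * BYTE/WORD/DWORD forms the quoted specification text mentions.
+ */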
+ case 8:
+ quickstart->button->id = *(uint64_t *)buffer.pointer;
+ break;
+ default:
+ pr_err("%s GHID method returned buffer of unexpected length %lu\n",
+ quickstart->button->name,
+ (unsigned long)buffer.length);
+ ret = -EINVAL;
+ break;
+ }
kfree(buffer.pointer);
+
+ return ret;
}
-static int quickstart_acpi_config(struct quickstart_acpi *quickstart, char *bid)
+static int quickstart_acpi_config(struct quickstart_acpi *quickstart)
{
- int len = strlen(bid);
- int ret;
+ char *bid = acpi_device_bid(quickstart->device);
+ char *name;
- /* Add button to list */
- ret = quickstart_btnlst_add(&quickstart->btn);
- if (ret)
- return ret;
+ name = kmalloc(strlen(bid) + 1, GFP_KERNEL);
+ if (!name)
+ return -ENOMEM;
- quickstart->btn->name = kzalloc(len + 1, GFP_KERNEL);
- if (!quickstart->btn->name) {
- quickstart_btnlst_free();
+ /* Add new button to list */
+ quickstart->button = quickstart_buttons_add();
+ if (!quickstart->button) {
+ kfree(name);
return -ENOMEM;
}
- strcpy(quickstart->btn->name, bid);
+
+ quickstart->button->name = name;
+ strcpy(quickstart->button->name, bid);
return 0;
}
static int quickstart_acpi_add(struct acpi_device *device)
{
- int ret = 0;
- acpi_status status = AE_OK;
- struct quickstart_acpi *quickstart = NULL;
+ int ret;
+ acpi_status status;
+ struct quickstart_acpi *quickstart;
if (!device)
return -EINVAL;
- quickstart = kzalloc(sizeof(struct quickstart_acpi), GFP_KERNEL);
+ quickstart = kzalloc(sizeof(*quickstart), GFP_KERNEL);
if (!quickstart)
return -ENOMEM;
quickstart->device = device;
+
strcpy(acpi_device_name(device), QUICKSTART_ACPI_DEVICE_NAME);
strcpy(acpi_device_class(device), QUICKSTART_ACPI_CLASS);
device->driver_data = quickstart;
/* Add button to list and initialize some stuff */
- ret = quickstart_acpi_config(quickstart, acpi_device_bid(device));
- if (ret)
+ ret = quickstart_acpi_config(quickstart);
+ if (ret < 0)
goto fail_config;
- status = acpi_install_notify_handler(device->handle,
- ACPI_ALL_NOTIFY,
+ status = acpi_install_notify_handler(device->handle, ACPI_ALL_NOTIFY,
quickstart_acpi_notify,
quickstart);
if (ACPI_FAILURE(status)) {
- printk(KERN_ERR "quickstart: Notify handler install error\n");
+ pr_err("Notify handler install error\n");
ret = -ENODEV;
goto fail_installnotify;
}
- quickstart_acpi_ghid(quickstart);
+ ret = quickstart_acpi_ghid(quickstart);
+ if (ret < 0)
+ goto fail_ghid;
return 0;
+fail_ghid:
+ acpi_remove_notify_handler(device->handle, ACPI_ALL_NOTIFY,
+ quickstart_acpi_notify);
+
fail_installnotify:
- quickstart_btnlst_del(quickstart->btn);
+ quickstart_button_del(quickstart->button);
fail_config:
@@ -331,28 +298,54 @@ fail_config:
static int quickstart_acpi_remove(struct acpi_device *device, int type)
{
- acpi_status status = 0;
- struct quickstart_acpi *quickstart = NULL;
+ acpi_status status;
+ struct quickstart_acpi *quickstart;
- if (!device || !acpi_driver_data(device))
+ if (!device)
return -EINVAL;
quickstart = acpi_driver_data(device);
+ if (!quickstart)
+ return -EINVAL;
- status = acpi_remove_notify_handler(device->handle,
- ACPI_ALL_NOTIFY,
- quickstart_acpi_notify);
+ status = acpi_remove_notify_handler(device->handle, ACPI_ALL_NOTIFY,
+ quickstart_acpi_notify);
if (ACPI_FAILURE(status))
- printk(KERN_ERR "quickstart: Error removing notify handler\n");
-
+ pr_err("Error removing notify handler\n");
kfree(quickstart);
return 0;
}
-/* Module functions */
+/* Platform driver structs */
+static DEVICE_ATTR(pressed_button, 0666, quickstart_pressed_button_show,
+ quickstart_pressed_button_store);
+static DEVICE_ATTR(buttons, 0444, quickstart_buttons_show, NULL);
+static struct platform_device *pf_device;
+static struct platform_driver pf_driver = {
+ .driver = {
+ .name = QUICKSTART_PF_DRIVER_NAME,
+ .owner = THIS_MODULE,
+ }
+};
+
+static const struct acpi_device_id quickstart_device_ids[] = {
+ {QUICKSTART_ACPI_HID, 0},
+ {"", 0},
+};
+
+static struct acpi_driver quickstart_acpi_driver = {
+ .name = "quickstart",
+ .class = QUICKSTART_ACPI_CLASS,
+ .ids = quickstart_device_ids,
+ .ops = {
+ .add = quickstart_acpi_add,
+ .remove = quickstart_acpi_remove,
+ },
+};
+/* Module functions */
static void quickstart_exit(void)
{
input_unregister_device(quickstart_input);
@@ -366,15 +359,12 @@ static void quickstart_exit(void)
acpi_bus_unregister_driver(&quickstart_acpi_driver);
- quickstart_btnlst_free();
-
- return;
+ quickstart_buttons_free();
}
static int __init quickstart_init_input(void)
{
- struct quickstart_btn **ptr = &quickstart_data.btn_lst;
- int count;
+ struct quickstart_button *b;
int ret;
quickstart_input = input_allocate_device();
@@ -385,11 +375,9 @@ static int __init quickstart_init_input(void)
quickstart_input->name = "Quickstart ACPI Buttons";
quickstart_input->id.bustype = BUS_HOST;
- while (*ptr) {
- count++;
+ list_for_each_entry(b, &buttons, list) {
set_bit(EV_KEY, quickstart_input->evbit);
- set_bit((*ptr)->id, quickstart_input->keybit);
- ptr = &((*ptr)->next);
+ set_bit(b->id, quickstart_input->keybit);
}
ret = input_register_device(quickstart_input);
@@ -415,7 +403,7 @@ static int __init quickstart_init(void)
return ret;
/* If existing bus with no devices */
- if (!quickstart_data.btn_lst) {
+ if (list_empty(&buttons)) {
ret = -ENODEV;
goto fail_pfdrv_reg;
}
@@ -444,14 +432,12 @@ static int __init quickstart_init(void)
if (ret)
goto fail_dev_file2;
-
/* Input device */
ret = quickstart_init_input();
if (ret)
goto fail_input;
- printk(KERN_INFO "quickstart: ACPI Direct App Launch ver %s\n",
- QUICKSTART_VERSION);
+ pr_info("ACPI Direct App Launch ver %s\n", QUICKSTART_VERSION);
return 0;
fail_input:
diff --git a/drivers/staging/ramster/Kconfig b/drivers/staging/ramster/Kconfig
new file mode 100644
index 00000000000..8b57b87edda
--- /dev/null
+++ b/drivers/staging/ramster/Kconfig
@@ -0,0 +1,17 @@
+# Dependency on CONFIG_BROKEN is because there is a commit dependency
+# on a cleancache naming change to be submitted by Konrad Wilk:
+# a39c00ded70339603ffe1b0ffdf3ade85bcf009a "Merge branch 'stable/cleancache.v13'
+# into linux-next". Once this commit is present, BROKEN can be removed.
+config RAMSTER
+ bool "Cross-machine RAM capacity sharing, aka peer-to-peer tmem"
+ depends on (CLEANCACHE || FRONTSWAP) && CONFIGFS_FS=y && !ZCACHE && !XVMALLOC && !HIGHMEM && BROKEN
+ select LZO_COMPRESS
+ select LZO_DECOMPRESS
+ default n
+ help
+ RAMster allows RAM on other machines in a cluster to be utilized
+ dynamically and symmetrically instead of swapping to a local swap
+ disk, thus improving performance on memory-constrained workloads
+ while minimizing total RAM across the cluster. RAMster, like
+ zcache, compresses swap pages into local RAM, but then remotifies
+ the compressed pages to another node in the RAMster cluster.
diff --git a/drivers/staging/ramster/Makefile b/drivers/staging/ramster/Makefile
new file mode 100644
index 00000000000..bcc13c87f99
--- /dev/null
+++ b/drivers/staging/ramster/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_RAMSTER) += zcache-main.o tmem.o r2net.o xvmalloc.o cluster/
diff --git a/drivers/staging/ramster/TODO b/drivers/staging/ramster/TODO
new file mode 100644
index 00000000000..46fcf0c58ac
--- /dev/null
+++ b/drivers/staging/ramster/TODO
@@ -0,0 +1,13 @@
+For this staging driver, RAMster duplicates code from drivers/staging/zcache
+then incorporates changes to the local copy of the code. For V5, it also
+directly incorporates the soon-to-be-removed drivers/staging/zram/xvmalloc.[ch]
+as all testing has been done with xvmalloc rather than the new zsmalloc.
+Before RAMster can be promoted from staging, the zcache and RAMster drivers
+should be either merged or reorganized to separate out common code.
+
+Until V4, RAMster duplicated code from fs/ocfs2/cluster, but this made
+RAMster incompatible with ocfs2 running in the same kernel and included
+lots of code that could be removed. As of V5, the ocfs2 code has been
+mined and made RAMster-specific, made to communicate with a userland
+ramster-tools package rather than ocfs2-tools, and can co-exist with ocfs2
+both in the same kernel and in userland on the same machine.
diff --git a/drivers/staging/ramster/cluster/Makefile b/drivers/staging/ramster/cluster/Makefile
new file mode 100644
index 00000000000..9c6943652c0
--- /dev/null
+++ b/drivers/staging/ramster/cluster/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_RAMSTER) += ramster_nodemanager.o
+
+ramster_nodemanager-objs := heartbeat.o masklog.o nodemanager.o tcp.o
diff --git a/drivers/staging/ramster/cluster/heartbeat.c b/drivers/staging/ramster/cluster/heartbeat.c
new file mode 100644
index 00000000000..00209490756
--- /dev/null
+++ b/drivers/staging/ramster/cluster/heartbeat.c
@@ -0,0 +1,464 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * Copyright (C) 2004, 2005, 2012 Oracle. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/configfs.h>
+
+#include "heartbeat.h"
+#include "tcp.h"
+#include "nodemanager.h"
+
+#include "masklog.h"
+
+/*
+ * The first heartbeat pass had one global thread that would serialize all hb
+ * callback calls. This global serializing sem should only be removed once
+ * we've made sure that all callees can deal with being called concurrently
+ * from multiple hb region threads.
+ */
+static DECLARE_RWSEM(r2hb_callback_sem);
+
+/*
+ * multiple hb threads are watching multiple regions. A node is live
+ * whenever any of the threads sees activity from the node in its region.
+ */
+static DEFINE_SPINLOCK(r2hb_live_lock);
+static unsigned long r2hb_live_node_bitmap[BITS_TO_LONGS(R2NM_MAX_NODES)];
+
+static struct r2hb_callback {
+ struct list_head list;
+} r2hb_callbacks[R2HB_NUM_CB];
+
+enum r2hb_heartbeat_modes {
+ R2HB_HEARTBEAT_LOCAL = 0,
+ R2HB_HEARTBEAT_GLOBAL,
+ R2HB_HEARTBEAT_NUM_MODES,
+};
+
+char *r2hb_heartbeat_mode_desc[R2HB_HEARTBEAT_NUM_MODES] = {
+ "local", /* R2HB_HEARTBEAT_LOCAL */
+ "global", /* R2HB_HEARTBEAT_GLOBAL */
+};
+
+unsigned int r2hb_dead_threshold = R2HB_DEFAULT_DEAD_THRESHOLD;
+unsigned int r2hb_heartbeat_mode = R2HB_HEARTBEAT_LOCAL;
+
+/* Only sets a new threshold if there are no active regions.
+ *
+ * No locking or otherwise interesting code is required for reading
+ * r2hb_dead_threshold as it can't change once regions are active and
+ * it's not interesting to anyone until then anyway. */
+static void r2hb_dead_threshold_set(unsigned int threshold)
+{
+ if (threshold > R2HB_MIN_DEAD_THRESHOLD) {
+ spin_lock(&r2hb_live_lock);
+ r2hb_dead_threshold = threshold;
+ spin_unlock(&r2hb_live_lock);
+ }
+}
+
+static int r2hb_global_heartbeat_mode_set(unsigned int hb_mode)
+{
+ int ret = -1;
+
+ if (hb_mode < R2HB_HEARTBEAT_NUM_MODES) {
+ spin_lock(&r2hb_live_lock);
+ r2hb_heartbeat_mode = hb_mode;
+ ret = 0;
+ spin_unlock(&r2hb_live_lock);
+ }
+
+ return ret;
+}
+
+void r2hb_exit(void)
+{
+}
+
+int r2hb_init(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(r2hb_callbacks); i++)
+ INIT_LIST_HEAD(&r2hb_callbacks[i].list);
+
+ memset(r2hb_live_node_bitmap, 0, sizeof(r2hb_live_node_bitmap));
+
+ return 0;
+}
+
+/* if we're already in a callback then we're already serialized by the sem */
+static void r2hb_fill_node_map_from_callback(unsigned long *map,
+ unsigned bytes)
+{
+ BUG_ON(bytes < (BITS_TO_LONGS(R2NM_MAX_NODES) * sizeof(unsigned long)));
+
+ memcpy(map, &r2hb_live_node_bitmap, bytes);
+}
+
+/*
+ * get a map of all nodes that are heartbeating in any regions
+ */
+void r2hb_fill_node_map(unsigned long *map, unsigned bytes)
+{
+ /* callers want to serialize this map and callbacks so that they
+ * can trust that they don't miss nodes coming to the party */
+ down_read(&r2hb_callback_sem);
+ spin_lock(&r2hb_live_lock);
+ r2hb_fill_node_map_from_callback(map, bytes);
+ spin_unlock(&r2hb_live_lock);
+ up_read(&r2hb_callback_sem);
+}
+EXPORT_SYMBOL_GPL(r2hb_fill_node_map);
+
+/*
+ * heartbeat configfs bits. The heartbeat set is a default set under
+ * the cluster set in nodemanager.c.
+ */
+
+/* heartbeat set */
+
+struct r2hb_hb_group {
+ struct config_group hs_group;
+ /* some stuff? */
+};
+
+static struct r2hb_hb_group *to_r2hb_hb_group(struct config_group *group)
+{
+ return group ?
+ container_of(group, struct r2hb_hb_group, hs_group)
+ : NULL;
+}
+
+static struct config_item r2hb_config_item;
+
+static struct config_item *r2hb_hb_group_make_item(struct config_group *group,
+ const char *name)
+{
+ int ret;
+
+ if (strlen(name) > R2HB_MAX_REGION_NAME_LEN) {
+ ret = -ENAMETOOLONG;
+ goto out;
+ }
+
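+ /* RAMster keeps no per-region state: every mkdir hands back the same
+ * static placeholder item. The put here appears to balance the
+ * reference configfs takes on the item it is handed.
+ */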
+ config_item_put(&r2hb_config_item);
+
+ return &r2hb_config_item;
+out:
+ return ERR_PTR(ret);
+}
+
+static void r2hb_hb_group_drop_item(struct config_group *group,
+ struct config_item *item)
+{
+ if (r2hb_global_heartbeat_active()) {
+ printk(KERN_NOTICE "ramster: Heartbeat %s "
+ "on region %s (%s)\n",
+ "stopped/aborted", config_item_name(item),
+ "no region");
+ }
+
+ config_item_put(item);
+}
+
+struct r2hb_hb_group_attribute {
+ struct configfs_attribute attr;
+ ssize_t (*show)(struct r2hb_hb_group *, char *);
+ ssize_t (*store)(struct r2hb_hb_group *, const char *, size_t);
+};
+
+static ssize_t r2hb_hb_group_show(struct config_item *item,
+ struct configfs_attribute *attr,
+ char *page)
+{
+ struct r2hb_hb_group *reg = to_r2hb_hb_group(to_config_group(item));
+ struct r2hb_hb_group_attribute *r2hb_hb_group_attr =
+ container_of(attr, struct r2hb_hb_group_attribute, attr);
+ ssize_t ret = 0;
+
+ if (r2hb_hb_group_attr->show)
+ ret = r2hb_hb_group_attr->show(reg, page);
+ return ret;
+}
+
+static ssize_t r2hb_hb_group_store(struct config_item *item,
+ struct configfs_attribute *attr,
+ const char *page, size_t count)
+{
+ struct r2hb_hb_group *reg = to_r2hb_hb_group(to_config_group(item));
+ struct r2hb_hb_group_attribute *r2hb_hb_group_attr =
+ container_of(attr, struct r2hb_hb_group_attribute, attr);
+ ssize_t ret = -EINVAL;
+
+ if (r2hb_hb_group_attr->store)
+ ret = r2hb_hb_group_attr->store(reg, page, count);
+ return ret;
+}
+
+static ssize_t r2hb_hb_group_threshold_show(struct r2hb_hb_group *group,
+ char *page)
+{
+ return sprintf(page, "%u\n", r2hb_dead_threshold);
+}
+
+static ssize_t r2hb_hb_group_threshold_store(struct r2hb_hb_group *group,
+ const char *page,
+ size_t count)
+{
+ unsigned long tmp;
+ char *p = (char *)page;
+ int err;
+
+ err = kstrtoul(p, 10, &tmp);
+ if (err)
+ return err;
+
+ /* this will validate ranges for us. */
+ r2hb_dead_threshold_set((unsigned int) tmp);
+
+ return count;
+}
+
+static
+ssize_t r2hb_hb_group_mode_show(struct r2hb_hb_group *group,
+ char *page)
+{
+ return sprintf(page, "%s\n",
+ r2hb_heartbeat_mode_desc[r2hb_heartbeat_mode]);
+}
+
+static
+ssize_t r2hb_hb_group_mode_store(struct r2hb_hb_group *group,
+ const char *page, size_t count)
+{
+ unsigned int i;
+ int ret;
+ size_t len;
+
+ len = (page[count - 1] == '\n') ? count - 1 : count;
+ if (!len)
+ return -EINVAL;
+
+ for (i = 0; i < R2HB_HEARTBEAT_NUM_MODES; ++i) {
+ if (strnicmp(page, r2hb_heartbeat_mode_desc[i], len))
+ continue;
+
+ ret = r2hb_global_heartbeat_mode_set(i);
+ if (!ret)
+ printk(KERN_NOTICE "ramster: Heartbeat mode "
+ "set to %s\n",
+ r2hb_heartbeat_mode_desc[i]);
+ return count;
+ }
+
+ return -EINVAL;
+}
+
+static struct r2hb_hb_group_attribute r2hb_hb_group_attr_threshold = {
+ .attr = { .ca_owner = THIS_MODULE,
+ .ca_name = "dead_threshold",
+ .ca_mode = S_IRUGO | S_IWUSR },
+ .show = r2hb_hb_group_threshold_show,
+ .store = r2hb_hb_group_threshold_store,
+};
+
+static struct r2hb_hb_group_attribute r2hb_hb_group_attr_mode = {
+ .attr = { .ca_owner = THIS_MODULE,
+ .ca_name = "mode",
+ .ca_mode = S_IRUGO | S_IWUSR },
+ .show = r2hb_hb_group_mode_show,
+ .store = r2hb_hb_group_mode_store,
+};
+
+static struct configfs_attribute *r2hb_hb_group_attrs[] = {
+ &r2hb_hb_group_attr_threshold.attr,
+ &r2hb_hb_group_attr_mode.attr,
+ NULL,
+};
+
+static struct configfs_item_operations r2hb_heartbeat_group_item_ops = {
+ .show_attribute = r2hb_hb_group_show,
+ .store_attribute = r2hb_hb_group_store,
+};
+
+static struct configfs_group_operations r2hb_hb_group_group_ops = {
+ .make_item = r2hb_hb_group_make_item,
+ .drop_item = r2hb_hb_group_drop_item,
+};
+
+static struct config_item_type r2hb_hb_group_type = {
+ .ct_group_ops = &r2hb_hb_group_group_ops,
+ .ct_item_ops = &r2hb_heartbeat_group_item_ops,
+ .ct_attrs = r2hb_hb_group_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+/* this is just here to avoid touching group in heartbeat.h which the
+ * entire damn world #includes */
+struct config_group *r2hb_alloc_hb_set(void)
+{
+ struct r2hb_hb_group *hs = NULL;
+ struct config_group *ret = NULL;
+
+ hs = kzalloc(sizeof(struct r2hb_hb_group), GFP_KERNEL);
+ if (hs == NULL)
+ goto out;
+
+ config_group_init_type_name(&hs->hs_group, "heartbeat",
+ &r2hb_hb_group_type);
+
+ ret = &hs->hs_group;
+out:
+ if (ret == NULL)
+ kfree(hs);
+ return ret;
+}
+
+void r2hb_free_hb_set(struct config_group *group)
+{
+ struct r2hb_hb_group *hs = to_r2hb_hb_group(group);
+ kfree(hs);
+}
+
+/* hb callback registration and issuing */
+
+static struct r2hb_callback *hbcall_from_type(enum r2hb_callback_type type)
+{
+ if (type == R2HB_NUM_CB)
+ return ERR_PTR(-EINVAL);
+
+ return &r2hb_callbacks[type];
+}
+
+void r2hb_setup_callback(struct r2hb_callback_func *hc,
+ enum r2hb_callback_type type,
+ r2hb_cb_func *func,
+ void *data,
+ int priority)
+{
+ INIT_LIST_HEAD(&hc->hc_item);
+ hc->hc_func = func;
+ hc->hc_data = data;
+ hc->hc_priority = priority;
+ hc->hc_type = type;
+ hc->hc_magic = R2HB_CB_MAGIC;
+}
+EXPORT_SYMBOL_GPL(r2hb_setup_callback);
+
+int r2hb_register_callback(const char *region_uuid,
+ struct r2hb_callback_func *hc)
+{
+ struct r2hb_callback_func *tmp;
+ struct list_head *iter;
+ struct r2hb_callback *hbcall;
+ int ret;
+
+ BUG_ON(hc->hc_magic != R2HB_CB_MAGIC);
+ BUG_ON(!list_empty(&hc->hc_item));
+
+ hbcall = hbcall_from_type(hc->hc_type);
+ if (IS_ERR(hbcall)) {
+ ret = PTR_ERR(hbcall);
+ goto out;
+ }
+
+ down_write(&r2hb_callback_sem);
+
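+ /* Keep the list sorted by ascending hc_priority; entries with equal
+ * priority go after existing ones.
+ */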
+ list_for_each(iter, &hbcall->list) {
+ tmp = list_entry(iter, struct r2hb_callback_func, hc_item);
+ if (hc->hc_priority < tmp->hc_priority) {
+ list_add_tail(&hc->hc_item, iter);
+ break;
+ }
+ }
+ if (list_empty(&hc->hc_item))
+ list_add_tail(&hc->hc_item, &hbcall->list);
+
+ up_write(&r2hb_callback_sem);
+ ret = 0;
+out:
+ mlog(ML_CLUSTER, "returning %d on behalf of %p for funcs %p\n",
+ ret, __builtin_return_address(0), hc);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(r2hb_register_callback);
+
+void r2hb_unregister_callback(const char *region_uuid,
+ struct r2hb_callback_func *hc)
+{
+ BUG_ON(hc->hc_magic != R2HB_CB_MAGIC);
+
+ mlog(ML_CLUSTER, "on behalf of %p for funcs %p\n",
+ __builtin_return_address(0), hc);
+
+ /* XXX Can this happen _with_ a region reference? */
+ if (list_empty(&hc->hc_item))
+ return;
+
+ down_write(&r2hb_callback_sem);
+
+ list_del_init(&hc->hc_item);
+
+ up_write(&r2hb_callback_sem);
+}
+EXPORT_SYMBOL_GPL(r2hb_unregister_callback);
+
+int r2hb_check_node_heartbeating_from_callback(u8 node_num)
+{
+ unsigned long testing_map[BITS_TO_LONGS(R2NM_MAX_NODES)];
+
+ r2hb_fill_node_map_from_callback(testing_map, sizeof(testing_map));
+ if (!test_bit(node_num, testing_map)) {
+ mlog(ML_HEARTBEAT,
+ "node (%u) does not have heartbeating enabled.\n",
+ node_num);
+ return 0;
+ }
+
+ return 1;
+}
+EXPORT_SYMBOL_GPL(r2hb_check_node_heartbeating_from_callback);
+
+void r2hb_stop_all_regions(void)
+{
+}
+EXPORT_SYMBOL_GPL(r2hb_stop_all_regions);
+
+/*
+ * this is just a hack until we get the plumbing which flips file systems
+ * read only and drops the hb ref instead of killing the node dead.
+ */
+int r2hb_global_heartbeat_active(void)
+{
+ return (r2hb_heartbeat_mode == R2HB_HEARTBEAT_GLOBAL);
+}
+EXPORT_SYMBOL(r2hb_global_heartbeat_active);
+
+/* added for RAMster */
+void r2hb_manual_set_node_heartbeating(int node_num)
+{
+ if (node_num < R2NM_MAX_NODES)
+ set_bit(node_num, r2hb_live_node_bitmap);
+}
+EXPORT_SYMBOL(r2hb_manual_set_node_heartbeating);
diff --git a/drivers/staging/ramster/cluster/heartbeat.h b/drivers/staging/ramster/cluster/heartbeat.h
new file mode 100644
index 00000000000..6cbc775bd63
--- /dev/null
+++ b/drivers/staging/ramster/cluster/heartbeat.h
@@ -0,0 +1,87 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * heartbeat.h
+ *
+ * Function prototypes
+ *
+ * Copyright (C) 2004 Oracle. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ *
+ */
+
+#ifndef R2CLUSTER_HEARTBEAT_H
+#define R2CLUSTER_HEARTBEAT_H
+
+#define R2HB_REGION_TIMEOUT_MS 2000
+
+#define R2HB_MAX_REGION_NAME_LEN 32
+
+/* number of changes to be seen as live */
+#define R2HB_LIVE_THRESHOLD 2
+/* number of equal samples to be seen as dead */
+extern unsigned int r2hb_dead_threshold;
+#define R2HB_DEFAULT_DEAD_THRESHOLD 31
+/* Otherwise MAX_WRITE_TIMEOUT will be zero... */
+#define R2HB_MIN_DEAD_THRESHOLD 2
+#define R2HB_MAX_WRITE_TIMEOUT_MS \
+ (R2HB_REGION_TIMEOUT_MS * (r2hb_dead_threshold - 1))
+
+#define R2HB_CB_MAGIC 0x51d1e4ec
+
+/* callback stuff */
+enum r2hb_callback_type {
+ R2HB_NODE_DOWN_CB = 0,
+ R2HB_NODE_UP_CB,
+ R2HB_NUM_CB
+};
+
+struct r2nm_node;
+typedef void (r2hb_cb_func)(struct r2nm_node *, int, void *);
+
+struct r2hb_callback_func {
+ u32 hc_magic;
+ struct list_head hc_item;
+ r2hb_cb_func *hc_func;
+ void *hc_data;
+ int hc_priority;
+ enum r2hb_callback_type hc_type;
+};
+
+struct config_group *r2hb_alloc_hb_set(void);
+void r2hb_free_hb_set(struct config_group *group);
+
+void r2hb_setup_callback(struct r2hb_callback_func *hc,
+ enum r2hb_callback_type type,
+ r2hb_cb_func *func,
+ void *data,
+ int priority);
+int r2hb_register_callback(const char *region_uuid,
+ struct r2hb_callback_func *hc);
+void r2hb_unregister_callback(const char *region_uuid,
+ struct r2hb_callback_func *hc);
+void r2hb_fill_node_map(unsigned long *map,
+ unsigned bytes);
+void r2hb_exit(void);
+int r2hb_init(void);
+int r2hb_check_node_heartbeating_from_callback(u8 node_num);
+void r2hb_stop_all_regions(void);
+int r2hb_get_all_regions(char *region_uuids, u8 numregions);
+int r2hb_global_heartbeat_active(void);
+void r2hb_manual_set_node_heartbeating(int);
+
+#endif /* R2CLUSTER_HEARTBEAT_H */
diff --git a/drivers/staging/ramster/cluster/masklog.c b/drivers/staging/ramster/cluster/masklog.c
new file mode 100644
index 00000000000..1261d8579aa
--- /dev/null
+++ b/drivers/staging/ramster/cluster/masklog.c
@@ -0,0 +1,155 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * Copyright (C) 2004, 2005, 2012 Oracle. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/string.h>
+#include <linux/uaccess.h>
+
+#include "masklog.h"
+
+struct mlog_bits r2_mlog_and_bits = MLOG_BITS_RHS(MLOG_INITIAL_AND_MASK);
+EXPORT_SYMBOL_GPL(r2_mlog_and_bits);
+struct mlog_bits r2_mlog_not_bits = MLOG_BITS_RHS(0);
+EXPORT_SYMBOL_GPL(r2_mlog_not_bits);
+
+static ssize_t mlog_mask_show(u64 mask, char *buf)
+{
+ char *state;
+
+ if (__mlog_test_u64(mask, r2_mlog_and_bits))
+ state = "allow";
+ else if (__mlog_test_u64(mask, r2_mlog_not_bits))
+ state = "deny";
+ else
+ state = "off";
+
+ return snprintf(buf, PAGE_SIZE, "%s\n", state);
+}
+
+static ssize_t mlog_mask_store(u64 mask, const char *buf, size_t count)
+{
+ if (!strnicmp(buf, "allow", 5)) {
+ __mlog_set_u64(mask, r2_mlog_and_bits);
+ __mlog_clear_u64(mask, r2_mlog_not_bits);
+ } else if (!strnicmp(buf, "deny", 4)) {
+ __mlog_set_u64(mask, r2_mlog_not_bits);
+ __mlog_clear_u64(mask, r2_mlog_and_bits);
+ } else if (!strnicmp(buf, "off", 3)) {
+ __mlog_clear_u64(mask, r2_mlog_not_bits);
+ __mlog_clear_u64(mask, r2_mlog_and_bits);
+ } else
+ return -EINVAL;
+
+ return count;
+}
+
+struct mlog_attribute {
+ struct attribute attr;
+ u64 mask;
+};
+
+#define to_mlog_attr(_attr) container_of(_attr, struct mlog_attribute, attr)
+
+#define define_mask(_name) { \
+ .attr = { \
+ .name = #_name, \
+ .mode = S_IRUGO | S_IWUSR, \
+ }, \
+ .mask = ML_##_name, \
+}
+
+static struct mlog_attribute mlog_attrs[MLOG_MAX_BITS] = {
+ define_mask(TCP),
+ define_mask(MSG),
+ define_mask(SOCKET),
+ define_mask(HEARTBEAT),
+ define_mask(HB_BIO),
+ define_mask(DLMFS),
+ define_mask(DLM),
+ define_mask(DLM_DOMAIN),
+ define_mask(DLM_THREAD),
+ define_mask(DLM_MASTER),
+ define_mask(DLM_RECOVERY),
+ define_mask(DLM_GLUE),
+ define_mask(VOTE),
+ define_mask(CONN),
+ define_mask(QUORUM),
+ define_mask(BASTS),
+ define_mask(CLUSTER),
+ define_mask(ERROR),
+ define_mask(NOTICE),
+ define_mask(KTHREAD),
+};
+
+static struct attribute *mlog_attr_ptrs[MLOG_MAX_BITS] = {NULL, };
+
+static ssize_t mlog_show(struct kobject *obj, struct attribute *attr,
+ char *buf)
+{
+ struct mlog_attribute *mlog_attr = to_mlog_attr(attr);
+
+ return mlog_mask_show(mlog_attr->mask, buf);
+}
+
+static ssize_t mlog_store(struct kobject *obj, struct attribute *attr,
+ const char *buf, size_t count)
+{
+ struct mlog_attribute *mlog_attr = to_mlog_attr(attr);
+
+ return mlog_mask_store(mlog_attr->mask, buf, count);
+}
+
+static const struct sysfs_ops mlog_attr_ops = {
+ .show = mlog_show,
+ .store = mlog_store,
+};
+
+static struct kobj_type mlog_ktype = {
+ .default_attrs = mlog_attr_ptrs,
+ .sysfs_ops = &mlog_attr_ops,
+};
+
+static struct kset mlog_kset = {
+ .kobj = {.ktype = &mlog_ktype},
+};
+
+int r2_mlog_sys_init(struct kset *r2cb_kset)
+{
+ int i = 0;
+
+ while (mlog_attrs[i].attr.mode) {
+ mlog_attr_ptrs[i] = &mlog_attrs[i].attr;
+ i++;
+ }
+ mlog_attr_ptrs[i] = NULL;
+
+ kobject_set_name(&mlog_kset.kobj, "logmask");
+ mlog_kset.kobj.kset = r2cb_kset;
+ return kset_register(&mlog_kset);
+}
+
+void r2_mlog_sys_shutdown(void)
+{
+ kset_unregister(&mlog_kset);
+}
diff --git a/drivers/staging/ramster/cluster/masklog.h b/drivers/staging/ramster/cluster/masklog.h
new file mode 100644
index 00000000000..918ae110b69
--- /dev/null
+++ b/drivers/staging/ramster/cluster/masklog.h
@@ -0,0 +1,220 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * Copyright (C) 2005, 2012 Oracle. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#ifndef R2CLUSTER_MASKLOG_H
+#define R2CLUSTER_MASKLOG_H
+
+/*
+ * For now this is a trivial wrapper around printk() that gives the critical
+ * ability to enable sets of debugging output at run-time. In the future this
+ * will almost certainly be redirected to relayfs so that it can pay a
+ * substantially lower Heisenberg tax.
+ *
+ * Callers associate the message with a bitmask and a global bitmask is
+ * maintained with help from /proc. If any of the bits match the message is
+ * output.
+ *
+ * We must have efficient bit tests on i386 and it seems gcc still emits crazy
+ * code for the 64bit compare. It emits very good code for the dual unsigned
+ * long tests, though, completely avoiding tests that can never pass if the
+ * caller gives a constant bitmask that fills one of the longs with all 0s. So
+ * the desire is to have almost all of the calls decided on by comparing just
+ * one of the longs. This leads to having infrequently given bits that are
+ * frequently matched in the high bits.
+ *
+ * _ERROR and _NOTICE are used for messages that always go to the console and
+ * have appropriate KERN_ prefixes. We wrap these in our function instead of
+ * just calling printk() so that this can eventually make its way through
+ * relayfs along with the debugging messages. Everything else gets KERN_DEBUG.
+ * The inline tests and macro dance give GCC the opportunity to quite cleverly
+ * only emit the appropriate printk() when the caller passes in a constant
+ * mask, as is almost always the case.
+ *
+ * All this bitmask nonsense is managed from the files under
+ * /sys/fs/r2cb/logmask/. Reading the files gives a straightforward
+ * indication of which bits are allowed (allow) or denied (off/deny).
+ * ENTRY deny
+ * EXIT deny
+ * TCP off
+ * MSG off
+ * SOCKET off
+ * ERROR allow
+ * NOTICE allow
+ *
+ * Writing changes the state of a given bit and requires a strictly formatted
+ * single write() call:
+ *
+ * write(fd, "allow", 5);
+ *
+ * Echoing allow/deny/off string into the logmask files can flip the bits
+ * on or off as expected; here is the bash script for example:
+ *
+ * log_mask="/sys/fs/r2cb/log_mask"
+ * for node in ENTRY EXIT TCP MSG SOCKET ERROR NOTICE; do
+ * echo allow >"$log_mask"/"$node"
+ * done
+ *
+ * The debugfs.ramster tool can also flip the bits with the -l option:
+ *
+ * debugfs.ramster -l TCP allow
+ */
+
+/* for task_struct */
+#include <linux/sched.h>
+
+/* bits that are frequently given and infrequently matched in the low word */
+/* NOTE: If you add a flag, you need to also update masklog.c! */
+#define ML_TCP 0x0000000000000001ULL /* net cluster/tcp.c */
+#define ML_MSG 0x0000000000000002ULL /* net network messages */
+#define ML_SOCKET 0x0000000000000004ULL /* net socket lifetime */
+#define ML_HEARTBEAT 0x0000000000000008ULL /* hb all heartbeat tracking */
+#define ML_HB_BIO 0x0000000000000010ULL /* hb io tracing */
+#define ML_DLMFS 0x0000000000000020ULL /* dlm user dlmfs */
+#define ML_DLM 0x0000000000000040ULL /* dlm general debugging */
+#define ML_DLM_DOMAIN 0x0000000000000080ULL /* dlm domain debugging */
+#define ML_DLM_THREAD 0x0000000000000100ULL /* dlm domain thread */
+#define ML_DLM_MASTER 0x0000000000000200ULL /* dlm master functions */
+#define ML_DLM_RECOVERY 0x0000000000000400ULL /* dlm recovery functions */
+#define ML_DLM_GLUE 0x0000000000000800ULL /* ramster dlm glue layer */
+#define ML_VOTE 0x0000000000001000ULL /* ramster node messaging */
+#define ML_CONN 0x0000000000002000ULL /* net connection management */
+#define ML_QUORUM 0x0000000000004000ULL /* net connection quorum */
+#define ML_BASTS 0x0000000000008000ULL /* dlmglue asts and basts */
+#define ML_CLUSTER 0x0000000000010000ULL /* cluster stack */
+
+/* bits that are infrequently given and frequently matched in the high word */
+#define ML_ERROR 0x1000000000000000ULL /* sent to KERN_ERR */
+#define ML_NOTICE 0x2000000000000000ULL /* sent to KERN_NOTICE */
+#define ML_KTHREAD 0x4000000000000000ULL /* kernel thread activity */
+
+#define MLOG_INITIAL_AND_MASK (ML_ERROR|ML_NOTICE)
+#ifndef MLOG_MASK_PREFIX
+#define MLOG_MASK_PREFIX 0
+#endif
+
+/*
+ * When logging is disabled, force the bit test to 0 for anything other
+ * than errors and notices, allowing gcc to remove the code completely.
+ * When enabled, allow all masks.
+ */
+#if defined(CONFIG_RAMSTER_DEBUG_MASKLOG)
+#define ML_ALLOWED_BITS (~0)
+#else
+#define ML_ALLOWED_BITS (ML_ERROR|ML_NOTICE)
+#endif
+
+#define MLOG_MAX_BITS 64
+
+struct mlog_bits {
+ unsigned long words[MLOG_MAX_BITS / BITS_PER_LONG];
+};
+
+extern struct mlog_bits r2_mlog_and_bits, r2_mlog_not_bits;
+
+#if BITS_PER_LONG == 32
+
+#define __mlog_test_u64(mask, bits) \
+ ((u32)(mask & 0xffffffff) & bits.words[0] || \
+ ((u64)(mask) >> 32) & bits.words[1])
+#define __mlog_set_u64(mask, bits) do { \
+ bits.words[0] |= (u32)(mask & 0xffffffff); \
+ bits.words[1] |= (u64)(mask) >> 32; \
+} while (0)
+#define __mlog_clear_u64(mask, bits) do { \
+ bits.words[0] &= ~((u32)(mask & 0xffffffff)); \
+ bits.words[1] &= ~((u64)(mask) >> 32); \
+} while (0)
+#define MLOG_BITS_RHS(mask) { \
+ { \
+ [0] = (u32)(mask & 0xffffffff), \
+ [1] = (u64)(mask) >> 32, \
+ } \
+}
+
+#else /* 32bit long above, 64bit long below */
+
+#define __mlog_test_u64(mask, bits) ((mask) & bits.words[0])
+#define __mlog_set_u64(mask, bits) do { \
+ bits.words[0] |= (mask); \
+} while (0)
+#define __mlog_clear_u64(mask, bits) do { \
+ bits.words[0] &= ~(mask); \
+} while (0)
+#define MLOG_BITS_RHS(mask) { { (mask) } }
+
+#endif
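+
+/*
+ * Worked example (illustrative only): on a 32-bit build,
+ * MLOG_BITS_RHS(ML_NOTICE|ML_TCP) initializes words[0] = 0x00000001
+ * (ML_TCP in the low half) and words[1] = 0x20000000 (ML_NOTICE in the
+ * high half); __mlog_test_u64() then checks both halves.  On a 64-bit
+ * build the whole mask lands in words[0].
+ */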
+
+/*
+ * smp_processor_id() "helpfully" screams when called outside preemptible
+ * regions in current kernels. sles doesn't have the variants that don't
+ * scream. just do this instead of trying to guess which we're building
+ * against.. *sigh*.
+ */
+#define __mlog_cpu_guess ({ \
+ unsigned long _cpu = get_cpu(); \
+ put_cpu(); \
+ _cpu; \
+})
+
+/* In the following two macros, the whitespace after the ',' just
+ * before ##args is intentional. Otherwise, gcc 2.95 will eat the
+ * previous token if args expands to nothing.
+ */
+#define __mlog_printk(level, fmt, args...) \
+ printk(level "(%s,%u,%lu):%s:%d " fmt, current->comm, \
+ task_pid_nr(current), __mlog_cpu_guess, \
+ __PRETTY_FUNCTION__, __LINE__ , ##args)
+
+#define mlog(mask, fmt, args...) do { \
+ u64 __m = MLOG_MASK_PREFIX | (mask); \
+ if ((__m & ML_ALLOWED_BITS) && \
+ __mlog_test_u64(__m, r2_mlog_and_bits) && \
+ !__mlog_test_u64(__m, r2_mlog_not_bits)) { \
+ if (__m & ML_ERROR) \
+ __mlog_printk(KERN_ERR, "ERROR: "fmt , ##args); \
+ else if (__m & ML_NOTICE) \
+ __mlog_printk(KERN_NOTICE, fmt , ##args); \
+ else \
+ __mlog_printk(KERN_INFO, fmt , ##args); \
+ } \
+} while (0)
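+
+/*
+ * Typical usage (a minimal sketch, not part of this header): a caller
+ * selects its subsystem bit before the #include and then logs through
+ * mlog().  The tcp.c in this series does exactly this with ML_TCP:
+ *
+ *	#define MLOG_MASK_PREFIX ML_TCP
+ *	#include "masklog.h"
+ *
+ *	mlog(0, "debug message, gated on ML_TCP\n");
+ *	mlog(ML_ERROR, "error message, allowed by default\n");
+ */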
+
+#define mlog_errno(st) do { \
+ int _st = (st); \
+ if (_st != -ERESTARTSYS && _st != -EINTR && \
+ _st != AOP_TRUNCATED_PAGE && _st != -ENOSPC) \
+ mlog(ML_ERROR, "status = %lld\n", (long long)_st); \
+} while (0)
+
+#define mlog_bug_on_msg(cond, fmt, args...) do { \
+ if (cond) { \
+ mlog(ML_ERROR, "bug expression: " #cond "\n"); \
+ mlog(ML_ERROR, fmt, ##args); \
+ BUG(); \
+ } \
+} while (0)
+
+#include <linux/kobject.h>
+#include <linux/sysfs.h>
+int r2_mlog_sys_init(struct kset *r2cb_subsys);
+void r2_mlog_sys_shutdown(void);
+
+#endif /* R2CLUSTER_MASKLOG_H */
diff --git a/drivers/staging/ramster/cluster/nodemanager.c b/drivers/staging/ramster/cluster/nodemanager.c
new file mode 100644
index 00000000000..de0e5c8da6e
--- /dev/null
+++ b/drivers/staging/ramster/cluster/nodemanager.c
@@ -0,0 +1,992 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * Copyright (C) 2004, 2005, 2012 Oracle. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/configfs.h>
+
+#include "tcp.h"
+#include "nodemanager.h"
+#include "heartbeat.h"
+#include "masklog.h"
+
+/* for now we operate under the assertion that there can be only one
+ * cluster active at a time. Changing this will require trickling
+ * cluster references throughout where nodes are looked up */
+struct r2nm_cluster *r2nm_single_cluster;
+
+char *r2nm_fence_method_desc[R2NM_FENCE_METHODS] = {
+ "reset", /* R2NM_FENCE_RESET */
+ "panic", /* R2NM_FENCE_PANIC */
+};
+
+struct r2nm_node *r2nm_get_node_by_num(u8 node_num)
+{
+ struct r2nm_node *node = NULL;
+
+ if (node_num >= R2NM_MAX_NODES || r2nm_single_cluster == NULL)
+ goto out;
+
+ read_lock(&r2nm_single_cluster->cl_nodes_lock);
+ node = r2nm_single_cluster->cl_nodes[node_num];
+ if (node)
+ config_item_get(&node->nd_item);
+ read_unlock(&r2nm_single_cluster->cl_nodes_lock);
+out:
+ return node;
+}
+EXPORT_SYMBOL_GPL(r2nm_get_node_by_num);
+
+int r2nm_configured_node_map(unsigned long *map, unsigned bytes)
+{
+ struct r2nm_cluster *cluster = r2nm_single_cluster;
+
+ BUG_ON(bytes < (sizeof(cluster->cl_nodes_bitmap)));
+
+ if (cluster == NULL)
+ return -EINVAL;
+
+ read_lock(&cluster->cl_nodes_lock);
+ memcpy(map, cluster->cl_nodes_bitmap, sizeof(cluster->cl_nodes_bitmap));
+ read_unlock(&cluster->cl_nodes_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(r2nm_configured_node_map);
+
+static struct r2nm_node *r2nm_node_ip_tree_lookup(struct r2nm_cluster *cluster,
+ __be32 ip_needle,
+ struct rb_node ***ret_p,
+ struct rb_node **ret_parent)
+{
+ struct rb_node **p = &cluster->cl_node_ip_tree.rb_node;
+ struct rb_node *parent = NULL;
+ struct r2nm_node *node, *ret = NULL;
+
+ while (*p) {
+ int cmp;
+
+ parent = *p;
+ node = rb_entry(parent, struct r2nm_node, nd_ip_node);
+
+ cmp = memcmp(&ip_needle, &node->nd_ipv4_address,
+ sizeof(ip_needle));
+ if (cmp < 0)
+ p = &(*p)->rb_left;
+ else if (cmp > 0)
+ p = &(*p)->rb_right;
+ else {
+ ret = node;
+ break;
+ }
+ }
+
+ if (ret_p != NULL)
+ *ret_p = p;
+ if (ret_parent != NULL)
+ *ret_parent = parent;
+
+ return ret;
+}
+
+struct r2nm_node *r2nm_get_node_by_ip(__be32 addr)
+{
+ struct r2nm_node *node = NULL;
+ struct r2nm_cluster *cluster = r2nm_single_cluster;
+
+ if (cluster == NULL)
+ goto out;
+
+ read_lock(&cluster->cl_nodes_lock);
+ node = r2nm_node_ip_tree_lookup(cluster, addr, NULL, NULL);
+ if (node)
+ config_item_get(&node->nd_item);
+ read_unlock(&cluster->cl_nodes_lock);
+
+out:
+ return node;
+}
+EXPORT_SYMBOL_GPL(r2nm_get_node_by_ip);
+
+void r2nm_node_put(struct r2nm_node *node)
+{
+ config_item_put(&node->nd_item);
+}
+EXPORT_SYMBOL_GPL(r2nm_node_put);
+
+void r2nm_node_get(struct r2nm_node *node)
+{
+ config_item_get(&node->nd_item);
+}
+EXPORT_SYMBOL_GPL(r2nm_node_get);
+
+u8 r2nm_this_node(void)
+{
+ u8 node_num = R2NM_MAX_NODES;
+
+ if (r2nm_single_cluster && r2nm_single_cluster->cl_has_local)
+ node_num = r2nm_single_cluster->cl_local_node;
+
+ return node_num;
+}
+EXPORT_SYMBOL_GPL(r2nm_this_node);
+
+/* node configfs bits */
+
+static struct r2nm_cluster *to_r2nm_cluster(struct config_item *item)
+{
+ return item ?
+ container_of(to_config_group(item), struct r2nm_cluster,
+ cl_group)
+ : NULL;
+}
+
+static struct r2nm_node *to_r2nm_node(struct config_item *item)
+{
+ return item ? container_of(item, struct r2nm_node, nd_item) : NULL;
+}
+
+static void r2nm_node_release(struct config_item *item)
+{
+ struct r2nm_node *node = to_r2nm_node(item);
+ kfree(node);
+}
+
+static ssize_t r2nm_node_num_read(struct r2nm_node *node, char *page)
+{
+ return sprintf(page, "%d\n", node->nd_num);
+}
+
+static struct r2nm_cluster *to_r2nm_cluster_from_node(struct r2nm_node *node)
+{
+ /* through the first node_set .parent
+ * mycluster/nodes/mynode == r2nm_cluster->r2nm_node_group->r2nm_node */
+ return to_r2nm_cluster(node->nd_item.ci_parent->ci_parent);
+}
+
+enum {
+ R2NM_NODE_ATTR_NUM = 0,
+ R2NM_NODE_ATTR_PORT,
+ R2NM_NODE_ATTR_ADDRESS,
+ R2NM_NODE_ATTR_LOCAL,
+};
+
+static ssize_t r2nm_node_num_write(struct r2nm_node *node, const char *page,
+ size_t count)
+{
+ struct r2nm_cluster *cluster = to_r2nm_cluster_from_node(node);
+ unsigned long tmp;
+ char *p = (char *)page;
+ int err;
+
+ err = kstrtoul(p, 10, &tmp);
+ if (err)
+ return err;
+
+ if (tmp >= R2NM_MAX_NODES)
+ return -ERANGE;
+
+ /* once we're in the cl_nodes tree networking can look us up by
+ * node number and try to use our address and port attributes
+ * to connect to this node.. make sure that they've been set
+ * before writing the node attribute? */
+ if (!test_bit(R2NM_NODE_ATTR_ADDRESS, &node->nd_set_attributes) ||
+ !test_bit(R2NM_NODE_ATTR_PORT, &node->nd_set_attributes))
+ return -EINVAL; /* XXX */
+
+ write_lock(&cluster->cl_nodes_lock);
+ if (cluster->cl_nodes[tmp])
+ p = NULL;
+ else {
+ cluster->cl_nodes[tmp] = node;
+ node->nd_num = tmp;
+ set_bit(tmp, cluster->cl_nodes_bitmap);
+ }
+ write_unlock(&cluster->cl_nodes_lock);
+ if (p == NULL)
+ return -EEXIST;
+
+ return count;
+}
+static ssize_t r2nm_node_ipv4_port_read(struct r2nm_node *node, char *page)
+{
+ return sprintf(page, "%u\n", ntohs(node->nd_ipv4_port));
+}
+
+static ssize_t r2nm_node_ipv4_port_write(struct r2nm_node *node,
+ const char *page, size_t count)
+{
+ unsigned long tmp;
+ char *p = (char *)page;
+ int err;
+
+ err = kstrtoul(p, 10, &tmp);
+ if (err)
+ return err;
+
+ if (tmp == 0)
+ return -EINVAL;
+ if (tmp >= (u16)-1)
+ return -ERANGE;
+
+ node->nd_ipv4_port = htons(tmp);
+
+ return count;
+}
+
+static ssize_t r2nm_node_ipv4_address_read(struct r2nm_node *node, char *page)
+{
+ return sprintf(page, "%pI4\n", &node->nd_ipv4_address);
+}
+
+static ssize_t r2nm_node_ipv4_address_write(struct r2nm_node *node,
+ const char *page,
+ size_t count)
+{
+ struct r2nm_cluster *cluster = to_r2nm_cluster_from_node(node);
+ int ret, i;
+ struct rb_node **p, *parent;
+ unsigned int octets[4];
+ __be32 ipv4_addr = 0;
+
+ ret = sscanf(page, "%3u.%3u.%3u.%3u", &octets[3], &octets[2],
+ &octets[1], &octets[0]);
+ if (ret != 4)
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(octets); i++) {
+ if (octets[i] > 255)
+ return -ERANGE;
+ be32_add_cpu(&ipv4_addr, octets[i] << (i * 8));
+ }
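+	/*
+	 * e.g. "10.0.0.1" parses into octets[] = {1, 0, 0, 10}; the loop
+	 * above assembles host-order 0x0a000001, which be32_add_cpu()
+	 * stores in big-endian form.
+	 */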
+
+ ret = 0;
+ write_lock(&cluster->cl_nodes_lock);
+ if (r2nm_node_ip_tree_lookup(cluster, ipv4_addr, &p, &parent))
+ ret = -EEXIST;
+ else {
+ rb_link_node(&node->nd_ip_node, parent, p);
+ rb_insert_color(&node->nd_ip_node, &cluster->cl_node_ip_tree);
+ }
+ write_unlock(&cluster->cl_nodes_lock);
+ if (ret)
+ return ret;
+
+ memcpy(&node->nd_ipv4_address, &ipv4_addr, sizeof(ipv4_addr));
+
+ return count;
+}
+
+static ssize_t r2nm_node_local_read(struct r2nm_node *node, char *page)
+{
+ return sprintf(page, "%d\n", node->nd_local);
+}
+
+static ssize_t r2nm_node_local_write(struct r2nm_node *node, const char *page,
+ size_t count)
+{
+ struct r2nm_cluster *cluster = to_r2nm_cluster_from_node(node);
+ unsigned long tmp;
+ char *p = (char *)page;
+ ssize_t ret;
+ int err;
+
+ err = kstrtoul(p, 10, &tmp);
+ if (err)
+ return err;
+
+ tmp = !!tmp; /* boolean of whether this node wants to be local */
+
+ /* setting local turns on networking rx for now so we require having
+ * set everything else first */
+ if (!test_bit(R2NM_NODE_ATTR_ADDRESS, &node->nd_set_attributes) ||
+ !test_bit(R2NM_NODE_ATTR_NUM, &node->nd_set_attributes) ||
+ !test_bit(R2NM_NODE_ATTR_PORT, &node->nd_set_attributes))
+ return -EINVAL; /* XXX */
+
+ /* the only failure case is trying to set a new local node
+ * when a different one is already set */
+ if (tmp && tmp == cluster->cl_has_local &&
+ cluster->cl_local_node != node->nd_num)
+ return -EBUSY;
+
+ /* bring up the rx thread if we're setting the new local node. */
+ if (tmp && !cluster->cl_has_local) {
+ ret = r2net_start_listening(node);
+ if (ret)
+ return ret;
+ }
+
+ if (!tmp && cluster->cl_has_local &&
+ cluster->cl_local_node == node->nd_num) {
+ r2net_stop_listening(node);
+ cluster->cl_local_node = R2NM_INVALID_NODE_NUM;
+ }
+
+ node->nd_local = tmp;
+ if (node->nd_local) {
+ cluster->cl_has_local = tmp;
+ cluster->cl_local_node = node->nd_num;
+ }
+
+ return count;
+}
+
+struct r2nm_node_attribute {
+ struct configfs_attribute attr;
+ ssize_t (*show)(struct r2nm_node *, char *);
+ ssize_t (*store)(struct r2nm_node *, const char *, size_t);
+};
+
+static struct r2nm_node_attribute r2nm_node_attr_num = {
+ .attr = { .ca_owner = THIS_MODULE,
+ .ca_name = "num",
+ .ca_mode = S_IRUGO | S_IWUSR },
+ .show = r2nm_node_num_read,
+ .store = r2nm_node_num_write,
+};
+
+static struct r2nm_node_attribute r2nm_node_attr_ipv4_port = {
+ .attr = { .ca_owner = THIS_MODULE,
+ .ca_name = "ipv4_port",
+ .ca_mode = S_IRUGO | S_IWUSR },
+ .show = r2nm_node_ipv4_port_read,
+ .store = r2nm_node_ipv4_port_write,
+};
+
+static struct r2nm_node_attribute r2nm_node_attr_ipv4_address = {
+ .attr = { .ca_owner = THIS_MODULE,
+ .ca_name = "ipv4_address",
+ .ca_mode = S_IRUGO | S_IWUSR },
+ .show = r2nm_node_ipv4_address_read,
+ .store = r2nm_node_ipv4_address_write,
+};
+
+static struct r2nm_node_attribute r2nm_node_attr_local = {
+ .attr = { .ca_owner = THIS_MODULE,
+ .ca_name = "local",
+ .ca_mode = S_IRUGO | S_IWUSR },
+ .show = r2nm_node_local_read,
+ .store = r2nm_node_local_write,
+};
+
+static struct configfs_attribute *r2nm_node_attrs[] = {
+ [R2NM_NODE_ATTR_NUM] = &r2nm_node_attr_num.attr,
+ [R2NM_NODE_ATTR_PORT] = &r2nm_node_attr_ipv4_port.attr,
+ [R2NM_NODE_ATTR_ADDRESS] = &r2nm_node_attr_ipv4_address.attr,
+ [R2NM_NODE_ATTR_LOCAL] = &r2nm_node_attr_local.attr,
+ NULL,
+};
+
+static int r2nm_attr_index(struct configfs_attribute *attr)
+{
+ int i;
+ for (i = 0; i < ARRAY_SIZE(r2nm_node_attrs); i++) {
+ if (attr == r2nm_node_attrs[i])
+ return i;
+ }
+ BUG();
+ return 0;
+}
+
+static ssize_t r2nm_node_show(struct config_item *item,
+ struct configfs_attribute *attr,
+ char *page)
+{
+ struct r2nm_node *node = to_r2nm_node(item);
+ struct r2nm_node_attribute *r2nm_node_attr =
+ container_of(attr, struct r2nm_node_attribute, attr);
+ ssize_t ret = 0;
+
+ if (r2nm_node_attr->show)
+ ret = r2nm_node_attr->show(node, page);
+ return ret;
+}
+
+static ssize_t r2nm_node_store(struct config_item *item,
+ struct configfs_attribute *attr,
+ const char *page, size_t count)
+{
+ struct r2nm_node *node = to_r2nm_node(item);
+ struct r2nm_node_attribute *r2nm_node_attr =
+ container_of(attr, struct r2nm_node_attribute, attr);
+ ssize_t ret;
+ int attr_index = r2nm_attr_index(attr);
+
+ if (r2nm_node_attr->store == NULL) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (test_bit(attr_index, &node->nd_set_attributes))
+ return -EBUSY;
+
+ ret = r2nm_node_attr->store(node, page, count);
+ if (ret < count)
+ goto out;
+
+ set_bit(attr_index, &node->nd_set_attributes);
+out:
+ return ret;
+}
+
+static struct configfs_item_operations r2nm_node_item_ops = {
+ .release = r2nm_node_release,
+ .show_attribute = r2nm_node_show,
+ .store_attribute = r2nm_node_store,
+};
+
+static struct config_item_type r2nm_node_type = {
+ .ct_item_ops = &r2nm_node_item_ops,
+ .ct_attrs = r2nm_node_attrs,
+ .ct_owner = THIS_MODULE,
+};
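+
+/*
+ * For reference, userspace drives the four attributes above through
+ * configfs.  A hypothetical sketch (mount point, cluster and node names
+ * are examples; address and port must be written before num, and local
+ * last, per the checks in the store handlers):
+ *
+ *	mkdir -p /sys/kernel/config/cluster/mycluster/node/node0
+ *	cd /sys/kernel/config/cluster/mycluster/node/node0
+ *	echo 10.0.0.1 > ipv4_address
+ *	echo 7777 > ipv4_port
+ *	echo 0 > num
+ *	echo 1 > local
+ */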
+
+/* node set */
+
+struct r2nm_node_group {
+ struct config_group ns_group;
+ /* some stuff? */
+};
+
+#if 0
+static struct r2nm_node_group *to_r2nm_node_group(struct config_group *group)
+{
+ return group ?
+ container_of(group, struct r2nm_node_group, ns_group)
+ : NULL;
+}
+#endif
+
+struct r2nm_cluster_attribute {
+ struct configfs_attribute attr;
+ ssize_t (*show)(struct r2nm_cluster *, char *);
+ ssize_t (*store)(struct r2nm_cluster *, const char *, size_t);
+};
+
+static ssize_t r2nm_cluster_attr_write(const char *page, ssize_t count,
+ unsigned int *val)
+{
+ unsigned long tmp;
+ char *p = (char *)page;
+ int err;
+
+ err = kstrtoul(p, 10, &tmp);
+ if (err)
+ return err;
+
+ if (tmp == 0)
+ return -EINVAL;
+ if (tmp >= (u32)-1)
+ return -ERANGE;
+
+ *val = tmp;
+
+ return count;
+}
+
+static ssize_t r2nm_cluster_attr_idle_timeout_ms_read(
+ struct r2nm_cluster *cluster, char *page)
+{
+ return sprintf(page, "%u\n", cluster->cl_idle_timeout_ms);
+}
+
+static ssize_t r2nm_cluster_attr_idle_timeout_ms_write(
+ struct r2nm_cluster *cluster, const char *page, size_t count)
+{
+ ssize_t ret;
+ unsigned int val = 0;
+
+ ret = r2nm_cluster_attr_write(page, count, &val);
+
+ if (ret > 0) {
+ if (cluster->cl_idle_timeout_ms != val
+ && r2net_num_connected_peers()) {
+ mlog(ML_NOTICE,
+ "r2net: cannot change idle timeout after "
+ "the first peer has agreed to it."
+ " %d connected peers\n",
+ r2net_num_connected_peers());
+ ret = -EINVAL;
+ } else if (val <= cluster->cl_keepalive_delay_ms) {
+ mlog(ML_NOTICE, "r2net: idle timeout must be larger "
+ "than keepalive delay\n");
+ ret = -EINVAL;
+ } else {
+ cluster->cl_idle_timeout_ms = val;
+ }
+ }
+
+ return ret;
+}
+
+static ssize_t r2nm_cluster_attr_keepalive_delay_ms_read(
+ struct r2nm_cluster *cluster, char *page)
+{
+ return sprintf(page, "%u\n", cluster->cl_keepalive_delay_ms);
+}
+
+static ssize_t r2nm_cluster_attr_keepalive_delay_ms_write(
+ struct r2nm_cluster *cluster, const char *page, size_t count)
+{
+ ssize_t ret;
+ unsigned int val = 0;
+
+ ret = r2nm_cluster_attr_write(page, count, &val);
+
+ if (ret > 0) {
+ if (cluster->cl_keepalive_delay_ms != val
+ && r2net_num_connected_peers()) {
+ mlog(ML_NOTICE,
+ "r2net: cannot change keepalive delay after"
+ " the first peer has agreed to it."
+ " %d connected peers\n",
+ r2net_num_connected_peers());
+ ret = -EINVAL;
+ } else if (val >= cluster->cl_idle_timeout_ms) {
+ mlog(ML_NOTICE, "r2net: keepalive delay must be "
+ "smaller than idle timeout\n");
+ ret = -EINVAL;
+ } else {
+ cluster->cl_keepalive_delay_ms = val;
+ }
+ }
+
+ return ret;
+}
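+
+/*
+ * The two timeouts above are coupled: keepalive_delay_ms must stay
+ * strictly below idle_timeout_ms, and neither may change once a peer
+ * connection exists.  A hypothetical tuning example (values and cluster
+ * name are illustrative):
+ *
+ *	echo 5000 > /sys/kernel/config/cluster/mycluster/keepalive_delay_ms
+ *	echo 30000 > /sys/kernel/config/cluster/mycluster/idle_timeout_ms
+ */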
+
+static ssize_t r2nm_cluster_attr_reconnect_delay_ms_read(
+ struct r2nm_cluster *cluster, char *page)
+{
+ return sprintf(page, "%u\n", cluster->cl_reconnect_delay_ms);
+}
+
+static ssize_t r2nm_cluster_attr_reconnect_delay_ms_write(
+ struct r2nm_cluster *cluster, const char *page, size_t count)
+{
+ return r2nm_cluster_attr_write(page, count,
+ &cluster->cl_reconnect_delay_ms);
+}
+
+static ssize_t r2nm_cluster_attr_fence_method_read(
+ struct r2nm_cluster *cluster, char *page)
+{
+ ssize_t ret = 0;
+
+ if (cluster)
+ ret = sprintf(page, "%s\n",
+ r2nm_fence_method_desc[cluster->cl_fence_method]);
+ return ret;
+}
+
+static ssize_t r2nm_cluster_attr_fence_method_write(
+ struct r2nm_cluster *cluster, const char *page, size_t count)
+{
+ unsigned int i;
+
+ if (page[count - 1] != '\n')
+ goto bail;
+
+ for (i = 0; i < R2NM_FENCE_METHODS; ++i) {
+ if (count != strlen(r2nm_fence_method_desc[i]) + 1)
+ continue;
+ if (strncasecmp(page, r2nm_fence_method_desc[i], count - 1))
+ continue;
+ if (cluster->cl_fence_method != i) {
+ printk(KERN_INFO "ramster: Changing fence method to %s\n",
+ r2nm_fence_method_desc[i]);
+ cluster->cl_fence_method = i;
+ }
+ return count;
+ }
+
+bail:
+ return -EINVAL;
+}
+
+static struct r2nm_cluster_attribute r2nm_cluster_attr_idle_timeout_ms = {
+ .attr = { .ca_owner = THIS_MODULE,
+ .ca_name = "idle_timeout_ms",
+ .ca_mode = S_IRUGO | S_IWUSR },
+ .show = r2nm_cluster_attr_idle_timeout_ms_read,
+ .store = r2nm_cluster_attr_idle_timeout_ms_write,
+};
+
+static struct r2nm_cluster_attribute r2nm_cluster_attr_keepalive_delay_ms = {
+ .attr = { .ca_owner = THIS_MODULE,
+ .ca_name = "keepalive_delay_ms",
+ .ca_mode = S_IRUGO | S_IWUSR },
+ .show = r2nm_cluster_attr_keepalive_delay_ms_read,
+ .store = r2nm_cluster_attr_keepalive_delay_ms_write,
+};
+
+static struct r2nm_cluster_attribute r2nm_cluster_attr_reconnect_delay_ms = {
+ .attr = { .ca_owner = THIS_MODULE,
+ .ca_name = "reconnect_delay_ms",
+ .ca_mode = S_IRUGO | S_IWUSR },
+ .show = r2nm_cluster_attr_reconnect_delay_ms_read,
+ .store = r2nm_cluster_attr_reconnect_delay_ms_write,
+};
+
+static struct r2nm_cluster_attribute r2nm_cluster_attr_fence_method = {
+ .attr = { .ca_owner = THIS_MODULE,
+ .ca_name = "fence_method",
+ .ca_mode = S_IRUGO | S_IWUSR },
+ .show = r2nm_cluster_attr_fence_method_read,
+ .store = r2nm_cluster_attr_fence_method_write,
+};
+
+static struct configfs_attribute *r2nm_cluster_attrs[] = {
+ &r2nm_cluster_attr_idle_timeout_ms.attr,
+ &r2nm_cluster_attr_keepalive_delay_ms.attr,
+ &r2nm_cluster_attr_reconnect_delay_ms.attr,
+ &r2nm_cluster_attr_fence_method.attr,
+ NULL,
+};
+static ssize_t r2nm_cluster_show(struct config_item *item,
+ struct configfs_attribute *attr,
+ char *page)
+{
+ struct r2nm_cluster *cluster = to_r2nm_cluster(item);
+ struct r2nm_cluster_attribute *r2nm_cluster_attr =
+ container_of(attr, struct r2nm_cluster_attribute, attr);
+ ssize_t ret = 0;
+
+ if (r2nm_cluster_attr->show)
+ ret = r2nm_cluster_attr->show(cluster, page);
+ return ret;
+}
+
+static ssize_t r2nm_cluster_store(struct config_item *item,
+ struct configfs_attribute *attr,
+ const char *page, size_t count)
+{
+ struct r2nm_cluster *cluster = to_r2nm_cluster(item);
+ struct r2nm_cluster_attribute *r2nm_cluster_attr =
+ container_of(attr, struct r2nm_cluster_attribute, attr);
+ ssize_t ret;
+
+ if (r2nm_cluster_attr->store == NULL) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = r2nm_cluster_attr->store(cluster, page, count);
+out:
+ return ret;
+}
+
+static struct config_item *r2nm_node_group_make_item(struct config_group *group,
+ const char *name)
+{
+ struct r2nm_node *node = NULL;
+
+ if (strlen(name) > R2NM_MAX_NAME_LEN)
+ return ERR_PTR(-ENAMETOOLONG);
+
+ node = kzalloc(sizeof(struct r2nm_node), GFP_KERNEL);
+ if (node == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ strcpy(node->nd_name, name); /* use item.ci_namebuf instead? */
+ config_item_init_type_name(&node->nd_item, name, &r2nm_node_type);
+ spin_lock_init(&node->nd_lock);
+
+ mlog(ML_CLUSTER, "r2nm: Registering node %s\n", name);
+
+ return &node->nd_item;
+}
+
+static void r2nm_node_group_drop_item(struct config_group *group,
+ struct config_item *item)
+{
+ struct r2nm_node *node = to_r2nm_node(item);
+ struct r2nm_cluster *cluster =
+ to_r2nm_cluster(group->cg_item.ci_parent);
+
+ r2net_disconnect_node(node);
+
+ if (cluster->cl_has_local &&
+ (cluster->cl_local_node == node->nd_num)) {
+ cluster->cl_has_local = 0;
+ cluster->cl_local_node = R2NM_INVALID_NODE_NUM;
+ r2net_stop_listening(node);
+ }
+
+ /* XXX call into net to stop this node from trading messages */
+
+ write_lock(&cluster->cl_nodes_lock);
+
+ /* XXX sloppy */
+ if (node->nd_ipv4_address)
+ rb_erase(&node->nd_ip_node, &cluster->cl_node_ip_tree);
+
+ /* nd_num might be 0 if the node number hasn't been set.. */
+ if (cluster->cl_nodes[node->nd_num] == node) {
+ cluster->cl_nodes[node->nd_num] = NULL;
+ clear_bit(node->nd_num, cluster->cl_nodes_bitmap);
+ }
+ write_unlock(&cluster->cl_nodes_lock);
+
+ mlog(ML_CLUSTER, "r2nm: Unregistered node %s\n",
+ config_item_name(&node->nd_item));
+
+ config_item_put(item);
+}
+
+static struct configfs_group_operations r2nm_node_group_group_ops = {
+ .make_item = r2nm_node_group_make_item,
+ .drop_item = r2nm_node_group_drop_item,
+};
+
+static struct config_item_type r2nm_node_group_type = {
+ .ct_group_ops = &r2nm_node_group_group_ops,
+ .ct_owner = THIS_MODULE,
+};
+
+/* cluster */
+
+static void r2nm_cluster_release(struct config_item *item)
+{
+ struct r2nm_cluster *cluster = to_r2nm_cluster(item);
+
+ kfree(cluster->cl_group.default_groups);
+ kfree(cluster);
+}
+
+static struct configfs_item_operations r2nm_cluster_item_ops = {
+ .release = r2nm_cluster_release,
+ .show_attribute = r2nm_cluster_show,
+ .store_attribute = r2nm_cluster_store,
+};
+
+static struct config_item_type r2nm_cluster_type = {
+ .ct_item_ops = &r2nm_cluster_item_ops,
+ .ct_attrs = r2nm_cluster_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+/* cluster set */
+
+struct r2nm_cluster_group {
+ struct configfs_subsystem cs_subsys;
+ /* some stuff? */
+};
+
+#if 0
+static struct r2nm_cluster_group *
+to_r2nm_cluster_group(struct config_group *group)
+{
+ return group ?
+ container_of(to_configfs_subsystem(group),
+ struct r2nm_cluster_group, cs_subsys)
+ : NULL;
+}
+#endif
+
+static struct config_group *
+r2nm_cluster_group_make_group(struct config_group *group,
+ const char *name)
+{
+ struct r2nm_cluster *cluster = NULL;
+ struct r2nm_node_group *ns = NULL;
+ struct config_group *r2hb_group = NULL, *ret = NULL;
+ void *defs = NULL;
+
+ /* this runs under the parent dir's i_mutex; there can be only
+ * one caller in here at a time */
+ if (r2nm_single_cluster)
+ return ERR_PTR(-ENOSPC);
+
+ cluster = kzalloc(sizeof(struct r2nm_cluster), GFP_KERNEL);
+ ns = kzalloc(sizeof(struct r2nm_node_group), GFP_KERNEL);
+ defs = kcalloc(3, sizeof(struct config_group *), GFP_KERNEL);
+ r2hb_group = r2hb_alloc_hb_set();
+ if (cluster == NULL || ns == NULL || r2hb_group == NULL || defs == NULL)
+ goto out;
+
+ config_group_init_type_name(&cluster->cl_group, name,
+ &r2nm_cluster_type);
+ config_group_init_type_name(&ns->ns_group, "node",
+ &r2nm_node_group_type);
+
+ cluster->cl_group.default_groups = defs;
+ cluster->cl_group.default_groups[0] = &ns->ns_group;
+ cluster->cl_group.default_groups[1] = r2hb_group;
+ cluster->cl_group.default_groups[2] = NULL;
+ rwlock_init(&cluster->cl_nodes_lock);
+ cluster->cl_node_ip_tree = RB_ROOT;
+ cluster->cl_reconnect_delay_ms = R2NET_RECONNECT_DELAY_MS_DEFAULT;
+ cluster->cl_idle_timeout_ms = R2NET_IDLE_TIMEOUT_MS_DEFAULT;
+ cluster->cl_keepalive_delay_ms = R2NET_KEEPALIVE_DELAY_MS_DEFAULT;
+ cluster->cl_fence_method = R2NM_FENCE_RESET;
+
+ ret = &cluster->cl_group;
+ r2nm_single_cluster = cluster;
+
+out:
+ if (ret == NULL) {
+ kfree(cluster);
+ kfree(ns);
+ r2hb_free_hb_set(r2hb_group);
+ kfree(defs);
+ ret = ERR_PTR(-ENOMEM);
+ }
+
+ return ret;
+}
+
+static void r2nm_cluster_group_drop_item(struct config_group *group,
+ struct config_item *item)
+{
+ struct r2nm_cluster *cluster = to_r2nm_cluster(item);
+ int i;
+ struct config_item *killme;
+
+ BUG_ON(r2nm_single_cluster != cluster);
+ r2nm_single_cluster = NULL;
+
+ for (i = 0; cluster->cl_group.default_groups[i]; i++) {
+ killme = &cluster->cl_group.default_groups[i]->cg_item;
+ cluster->cl_group.default_groups[i] = NULL;
+ config_item_put(killme);
+ }
+
+ config_item_put(item);
+}
+
+static struct configfs_group_operations r2nm_cluster_group_group_ops = {
+ .make_group = r2nm_cluster_group_make_group,
+ .drop_item = r2nm_cluster_group_drop_item,
+};
+
+static struct config_item_type r2nm_cluster_group_type = {
+ .ct_group_ops = &r2nm_cluster_group_group_ops,
+ .ct_owner = THIS_MODULE,
+};
+
+static struct r2nm_cluster_group r2nm_cluster_group = {
+ .cs_subsys = {
+ .su_group = {
+ .cg_item = {
+ .ci_namebuf = "cluster",
+ .ci_type = &r2nm_cluster_group_type,
+ },
+ },
+ },
+};
+
+int r2nm_depend_item(struct config_item *item)
+{
+ return configfs_depend_item(&r2nm_cluster_group.cs_subsys, item);
+}
+
+void r2nm_undepend_item(struct config_item *item)
+{
+ configfs_undepend_item(&r2nm_cluster_group.cs_subsys, item);
+}
+
+int r2nm_depend_this_node(void)
+{
+ int ret = 0;
+ struct r2nm_node *local_node;
+
+ local_node = r2nm_get_node_by_num(r2nm_this_node());
+ if (!local_node) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = r2nm_depend_item(&local_node->nd_item);
+ r2nm_node_put(local_node);
+
+out:
+ return ret;
+}
+
+void r2nm_undepend_this_node(void)
+{
+ struct r2nm_node *local_node;
+
+ local_node = r2nm_get_node_by_num(r2nm_this_node());
+ BUG_ON(!local_node);
+
+ r2nm_undepend_item(&local_node->nd_item);
+ r2nm_node_put(local_node);
+}
+
+
+static void __exit exit_r2nm(void)
+{
+ /* XXX sync with hb callbacks and shut down hb? */
+ r2net_unregister_hb_callbacks();
+ configfs_unregister_subsystem(&r2nm_cluster_group.cs_subsys);
+
+ r2net_exit();
+ r2hb_exit();
+}
+
+static int __init init_r2nm(void)
+{
+ int ret = -1;
+
+ ret = r2hb_init();
+ if (ret)
+ goto out;
+
+ ret = r2net_init();
+ if (ret)
+ goto out_r2hb;
+
+ ret = r2net_register_hb_callbacks();
+ if (ret)
+ goto out_r2net;
+
+ config_group_init(&r2nm_cluster_group.cs_subsys.su_group);
+ mutex_init(&r2nm_cluster_group.cs_subsys.su_mutex);
+ ret = configfs_register_subsystem(&r2nm_cluster_group.cs_subsys);
+ if (ret) {
+ printk(KERN_ERR "nodemanager: Registration returned %d\n", ret);
+ goto out_callbacks;
+ }
+
+	/* success */
+	goto out;
+
+out_callbacks:
+ r2net_unregister_hb_callbacks();
+out_r2net:
+ r2net_exit();
+out_r2hb:
+ r2hb_exit();
+out:
+ return ret;
+}
+
+MODULE_AUTHOR("Oracle");
+MODULE_LICENSE("GPL");
+
+module_init(init_r2nm)
+module_exit(exit_r2nm)
diff --git a/drivers/staging/ramster/cluster/nodemanager.h b/drivers/staging/ramster/cluster/nodemanager.h
new file mode 100644
index 00000000000..41a04df5842
--- /dev/null
+++ b/drivers/staging/ramster/cluster/nodemanager.h
@@ -0,0 +1,88 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * nodemanager.h
+ *
+ * Function prototypes
+ *
+ * Copyright (C) 2004 Oracle. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ *
+ */
+
+#ifndef R2CLUSTER_NODEMANAGER_H
+#define R2CLUSTER_NODEMANAGER_H
+
+#include "ramster_nodemanager.h"
+
+/* This totally doesn't belong here. */
+#include <linux/configfs.h>
+#include <linux/rbtree.h>
+
+enum r2nm_fence_method {
+ R2NM_FENCE_RESET = 0,
+ R2NM_FENCE_PANIC,
+ R2NM_FENCE_METHODS, /* Number of fence methods */
+};
+
+struct r2nm_node {
+ spinlock_t nd_lock;
+ struct config_item nd_item;
+ char nd_name[R2NM_MAX_NAME_LEN+1]; /* replace? */
+ __u8 nd_num;
+ /* only one address per node, as attributes, for now. */
+ __be32 nd_ipv4_address;
+ __be16 nd_ipv4_port;
+ struct rb_node nd_ip_node;
+ /* there can be only one local node for now */
+ int nd_local;
+
+ unsigned long nd_set_attributes;
+};
+
+struct r2nm_cluster {
+ struct config_group cl_group;
+ unsigned cl_has_local:1;
+ u8 cl_local_node;
+ rwlock_t cl_nodes_lock;
+ struct r2nm_node *cl_nodes[R2NM_MAX_NODES];
+ struct rb_root cl_node_ip_tree;
+ unsigned int cl_idle_timeout_ms;
+ unsigned int cl_keepalive_delay_ms;
+ unsigned int cl_reconnect_delay_ms;
+ enum r2nm_fence_method cl_fence_method;
+
+ /* part of a hack for disk bitmap.. will go eventually. - zab */
+ unsigned long cl_nodes_bitmap[BITS_TO_LONGS(R2NM_MAX_NODES)];
+};
+
+extern struct r2nm_cluster *r2nm_single_cluster;
+
+u8 r2nm_this_node(void);
+
+int r2nm_configured_node_map(unsigned long *map, unsigned bytes);
+struct r2nm_node *r2nm_get_node_by_num(u8 node_num);
+struct r2nm_node *r2nm_get_node_by_ip(__be32 addr);
+void r2nm_node_get(struct r2nm_node *node);
+void r2nm_node_put(struct r2nm_node *node);
+
+int r2nm_depend_item(struct config_item *item);
+void r2nm_undepend_item(struct config_item *item);
+int r2nm_depend_this_node(void);
+void r2nm_undepend_this_node(void);
+
+#endif /* R2CLUSTER_NODEMANAGER_H */
diff --git a/drivers/staging/ramster/cluster/ramster_nodemanager.h b/drivers/staging/ramster/cluster/ramster_nodemanager.h
new file mode 100644
index 00000000000..49f879d943a
--- /dev/null
+++ b/drivers/staging/ramster/cluster/ramster_nodemanager.h
@@ -0,0 +1,39 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * ramster_nodemanager.h
+ *
+ * Header describing the interface between userspace and the kernel
+ * for the ramster_nodemanager module.
+ *
+ * Copyright (C) 2002, 2004, 2012 Oracle. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ *
+ */
+
+#ifndef _RAMSTER_NODEMANAGER_H
+#define _RAMSTER_NODEMANAGER_H
+
+#define R2NM_API_VERSION 5
+
+#define R2NM_MAX_NODES 255
+#define R2NM_INVALID_NODE_NUM 255
+
+/* host name, group name, cluster name all 64 bytes */
+#define R2NM_MAX_NAME_LEN 64 /* __NEW_UTS_LEN */
+
+#endif /* _RAMSTER_NODEMANAGER_H */
diff --git a/drivers/staging/ramster/cluster/tcp.c b/drivers/staging/ramster/cluster/tcp.c
new file mode 100644
index 00000000000..3af1b2c51b7
--- /dev/null
+++ b/drivers/staging/ramster/cluster/tcp.c
@@ -0,0 +1,2256 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ *
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * Copyright (C) 2004 Oracle. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ *
+ * ----
+ *
+ * Callers for this were originally written against a very simple synchronous
+ * API. This implementation reflects those simple callers. Some day I'm sure
+ * we'll need to move to a more robust posting/callback mechanism.
+ *
+ * Transmit calls pass in kernel virtual addresses and block copying this into
+ * the socket's tx buffers via a usual blocking sendmsg. They'll block waiting
+ * for a failed socket to time out. TX callers can also pass in a pointer to an
+ * 'int' which gets filled with an errno off the wire in response to the
+ * message they send.
+ *
+ * Handlers for unsolicited messages are registered. Each socket has a page
+ * that incoming data is copied into. First the header, then the data.
+ * Handlers are called from only one thread with a reference to this per-socket
+ * page. This page is destroyed after the handler call, so it can't be
+ * referenced beyond the call. Handlers may block but are discouraged from
+ * doing so.
+ *
+ * Any framing errors (bad magic, large payload lengths) close a connection.
+ *
+ * Our sock_container holds the state we associate with a socket. Its current
+ * framing state is held there as well as the refcounting we do around when it
+ * is safe to tear down the socket. The socket is only finally torn down from
+ * the container when the container loses all of its references -- so as long
+ * as you hold a ref on the container you can trust that the socket is valid
+ * for use with kernel socket APIs.
+ *
+ * Connections are initiated between a pair of nodes when the node with the
+ * higher node number gets a heartbeat callback which indicates that the lower
+ * numbered node has started heartbeating. The lower numbered node is passive
+ * and only accepts the connection if the higher numbered node is heartbeating.
+ */
+
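+/*
+ * A minimal caller sketch for the transmit path described above (the
+ * message type/key values are hypothetical; r2net_send_message() is
+ * declared in tcp.h).  ret reports local/transport errors while status
+ * carries the remote handler's return code back off the wire:
+ *
+ *	int status = 0, ret;
+ *
+ *	ret = r2net_send_message(MY_MSG_TYPE, MY_KEY, &payload,
+ *				 sizeof(payload), target_node, &status);
+ */
+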
+#include <linux/kernel.h>
+#include <linux/jiffies.h>
+#include <linux/slab.h>
+#include <linux/idr.h>
+#include <linux/kref.h>
+#include <linux/net.h>
+#include <linux/export.h>
+#include <linux/uaccess.h>
+#include <net/tcp.h>
+
+
+#include "heartbeat.h"
+#include "tcp.h"
+#include "nodemanager.h"
+#define MLOG_MASK_PREFIX ML_TCP
+#include "masklog.h"
+
+#include "tcp_internal.h"
+
+#define SC_NODEF_FMT "node %s (num %u) at %pI4:%u"
+
+/*
+ * In the following two log macros, the whitespace after the ',' just
+ * before ##args is intentional. Otherwise, gcc 2.95 will eat the
+ * previous token if args expands to nothing.
+ */
+#define msglog(hdr, fmt, args...) do { \
+ typeof(hdr) __hdr = (hdr); \
+ mlog(ML_MSG, "[mag %u len %u typ %u stat %d sys_stat %d " \
+ "key %08x num %u] " fmt, \
+ be16_to_cpu(__hdr->magic), be16_to_cpu(__hdr->data_len), \
+ be16_to_cpu(__hdr->msg_type), be32_to_cpu(__hdr->status), \
+ be32_to_cpu(__hdr->sys_status), be32_to_cpu(__hdr->key), \
+ be32_to_cpu(__hdr->msg_num) , ##args); \
+} while (0)
+
+#define sclog(sc, fmt, args...) do { \
+ typeof(sc) __sc = (sc); \
+ mlog(ML_SOCKET, "[sc %p refs %d sock %p node %u page %p " \
+ "pg_off %zu] " fmt, __sc, \
+ atomic_read(&__sc->sc_kref.refcount), __sc->sc_sock, \
+ __sc->sc_node->nd_num, __sc->sc_page, __sc->sc_page_off , \
+ ##args); \
+} while (0)
+
+static DEFINE_RWLOCK(r2net_handler_lock);
+static struct rb_root r2net_handler_tree = RB_ROOT;
+
+static struct r2net_node r2net_nodes[R2NM_MAX_NODES];
+
+/* XXX someday we'll need better accounting */
+static struct socket *r2net_listen_sock;
+
+/*
+ * listen work is only queued by the listening socket callbacks on the
+ * r2net_wq. teardown detaches the callbacks before destroying the workqueue.
+ * quorum work is queued as sock containers are shut down.  stop_listening
+ * tears down all the node's sock containers, preventing future shutdowns
+ * and queued quorum work, before canceling delayed quorum work and
+ * destroying the work queue.
+ */
+static struct workqueue_struct *r2net_wq;
+static struct work_struct r2net_listen_work;
+
+static struct r2hb_callback_func r2net_hb_up, r2net_hb_down;
+#define R2NET_HB_PRI 0x1
+
+static struct r2net_handshake *r2net_hand;
+static struct r2net_msg *r2net_keep_req, *r2net_keep_resp;
+
+static int r2net_sys_err_translations[R2NET_ERR_MAX] = {
+ [R2NET_ERR_NONE] = 0,
+ [R2NET_ERR_NO_HNDLR] = -ENOPROTOOPT,
+ [R2NET_ERR_OVERFLOW] = -EOVERFLOW,
+ [R2NET_ERR_DIED] = -EHOSTDOWN,};
+
+/* can't quite avoid *all* internal declarations :/ */
+static void r2net_sc_connect_completed(struct work_struct *work);
+static void r2net_rx_until_empty(struct work_struct *work);
+static void r2net_shutdown_sc(struct work_struct *work);
+static void r2net_listen_data_ready(struct sock *sk, int bytes);
+static void r2net_sc_send_keep_req(struct work_struct *work);
+static void r2net_idle_timer(unsigned long data);
+static void r2net_sc_postpone_idle(struct r2net_sock_container *sc);
+static void r2net_sc_reset_idle_timer(struct r2net_sock_container *sc);
+
+#ifdef CONFIG_DEBUG_FS
+static void r2net_init_nst(struct r2net_send_tracking *nst, u32 msgtype,
+ u32 msgkey, struct task_struct *task, u8 node)
+{
+ INIT_LIST_HEAD(&nst->st_net_debug_item);
+ nst->st_task = task;
+ nst->st_msg_type = msgtype;
+ nst->st_msg_key = msgkey;
+ nst->st_node = node;
+}
+
+static inline void r2net_set_nst_sock_time(struct r2net_send_tracking *nst)
+{
+ nst->st_sock_time = ktime_get();
+}
+
+static inline void r2net_set_nst_send_time(struct r2net_send_tracking *nst)
+{
+ nst->st_send_time = ktime_get();
+}
+
+static inline void r2net_set_nst_status_time(struct r2net_send_tracking *nst)
+{
+ nst->st_status_time = ktime_get();
+}
+
+static inline void r2net_set_nst_sock_container(struct r2net_send_tracking *nst,
+ struct r2net_sock_container *sc)
+{
+ nst->st_sc = sc;
+}
+
+static inline void r2net_set_nst_msg_id(struct r2net_send_tracking *nst,
+ u32 msg_id)
+{
+ nst->st_id = msg_id;
+}
+
+static inline void r2net_set_sock_timer(struct r2net_sock_container *sc)
+{
+ sc->sc_tv_timer = ktime_get();
+}
+
+static inline void r2net_set_data_ready_time(struct r2net_sock_container *sc)
+{
+ sc->sc_tv_data_ready = ktime_get();
+}
+
+static inline void r2net_set_advance_start_time(struct r2net_sock_container *sc)
+{
+ sc->sc_tv_advance_start = ktime_get();
+}
+
+static inline void r2net_set_advance_stop_time(struct r2net_sock_container *sc)
+{
+ sc->sc_tv_advance_stop = ktime_get();
+}
+
+static inline void r2net_set_func_start_time(struct r2net_sock_container *sc)
+{
+ sc->sc_tv_func_start = ktime_get();
+}
+
+static inline void r2net_set_func_stop_time(struct r2net_sock_container *sc)
+{
+ sc->sc_tv_func_stop = ktime_get();
+}
+
+#else /* CONFIG_DEBUG_FS */
+# define r2net_init_nst(a, b, c, d, e)
+# define r2net_set_nst_sock_time(a)
+# define r2net_set_nst_send_time(a)
+# define r2net_set_nst_status_time(a)
+# define r2net_set_nst_sock_container(a, b)
+# define r2net_set_nst_msg_id(a, b)
+# define r2net_set_sock_timer(a)
+# define r2net_set_data_ready_time(a)
+# define r2net_set_advance_start_time(a)
+# define r2net_set_advance_stop_time(a)
+# define r2net_set_func_start_time(a)
+# define r2net_set_func_stop_time(a)
+#endif /* CONFIG_DEBUG_FS */
+
+#ifdef CONFIG_RAMSTER_FS_STATS
+static ktime_t r2net_get_func_run_time(struct r2net_sock_container *sc)
+{
+ return ktime_sub(sc->sc_tv_func_stop, sc->sc_tv_func_start);
+}
+
+static void r2net_update_send_stats(struct r2net_send_tracking *nst,
+ struct r2net_sock_container *sc)
+{
+ sc->sc_tv_status_total = ktime_add(sc->sc_tv_status_total,
+ ktime_sub(ktime_get(),
+ nst->st_status_time));
+ sc->sc_tv_send_total = ktime_add(sc->sc_tv_send_total,
+ ktime_sub(nst->st_status_time,
+ nst->st_send_time));
+ sc->sc_tv_acquiry_total = ktime_add(sc->sc_tv_acquiry_total,
+ ktime_sub(nst->st_send_time,
+ nst->st_sock_time));
+ sc->sc_send_count++;
+}
+
+static void r2net_update_recv_stats(struct r2net_sock_container *sc)
+{
+ sc->sc_tv_process_total = ktime_add(sc->sc_tv_process_total,
+ r2net_get_func_run_time(sc));
+ sc->sc_recv_count++;
+}
+
+#else
+
+# define r2net_update_send_stats(a, b)
+
+# define r2net_update_recv_stats(sc)
+
+#endif /* CONFIG_RAMSTER_FS_STATS */
+
+static inline int r2net_reconnect_delay(void)
+{
+ return r2nm_single_cluster->cl_reconnect_delay_ms;
+}
+
+static inline int r2net_keepalive_delay(void)
+{
+ return r2nm_single_cluster->cl_keepalive_delay_ms;
+}
+
+static inline int r2net_idle_timeout(void)
+{
+ return r2nm_single_cluster->cl_idle_timeout_ms;
+}
+
+static inline int r2net_sys_err_to_errno(enum r2net_system_error err)
+{
+ int trans;
+ BUG_ON(err >= R2NET_ERR_MAX);
+ trans = r2net_sys_err_translations[err];
+
+ /* Just in case we mess up the translation table above */
+ BUG_ON(err != R2NET_ERR_NONE && trans == 0);
+ return trans;
+}
+
+struct r2net_node *r2net_nn_from_num(u8 node_num)
+{
+ BUG_ON(node_num >= ARRAY_SIZE(r2net_nodes));
+ return &r2net_nodes[node_num];
+}
+
+static u8 r2net_num_from_nn(struct r2net_node *nn)
+{
+ BUG_ON(nn == NULL);
+ return nn - r2net_nodes;
+}
+
+/* ------------------------------------------------------------ */
+
+static int r2net_prep_nsw(struct r2net_node *nn, struct r2net_status_wait *nsw)
+{
+ int ret = 0;
+
+ do {
+ if (!idr_pre_get(&nn->nn_status_idr, GFP_ATOMIC)) {
+ ret = -EAGAIN;
+ break;
+ }
+ spin_lock(&nn->nn_lock);
+ ret = idr_get_new(&nn->nn_status_idr, nsw, &nsw->ns_id);
+ if (ret == 0)
+ list_add_tail(&nsw->ns_node_item,
+ &nn->nn_status_list);
+ spin_unlock(&nn->nn_lock);
+ } while (ret == -EAGAIN);
+
+ if (ret == 0) {
+ init_waitqueue_head(&nsw->ns_wq);
+ nsw->ns_sys_status = R2NET_ERR_NONE;
+ nsw->ns_status = 0;
+ }
+
+ return ret;
+}
+
+static void r2net_complete_nsw_locked(struct r2net_node *nn,
+ struct r2net_status_wait *nsw,
+ enum r2net_system_error sys_status,
+ s32 status)
+{
+ assert_spin_locked(&nn->nn_lock);
+
+ if (!list_empty(&nsw->ns_node_item)) {
+ list_del_init(&nsw->ns_node_item);
+ nsw->ns_sys_status = sys_status;
+ nsw->ns_status = status;
+ idr_remove(&nn->nn_status_idr, nsw->ns_id);
+ wake_up(&nsw->ns_wq);
+ }
+}
+
+static void r2net_complete_nsw(struct r2net_node *nn,
+ struct r2net_status_wait *nsw,
+ u64 id, enum r2net_system_error sys_status,
+ s32 status)
+{
+ spin_lock(&nn->nn_lock);
+ if (nsw == NULL) {
+ if (id > INT_MAX)
+ goto out;
+
+ nsw = idr_find(&nn->nn_status_idr, id);
+ if (nsw == NULL)
+ goto out;
+ }
+
+ r2net_complete_nsw_locked(nn, nsw, sys_status, status);
+
+out:
+ spin_unlock(&nn->nn_lock);
+ return;
+}
+
+static void r2net_complete_nodes_nsw(struct r2net_node *nn)
+{
+ struct r2net_status_wait *nsw, *tmp;
+ unsigned int num_kills = 0;
+
+ assert_spin_locked(&nn->nn_lock);
+
+ list_for_each_entry_safe(nsw, tmp, &nn->nn_status_list, ns_node_item) {
+ r2net_complete_nsw_locked(nn, nsw, R2NET_ERR_DIED, 0);
+ num_kills++;
+ }
+
+ mlog(0, "completed %d messages for node %u\n", num_kills,
+ r2net_num_from_nn(nn));
+}
+
+static int r2net_nsw_completed(struct r2net_node *nn,
+ struct r2net_status_wait *nsw)
+{
+ int completed;
+ spin_lock(&nn->nn_lock);
+ completed = list_empty(&nsw->ns_node_item);
+ spin_unlock(&nn->nn_lock);
+ return completed;
+}
+
+/* ------------------------------------------------------------ */
+
+static void sc_kref_release(struct kref *kref)
+{
+ struct r2net_sock_container *sc = container_of(kref,
+ struct r2net_sock_container, sc_kref);
+ BUG_ON(timer_pending(&sc->sc_idle_timeout));
+
+ sclog(sc, "releasing\n");
+
+ if (sc->sc_sock) {
+ sock_release(sc->sc_sock);
+ sc->sc_sock = NULL;
+ }
+
+ r2nm_undepend_item(&sc->sc_node->nd_item);
+ r2nm_node_put(sc->sc_node);
+ sc->sc_node = NULL;
+
+ r2net_debug_del_sc(sc);
+ kfree(sc);
+}
+
+static void sc_put(struct r2net_sock_container *sc)
+{
+ sclog(sc, "put\n");
+ kref_put(&sc->sc_kref, sc_kref_release);
+}
+static void sc_get(struct r2net_sock_container *sc)
+{
+ sclog(sc, "get\n");
+ kref_get(&sc->sc_kref);
+}
+static struct r2net_sock_container *sc_alloc(struct r2nm_node *node)
+{
+ struct r2net_sock_container *sc, *ret = NULL;
+ struct page *page = NULL;
+ int status = 0;
+
+ page = alloc_page(GFP_NOFS);
+ sc = kzalloc(sizeof(*sc), GFP_NOFS);
+ if (sc == NULL || page == NULL)
+ goto out;
+
+ kref_init(&sc->sc_kref);
+ r2nm_node_get(node);
+ sc->sc_node = node;
+
+ /* pin the node item of the remote node */
+ status = r2nm_depend_item(&node->nd_item);
+ if (status) {
+ mlog_errno(status);
+ r2nm_node_put(node);
+ goto out;
+ }
+ INIT_WORK(&sc->sc_connect_work, r2net_sc_connect_completed);
+ INIT_WORK(&sc->sc_rx_work, r2net_rx_until_empty);
+ INIT_WORK(&sc->sc_shutdown_work, r2net_shutdown_sc);
+ INIT_DELAYED_WORK(&sc->sc_keepalive_work, r2net_sc_send_keep_req);
+
+ init_timer(&sc->sc_idle_timeout);
+ sc->sc_idle_timeout.function = r2net_idle_timer;
+ sc->sc_idle_timeout.data = (unsigned long)sc;
+
+ sclog(sc, "alloced\n");
+
+ ret = sc;
+ sc->sc_page = page;
+ r2net_debug_add_sc(sc);
+ sc = NULL;
+ page = NULL;
+
+out:
+ if (page)
+ __free_page(page);
+ kfree(sc);
+
+ return ret;
+}
+
+/* ------------------------------------------------------------ */
+
+static void r2net_sc_queue_work(struct r2net_sock_container *sc,
+ struct work_struct *work)
+{
+ sc_get(sc);
+ if (!queue_work(r2net_wq, work))
+ sc_put(sc);
+}
+static void r2net_sc_queue_delayed_work(struct r2net_sock_container *sc,
+ struct delayed_work *work,
+ int delay)
+{
+ sc_get(sc);
+ if (!queue_delayed_work(r2net_wq, work, delay))
+ sc_put(sc);
+}
+static void r2net_sc_cancel_delayed_work(struct r2net_sock_container *sc,
+ struct delayed_work *work)
+{
+ if (cancel_delayed_work(work))
+ sc_put(sc);
+}
+
+static atomic_t r2net_connected_peers = ATOMIC_INIT(0);
+
+int r2net_num_connected_peers(void)
+{
+ return atomic_read(&r2net_connected_peers);
+}
+
+static void r2net_set_nn_state(struct r2net_node *nn,
+ struct r2net_sock_container *sc,
+ unsigned valid, int err)
+{
+ int was_valid = nn->nn_sc_valid;
+ int was_err = nn->nn_persistent_error;
+ struct r2net_sock_container *old_sc = nn->nn_sc;
+
+ assert_spin_locked(&nn->nn_lock);
+
+ if (old_sc && !sc)
+ atomic_dec(&r2net_connected_peers);
+ else if (!old_sc && sc)
+ atomic_inc(&r2net_connected_peers);
+
+ /* the node num comparison and single connect/accept path should stop
+	 * a non-null sc from being overwritten with another */
+ BUG_ON(sc && nn->nn_sc && nn->nn_sc != sc);
+ mlog_bug_on_msg(err && valid, "err %d valid %u\n", err, valid);
+ mlog_bug_on_msg(valid && !sc, "valid %u sc %p\n", valid, sc);
+
+ if (was_valid && !valid && err == 0)
+ err = -ENOTCONN;
+
+ mlog(ML_CONN, "node %u sc: %p -> %p, valid %u -> %u, err %d -> %d\n",
+ r2net_num_from_nn(nn), nn->nn_sc, sc, nn->nn_sc_valid, valid,
+ nn->nn_persistent_error, err);
+
+ nn->nn_sc = sc;
+ nn->nn_sc_valid = valid ? 1 : 0;
+ nn->nn_persistent_error = err;
+
+ /* mirrors r2net_tx_can_proceed() */
+ if (nn->nn_persistent_error || nn->nn_sc_valid)
+ wake_up(&nn->nn_sc_wq);
+
+ if (!was_err && nn->nn_persistent_error) {
+ queue_delayed_work(r2net_wq, &nn->nn_still_up,
+ msecs_to_jiffies(R2NET_QUORUM_DELAY_MS));
+ }
+
+ if (was_valid && !valid) {
+ printk(KERN_NOTICE "ramster: No longer connected to "
+ SC_NODEF_FMT "\n",
+ old_sc->sc_node->nd_name, old_sc->sc_node->nd_num,
+ &old_sc->sc_node->nd_ipv4_address,
+ ntohs(old_sc->sc_node->nd_ipv4_port));
+ r2net_complete_nodes_nsw(nn);
+ }
+
+ if (!was_valid && valid) {
+ cancel_delayed_work(&nn->nn_connect_expired);
+ printk(KERN_NOTICE "ramster: %s " SC_NODEF_FMT "\n",
+ r2nm_this_node() > sc->sc_node->nd_num ?
+ "Connected to" : "Accepted connection from",
+ sc->sc_node->nd_name, sc->sc_node->nd_num,
+ &sc->sc_node->nd_ipv4_address,
+ ntohs(sc->sc_node->nd_ipv4_port));
+ }
+
+ /* trigger the connecting worker func as long as we're not valid,
+ * it will back off if it shouldn't connect. This can be called
+ * from node config teardown and so needs to be careful about
+ * the work queue actually being up. */
+ if (!valid && r2net_wq) {
+ unsigned long delay;
+ /* delay if we're within a RECONNECT_DELAY of the
+ * last attempt */
+ delay = (nn->nn_last_connect_attempt +
+ msecs_to_jiffies(r2net_reconnect_delay()))
+ - jiffies;
+ if (delay > msecs_to_jiffies(r2net_reconnect_delay()))
+ delay = 0;
+ mlog(ML_CONN, "queueing conn attempt in %lu jiffies\n", delay);
+ queue_delayed_work(r2net_wq, &nn->nn_connect_work, delay);
+
+ /*
+ * Delay the expired work after idle timeout.
+ *
+ * We might have lots of failed connection attempts that run
+ * through here but we only cancel the connect_expired work when
+ * a connection attempt succeeds. So only the first enqueue of
+ * the connect_expired work will do anything. The rest will see
+ * that it's already queued and do nothing.
+ */
+ delay += msecs_to_jiffies(r2net_idle_timeout());
+ queue_delayed_work(r2net_wq, &nn->nn_connect_expired, delay);
+ }
+
+ /* keep track of the nn's sc ref for the caller */
+ if ((old_sc == NULL) && sc)
+ sc_get(sc);
+ if (old_sc && (old_sc != sc)) {
+ r2net_sc_queue_work(old_sc, &old_sc->sc_shutdown_work);
+ sc_put(old_sc);
+ }
+}
+
+/* see r2net_register_callbacks() */
+static void r2net_data_ready(struct sock *sk, int bytes)
+{
+ void (*ready)(struct sock *sk, int bytes);
+
+ read_lock(&sk->sk_callback_lock);
+ if (sk->sk_user_data) {
+ struct r2net_sock_container *sc = sk->sk_user_data;
+ sclog(sc, "data_ready hit\n");
+ r2net_set_data_ready_time(sc);
+ r2net_sc_queue_work(sc, &sc->sc_rx_work);
+ ready = sc->sc_data_ready;
+ } else {
+ ready = sk->sk_data_ready;
+ }
+ read_unlock(&sk->sk_callback_lock);
+
+ ready(sk, bytes);
+}
+
+/* see r2net_register_callbacks() */
+static void r2net_state_change(struct sock *sk)
+{
+ void (*state_change)(struct sock *sk);
+ struct r2net_sock_container *sc;
+
+ read_lock(&sk->sk_callback_lock);
+ sc = sk->sk_user_data;
+ if (sc == NULL) {
+ state_change = sk->sk_state_change;
+ goto out;
+ }
+
+ sclog(sc, "state_change to %d\n", sk->sk_state);
+
+ state_change = sc->sc_state_change;
+
+ switch (sk->sk_state) {
+
+ /* ignore connecting sockets as they make progress */
+ case TCP_SYN_SENT:
+ case TCP_SYN_RECV:
+ break;
+ case TCP_ESTABLISHED:
+ r2net_sc_queue_work(sc, &sc->sc_connect_work);
+ break;
+ default:
+ printk(KERN_INFO "ramster: Connection to "
+ SC_NODEF_FMT " shutdown, state %d\n",
+ sc->sc_node->nd_name, sc->sc_node->nd_num,
+ &sc->sc_node->nd_ipv4_address,
+ ntohs(sc->sc_node->nd_ipv4_port), sk->sk_state);
+ r2net_sc_queue_work(sc, &sc->sc_shutdown_work);
+ break;
+
+ }
+out:
+ read_unlock(&sk->sk_callback_lock);
+ state_change(sk);
+}
+
+/*
+ * we register callbacks so we can queue work on events before calling
+ * the original callbacks. our callbacks are careful to test user_data
+ * to discover when they've raced with r2net_unregister_callbacks().
+ */
+static void r2net_register_callbacks(struct sock *sk,
+ struct r2net_sock_container *sc)
+{
+ write_lock_bh(&sk->sk_callback_lock);
+
+ /* accepted sockets inherit the old listen socket data ready */
+ if (sk->sk_data_ready == r2net_listen_data_ready) {
+ sk->sk_data_ready = sk->sk_user_data;
+ sk->sk_user_data = NULL;
+ }
+
+ BUG_ON(sk->sk_user_data != NULL);
+ sk->sk_user_data = sc;
+ sc_get(sc);
+
+ sc->sc_data_ready = sk->sk_data_ready;
+ sc->sc_state_change = sk->sk_state_change;
+ sk->sk_data_ready = r2net_data_ready;
+ sk->sk_state_change = r2net_state_change;
+
+ mutex_init(&sc->sc_send_lock);
+
+ write_unlock_bh(&sk->sk_callback_lock);
+}
+
+static int r2net_unregister_callbacks(struct sock *sk,
+ struct r2net_sock_container *sc)
+{
+ int ret = 0;
+
+ write_lock_bh(&sk->sk_callback_lock);
+ if (sk->sk_user_data == sc) {
+ ret = 1;
+ sk->sk_user_data = NULL;
+ sk->sk_data_ready = sc->sc_data_ready;
+ sk->sk_state_change = sc->sc_state_change;
+ }
+ write_unlock_bh(&sk->sk_callback_lock);
+
+ return ret;
+}
+
+/*
+ * this is a little helper that is called by callers who have seen a problem
+ * with an sc and want to detach it from the nn if someone already hasn't beat
+ * them to it. if an error is given then the shutdown will be persistent
+ * and pending transmits will be canceled.
+ */
+static void r2net_ensure_shutdown(struct r2net_node *nn,
+ struct r2net_sock_container *sc,
+ int err)
+{
+ spin_lock(&nn->nn_lock);
+ if (nn->nn_sc == sc)
+ r2net_set_nn_state(nn, NULL, 0, err);
+ spin_unlock(&nn->nn_lock);
+}
+
+/*
+ * This work queue function performs the blocking parts of socket shutdown. A
+ * few paths lead here. set_nn_state will trigger this callback if it sees an
+ * sc detached from the nn. state_change will also trigger this callback
+ * directly when it sees errors. In that case we need to call set_nn_state
+ * ourselves as state_change couldn't get the nn_lock and call set_nn_state
+ * itself.
+ */
+static void r2net_shutdown_sc(struct work_struct *work)
+{
+ struct r2net_sock_container *sc =
+ container_of(work, struct r2net_sock_container,
+ sc_shutdown_work);
+ struct r2net_node *nn = r2net_nn_from_num(sc->sc_node->nd_num);
+
+ sclog(sc, "shutting down\n");
+
+ /* drop the callbacks ref and call shutdown only once */
+ if (r2net_unregister_callbacks(sc->sc_sock->sk, sc)) {
+ /* we shouldn't flush as we're in the thread, the
+ * races with pending sc work structs are harmless */
+ del_timer_sync(&sc->sc_idle_timeout);
+ r2net_sc_cancel_delayed_work(sc, &sc->sc_keepalive_work);
+ sc_put(sc);
+ kernel_sock_shutdown(sc->sc_sock, SHUT_RDWR);
+ }
+
+ /* not fatal so failed connects before the other guy has our
+ * heartbeat can be retried */
+ r2net_ensure_shutdown(nn, sc, 0);
+ sc_put(sc);
+}
+
+/* ------------------------------------------------------------ */
+
+static int r2net_handler_cmp(struct r2net_msg_handler *nmh, u32 msg_type,
+ u32 key)
+{
+ int ret = memcmp(&nmh->nh_key, &key, sizeof(key));
+
+ if (ret == 0)
+ ret = memcmp(&nmh->nh_msg_type, &msg_type, sizeof(msg_type));
+
+ return ret;
+}
+
+static struct r2net_msg_handler *
+r2net_handler_tree_lookup(u32 msg_type, u32 key, struct rb_node ***ret_p,
+ struct rb_node **ret_parent)
+{
+ struct rb_node **p = &r2net_handler_tree.rb_node;
+ struct rb_node *parent = NULL;
+ struct r2net_msg_handler *nmh, *ret = NULL;
+ int cmp;
+
+ while (*p) {
+ parent = *p;
+ nmh = rb_entry(parent, struct r2net_msg_handler, nh_node);
+ cmp = r2net_handler_cmp(nmh, msg_type, key);
+
+ if (cmp < 0)
+ p = &(*p)->rb_left;
+ else if (cmp > 0)
+ p = &(*p)->rb_right;
+ else {
+ ret = nmh;
+ break;
+ }
+ }
+
+ if (ret_p != NULL)
+ *ret_p = p;
+ if (ret_parent != NULL)
+ *ret_parent = parent;
+
+ return ret;
+}
+
+static void r2net_handler_kref_release(struct kref *kref)
+{
+ struct r2net_msg_handler *nmh;
+ nmh = container_of(kref, struct r2net_msg_handler, nh_kref);
+
+ kfree(nmh);
+}
+
+static void r2net_handler_put(struct r2net_msg_handler *nmh)
+{
+ kref_put(&nmh->nh_kref, r2net_handler_kref_release);
+}
+
+/* max_len is protection for the handler func. incoming messages won't
+ * be given to the handler if their payload is longer than the max. */
+int r2net_register_handler(u32 msg_type, u32 key, u32 max_len,
+ r2net_msg_handler_func *func, void *data,
+ r2net_post_msg_handler_func *post_func,
+ struct list_head *unreg_list)
+{
+ struct r2net_msg_handler *nmh = NULL;
+ struct rb_node **p, *parent;
+ int ret = 0;
+
+ if (max_len > R2NET_MAX_PAYLOAD_BYTES) {
+ mlog(0, "max_len for message handler out of range: %u\n",
+ max_len);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (!msg_type) {
+ mlog(0, "no message type provided: %u, %p\n", msg_type, func);
+ ret = -EINVAL;
+ goto out;
+
+ }
+ if (!func) {
+ mlog(0, "no message handler provided: %u, %p\n",
+ msg_type, func);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ nmh = kzalloc(sizeof(struct r2net_msg_handler), GFP_NOFS);
+ if (nmh == NULL) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ nmh->nh_func = func;
+ nmh->nh_func_data = data;
+ nmh->nh_post_func = post_func;
+ nmh->nh_msg_type = msg_type;
+ nmh->nh_max_len = max_len;
+ nmh->nh_key = key;
+ /* the tree and list get this ref.. they're both removed in
+ * unregister when this ref is dropped */
+ kref_init(&nmh->nh_kref);
+ INIT_LIST_HEAD(&nmh->nh_unregister_item);
+
+ write_lock(&r2net_handler_lock);
+ if (r2net_handler_tree_lookup(msg_type, key, &p, &parent))
+ ret = -EEXIST;
+ else {
+ rb_link_node(&nmh->nh_node, parent, p);
+ rb_insert_color(&nmh->nh_node, &r2net_handler_tree);
+ list_add_tail(&nmh->nh_unregister_item, unreg_list);
+
+ mlog(ML_TCP, "registered handler func %p type %u key %08x\n",
+ func, msg_type, key);
+ /* we've had some trouble with handlers seemingly vanishing. */
+ mlog_bug_on_msg(r2net_handler_tree_lookup(msg_type, key, &p,
+ &parent) == NULL,
+ "couldn't find handler we *just* registered "
+ "for type %u key %08x\n", msg_type, key);
+ }
+ write_unlock(&r2net_handler_lock);
+
+out:
+ if (ret)
+ kfree(nmh);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(r2net_register_handler);
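+
+/*
+ * Illustrative sketch only (MY_MSG_TYPE, MY_KEY, struct my_req and
+ * my_handler are hypothetical names): callers pair registration and
+ * unregistration through the same list, roughly:
+ *
+ *	static LIST_HEAD(my_unreg_list);
+ *
+ *	ret = r2net_register_handler(MY_MSG_TYPE, MY_KEY,
+ *				     sizeof(struct my_req), my_handler,
+ *				     NULL, NULL, &my_unreg_list);
+ *	...
+ *	r2net_unregister_handler_list(&my_unreg_list);
+ */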
+
+void r2net_unregister_handler_list(struct list_head *list)
+{
+ struct r2net_msg_handler *nmh, *n;
+
+ write_lock(&r2net_handler_lock);
+ list_for_each_entry_safe(nmh, n, list, nh_unregister_item) {
+ mlog(ML_TCP, "unregistering handler func %p type %u key %08x\n",
+ nmh->nh_func, nmh->nh_msg_type, nmh->nh_key);
+ rb_erase(&nmh->nh_node, &r2net_handler_tree);
+ list_del_init(&nmh->nh_unregister_item);
+ kref_put(&nmh->nh_kref, r2net_handler_kref_release);
+ }
+ write_unlock(&r2net_handler_lock);
+}
+EXPORT_SYMBOL_GPL(r2net_unregister_handler_list);
+
+static struct r2net_msg_handler *r2net_handler_get(u32 msg_type, u32 key)
+{
+ struct r2net_msg_handler *nmh;
+
+ read_lock(&r2net_handler_lock);
+ nmh = r2net_handler_tree_lookup(msg_type, key, NULL, NULL);
+ if (nmh)
+ kref_get(&nmh->nh_kref);
+ read_unlock(&r2net_handler_lock);
+
+ return nmh;
+}
+
+/* ------------------------------------------------------------ */
+
+static int r2net_recv_tcp_msg(struct socket *sock, void *data, size_t len)
+{
+ int ret;
+ mm_segment_t oldfs;
+ struct kvec vec = {
+ .iov_len = len,
+ .iov_base = data,
+ };
+ struct msghdr msg = {
+ .msg_iovlen = 1,
+ .msg_iov = (struct iovec *)&vec,
+ .msg_flags = MSG_DONTWAIT,
+ };
+
+ oldfs = get_fs();
+ set_fs(get_ds());
+ ret = sock_recvmsg(sock, &msg, len, msg.msg_flags);
+ set_fs(oldfs);
+
+ return ret;
+}
+
+static int r2net_send_tcp_msg(struct socket *sock, struct kvec *vec,
+ size_t veclen, size_t total)
+{
+ int ret;
+ mm_segment_t oldfs;
+ struct msghdr msg = {
+ .msg_iov = (struct iovec *)vec,
+ .msg_iovlen = veclen,
+ };
+
+ if (sock == NULL) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ oldfs = get_fs();
+ set_fs(get_ds());
+ ret = sock_sendmsg(sock, &msg, total);
+ set_fs(oldfs);
+ if (ret != total) {
+ mlog(ML_ERROR, "sendmsg returned %d instead of %zu\n", ret,
+ total);
+ if (ret >= 0)
+ ret = -EPIPE; /* should be smarter, I bet */
+ goto out;
+ }
+
+ ret = 0;
+out:
+ if (ret < 0)
+ mlog(0, "returning error: %d\n", ret);
+ return ret;
+}
+
+static void r2net_sendpage(struct r2net_sock_container *sc,
+ void *kmalloced_virt,
+ size_t size)
+{
+ struct r2net_node *nn = r2net_nn_from_num(sc->sc_node->nd_num);
+ ssize_t ret;
+
+ while (1) {
+ mutex_lock(&sc->sc_send_lock);
+ ret = sc->sc_sock->ops->sendpage(sc->sc_sock,
+ virt_to_page(kmalloced_virt),
+ (long)kmalloced_virt & ~PAGE_MASK,
+ size, MSG_DONTWAIT);
+ mutex_unlock(&sc->sc_send_lock);
+ if (ret == size)
+ break;
+ if (ret == (ssize_t)-EAGAIN) {
+ mlog(0, "sendpage of size %zu to " SC_NODEF_FMT
+ " returned EAGAIN\n", size, sc->sc_node->nd_name,
+ sc->sc_node->nd_num,
+ &sc->sc_node->nd_ipv4_address,
+ ntohs(sc->sc_node->nd_ipv4_port));
+ cond_resched();
+ continue;
+ }
+ mlog(ML_ERROR, "sendpage of size %zu to " SC_NODEF_FMT
+ " failed with %zd\n", size, sc->sc_node->nd_name,
+ sc->sc_node->nd_num, &sc->sc_node->nd_ipv4_address,
+ ntohs(sc->sc_node->nd_ipv4_port), ret);
+ r2net_ensure_shutdown(nn, sc, 0);
+ break;
+ }
+}
+
+static void r2net_init_msg(struct r2net_msg *msg, u16 data_len,
+ u16 msg_type, u32 key)
+{
+ memset(msg, 0, sizeof(struct r2net_msg));
+ msg->magic = cpu_to_be16(R2NET_MSG_MAGIC);
+ msg->data_len = cpu_to_be16(data_len);
+ msg->msg_type = cpu_to_be16(msg_type);
+ msg->sys_status = cpu_to_be32(R2NET_ERR_NONE);
+ msg->status = 0;
+ msg->key = cpu_to_be32(key);
+}
+
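+/* returns 1 when the caller can stop waiting: either a persistent error
+ * is set (*sc_ret NULL, *error nonzero) or a valid sc exists (a ref is
+ * taken and *error is 0).  a return of 0 means a connection is still
+ * being established and neither output is written. */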
+static int r2net_tx_can_proceed(struct r2net_node *nn,
+ struct r2net_sock_container **sc_ret,
+ int *error)
+{
+ int ret = 0;
+
+ spin_lock(&nn->nn_lock);
+ if (nn->nn_persistent_error) {
+ ret = 1;
+ *sc_ret = NULL;
+ *error = nn->nn_persistent_error;
+ } else if (nn->nn_sc_valid) {
+ kref_get(&nn->nn_sc->sc_kref);
+
+ ret = 1;
+ *sc_ret = nn->nn_sc;
+ *error = 0;
+ }
+ spin_unlock(&nn->nn_lock);
+
+ return ret;
+}
+
+/* Get a map of all nodes to which this node is currently connected */
+void r2net_fill_node_map(unsigned long *map, unsigned bytes)
+{
+ struct r2net_sock_container *sc;
+ int node, ret;
+
+ BUG_ON(bytes < (BITS_TO_LONGS(R2NM_MAX_NODES) * sizeof(unsigned long)));
+
+ memset(map, 0, bytes);
+ for (node = 0; node < R2NM_MAX_NODES; ++node) {
+		/* a return of 0 means we're still connecting and neither
+		 * sc nor ret has been written; skip to avoid using them
+		 * uninitialized */
+		if (!r2net_tx_can_proceed(r2net_nn_from_num(node), &sc, &ret))
+			continue;
+		if (!ret) {
+			set_bit(node, map);
+			sc_put(sc);
+		}
+ }
+}
+EXPORT_SYMBOL_GPL(r2net_fill_node_map);
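+
+/*
+ * Illustrative sketch ("peer" is a hypothetical node number): callers are
+ * expected to size the bitmap from R2NM_MAX_NODES, matching the BUG_ON
+ * above:
+ *
+ *	unsigned long map[BITS_TO_LONGS(R2NM_MAX_NODES)];
+ *
+ *	r2net_fill_node_map(map, sizeof(map));
+ *	if (test_bit(peer, map))
+ *		... peer is currently connected ...
+ */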
+
+int r2net_send_message_vec(u32 msg_type, u32 key, struct kvec *caller_vec,
+ size_t caller_veclen, u8 target_node, int *status)
+{
+ int ret = 0;
+ struct r2net_msg *msg = NULL;
+ size_t veclen, caller_bytes = 0;
+ struct kvec *vec = NULL;
+ struct r2net_sock_container *sc = NULL;
+ struct r2net_node *nn = r2net_nn_from_num(target_node);
+ struct r2net_status_wait nsw = {
+ .ns_node_item = LIST_HEAD_INIT(nsw.ns_node_item),
+ };
+ struct r2net_send_tracking nst;
+
+ /* this may be a general bug fix */
+ init_waitqueue_head(&nsw.ns_wq);
+
+ r2net_init_nst(&nst, msg_type, key, current, target_node);
+
+ if (r2net_wq == NULL) {
+ mlog(0, "attempt to tx without r2netd running\n");
+ ret = -ESRCH;
+ goto out;
+ }
+
+ if (caller_veclen == 0) {
+ mlog(0, "bad kvec array length\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ caller_bytes = iov_length((struct iovec *)caller_vec, caller_veclen);
+ if (caller_bytes > R2NET_MAX_PAYLOAD_BYTES) {
+ mlog(0, "total payload len %zu too large\n", caller_bytes);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (target_node == r2nm_this_node()) {
+ ret = -ELOOP;
+ goto out;
+ }
+
+ r2net_debug_add_nst(&nst);
+
+ r2net_set_nst_sock_time(&nst);
+
+ wait_event(nn->nn_sc_wq, r2net_tx_can_proceed(nn, &sc, &ret));
+ if (ret)
+ goto out;
+
+ r2net_set_nst_sock_container(&nst, sc);
+
+ veclen = caller_veclen + 1;
+ vec = kmalloc(sizeof(struct kvec) * veclen, GFP_ATOMIC);
+ if (vec == NULL) {
+		mlog(0, "failed to allocate a %zu element kvec!\n", veclen);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ msg = kmalloc(sizeof(struct r2net_msg), GFP_ATOMIC);
+ if (!msg) {
+		mlog(0, "failed to allocate an r2net_msg!\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ r2net_init_msg(msg, caller_bytes, msg_type, key);
+
+ vec[0].iov_len = sizeof(struct r2net_msg);
+ vec[0].iov_base = msg;
+ memcpy(&vec[1], caller_vec, caller_veclen * sizeof(struct kvec));
+
+ ret = r2net_prep_nsw(nn, &nsw);
+ if (ret)
+ goto out;
+
+ msg->msg_num = cpu_to_be32(nsw.ns_id);
+ r2net_set_nst_msg_id(&nst, nsw.ns_id);
+
+ r2net_set_nst_send_time(&nst);
+
+	/* the message header is already in network byte order; send
+	 * header and payload together */
+ mutex_lock(&sc->sc_send_lock);
+ ret = r2net_send_tcp_msg(sc->sc_sock, vec, veclen,
+ sizeof(struct r2net_msg) + caller_bytes);
+ mutex_unlock(&sc->sc_send_lock);
+ msglog(msg, "sending returned %d\n", ret);
+ if (ret < 0) {
+ mlog(0, "error returned from r2net_send_tcp_msg=%d\n", ret);
+ goto out;
+ }
+
+ /* wait on other node's handler */
+ r2net_set_nst_status_time(&nst);
+ wait_event(nsw.ns_wq, r2net_nsw_completed(nn, &nsw));
+
+ r2net_update_send_stats(&nst, sc);
+
+ /* Note that we avoid overwriting the callers status return
+ * variable if a system error was reported on the other
+ * side. Callers beware. */
+ ret = r2net_sys_err_to_errno(nsw.ns_sys_status);
+ if (status && !ret)
+ *status = nsw.ns_status;
+
+ mlog(0, "woken, returning system status %d, user status %d\n",
+ ret, nsw.ns_status);
+out:
+ r2net_debug_del_nst(&nst); /* must be before dropping sc and node */
+ if (sc)
+ sc_put(sc);
+ kfree(vec);
+ kfree(msg);
+ r2net_complete_nsw(nn, &nsw, 0, 0, 0);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(r2net_send_message_vec);
+
+int r2net_send_message(u32 msg_type, u32 key, void *data, u32 len,
+ u8 target_node, int *status)
+{
+ struct kvec vec = {
+ .iov_base = data,
+ .iov_len = len,
+ };
+ return r2net_send_message_vec(msg_type, key, &vec, 1,
+ target_node, status);
+}
+EXPORT_SYMBOL_GPL(r2net_send_message);
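+
+/*
+ * Illustrative sketch (MY_MSG_TYPE, MY_KEY and req are hypothetical): the
+ * call blocks until the remote handler has run; 0 means the transport
+ * succeeded and *status carries the remote handler's return value:
+ *
+ *	int status;
+ *
+ *	ret = r2net_send_message(MY_MSG_TYPE, MY_KEY, &req, sizeof(req),
+ *				 target_node, &status);
+ *	if (ret == 0)
+ *		... status holds the remote handler's result ...
+ */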
+
+static int r2net_send_status_magic(struct socket *sock, struct r2net_msg *hdr,
+ enum r2net_system_error syserr, int err)
+{
+ struct kvec vec = {
+ .iov_base = hdr,
+ .iov_len = sizeof(struct r2net_msg),
+ };
+
+ BUG_ON(syserr >= R2NET_ERR_MAX);
+
+ /* leave other fields intact from the incoming message, msg_num
+ * in particular */
+ hdr->sys_status = cpu_to_be32(syserr);
+ hdr->status = cpu_to_be32(err);
+ /* twiddle the magic */
+ hdr->magic = cpu_to_be16(R2NET_MSG_STATUS_MAGIC);
+ hdr->data_len = 0;
+
+ msglog(hdr, "about to send status magic %d\n", err);
+	/* hdr is in network byte order throughout; send it as-is */
+ return r2net_send_tcp_msg(sock, &vec, 1, sizeof(struct r2net_msg));
+}
+
+/*
+ * "data magic" is a long version of "status magic" where the message
+ * payload actually contains data to be passed in reply to certain messages
+ */
+static int r2net_send_data_magic(struct r2net_sock_container *sc,
+ struct r2net_msg *hdr,
+ void *data, size_t data_len,
+ enum r2net_system_error syserr, int err)
+{
+ struct kvec vec[2];
+ int ret;
+
+ vec[0].iov_base = hdr;
+ vec[0].iov_len = sizeof(struct r2net_msg);
+ vec[1].iov_base = data;
+ vec[1].iov_len = data_len;
+
+ BUG_ON(syserr >= R2NET_ERR_MAX);
+
+ /* leave other fields intact from the incoming message, msg_num
+ * in particular */
+ hdr->sys_status = cpu_to_be32(syserr);
+ hdr->status = cpu_to_be32(err);
+ hdr->magic = cpu_to_be16(R2NET_MSG_DATA_MAGIC); /* twiddle magic */
+ hdr->data_len = cpu_to_be16(data_len);
+
+ msglog(hdr, "about to send data magic %d\n", err);
+	/* hdr is in network byte order throughout; send it as-is */
+ ret = r2net_send_tcp_msg(sc->sc_sock, vec, 2,
+ sizeof(struct r2net_msg) + data_len);
+ return ret;
+}
+
+/*
+ * called by a message handler to convert an otherwise normal reply
+ * message into a "data magic" message
+ */
+void r2net_force_data_magic(struct r2net_msg *hdr, u16 msgtype, u32 msgkey)
+{
+ hdr->magic = cpu_to_be16(R2NET_MSG_DATA_MAGIC);
+ hdr->msg_type = cpu_to_be16(msgtype);
+ hdr->key = cpu_to_be32(msgkey);
+}
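+
+/*
+ * Sketch of the intended flow (the async-get request handler in r2net.c
+ * below is the real example): a handler points *ret_data at its reply
+ * buffer, forces data magic on the received hdr, and returns the payload
+ * length; r2net_process_message() then replies with r2net_send_data_magic()
+ * instead of a plain status.  my_reply_buf, my_reply_len, MY_REPLY_TYPE and
+ * MY_KEY are hypothetical:
+ *
+ *	static int my_req_handler(struct r2net_msg *msg, u32 len,
+ *				  void *data, void **ret_data)
+ *	{
+ *		*ret_data = my_reply_buf;
+ *		r2net_force_data_magic(msg, MY_REPLY_TYPE, MY_KEY);
+ *		return my_reply_len;
+ *	}
+ */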
+
+/* this returns -errno if the header was unknown or too large, etc.
+ * after this is called the buffer is reused for the next message */
+static int r2net_process_message(struct r2net_sock_container *sc,
+ struct r2net_msg *hdr)
+{
+ struct r2net_node *nn = r2net_nn_from_num(sc->sc_node->nd_num);
+ int ret = 0, handler_status;
+ enum r2net_system_error syserr;
+ struct r2net_msg_handler *nmh = NULL;
+ void *ret_data = NULL;
+ int data_magic = 0;
+
+ msglog(hdr, "processing message\n");
+
+ r2net_sc_postpone_idle(sc);
+
+ switch (be16_to_cpu(hdr->magic)) {
+
+ case R2NET_MSG_STATUS_MAGIC:
+ /* special type for returning message status */
+ r2net_complete_nsw(nn, NULL, be32_to_cpu(hdr->msg_num),
+ be32_to_cpu(hdr->sys_status),
+ be32_to_cpu(hdr->status));
+ goto out;
+ case R2NET_MSG_KEEP_REQ_MAGIC:
+ r2net_sendpage(sc, r2net_keep_resp, sizeof(*r2net_keep_resp));
+ goto out;
+ case R2NET_MSG_KEEP_RESP_MAGIC:
+ goto out;
+ case R2NET_MSG_MAGIC:
+ break;
+ case R2NET_MSG_DATA_MAGIC:
+ /*
+ * unlike a normal status magic, a data magic DOES
+ * (MUST) have a handler, so the control flow is
+ * a little funky here as a result
+ */
+ data_magic = 1;
+ break;
+ default:
+ msglog(hdr, "bad magic\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* find a handler for it */
+ handler_status = 0;
+ nmh = r2net_handler_get(be16_to_cpu(hdr->msg_type),
+ be32_to_cpu(hdr->key));
+ if (!nmh) {
+ mlog(ML_TCP, "couldn't find handler for type %u key %08x\n",
+ be16_to_cpu(hdr->msg_type), be32_to_cpu(hdr->key));
+ syserr = R2NET_ERR_NO_HNDLR;
+ goto out_respond;
+ }
+
+ syserr = R2NET_ERR_NONE;
+
+ if (be16_to_cpu(hdr->data_len) > nmh->nh_max_len)
+ syserr = R2NET_ERR_OVERFLOW;
+
+ if (syserr != R2NET_ERR_NONE)
+ goto out_respond;
+
+ r2net_set_func_start_time(sc);
+ sc->sc_msg_key = be32_to_cpu(hdr->key);
+ sc->sc_msg_type = be16_to_cpu(hdr->msg_type);
+ handler_status = (nmh->nh_func)(hdr, sizeof(struct r2net_msg) +
+ be16_to_cpu(hdr->data_len),
+ nmh->nh_func_data, &ret_data);
+ if (data_magic) {
+ /*
+ * handler handled data sent in reply to request
+ * so complete the transaction
+ */
+ r2net_complete_nsw(nn, NULL, be32_to_cpu(hdr->msg_num),
+ be32_to_cpu(hdr->sys_status), handler_status);
+ goto out;
+ }
+ /*
+ * handler changed magic to DATA_MAGIC to reply to request for data,
+ * implies ret_data points to data to return and handler_status
+ * is the number of bytes of data
+ */
+ if (be16_to_cpu(hdr->magic) == R2NET_MSG_DATA_MAGIC) {
+ ret = r2net_send_data_magic(sc, hdr,
+ ret_data, handler_status,
+ syserr, 0);
+ hdr = NULL;
+ mlog(0, "sending data reply %d, syserr %d returned %d\n",
+ handler_status, syserr, ret);
+ r2net_set_func_stop_time(sc);
+
+ r2net_update_recv_stats(sc);
+ goto out;
+ }
+ r2net_set_func_stop_time(sc);
+
+ r2net_update_recv_stats(sc);
+
+out_respond:
+ /* this destroys the hdr, so don't use it after this */
+ mutex_lock(&sc->sc_send_lock);
+ ret = r2net_send_status_magic(sc->sc_sock, hdr, syserr,
+ handler_status);
+ mutex_unlock(&sc->sc_send_lock);
+ hdr = NULL;
+ mlog(0, "sending handler status %d, syserr %d returned %d\n",
+ handler_status, syserr, ret);
+
+ if (nmh) {
+ BUG_ON(ret_data != NULL && nmh->nh_post_func == NULL);
+ if (nmh->nh_post_func)
+ (nmh->nh_post_func)(handler_status, nmh->nh_func_data,
+ ret_data);
+ }
+
+out:
+ if (nmh)
+ r2net_handler_put(nmh);
+ return ret;
+}
+
+static int r2net_check_handshake(struct r2net_sock_container *sc)
+{
+ struct r2net_handshake *hand = page_address(sc->sc_page);
+ struct r2net_node *nn = r2net_nn_from_num(sc->sc_node->nd_num);
+
+ if (hand->protocol_version != cpu_to_be64(R2NET_PROTOCOL_VERSION)) {
+ printk(KERN_NOTICE "ramster: " SC_NODEF_FMT " Advertised net "
+ "protocol version %llu but %llu is required. "
+ "Disconnecting.\n", sc->sc_node->nd_name,
+ sc->sc_node->nd_num, &sc->sc_node->nd_ipv4_address,
+ ntohs(sc->sc_node->nd_ipv4_port),
+ (unsigned long long)be64_to_cpu(hand->protocol_version),
+ R2NET_PROTOCOL_VERSION);
+
+		/* don't bother reconnecting if it's the wrong version. */
+ r2net_ensure_shutdown(nn, sc, -ENOTCONN);
+ return -1;
+ }
+
+ /*
+ * Ensure timeouts are consistent with other nodes, otherwise
+ * we can end up with one node thinking that the other must be down,
+ * but isn't. This can ultimately cause corruption.
+ */
+ if (be32_to_cpu(hand->r2net_idle_timeout_ms) !=
+ r2net_idle_timeout()) {
+ printk(KERN_NOTICE "ramster: " SC_NODEF_FMT " uses a network "
+ "idle timeout of %u ms, but we use %u ms locally. "
+ "Disconnecting.\n", sc->sc_node->nd_name,
+ sc->sc_node->nd_num, &sc->sc_node->nd_ipv4_address,
+ ntohs(sc->sc_node->nd_ipv4_port),
+ be32_to_cpu(hand->r2net_idle_timeout_ms),
+ r2net_idle_timeout());
+ r2net_ensure_shutdown(nn, sc, -ENOTCONN);
+ return -1;
+ }
+
+ if (be32_to_cpu(hand->r2net_keepalive_delay_ms) !=
+ r2net_keepalive_delay()) {
+ printk(KERN_NOTICE "ramster: " SC_NODEF_FMT " uses a keepalive "
+ "delay of %u ms, but we use %u ms locally. "
+ "Disconnecting.\n", sc->sc_node->nd_name,
+ sc->sc_node->nd_num, &sc->sc_node->nd_ipv4_address,
+ ntohs(sc->sc_node->nd_ipv4_port),
+ be32_to_cpu(hand->r2net_keepalive_delay_ms),
+ r2net_keepalive_delay());
+ r2net_ensure_shutdown(nn, sc, -ENOTCONN);
+ return -1;
+ }
+
+ if (be32_to_cpu(hand->r2hb_heartbeat_timeout_ms) !=
+ R2HB_MAX_WRITE_TIMEOUT_MS) {
+ printk(KERN_NOTICE "ramster: " SC_NODEF_FMT " uses a heartbeat "
+ "timeout of %u ms, but we use %u ms locally. "
+ "Disconnecting.\n", sc->sc_node->nd_name,
+ sc->sc_node->nd_num, &sc->sc_node->nd_ipv4_address,
+ ntohs(sc->sc_node->nd_ipv4_port),
+ be32_to_cpu(hand->r2hb_heartbeat_timeout_ms),
+ R2HB_MAX_WRITE_TIMEOUT_MS);
+ r2net_ensure_shutdown(nn, sc, -ENOTCONN);
+ return -1;
+ }
+
+ sc->sc_handshake_ok = 1;
+
+ spin_lock(&nn->nn_lock);
+ /* set valid and queue the idle timers only if it hasn't been
+ * shut down already */
+ if (nn->nn_sc == sc) {
+ r2net_sc_reset_idle_timer(sc);
+ atomic_set(&nn->nn_timeout, 0);
+ r2net_set_nn_state(nn, sc, 1, 0);
+ }
+ spin_unlock(&nn->nn_lock);
+
+ /* shift everything up as though it wasn't there */
+ sc->sc_page_off -= sizeof(struct r2net_handshake);
+ if (sc->sc_page_off)
+ memmove(hand, hand + 1, sc->sc_page_off);
+
+ return 0;
+}
+
+/* this demuxes the queued rx bytes into header or payload bits and calls
+ * handlers as each full message is read off the socket. it returns -error,
+ * 0 for eof, or > 0 for progress made. */
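+/* the rx path accumulates bytes in sc_page at sc_page_off through three
+ * stages: the fixed-size handshake (once per socket), then the r2net_msg
+ * header, then data_len bytes of payload.  a short read in any stage just
+ * returns and the next data_ready pass picks up where this left off. */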
+static int r2net_advance_rx(struct r2net_sock_container *sc)
+{
+ struct r2net_msg *hdr;
+ int ret = 0;
+ void *data;
+ size_t datalen;
+
+ sclog(sc, "receiving\n");
+ r2net_set_advance_start_time(sc);
+
+ if (unlikely(sc->sc_handshake_ok == 0)) {
+ if (sc->sc_page_off < sizeof(struct r2net_handshake)) {
+ data = page_address(sc->sc_page) + sc->sc_page_off;
+ datalen = sizeof(struct r2net_handshake) -
+ sc->sc_page_off;
+ ret = r2net_recv_tcp_msg(sc->sc_sock, data, datalen);
+ if (ret > 0)
+ sc->sc_page_off += ret;
+ }
+
+ if (sc->sc_page_off == sizeof(struct r2net_handshake)) {
+ r2net_check_handshake(sc);
+ if (unlikely(sc->sc_handshake_ok == 0))
+ ret = -EPROTO;
+ }
+ goto out;
+ }
+
+ /* do we need more header? */
+ if (sc->sc_page_off < sizeof(struct r2net_msg)) {
+ data = page_address(sc->sc_page) + sc->sc_page_off;
+ datalen = sizeof(struct r2net_msg) - sc->sc_page_off;
+ ret = r2net_recv_tcp_msg(sc->sc_sock, data, datalen);
+ if (ret > 0) {
+ sc->sc_page_off += ret;
+			/* we can only get here once as we cross
+			 * from under to over; validate the header's
+			 * data_len while it is complete */
+ if (sc->sc_page_off == sizeof(struct r2net_msg)) {
+ hdr = page_address(sc->sc_page);
+ if (be16_to_cpu(hdr->data_len) >
+ R2NET_MAX_PAYLOAD_BYTES)
+ ret = -EOVERFLOW;
+ }
+ }
+ if (ret <= 0)
+ goto out;
+ }
+
+ if (sc->sc_page_off < sizeof(struct r2net_msg)) {
+ /* oof, still don't have a header */
+ goto out;
+ }
+
+	/* the header was validated above when we first read it */
+ hdr = page_address(sc->sc_page);
+
+ msglog(hdr, "at page_off %zu\n", sc->sc_page_off);
+
+ /* do we need more payload? */
+ if (sc->sc_page_off - sizeof(struct r2net_msg) <
+ be16_to_cpu(hdr->data_len)) {
+ /* need more payload */
+ data = page_address(sc->sc_page) + sc->sc_page_off;
+ datalen = (sizeof(struct r2net_msg) +
+ be16_to_cpu(hdr->data_len)) -
+ sc->sc_page_off;
+ ret = r2net_recv_tcp_msg(sc->sc_sock, data, datalen);
+ if (ret > 0)
+ sc->sc_page_off += ret;
+ if (ret <= 0)
+ goto out;
+ }
+
+ if (sc->sc_page_off - sizeof(struct r2net_msg) ==
+ be16_to_cpu(hdr->data_len)) {
+ /* we can only get here once, the first time we read
+ * the payload.. so set ret to progress if the handler
+ * works out. after calling this the message is toast */
+ ret = r2net_process_message(sc, hdr);
+ if (ret == 0)
+ ret = 1;
+ sc->sc_page_off = 0;
+ }
+
+out:
+ sclog(sc, "ret = %d\n", ret);
+ r2net_set_advance_stop_time(sc);
+ return ret;
+}
+
+/* this work func is triggered by data ready. it reads until it can read no
+ * more. it interprets 0, eof, as fatal. if data_ready hits while we're doing
+ * our work the work struct will be marked and we'll be called again. */
+static void r2net_rx_until_empty(struct work_struct *work)
+{
+ struct r2net_sock_container *sc =
+ container_of(work, struct r2net_sock_container, sc_rx_work);
+ int ret;
+
+ do {
+ ret = r2net_advance_rx(sc);
+ } while (ret > 0);
+
+ if (ret <= 0 && ret != -EAGAIN) {
+ struct r2net_node *nn = r2net_nn_from_num(sc->sc_node->nd_num);
+ sclog(sc, "saw error %d, closing\n", ret);
+ /* not permanent so read failed handshake can retry */
+ r2net_ensure_shutdown(nn, sc, 0);
+ }
+
+ sc_put(sc);
+}
+
+static int r2net_set_nodelay(struct socket *sock)
+{
+ int ret, val = 1;
+ mm_segment_t oldfs;
+
+ oldfs = get_fs();
+ set_fs(KERNEL_DS);
+
+ /*
+ * Dear unsuspecting programmer,
+ *
+ * Don't use sock_setsockopt() for SOL_TCP. It doesn't check its level
+ * argument and assumes SOL_SOCKET so, say, your TCP_NODELAY will
+ * silently turn into SO_DEBUG.
+ *
+ * Yours,
+ * Keeper of hilariously fragile interfaces.
+ */
+ ret = sock->ops->setsockopt(sock, SOL_TCP, TCP_NODELAY,
+ (char __user *)&val, sizeof(val));
+
+ set_fs(oldfs);
+ return ret;
+}
+
+static void r2net_initialize_handshake(void)
+{
+ r2net_hand->r2hb_heartbeat_timeout_ms = cpu_to_be32(
+ R2HB_MAX_WRITE_TIMEOUT_MS);
+ r2net_hand->r2net_idle_timeout_ms = cpu_to_be32(r2net_idle_timeout());
+ r2net_hand->r2net_keepalive_delay_ms = cpu_to_be32(
+ r2net_keepalive_delay());
+ r2net_hand->r2net_reconnect_delay_ms = cpu_to_be32(
+ r2net_reconnect_delay());
+}
+
+/* ------------------------------------------------------------ */
+
+/* called when a connect completes and after a sock is accepted. the
+ * rx path will see the response and mark the sc valid */
+static void r2net_sc_connect_completed(struct work_struct *work)
+{
+ struct r2net_sock_container *sc =
+ container_of(work, struct r2net_sock_container,
+ sc_connect_work);
+
+ mlog(ML_MSG, "sc sending handshake with ver %llu id %llx\n",
+ (unsigned long long)R2NET_PROTOCOL_VERSION,
+ (unsigned long long)be64_to_cpu(r2net_hand->connector_id));
+
+ r2net_initialize_handshake();
+ r2net_sendpage(sc, r2net_hand, sizeof(*r2net_hand));
+ sc_put(sc);
+}
+
+/* this is called as a work_struct func. */
+static void r2net_sc_send_keep_req(struct work_struct *work)
+{
+ struct r2net_sock_container *sc =
+ container_of(work, struct r2net_sock_container,
+ sc_keepalive_work.work);
+
+ r2net_sendpage(sc, r2net_keep_req, sizeof(*r2net_keep_req));
+ sc_put(sc);
+}
+
+/* socket shutdown does a del_timer_sync against this as it tears down.
+ * we can't start this timer until we've got to the point in sc buildup
+ * where shutdown is going to be involved */
+static void r2net_idle_timer(unsigned long data)
+{
+ struct r2net_sock_container *sc = (struct r2net_sock_container *)data;
+#ifdef CONFIG_DEBUG_FS
+ unsigned long msecs = ktime_to_ms(ktime_get()) -
+ ktime_to_ms(sc->sc_tv_timer);
+#else
+ unsigned long msecs = r2net_idle_timeout();
+#endif
+
+ printk(KERN_NOTICE "ramster: Connection to " SC_NODEF_FMT " has been "
+ "idle for %lu.%lu secs, shutting it down.\n",
+ sc->sc_node->nd_name, sc->sc_node->nd_num,
+ &sc->sc_node->nd_ipv4_address, ntohs(sc->sc_node->nd_ipv4_port),
+ msecs / 1000, msecs % 1000);
+
+	/* Avoid spurious shutdowns: the old path below would set nn_timeout
+	 * so the next connection attempt could continue in
+	 * r2net_start_connect, then queue the shutdown work.  It's not
+	 * clear that is still necessary, so it stays disabled. */
+	pr_err("ramster_idle_timer, skipping shutdown work\n");
+#if 0
+ /* old code used to do these two lines */
+ atomic_set(&nn->nn_timeout, 1);
+ r2net_sc_queue_work(sc, &sc->sc_shutdown_work);
+#endif
+}
+
+static void r2net_sc_reset_idle_timer(struct r2net_sock_container *sc)
+{
+ r2net_sc_cancel_delayed_work(sc, &sc->sc_keepalive_work);
+ r2net_sc_queue_delayed_work(sc, &sc->sc_keepalive_work,
+ msecs_to_jiffies(r2net_keepalive_delay()));
+ r2net_set_sock_timer(sc);
+ mod_timer(&sc->sc_idle_timeout,
+ jiffies + msecs_to_jiffies(r2net_idle_timeout()));
+}
+
+static void r2net_sc_postpone_idle(struct r2net_sock_container *sc)
+{
+ /* Only push out an existing timer */
+ if (timer_pending(&sc->sc_idle_timeout))
+ r2net_sc_reset_idle_timer(sc);
+}
+
+/* this work func is kicked whenever a path sets the nn state which doesn't
+ * have valid set. This includes seeing hb come up, losing a connection,
+ * having a connect attempt fail, etc. This centralizes the logic which decides
+ * if a connect attempt should be made or if we should give up and all future
+ * transmit attempts should fail */
+static void r2net_start_connect(struct work_struct *work)
+{
+ struct r2net_node *nn =
+ container_of(work, struct r2net_node, nn_connect_work.work);
+ struct r2net_sock_container *sc = NULL;
+ struct r2nm_node *node = NULL, *mynode = NULL;
+ struct socket *sock = NULL;
+ struct sockaddr_in myaddr = {0, }, remoteaddr = {0, };
+ int ret = 0, stop;
+ unsigned int timeout;
+
+ /* if we're greater we initiate tx, otherwise we accept */
+ if (r2nm_this_node() <= r2net_num_from_nn(nn))
+ goto out;
+
+ /* watch for racing with tearing a node down */
+ node = r2nm_get_node_by_num(r2net_num_from_nn(nn));
+ if (node == NULL) {
+ ret = 0;
+ goto out;
+ }
+
+ mynode = r2nm_get_node_by_num(r2nm_this_node());
+ if (mynode == NULL) {
+ ret = 0;
+ goto out;
+ }
+
+ spin_lock(&nn->nn_lock);
+ /*
+ * see if we already have one pending or have given up.
+ * For nn_timeout, it is set when we close the connection
+ * because of the idle time out. So it means that we have
+ * at least connected to that node successfully once,
+ * now try to connect to it again.
+ */
+ timeout = atomic_read(&nn->nn_timeout);
+ stop = (nn->nn_sc ||
+ (nn->nn_persistent_error &&
+ (nn->nn_persistent_error != -ENOTCONN || timeout == 0)));
+ spin_unlock(&nn->nn_lock);
+ if (stop)
+ goto out;
+
+ nn->nn_last_connect_attempt = jiffies;
+
+ sc = sc_alloc(node);
+ if (sc == NULL) {
+ mlog(0, "couldn't allocate sc\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = sock_create(PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock);
+ if (ret < 0) {
+ mlog(0, "can't create socket: %d\n", ret);
+ goto out;
+ }
+ sc->sc_sock = sock; /* freed by sc_kref_release */
+
+ sock->sk->sk_allocation = GFP_ATOMIC;
+
+ myaddr.sin_family = AF_INET;
+ myaddr.sin_addr.s_addr = mynode->nd_ipv4_address;
+ myaddr.sin_port = htons(0); /* any port */
+
+ ret = sock->ops->bind(sock, (struct sockaddr *)&myaddr,
+ sizeof(myaddr));
+ if (ret) {
+ mlog(ML_ERROR, "bind failed with %d at address %pI4\n",
+ ret, &mynode->nd_ipv4_address);
+ goto out;
+ }
+
+ ret = r2net_set_nodelay(sc->sc_sock);
+ if (ret) {
+ mlog(ML_ERROR, "setting TCP_NODELAY failed with %d\n", ret);
+ goto out;
+ }
+
+ r2net_register_callbacks(sc->sc_sock->sk, sc);
+
+ spin_lock(&nn->nn_lock);
+ /* handshake completion will set nn->nn_sc_valid */
+ r2net_set_nn_state(nn, sc, 0, 0);
+ spin_unlock(&nn->nn_lock);
+
+ remoteaddr.sin_family = AF_INET;
+ remoteaddr.sin_addr.s_addr = node->nd_ipv4_address;
+ remoteaddr.sin_port = node->nd_ipv4_port;
+
+ ret = sc->sc_sock->ops->connect(sc->sc_sock,
+ (struct sockaddr *)&remoteaddr,
+ sizeof(remoteaddr),
+ O_NONBLOCK);
+ if (ret == -EINPROGRESS)
+ ret = 0;
+
+out:
+	if (ret && sc) {
+		printk(KERN_NOTICE "ramster: Connect attempt to " SC_NODEF_FMT
+		       " failed with errno %d\n", sc->sc_node->nd_name,
+		       sc->sc_node->nd_num, &sc->sc_node->nd_ipv4_address,
+		       ntohs(sc->sc_node->nd_ipv4_port), ret);
+		/* 0 err so that another will be queued and attempted
+		 * from set_nn_state */
+		r2net_ensure_shutdown(nn, sc, 0);
+	}
+ if (sc)
+ sc_put(sc);
+ if (node)
+ r2nm_node_put(node);
+ if (mynode)
+ r2nm_node_put(mynode);
+}
+
+static void r2net_connect_expired(struct work_struct *work)
+{
+ struct r2net_node *nn =
+ container_of(work, struct r2net_node, nn_connect_expired.work);
+
+ spin_lock(&nn->nn_lock);
+ if (!nn->nn_sc_valid) {
+ printk(KERN_NOTICE "ramster: No connection established with "
+ "node %u after %u.%u seconds, giving up.\n",
+ r2net_num_from_nn(nn),
+ r2net_idle_timeout() / 1000,
+ r2net_idle_timeout() % 1000);
+
+ r2net_set_nn_state(nn, NULL, 0, -ENOTCONN);
+ }
+ spin_unlock(&nn->nn_lock);
+}
+
+static void r2net_still_up(struct work_struct *work)
+{
+}
+
+/* ------------------------------------------------------------ */
+
+void r2net_disconnect_node(struct r2nm_node *node)
+{
+ struct r2net_node *nn = r2net_nn_from_num(node->nd_num);
+
+ /* don't reconnect until it's heartbeating again */
+ spin_lock(&nn->nn_lock);
+ atomic_set(&nn->nn_timeout, 0);
+ r2net_set_nn_state(nn, NULL, 0, -ENOTCONN);
+ spin_unlock(&nn->nn_lock);
+
+ if (r2net_wq) {
+ cancel_delayed_work(&nn->nn_connect_expired);
+ cancel_delayed_work(&nn->nn_connect_work);
+ cancel_delayed_work(&nn->nn_still_up);
+ flush_workqueue(r2net_wq);
+ }
+}
+
+static void r2net_hb_node_down_cb(struct r2nm_node *node, int node_num,
+ void *data)
+{
+ if (!node)
+ return;
+
+ if (node_num != r2nm_this_node())
+ r2net_disconnect_node(node);
+
+ BUG_ON(atomic_read(&r2net_connected_peers) < 0);
+}
+
+static void r2net_hb_node_up_cb(struct r2nm_node *node, int node_num,
+ void *data)
+{
+ struct r2net_node *nn = r2net_nn_from_num(node_num);
+
+ BUG_ON(!node);
+
+ /* ensure an immediate connect attempt */
+ nn->nn_last_connect_attempt = jiffies -
+ (msecs_to_jiffies(r2net_reconnect_delay()) + 1);
+
+ if (node_num != r2nm_this_node()) {
+		/* believe it or not, accept and node heartbeat checking
+		 * can succeed for this node before we get here, so only
+		 * use set_nn_state to clear the persistent error if that
+		 * hasn't already happened */
+ spin_lock(&nn->nn_lock);
+ atomic_set(&nn->nn_timeout, 0);
+ if (nn->nn_persistent_error)
+ r2net_set_nn_state(nn, NULL, 0, 0);
+ spin_unlock(&nn->nn_lock);
+ }
+}
+
+void r2net_unregister_hb_callbacks(void)
+{
+ r2hb_unregister_callback(NULL, &r2net_hb_up);
+ r2hb_unregister_callback(NULL, &r2net_hb_down);
+}
+
+int r2net_register_hb_callbacks(void)
+{
+ int ret;
+
+ r2hb_setup_callback(&r2net_hb_down, R2HB_NODE_DOWN_CB,
+ r2net_hb_node_down_cb, NULL, R2NET_HB_PRI);
+ r2hb_setup_callback(&r2net_hb_up, R2HB_NODE_UP_CB,
+ r2net_hb_node_up_cb, NULL, R2NET_HB_PRI);
+
+ ret = r2hb_register_callback(NULL, &r2net_hb_up);
+ if (ret == 0)
+ ret = r2hb_register_callback(NULL, &r2net_hb_down);
+
+ if (ret)
+ r2net_unregister_hb_callbacks();
+
+ return ret;
+}
+
+/* ------------------------------------------------------------ */
+
+static int r2net_accept_one(struct socket *sock)
+{
+ int ret, slen;
+ struct sockaddr_in sin;
+ struct socket *new_sock = NULL;
+ struct r2nm_node *node = NULL;
+ struct r2nm_node *local_node = NULL;
+ struct r2net_sock_container *sc = NULL;
+ struct r2net_node *nn;
+
+ BUG_ON(sock == NULL);
+ ret = sock_create_lite(sock->sk->sk_family, sock->sk->sk_type,
+ sock->sk->sk_protocol, &new_sock);
+ if (ret)
+ goto out;
+
+ new_sock->type = sock->type;
+ new_sock->ops = sock->ops;
+ ret = sock->ops->accept(sock, new_sock, O_NONBLOCK);
+ if (ret < 0)
+ goto out;
+
+ new_sock->sk->sk_allocation = GFP_ATOMIC;
+
+ ret = r2net_set_nodelay(new_sock);
+ if (ret) {
+ mlog(ML_ERROR, "setting TCP_NODELAY failed with %d\n", ret);
+ goto out;
+ }
+
+ slen = sizeof(sin);
+ ret = new_sock->ops->getname(new_sock, (struct sockaddr *) &sin,
+ &slen, 1);
+ if (ret < 0)
+ goto out;
+
+ node = r2nm_get_node_by_ip(sin.sin_addr.s_addr);
+ if (node == NULL) {
+ printk(KERN_NOTICE "ramster: Attempt to connect from unknown "
+ "node at %pI4:%d\n", &sin.sin_addr.s_addr,
+ ntohs(sin.sin_port));
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (r2nm_this_node() >= node->nd_num) {
+ local_node = r2nm_get_node_by_num(r2nm_this_node());
+ printk(KERN_NOTICE "ramster: Unexpected connect attempt seen "
+ "at node '%s' (%u, %pI4:%d) from node '%s' (%u, "
+ "%pI4:%d)\n", local_node->nd_name, local_node->nd_num,
+ &(local_node->nd_ipv4_address),
+ ntohs(local_node->nd_ipv4_port), node->nd_name,
+ node->nd_num, &sin.sin_addr.s_addr, ntohs(sin.sin_port));
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* this happens all the time when the other node sees our heartbeat
+ * and tries to connect before we see their heartbeat */
+ if (!r2hb_check_node_heartbeating_from_callback(node->nd_num)) {
+ mlog(ML_CONN, "attempt to connect from node '%s' at "
+ "%pI4:%d but it isn't heartbeating\n",
+ node->nd_name, &sin.sin_addr.s_addr,
+ ntohs(sin.sin_port));
+ ret = -EINVAL;
+ goto out;
+ }
+
+ nn = r2net_nn_from_num(node->nd_num);
+
+ spin_lock(&nn->nn_lock);
+ if (nn->nn_sc)
+ ret = -EBUSY;
+ else
+ ret = 0;
+ spin_unlock(&nn->nn_lock);
+ if (ret) {
+ printk(KERN_NOTICE "ramster: Attempt to connect from node '%s' "
+ "at %pI4:%d but it already has an open connection\n",
+ node->nd_name, &sin.sin_addr.s_addr,
+ ntohs(sin.sin_port));
+ goto out;
+ }
+
+ sc = sc_alloc(node);
+ if (sc == NULL) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ sc->sc_sock = new_sock;
+ new_sock = NULL;
+
+ spin_lock(&nn->nn_lock);
+ atomic_set(&nn->nn_timeout, 0);
+ r2net_set_nn_state(nn, sc, 0, 0);
+ spin_unlock(&nn->nn_lock);
+
+ r2net_register_callbacks(sc->sc_sock->sk, sc);
+ r2net_sc_queue_work(sc, &sc->sc_rx_work);
+
+ r2net_initialize_handshake();
+ r2net_sendpage(sc, r2net_hand, sizeof(*r2net_hand));
+
+out:
+ if (new_sock)
+ sock_release(new_sock);
+ if (node)
+ r2nm_node_put(node);
+ if (local_node)
+ r2nm_node_put(local_node);
+ if (sc)
+ sc_put(sc);
+ return ret;
+}
+
+static void r2net_accept_many(struct work_struct *work)
+{
+ struct socket *sock = r2net_listen_sock;
+ while (r2net_accept_one(sock) == 0)
+ cond_resched();
+}
+
+static void r2net_listen_data_ready(struct sock *sk, int bytes)
+{
+ void (*ready)(struct sock *sk, int bytes);
+
+ read_lock(&sk->sk_callback_lock);
+ ready = sk->sk_user_data;
+ if (ready == NULL) { /* check for teardown race */
+ ready = sk->sk_data_ready;
+ goto out;
+ }
+
+ /* ->sk_data_ready is also called for a newly established child socket
+ * before it has been accepted and the acceptor has set up their
+ * data_ready.. we only want to queue listen work for our listening
+ * socket */
+ if (sk->sk_state == TCP_LISTEN) {
+ mlog(ML_TCP, "bytes: %d\n", bytes);
+ queue_work(r2net_wq, &r2net_listen_work);
+ }
+
+out:
+ read_unlock(&sk->sk_callback_lock);
+ ready(sk, bytes);
+}
+
+static int r2net_open_listening_sock(__be32 addr, __be16 port)
+{
+ struct socket *sock = NULL;
+ int ret;
+ struct sockaddr_in sin = {
+		.sin_family = AF_INET,
+ .sin_addr = { .s_addr = addr },
+ .sin_port = port,
+ };
+
+ ret = sock_create(PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock);
+ if (ret < 0) {
+ printk(KERN_ERR "ramster: Error %d while creating socket\n",
+ ret);
+ goto out;
+ }
+
+ sock->sk->sk_allocation = GFP_ATOMIC;
+
+ write_lock_bh(&sock->sk->sk_callback_lock);
+ sock->sk->sk_user_data = sock->sk->sk_data_ready;
+ sock->sk->sk_data_ready = r2net_listen_data_ready;
+ write_unlock_bh(&sock->sk->sk_callback_lock);
+
+ r2net_listen_sock = sock;
+ INIT_WORK(&r2net_listen_work, r2net_accept_many);
+
+ sock->sk->sk_reuse = 1;
+ ret = sock->ops->bind(sock, (struct sockaddr *)&sin, sizeof(sin));
+ if (ret < 0) {
+ printk(KERN_ERR "ramster: Error %d while binding socket at "
+ "%pI4:%u\n", ret, &addr, ntohs(port));
+ goto out;
+ }
+
+ ret = sock->ops->listen(sock, 64);
+ if (ret < 0)
+ printk(KERN_ERR "ramster: Error %d while listening on %pI4:%u\n",
+ ret, &addr, ntohs(port));
+
+out:
+ if (ret) {
+ r2net_listen_sock = NULL;
+ if (sock)
+ sock_release(sock);
+ }
+ return ret;
+}
+
+/*
+ * called from node manager when we should bring up our network listening
+ * socket. node manager handles all the serialization to only call this
+ * once and to match it with r2net_stop_listening(). note,
+ * r2nm_this_node() doesn't work yet as we're being called while it
+ * is being set up.
+ */
+int r2net_start_listening(struct r2nm_node *node)
+{
+ int ret = 0;
+
+ BUG_ON(r2net_wq != NULL);
+ BUG_ON(r2net_listen_sock != NULL);
+
+ mlog(ML_KTHREAD, "starting r2net thread...\n");
+ r2net_wq = create_singlethread_workqueue("r2net");
+ if (r2net_wq == NULL) {
+ mlog(ML_ERROR, "unable to launch r2net thread\n");
+ return -ENOMEM; /* ? */
+ }
+
+ ret = r2net_open_listening_sock(node->nd_ipv4_address,
+ node->nd_ipv4_port);
+ if (ret) {
+ destroy_workqueue(r2net_wq);
+ r2net_wq = NULL;
+ }
+
+ return ret;
+}
+
+/* again, r2nm_this_node() doesn't work here as we're involved in
+ * tearing it down */
+void r2net_stop_listening(struct r2nm_node *node)
+{
+ struct socket *sock = r2net_listen_sock;
+ size_t i;
+
+ BUG_ON(r2net_wq == NULL);
+ BUG_ON(r2net_listen_sock == NULL);
+
+ /* stop the listening socket from generating work */
+ write_lock_bh(&sock->sk->sk_callback_lock);
+ sock->sk->sk_data_ready = sock->sk->sk_user_data;
+ sock->sk->sk_user_data = NULL;
+ write_unlock_bh(&sock->sk->sk_callback_lock);
+
+ for (i = 0; i < ARRAY_SIZE(r2net_nodes); i++) {
+ struct r2nm_node *node = r2nm_get_node_by_num(i);
+ if (node) {
+ r2net_disconnect_node(node);
+ r2nm_node_put(node);
+ }
+ }
+
+ /* finish all work and tear down the work queue */
+ mlog(ML_KTHREAD, "waiting for r2net thread to exit....\n");
+ destroy_workqueue(r2net_wq);
+ r2net_wq = NULL;
+
+ sock_release(r2net_listen_sock);
+ r2net_listen_sock = NULL;
+}
+
+void r2net_hb_node_up_manual(int node_num)
+{
+	struct r2nm_node dummy;
+
+	if (r2nm_single_cluster == NULL) {
+		pr_err("ramster: cluster not alive, node_up_manual ignored\n");
+	} else {
+		r2hb_manual_set_node_heartbeating(node_num);
+		r2net_hb_node_up_cb(&dummy, node_num, NULL);
+	}
+}
+
+/* ------------------------------------------------------------ */
+
+int r2net_init(void)
+{
+ unsigned long i;
+
+ if (r2net_debugfs_init())
+ return -ENOMEM;
+
+ r2net_hand = kzalloc(sizeof(struct r2net_handshake), GFP_KERNEL);
+ r2net_keep_req = kzalloc(sizeof(struct r2net_msg), GFP_KERNEL);
+ r2net_keep_resp = kzalloc(sizeof(struct r2net_msg), GFP_KERNEL);
+ if (!r2net_hand || !r2net_keep_req || !r2net_keep_resp) {
+ kfree(r2net_hand);
+ kfree(r2net_keep_req);
+ kfree(r2net_keep_resp);
+ return -ENOMEM;
+ }
+
+ r2net_hand->protocol_version = cpu_to_be64(R2NET_PROTOCOL_VERSION);
+ r2net_hand->connector_id = cpu_to_be64(1);
+
+ r2net_keep_req->magic = cpu_to_be16(R2NET_MSG_KEEP_REQ_MAGIC);
+ r2net_keep_resp->magic = cpu_to_be16(R2NET_MSG_KEEP_RESP_MAGIC);
+
+ for (i = 0; i < ARRAY_SIZE(r2net_nodes); i++) {
+ struct r2net_node *nn = r2net_nn_from_num(i);
+
+ atomic_set(&nn->nn_timeout, 0);
+ spin_lock_init(&nn->nn_lock);
+ INIT_DELAYED_WORK(&nn->nn_connect_work, r2net_start_connect);
+ INIT_DELAYED_WORK(&nn->nn_connect_expired,
+ r2net_connect_expired);
+ INIT_DELAYED_WORK(&nn->nn_still_up, r2net_still_up);
+		/* until we see hb from a node we'll return ENOTCONN */
+ nn->nn_persistent_error = -ENOTCONN;
+ init_waitqueue_head(&nn->nn_sc_wq);
+ idr_init(&nn->nn_status_idr);
+ INIT_LIST_HEAD(&nn->nn_status_list);
+ }
+
+ return 0;
+}
+
+void r2net_exit(void)
+{
+ kfree(r2net_hand);
+ kfree(r2net_keep_req);
+ kfree(r2net_keep_resp);
+ r2net_debugfs_exit();
+}
diff --git a/drivers/staging/ramster/cluster/tcp.h b/drivers/staging/ramster/cluster/tcp.h
new file mode 100644
index 00000000000..9d05833452b
--- /dev/null
+++ b/drivers/staging/ramster/cluster/tcp.h
@@ -0,0 +1,159 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * tcp.h
+ *
+ * Function prototypes
+ *
+ * Copyright (C) 2004 Oracle. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ *
+ */
+
+#ifndef R2CLUSTER_TCP_H
+#define R2CLUSTER_TCP_H
+
+#include <linux/socket.h>
+#ifdef __KERNEL__
+#include <net/sock.h>
+#include <linux/tcp.h>
+#else
+#include <sys/socket.h>
+#endif
+#include <linux/inet.h>
+#include <linux/in.h>
+
+struct r2net_msg {
+ __be16 magic;
+ __be16 data_len;
+ __be16 msg_type;
+ __be16 pad1;
+ __be32 sys_status;
+ __be32 status;
+ __be32 key;
+ __be32 msg_num;
+ __u8 buf[0];
+};
+
+typedef int (r2net_msg_handler_func)(struct r2net_msg *msg, u32 len, void *data,
+ void **ret_data);
+typedef void (r2net_post_msg_handler_func)(int status, void *data,
+ void *ret_data);
+
+#define R2NET_MAX_PAYLOAD_BYTES (4096 - sizeof(struct r2net_msg))
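+/* struct r2net_msg above is 24 bytes (four __be16 plus four __be32), so
+ * the single-page buffer assumed here caps payloads at 4096 - 24 = 4072
+ * bytes */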
+
+/* same as hb delay, we're waiting for another node to recognize our hb */
+#define R2NET_RECONNECT_DELAY_MS_DEFAULT 2000
+
+#define R2NET_KEEPALIVE_DELAY_MS_DEFAULT 2000
+#define R2NET_IDLE_TIMEOUT_MS_DEFAULT 30000
+
+
+/* TODO: figure this out.... */
+static inline int r2net_link_down(int err, struct socket *sock)
+{
+ if (sock) {
+ if (sock->sk->sk_state != TCP_ESTABLISHED &&
+ sock->sk->sk_state != TCP_CLOSE_WAIT)
+ return 1;
+ }
+
+ if (err >= 0)
+ return 0;
+ switch (err) {
+
+ /* ????????????????????????? */
+ case -ERESTARTSYS:
+ case -EBADF:
+ /* When the server has died, an ICMP port unreachable
+ * message prompts ECONNREFUSED. */
+ case -ECONNREFUSED:
+ case -ENOTCONN:
+ case -ECONNRESET:
+ case -EPIPE:
+ return 1;
+
+ }
+ return 0;
+}
+
+enum {
+ R2NET_DRIVER_UNINITED,
+ R2NET_DRIVER_READY,
+};
+
+int r2net_send_message(u32 msg_type, u32 key, void *data, u32 len,
+ u8 target_node, int *status);
+int r2net_send_message_vec(u32 msg_type, u32 key, struct kvec *vec,
+ size_t veclen, u8 target_node, int *status);
+
+int r2net_register_handler(u32 msg_type, u32 key, u32 max_len,
+ r2net_msg_handler_func *func, void *data,
+ r2net_post_msg_handler_func *post_func,
+ struct list_head *unreg_list);
+void r2net_unregister_handler_list(struct list_head *list);
+
+void r2net_fill_node_map(unsigned long *map, unsigned bytes);
+
+void r2net_force_data_magic(struct r2net_msg *, u16, u32);
+void r2net_hb_node_up_manual(int);
+struct r2net_node *r2net_nn_from_num(u8);
+
+struct r2nm_node;
+int r2net_register_hb_callbacks(void);
+void r2net_unregister_hb_callbacks(void);
+int r2net_start_listening(struct r2nm_node *node);
+void r2net_stop_listening(struct r2nm_node *node);
+void r2net_disconnect_node(struct r2nm_node *node);
+int r2net_num_connected_peers(void);
+
+int r2net_init(void);
+void r2net_exit(void);
+
+struct r2net_send_tracking;
+struct r2net_sock_container;
+
+#if 0
+int r2net_debugfs_init(void);
+void r2net_debugfs_exit(void);
+void r2net_debug_add_nst(struct r2net_send_tracking *nst);
+void r2net_debug_del_nst(struct r2net_send_tracking *nst);
+void r2net_debug_add_sc(struct r2net_sock_container *sc);
+void r2net_debug_del_sc(struct r2net_sock_container *sc);
+#else
+static inline int r2net_debugfs_init(void)
+{
+ return 0;
+}
+static inline void r2net_debugfs_exit(void)
+{
+}
+static inline void r2net_debug_add_nst(struct r2net_send_tracking *nst)
+{
+}
+static inline void r2net_debug_del_nst(struct r2net_send_tracking *nst)
+{
+}
+static inline void r2net_debug_add_sc(struct r2net_sock_container *sc)
+{
+}
+static inline void r2net_debug_del_sc(struct r2net_sock_container *sc)
+{
+}
+#endif /* 0, debugfs disabled */
+
+#endif /* R2CLUSTER_TCP_H */
diff --git a/drivers/staging/ramster/cluster/tcp_internal.h b/drivers/staging/ramster/cluster/tcp_internal.h
new file mode 100644
index 00000000000..4d8cc9f96fd
--- /dev/null
+++ b/drivers/staging/ramster/cluster/tcp_internal.h
@@ -0,0 +1,248 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * Copyright (C) 2005 Oracle. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#ifndef R2CLUSTER_TCP_INTERNAL_H
+#define R2CLUSTER_TCP_INTERNAL_H
+
+#define R2NET_MSG_MAGIC ((u16)0xfa55)
+#define R2NET_MSG_STATUS_MAGIC ((u16)0xfa56)
+#define R2NET_MSG_KEEP_REQ_MAGIC ((u16)0xfa57)
+#define R2NET_MSG_KEEP_RESP_MAGIC ((u16)0xfa58)
+/*
+ * "data magic" is a long version of "status magic" where the message
+ * payload actually contains data to be passed in reply to certain messages
+ */
+#define R2NET_MSG_DATA_MAGIC ((u16)0xfa59)
+
+/* we're delaying our quorum decision so that heartbeat will have timed
+ * out truly dead nodes by the time we come around to making decisions
+ * on their number */
+#define R2NET_QUORUM_DELAY_MS \
+ ((r2hb_dead_threshold + 2) * R2HB_REGION_TIMEOUT_MS)
+
+/*
+ * This version number represents quite a lot, unfortunately. It not
+ * only represents the raw network message protocol on the wire but also
+ * locking semantics of the file system using the protocol. It should
+ * be somewhere else, I'm sure, but right now it isn't.
+ *
+ * With version 11, we separate out the filesystem locking portion. The
+ * filesystem now has a major.minor version it negotiates. Version 11
+ * introduces this negotiation to the r2dlm protocol, and as such the
+ * version here in tcp_internal.h should not need to be bumped for
+ * filesystem locking changes.
+ *
+ * New in version 11
+ * - Negotiation of filesystem locking in the dlm join.
+ *
+ * New in version 10:
+ * - Meta/data locks combined
+ *
+ * New in version 9:
+ * - All votes removed
+ *
+ * New in version 8:
+ * - Replace delete inode votes with a cluster lock
+ *
+ * New in version 7:
+ * - DLM join domain includes the live nodemap
+ *
+ * New in version 6:
+ * - DLM lockres remote refcount fixes.
+ *
+ * New in version 5:
+ * - Network timeout checking protocol
+ *
+ * New in version 4:
+ * - Remove i_generation from lock names for better stat performance.
+ *
+ * New in version 3:
+ * - Replace dentry votes with a cluster lock
+ *
+ * New in version 2:
+ * - full 64 bit i_size in the metadata lock lvbs
+ * - introduction of "rw" lock and pushing meta/data locking down
+ */
+#define R2NET_PROTOCOL_VERSION 11ULL
+struct r2net_handshake {
+ __be64 protocol_version;
+ __be64 connector_id;
+ __be32 r2hb_heartbeat_timeout_ms;
+ __be32 r2net_idle_timeout_ms;
+ __be32 r2net_keepalive_delay_ms;
+ __be32 r2net_reconnect_delay_ms;
+};
+
+struct r2net_node {
+ /* this is never called from int/bh */
+ spinlock_t nn_lock;
+
+ /* set the moment an sc is allocated and a connect is started */
+ struct r2net_sock_container *nn_sc;
+ /* _valid is only set after the handshake passes and tx can happen */
+ unsigned nn_sc_valid:1;
+ /* if this is set tx just returns it */
+ int nn_persistent_error;
+ /* It is only set to 1 after the idle time out. */
+ atomic_t nn_timeout;
+
+ /* threads waiting for an sc to arrive wait on the wq for generation
+ * to increase. it is increased when a connecting socket succeeds
+ * or fails or when an accepted socket is attached. */
+ wait_queue_head_t nn_sc_wq;
+
+ struct idr nn_status_idr;
+ struct list_head nn_status_list;
+
+ /* connects are attempted from when heartbeat comes up until either hb
+ * goes down, the node is unconfigured, no connect attempts succeed
+ * before R2NET_CONN_IDLE_DELAY, or a connect succeeds. connect_work
+ * is queued from set_nn_state both from hb up and from itself if a
+ * connect attempt fails and so can be self-arming. shutdown is
+ * careful to first mark the nn such that no connects will be attempted
+ * before canceling delayed connect work and flushing the queue. */
+ struct delayed_work nn_connect_work;
+ unsigned long nn_last_connect_attempt;
+
+ /* this is queued as nodes come up and is canceled when a connection is
+ * established. this expiring gives up on the node and errors out
+ * transmits */
+ struct delayed_work nn_connect_expired;
+
+ /* after we give up on a socket we wait a while before deciding
+ * that it is still heartbeating and that we should do some
+ * quorum work */
+ struct delayed_work nn_still_up;
+};
+
+struct r2net_sock_container {
+ struct kref sc_kref;
+ /* the next two are valid for the life time of the sc */
+ struct socket *sc_sock;
+ struct r2nm_node *sc_node;
+
+ /* all of these sc work structs hold refs on the sc while they are
+ * queued. they should not be able to ref a freed sc. the teardown
+ * race is with r2net_wq destruction in r2net_stop_listening() */
+
+ /* rx and connect work are generated from socket callbacks. sc
+ * shutdown removes the callbacks and then flushes the work queue */
+ struct work_struct sc_rx_work;
+ struct work_struct sc_connect_work;
+ /* shutdown work is triggered in two ways. the simple way is
+	 * for a code path to call ensure_shutdown, which takes a lock,
+	 * removes the sc from the nn, and queues the work.  in this case the
+ * work is single-shot. the work is also queued from a sock
+ * callback, though, and in this case the work will find the sc
+ * still on the nn and will call ensure_shutdown itself.. this
+ * ends up triggering the shutdown work again, though nothing
+ * will be done in that second iteration. so work queue teardown
+ * has to be careful to remove the sc from the nn before waiting
+ * on the work queue so that the shutdown work doesn't remove the
+ * sc and rearm itself.
+ */
+ struct work_struct sc_shutdown_work;
+
+ struct timer_list sc_idle_timeout;
+ struct delayed_work sc_keepalive_work;
+
+ unsigned sc_handshake_ok:1;
+
+ struct page *sc_page;
+ size_t sc_page_off;
+
+ /* original handlers for the sockets */
+ void (*sc_state_change)(struct sock *sk);
+ void (*sc_data_ready)(struct sock *sk, int bytes);
+
+ u32 sc_msg_key;
+ u16 sc_msg_type;
+
+#ifdef CONFIG_DEBUG_FS
+ struct list_head sc_net_debug_item;
+ ktime_t sc_tv_timer;
+ ktime_t sc_tv_data_ready;
+ ktime_t sc_tv_advance_start;
+ ktime_t sc_tv_advance_stop;
+ ktime_t sc_tv_func_start;
+ ktime_t sc_tv_func_stop;
+#endif
+#ifdef CONFIG_RAMSTER_FS_STATS
+ ktime_t sc_tv_acquiry_total;
+ ktime_t sc_tv_send_total;
+ ktime_t sc_tv_status_total;
+ u32 sc_send_count;
+ u32 sc_recv_count;
+ ktime_t sc_tv_process_total;
+#endif
+ struct mutex sc_send_lock;
+};
+
+struct r2net_msg_handler {
+ struct rb_node nh_node;
+ u32 nh_max_len;
+ u32 nh_msg_type;
+ u32 nh_key;
+ r2net_msg_handler_func *nh_func;
+	void			*nh_func_data;
+ r2net_post_msg_handler_func
+ *nh_post_func;
+ struct kref nh_kref;
+ struct list_head nh_unregister_item;
+};
+
+enum r2net_system_error {
+ R2NET_ERR_NONE = 0,
+ R2NET_ERR_NO_HNDLR,
+ R2NET_ERR_OVERFLOW,
+ R2NET_ERR_DIED,
+ R2NET_ERR_MAX
+};
+
+struct r2net_status_wait {
+ enum r2net_system_error ns_sys_status;
+ s32 ns_status;
+ int ns_id;
+ wait_queue_head_t ns_wq;
+ struct list_head ns_node_item;
+};
+
+#ifdef CONFIG_DEBUG_FS
+/* just for state dumps */
+struct r2net_send_tracking {
+ struct list_head st_net_debug_item;
+ struct task_struct *st_task;
+ struct r2net_sock_container *st_sc;
+ u32 st_id;
+ u32 st_msg_type;
+ u32 st_msg_key;
+ u8 st_node;
+ ktime_t st_sock_time;
+ ktime_t st_send_time;
+ ktime_t st_status_time;
+};
+#else
+struct r2net_send_tracking {
+ u32 dummy;
+};
+#endif /* CONFIG_DEBUG_FS */
+
+#endif /* R2CLUSTER_TCP_INTERNAL_H */
diff --git a/drivers/staging/ramster/r2net.c b/drivers/staging/ramster/r2net.c
new file mode 100644
index 00000000000..2ee02204c43
--- /dev/null
+++ b/drivers/staging/ramster/r2net.c
@@ -0,0 +1,401 @@
+/*
+ * r2net.c
+ *
+ * Copyright (c) 2011, Dan Magenheimer, Oracle Corp.
+ *
+ * Ramster_r2net provides an interface between zcache and r2net.
+ *
+ * FIXME: support more than two nodes
+ */
+
+#include <linux/list.h>
+#include "cluster/tcp.h"
+#include "cluster/nodemanager.h"
+#include "tmem.h"
+#include "zcache.h"
+#include "ramster.h"
+
+#define RAMSTER_TESTING
+
+#define RMSTR_KEY 0x77347734
+
+enum {
+ RMSTR_TMEM_PUT_EPH = 100,
+ RMSTR_TMEM_PUT_PERS,
+ RMSTR_TMEM_ASYNC_GET_REQUEST,
+ RMSTR_TMEM_ASYNC_GET_AND_FREE_REQUEST,
+ RMSTR_TMEM_ASYNC_GET_REPLY,
+ RMSTR_TMEM_FLUSH,
+ RMSTR_TMEM_FLOBJ,
+ RMSTR_TMEM_DESTROY_POOL,
+};
+
+#define RMSTR_R2NET_MAX_LEN \
+ (R2NET_MAX_PAYLOAD_BYTES - sizeof(struct tmem_xhandle))
+
+#include "cluster/tcp_internal.h"
+
+static struct r2nm_node *r2net_target_node;
+static int r2net_target_nodenum;
+
+int r2net_remote_target_node_set(int node_num)
+{
+ int ret = -1;
+
+ r2net_target_node = r2nm_get_node_by_num(node_num);
+ if (r2net_target_node != NULL) {
+ r2net_target_nodenum = node_num;
+ r2nm_node_put(r2net_target_node);
+ ret = 0;
+ }
+ return ret;
+}
+
+/* FIXME following buffer should be per-cpu, protected by preempt_disable */
+static char ramster_async_get_buf[R2NET_MAX_PAYLOAD_BYTES];
+
+static int ramster_remote_async_get_request_handler(struct r2net_msg *msg,
+ u32 len, void *data, void **ret_data)
+{
+ char *pdata;
+ struct tmem_xhandle xh;
+ int found;
+ size_t size = RMSTR_R2NET_MAX_LEN;
+ u16 msgtype = be16_to_cpu(msg->msg_type);
+ bool get_and_free = (msgtype == RMSTR_TMEM_ASYNC_GET_AND_FREE_REQUEST);
+ unsigned long flags;
+
+ xh = *(struct tmem_xhandle *)msg->buf;
+	BUG_ON(xh.xh_data_size > RMSTR_R2NET_MAX_LEN);
+ pdata = ramster_async_get_buf;
+ *(struct tmem_xhandle *)pdata = xh;
+ pdata += sizeof(struct tmem_xhandle);
+ local_irq_save(flags);
+ found = zcache_get(xh.client_id, xh.pool_id, &xh.oid, xh.index,
+ pdata, &size, 1, get_and_free ? 1 : -1);
+ local_irq_restore(flags);
+ if (found < 0) {
+ /* a zero size indicates the get failed */
+ size = 0;
+ }
+	BUG_ON(size > RMSTR_R2NET_MAX_LEN);
+ *ret_data = pdata - sizeof(struct tmem_xhandle);
+ /* now make caller (r2net_process_message) handle specially */
+ r2net_force_data_magic(msg, RMSTR_TMEM_ASYNC_GET_REPLY, RMSTR_KEY);
+ return size + sizeof(struct tmem_xhandle);
+}
+
+static int ramster_remote_async_get_reply_handler(struct r2net_msg *msg,
+ u32 len, void *data, void **ret_data)
+{
+ char *in = (char *)msg->buf;
+ int datalen = len - sizeof(struct r2net_msg);
+ int ret = -1;
+ struct tmem_xhandle *xh = (struct tmem_xhandle *)in;
+
+ in += sizeof(struct tmem_xhandle);
+ datalen -= sizeof(struct tmem_xhandle);
+ BUG_ON(datalen < 0 || datalen > PAGE_SIZE);
+ ret = zcache_localify(xh->pool_id, &xh->oid, xh->index,
+ in, datalen, xh->extra);
+#ifdef RAMSTER_TESTING
+ if (ret == -EEXIST)
+ pr_err("TESTING ArrgREP, aborted overwrite on racy put\n");
+#endif
+ return ret;
+}
+
+int ramster_remote_put_handler(struct r2net_msg *msg,
+ u32 len, void *data, void **ret_data)
+{
+ struct tmem_xhandle *xh;
+ char *p = (char *)msg->buf;
+ int datalen = len - sizeof(struct r2net_msg) -
+ sizeof(struct tmem_xhandle);
+ u16 msgtype = be16_to_cpu(msg->msg_type);
+ bool ephemeral = (msgtype == RMSTR_TMEM_PUT_EPH);
+ unsigned long flags;
+ int ret;
+
+ xh = (struct tmem_xhandle *)p;
+ p += sizeof(struct tmem_xhandle);
+ zcache_autocreate_pool(xh->client_id, xh->pool_id, ephemeral);
+ local_irq_save(flags);
+ ret = zcache_put(xh->client_id, xh->pool_id, &xh->oid, xh->index,
+ p, datalen, 1, ephemeral ? 1 : -1);
+ local_irq_restore(flags);
+ return ret;
+}
+
+int ramster_remote_flush_handler(struct r2net_msg *msg,
+ u32 len, void *data, void **ret_data)
+{
+ struct tmem_xhandle *xh;
+ char *p = (char *)msg->buf;
+
+ xh = (struct tmem_xhandle *)p;
+ p += sizeof(struct tmem_xhandle);
+ (void)zcache_flush(xh->client_id, xh->pool_id, &xh->oid, xh->index);
+ return 0;
+}
+
+int ramster_remote_flobj_handler(struct r2net_msg *msg,
+ u32 len, void *data, void **ret_data)
+{
+ struct tmem_xhandle *xh;
+ char *p = (char *)msg->buf;
+
+ xh = (struct tmem_xhandle *)p;
+ p += sizeof(struct tmem_xhandle);
+ (void)zcache_flush_object(xh->client_id, xh->pool_id, &xh->oid);
+ return 0;
+}
+
+int ramster_remote_async_get(struct tmem_xhandle *xh, bool free, int remotenode,
+ size_t expect_size, uint8_t expect_cksum,
+ void *extra)
+{
+ int ret = -1, status;
+ struct r2nm_node *node = NULL;
+ struct kvec vec[1];
+ size_t veclen = 1;
+ u32 msg_type;
+
+ node = r2nm_get_node_by_num(remotenode);
+ if (node == NULL)
+ goto out;
+ xh->client_id = r2nm_this_node(); /* which node is getting */
+ xh->xh_data_cksum = expect_cksum;
+ xh->xh_data_size = expect_size;
+ xh->extra = extra;
+ vec[0].iov_len = sizeof(*xh);
+ vec[0].iov_base = xh;
+ if (free)
+ msg_type = RMSTR_TMEM_ASYNC_GET_AND_FREE_REQUEST;
+ else
+ msg_type = RMSTR_TMEM_ASYNC_GET_REQUEST;
+ ret = r2net_send_message_vec(msg_type, RMSTR_KEY,
+ vec, veclen, remotenode, &status);
+ r2nm_node_put(node);
+ if (ret < 0) {
+ /* FIXME handle bad message possibilities here? */
+ pr_err("UNTESTED ret<0 in ramster_remote_async_get\n");
+ }
+ ret = status;
+out:
+ return ret;
+}
+
+#ifdef RAMSTER_TESTING
+/* leave me here to see if it catches a weird crash */
+static void ramster_check_irq_counts(void)
+{
+ static int last_hardirq_cnt, last_softirq_cnt, last_preempt_cnt;
+ int cur_hardirq_cnt, cur_softirq_cnt, cur_preempt_cnt;
+
+ cur_hardirq_cnt = hardirq_count() >> HARDIRQ_SHIFT;
+ if (cur_hardirq_cnt > last_hardirq_cnt) {
+ last_hardirq_cnt = cur_hardirq_cnt;
+ if (!(last_hardirq_cnt&(last_hardirq_cnt-1)))
+ pr_err("RAMSTER TESTING RRP hardirq_count=%d\n",
+ last_hardirq_cnt);
+ }
+ cur_softirq_cnt = softirq_count() >> SOFTIRQ_SHIFT;
+ if (cur_softirq_cnt > last_softirq_cnt) {
+ last_softirq_cnt = cur_softirq_cnt;
+ if (!(last_softirq_cnt&(last_softirq_cnt-1)))
+ pr_err("RAMSTER TESTING RRP softirq_count=%d\n",
+ last_softirq_cnt);
+ }
+ cur_preempt_cnt = preempt_count() & PREEMPT_MASK;
+ if (cur_preempt_cnt > last_preempt_cnt) {
+ last_preempt_cnt = cur_preempt_cnt;
+ if (!(last_preempt_cnt&(last_preempt_cnt-1)))
+ pr_err("RAMSTER TESTING RRP preempt_count=%d\n",
+ last_preempt_cnt);
+ }
+}
+#endif
+
+int ramster_remote_put(struct tmem_xhandle *xh, char *data, size_t size,
+ bool ephemeral, int *remotenode)
+{
+ int nodenum, ret = -1, status;
+ struct r2nm_node *node = NULL;
+ struct kvec vec[2];
+ size_t veclen = 2;
+ u32 msg_type;
+#ifdef RAMSTER_TESTING
+ struct r2net_node *nn;
+#endif
+
+ BUG_ON(size > RMSTR_R2NET_MAX_LEN);
+ xh->client_id = r2nm_this_node(); /* which node is putting */
+ vec[0].iov_len = sizeof(*xh);
+ vec[0].iov_base = xh;
+ vec[1].iov_len = size;
+ vec[1].iov_base = data;
+ node = r2net_target_node;
+ if (!node)
+ goto out;
+
+ nodenum = r2net_target_nodenum;
+
+ r2nm_node_get(node);
+
+#ifdef RAMSTER_TESTING
+ nn = r2net_nn_from_num(nodenum);
+ WARN_ON_ONCE(nn->nn_persistent_error || !nn->nn_sc_valid);
+#endif
+
+ if (ephemeral)
+ msg_type = RMSTR_TMEM_PUT_EPH;
+ else
+ msg_type = RMSTR_TMEM_PUT_PERS;
+#ifdef RAMSTER_TESTING
+ /* leave me here to see if it catches a weird crash */
+ ramster_check_irq_counts();
+#endif
+
+ ret = r2net_send_message_vec(msg_type, RMSTR_KEY, vec, veclen,
+ nodenum, &status);
+#ifdef RAMSTER_TESTING
+ if (ret != 0) {
+ static unsigned long cnt;
+ cnt++;
+ if (!(cnt&(cnt-1)))
+ pr_err("ramster_remote_put: message failed, "
+ "ret=%d, cnt=%lu\n", ret, cnt);
+ ret = -1;
+ }
+#endif
+ if (ret < 0)
+ ret = -1;
+ else {
+ ret = status;
+ *remotenode = nodenum;
+ }
+
+ r2nm_node_put(node);
+out:
+ return ret;
+}
+
+int ramster_remote_flush(struct tmem_xhandle *xh, int remotenode)
+{
+ int ret = -1, status;
+ struct r2nm_node *node = NULL;
+ struct kvec vec[1];
+ size_t veclen = 1;
+
+ node = r2nm_get_node_by_num(remotenode);
+ BUG_ON(node == NULL);
+ xh->client_id = r2nm_this_node(); /* which node is flushing */
+ vec[0].iov_len = sizeof(*xh);
+ vec[0].iov_base = xh;
+ BUG_ON(irqs_disabled());
+ BUG_ON(in_softirq());
+ ret = r2net_send_message_vec(RMSTR_TMEM_FLUSH, RMSTR_KEY,
+ vec, veclen, remotenode, &status);
+ r2nm_node_put(node);
+ return ret;
+}
+
+int ramster_remote_flush_object(struct tmem_xhandle *xh, int remotenode)
+{
+ int ret = -1, status;
+ struct r2nm_node *node = NULL;
+ struct kvec vec[1];
+ size_t veclen = 1;
+
+ node = r2nm_get_node_by_num(remotenode);
+ BUG_ON(node == NULL);
+ xh->client_id = r2nm_this_node(); /* which node is flobjing */
+ vec[0].iov_len = sizeof(*xh);
+ vec[0].iov_base = xh;
+ ret = r2net_send_message_vec(RMSTR_TMEM_FLOBJ, RMSTR_KEY,
+ vec, veclen, remotenode, &status);
+ r2nm_node_put(node);
+ return ret;
+}
+
+/*
+ * Handler registration
+ */
+
+static LIST_HEAD(r2net_unreg_list);
+
+static void r2net_unregister_handlers(void)
+{
+ r2net_unregister_handler_list(&r2net_unreg_list);
+}
+
+int r2net_register_handlers(void)
+{
+ int status;
+
+ status = r2net_register_handler(RMSTR_TMEM_PUT_EPH, RMSTR_KEY,
+ RMSTR_R2NET_MAX_LEN,
+ ramster_remote_put_handler,
+ NULL, NULL, &r2net_unreg_list);
+ if (status)
+ goto bail;
+
+ status = r2net_register_handler(RMSTR_TMEM_PUT_PERS, RMSTR_KEY,
+ RMSTR_R2NET_MAX_LEN,
+ ramster_remote_put_handler,
+ NULL, NULL, &r2net_unreg_list);
+ if (status)
+ goto bail;
+
+ status = r2net_register_handler(RMSTR_TMEM_ASYNC_GET_REQUEST, RMSTR_KEY,
+ RMSTR_R2NET_MAX_LEN,
+ ramster_remote_async_get_request_handler,
+ NULL, NULL,
+ &r2net_unreg_list);
+ if (status)
+ goto bail;
+
+ status = r2net_register_handler(RMSTR_TMEM_ASYNC_GET_AND_FREE_REQUEST,
+ RMSTR_KEY, RMSTR_R2NET_MAX_LEN,
+ ramster_remote_async_get_request_handler,
+ NULL, NULL,
+ &r2net_unreg_list);
+ if (status)
+ goto bail;
+
+ status = r2net_register_handler(RMSTR_TMEM_ASYNC_GET_REPLY, RMSTR_KEY,
+ RMSTR_R2NET_MAX_LEN,
+ ramster_remote_async_get_reply_handler,
+ NULL, NULL,
+ &r2net_unreg_list);
+ if (status)
+ goto bail;
+
+ status = r2net_register_handler(RMSTR_TMEM_FLUSH, RMSTR_KEY,
+ RMSTR_R2NET_MAX_LEN,
+ ramster_remote_flush_handler,
+ NULL, NULL,
+ &r2net_unreg_list);
+ if (status)
+ goto bail;
+
+ status = r2net_register_handler(RMSTR_TMEM_FLOBJ, RMSTR_KEY,
+ RMSTR_R2NET_MAX_LEN,
+ ramster_remote_flobj_handler,
+ NULL, NULL,
+ &r2net_unreg_list);
+ if (status)
+ goto bail;
+
+ pr_info("ramster: r2net handlers registered\n");
+
+bail:
+ if (status) {
+ r2net_unregister_handlers();
+ pr_err("ramster: couldn't register r2net handlers\n");
+ }
+ return status;
+}
diff --git a/drivers/staging/ramster/ramster.h b/drivers/staging/ramster/ramster.h
new file mode 100644
index 00000000000..0c9455e8dcd
--- /dev/null
+++ b/drivers/staging/ramster/ramster.h
@@ -0,0 +1,118 @@
+/*
+ * ramster.h
+ *
+ * Peer-to-peer transcendent memory
+ *
+ * Copyright (c) 2009-2012, Dan Magenheimer, Oracle Corp.
+ */
+
+#ifndef _RAMSTER_H_
+#define _RAMSTER_H_
+
+/*
+ * format of remote pampd:
+ * bit 0 == intransit
+ * bit 1 == is_remote... if this bit is set, then
+ * bit 2-9 == remotenode
+ * bit 10-22 == size
+ * bit 23-30 == cksum
+ */
+#define FAKE_PAMPD_INTRANSIT_BITS 1
+#define FAKE_PAMPD_ISREMOTE_BITS 1
+#define FAKE_PAMPD_REMOTENODE_BITS 8
+#define FAKE_PAMPD_REMOTESIZE_BITS 13
+#define FAKE_PAMPD_CHECKSUM_BITS 8
+
+#define FAKE_PAMPD_INTRANSIT_SHIFT 0
+#define FAKE_PAMPD_ISREMOTE_SHIFT (FAKE_PAMPD_INTRANSIT_SHIFT + \
+ FAKE_PAMPD_INTRANSIT_BITS)
+#define FAKE_PAMPD_REMOTENODE_SHIFT (FAKE_PAMPD_ISREMOTE_SHIFT + \
+ FAKE_PAMPD_ISREMOTE_BITS)
+#define FAKE_PAMPD_REMOTESIZE_SHIFT (FAKE_PAMPD_REMOTENODE_SHIFT + \
+ FAKE_PAMPD_REMOTENODE_BITS)
+#define FAKE_PAMPD_CHECKSUM_SHIFT (FAKE_PAMPD_REMOTESIZE_SHIFT + \
+ FAKE_PAMPD_REMOTESIZE_BITS)
+
+#define FAKE_PAMPD_MASK(x) ((1UL << (x)) - 1)
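+
+/*
+ * Worked example (illustrative only): with the layout above,
+ * pampd_make_remote(3, 0x123, 0xab) below encodes
+ * (1UL << 1) | (3UL << 2) | (0x123UL << 10) | (0xabUL << 23) == 0x55848c0e,
+ * and the accessors recover each field by shifting right and masking
+ * with FAKE_PAMPD_MASK().
+ */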
+
+static inline void *pampd_make_remote(int remotenode, size_t size,
+ unsigned char cksum)
+{
+ unsigned long fake_pampd = 0;
+ fake_pampd |= 1UL << FAKE_PAMPD_ISREMOTE_SHIFT;
+ fake_pampd |= ((unsigned long)remotenode &
+ FAKE_PAMPD_MASK(FAKE_PAMPD_REMOTENODE_BITS)) <<
+ FAKE_PAMPD_REMOTENODE_SHIFT;
+ fake_pampd |= ((unsigned long)size &
+ FAKE_PAMPD_MASK(FAKE_PAMPD_REMOTESIZE_BITS)) <<
+ FAKE_PAMPD_REMOTESIZE_SHIFT;
+ fake_pampd |= ((unsigned long)cksum &
+ FAKE_PAMPD_MASK(FAKE_PAMPD_CHECKSUM_BITS)) <<
+ FAKE_PAMPD_CHECKSUM_SHIFT;
+ return (void *)fake_pampd;
+}
+
+static inline unsigned int pampd_remote_node(void *pampd)
+{
+ unsigned long fake_pampd = (unsigned long)pampd;
+ return (fake_pampd >> FAKE_PAMPD_REMOTENODE_SHIFT) &
+ FAKE_PAMPD_MASK(FAKE_PAMPD_REMOTENODE_BITS);
+}
+
+static inline unsigned int pampd_remote_size(void *pampd)
+{
+ unsigned long fake_pampd = (unsigned long)pampd;
+ return (fake_pampd >> FAKE_PAMPD_REMOTESIZE_SHIFT) &
+ FAKE_PAMPD_MASK(FAKE_PAMPD_REMOTESIZE_BITS);
+}
+
+static inline unsigned char pampd_remote_cksum(void *pampd)
+{
+ unsigned long fake_pampd = (unsigned long)pampd;
+ return (fake_pampd >> FAKE_PAMPD_CHECKSUM_SHIFT) &
+ FAKE_PAMPD_MASK(FAKE_PAMPD_CHECKSUM_BITS);
+}
+
+static inline bool pampd_is_remote(void *pampd)
+{
+ unsigned long fake_pampd = (unsigned long)pampd;
+ return (fake_pampd >> FAKE_PAMPD_ISREMOTE_SHIFT) &
+ FAKE_PAMPD_MASK(FAKE_PAMPD_ISREMOTE_BITS);
+}
+
+static inline bool pampd_is_intransit(void *pampd)
+{
+ unsigned long fake_pampd = (unsigned long)pampd;
+ return (fake_pampd >> FAKE_PAMPD_INTRANSIT_SHIFT) &
+ FAKE_PAMPD_MASK(FAKE_PAMPD_INTRANSIT_BITS);
+}
+
+/* note that it is a BUG for intransit to be set without isremote also set */
+static inline void *pampd_mark_intransit(void *pampd)
+{
+ unsigned long fake_pampd = (unsigned long)pampd;
+
+ fake_pampd |= 1UL << FAKE_PAMPD_ISREMOTE_SHIFT;
+ fake_pampd |= 1UL << FAKE_PAMPD_INTRANSIT_SHIFT;
+ return (void *)fake_pampd;
+}
+
+static inline void *pampd_mask_intransit_and_remote(void *marked_pampd)
+{
+ unsigned long pampd = (unsigned long)marked_pampd;
+
+ pampd &= ~(1UL << FAKE_PAMPD_INTRANSIT_SHIFT);
+ pampd &= ~(1UL << FAKE_PAMPD_ISREMOTE_SHIFT);
+ return (void *)pampd;
+}
+
+extern int ramster_remote_async_get(struct tmem_xhandle *,
+ bool, int, size_t, uint8_t, void *extra);
+extern int ramster_remote_put(struct tmem_xhandle *, char *, size_t,
+ bool, int *);
+extern int ramster_remote_flush(struct tmem_xhandle *, int);
+extern int ramster_remote_flush_object(struct tmem_xhandle *, int);
+extern int r2net_register_handlers(void);
+extern int r2net_remote_target_node_set(int);
+
+#endif /* _RAMSTER_H_ */
diff --git a/drivers/staging/ramster/tmem.c b/drivers/staging/ramster/tmem.c
new file mode 100644
index 00000000000..8f2f6892d8d
--- /dev/null
+++ b/drivers/staging/ramster/tmem.c
@@ -0,0 +1,851 @@
+/*
+ * In-kernel transcendent memory (generic implementation)
+ *
+ * Copyright (c) 2009-2011, Dan Magenheimer, Oracle Corp.
+ *
+ * The primary purpose of Transcendent Memory ("tmem") is to map object-oriented
+ * "handles" (triples containing a pool id, an object id, and an index) to
+ * pages in a page-accessible memory (PAM). Tmem references the PAM pages via
+ * an abstract "pampd" (PAM page-descriptor), which can be operated on by a
+ * set of functions (pamops). Each pampd contains some representation of
+ * PAGE_SIZE bytes worth of data. Tmem must support potentially millions of
+ * pages and must be able to insert, find, and delete these pages at a
+ * potential frequency of thousands per second concurrently across many CPUs
+ * (and, if used with KVM, across many vcpus in many guests).
+ * Tmem is tracked with a hierarchy of data structures, organized by
+ * the elements in a handle-tuple: pool_id, object_id, and page index.
+ * One or more "clients" (e.g. guests) each provide one or more tmem_pools.
+ * Each pool contains a hash table of rb_trees of tmem_objs. Each
+ * tmem_obj contains a radix-tree-like tree of pointers, with intermediate
+ * nodes called tmem_objnodes. Each leaf pointer in this tree points to
+ * a pampd, which is accessible only through a small set of callbacks
+ * registered by the PAM implementation (see tmem_register_pamops). Tmem
+ * does all memory allocation via a set of callbacks registered by the tmem
+ * host implementation (e.g. see tmem_register_hostops).
+ */
+
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/atomic.h>
+#include <linux/delay.h>
+
+#include "tmem.h"
+
+/* data structure sentinels used for debugging... see tmem.h */
+#define POOL_SENTINEL 0x87658765
+#define OBJ_SENTINEL 0x12345678
+#define OBJNODE_SENTINEL 0xfedcba09
+
+/*
+ * A tmem host implementation must use this function to register callbacks
+ * for memory allocation.
+ */
+static struct tmem_hostops tmem_hostops;
+
+static void tmem_objnode_tree_init(void);
+
+void tmem_register_hostops(struct tmem_hostops *m)
+{
+ tmem_objnode_tree_init();
+ tmem_hostops = *m;
+}
+
+/*
+ * A tmem host implementation must use this function to register
+ * callbacks for a page-accessible memory (PAM) implementation
+ */
+static struct tmem_pamops tmem_pamops;
+
+void tmem_register_pamops(struct tmem_pamops *m)
+{
+ tmem_pamops = *m;
+}
+
+/*
+ * Oid's are potentially very sparse and tmem_objs may have an indeterminately
+ * short life, being added and deleted at a relatively high frequency.
+ * So an rb_tree is an ideal data structure to manage tmem_objs. But because
+ * of the potentially huge number of tmem_objs, each pool manages a hashtable
+ * of rb_trees to reduce search, insert, delete, and rebalancing time.
+ * Each hashbucket also has a lock to manage concurrent access.
+ *
+ * The following routines manage tmem_objs. When any tmem_obj is accessed,
+ * the hashbucket lock must be held.
+ */
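+
+/*
+ * Illustrative access pattern (a sketch of what the tmem core routines
+ * below do, not new functionality): hash the oid to select a bucket,
+ * lock it, then search that bucket's rb_tree:
+ *
+ * hb = &pool->hashbucket[tmem_oid_hash(oidp)];
+ * spin_lock(&hb->lock);
+ * obj = tmem_obj_find(hb, oidp);
+ * ...operate on obj with the bucket lock held...
+ * spin_unlock(&hb->lock);
+ */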
+
+/* search for object==oid in pool; returns it if found (hb lock must be held) */
+static struct tmem_obj *tmem_obj_find(struct tmem_hashbucket *hb,
+ struct tmem_oid *oidp)
+{
+ struct rb_node *rbnode;
+ struct tmem_obj *obj;
+
+ rbnode = hb->obj_rb_root.rb_node;
+ while (rbnode) {
+ BUG_ON(RB_EMPTY_NODE(rbnode));
+ obj = rb_entry(rbnode, struct tmem_obj, rb_tree_node);
+ switch (tmem_oid_compare(oidp, &obj->oid)) {
+ case 0: /* equal */
+ goto out;
+ case -1:
+ rbnode = rbnode->rb_left;
+ break;
+ case 1:
+ rbnode = rbnode->rb_right;
+ break;
+ }
+ }
+ obj = NULL;
+out:
+ return obj;
+}
+
+static void tmem_pampd_destroy_all_in_obj(struct tmem_obj *);
+
+/* free an object that has no more pampds in it */
+static void tmem_obj_free(struct tmem_obj *obj, struct tmem_hashbucket *hb)
+{
+ struct tmem_pool *pool;
+
+ BUG_ON(obj == NULL);
+ ASSERT_SENTINEL(obj, OBJ);
+ BUG_ON(obj->pampd_count > 0);
+ pool = obj->pool;
+ BUG_ON(pool == NULL);
+ if (obj->objnode_tree_root != NULL) /* may be "stump" with no leaves */
+ tmem_pampd_destroy_all_in_obj(obj);
+ BUG_ON(obj->objnode_tree_root != NULL);
+ BUG_ON((long)obj->objnode_count != 0);
+ atomic_dec(&pool->obj_count);
+ BUG_ON(atomic_read(&pool->obj_count) < 0);
+ INVERT_SENTINEL(obj, OBJ);
+ obj->pool = NULL;
+ tmem_oid_set_invalid(&obj->oid);
+ rb_erase(&obj->rb_tree_node, &hb->obj_rb_root);
+}
+
+/*
+ * initialize and insert a tmem_object_root (called only if find failed)
+ */
+static void tmem_obj_init(struct tmem_obj *obj, struct tmem_hashbucket *hb,
+ struct tmem_pool *pool,
+ struct tmem_oid *oidp)
+{
+ struct rb_root *root = &hb->obj_rb_root;
+ struct rb_node **new = &(root->rb_node), *parent = NULL;
+ struct tmem_obj *this;
+
+ BUG_ON(pool == NULL);
+ atomic_inc(&pool->obj_count);
+ obj->objnode_tree_height = 0;
+ obj->objnode_tree_root = NULL;
+ obj->pool = pool;
+ obj->oid = *oidp;
+ obj->objnode_count = 0;
+ obj->pampd_count = 0;
+ (*tmem_pamops.new_obj)(obj);
+ SET_SENTINEL(obj, OBJ);
+ while (*new) {
+ BUG_ON(RB_EMPTY_NODE(*new));
+ this = rb_entry(*new, struct tmem_obj, rb_tree_node);
+ parent = *new;
+ switch (tmem_oid_compare(oidp, &this->oid)) {
+ case 0:
+ BUG(); /* already present; should never happen! */
+ break;
+ case -1:
+ new = &(*new)->rb_left;
+ break;
+ case 1:
+ new = &(*new)->rb_right;
+ break;
+ }
+ }
+ rb_link_node(&obj->rb_tree_node, parent, new);
+ rb_insert_color(&obj->rb_tree_node, root);
+}
+
+/*
+ * Tmem is managed as a set of tmem_pools with certain attributes, such as
+ * "ephemeral" vs "persistent". These attributes apply to all tmem_objs
+ * and all pampds that belong to a tmem_pool. A tmem_pool is created
+ * or deleted relatively rarely (for example, when a filesystem is
+ * mounted or unmounted).
+ */
+
+/* flush all data from a pool and, optionally, free it */
+static void tmem_pool_flush(struct tmem_pool *pool, bool destroy)
+{
+ struct rb_node *rbnode;
+ struct tmem_obj *obj;
+ struct tmem_hashbucket *hb = &pool->hashbucket[0];
+ int i;
+
+ BUG_ON(pool == NULL);
+ for (i = 0; i < TMEM_HASH_BUCKETS; i++, hb++) {
+ spin_lock(&hb->lock);
+ rbnode = rb_first(&hb->obj_rb_root);
+ while (rbnode != NULL) {
+ obj = rb_entry(rbnode, struct tmem_obj, rb_tree_node);
+ rbnode = rb_next(rbnode);
+ tmem_pampd_destroy_all_in_obj(obj);
+ tmem_obj_free(obj, hb);
+ (*tmem_hostops.obj_free)(obj, pool);
+ }
+ spin_unlock(&hb->lock);
+ }
+ if (destroy)
+ list_del(&pool->pool_list);
+}
+
+/*
+ * A tmem_obj contains a radix-tree-like tree in which the intermediate
+ * nodes are called tmem_objnodes. (The kernel lib/radix-tree.c implementation
+ * is very specialized and tuned for specific uses and is not particularly
+ * suited for use from this code, though some code from the core algorithms has
+ * been reused, thus the copyright notices below). Each tmem_objnode contains
+ * a set of pointers which point to either a set of intermediate tmem_objnodes
+ * or a set of pampds.
+ *
+ * Portions Copyright (C) 2001 Momchil Velikov
+ * Portions Copyright (C) 2001 Christoph Hellwig
+ * Portions Copyright (C) 2005 SGI, Christoph Lameter <clameter@sgi.com>
+ */
+
+struct tmem_objnode_tree_path {
+ struct tmem_objnode *objnode;
+ int offset;
+};
+
+/* objnode height_to_maxindex translation */
+static unsigned long tmem_objnode_tree_h2max[OBJNODE_TREE_MAX_PATH + 1];
+
+static void tmem_objnode_tree_init(void)
+{
+ unsigned int ht, tmp;
+
+ for (ht = 0; ht < ARRAY_SIZE(tmem_objnode_tree_h2max); ht++) {
+ tmp = ht * OBJNODE_TREE_MAP_SHIFT;
+ if (tmp >= OBJNODE_TREE_INDEX_BITS)
+ tmem_objnode_tree_h2max[ht] = ~0UL;
+ else
+ tmem_objnode_tree_h2max[ht] =
+ (~0UL >> (OBJNODE_TREE_INDEX_BITS - tmp - 1)) >> 1;
+ }
+}
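+
+/*
+ * Worked example: with OBJNODE_TREE_MAP_SHIFT == 6, the loop above yields
+ * h2max[0] == 0, h2max[1] == 63, h2max[2] == 4095, ..., i.e. a tree of
+ * height h can address indices up to (1UL << (6 * h)) - 1.
+ */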
+
+static struct tmem_objnode *tmem_objnode_alloc(struct tmem_obj *obj)
+{
+ struct tmem_objnode *objnode;
+
+ ASSERT_SENTINEL(obj, OBJ);
+ BUG_ON(obj->pool == NULL);
+ ASSERT_SENTINEL(obj->pool, POOL);
+ objnode = (*tmem_hostops.objnode_alloc)(obj->pool);
+ if (unlikely(objnode == NULL))
+ goto out;
+ objnode->obj = obj;
+ SET_SENTINEL(objnode, OBJNODE);
+ memset(&objnode->slots, 0, sizeof(objnode->slots));
+ objnode->slots_in_use = 0;
+ obj->objnode_count++;
+out:
+ return objnode;
+}
+
+static void tmem_objnode_free(struct tmem_objnode *objnode)
+{
+ struct tmem_pool *pool;
+ int i;
+
+ BUG_ON(objnode == NULL);
+ for (i = 0; i < OBJNODE_TREE_MAP_SIZE; i++)
+ BUG_ON(objnode->slots[i] != NULL);
+ ASSERT_SENTINEL(objnode, OBJNODE);
+ INVERT_SENTINEL(objnode, OBJNODE);
+ BUG_ON(objnode->obj == NULL);
+ ASSERT_SENTINEL(objnode->obj, OBJ);
+ pool = objnode->obj->pool;
+ BUG_ON(pool == NULL);
+ ASSERT_SENTINEL(pool, POOL);
+ objnode->obj->objnode_count--;
+ objnode->obj = NULL;
+ (*tmem_hostops.objnode_free)(objnode, pool);
+}
+
+/*
+ * lookup index in object and return associated pampd (or NULL if not found)
+ */
+static void **__tmem_pampd_lookup_in_obj(struct tmem_obj *obj, uint32_t index)
+{
+ unsigned int height, shift;
+ struct tmem_objnode **slot = NULL;
+
+ BUG_ON(obj == NULL);
+ ASSERT_SENTINEL(obj, OBJ);
+ BUG_ON(obj->pool == NULL);
+ ASSERT_SENTINEL(obj->pool, POOL);
+
+ height = obj->objnode_tree_height;
+ if (index > tmem_objnode_tree_h2max[height])
+ goto out;
+ if (height == 0 && obj->objnode_tree_root) {
+ slot = &obj->objnode_tree_root;
+ goto out;
+ }
+ shift = (height-1) * OBJNODE_TREE_MAP_SHIFT;
+ slot = &obj->objnode_tree_root;
+ while (height > 0) {
+ if (*slot == NULL)
+ goto out;
+ slot = (struct tmem_objnode **)
+ ((*slot)->slots +
+ ((index >> shift) & OBJNODE_TREE_MAP_MASK));
+ shift -= OBJNODE_TREE_MAP_SHIFT;
+ height--;
+ }
+out:
+ return slot != NULL ? (void **)slot : NULL;
+}
+
+static void *tmem_pampd_lookup_in_obj(struct tmem_obj *obj, uint32_t index)
+{
+ struct tmem_objnode **slot;
+
+ slot = (struct tmem_objnode **)__tmem_pampd_lookup_in_obj(obj, index);
+ return slot != NULL ? *slot : NULL;
+}
+
+static void *tmem_pampd_replace_in_obj(struct tmem_obj *obj, uint32_t index,
+ void *new_pampd, bool no_free)
+{
+ struct tmem_objnode **slot;
+ void *ret = NULL;
+
+ slot = (struct tmem_objnode **)__tmem_pampd_lookup_in_obj(obj, index);
+ if ((slot != NULL) && (*slot != NULL)) {
+ void *old_pampd = *(void **)slot;
+ *(void **)slot = new_pampd;
+ if (!no_free)
+ (*tmem_pamops.free)(old_pampd, obj->pool,
+ NULL, 0, false);
+ ret = new_pampd;
+ }
+ return ret;
+}
+
+static int tmem_pampd_add_to_obj(struct tmem_obj *obj, uint32_t index,
+ void *pampd)
+{
+ int ret = 0;
+ struct tmem_objnode *objnode = NULL, *newnode, *slot;
+ unsigned int height, shift;
+ int offset = 0;
+
+ /* if necessary, extend the tree to be higher */
+ if (index > tmem_objnode_tree_h2max[obj->objnode_tree_height]) {
+ height = obj->objnode_tree_height + 1;
+ while (index > tmem_objnode_tree_h2max[height])
+ height++;
+ if (obj->objnode_tree_root == NULL) {
+ obj->objnode_tree_height = height;
+ goto insert;
+ }
+ do {
+ newnode = tmem_objnode_alloc(obj);
+ if (!newnode) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ newnode->slots[0] = obj->objnode_tree_root;
+ newnode->slots_in_use = 1;
+ obj->objnode_tree_root = newnode;
+ obj->objnode_tree_height++;
+ } while (height > obj->objnode_tree_height);
+ }
+insert:
+ slot = obj->objnode_tree_root;
+ height = obj->objnode_tree_height;
+ shift = (height-1) * OBJNODE_TREE_MAP_SHIFT;
+ while (height > 0) {
+ if (slot == NULL) {
+ /* add a child objnode. */
+ slot = tmem_objnode_alloc(obj);
+ if (!slot) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ if (objnode) {
+ objnode->slots[offset] = slot;
+ objnode->slots_in_use++;
+ } else
+ obj->objnode_tree_root = slot;
+ }
+ /* go down a level */
+ offset = (index >> shift) & OBJNODE_TREE_MAP_MASK;
+ objnode = slot;
+ slot = objnode->slots[offset];
+ shift -= OBJNODE_TREE_MAP_SHIFT;
+ height--;
+ }
+ BUG_ON(slot != NULL);
+ if (objnode) {
+ objnode->slots_in_use++;
+ objnode->slots[offset] = pampd;
+ } else
+ obj->objnode_tree_root = pampd;
+ obj->pampd_count++;
+out:
+ return ret;
+}
+
+static void *tmem_pampd_delete_from_obj(struct tmem_obj *obj, uint32_t index)
+{
+ struct tmem_objnode_tree_path path[OBJNODE_TREE_MAX_PATH + 1];
+ struct tmem_objnode_tree_path *pathp = path;
+ struct tmem_objnode *slot = NULL;
+ unsigned int height, shift;
+ int offset;
+
+ BUG_ON(obj == NULL);
+ ASSERT_SENTINEL(obj, OBJ);
+ BUG_ON(obj->pool == NULL);
+ ASSERT_SENTINEL(obj->pool, POOL);
+ height = obj->objnode_tree_height;
+ if (index > tmem_objnode_tree_h2max[height])
+ goto out;
+ slot = obj->objnode_tree_root;
+ if (height == 0 && obj->objnode_tree_root) {
+ obj->objnode_tree_root = NULL;
+ goto out;
+ }
+ shift = (height - 1) * OBJNODE_TREE_MAP_SHIFT;
+ pathp->objnode = NULL;
+ do {
+ if (slot == NULL)
+ goto out;
+ pathp++;
+ offset = (index >> shift) & OBJNODE_TREE_MAP_MASK;
+ pathp->offset = offset;
+ pathp->objnode = slot;
+ slot = slot->slots[offset];
+ shift -= OBJNODE_TREE_MAP_SHIFT;
+ height--;
+ } while (height > 0);
+ if (slot == NULL)
+ goto out;
+ while (pathp->objnode) {
+ pathp->objnode->slots[pathp->offset] = NULL;
+ pathp->objnode->slots_in_use--;
+ if (pathp->objnode->slots_in_use) {
+ if (pathp->objnode == obj->objnode_tree_root) {
+ while (obj->objnode_tree_height > 0 &&
+ obj->objnode_tree_root->slots_in_use == 1 &&
+ obj->objnode_tree_root->slots[0]) {
+ struct tmem_objnode *to_free =
+ obj->objnode_tree_root;
+
+ obj->objnode_tree_root =
+ to_free->slots[0];
+ obj->objnode_tree_height--;
+ to_free->slots[0] = NULL;
+ to_free->slots_in_use = 0;
+ tmem_objnode_free(to_free);
+ }
+ }
+ goto out;
+ }
+ tmem_objnode_free(pathp->objnode); /* 0 slots used, free it */
+ pathp--;
+ }
+ obj->objnode_tree_height = 0;
+ obj->objnode_tree_root = NULL;
+
+out:
+ if (slot != NULL)
+ obj->pampd_count--;
+ BUG_ON(obj->pampd_count < 0);
+ return slot;
+}
+
+/* recursively walk the objnode_tree destroying pampds and objnodes */
+static void tmem_objnode_node_destroy(struct tmem_obj *obj,
+ struct tmem_objnode *objnode,
+ unsigned int ht)
+{
+ int i;
+
+ if (ht == 0)
+ return;
+ for (i = 0; i < OBJNODE_TREE_MAP_SIZE; i++) {
+ if (objnode->slots[i]) {
+ if (ht == 1) {
+ obj->pampd_count--;
+ (*tmem_pamops.free)(objnode->slots[i],
+ obj->pool, NULL, 0, true);
+ objnode->slots[i] = NULL;
+ continue;
+ }
+ tmem_objnode_node_destroy(obj, objnode->slots[i], ht-1);
+ tmem_objnode_free(objnode->slots[i]);
+ objnode->slots[i] = NULL;
+ }
+ }
+}
+
+static void tmem_pampd_destroy_all_in_obj(struct tmem_obj *obj)
+{
+ if (obj->objnode_tree_root == NULL)
+ return;
+ if (obj->objnode_tree_height == 0) {
+ obj->pampd_count--;
+ (*tmem_pamops.free)(obj->objnode_tree_root,
+ obj->pool, NULL, 0, true);
+ } else {
+ tmem_objnode_node_destroy(obj, obj->objnode_tree_root,
+ obj->objnode_tree_height);
+ tmem_objnode_free(obj->objnode_tree_root);
+ obj->objnode_tree_height = 0;
+ }
+ obj->objnode_tree_root = NULL;
+ (*tmem_pamops.free_obj)(obj->pool, obj);
+}
+
+/*
+ * Tmem is operated on by a set of well-defined actions:
+ * "put", "get", "flush", "flush_object", "new pool" and "destroy pool".
+ * (The tmem ABI allows for subpages and exchanges but these operations
+ * are not included in this implementation.)
+ *
+ * These "tmem core" operations are implemented in the following functions.
+ */
+
+/*
+ * "Put" a page, e.g. copy a page from the kernel into newly allocated
+ * PAM space (if such space is available). Tmem_put is complicated by
+ * a corner case: What if a page with matching handle already exists in
+ * tmem? To guarantee coherency, one of two actions is necessary: Either
+ * the data for the page must be overwritten, or the page must be
+ * "flushed" so that the data is not accessible to a subsequent "get".
+ * Since these "duplicate puts" are relatively rare, this implementation
+ * always flushes for simplicity.
+ */
+int tmem_put(struct tmem_pool *pool, struct tmem_oid *oidp, uint32_t index,
+ char *data, size_t size, bool raw, int ephemeral)
+{
+ struct tmem_obj *obj = NULL, *objfound = NULL, *objnew = NULL;
+ void *pampd = NULL, *pampd_del = NULL;
+ int ret = -ENOMEM;
+ struct tmem_hashbucket *hb;
+
+ hb = &pool->hashbucket[tmem_oid_hash(oidp)];
+ spin_lock(&hb->lock);
+ obj = objfound = tmem_obj_find(hb, oidp);
+ if (obj != NULL) {
+ pampd = tmem_pampd_lookup_in_obj(objfound, index);
+ if (pampd != NULL) {
+ /* if found, is a dup put, flush the old one */
+ pampd_del = tmem_pampd_delete_from_obj(obj, index);
+ BUG_ON(pampd_del != pampd);
+ (*tmem_pamops.free)(pampd, pool, oidp, index, true);
+ if (obj->pampd_count == 0) {
+ objnew = obj;
+ objfound = NULL;
+ }
+ pampd = NULL;
+ }
+ } else {
+ obj = objnew = (*tmem_hostops.obj_alloc)(pool);
+ if (unlikely(obj == NULL)) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ tmem_obj_init(obj, hb, pool, oidp);
+ }
+ BUG_ON(obj == NULL);
+ BUG_ON(((objnew != obj) && (objfound != obj)) || (objnew == objfound));
+ pampd = (*tmem_pamops.create)(data, size, raw, ephemeral,
+ obj->pool, &obj->oid, index);
+ if (unlikely(pampd == NULL))
+ goto free;
+ ret = tmem_pampd_add_to_obj(obj, index, pampd);
+ if (unlikely(ret == -ENOMEM))
+ /* may have partially built objnode tree ("stump") */
+ goto delete_and_free;
+ goto out;
+
+delete_and_free:
+ (void)tmem_pampd_delete_from_obj(obj, index);
+free:
+ if (pampd)
+ (*tmem_pamops.free)(pampd, pool, NULL, 0, true);
+ if (objnew) {
+ tmem_obj_free(objnew, hb);
+ (*tmem_hostops.obj_free)(objnew, pool);
+ }
+out:
+ spin_unlock(&hb->lock);
+ return ret;
+}
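+
+/*
+ * Illustrative caller sketch (an assumption about host usage, not part
+ * of this file): because the hashbucket lock is also taken from contexts
+ * with interrupts disabled, a host typically wraps tmem_put() like:
+ *
+ * local_irq_save(flags);
+ * ret = tmem_put(pool, &oid, index, data, size, false,
+ * is_ephemeral(pool));
+ * local_irq_restore(flags);
+ */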
+
+void *tmem_localify_get_pampd(struct tmem_pool *pool, struct tmem_oid *oidp,
+ uint32_t index, struct tmem_obj **ret_obj,
+ void **saved_hb)
+{
+ struct tmem_hashbucket *hb;
+ struct tmem_obj *obj = NULL;
+ void *pampd = NULL;
+
+ hb = &pool->hashbucket[tmem_oid_hash(oidp)];
+ spin_lock(&hb->lock);
+ obj = tmem_obj_find(hb, oidp);
+ if (likely(obj != NULL))
+ pampd = tmem_pampd_lookup_in_obj(obj, index);
+ *ret_obj = obj;
+ *saved_hb = (void *)hb;
+ /* note, hashbucket remains locked */
+ return pampd;
+}
+
+void tmem_localify_finish(struct tmem_obj *obj, uint32_t index,
+ void *pampd, void *saved_hb, bool delete)
+{
+ struct tmem_hashbucket *hb = (struct tmem_hashbucket *)saved_hb;
+
+ BUG_ON(!spin_is_locked(&hb->lock));
+ if (pampd != NULL) {
+ BUG_ON(obj == NULL);
+ (void)tmem_pampd_replace_in_obj(obj, index, pampd, 1);
+ } else if (delete) {
+ BUG_ON(obj == NULL);
+ (void)tmem_pampd_delete_from_obj(obj, index);
+ }
+ spin_unlock(&hb->lock);
+}
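+
+/*
+ * Illustrative pairing (an assumed caller, not part of this file):
+ * tmem_localify_get_pampd() returns with the hashbucket still locked,
+ * and the caller must hand the opaque saved_hb back to
+ * tmem_localify_finish(), which drops the lock:
+ *
+ * pampd = tmem_localify_get_pampd(pool, &oid, index, &obj, &saved_hb);
+ * ...copy the remote data into place...
+ * tmem_localify_finish(obj, index, pampd, saved_hb, delete);
+ */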
+
+static int tmem_repatriate(void **ppampd, struct tmem_hashbucket *hb,
+ struct tmem_pool *pool, struct tmem_oid *oidp,
+ uint32_t index, bool free, char *data)
+{
+ void *old_pampd = *ppampd, *new_pampd = NULL;
+ bool intransit = false;
+ int ret = 0;
+
+ if (!is_ephemeral(pool))
+ new_pampd = (*tmem_pamops.repatriate_preload)(
+ old_pampd, pool, oidp, index, &intransit);
+ if (intransit)
+ ret = -EAGAIN;
+ else if (new_pampd != NULL)
+ *ppampd = new_pampd;
+ /* must release the hb->lock else repatriate can't sleep */
+ spin_unlock(&hb->lock);
+ if (!intransit)
+ ret = (*tmem_pamops.repatriate)(old_pampd, new_pampd, pool,
+ oidp, index, free, data);
+ return ret;
+}
+
+/*
+ * "Get" a page, e.g. if one can be found, copy the tmem page with the
+ * matching handle from PAM space to the kernel. By tmem definition,
+ * when a "get" is successful on an ephemeral page, the page is "flushed",
+ * and when a "get" is successful on a persistent page, the page is retained
+ * in tmem. Note that to preserve
+ * coherency, "get" can never be skipped if tmem contains the data.
+ * That is, if a get is done with a certain handle and fails, any
+ * subsequent "get" must also fail (unless of course there is a
+ * "put" done with the same handle).
+ */
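+
+/*
+ * get_and_free semantics (derived from the test below): 1 forces the
+ * page to be freed by this get, -1 always retains it, and 0 applies the
+ * default policy, i.e. free on ephemeral pools, retain on persistent
+ * ones.
+ */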
+int tmem_get(struct tmem_pool *pool, struct tmem_oid *oidp, uint32_t index,
+ char *data, size_t *size, bool raw, int get_and_free)
+{
+ struct tmem_obj *obj;
+ void *pampd;
+ bool ephemeral = is_ephemeral(pool);
+ int ret = -1;
+ struct tmem_hashbucket *hb;
+ bool free = (get_and_free == 1) || ((get_and_free == 0) && ephemeral);
+ bool lock_held = false;
+ void **ppampd;
+
+again:
+ hb = &pool->hashbucket[tmem_oid_hash(oidp)];
+ spin_lock(&hb->lock);
+ lock_held = true;
+ obj = tmem_obj_find(hb, oidp);
+ if (obj == NULL)
+ goto out;
+ ppampd = __tmem_pampd_lookup_in_obj(obj, index);
+ if (ppampd == NULL)
+ goto out;
+ if (tmem_pamops.is_remote(*ppampd)) {
+ ret = tmem_repatriate(ppampd, hb, pool, oidp,
+ index, free, data);
+ lock_held = false; /* note hb->lock has been unlocked */
+ if (ret == -EAGAIN) {
+ /* rare I think, but should cond_resched()??? */
+ usleep_range(10, 1000);
+ goto again;
+ } else if (ret != 0) {
+ if (ret != -ENOENT)
+ pr_err("UNTESTED case in tmem_get, ret=%d\n",
+ ret);
+ ret = -1;
+ goto out;
+ }
+ goto out;
+ }
+ if (free)
+ pampd = tmem_pampd_delete_from_obj(obj, index);
+ else
+ pampd = tmem_pampd_lookup_in_obj(obj, index);
+ if (pampd == NULL)
+ goto out;
+ if (free) {
+ if (obj->pampd_count == 0) {
+ tmem_obj_free(obj, hb);
+ (*tmem_hostops.obj_free)(obj, pool);
+ obj = NULL;
+ }
+ }
+ if (free)
+ ret = (*tmem_pamops.get_data_and_free)(
+ data, size, raw, pampd, pool, oidp, index);
+ else
+ ret = (*tmem_pamops.get_data)(
+ data, size, raw, pampd, pool, oidp, index);
+ if (ret < 0)
+ goto out;
+ ret = 0;
+out:
+ if (lock_held)
+ spin_unlock(&hb->lock);
+ return ret;
+}
+
+/*
+ * If a page in tmem matches the handle, "flush" this page from tmem such
+ * that any subsequent "get" does not succeed (unless, of course, there
+ * was another "put" with the same handle).
+ */
+int tmem_flush_page(struct tmem_pool *pool,
+ struct tmem_oid *oidp, uint32_t index)
+{
+ struct tmem_obj *obj;
+ void *pampd;
+ int ret = -1;
+ struct tmem_hashbucket *hb;
+
+ hb = &pool->hashbucket[tmem_oid_hash(oidp)];
+ spin_lock(&hb->lock);
+ obj = tmem_obj_find(hb, oidp);
+ if (obj == NULL)
+ goto out;
+ pampd = tmem_pampd_delete_from_obj(obj, index);
+ if (pampd == NULL)
+ goto out;
+ (*tmem_pamops.free)(pampd, pool, oidp, index, true);
+ if (obj->pampd_count == 0) {
+ tmem_obj_free(obj, hb);
+ (*tmem_hostops.obj_free)(obj, pool);
+ }
+ ret = 0;
+
+out:
+ spin_unlock(&hb->lock);
+ return ret;
+}
+
+/*
+ * If a page in tmem matches the handle, replace the page so that any
+ * subsequent "get" gets the new page. Returns the result of the
+ * pamops replace_in_obj callback, or -1 if no matching object was found.
+ */
+int tmem_replace(struct tmem_pool *pool, struct tmem_oid *oidp,
+ uint32_t index, void *new_pampd)
+{
+ struct tmem_obj *obj;
+ int ret = -1;
+ struct tmem_hashbucket *hb;
+
+ hb = &pool->hashbucket[tmem_oid_hash(oidp)];
+ spin_lock(&hb->lock);
+ obj = tmem_obj_find(hb, oidp);
+ if (obj == NULL)
+ goto out;
+ new_pampd = tmem_pampd_replace_in_obj(obj, index, new_pampd, 0);
+ ret = (*tmem_pamops.replace_in_obj)(new_pampd, obj);
+out:
+ spin_unlock(&hb->lock);
+ return ret;
+}
+
+/*
+ * "Flush" all pages in tmem matching this oid.
+ */
+int tmem_flush_object(struct tmem_pool *pool, struct tmem_oid *oidp)
+{
+ struct tmem_obj *obj;
+ struct tmem_hashbucket *hb;
+ int ret = -1;
+
+ hb = &pool->hashbucket[tmem_oid_hash(oidp)];
+ spin_lock(&hb->lock);
+ obj = tmem_obj_find(hb, oidp);
+ if (obj == NULL)
+ goto out;
+ tmem_pampd_destroy_all_in_obj(obj);
+ tmem_obj_free(obj, hb);
+ (*tmem_hostops.obj_free)(obj, pool);
+ ret = 0;
+
+out:
+ spin_unlock(&hb->lock);
+ return ret;
+}
+
+/*
+ * "Flush" all pages (and tmem_objs) from this tmem_pool and disable
+ * all subsequent access to this tmem_pool.
+ */
+int tmem_destroy_pool(struct tmem_pool *pool)
+{
+ int ret = -1;
+
+ if (pool == NULL)
+ goto out;
+ tmem_pool_flush(pool, 1);
+ ret = 0;
+out:
+ return ret;
+}
+
+static LIST_HEAD(tmem_global_pool_list);
+
+/*
+ * Initialize a new tmem_pool with the provided flags; the pool id
+ * itself is assigned by the tmem host implementation.
+ */
+void tmem_new_pool(struct tmem_pool *pool, uint32_t flags)
+{
+ int persistent = flags & TMEM_POOL_PERSIST;
+ int shared = flags & TMEM_POOL_SHARED;
+ struct tmem_hashbucket *hb = &pool->hashbucket[0];
+ int i;
+
+ for (i = 0; i < TMEM_HASH_BUCKETS; i++, hb++) {
+ hb->obj_rb_root = RB_ROOT;
+ spin_lock_init(&hb->lock);
+ }
+ INIT_LIST_HEAD(&pool->pool_list);
+ atomic_set(&pool->obj_count, 0);
+ SET_SENTINEL(pool, POOL);
+ list_add_tail(&pool->pool_list, &tmem_global_pool_list);
+ pool->persistent = persistent;
+ pool->shared = shared;
+}
diff --git a/drivers/staging/ramster/tmem.h b/drivers/staging/ramster/tmem.h
new file mode 100644
index 00000000000..47f1918c831
--- /dev/null
+++ b/drivers/staging/ramster/tmem.h
@@ -0,0 +1,244 @@
+/*
+ * tmem.h
+ *
+ * Transcendent memory
+ *
+ * Copyright (c) 2009-2011, Dan Magenheimer, Oracle Corp.
+ */
+
+#ifndef _TMEM_H_
+#define _TMEM_H_
+
+#include <linux/highmem.h>
+#include <linux/hash.h>
+#include <linux/atomic.h>
+
+/*
+ * These are pre-defined by the Xen<->Linux ABI
+ */
+#define TMEM_PUT_PAGE 4
+#define TMEM_GET_PAGE 5
+#define TMEM_FLUSH_PAGE 6
+#define TMEM_FLUSH_OBJECT 7
+#define TMEM_POOL_PERSIST 1
+#define TMEM_POOL_SHARED 2
+#define TMEM_POOL_PRECOMPRESSED 4
+#define TMEM_POOL_PAGESIZE_SHIFT 4
+#define TMEM_POOL_PAGESIZE_MASK 0xf
+#define TMEM_POOL_RESERVED_BITS 0x00ffff00
+
+/*
+ * sentinels have proven very useful for debugging but can be removed
+ * or disabled before final merge.
+ */
+#define SENTINELS
+#ifdef SENTINELS
+#define DECL_SENTINEL uint32_t sentinel;
+#define SET_SENTINEL(_x, _y) ((_x)->sentinel = _y##_SENTINEL)
+#define INVERT_SENTINEL(_x, _y) ((_x)->sentinel = ~_y##_SENTINEL)
+#define ASSERT_SENTINEL(_x, _y) WARN_ON((_x)->sentinel != _y##_SENTINEL)
+#define ASSERT_INVERTED_SENTINEL(_x, _y) WARN_ON((_x)->sentinel != ~_y##_SENTINEL)
+#else
+#define DECL_SENTINEL
+#define SET_SENTINEL(_x, _y) do { } while (0)
+#define INVERT_SENTINEL(_x, _y) do { } while (0)
+#define ASSERT_SENTINEL(_x, _y) do { } while (0)
+#define ASSERT_INVERTED_SENTINEL(_x, _y) do { } while (0)
+#endif
+
+#define ASSERT_SPINLOCK(_l) WARN_ON(!spin_is_locked(_l))
+
+/*
+ * A pool is the highest-level data structure managed by tmem and
+ * usually corresponds to a large independent set of pages such as
+ * a filesystem. Each pool has an id, and certain attributes and counters.
+ * It also contains a set of hash buckets, each of which contains an rbtree
+ * of objects and a lock to manage concurrency within the pool.
+ */
+
+#define TMEM_HASH_BUCKET_BITS 8
+#define TMEM_HASH_BUCKETS (1<<TMEM_HASH_BUCKET_BITS)
+
+struct tmem_hashbucket {
+ struct rb_root obj_rb_root;
+ spinlock_t lock;
+};
+
+struct tmem_pool {
+ void *client; /* "up" for some clients, avoids table lookup */
+ struct list_head pool_list;
+ uint32_t pool_id;
+ bool persistent;
+ bool shared;
+ atomic_t obj_count;
+ atomic_t refcount;
+ struct tmem_hashbucket hashbucket[TMEM_HASH_BUCKETS];
+ DECL_SENTINEL
+};
+
+#define is_persistent(_p) ((_p)->persistent)
+#define is_ephemeral(_p) (!(_p)->persistent)
+
+/*
+ * An object id ("oid") is large: 192 bits (to ensure, for example, files
+ * in a modern filesystem can be uniquely identified).
+ */
+
+struct tmem_oid {
+ uint64_t oid[3];
+};
+
+struct tmem_xhandle {
+ uint8_t client_id;
+ uint8_t xh_data_cksum;
+ uint16_t xh_data_size;
+ uint16_t pool_id;
+ struct tmem_oid oid;
+ uint32_t index;
+ void *extra;
+};
+
+static inline struct tmem_xhandle tmem_xhandle_fill(uint16_t client_id,
+ struct tmem_pool *pool,
+ struct tmem_oid *oidp,
+ uint32_t index)
+{
+ struct tmem_xhandle xh;
+ xh.client_id = client_id;
+ xh.xh_data_cksum = (uint8_t)-1;
+ xh.xh_data_size = (uint16_t)-1;
+ xh.pool_id = pool->pool_id;
+ xh.oid = *oidp;
+ xh.index = index;
+ return xh;
+}
+
+static inline void tmem_oid_set_invalid(struct tmem_oid *oidp)
+{
+ oidp->oid[0] = oidp->oid[1] = oidp->oid[2] = -1UL;
+}
+
+static inline bool tmem_oid_valid(struct tmem_oid *oidp)
+{
+ return oidp->oid[0] != -1UL || oidp->oid[1] != -1UL ||
+ oidp->oid[2] != -1UL;
+}
+
+static inline int tmem_oid_compare(struct tmem_oid *left,
+ struct tmem_oid *right)
+{
+ int ret;
+
+ if (left->oid[2] == right->oid[2]) {
+ if (left->oid[1] == right->oid[1]) {
+ if (left->oid[0] == right->oid[0])
+ ret = 0;
+ else if (left->oid[0] < right->oid[0])
+ ret = -1;
+ else
+ ret = 1;
+ } else if (left->oid[1] < right->oid[1])
+ ret = -1;
+ else
+ ret = 1;
+ } else if (left->oid[2] < right->oid[2])
+ ret = -1;
+ else
+ ret = 1;
+ return ret;
+}
+
+static inline unsigned tmem_oid_hash(struct tmem_oid *oidp)
+{
+ return hash_long(oidp->oid[0] ^ oidp->oid[1] ^ oidp->oid[2],
+ TMEM_HASH_BUCKET_BITS);
+}
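+
+/*
+ * Example: the three 64-bit oid words are XORed together and folded by
+ * hash_long() into TMEM_HASH_BUCKET_BITS == 8 bits, so every handle maps
+ * to one of the 256 hashbuckets of its pool.
+ */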
+
+/*
+ * A tmem_obj contains an identifier (oid), pointers to the parent
+ * pool and the rb_tree to which it belongs, counters, and an ordered
+ * set of pampds, structured in a radix-tree-like tree. The intermediate
+ * nodes of the tree are called tmem_objnodes.
+ */
+
+struct tmem_objnode;
+
+struct tmem_obj {
+ struct tmem_oid oid;
+ struct tmem_pool *pool;
+ struct rb_node rb_tree_node;
+ struct tmem_objnode *objnode_tree_root;
+ unsigned int objnode_tree_height;
+ unsigned long objnode_count;
+ long pampd_count;
+ /*
+ * In the current ramster design, all pages belonging to an object
+ * reside on the same remotenode; extra records that node's number
+ * so a flush-object operation can specify it.
+ */
+ void *extra; /* for use by pampd implementation */
+ DECL_SENTINEL
+};
+
+#define OBJNODE_TREE_MAP_SHIFT 6
+#define OBJNODE_TREE_MAP_SIZE (1UL << OBJNODE_TREE_MAP_SHIFT)
+#define OBJNODE_TREE_MAP_MASK (OBJNODE_TREE_MAP_SIZE-1)
+#define OBJNODE_TREE_INDEX_BITS (8 /* CHAR_BIT */ * sizeof(unsigned long))
+#define OBJNODE_TREE_MAX_PATH \
+ (OBJNODE_TREE_INDEX_BITS/OBJNODE_TREE_MAP_SHIFT + 2)
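+
+/* e.g. on 64-bit: OBJNODE_TREE_MAX_PATH == 64/6 + 2 == 12 path entries */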
+
+struct tmem_objnode {
+ struct tmem_obj *obj;
+ DECL_SENTINEL
+ void *slots[OBJNODE_TREE_MAP_SIZE];
+ unsigned int slots_in_use;
+};
+
+/* pampd abstract datatype methods provided by the PAM implementation */
+struct tmem_pamops {
+ void *(*create)(char *, size_t, bool, int,
+ struct tmem_pool *, struct tmem_oid *, uint32_t);
+ int (*get_data)(char *, size_t *, bool, void *, struct tmem_pool *,
+ struct tmem_oid *, uint32_t);
+ int (*get_data_and_free)(char *, size_t *, bool, void *,
+ struct tmem_pool *, struct tmem_oid *,
+ uint32_t);
+ void (*free)(void *, struct tmem_pool *,
+ struct tmem_oid *, uint32_t, bool);
+ void (*free_obj)(struct tmem_pool *, struct tmem_obj *);
+ bool (*is_remote)(void *);
+ void *(*repatriate_preload)(void *, struct tmem_pool *,
+ struct tmem_oid *, uint32_t, bool *);
+ int (*repatriate)(void *, void *, struct tmem_pool *,
+ struct tmem_oid *, uint32_t, bool, void *);
+ void (*new_obj)(struct tmem_obj *);
+ int (*replace_in_obj)(void *, struct tmem_obj *);
+};
+extern void tmem_register_pamops(struct tmem_pamops *m);
+
+/* memory allocation methods provided by the host implementation */
+struct tmem_hostops {
+ struct tmem_obj *(*obj_alloc)(struct tmem_pool *);
+ void (*obj_free)(struct tmem_obj *, struct tmem_pool *);
+ struct tmem_objnode *(*objnode_alloc)(struct tmem_pool *);
+ void (*objnode_free)(struct tmem_objnode *, struct tmem_pool *);
+};
+extern void tmem_register_hostops(struct tmem_hostops *m);
+
+/* core tmem accessor functions */
+extern int tmem_put(struct tmem_pool *, struct tmem_oid *, uint32_t index,
+ char *, size_t, bool, int);
+extern int tmem_get(struct tmem_pool *, struct tmem_oid *, uint32_t index,
+ char *, size_t *, bool, int);
+extern int tmem_replace(struct tmem_pool *, struct tmem_oid *, uint32_t index,
+ void *);
+extern void *tmem_localify_get_pampd(struct tmem_pool *, struct tmem_oid *,
+ uint32_t index, struct tmem_obj **,
+ void **);
+extern void tmem_localify_finish(struct tmem_obj *, uint32_t index,
+ void *, void *, bool);
+extern int tmem_flush_page(struct tmem_pool *, struct tmem_oid *,
+ uint32_t index);
+extern int tmem_flush_object(struct tmem_pool *, struct tmem_oid *);
+extern int tmem_destroy_pool(struct tmem_pool *);
+extern void tmem_new_pool(struct tmem_pool *, uint32_t);
+#endif /* _TMEM_H */
diff --git a/drivers/staging/zram/xvmalloc.c b/drivers/staging/ramster/xvmalloc.c
index 1f9c5082b6d..1f9c5082b6d 100644
--- a/drivers/staging/zram/xvmalloc.c
+++ b/drivers/staging/ramster/xvmalloc.c
diff --git a/drivers/staging/zram/xvmalloc.h b/drivers/staging/ramster/xvmalloc.h
index 5b1a81aa5fa..5b1a81aa5fa 100644
--- a/drivers/staging/zram/xvmalloc.h
+++ b/drivers/staging/ramster/xvmalloc.h
diff --git a/drivers/staging/zram/xvmalloc_int.h b/drivers/staging/ramster/xvmalloc_int.h
index b5f1f7febcf..b5f1f7febcf 100644
--- a/drivers/staging/zram/xvmalloc_int.h
+++ b/drivers/staging/ramster/xvmalloc_int.h
diff --git a/drivers/staging/ramster/zcache-main.c b/drivers/staging/ramster/zcache-main.c
new file mode 100644
index 00000000000..36d53ed9d71
--- /dev/null
+++ b/drivers/staging/ramster/zcache-main.c
@@ -0,0 +1,3320 @@
+/*
+ * zcache-main.c
+ *
+ * Copyright (c) 2010-2012, Dan Magenheimer, Oracle Corp.
+ * Copyright (c) 2010,2011, Nitin Gupta
+ *
+ * Zcache provides an in-kernel "host implementation" for transcendent memory
+ * and, thus indirectly, for cleancache and frontswap. Zcache includes two
+ * page-accessible memory [1] interfaces, both utilizing lzo1x compression:
+ * 1) "compression buddies" ("zbud") is used for ephemeral pages
+ * 2) xvmalloc is used for persistent pages.
+ * Xvmalloc (based on the TLSF allocator) has very low fragmentation and
+ * so maximizes space efficiency, while zbud allows pairs (and potentially,
+ * in the future, more than a pair of) compressed pages to be closely linked
+ * so that reclaiming can be done via the kernel's physical-page-oriented
+ * "shrinker" interface.
+ *
+ * [1] For a definition of page-accessible memory (aka PAM), see:
+ * http://marc.info/?l=linux-mm&m=127811271605009
+ * RAMSTER TODO:
+ * - handle remotifying of buddied pages (see zbud_remotify_zbpg)
+ * - kernel boot params: nocleancache/nofrontswap don't always work?!?
+ */
+
+#include <linux/module.h>
+#include <linux/cpu.h>
+#include <linux/highmem.h>
+#include <linux/list.h>
+#include <linux/lzo.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/atomic.h>
+#include <linux/math64.h>
+#include "tmem.h"
+#include "zcache.h"
+#include "ramster.h"
+#include "cluster/tcp.h"
+
+#include "xvmalloc.h" /* temporary until change to zsmalloc */
+
+#define RAMSTER_TESTING
+
+#if (!defined(CONFIG_CLEANCACHE) && !defined(CONFIG_FRONTSWAP))
+#error "ramster is useless without CONFIG_CLEANCACHE or CONFIG_FRONTSWAP"
+#endif
+#ifdef CONFIG_CLEANCACHE
+#include <linux/cleancache.h>
+#endif
+#ifdef CONFIG_FRONTSWAP
+#include <linux/frontswap.h>
+#endif
+
+enum ramster_remotify_op {
+ RAMSTER_REMOTIFY_EPH_PUT,
+ RAMSTER_REMOTIFY_PERS_PUT,
+ RAMSTER_REMOTIFY_FLUSH_PAGE,
+ RAMSTER_REMOTIFY_FLUSH_OBJ,
+ RAMSTER_INTRANSIT_PERS
+};
+
+struct ramster_remotify_hdr {
+ enum ramster_remotify_op op;
+ struct list_head list;
+};
+
+#define ZBH_SENTINEL 0x43214321
+#define ZBPG_SENTINEL 0xdeadbeef
+
+#define ZBUD_MAX_BUDS 2
+
+struct zbud_hdr {
+ struct ramster_remotify_hdr rem_op;
+ uint16_t client_id;
+ uint16_t pool_id;
+ struct tmem_oid oid;
+ uint32_t index;
+ uint16_t size; /* compressed size in bytes, zero means unused */
+ DECL_SENTINEL
+};
+
+#define ZVH_SENTINEL 0x43214321
+static const int zv_max_page_size = (PAGE_SIZE / 8) * 7;
+
+struct zv_hdr {
+ struct ramster_remotify_hdr rem_op;
+ uint16_t client_id;
+ uint16_t pool_id;
+ struct tmem_oid oid;
+ uint32_t index;
+ DECL_SENTINEL
+};
+
+struct flushlist_node {
+ struct ramster_remotify_hdr rem_op;
+ struct tmem_xhandle xh;
+};
+
+union {
+ struct ramster_remotify_hdr rem_op;
+ struct zv_hdr zv;
+ struct zbud_hdr zbud;
+ struct flushlist_node flist;
+} remotify_list_node;
+
+static LIST_HEAD(zcache_rem_op_list);
+static DEFINE_SPINLOCK(zcache_rem_op_list_lock);
+
+#if 0
+/* this is more aggressive but may cause other problems? */
+#define ZCACHE_GFP_MASK (GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN)
+#else
+#define ZCACHE_GFP_MASK \
+ (__GFP_FS | __GFP_NORETRY | __GFP_NOWARN | __GFP_NOMEMALLOC)
+#endif
+
+#define MAX_POOLS_PER_CLIENT 16
+
+#define MAX_CLIENTS 16
+#define LOCAL_CLIENT ((uint16_t)-1)
+
+MODULE_LICENSE("GPL");
+
+struct zcache_client {
+ struct tmem_pool *tmem_pools[MAX_POOLS_PER_CLIENT];
+ struct xv_pool *xvpool;
+ bool allocated;
+ atomic_t refcount;
+};
+
+static struct zcache_client zcache_host;
+static struct zcache_client zcache_clients[MAX_CLIENTS];
+
+static inline uint16_t get_client_id_from_client(struct zcache_client *cli)
+{
+ BUG_ON(cli == NULL);
+ if (cli == &zcache_host)
+ return LOCAL_CLIENT;
+ return cli - &zcache_clients[0];
+}
+
+static inline bool is_local_client(struct zcache_client *cli)
+{
+ return cli == &zcache_host;
+}
+
+/**********
+ * Compression buddies ("zbud") provides for packing two (or, possibly
+ * in the future, more) compressed ephemeral pages into a single "raw"
+ * (physical) page and tracking them with data structures so that
+ * the raw pages can be easily reclaimed.
+ *
+ * A zbud page ("zbpg") is an aligned page containing a list_head,
+ * a lock, and two "zbud headers". The remainder of the physical
+ * page is divided up into aligned 64-byte "chunks" which contain
+ * the compressed data for zero, one, or two zbuds. Each zbpg
+ * resides on: (1) an "unused list" if it has no zbuds; (2) a
+ * "buddied" list if it is fully populated with two zbuds; or
+ * (3) one of PAGE_SIZE/64 "unbuddied" lists indexed by how many chunks
+ * the one unbuddied zbud uses. The data inside a zbpg cannot be
+ * read or written unless the zbpg's lock is held.
+ */
+
+struct zbud_page {
+ struct list_head bud_list;
+ spinlock_t lock;
+ struct zbud_hdr buddy[ZBUD_MAX_BUDS];
+ DECL_SENTINEL
+ /* followed by NUM_CHUNK aligned CHUNK_SIZE-byte chunks */
+};
+
+#define CHUNK_SHIFT 6
+#define CHUNK_SIZE (1 << CHUNK_SHIFT)
+#define CHUNK_MASK (~(CHUNK_SIZE-1))
+#define NCHUNKS (((PAGE_SIZE - sizeof(struct zbud_page)) & \
+ CHUNK_MASK) >> CHUNK_SHIFT)
+#define MAX_CHUNK (NCHUNKS-1)
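+
+/*
+ * Worked example (assuming PAGE_SIZE == 4096 and a struct zbud_page
+ * header occupying between 65 and 128 bytes): NCHUNKS == 62, so
+ * MAX_CHUNK == 61 and a single zbud holds at most 61 * 64 == 3904
+ * compressed bytes (see zbud_max_buddy_size()).
+ */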
+
+static struct {
+ struct list_head list;
+ unsigned count;
+} zbud_unbuddied[NCHUNKS];
+/* list N contains pages with N chunks USED and NCHUNKS-N unused */
+/* element 0 is never used but optimizing that isn't worth it */
+static unsigned long zbud_cumul_chunk_counts[NCHUNKS];
+
+struct list_head zbud_buddied_list;
+static unsigned long zcache_zbud_buddied_count;
+
+/* protects the buddied list and all unbuddied lists */
+static DEFINE_SPINLOCK(zbud_budlists_spinlock);
+
+static atomic_t zcache_zbud_curr_raw_pages;
+static atomic_t zcache_zbud_curr_zpages;
+static unsigned long zcache_zbud_curr_zbytes;
+static unsigned long zcache_zbud_cumul_zpages;
+static unsigned long zcache_zbud_cumul_zbytes;
+static unsigned long zcache_compress_poor;
+static unsigned long zcache_policy_percent_exceeded;
+static unsigned long zcache_mean_compress_poor;
+
+/*
+ * RAMster counters
+ * - Remote pages are pages with a local pampd but the data is remote
+ * - Foreign pages are pages stored locally but belonging to another node
+ */
+static atomic_t ramster_remote_pers_pages = ATOMIC_INIT(0);
+static unsigned long ramster_pers_remotify_enable;
+static unsigned long ramster_eph_remotify_enable;
+static unsigned long ramster_eph_pages_remoted;
+static unsigned long ramster_eph_pages_remote_failed;
+static unsigned long ramster_pers_pages_remoted;
+static unsigned long ramster_pers_pages_remote_failed;
+static unsigned long ramster_pers_pages_remote_nomem;
+static unsigned long ramster_remote_objects_flushed;
+static unsigned long ramster_remote_object_flushes_failed;
+static unsigned long ramster_remote_pages_flushed;
+static unsigned long ramster_remote_page_flushes_failed;
+static unsigned long ramster_remote_eph_pages_succ_get;
+static unsigned long ramster_remote_pers_pages_succ_get;
+static unsigned long ramster_remote_eph_pages_unsucc_get;
+static unsigned long ramster_remote_pers_pages_unsucc_get;
+static atomic_t ramster_curr_flnode_count = ATOMIC_INIT(0);
+static unsigned long ramster_curr_flnode_count_max;
+static atomic_t ramster_foreign_eph_pampd_count = ATOMIC_INIT(0);
+static unsigned long ramster_foreign_eph_pampd_count_max;
+static atomic_t ramster_foreign_pers_pampd_count = ATOMIC_INIT(0);
+static unsigned long ramster_foreign_pers_pampd_count_max;
+
+/* forward references */
+static void *zcache_get_free_page(void);
+static void zcache_free_page(void *p);
+
+/*
+ * zbud helper functions
+ */
+
+static inline unsigned zbud_max_buddy_size(void)
+{
+ return MAX_CHUNK << CHUNK_SHIFT;
+}
+
+static inline unsigned zbud_size_to_chunks(unsigned size)
+{
+ BUG_ON(size == 0 || size > zbud_max_buddy_size());
+ return (size + CHUNK_SIZE - 1) >> CHUNK_SHIFT;
+}
+
+static inline int zbud_budnum(struct zbud_hdr *zh)
+{
+ unsigned offset = (unsigned long)zh & (PAGE_SIZE - 1);
+ struct zbud_page *zbpg = NULL;
+ unsigned budnum = -1U;
+ int i;
+
+ for (i = 0; i < ZBUD_MAX_BUDS; i++)
+ if (offset == offsetof(typeof(*zbpg), buddy[i])) {
+ budnum = i;
+ break;
+ }
+ BUG_ON(budnum == -1U);
+ return budnum;
+}
+
+static char *zbud_data(struct zbud_hdr *zh, unsigned size)
+{
+ struct zbud_page *zbpg;
+ char *p;
+ unsigned budnum;
+
+ ASSERT_SENTINEL(zh, ZBH);
+ budnum = zbud_budnum(zh);
+ BUG_ON(size == 0 || size > zbud_max_buddy_size());
+ zbpg = container_of(zh, struct zbud_page, buddy[budnum]);
+ ASSERT_SPINLOCK(&zbpg->lock);
+ p = (char *)zbpg;
+ if (budnum == 0)
+ p += ((sizeof(struct zbud_page) + CHUNK_SIZE - 1) &
+ CHUNK_MASK);
+ else if (budnum == 1)
+ p += PAGE_SIZE - ((size + CHUNK_SIZE - 1) & CHUNK_MASK);
+ return p;
+}
+
+static void zbud_copy_from_pampd(char *data, size_t *size, struct zbud_hdr *zh)
+{
+ struct zbud_page *zbpg;
+ char *p;
+ unsigned budnum;
+
+ ASSERT_SENTINEL(zh, ZBH);
+ budnum = zbud_budnum(zh);
+ zbpg = container_of(zh, struct zbud_page, buddy[budnum]);
+ spin_lock(&zbpg->lock);
+ BUG_ON(zh->size > *size);
+ p = (char *)zbpg;
+ if (budnum == 0)
+ p += ((sizeof(struct zbud_page) + CHUNK_SIZE - 1) &
+ CHUNK_MASK);
+ else if (budnum == 1)
+ p += PAGE_SIZE - ((zh->size + CHUNK_SIZE - 1) & CHUNK_MASK);
+ /* client should be filled in by caller */
+ memcpy(data, p, zh->size);
+ *size = zh->size;
+ spin_unlock(&zbpg->lock);
+}
+
+/*
+ * zbud raw page management
+ */
+
+static struct zbud_page *zbud_alloc_raw_page(void)
+{
+ struct zbud_page *zbpg = NULL;
+ struct zbud_hdr *zh0, *zh1;
+ zbpg = zcache_get_free_page();
+ if (likely(zbpg != NULL)) {
+ INIT_LIST_HEAD(&zbpg->bud_list);
+ zh0 = &zbpg->buddy[0];
+ zh1 = &zbpg->buddy[1];
+ spin_lock_init(&zbpg->lock);
+ atomic_inc(&zcache_zbud_curr_raw_pages);
+ SET_SENTINEL(zbpg, ZBPG);
+ zh0->size = 0;
+ zh1->size = 0;
+ tmem_oid_set_invalid(&zh0->oid);
+ tmem_oid_set_invalid(&zh1->oid);
+ }
+ return zbpg;
+}
+
+static void zbud_free_raw_page(struct zbud_page *zbpg)
+{
+ struct zbud_hdr *zh0 = &zbpg->buddy[0], *zh1 = &zbpg->buddy[1];
+
+ ASSERT_SENTINEL(zbpg, ZBPG);
+ BUG_ON(!list_empty(&zbpg->bud_list));
+ ASSERT_SPINLOCK(&zbpg->lock);
+ BUG_ON(zh0->size != 0 || tmem_oid_valid(&zh0->oid));
+ BUG_ON(zh1->size != 0 || tmem_oid_valid(&zh1->oid));
+ INVERT_SENTINEL(zbpg, ZBPG);
+ spin_unlock(&zbpg->lock);
+ atomic_dec(&zcache_zbud_curr_raw_pages);
+ zcache_free_page(zbpg);
+}
+
+/*
+ * core zbud handling routines
+ */
+
+static unsigned zbud_free(struct zbud_hdr *zh)
+{
+ unsigned size;
+
+ ASSERT_SENTINEL(zh, ZBH);
+ BUG_ON(!tmem_oid_valid(&zh->oid));
+ size = zh->size;
+ BUG_ON(zh->size == 0 || zh->size > zbud_max_buddy_size());
+ zh->size = 0;
+ tmem_oid_set_invalid(&zh->oid);
+ INVERT_SENTINEL(zh, ZBH);
+ zcache_zbud_curr_zbytes -= size;
+ atomic_dec(&zcache_zbud_curr_zpages);
+ return size;
+}
+
+static void zbud_free_and_delist(struct zbud_hdr *zh)
+{
+ unsigned chunks;
+ struct zbud_hdr *zh_other;
+ unsigned budnum = zbud_budnum(zh), size;
+ struct zbud_page *zbpg =
+ container_of(zh, struct zbud_page, buddy[budnum]);
+
+ /*
+ * FIXME: should be BUG_ON, but the pool destruction path doesn't
+ * disable interrupts: tmem_destroy_pool() ->
+ * tmem_pampd_destroy_all_in_obj() -> tmem_objnode_node_destroy() ->
+ * zcache_pampd_free()
+ */
+ WARN_ON(!irqs_disabled());
+ spin_lock(&zbpg->lock);
+ if (list_empty(&zbpg->bud_list)) {
+ /* ignore zombie page... see zbud_evict_pages() */
+ spin_unlock(&zbpg->lock);
+ return;
+ }
+ size = zbud_free(zh);
+ ASSERT_SPINLOCK(&zbpg->lock);
+ zh_other = &zbpg->buddy[(budnum == 0) ? 1 : 0];
+ if (zh_other->size == 0) { /* was unbuddied: unlist and free */
+ chunks = zbud_size_to_chunks(size);
+ spin_lock(&zbud_budlists_spinlock);
+ BUG_ON(list_empty(&zbud_unbuddied[chunks].list));
+ list_del_init(&zbpg->bud_list);
+ zbud_unbuddied[chunks].count--;
+ spin_unlock(&zbud_budlists_spinlock);
+ zbud_free_raw_page(zbpg);
+ } else { /* was buddied: move remaining buddy to unbuddied list */
+ chunks = zbud_size_to_chunks(zh_other->size);
+ spin_lock(&zbud_budlists_spinlock);
+ list_del_init(&zbpg->bud_list);
+ zcache_zbud_buddied_count--;
+ list_add_tail(&zbpg->bud_list, &zbud_unbuddied[chunks].list);
+ zbud_unbuddied[chunks].count++;
+ spin_unlock(&zbud_budlists_spinlock);
+ spin_unlock(&zbpg->lock);
+ }
+}
+
+static struct zbud_hdr *zbud_create(uint16_t client_id, uint16_t pool_id,
+ struct tmem_oid *oid,
+ uint32_t index, struct page *page,
+ void *cdata, unsigned size)
+{
+ struct zbud_hdr *zh0, *zh1, *zh = NULL;
+ struct zbud_page *zbpg = NULL, *ztmp;
+ unsigned nchunks;
+ char *to;
+ int i, found_good_buddy = 0;
+
+ nchunks = zbud_size_to_chunks(size);
+ for (i = MAX_CHUNK - nchunks + 1; i > 0; i--) {
+ spin_lock(&zbud_budlists_spinlock);
+ if (!list_empty(&zbud_unbuddied[i].list)) {
+ list_for_each_entry_safe(zbpg, ztmp,
+ &zbud_unbuddied[i].list, bud_list) {
+ if (spin_trylock(&zbpg->lock)) {
+ found_good_buddy = i;
+ goto found_unbuddied;
+ }
+ }
+ }
+ spin_unlock(&zbud_budlists_spinlock);
+ }
+ /* didn't find a good buddy, try allocating a new page */
+ zbpg = zbud_alloc_raw_page();
+ if (unlikely(zbpg == NULL))
+ goto out;
+ /* ok, have a fresh page: list it as unbuddied and use buddy[0] */
+ spin_lock(&zbud_budlists_spinlock);
+ spin_lock(&zbpg->lock);
+ list_add_tail(&zbpg->bud_list, &zbud_unbuddied[nchunks].list);
+ zbud_unbuddied[nchunks].count++;
+ zh = &zbpg->buddy[0];
+ goto init_zh;
+
+found_unbuddied:
+ ASSERT_SPINLOCK(&zbpg->lock);
+ zh0 = &zbpg->buddy[0];
+ zh1 = &zbpg->buddy[1];
+ BUG_ON(!((zh0->size == 0) ^ (zh1->size == 0)));
+ if (zh0->size != 0) { /* buddy0 in use, buddy1 is vacant */
+ ASSERT_SENTINEL(zh0, ZBH);
+ zh = zh1;
+ } else if (zh1->size != 0) { /* buddy1 in use, buddy0 is vacant */
+ ASSERT_SENTINEL(zh1, ZBH);
+ zh = zh0;
+ } else
+ BUG();
+ list_del_init(&zbpg->bud_list);
+ zbud_unbuddied[found_good_buddy].count--;
+ list_add_tail(&zbpg->bud_list, &zbud_buddied_list);
+ zcache_zbud_buddied_count++;
+
+init_zh:
+ SET_SENTINEL(zh, ZBH);
+ zh->size = size;
+ zh->index = index;
+ zh->oid = *oid;
+ zh->pool_id = pool_id;
+ zh->client_id = client_id;
+ to = zbud_data(zh, size);
+ memcpy(to, cdata, size);
+ spin_unlock(&zbpg->lock);
+ spin_unlock(&zbud_budlists_spinlock);
+ zbud_cumul_chunk_counts[nchunks]++;
+ atomic_inc(&zcache_zbud_curr_zpages);
+ zcache_zbud_cumul_zpages++;
+ zcache_zbud_curr_zbytes += size;
+ zcache_zbud_cumul_zbytes += size;
+out:
+ return zh;
+}
+
+static int zbud_decompress(struct page *page, struct zbud_hdr *zh)
+{
+ struct zbud_page *zbpg;
+ unsigned budnum = zbud_budnum(zh);
+ size_t out_len = PAGE_SIZE;
+ char *to_va, *from_va;
+ unsigned size;
+ int ret = 0;
+
+ zbpg = container_of(zh, struct zbud_page, buddy[budnum]);
+ spin_lock(&zbpg->lock);
+ if (list_empty(&zbpg->bud_list)) {
+ /* ignore zombie page... see zbud_evict_pages() */
+ ret = -EINVAL;
+ goto out;
+ }
+ ASSERT_SENTINEL(zh, ZBH);
+ BUG_ON(zh->size == 0 || zh->size > zbud_max_buddy_size());
+ to_va = kmap_atomic(page, KM_USER0);
+ size = zh->size;
+ from_va = zbud_data(zh, size);
+ ret = lzo1x_decompress_safe(from_va, size, to_va, &out_len);
+ BUG_ON(ret != LZO_E_OK);
+ BUG_ON(out_len != PAGE_SIZE);
+ kunmap_atomic(to_va, KM_USER0);
+out:
+ spin_unlock(&zbpg->lock);
+ return ret;
+}
+
+/*
+ * The following routines handle shrinking of ephemeral pages by evicting
+ * pages "least valuable" first.
+ */
+
+static unsigned long zcache_evicted_raw_pages;
+static unsigned long zcache_evicted_buddied_pages;
+static unsigned long zcache_evicted_unbuddied_pages;
+
+static struct tmem_pool *zcache_get_pool_by_id(uint16_t cli_id,
+ uint16_t poolid);
+static void zcache_put_pool(struct tmem_pool *pool);
+
+/*
+ * Flush and free all zbuds in a zbpg, then free the pageframe
+ */
+static void zbud_evict_zbpg(struct zbud_page *zbpg)
+{
+ struct zbud_hdr *zh;
+ int i, j;
+ uint32_t pool_id[ZBUD_MAX_BUDS], client_id[ZBUD_MAX_BUDS];
+ uint32_t index[ZBUD_MAX_BUDS];
+ struct tmem_oid oid[ZBUD_MAX_BUDS];
+ struct tmem_pool *pool;
+ unsigned long flags;
+
+ ASSERT_SPINLOCK(&zbpg->lock);
+ for (i = 0, j = 0; i < ZBUD_MAX_BUDS; i++) {
+ zh = &zbpg->buddy[i];
+ if (zh->size) {
+ client_id[j] = zh->client_id;
+ pool_id[j] = zh->pool_id;
+ oid[j] = zh->oid;
+ index[j] = zh->index;
+ j++;
+ }
+ }
+ spin_unlock(&zbpg->lock);
+ for (i = 0; i < j; i++) {
+ pool = zcache_get_pool_by_id(client_id[i], pool_id[i]);
+ BUG_ON(pool == NULL);
+ local_irq_save(flags);
+ /* these flushes should dispose of any local storage */
+ tmem_flush_page(pool, &oid[i], index[i]);
+ local_irq_restore(flags);
+ zcache_put_pool(pool);
+ }
+}
+
+/*
+ * Free nr pages. This code is funky because we want to hold the locks
+ * protecting various lists for as short a time as possible, and in some
+ * circumstances the list may change asynchronously when the list lock is
+ * not held. In some cases we also trylock not only to avoid waiting on a
+ * page in use by another cpu, but also to avoid potential deadlock due to
+ * lock inversion.
+ */
+static void zbud_evict_pages(int nr)
+{
+ struct zbud_page *zbpg;
+ int i, newly_unused_pages = 0;
+
+ /* now try freeing unbuddied pages, starting with least space avail */
+ for (i = 0; i < MAX_CHUNK; i++) {
+retry_unbud_list_i:
+ spin_lock_bh(&zbud_budlists_spinlock);
+ if (list_empty(&zbud_unbuddied[i].list)) {
+ spin_unlock_bh(&zbud_budlists_spinlock);
+ continue;
+ }
+ list_for_each_entry(zbpg, &zbud_unbuddied[i].list, bud_list) {
+ if (unlikely(!spin_trylock(&zbpg->lock)))
+ continue;
+ zbud_unbuddied[i].count--;
+ spin_unlock(&zbud_budlists_spinlock);
+ zcache_evicted_unbuddied_pages++;
+ /* want budlists unlocked when doing zbpg eviction */
+ zbud_evict_zbpg(zbpg);
+ newly_unused_pages++;
+ local_bh_enable();
+ if (--nr <= 0)
+ goto evict_unused;
+ goto retry_unbud_list_i;
+ }
+ spin_unlock_bh(&zbud_budlists_spinlock);
+ }
+
+ /* as a last resort, free buddied pages */
+retry_bud_list:
+ spin_lock_bh(&zbud_budlists_spinlock);
+ if (list_empty(&zbud_buddied_list)) {
+ spin_unlock_bh(&zbud_budlists_spinlock);
+ goto evict_unused;
+ }
+ list_for_each_entry(zbpg, &zbud_buddied_list, bud_list) {
+ if (unlikely(!spin_trylock(&zbpg->lock)))
+ continue;
+ zcache_zbud_buddied_count--;
+ spin_unlock(&zbud_budlists_spinlock);
+ zcache_evicted_buddied_pages++;
+ /* want budlists unlocked when doing zbpg eviction */
+ zbud_evict_zbpg(zbpg);
+ newly_unused_pages++;
+ local_bh_enable();
+ if (--nr <= 0)
+ goto evict_unused;
+ goto retry_bud_list;
+ }
+ spin_unlock_bh(&zbud_budlists_spinlock);
+
+evict_unused:
+ return;
+}
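+
+/*
+ * Illustrative sketch, not built (all names hypothetical): the
+ * trylock-and-restart idiom zbud_evict_pages() uses above, shown in
+ * isolation.  The list lock is dropped before the expensive per-item
+ * work, so the walk restarts from the head afterwards because the
+ * list may have changed in the meantime.
+ */
+#if 0
+static void sk_evict_some(struct list_head *head, spinlock_t *list_lock,
+			  int nr)
+{
+	struct sk_item *it;
+
+restart:
+	spin_lock_bh(list_lock);
+	list_for_each_entry(it, head, list) {
+		if (unlikely(!spin_trylock(&it->lock)))
+			continue;	/* busy; skip to avoid deadlock */
+		list_del_init(&it->list);
+		spin_unlock(list_lock);
+		sk_evict_one(it);	/* drops it->lock */
+		local_bh_enable();
+		if (--nr <= 0)
+			return;
+		goto restart;
+	}
+	spin_unlock_bh(list_lock);
+}
+#endif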
+
+static DEFINE_PER_CPU(unsigned char *, zcache_remoteputmem);
+
+static int zbud_remotify_zbud(struct tmem_xhandle *xh, char *data,
+ size_t size)
+{
+ struct tmem_pool *pool;
+ int i, remotenode, ret = -1;
+ unsigned char cksum, *p;
+ unsigned long flags;
+
+ for (p = data, cksum = 0, i = 0; i < size; i++)
+		cksum += *p++;
+ ret = ramster_remote_put(xh, data, size, true, &remotenode);
+ if (ret == 0) {
+ /* data was successfully remoted so change the local version
+ * to point to the remote node where it landed */
+ pool = zcache_get_pool_by_id(LOCAL_CLIENT, xh->pool_id);
+ BUG_ON(pool == NULL);
+ local_irq_save(flags);
+ /* tmem_replace will also free up any local space */
+ (void)tmem_replace(pool, &xh->oid, xh->index,
+ pampd_make_remote(remotenode, size, cksum));
+ local_irq_restore(flags);
+ zcache_put_pool(pool);
+ ramster_eph_pages_remoted++;
+ ret = 0;
+ } else
+ ramster_eph_pages_remote_failed++;
+ return ret;
+}
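+
+/*
+ * Illustrative sketch, not built: the simple 8-bit additive checksum
+ * computed over the compressed payload above before remoting it, as
+ * later carried in the pampd via pampd_make_remote().
+ */
+#if 0
+static unsigned char sk_cksum(const unsigned char *data, size_t size)
+{
+	unsigned char cksum = 0;
+	size_t i;
+
+	for (i = 0; i < size; i++)
+		cksum += data[i];
+	return cksum;
+}
+#endif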
+
+static int zbud_remotify_zbpg(struct zbud_page *zbpg)
+{
+ struct zbud_hdr *zh1, *zh2 = NULL;
+ struct tmem_xhandle xh1, xh2 = { 0 };
+ char *data1 = NULL, *data2 = NULL;
+ size_t size1 = 0, size2 = 0;
+ int ret = 0;
+ unsigned char *tmpmem = __get_cpu_var(zcache_remoteputmem);
+
+ ASSERT_SPINLOCK(&zbpg->lock);
+ if (zbpg->buddy[0].size == 0)
+ zh1 = &zbpg->buddy[1];
+ else if (zbpg->buddy[1].size == 0)
+ zh1 = &zbpg->buddy[0];
+ else {
+ zh1 = &zbpg->buddy[0];
+ zh2 = &zbpg->buddy[1];
+ }
+ /* don't remotify pages that are already remotified */
+ if (zh1->client_id != LOCAL_CLIENT)
+ zh1 = NULL;
+ if ((zh2 != NULL) && (zh2->client_id != LOCAL_CLIENT))
+ zh2 = NULL;
+
+ /* copy the data and metadata so can release lock */
+ if (zh1 != NULL) {
+ xh1.client_id = zh1->client_id;
+ xh1.pool_id = zh1->pool_id;
+ xh1.oid = zh1->oid;
+ xh1.index = zh1->index;
+ size1 = zh1->size;
+		memcpy(tmpmem, zbud_data(zh1, size1), size1);
+		data1 = tmpmem;
+ tmpmem += size1;
+ }
+ if (zh2 != NULL) {
+ xh2.client_id = zh2->client_id;
+ xh2.pool_id = zh2->pool_id;
+ xh2.oid = zh2->oid;
+ xh2.index = zh2->index;
+ size2 = zh2->size;
+ memcpy(tmpmem, zbud_data(zh2, size2), size2);
+ data2 = tmpmem;
+ }
+ spin_unlock(&zbpg->lock);
+ preempt_enable();
+
+ /* OK, no locks held anymore, remotify one or both zbuds */
+ if (zh1 != NULL)
+ ret = zbud_remotify_zbud(&xh1, data1, size1);
+ if (zh2 != NULL)
+ ret |= zbud_remotify_zbud(&xh2, data2, size2);
+ return ret;
+}
+
+void zbud_remotify_pages(int nr)
+{
+ struct zbud_page *zbpg;
+ int i, ret;
+
+ /*
+ * for now just try remotifying unbuddied pages, starting with
+ * least space avail
+ */
+ for (i = 0; i < MAX_CHUNK; i++) {
+retry_unbud_list_i:
+ preempt_disable(); /* enable in zbud_remotify_zbpg */
+ spin_lock_bh(&zbud_budlists_spinlock);
+ if (list_empty(&zbud_unbuddied[i].list)) {
+ spin_unlock_bh(&zbud_budlists_spinlock);
+ preempt_enable();
+ continue; /* next i in for loop */
+ }
+ list_for_each_entry(zbpg, &zbud_unbuddied[i].list, bud_list) {
+ if (unlikely(!spin_trylock(&zbpg->lock)))
+ continue; /* next list_for_each_entry */
+ zbud_unbuddied[i].count--;
+ /* want budlists unlocked when doing zbpg remotify */
+ spin_unlock_bh(&zbud_budlists_spinlock);
+ ret = zbud_remotify_zbpg(zbpg);
+ /* preemption is re-enabled in zbud_remotify_zbpg */
+ if (ret == 0) {
+ if (--nr <= 0)
+ goto out;
+ goto retry_unbud_list_i;
+ }
+ /* if fail to remotify any page, quit */
+ pr_err("TESTING zbud_remotify_pages failed on page,"
+ " trying to re-add\n");
+ spin_lock_bh(&zbud_budlists_spinlock);
+ spin_lock(&zbpg->lock);
+ list_add_tail(&zbpg->bud_list, &zbud_unbuddied[i].list);
+ zbud_unbuddied[i].count++;
+ spin_unlock(&zbpg->lock);
+ spin_unlock_bh(&zbud_budlists_spinlock);
+ pr_err("TESTING zbud_remotify_pages failed on page,"
+ " finished re-add\n");
+ goto out;
+ }
+ spin_unlock_bh(&zbud_budlists_spinlock);
+ preempt_enable();
+ }
+
+next_buddied_zbpg:
+ preempt_disable(); /* enable in zbud_remotify_zbpg */
+ spin_lock_bh(&zbud_budlists_spinlock);
+ if (list_empty(&zbud_buddied_list))
+ goto unlock_out;
+ list_for_each_entry(zbpg, &zbud_buddied_list, bud_list) {
+ if (unlikely(!spin_trylock(&zbpg->lock)))
+ continue; /* next list_for_each_entry */
+ zcache_zbud_buddied_count--;
+ /* want budlists unlocked when doing zbpg remotify */
+ spin_unlock_bh(&zbud_budlists_spinlock);
+ ret = zbud_remotify_zbpg(zbpg);
+ /* preemption is re-enabled in zbud_remotify_zbpg */
+ if (ret == 0) {
+ if (--nr <= 0)
+ goto out;
+ goto next_buddied_zbpg;
+ }
+ /* if fail to remotify any page, quit */
+ pr_err("TESTING zbud_remotify_pages failed on BUDDIED page,"
+ " trying to re-add\n");
+ spin_lock_bh(&zbud_budlists_spinlock);
+ spin_lock(&zbpg->lock);
+ list_add_tail(&zbpg->bud_list, &zbud_buddied_list);
+ zcache_zbud_buddied_count++;
+ spin_unlock(&zbpg->lock);
+ spin_unlock_bh(&zbud_budlists_spinlock);
+ pr_err("TESTING zbud_remotify_pages failed on BUDDIED page,"
+ " finished re-add\n");
+ goto out;
+ }
+unlock_out:
+ spin_unlock_bh(&zbud_budlists_spinlock);
+ preempt_enable();
+out:
+ return;
+}
+
+/* the "flush list" asynchronously collects pages to remotely flush */
+#define FLUSH_ENTIRE_OBJECT ((uint32_t)-1)
+static void ramster_flnode_free(struct flushlist_node *,
+ struct tmem_pool *);
+
+static void zcache_remote_flush_page(struct flushlist_node *flnode)
+{
+ struct tmem_xhandle *xh;
+ int remotenode, ret;
+
+ preempt_disable();
+ xh = &flnode->xh;
+ remotenode = flnode->xh.client_id;
+ ret = ramster_remote_flush(xh, remotenode);
+ if (ret >= 0)
+ ramster_remote_pages_flushed++;
+ else
+ ramster_remote_page_flushes_failed++;
+ preempt_enable_no_resched();
+ ramster_flnode_free(flnode, NULL);
+}
+
+static void zcache_remote_flush_object(struct flushlist_node *flnode)
+{
+ struct tmem_xhandle *xh;
+ int remotenode, ret;
+
+ preempt_disable();
+ xh = &flnode->xh;
+ remotenode = flnode->xh.client_id;
+ ret = ramster_remote_flush_object(xh, remotenode);
+ if (ret >= 0)
+ ramster_remote_objects_flushed++;
+ else
+ ramster_remote_object_flushes_failed++;
+ preempt_enable_no_resched();
+ ramster_flnode_free(flnode, NULL);
+}
+
+static void zcache_remote_eph_put(struct zbud_hdr *zbud)
+{
+ /* FIXME */
+}
+
+static void zcache_remote_pers_put(struct zv_hdr *zv)
+{
+ struct tmem_xhandle xh;
+ uint16_t size;
+ bool ephemeral;
+ int remotenode, ret = -1;
+ char *data;
+ struct tmem_pool *pool;
+ unsigned long flags;
+ unsigned char cksum;
+ char *p;
+ int i;
+ unsigned char *tmpmem = __get_cpu_var(zcache_remoteputmem);
+
+ ASSERT_SENTINEL(zv, ZVH);
+ BUG_ON(zv->client_id != LOCAL_CLIENT);
+ local_bh_disable();
+ xh.client_id = zv->client_id;
+ xh.pool_id = zv->pool_id;
+ xh.oid = zv->oid;
+ xh.index = zv->index;
+ size = xv_get_object_size(zv) - sizeof(*zv);
+ BUG_ON(size == 0 || size > zv_max_page_size);
+ data = (char *)zv + sizeof(*zv);
+ for (p = data, cksum = 0, i = 0; i < size; i++)
+		cksum += *p++;
+ memcpy(tmpmem, data, size);
+ data = tmpmem;
+ pool = zcache_get_pool_by_id(zv->client_id, zv->pool_id);
+ ephemeral = is_ephemeral(pool);
+ zcache_put_pool(pool);
+ /* now OK to release lock set in caller */
+ spin_unlock(&zcache_rem_op_list_lock);
+ local_bh_enable();
+ preempt_disable();
+ ret = ramster_remote_put(&xh, data, size, ephemeral, &remotenode);
+ preempt_enable_no_resched();
+ if (ret != 0) {
+ /*
+ * This is some form of a memory leak... if the remote put
+ * fails, there will never be another attempt to remotify
+ * this page. But since we've dropped the zv pointer,
+ * the page may have been freed or the data replaced
+ * so we can't just "put it back" in the remote op list.
+ * Even if we could, not sure where to put it in the list
+ * because there may be flushes that must be strictly
+ * ordered vs the put. So leave this as a FIXME for now.
+ * But count them so we know if it becomes a problem.
+ */
+ ramster_pers_pages_remote_failed++;
+ goto out;
+ } else
+ atomic_inc(&ramster_remote_pers_pages);
+ ramster_pers_pages_remoted++;
+ /*
+ * data was successfully remoted so change the local version to
+ * point to the remote node where it landed
+ */
+ local_bh_disable();
+ pool = zcache_get_pool_by_id(LOCAL_CLIENT, xh.pool_id);
+ local_irq_save(flags);
+ (void)tmem_replace(pool, &xh.oid, xh.index,
+ pampd_make_remote(remotenode, size, cksum));
+ local_irq_restore(flags);
+ zcache_put_pool(pool);
+ local_bh_enable();
+out:
+ return;
+}
+
+static void zcache_do_remotify_ops(int nr)
+{
+ struct ramster_remotify_hdr *rem_op;
+ union remotify_list_node *u;
+
+ while (1) {
+ if (!nr)
+ goto out;
+ spin_lock(&zcache_rem_op_list_lock);
+ if (list_empty(&zcache_rem_op_list)) {
+ spin_unlock(&zcache_rem_op_list_lock);
+ goto out;
+ }
+ rem_op = list_first_entry(&zcache_rem_op_list,
+ struct ramster_remotify_hdr, list);
+ list_del_init(&rem_op->list);
+ if (rem_op->op != RAMSTER_REMOTIFY_PERS_PUT)
+ spin_unlock(&zcache_rem_op_list_lock);
+ u = (union remotify_list_node *)rem_op;
+ switch (rem_op->op) {
+ case RAMSTER_REMOTIFY_EPH_PUT:
+			BUG();	/* eph remotify not yet implemented; see zcache_remote_eph_put() */
+ zcache_remote_eph_put((struct zbud_hdr *)rem_op);
+ break;
+ case RAMSTER_REMOTIFY_PERS_PUT:
+ zcache_remote_pers_put((struct zv_hdr *)rem_op);
+ break;
+ case RAMSTER_REMOTIFY_FLUSH_PAGE:
+ zcache_remote_flush_page((struct flushlist_node *)u);
+ break;
+ case RAMSTER_REMOTIFY_FLUSH_OBJ:
+ zcache_remote_flush_object((struct flushlist_node *)u);
+ break;
+ default:
+ BUG();
+		}
+		nr--;
+	}
+out:
+ return;
+}
+
+/*
+ * Communicate interface revision with userspace
+ */
+#include "cluster/ramster_nodemanager.h"
+static unsigned long ramster_interface_revision = R2NM_API_VERSION;
+
+/*
+ * For now, just push over a few pages every few seconds to
+ * ensure that it basically works
+ */
+static struct workqueue_struct *ramster_remotify_workqueue;
+static void ramster_remotify_process(struct work_struct *work);
+static DECLARE_DELAYED_WORK(ramster_remotify_worker,
+ ramster_remotify_process);
+
+static void ramster_remotify_queue_delayed_work(unsigned long delay)
+{
+ if (!queue_delayed_work(ramster_remotify_workqueue,
+ &ramster_remotify_worker, delay))
+ pr_err("ramster_remotify: bad workqueue\n");
+}
+
+
+static int use_frontswap;
+static int use_cleancache;
+static int ramster_remote_target_nodenum = -1;
+static void ramster_remotify_process(struct work_struct *work)
+{
+ static bool remotify_in_progress;
+
+ BUG_ON(irqs_disabled());
+ if (remotify_in_progress)
+ ramster_remotify_queue_delayed_work(HZ);
+ else if (ramster_remote_target_nodenum != -1) {
+ remotify_in_progress = true;
+#ifdef CONFIG_CLEANCACHE
+ if (use_cleancache && ramster_eph_remotify_enable)
+ zbud_remotify_pages(5000); /* FIXME is this a good number? */
+#endif
+#ifdef CONFIG_FRONTSWAP
+ if (use_frontswap && ramster_pers_remotify_enable)
+ zcache_do_remotify_ops(500); /* FIXME is this a good number? */
+#endif
+ remotify_in_progress = false;
+ ramster_remotify_queue_delayed_work(HZ);
+ }
+}
+
+static void ramster_remotify_init(void)
+{
+ unsigned long n = 60UL;
+ ramster_remotify_workqueue =
+ create_singlethread_workqueue("ramster_remotify");
+ ramster_remotify_queue_delayed_work(n * HZ);
+}
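+
+/*
+ * Illustrative sketch, not built (names hypothetical): the
+ * self-rearming delayed-work pattern used above.  Each pass re-queues
+ * itself HZ jiffies (one second) out, giving a periodic push without
+ * a dedicated kernel thread.
+ */
+#if 0
+static struct workqueue_struct *sk_wq;
+static void sk_process(struct work_struct *work);
+static DECLARE_DELAYED_WORK(sk_worker, sk_process);
+
+static void sk_process(struct work_struct *work)
+{
+	/* ... push a batch of pages to the remote node ... */
+	queue_delayed_work(sk_wq, &sk_worker, HZ);	/* re-arm */
+}
+#endif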
+
+
+static void zbud_init(void)
+{
+ int i;
+
+ INIT_LIST_HEAD(&zbud_buddied_list);
+ zcache_zbud_buddied_count = 0;
+ for (i = 0; i < NCHUNKS; i++) {
+ INIT_LIST_HEAD(&zbud_unbuddied[i].list);
+ zbud_unbuddied[i].count = 0;
+ }
+}
+
+#ifdef CONFIG_SYSFS
+/*
+ * These sysfs routines show a nice distribution of how many zbpg's are
+ * currently (and have ever been placed) in each unbuddied list. It's fun
+ * to watch but can probably go away before final merge.
+ */
+static int zbud_show_unbuddied_list_counts(char *buf)
+{
+ int i;
+ char *p = buf;
+
+ for (i = 0; i < NCHUNKS; i++)
+ p += sprintf(p, "%u ", zbud_unbuddied[i].count);
+ return p - buf;
+}
+
+static int zbud_show_cumul_chunk_counts(char *buf)
+{
+ unsigned long i, chunks = 0, total_chunks = 0, sum_total_chunks = 0;
+ unsigned long total_chunks_lte_21 = 0, total_chunks_lte_32 = 0;
+ unsigned long total_chunks_lte_42 = 0;
+ char *p = buf;
+
+ for (i = 0; i < NCHUNKS; i++) {
+ p += sprintf(p, "%lu ", zbud_cumul_chunk_counts[i]);
+ chunks += zbud_cumul_chunk_counts[i];
+ total_chunks += zbud_cumul_chunk_counts[i];
+ sum_total_chunks += i * zbud_cumul_chunk_counts[i];
+ if (i == 21)
+ total_chunks_lte_21 = total_chunks;
+ if (i == 32)
+ total_chunks_lte_32 = total_chunks;
+ if (i == 42)
+ total_chunks_lte_42 = total_chunks;
+ }
+ p += sprintf(p, "<=21:%lu <=32:%lu <=42:%lu, mean:%lu\n",
+ total_chunks_lte_21, total_chunks_lte_32, total_chunks_lte_42,
+ chunks == 0 ? 0 : sum_total_chunks / chunks);
+ return p - buf;
+}
+#endif
+
+/**********
+ * This "zv" PAM implementation combines the TLSF-based xvMalloc
+ * with lzo1x compression to maximize the amount of data that can
+ * be packed into a physical page.
+ *
+ * Zv represents a PAM page with the index and object (plus a "size" value
+ * necessary for decompression) immediately preceding the compressed data.
+ */
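+
+/*
+ * Illustrative sketch, not built: the layout the comment above
+ * describes.  The real struct zv_hdr is defined earlier in this file;
+ * the exact field set shown here is an assumption.  The compressed
+ * data immediately follows the header, so each xvmalloc allocation is
+ * sizeof(struct zv_hdr) + clen bytes.
+ */
+#if 0
+struct sk_zv_layout {
+	uint16_t client_id;
+	uint16_t pool_id;
+	struct tmem_oid oid;
+	uint32_t index;
+	/* zsize is recovered as xv_get_object_size() - sizeof(header) */
+	char cdata[];		/* clen bytes of lzo1x-compressed data */
+};
+#endif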
+
+/* rudimentary policy limits */
+/* total number of persistent pages may not exceed this percentage */
+static unsigned int zv_page_count_policy_percent = 75;
+/*
+ * byte count defining poor compression; pages with greater zsize will be
+ * rejected
+ */
+static unsigned int zv_max_zsize = (PAGE_SIZE / 8) * 7;
+/*
+ * byte count defining poor *mean* compression; pages with greater zsize
+ * will be rejected until sufficient better-compressed pages are accepted
+ * driving the mean below this threshold
+ */
+static unsigned int zv_max_mean_zsize = (PAGE_SIZE / 8) * 5;
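+
+/*
+ * Worked example of the two thresholds above, assuming PAGE_SIZE is
+ * 4096: zv_max_zsize = (4096 / 8) * 7 = 3584 bytes and
+ * zv_max_mean_zsize = (4096 / 8) * 5 = 2560 bytes.  A page compressing
+ * to 3000 bytes is admitted only while the mean zsize of already-
+ * accepted pages is at or below 2560; one compressing to 3700 bytes is
+ * always rejected.
+ */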
+
+static atomic_t zv_curr_dist_counts[NCHUNKS];
+static atomic_t zv_cumul_dist_counts[NCHUNKS];
+
+
+static struct zv_hdr *zv_create(struct zcache_client *cli, uint32_t pool_id,
+ struct tmem_oid *oid, uint32_t index,
+ void *cdata, unsigned clen)
+{
+ struct page *page;
+ struct zv_hdr *zv = NULL;
+ uint32_t offset;
+ int alloc_size = clen + sizeof(struct zv_hdr);
+ int chunks = (alloc_size + (CHUNK_SIZE - 1)) >> CHUNK_SHIFT;
+ int ret;
+
+ BUG_ON(!irqs_disabled());
+ BUG_ON(chunks >= NCHUNKS);
+ ret = xv_malloc(cli->xvpool, clen + sizeof(struct zv_hdr),
+ &page, &offset, ZCACHE_GFP_MASK);
+ if (unlikely(ret))
+ goto out;
+ atomic_inc(&zv_curr_dist_counts[chunks]);
+ atomic_inc(&zv_cumul_dist_counts[chunks]);
+ zv = kmap_atomic(page, KM_USER0) + offset;
+ zv->index = index;
+ zv->oid = *oid;
+ zv->pool_id = pool_id;
+ SET_SENTINEL(zv, ZVH);
+ INIT_LIST_HEAD(&zv->rem_op.list);
+ zv->client_id = get_client_id_from_client(cli);
+ zv->rem_op.op = RAMSTER_REMOTIFY_PERS_PUT;
+ if (zv->client_id == LOCAL_CLIENT) {
+ spin_lock(&zcache_rem_op_list_lock);
+ list_add_tail(&zv->rem_op.list, &zcache_rem_op_list);
+ spin_unlock(&zcache_rem_op_list_lock);
+ }
+ memcpy((char *)zv + sizeof(struct zv_hdr), cdata, clen);
+ kunmap_atomic(zv, KM_USER0);
+out:
+ return zv;
+}
+
+/* similar to zv_create, but just reserve space, no data yet */
+static struct zv_hdr *zv_alloc(struct tmem_pool *pool,
+ struct tmem_oid *oid, uint32_t index,
+ unsigned clen)
+{
+ struct zcache_client *cli = pool->client;
+ struct page *page;
+ struct zv_hdr *zv = NULL;
+ uint32_t offset;
+ int ret;
+
+ BUG_ON(!irqs_disabled());
+ BUG_ON(!is_local_client(pool->client));
+ ret = xv_malloc(cli->xvpool, clen + sizeof(struct zv_hdr),
+ &page, &offset, ZCACHE_GFP_MASK);
+ if (unlikely(ret))
+ goto out;
+ zv = kmap_atomic(page, KM_USER0) + offset;
+ SET_SENTINEL(zv, ZVH);
+ INIT_LIST_HEAD(&zv->rem_op.list);
+ zv->client_id = LOCAL_CLIENT;
+ zv->rem_op.op = RAMSTER_INTRANSIT_PERS;
+ zv->index = index;
+ zv->oid = *oid;
+ zv->pool_id = pool->pool_id;
+ kunmap_atomic(zv, KM_USER0);
+out:
+ return zv;
+}
+
+static void zv_free(struct xv_pool *xvpool, struct zv_hdr *zv)
+{
+ unsigned long flags;
+ struct page *page;
+ uint32_t offset;
+ uint16_t size = xv_get_object_size(zv);
+ int chunks = (size + (CHUNK_SIZE - 1)) >> CHUNK_SHIFT;
+
+ ASSERT_SENTINEL(zv, ZVH);
+ BUG_ON(chunks >= NCHUNKS);
+ atomic_dec(&zv_curr_dist_counts[chunks]);
+ size -= sizeof(*zv);
+ spin_lock(&zcache_rem_op_list_lock);
+	BUG_ON(size == 0);	/* size was already computed above */
+ INVERT_SENTINEL(zv, ZVH);
+ if (!list_empty(&zv->rem_op.list))
+ list_del_init(&zv->rem_op.list);
+ spin_unlock(&zcache_rem_op_list_lock);
+ page = virt_to_page(zv);
+ offset = (unsigned long)zv & ~PAGE_MASK;
+ local_irq_save(flags);
+ xv_free(xvpool, page, offset);
+ local_irq_restore(flags);
+}
+
+static void zv_decompress(struct page *page, struct zv_hdr *zv)
+{
+ size_t clen = PAGE_SIZE;
+ char *to_va;
+ unsigned size;
+ int ret;
+
+ ASSERT_SENTINEL(zv, ZVH);
+ size = xv_get_object_size(zv) - sizeof(*zv);
+ BUG_ON(size == 0);
+ to_va = kmap_atomic(page, KM_USER0);
+ ret = lzo1x_decompress_safe((char *)zv + sizeof(*zv),
+ size, to_va, &clen);
+ kunmap_atomic(to_va, KM_USER0);
+ BUG_ON(ret != LZO_E_OK);
+ BUG_ON(clen != PAGE_SIZE);
+}
+
+static void zv_copy_from_pampd(char *data, size_t *bufsize, struct zv_hdr *zv)
+{
+ unsigned size;
+
+ ASSERT_SENTINEL(zv, ZVH);
+ size = xv_get_object_size(zv) - sizeof(*zv);
+ BUG_ON(size == 0 || size > zv_max_page_size);
+ BUG_ON(size > *bufsize);
+ memcpy(data, (char *)zv + sizeof(*zv), size);
+ *bufsize = size;
+}
+
+static void zv_copy_to_pampd(struct zv_hdr *zv, char *data, size_t size)
+{
+ unsigned zv_size;
+
+ ASSERT_SENTINEL(zv, ZVH);
+ zv_size = xv_get_object_size(zv) - sizeof(*zv);
+ BUG_ON(zv_size != size);
+ BUG_ON(zv_size == 0 || zv_size > zv_max_page_size);
+ memcpy((char *)zv + sizeof(*zv), data, size);
+}
+
+#ifdef CONFIG_SYSFS
+/*
+ * show a distribution of compression stats for zv pages.
+ */
+
+static int zv_curr_dist_counts_show(char *buf)
+{
+ unsigned long i, n, chunks = 0, sum_total_chunks = 0;
+ char *p = buf;
+
+ for (i = 0; i < NCHUNKS; i++) {
+ n = atomic_read(&zv_curr_dist_counts[i]);
+ p += sprintf(p, "%lu ", n);
+ chunks += n;
+ sum_total_chunks += i * n;
+ }
+ p += sprintf(p, "mean:%lu\n",
+ chunks == 0 ? 0 : sum_total_chunks / chunks);
+ return p - buf;
+}
+
+static int zv_cumul_dist_counts_show(char *buf)
+{
+ unsigned long i, n, chunks = 0, sum_total_chunks = 0;
+ char *p = buf;
+
+ for (i = 0; i < NCHUNKS; i++) {
+ n = atomic_read(&zv_cumul_dist_counts[i]);
+ p += sprintf(p, "%lu ", n);
+ chunks += n;
+ sum_total_chunks += i * n;
+ }
+ p += sprintf(p, "mean:%lu\n",
+ chunks == 0 ? 0 : sum_total_chunks / chunks);
+ return p - buf;
+}
+
+/*
+ * setting zv_max_zsize via sysfs causes all persistent (e.g. swap)
+ * pages that don't compress to less than this value (including metadata
+ * overhead) to be rejected. We don't allow the value to get too close
+ * to PAGE_SIZE.
+ */
+static ssize_t zv_max_zsize_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%u\n", zv_max_zsize);
+}
+
+static ssize_t zv_max_zsize_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ unsigned long val;
+ int err;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ err = kstrtoul(buf, 10, &val);
+ if (err || (val == 0) || (val > (PAGE_SIZE / 8) * 7))
+ return -EINVAL;
+ zv_max_zsize = val;
+ return count;
+}
+
+/*
+ * setting zv_max_mean_zsize via sysfs causes all persistent (e.g. swap)
+ * pages that don't compress to less than this value (including metadata
+ * overhead) to be rejected UNLESS the mean compression is also smaller
+ * than this value. In other words, we are load-balancing-by-zsize the
+ * accepted pages. Again, we don't allow the value to get too close
+ * to PAGE_SIZE.
+ */
+static ssize_t zv_max_mean_zsize_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%u\n", zv_max_mean_zsize);
+}
+
+static ssize_t zv_max_mean_zsize_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ unsigned long val;
+ int err;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ err = kstrtoul(buf, 10, &val);
+ if (err || (val == 0) || (val > (PAGE_SIZE / 8) * 7))
+ return -EINVAL;
+ zv_max_mean_zsize = val;
+ return count;
+}
+
+/*
+ * setting zv_page_count_policy_percent via sysfs sets an upper bound of
+ * persistent (e.g. swap) pages that will be retained according to:
+ * (zv_page_count_policy_percent * totalram_pages) / 100)
+ * when that limit is reached, further puts will be rejected (until
+ * some pages have been flushed). Note that, due to compression,
+ * this number may exceed 100; it defaults to 75 and we set an
+ * arbitrary limit of 150. A poor choice will almost certainly result
+ * in OOMs, so this value should only be changed prudently.
+ */
+static ssize_t zv_page_count_policy_percent_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%u\n", zv_page_count_policy_percent);
+}
+
+static ssize_t zv_page_count_policy_percent_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ unsigned long val;
+ int err;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ err = kstrtoul(buf, 10, &val);
+ if (err || (val == 0) || (val > 150))
+ return -EINVAL;
+ zv_page_count_policy_percent = val;
+ return count;
+}
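+
+/*
+ * Worked example of the policy above: with totalram_pages == 1048576
+ * (4GB of 4K pages) and the default zv_page_count_policy_percent of
+ * 75, further puts are rejected once (75 * 1048576) / 100 == 786432
+ * persistent zpages are resident.
+ */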
+
+static struct kobj_attribute zcache_zv_max_zsize_attr = {
+ .attr = { .name = "zv_max_zsize", .mode = 0644 },
+ .show = zv_max_zsize_show,
+ .store = zv_max_zsize_store,
+};
+
+static struct kobj_attribute zcache_zv_max_mean_zsize_attr = {
+ .attr = { .name = "zv_max_mean_zsize", .mode = 0644 },
+ .show = zv_max_mean_zsize_show,
+ .store = zv_max_mean_zsize_store,
+};
+
+static struct kobj_attribute zcache_zv_page_count_policy_percent_attr = {
+ .attr = { .name = "zv_page_count_policy_percent",
+ .mode = 0644 },
+ .show = zv_page_count_policy_percent_show,
+ .store = zv_page_count_policy_percent_store,
+};
+#endif
+
+/*
+ * zcache core code starts here
+ */
+
+/* useful stats not collected by cleancache or frontswap */
+static unsigned long zcache_flush_total;
+static unsigned long zcache_flush_found;
+static unsigned long zcache_flobj_total;
+static unsigned long zcache_flobj_found;
+static unsigned long zcache_failed_eph_puts;
+static unsigned long zcache_nonactive_puts;
+static unsigned long zcache_failed_pers_puts;
+
+/*
+ * Tmem operations assume the poolid implies the invoking client.
+ * Zcache only has one client (the kernel itself): LOCAL_CLIENT.
+ * RAMster has each client numbered by cluster node, and a KVM version
+ * of zcache would have one client per guest and each client might
+ * have a poolid==N.
+ */
+static struct tmem_pool *zcache_get_pool_by_id(uint16_t cli_id, uint16_t poolid)
+{
+ struct tmem_pool *pool = NULL;
+ struct zcache_client *cli = NULL;
+
+ if (cli_id == LOCAL_CLIENT)
+ cli = &zcache_host;
+ else {
+ if (cli_id >= MAX_CLIENTS)
+ goto out;
+		cli = &zcache_clients[cli_id];
+		atomic_inc(&cli->refcount);
+ }
+ if (poolid < MAX_POOLS_PER_CLIENT) {
+ pool = cli->tmem_pools[poolid];
+ if (pool != NULL)
+ atomic_inc(&pool->refcount);
+ }
+out:
+ return pool;
+}
+
+static void zcache_put_pool(struct tmem_pool *pool)
+{
+ struct zcache_client *cli = NULL;
+
+	BUG_ON(pool == NULL);
+ cli = pool->client;
+ atomic_dec(&pool->refcount);
+ atomic_dec(&cli->refcount);
+}
+
+int zcache_new_client(uint16_t cli_id)
+{
+ struct zcache_client *cli = NULL;
+ int ret = -1;
+
+ if (cli_id == LOCAL_CLIENT)
+ cli = &zcache_host;
+ else if ((unsigned int)cli_id < MAX_CLIENTS)
+ cli = &zcache_clients[cli_id];
+ if (cli == NULL)
+ goto out;
+ if (cli->allocated)
+ goto out;
+ cli->allocated = 1;
+#ifdef CONFIG_FRONTSWAP
+ cli->xvpool = xv_create_pool();
+ if (cli->xvpool == NULL)
+ goto out;
+#endif
+ ret = 0;
+out:
+ return ret;
+}
+
+/* counters for debugging */
+static unsigned long zcache_failed_get_free_pages;
+static unsigned long zcache_failed_alloc;
+static unsigned long zcache_put_to_flush;
+
+/*
+ * for now, used named slabs so can easily track usage; later can
+ * either just use kmalloc, or perhaps add a slab-like allocator
+ * to more carefully manage total memory utilization
+ */
+static struct kmem_cache *zcache_objnode_cache;
+static struct kmem_cache *zcache_obj_cache;
+static struct kmem_cache *ramster_flnode_cache;
+static atomic_t zcache_curr_obj_count = ATOMIC_INIT(0);
+static unsigned long zcache_curr_obj_count_max;
+static atomic_t zcache_curr_objnode_count = ATOMIC_INIT(0);
+static unsigned long zcache_curr_objnode_count_max;
+
+/*
+ * to avoid memory allocation recursion (e.g. due to direct reclaim), we
+ * preload all necessary data structures so the hostops callbacks never
+ * actually do a malloc
+ */
+struct zcache_preload {
+ void *page;
+ struct tmem_obj *obj;
+ int nr;
+ struct tmem_objnode *objnodes[OBJNODE_TREE_MAX_PATH];
+ struct flushlist_node *flnode;
+};
+static DEFINE_PER_CPU(struct zcache_preload, zcache_preloads) = { 0, };
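+
+/*
+ * Illustrative sketch, not built (sk_put_page is hypothetical): how
+ * the preload scheme above is meant to be used.  The stash is stocked
+ * with preemption enabled; the hostops callbacks, which may run with
+ * irqs off, then only ever pop from it, so no allocator recursion can
+ * occur.
+ */
+#if 0
+static int sk_put_page(struct tmem_pool *pool)
+{
+	if (zcache_do_preload(pool) != 0)
+		return -ENOMEM;	/* couldn't stock the per-cpu stash */
+	/* ... tmem_put() runs here; obj/objnode/page allocations in the
+	 * hostops callbacks come from the per-cpu stash ... */
+	preempt_enable();	/* zcache_do_preload() left preemption off */
+	return 0;
+}
+#endif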
+
+static int zcache_do_preload(struct tmem_pool *pool)
+{
+ struct zcache_preload *kp;
+ struct tmem_objnode *objnode;
+ struct tmem_obj *obj;
+ struct flushlist_node *flnode;
+ void *page;
+ int ret = -ENOMEM;
+
+ if (unlikely(zcache_objnode_cache == NULL))
+ goto out;
+ if (unlikely(zcache_obj_cache == NULL))
+ goto out;
+ preempt_disable();
+ kp = &__get_cpu_var(zcache_preloads);
+ while (kp->nr < ARRAY_SIZE(kp->objnodes)) {
+ preempt_enable_no_resched();
+ objnode = kmem_cache_alloc(zcache_objnode_cache,
+ ZCACHE_GFP_MASK);
+ if (unlikely(objnode == NULL)) {
+ zcache_failed_alloc++;
+ goto out;
+ }
+ preempt_disable();
+ kp = &__get_cpu_var(zcache_preloads);
+ if (kp->nr < ARRAY_SIZE(kp->objnodes))
+ kp->objnodes[kp->nr++] = objnode;
+ else
+ kmem_cache_free(zcache_objnode_cache, objnode);
+ }
+ preempt_enable_no_resched();
+ obj = kmem_cache_alloc(zcache_obj_cache, ZCACHE_GFP_MASK);
+ if (unlikely(obj == NULL)) {
+ zcache_failed_alloc++;
+ goto out;
+ }
+ flnode = kmem_cache_alloc(ramster_flnode_cache, ZCACHE_GFP_MASK);
+ if (unlikely(flnode == NULL)) {
+ zcache_failed_alloc++;
+ goto out;
+ }
+ if (is_ephemeral(pool)) {
+ page = (void *)__get_free_page(ZCACHE_GFP_MASK);
+ if (unlikely(page == NULL)) {
+ zcache_failed_get_free_pages++;
+ kmem_cache_free(zcache_obj_cache, obj);
+ kmem_cache_free(ramster_flnode_cache, flnode);
+ goto out;
+ }
+ }
+ preempt_disable();
+ kp = &__get_cpu_var(zcache_preloads);
+ if (kp->obj == NULL)
+ kp->obj = obj;
+ else
+ kmem_cache_free(zcache_obj_cache, obj);
+ if (kp->flnode == NULL)
+ kp->flnode = flnode;
+ else
+ kmem_cache_free(ramster_flnode_cache, flnode);
+ if (is_ephemeral(pool)) {
+ if (kp->page == NULL)
+ kp->page = page;
+ else
+ free_page((unsigned long)page);
+ }
+ ret = 0;
+out:
+ return ret;
+}
+
+static int ramster_do_preload_flnode_only(struct tmem_pool *pool)
+{
+ struct zcache_preload *kp;
+ struct flushlist_node *flnode;
+ int ret = -ENOMEM;
+
+ BUG_ON(!irqs_disabled());
+	BUG_ON(ramster_flnode_cache == NULL);
+ kp = &__get_cpu_var(zcache_preloads);
+ flnode = kmem_cache_alloc(ramster_flnode_cache, GFP_ATOMIC);
+ if (unlikely(flnode == NULL) && kp->flnode == NULL)
+ BUG(); /* FIXME handle more gracefully, but how??? */
+ else if (kp->flnode == NULL)
+ kp->flnode = flnode;
+ else
+		kmem_cache_free(ramster_flnode_cache, flnode);
+	ret = 0;	/* a flnode is now guaranteed to be preloaded */
+	return ret;
+}
+
+static void *zcache_get_free_page(void)
+{
+ struct zcache_preload *kp;
+ void *page;
+
+ kp = &__get_cpu_var(zcache_preloads);
+ page = kp->page;
+ BUG_ON(page == NULL);
+ kp->page = NULL;
+ return page;
+}
+
+static void zcache_free_page(void *p)
+{
+ free_page((unsigned long)p);
+}
+
+/*
+ * zcache implementation for tmem host ops
+ */
+
+static struct tmem_objnode *zcache_objnode_alloc(struct tmem_pool *pool)
+{
+ struct tmem_objnode *objnode = NULL;
+ unsigned long count;
+ struct zcache_preload *kp;
+
+ kp = &__get_cpu_var(zcache_preloads);
+ if (kp->nr <= 0)
+ goto out;
+ objnode = kp->objnodes[kp->nr - 1];
+ BUG_ON(objnode == NULL);
+ kp->objnodes[kp->nr - 1] = NULL;
+ kp->nr--;
+ count = atomic_inc_return(&zcache_curr_objnode_count);
+ if (count > zcache_curr_objnode_count_max)
+ zcache_curr_objnode_count_max = count;
+out:
+ return objnode;
+}
+
+static void zcache_objnode_free(struct tmem_objnode *objnode,
+ struct tmem_pool *pool)
+{
+ atomic_dec(&zcache_curr_objnode_count);
+ BUG_ON(atomic_read(&zcache_curr_objnode_count) < 0);
+ kmem_cache_free(zcache_objnode_cache, objnode);
+}
+
+static struct tmem_obj *zcache_obj_alloc(struct tmem_pool *pool)
+{
+ struct tmem_obj *obj = NULL;
+ unsigned long count;
+ struct zcache_preload *kp;
+
+ kp = &__get_cpu_var(zcache_preloads);
+ obj = kp->obj;
+ BUG_ON(obj == NULL);
+ kp->obj = NULL;
+ count = atomic_inc_return(&zcache_curr_obj_count);
+ if (count > zcache_curr_obj_count_max)
+ zcache_curr_obj_count_max = count;
+ return obj;
+}
+
+static void zcache_obj_free(struct tmem_obj *obj, struct tmem_pool *pool)
+{
+ atomic_dec(&zcache_curr_obj_count);
+ BUG_ON(atomic_read(&zcache_curr_obj_count) < 0);
+ kmem_cache_free(zcache_obj_cache, obj);
+}
+
+static struct flushlist_node *ramster_flnode_alloc(struct tmem_pool *pool)
+{
+ struct flushlist_node *flnode = NULL;
+ struct zcache_preload *kp;
+ int count;
+
+ kp = &__get_cpu_var(zcache_preloads);
+ flnode = kp->flnode;
+ BUG_ON(flnode == NULL);
+ kp->flnode = NULL;
+ count = atomic_inc_return(&ramster_curr_flnode_count);
+ if (count > ramster_curr_flnode_count_max)
+ ramster_curr_flnode_count_max = count;
+ return flnode;
+}
+
+static void ramster_flnode_free(struct flushlist_node *flnode,
+ struct tmem_pool *pool)
+{
+ atomic_dec(&ramster_curr_flnode_count);
+ BUG_ON(atomic_read(&ramster_curr_flnode_count) < 0);
+ kmem_cache_free(ramster_flnode_cache, flnode);
+}
+
+static struct tmem_hostops zcache_hostops = {
+ .obj_alloc = zcache_obj_alloc,
+ .obj_free = zcache_obj_free,
+ .objnode_alloc = zcache_objnode_alloc,
+ .objnode_free = zcache_objnode_free,
+};
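+
+/*
+ * Illustrative sketch, not built (sk_alloc_via_hostops is
+ * hypothetical): tmem reaches the callbacks above only through this
+ * ops table, e.g. object allocation during a put is conceptually:
+ */
+#if 0
+static struct tmem_obj *sk_alloc_via_hostops(struct tmem_hostops *hops,
+					     struct tmem_pool *pool)
+{
+	return hops->obj_alloc(pool);	/* -> zcache_obj_alloc() */
+}
+#endif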
+
+/*
+ * zcache implementations for PAM page descriptor ops
+ */
+
+
+static inline void dec_and_check(atomic_t *pvar)
+{
+ atomic_dec(pvar);
+ /* later when all accounting is fixed, make this a BUG */
+ WARN_ON_ONCE(atomic_read(pvar) < 0);
+}
+
+static atomic_t zcache_curr_eph_pampd_count = ATOMIC_INIT(0);
+static unsigned long zcache_curr_eph_pampd_count_max;
+static atomic_t zcache_curr_pers_pampd_count = ATOMIC_INIT(0);
+static unsigned long zcache_curr_pers_pampd_count_max;
+
+/* forward reference */
+static int zcache_compress(struct page *from, void **out_va, size_t *out_len);
+
+static int zcache_pampd_eph_create(char *data, size_t size, bool raw,
+ struct tmem_pool *pool, struct tmem_oid *oid,
+ uint32_t index, void **pampd)
+{
+ int ret = -1;
+ void *cdata = data;
+ size_t clen = size;
+ struct zcache_client *cli = pool->client;
+ uint16_t client_id = get_client_id_from_client(cli);
+ struct page *page = NULL;
+ unsigned long count;
+
+ if (!raw) {
+ page = virt_to_page(data);
+		if (zcache_compress(page, &cdata, &clen) == 0)
+			goto out;	/* compression failed; ret is still -1 */
+ if (clen == 0 || clen > zbud_max_buddy_size()) {
+ zcache_compress_poor++;
+ goto out;
+ }
+ }
+ *pampd = (void *)zbud_create(client_id, pool->pool_id, oid,
+ index, page, cdata, clen);
+ if (*pampd == NULL) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ ret = 0;
+ count = atomic_inc_return(&zcache_curr_eph_pampd_count);
+ if (count > zcache_curr_eph_pampd_count_max)
+ zcache_curr_eph_pampd_count_max = count;
+ if (client_id != LOCAL_CLIENT) {
+ count = atomic_inc_return(&ramster_foreign_eph_pampd_count);
+ if (count > ramster_foreign_eph_pampd_count_max)
+ ramster_foreign_eph_pampd_count_max = count;
+ }
+out:
+ return ret;
+}
+
+static int zcache_pampd_pers_create(char *data, size_t size, bool raw,
+ struct tmem_pool *pool, struct tmem_oid *oid,
+ uint32_t index, void **pampd)
+{
+ int ret = -1;
+ void *cdata = data;
+ size_t clen = size;
+ struct zcache_client *cli = pool->client;
+ struct page *page;
+ unsigned long count;
+ unsigned long zv_mean_zsize;
+ struct zv_hdr *zv;
+ long curr_pers_pampd_count;
+ u64 total_zsize;
+#ifdef RAMSTER_TESTING
+ static bool pampd_neg_warned;
+#endif
+
+ curr_pers_pampd_count = atomic_read(&zcache_curr_pers_pampd_count) -
+ atomic_read(&ramster_remote_pers_pages);
+#ifdef RAMSTER_TESTING
+ /* should always be positive, but warn if accounting is off */
+	if (curr_pers_pampd_count < 0 && !pampd_neg_warned) {
+ pr_warn("ramster: bad accounting for curr_pers_pampd_count\n");
+ pampd_neg_warned = true;
+ }
+#endif
+ if (curr_pers_pampd_count >
+ (zv_page_count_policy_percent * totalram_pages) / 100) {
+ zcache_policy_percent_exceeded++;
+ goto out;
+ }
+ if (raw)
+ goto ok_to_create;
+ page = virt_to_page(data);
+ if (zcache_compress(page, &cdata, &clen) == 0)
+ goto out;
+ /* reject if compression is too poor */
+ if (clen > zv_max_zsize) {
+ zcache_compress_poor++;
+ goto out;
+ }
+ /* reject if mean compression is too poor */
+ if ((clen > zv_max_mean_zsize) && (curr_pers_pampd_count > 0)) {
+ total_zsize = xv_get_total_size_bytes(cli->xvpool);
+ zv_mean_zsize = div_u64(total_zsize, curr_pers_pampd_count);
+ if (zv_mean_zsize > zv_max_mean_zsize) {
+ zcache_mean_compress_poor++;
+ goto out;
+ }
+ }
+ok_to_create:
+ *pampd = (void *)zv_create(cli, pool->pool_id, oid, index, cdata, clen);
+ if (*pampd == NULL) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ ret = 0;
+ count = atomic_inc_return(&zcache_curr_pers_pampd_count);
+ if (count > zcache_curr_pers_pampd_count_max)
+ zcache_curr_pers_pampd_count_max = count;
+ if (is_local_client(cli))
+ goto out;
+ zv = *(struct zv_hdr **)pampd;
+ count = atomic_inc_return(&ramster_foreign_pers_pampd_count);
+ if (count > ramster_foreign_pers_pampd_count_max)
+ ramster_foreign_pers_pampd_count_max = count;
+out:
+ return ret;
+}
+
+static void *zcache_pampd_create(char *data, size_t size, bool raw, int eph,
+ struct tmem_pool *pool, struct tmem_oid *oid,
+ uint32_t index)
+{
+ void *pampd = NULL;
+ int ret;
+ bool ephemeral;
+
+ BUG_ON(preemptible());
+ ephemeral = (eph == 1) || ((eph == 0) && is_ephemeral(pool));
+ if (ephemeral)
+ ret = zcache_pampd_eph_create(data, size, raw, pool,
+ oid, index, &pampd);
+ else
+ ret = zcache_pampd_pers_create(data, size, raw, pool,
+ oid, index, &pampd);
+ /* FIXME add some counters here for failed creates? */
+ return pampd;
+}
+
+/*
+ * fill the pageframe corresponding to the struct page with the data
+ * from the passed pampd
+ */
+static int zcache_pampd_get_data(char *data, size_t *bufsize, bool raw,
+ void *pampd, struct tmem_pool *pool,
+ struct tmem_oid *oid, uint32_t index)
+{
+ int ret = 0;
+
+ BUG_ON(preemptible());
+ BUG_ON(is_ephemeral(pool)); /* Fix later for shared pools? */
+ BUG_ON(pampd_is_remote(pampd));
+ if (raw)
+ zv_copy_from_pampd(data, bufsize, pampd);
+ else
+ zv_decompress(virt_to_page(data), pampd);
+ return ret;
+}
+
+static int zcache_pampd_get_data_and_free(char *data, size_t *bufsize, bool raw,
+ void *pampd, struct tmem_pool *pool,
+ struct tmem_oid *oid, uint32_t index)
+{
+ int ret = 0;
+ unsigned long flags;
+ struct zcache_client *cli = pool->client;
+
+ BUG_ON(preemptible());
+ BUG_ON(pampd_is_remote(pampd));
+ if (is_ephemeral(pool)) {
+ local_irq_save(flags);
+ if (raw)
+ zbud_copy_from_pampd(data, bufsize, pampd);
+ else
+ ret = zbud_decompress(virt_to_page(data), pampd);
+ zbud_free_and_delist((struct zbud_hdr *)pampd);
+ local_irq_restore(flags);
+ if (!is_local_client(cli))
+ dec_and_check(&ramster_foreign_eph_pampd_count);
+ dec_and_check(&zcache_curr_eph_pampd_count);
+ } else {
+ if (is_local_client(cli))
+ BUG();
+ if (raw)
+ zv_copy_from_pampd(data, bufsize, pampd);
+ else
+ zv_decompress(virt_to_page(data), pampd);
+ zv_free(cli->xvpool, pampd);
+ if (!is_local_client(cli))
+ dec_and_check(&ramster_foreign_pers_pampd_count);
+ dec_and_check(&zcache_curr_pers_pampd_count);
+ ret = 0;
+ }
+ return ret;
+}
+
+static bool zcache_pampd_is_remote(void *pampd)
+{
+ return pampd_is_remote(pampd);
+}
+
+/*
+ * free the pampd and remove it from any zcache lists
+ * pampd must no longer be pointed to from any tmem data structures!
+ */
+static void zcache_pampd_free(void *pampd, struct tmem_pool *pool,
+ struct tmem_oid *oid, uint32_t index, bool acct)
+{
+ struct zcache_client *cli = pool->client;
+ bool eph = is_ephemeral(pool);
+ struct zv_hdr *zv;
+
+ BUG_ON(preemptible());
+ if (pampd_is_remote(pampd)) {
+ WARN_ON(acct == false);
+ if (oid == NULL) {
+ /*
+ * a NULL oid means to ignore this pampd free
+ * as the remote freeing will be handled elsewhere
+ */
+ } else if (eph) {
+ /* FIXME remote flush optional but probably good idea */
+ /* FIXME get these working properly again */
+ dec_and_check(&zcache_curr_eph_pampd_count);
+ } else if (pampd_is_intransit(pampd)) {
+ /* did a pers remote get_and_free, so just free local */
+ pampd = pampd_mask_intransit_and_remote(pampd);
+ goto local_pers;
+ } else {
+ struct flushlist_node *flnode =
+ ramster_flnode_alloc(pool);
+
+ flnode->xh.client_id = pampd_remote_node(pampd);
+ flnode->xh.pool_id = pool->pool_id;
+ flnode->xh.oid = *oid;
+ flnode->xh.index = index;
+ flnode->rem_op.op = RAMSTER_REMOTIFY_FLUSH_PAGE;
+ spin_lock(&zcache_rem_op_list_lock);
+ list_add(&flnode->rem_op.list, &zcache_rem_op_list);
+ spin_unlock(&zcache_rem_op_list_lock);
+ dec_and_check(&zcache_curr_pers_pampd_count);
+ dec_and_check(&ramster_remote_pers_pages);
+ }
+ } else if (eph) {
+ zbud_free_and_delist((struct zbud_hdr *)pampd);
+ if (!is_local_client(pool->client))
+ dec_and_check(&ramster_foreign_eph_pampd_count);
+ if (acct)
+ /* FIXME get these working properly again */
+ dec_and_check(&zcache_curr_eph_pampd_count);
+ } else {
+local_pers:
+ zv = (struct zv_hdr *)pampd;
+ if (!is_local_client(pool->client))
+ dec_and_check(&ramster_foreign_pers_pampd_count);
+ zv_free(cli->xvpool, zv);
+ if (acct)
+ /* FIXME get these working properly again */
+ dec_and_check(&zcache_curr_pers_pampd_count);
+ }
+}
+
+static void zcache_pampd_free_obj(struct tmem_pool *pool,
+ struct tmem_obj *obj)
+{
+ struct flushlist_node *flnode;
+
+ BUG_ON(preemptible());
+ if (obj->extra == NULL)
+ return;
+ BUG_ON(!pampd_is_remote(obj->extra));
+ flnode = ramster_flnode_alloc(pool);
+ flnode->xh.client_id = pampd_remote_node(obj->extra);
+ flnode->xh.pool_id = pool->pool_id;
+ flnode->xh.oid = obj->oid;
+ flnode->xh.index = FLUSH_ENTIRE_OBJECT;
+ flnode->rem_op.op = RAMSTER_REMOTIFY_FLUSH_OBJ;
+ spin_lock(&zcache_rem_op_list_lock);
+ list_add(&flnode->rem_op.list, &zcache_rem_op_list);
+ spin_unlock(&zcache_rem_op_list_lock);
+}
+
+void zcache_pampd_new_obj(struct tmem_obj *obj)
+{
+ obj->extra = NULL;
+}
+
+int zcache_pampd_replace_in_obj(void *new_pampd, struct tmem_obj *obj)
+{
+ int ret = -1;
+
+ if (new_pampd != NULL) {
+ if (obj->extra == NULL)
+ obj->extra = new_pampd;
+ /* enforce that all remote pages in an object reside
+ * in the same node! */
+ else if (pampd_remote_node(new_pampd) !=
+ pampd_remote_node((void *)(obj->extra)))
+ BUG();
+ ret = 0;
+ }
+ return ret;
+}
+
+/*
+ * Called by the message handler after a (still compressed) page has been
+ * fetched from the remote machine in response to an "is_remote" tmem_get
+ * or persistent tmem_localify. For a tmem_get, "extra" is the address of
+ * the page that is to be filled to successfully resolve the tmem_get; for
+ * a (persistent) tmem_localify, "extra" is NULL (as the data is placed only
+ * in the local zcache). "data" points to "size" bytes of (compressed) data
+ * passed in the message. In the case of a persistent remote get, if
+ * pre-allocation was successful (see zcache_repatriate_preload), the page
+ * is placed into both local zcache and at "extra".
+ */
+int zcache_localify(int pool_id, struct tmem_oid *oidp,
+ uint32_t index, char *data, size_t size,
+ void *extra)
+{
+ int ret = -ENOENT;
+ unsigned long flags;
+ struct tmem_pool *pool;
+ bool ephemeral, delete = false;
+ size_t clen = PAGE_SIZE;
+ void *pampd, *saved_hb;
+ struct tmem_obj *obj;
+
+ pool = zcache_get_pool_by_id(LOCAL_CLIENT, pool_id);
+ if (unlikely(pool == NULL))
+ /* pool doesn't exist anymore */
+ goto out;
+ ephemeral = is_ephemeral(pool);
+ local_irq_save(flags); /* FIXME: maybe only disable softirqs? */
+ pampd = tmem_localify_get_pampd(pool, oidp, index, &obj, &saved_hb);
+ if (pampd == NULL) {
+ /* hmmm... must have been a flush while waiting */
+#ifdef RAMSTER_TESTING
+ pr_err("UNTESTED pampd==NULL in zcache_localify\n");
+#endif
+ if (ephemeral)
+ ramster_remote_eph_pages_unsucc_get++;
+ else
+ ramster_remote_pers_pages_unsucc_get++;
+ obj = NULL;
+ goto finish;
+ } else if (unlikely(!pampd_is_remote(pampd))) {
+ /* hmmm... must have been a dup put while waiting */
+#ifdef RAMSTER_TESTING
+ pr_err("UNTESTED dup while waiting in zcache_localify\n");
+#endif
+ if (ephemeral)
+ ramster_remote_eph_pages_unsucc_get++;
+ else
+ ramster_remote_pers_pages_unsucc_get++;
+ obj = NULL;
+ pampd = NULL;
+ ret = -EEXIST;
+ goto finish;
+ } else if (size == 0) {
+ /* no remote data, delete the local is_remote pampd */
+ pampd = NULL;
+ if (ephemeral)
+ ramster_remote_eph_pages_unsucc_get++;
+ else
+ BUG();
+ delete = true;
+ goto finish;
+ }
+ if (!ephemeral && pampd_is_intransit(pampd)) {
+ /* localify to zcache */
+ pampd = pampd_mask_intransit_and_remote(pampd);
+ zv_copy_to_pampd(pampd, data, size);
+ } else {
+ pampd = NULL;
+ obj = NULL;
+ }
+ if (extra != NULL) {
+ /* decompress direct-to-memory to complete remotify */
+ ret = lzo1x_decompress_safe((char *)data, size,
+ (char *)extra, &clen);
+ BUG_ON(ret != LZO_E_OK);
+ BUG_ON(clen != PAGE_SIZE);
+ }
+ if (ephemeral)
+ ramster_remote_eph_pages_succ_get++;
+ else
+ ramster_remote_pers_pages_succ_get++;
+ ret = 0;
+finish:
+ tmem_localify_finish(obj, index, pampd, saved_hb, delete);
+ zcache_put_pool(pool);
+ local_irq_restore(flags);
+out:
+ return ret;
+}
+
+/*
+ * Called on a remote persistent tmem_get to attempt to preallocate
+ * local storage for the data contained in the remote persistent page.
+ * If successfully preallocated, returns the pampd, marked as remote and
+ * in_transit. Else returns NULL. Note that the appropriate tmem data
+ * structure must be locked.
+ */
+static void *zcache_pampd_repatriate_preload(void *pampd,
+ struct tmem_pool *pool,
+ struct tmem_oid *oid,
+ uint32_t index,
+ bool *intransit)
+{
+ int clen = pampd_remote_size(pampd);
+ void *ret_pampd = NULL;
+ unsigned long flags;
+
+ if (!pampd_is_remote(pampd))
+ BUG();
+ if (is_ephemeral(pool))
+ BUG();
+ if (pampd_is_intransit(pampd)) {
+ /*
+ * to avoid multiple allocations (and maybe a memory leak)
+ * don't preallocate if already in the process of being
+ * repatriated
+ */
+ *intransit = true;
+ goto out;
+ }
+ *intransit = false;
+ local_irq_save(flags);
+ ret_pampd = (void *)zv_alloc(pool, oid, index, clen);
+ if (ret_pampd != NULL) {
+ /*
+ * a pampd is marked intransit if it is remote and space has
+ * been allocated for it locally (note, only happens for
+ * persistent pages, in which case the remote copy is freed)
+ */
+ ret_pampd = pampd_mark_intransit(ret_pampd);
+ dec_and_check(&ramster_remote_pers_pages);
+ } else
+ ramster_pers_pages_remote_nomem++;
+ local_irq_restore(flags);
+out:
+ return ret_pampd;
+}
+
+/*
+ * Called on a remote tmem_get to invoke a message to fetch the page.
+ * Might sleep so no tmem locks can be held. "extra" is passed
+ * all the way through the round-trip messaging to zcache_localify.
+ */
+static int zcache_pampd_repatriate(void *fake_pampd, void *real_pampd,
+ struct tmem_pool *pool,
+ struct tmem_oid *oid, uint32_t index,
+ bool free, void *extra)
+{
+ struct tmem_xhandle xh;
+ int ret;
+
+ if (pampd_is_intransit(real_pampd))
+ /* have local space pre-reserved, so free remote copy */
+ free = true;
+ xh = tmem_xhandle_fill(LOCAL_CLIENT, pool, oid, index);
+ /* unreliable request/response for now */
+ ret = ramster_remote_async_get(&xh, free,
+ pampd_remote_node(fake_pampd),
+ pampd_remote_size(fake_pampd),
+ pampd_remote_cksum(fake_pampd),
+ extra);
+#ifdef RAMSTER_TESTING
+ if (ret != 0 && ret != -ENOENT)
+ pr_err("TESTING zcache_pampd_repatriate returns, ret=%d\n",
+ ret);
+#endif
+ return ret;
+}
+
+static struct tmem_pamops zcache_pamops = {
+ .create = zcache_pampd_create,
+ .get_data = zcache_pampd_get_data,
+ .free = zcache_pampd_free,
+ .get_data_and_free = zcache_pampd_get_data_and_free,
+ .free_obj = zcache_pampd_free_obj,
+ .is_remote = zcache_pampd_is_remote,
+ .repatriate_preload = zcache_pampd_repatriate_preload,
+ .repatriate = zcache_pampd_repatriate,
+ .new_obj = zcache_pampd_new_obj,
+ .replace_in_obj = zcache_pampd_replace_in_obj,
+};
+
+/*
+ * zcache compression/decompression and related per-cpu stuff
+ */
+
+#define LZO_WORKMEM_BYTES LZO1X_1_MEM_COMPRESS
+#define LZO_DSTMEM_PAGE_ORDER 1
+static DEFINE_PER_CPU(unsigned char *, zcache_workmem);
+static DEFINE_PER_CPU(unsigned char *, zcache_dstmem);
+
+static int zcache_compress(struct page *from, void **out_va, size_t *out_len)
+{
+ int ret = 0;
+ unsigned char *dmem = __get_cpu_var(zcache_dstmem);
+ unsigned char *wmem = __get_cpu_var(zcache_workmem);
+ char *from_va;
+
+ BUG_ON(!irqs_disabled());
+ if (unlikely(dmem == NULL || wmem == NULL))
+ goto out; /* no buffer, so can't compress */
+ from_va = kmap_atomic(from, KM_USER0);
+ mb();
+ ret = lzo1x_1_compress(from_va, PAGE_SIZE, dmem, out_len, wmem);
+ BUG_ON(ret != LZO_E_OK);
+ *out_va = dmem;
+ kunmap_atomic(from_va, KM_USER0);
+ ret = 1;
+out:
+ return ret;
+}
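+
+/*
+ * Illustrative sketch, not built: the lzo1x round trip this file
+ * relies on.  Compression needs LZO1X_1_MEM_COMPRESS bytes of scratch
+ * (the per-cpu zcache_workmem above); decompression needs none, and
+ * the "safe" variant bounds-checks the destination.
+ */
+#if 0
+static int sk_roundtrip(unsigned char *page_va, unsigned char *dst,
+			unsigned char *out, unsigned char *wmem)
+{
+	size_t clen = 2 * PAGE_SIZE, dlen = PAGE_SIZE;
+	int ret;
+
+	ret = lzo1x_1_compress(page_va, PAGE_SIZE, dst, &clen, wmem);
+	if (ret != LZO_E_OK)
+		return -EINVAL;
+	ret = lzo1x_decompress_safe(dst, clen, out, &dlen);
+	return (ret == LZO_E_OK && dlen == PAGE_SIZE) ? 0 : -EINVAL;
+}
+#endif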
+
+
+static int zcache_cpu_notifier(struct notifier_block *nb,
+ unsigned long action, void *pcpu)
+{
+ int cpu = (long)pcpu;
+ struct zcache_preload *kp;
+
+ switch (action) {
+ case CPU_UP_PREPARE:
+ per_cpu(zcache_dstmem, cpu) = (void *)__get_free_pages(
+ GFP_KERNEL | __GFP_REPEAT,
+			LZO_DSTMEM_PAGE_ORDER);
+ per_cpu(zcache_workmem, cpu) =
+			kzalloc(LZO_WORKMEM_BYTES,
+ GFP_KERNEL | __GFP_REPEAT);
+ per_cpu(zcache_remoteputmem, cpu) =
+ kzalloc(PAGE_SIZE, GFP_KERNEL | __GFP_REPEAT);
+ break;
+ case CPU_DEAD:
+ case CPU_UP_CANCELED:
+ kfree(per_cpu(zcache_remoteputmem, cpu));
+ per_cpu(zcache_remoteputmem, cpu) = NULL;
+ free_pages((unsigned long)per_cpu(zcache_dstmem, cpu),
+ LZO_DSTMEM_PAGE_ORDER);
+ per_cpu(zcache_dstmem, cpu) = NULL;
+ kfree(per_cpu(zcache_workmem, cpu));
+ per_cpu(zcache_workmem, cpu) = NULL;
+ kp = &per_cpu(zcache_preloads, cpu);
+ while (kp->nr) {
+ kmem_cache_free(zcache_objnode_cache,
+ kp->objnodes[kp->nr - 1]);
+ kp->objnodes[kp->nr - 1] = NULL;
+ kp->nr--;
+ }
+ if (kp->obj) {
+ kmem_cache_free(zcache_obj_cache, kp->obj);
+ kp->obj = NULL;
+ }
+ if (kp->flnode) {
+ kmem_cache_free(ramster_flnode_cache, kp->flnode);
+ kp->flnode = NULL;
+ }
+ if (kp->page) {
+ free_page((unsigned long)kp->page);
+ kp->page = NULL;
+ }
+ break;
+ default:
+ break;
+ }
+ return NOTIFY_OK;
+}
+
+static struct notifier_block zcache_cpu_notifier_block = {
+ .notifier_call = zcache_cpu_notifier
+};
+
+#ifdef CONFIG_SYSFS
+#define ZCACHE_SYSFS_RO(_name) \
+ static ssize_t zcache_##_name##_show(struct kobject *kobj, \
+ struct kobj_attribute *attr, char *buf) \
+ { \
+ return sprintf(buf, "%lu\n", zcache_##_name); \
+ } \
+ static struct kobj_attribute zcache_##_name##_attr = { \
+ .attr = { .name = __stringify(_name), .mode = 0444 }, \
+ .show = zcache_##_name##_show, \
+ }
+
+#define ZCACHE_SYSFS_RO_ATOMIC(_name) \
+ static ssize_t zcache_##_name##_show(struct kobject *kobj, \
+ struct kobj_attribute *attr, char *buf) \
+ { \
+ return sprintf(buf, "%d\n", atomic_read(&zcache_##_name)); \
+ } \
+ static struct kobj_attribute zcache_##_name##_attr = { \
+ .attr = { .name = __stringify(_name), .mode = 0444 }, \
+ .show = zcache_##_name##_show, \
+ }
+
+#define ZCACHE_SYSFS_RO_CUSTOM(_name, _func) \
+ static ssize_t zcache_##_name##_show(struct kobject *kobj, \
+ struct kobj_attribute *attr, char *buf) \
+ { \
+ return _func(buf); \
+ } \
+ static struct kobj_attribute zcache_##_name##_attr = { \
+ .attr = { .name = __stringify(_name), .mode = 0444 }, \
+ .show = zcache_##_name##_show, \
+ }
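+
+/*
+ * For reference, ZCACHE_SYSFS_RO(flush_total) below expands to roughly
+ * the following read-only attribute (shown here only for illustration):
+ */
+#if 0
+static ssize_t zcache_flush_total_show(struct kobject *kobj,
+				struct kobj_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%lu\n", zcache_flush_total);
+}
+static struct kobj_attribute zcache_flush_total_attr = {
+	.attr = { .name = "flush_total", .mode = 0444 },
+	.show = zcache_flush_total_show,
+};
+#endif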
+
+ZCACHE_SYSFS_RO(curr_obj_count_max);
+ZCACHE_SYSFS_RO(curr_objnode_count_max);
+ZCACHE_SYSFS_RO(flush_total);
+ZCACHE_SYSFS_RO(flush_found);
+ZCACHE_SYSFS_RO(flobj_total);
+ZCACHE_SYSFS_RO(flobj_found);
+ZCACHE_SYSFS_RO(failed_eph_puts);
+ZCACHE_SYSFS_RO(nonactive_puts);
+ZCACHE_SYSFS_RO(failed_pers_puts);
+ZCACHE_SYSFS_RO(zbud_curr_zbytes);
+ZCACHE_SYSFS_RO(zbud_cumul_zpages);
+ZCACHE_SYSFS_RO(zbud_cumul_zbytes);
+ZCACHE_SYSFS_RO(zbud_buddied_count);
+ZCACHE_SYSFS_RO(evicted_raw_pages);
+ZCACHE_SYSFS_RO(evicted_unbuddied_pages);
+ZCACHE_SYSFS_RO(evicted_buddied_pages);
+ZCACHE_SYSFS_RO(failed_get_free_pages);
+ZCACHE_SYSFS_RO(failed_alloc);
+ZCACHE_SYSFS_RO(put_to_flush);
+ZCACHE_SYSFS_RO(compress_poor);
+ZCACHE_SYSFS_RO(mean_compress_poor);
+ZCACHE_SYSFS_RO(policy_percent_exceeded);
+ZCACHE_SYSFS_RO_ATOMIC(zbud_curr_raw_pages);
+ZCACHE_SYSFS_RO_ATOMIC(zbud_curr_zpages);
+ZCACHE_SYSFS_RO_ATOMIC(curr_obj_count);
+ZCACHE_SYSFS_RO_ATOMIC(curr_objnode_count);
+ZCACHE_SYSFS_RO_CUSTOM(zbud_unbuddied_list_counts,
+ zbud_show_unbuddied_list_counts);
+ZCACHE_SYSFS_RO_CUSTOM(zbud_cumul_chunk_counts,
+ zbud_show_cumul_chunk_counts);
+ZCACHE_SYSFS_RO_CUSTOM(zv_curr_dist_counts,
+ zv_curr_dist_counts_show);
+ZCACHE_SYSFS_RO_CUSTOM(zv_cumul_dist_counts,
+ zv_cumul_dist_counts_show);
+
+static struct attribute *zcache_attrs[] = {
+ &zcache_curr_obj_count_attr.attr,
+ &zcache_curr_obj_count_max_attr.attr,
+ &zcache_curr_objnode_count_attr.attr,
+ &zcache_curr_objnode_count_max_attr.attr,
+ &zcache_flush_total_attr.attr,
+ &zcache_flobj_total_attr.attr,
+ &zcache_flush_found_attr.attr,
+ &zcache_flobj_found_attr.attr,
+ &zcache_failed_eph_puts_attr.attr,
+ &zcache_nonactive_puts_attr.attr,
+ &zcache_failed_pers_puts_attr.attr,
+ &zcache_policy_percent_exceeded_attr.attr,
+ &zcache_compress_poor_attr.attr,
+ &zcache_mean_compress_poor_attr.attr,
+ &zcache_zbud_curr_raw_pages_attr.attr,
+ &zcache_zbud_curr_zpages_attr.attr,
+ &zcache_zbud_curr_zbytes_attr.attr,
+ &zcache_zbud_cumul_zpages_attr.attr,
+ &zcache_zbud_cumul_zbytes_attr.attr,
+ &zcache_zbud_buddied_count_attr.attr,
+ &zcache_evicted_raw_pages_attr.attr,
+ &zcache_evicted_unbuddied_pages_attr.attr,
+ &zcache_evicted_buddied_pages_attr.attr,
+ &zcache_failed_get_free_pages_attr.attr,
+ &zcache_failed_alloc_attr.attr,
+ &zcache_put_to_flush_attr.attr,
+ &zcache_zbud_unbuddied_list_counts_attr.attr,
+ &zcache_zbud_cumul_chunk_counts_attr.attr,
+ &zcache_zv_curr_dist_counts_attr.attr,
+ &zcache_zv_cumul_dist_counts_attr.attr,
+ &zcache_zv_max_zsize_attr.attr,
+ &zcache_zv_max_mean_zsize_attr.attr,
+ &zcache_zv_page_count_policy_percent_attr.attr,
+ NULL,
+};
+
+static struct attribute_group zcache_attr_group = {
+ .attrs = zcache_attrs,
+ .name = "zcache",
+};
+
+#define RAMSTER_SYSFS_RO(_name) \
+ static ssize_t ramster_##_name##_show(struct kobject *kobj, \
+ struct kobj_attribute *attr, char *buf) \
+ { \
+ return sprintf(buf, "%lu\n", ramster_##_name); \
+ } \
+ static struct kobj_attribute ramster_##_name##_attr = { \
+ .attr = { .name = __stringify(_name), .mode = 0444 }, \
+ .show = ramster_##_name##_show, \
+ }
+
+#define RAMSTER_SYSFS_RW(_name) \
+ static ssize_t ramster_##_name##_show(struct kobject *kobj, \
+ struct kobj_attribute *attr, char *buf) \
+ { \
+ return sprintf(buf, "%lu\n", ramster_##_name); \
+ } \
+ static ssize_t ramster_##_name##_store(struct kobject *kobj, \
+ struct kobj_attribute *attr, const char *buf, size_t count) \
+ { \
+ int err; \
+ unsigned long enable; \
+ err = kstrtoul(buf, 10, &enable); \
+ if (err) \
+ return -EINVAL; \
+ ramster_##_name = enable; \
+ return count; \
+ } \
+ static struct kobj_attribute ramster_##_name##_attr = { \
+ .attr = { .name = __stringify(_name), .mode = 0644 }, \
+ .show = ramster_##_name##_show, \
+ .store = ramster_##_name##_store, \
+ }
+
+#define RAMSTER_SYSFS_RO_ATOMIC(_name) \
+ static ssize_t ramster_##_name##_show(struct kobject *kobj, \
+ struct kobj_attribute *attr, char *buf) \
+ { \
+ return sprintf(buf, "%d\n", atomic_read(&ramster_##_name)); \
+ } \
+ static struct kobj_attribute ramster_##_name##_attr = { \
+ .attr = { .name = __stringify(_name), .mode = 0444 }, \
+ .show = ramster_##_name##_show, \
+ }
+
+RAMSTER_SYSFS_RO(interface_revision);
+RAMSTER_SYSFS_RO_ATOMIC(remote_pers_pages);
+RAMSTER_SYSFS_RW(pers_remotify_enable);
+RAMSTER_SYSFS_RW(eph_remotify_enable);
+RAMSTER_SYSFS_RO(eph_pages_remoted);
+RAMSTER_SYSFS_RO(eph_pages_remote_failed);
+RAMSTER_SYSFS_RO(pers_pages_remoted);
+RAMSTER_SYSFS_RO(pers_pages_remote_failed);
+RAMSTER_SYSFS_RO(pers_pages_remote_nomem);
+RAMSTER_SYSFS_RO(remote_pages_flushed);
+RAMSTER_SYSFS_RO(remote_page_flushes_failed);
+RAMSTER_SYSFS_RO(remote_objects_flushed);
+RAMSTER_SYSFS_RO(remote_object_flushes_failed);
+RAMSTER_SYSFS_RO(remote_eph_pages_succ_get);
+RAMSTER_SYSFS_RO(remote_eph_pages_unsucc_get);
+RAMSTER_SYSFS_RO(remote_pers_pages_succ_get);
+RAMSTER_SYSFS_RO(remote_pers_pages_unsucc_get);
+RAMSTER_SYSFS_RO_ATOMIC(foreign_eph_pampd_count);
+RAMSTER_SYSFS_RO(foreign_eph_pampd_count_max);
+RAMSTER_SYSFS_RO_ATOMIC(foreign_pers_pampd_count);
+RAMSTER_SYSFS_RO(foreign_pers_pampd_count_max);
+RAMSTER_SYSFS_RO_ATOMIC(curr_flnode_count);
+RAMSTER_SYSFS_RO(curr_flnode_count_max);
+
+#define MANUAL_NODES 8
+static bool ramster_nodes_manual_up[MANUAL_NODES];
+static ssize_t ramster_manual_node_up_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ int i;
+ char *p = buf;
+ for (i = 0; i < MANUAL_NODES; i++)
+ if (ramster_nodes_manual_up[i])
+ p += sprintf(p, "%d ", i);
+ p += sprintf(p, "\n");
+ return p - buf;
+}
+
+static ssize_t ramster_manual_node_up_store(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buf, size_t count)
+{
+ int err;
+ unsigned long node_num;
+
+ err = kstrtoul(buf, 10, &node_num);
+ if (err) {
+ pr_err("ramster: bad strtoul?\n");
+ return -EINVAL;
+ }
+ if (node_num >= MANUAL_NODES) {
+ pr_err("ramster: bad node_num=%lu?\n", node_num);
+ return -EINVAL;
+ }
+ if (ramster_nodes_manual_up[node_num]) {
+ pr_err("ramster: node %d already up, ignoring\n",
+ (int)node_num);
+ } else {
+ ramster_nodes_manual_up[node_num] = true;
+ r2net_hb_node_up_manual((int)node_num);
+ }
+ return count;
+}
+
+static struct kobj_attribute ramster_manual_node_up_attr = {
+ .attr = { .name = "manual_node_up", .mode = 0644 },
+ .show = ramster_manual_node_up_show,
+ .store = ramster_manual_node_up_store,
+};
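A minimal user-space sketch of bringing a node up through this attribute (illustrative; the path assumes sysfs is mounted at /sys, so the "ramster" group created against mm_kobj in zcache_init() lands at /sys/kernel/mm/ramster):

	#include <stdio.h>

	int main(void)
	{
		/* write a node number; the store hook above parses it with
		 * kstrtoul() and calls r2net_hb_node_up_manual() */
		FILE *f = fopen("/sys/kernel/mm/ramster/manual_node_up", "w");

		if (!f)
			return 1;
		fprintf(f, "1\n");
		return fclose(f) ? 1 : 0;
	}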
+
+static ssize_t ramster_remote_target_nodenum_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ if (ramster_remote_target_nodenum == -1)
+ return sprintf(buf, "unset\n");
+ else
+ return sprintf(buf, "%d\n", ramster_remote_target_nodenum);
+}
+
+static ssize_t ramster_remote_target_nodenum_store(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buf, size_t count)
+{
+ int err;
+ unsigned long node_num;
+
+ err = kstrtoul(buf, 10, &node_num);
+ if (err) {
+ pr_err("ramster: bad strtoul?\n");
+ return -EINVAL;
+ } else if (node_num == -1UL) {
+ pr_err("ramster: disabling all remotification, "
+ "data may still reside on remote nodes however\n");
+ return -EINVAL;
+ } else if (node_num >= MANUAL_NODES) {
+ pr_err("ramster: bad node_num=%lu?\n", node_num);
+ return -EINVAL;
+ } else if (!ramster_nodes_manual_up[node_num]) {
+ pr_err("ramster: node %d not up, ignoring setting "
+ "of remotification target\n", (int)node_num);
+ } else if (r2net_remote_target_node_set((int)node_num) >= 0) {
+ pr_info("ramster: node %d set as remotification target\n",
+ (int)node_num);
+ ramster_remote_target_nodenum = (int)node_num;
+ } else {
+ pr_err("ramster: bad num to node node_num=%d?\n",
+ (int)node_num);
+ return -EINVAL;
+ }
+ return count;
+}
+
+static struct kobj_attribute ramster_remote_target_nodenum_attr = {
+ .attr = { .name = "remote_target_nodenum", .mode = 0644 },
+ .show = ramster_remote_target_nodenum_show,
+ .store = ramster_remote_target_nodenum_store,
+};
+
+
+static struct attribute *ramster_attrs[] = {
+ &ramster_interface_revision_attr.attr,
+ &ramster_pers_remotify_enable_attr.attr,
+ &ramster_eph_remotify_enable_attr.attr,
+ &ramster_remote_pers_pages_attr.attr,
+ &ramster_eph_pages_remoted_attr.attr,
+ &ramster_eph_pages_remote_failed_attr.attr,
+ &ramster_pers_pages_remoted_attr.attr,
+ &ramster_pers_pages_remote_failed_attr.attr,
+ &ramster_pers_pages_remote_nomem_attr.attr,
+ &ramster_remote_pages_flushed_attr.attr,
+ &ramster_remote_page_flushes_failed_attr.attr,
+ &ramster_remote_objects_flushed_attr.attr,
+ &ramster_remote_object_flushes_failed_attr.attr,
+ &ramster_remote_eph_pages_succ_get_attr.attr,
+ &ramster_remote_eph_pages_unsucc_get_attr.attr,
+ &ramster_remote_pers_pages_succ_get_attr.attr,
+ &ramster_remote_pers_pages_unsucc_get_attr.attr,
+ &ramster_foreign_eph_pampd_count_attr.attr,
+ &ramster_foreign_eph_pampd_count_max_attr.attr,
+ &ramster_foreign_pers_pampd_count_attr.attr,
+ &ramster_foreign_pers_pampd_count_max_attr.attr,
+ &ramster_curr_flnode_count_attr.attr,
+ &ramster_curr_flnode_count_max_attr.attr,
+ &ramster_manual_node_up_attr.attr,
+ &ramster_remote_target_nodenum_attr.attr,
+ NULL,
+};
+
+static struct attribute_group ramster_attr_group = {
+ .attrs = ramster_attrs,
+ .name = "ramster",
+};
+
+#endif /* CONFIG_SYSFS */
+/*
+ * When zcache is disabled ("frozen"), pools can be created and destroyed,
+ * but all puts (and thus all other operations that require memory allocation)
+ * must fail. If zcache is unfrozen, accepts puts, and is then frozen again,
+ * data consistency requires that all puts arriving while frozen be converted
+ * into flushes.
+ */
+static bool zcache_freeze;
+
+/*
+ * zcache shrinker interface (only useful for ephemeral pages, so zbud only)
+ */
+static int shrink_zcache_memory(struct shrinker *shrink,
+ struct shrink_control *sc)
+{
+ int ret = -1;
+ int nr = sc->nr_to_scan;
+ gfp_t gfp_mask = sc->gfp_mask;
+
+ if (nr >= 0) {
+ if (!(gfp_mask & __GFP_FS))
+ /* does this case really need to be skipped? */
+ goto out;
+ zbud_evict_pages(nr);
+ }
+ ret = (int)atomic_read(&zcache_zbud_curr_raw_pages);
+out:
+ return ret;
+}
+
+static struct shrinker zcache_shrinker = {
+ .shrink = shrink_zcache_memory,
+ .seeks = DEFAULT_SEEKS,
+};
+
+/*
+ * zcache shims between cleancache/frontswap ops and tmem
+ */
+
+int zcache_put(int cli_id, int pool_id, struct tmem_oid *oidp,
+ uint32_t index, char *data, size_t size,
+ bool raw, int ephemeral)
+{
+ struct tmem_pool *pool;
+ int ret = -1;
+
+ BUG_ON(!irqs_disabled());
+ pool = zcache_get_pool_by_id(cli_id, pool_id);
+ if (unlikely(pool == NULL))
+ goto out;
+ if (!zcache_freeze && zcache_do_preload(pool) == 0) {
+ /* preload does preempt_disable on success */
+ ret = tmem_put(pool, oidp, index, data, size, raw, ephemeral);
+ if (ret < 0) {
+ if (is_ephemeral(pool))
+ zcache_failed_eph_puts++;
+ else
+ zcache_failed_pers_puts++;
+ }
+ zcache_put_pool(pool);
+ preempt_enable_no_resched();
+ } else {
+ zcache_put_to_flush++;
+ if (atomic_read(&pool->obj_count) > 0)
+ /* the put fails whether the flush succeeds or not */
+ (void)tmem_flush_page(pool, oidp, index);
+ zcache_put_pool(pool);
+ }
+out:
+ return ret;
+}
+
+int zcache_get(int cli_id, int pool_id, struct tmem_oid *oidp,
+ uint32_t index, char *data, size_t *sizep,
+ bool raw, int get_and_free)
+{
+ struct tmem_pool *pool;
+ int ret = -1;
+ bool eph = true; /* assume ephemeral if the pool lookup fails */
+
+ if (!raw) {
+ BUG_ON(irqs_disabled());
+ BUG_ON(in_softirq());
+ }
+ pool = zcache_get_pool_by_id(cli_id, pool_id);
+ if (likely(pool != NULL)) {
+ eph = is_ephemeral(pool);
+ if (atomic_read(&pool->obj_count) > 0)
+ ret = tmem_get(pool, oidp, index, data, sizep,
+ raw, get_and_free);
+ zcache_put_pool(pool);
+ }
+ WARN_ONCE((!eph && (ret != 0)), "zcache_get fails on persistent pool, "
+ "bad things are very likely to happen soon\n");
+#ifdef RAMSTER_TESTING
+ if (ret != 0 && ret != -1 && !(ret == -EINVAL && is_ephemeral(pool)))
+ pr_err("TESTING zcache_get tmem_get returns ret=%d\n", ret);
+#endif
+ if (ret == -EAGAIN)
+ BUG(); /* FIXME: should no longer be possible; verify, then remove */
+ return ret;
+}
+
+int zcache_flush(int cli_id, int pool_id,
+ struct tmem_oid *oidp, uint32_t index)
+{
+ struct tmem_pool *pool;
+ int ret = -1;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ zcache_flush_total++;
+ pool = zcache_get_pool_by_id(cli_id, pool_id);
+ ramster_do_preload_flnode_only(pool);
+ if (likely(pool != NULL)) {
+ if (atomic_read(&pool->obj_count) > 0)
+ ret = tmem_flush_page(pool, oidp, index);
+ zcache_put_pool(pool);
+ }
+ if (ret >= 0)
+ zcache_flush_found++;
+ local_irq_restore(flags);
+ return ret;
+}
+
+int zcache_flush_object(int cli_id, int pool_id, struct tmem_oid *oidp)
+{
+ struct tmem_pool *pool;
+ int ret = -1;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ zcache_flobj_total++;
+ pool = zcache_get_pool_by_id(cli_id, pool_id);
+ ramster_do_preload_flnode_only(pool);
+ if (likely(pool != NULL)) {
+ if (atomic_read(&pool->obj_count) > 0)
+ ret = tmem_flush_object(pool, oidp);
+ zcache_put_pool(pool);
+ }
+ if (ret >= 0)
+ zcache_flobj_found++;
+ local_irq_restore(flags);
+ return ret;
+}
+
+int zcache_client_destroy_pool(int cli_id, int pool_id)
+{
+ struct tmem_pool *pool = NULL;
+ struct zcache_client *cli = NULL;
+ int ret = -1;
+
+ if (pool_id < 0)
+ goto out;
+ if (cli_id == LOCAL_CLIENT)
+ cli = &zcache_host;
+ else if ((unsigned int)cli_id < MAX_CLIENTS)
+ cli = &zcache_clients[cli_id];
+ if (cli == NULL)
+ goto out;
+ atomic_inc(&cli->refcount);
+ pool = cli->tmem_pools[pool_id];
+ if (pool == NULL)
+ goto out;
+ cli->tmem_pools[pool_id] = NULL;
+ /* wait for pool activity on other cpus to quiesce */
+ while (atomic_read(&pool->refcount) != 0)
+ ;
+ atomic_dec(&cli->refcount);
+ local_bh_disable();
+ ret = tmem_destroy_pool(pool);
+ local_bh_enable();
+ kfree(pool);
+ pr_info("ramster: destroyed pool id=%d cli_id=%d\n", pool_id, cli_id);
+out:
+ return ret;
+}
+
+static int zcache_destroy_pool(int pool_id)
+{
+ return zcache_client_destroy_pool(LOCAL_CLIENT, pool_id);
+}
+
+int zcache_new_pool(uint16_t cli_id, uint32_t flags)
+{
+ int poolid = -1;
+ struct tmem_pool *pool;
+ struct zcache_client *cli = NULL;
+
+ if (cli_id == LOCAL_CLIENT)
+ cli = &zcache_host;
+ else if ((unsigned int)cli_id < MAX_CLIENTS)
+ cli = &zcache_clients[cli_id];
+ if (cli == NULL)
+ goto out;
+ atomic_inc(&cli->refcount);
+ pool = kmalloc(sizeof(struct tmem_pool), GFP_ATOMIC);
+ if (pool == NULL) {
+ pr_info("ramster: pool creation failed: out of memory\n");
+ goto out;
+ }
+
+ for (poolid = 0; poolid < MAX_POOLS_PER_CLIENT; poolid++)
+ if (cli->tmem_pools[poolid] == NULL)
+ break;
+ if (poolid >= MAX_POOLS_PER_CLIENT) {
+ pr_info("ramster: pool creation failed: max exceeded\n");
+ kfree(pool);
+ poolid = -1;
+ goto out;
+ }
+ atomic_set(&pool->refcount, 0);
+ pool->client = cli;
+ pool->pool_id = poolid;
+ tmem_new_pool(pool, flags);
+ cli->tmem_pools[poolid] = pool;
+ if (cli_id == LOCAL_CLIENT)
+ pr_info("ramster: created %s tmem pool, id=%d, local client\n",
+ flags & TMEM_POOL_PERSIST ? "persistent" : "ephemeral",
+ poolid);
+ else
+ pr_info("ramster: created %s tmem pool, id=%d, client=%d\n",
+ flags & TMEM_POOL_PERSIST ? "persistent" : "ephemeral",
+ poolid, cli_id);
+out:
+ if (cli != NULL)
+ atomic_dec(&cli->refcount);
+ return poolid;
+}
+
+static int zcache_local_new_pool(uint32_t flags)
+{
+ return zcache_new_pool(LOCAL_CLIENT, flags);
+}
+
+int zcache_autocreate_pool(int cli_id, int pool_id, bool ephemeral)
+{
+ struct tmem_pool *pool;
+ struct zcache_client *cli = NULL;
+ uint32_t flags = ephemeral ? 0 : TMEM_POOL_PERSIST;
+ int ret = -1;
+
+ if (cli_id == LOCAL_CLIENT)
+ goto out;
+ if (pool_id >= MAX_POOLS_PER_CLIENT)
+ goto out;
+ else if ((unsigned int)cli_id < MAX_CLIENTS)
+ cli = &zcache_clients[cli_id];
+ if ((ephemeral && !use_cleancache) || (!ephemeral && !use_frontswap))
+ BUG(); /* FIXME, handle more gracefully later */
+ if (!cli->allocated) {
+ if (zcache_new_client(cli_id))
+ BUG(); /* FIXME, handle more gracefully later */
+ cli = &zcache_clients[cli_id];
+ }
+ atomic_inc(&cli->refcount);
+ pool = cli->tmem_pools[pool_id];
+ if (pool != NULL) {
+ if (pool->persistent && ephemeral) {
+ pr_err("zcache_autocreate_pool: type mismatch\n");
+ goto out;
+ }
+ ret = 0;
+ goto out;
+ }
+ pool = kmalloc(sizeof(struct tmem_pool), GFP_KERNEL);
+ if (pool == NULL) {
+ pr_info("ramster: pool creation failed: out of memory\n");
+ goto out;
+ }
+ atomic_set(&pool->refcount, 0);
+ pool->client = cli;
+ pool->pool_id = pool_id;
+ tmem_new_pool(pool, flags);
+ cli->tmem_pools[pool_id] = pool;
+ pr_info("ramster: AUTOcreated %s tmem poolid=%d, for remote client=%d\n",
+ flags & TMEM_POOL_PERSIST ? "persistent" : "ephemeral",
+ pool_id, cli_id);
+ ret = 0;
+out:
+ if (cli == NULL)
+ BUG(); /* FIXME, handle more gracefully later */
+ /* pr_err("zcache_autocreate_pool: failed\n"); */
+ if (cli != NULL)
+ atomic_dec(&cli->refcount);
+ return ret;
+}
+
+/**********
+ * Two kernel functionalities currently can be layered on top of tmem.
+ * These are "cleancache" which is used as a second-chance cache for clean
+ * page cache pages; and "frontswap" which is used for swap pages
+ * to avoid writes to disk. A generic "shim" is provided here for each
+ * to translate in-kernel semantics to zcache semantics.
+ */
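The resulting layering, sketched for the cleancache side (the zcache_cleancache_* shims are defined below; cleancache_put_page() is the kernel-side hook):

	VFS page cache --> cleancache_put_page()
	               --> zcache_cleancache_put_page()   (shim, this file)
	               --> zcache_put()                   (zcache semantics)
	               --> tmem_put()                     (tmem core)

The frontswap side is symmetric, with the zcache_frontswap_* shims calling the same zcache_put()/zcache_get() entry points.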
+
+#ifdef CONFIG_CLEANCACHE
+static void zcache_cleancache_put_page(int pool_id,
+ struct cleancache_filekey key,
+ pgoff_t index, struct page *page)
+{
+ u32 ind = (u32) index;
+ struct tmem_oid oid = *(struct tmem_oid *)&key;
+
+#ifdef __PG_WAS_ACTIVE
+ if (!PageWasActive(page)) {
+ zcache_nonactive_puts++;
+ return;
+ }
+#endif
+ if (likely(ind == index)) {
+ char *kva = page_address(page);
+
+ (void)zcache_put(LOCAL_CLIENT, pool_id, &oid, index,
+ kva, PAGE_SIZE, 0, 1);
+ }
+}
+
+static int zcache_cleancache_get_page(int pool_id,
+ struct cleancache_filekey key,
+ pgoff_t index, struct page *page)
+{
+ u32 ind = (u32) index;
+ struct tmem_oid oid = *(struct tmem_oid *)&key;
+ int ret = -1;
+
+ preempt_disable();
+ if (likely(ind == index)) {
+ char *kva = page_address(page);
+ size_t size = PAGE_SIZE;
+
+ ret = zcache_get(LOCAL_CLIENT, pool_id, &oid, index,
+ kva, &size, 0, 0);
+#ifdef __PG_WAS_ACTIVE
+ if (ret == 0)
+ SetPageWasActive(page);
+#endif
+ }
+ preempt_enable();
+ return ret;
+}
+
+static void zcache_cleancache_flush_page(int pool_id,
+ struct cleancache_filekey key,
+ pgoff_t index)
+{
+ u32 ind = (u32) index;
+ struct tmem_oid oid = *(struct tmem_oid *)&key;
+
+ if (likely(ind == index))
+ (void)zcache_flush(LOCAL_CLIENT, pool_id, &oid, ind);
+}
+
+static void zcache_cleancache_flush_inode(int pool_id,
+ struct cleancache_filekey key)
+{
+ struct tmem_oid oid = *(struct tmem_oid *)&key;
+
+ (void)zcache_flush_object(LOCAL_CLIENT, pool_id, &oid);
+}
+
+static void zcache_cleancache_flush_fs(int pool_id)
+{
+ if (pool_id >= 0)
+ (void)zcache_destroy_pool(pool_id);
+}
+
+static int zcache_cleancache_init_fs(size_t pagesize)
+{
+ BUG_ON(sizeof(struct cleancache_filekey) !=
+ sizeof(struct tmem_oid));
+ BUG_ON(pagesize != PAGE_SIZE);
+ return zcache_local_new_pool(0);
+}
+
+static int zcache_cleancache_init_shared_fs(char *uuid, size_t pagesize)
+{
+ /* shared pools are unsupported and map to private */
+ BUG_ON(sizeof(struct cleancache_filekey) !=
+ sizeof(struct tmem_oid));
+ BUG_ON(pagesize != PAGE_SIZE);
+ return zcache_local_new_pool(0);
+}
+
+static struct cleancache_ops zcache_cleancache_ops = {
+ .put_page = zcache_cleancache_put_page,
+ .get_page = zcache_cleancache_get_page,
+ .invalidate_page = zcache_cleancache_flush_page,
+ .invalidate_inode = zcache_cleancache_flush_inode,
+ .invalidate_fs = zcache_cleancache_flush_fs,
+ .init_shared_fs = zcache_cleancache_init_shared_fs,
+ .init_fs = zcache_cleancache_init_fs
+};
+
+struct cleancache_ops zcache_cleancache_register_ops(void)
+{
+ struct cleancache_ops old_ops =
+ cleancache_register_ops(&zcache_cleancache_ops);
+
+ return old_ops;
+}
+#endif
+
+#ifdef CONFIG_FRONTSWAP
+/* a single tmem poolid is used for all frontswap "types" (swapfiles) */
+static int zcache_frontswap_poolid = -1;
+
+/*
+ * Swizzling increases objects per swaptype, increasing tmem concurrency
+ * for heavy swaploads. Later, larger nr_cpus -> larger SWIZ_BITS
+ */
+#define SWIZ_BITS 8
+#define SWIZ_MASK ((1 << SWIZ_BITS) - 1)
+#define _oswiz(_type, _ind) ((_type << SWIZ_BITS) | (_ind & SWIZ_MASK))
+#define iswiz(_ind) (_ind >> SWIZ_BITS)
+
+static inline struct tmem_oid oswiz(unsigned type, u32 ind)
+{
+ struct tmem_oid oid = { .oid = { 0 } };
+ oid.oid[0] = _oswiz(type, ind);
+ return oid;
+}
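A worked example of the swizzle, assuming SWIZ_BITS = 8 as defined above: type 1, offset 0x12345 maps to object id (1 << 8) | 0x45 = 0x145 at index 0x12345 >> 8 = 0x123, so consecutive offsets fan out across 256 tmem objects per swap type. A standalone model (illustrative, not part of the patch):

	#include <assert.h>
	#include <stdint.h>

	#define SWIZ_BITS 8
	#define SWIZ_MASK ((1 << SWIZ_BITS) - 1)

	int main(void)
	{
		uint32_t type = 1, ind = 0x12345;
		uint64_t oid0 = (type << SWIZ_BITS) | (ind & SWIZ_MASK); /* _oswiz() */
		uint32_t index = ind >> SWIZ_BITS;                       /* iswiz() */

		assert(oid0 == 0x145 && index == 0x123);
		return 0;
	}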
+
+static int zcache_frontswap_put_page(unsigned type, pgoff_t offset,
+ struct page *page)
+{
+ u64 ind64 = (u64)offset;
+ u32 ind = (u32)offset;
+ struct tmem_oid oid = oswiz(type, ind);
+ int ret = -1;
+ unsigned long flags;
+ char *kva;
+
+ BUG_ON(!PageLocked(page));
+ if (likely(ind64 == ind)) {
+ local_irq_save(flags);
+ kva = page_address(page);
+ ret = zcache_put(LOCAL_CLIENT, zcache_frontswap_poolid,
+ &oid, iswiz(ind), kva, PAGE_SIZE, 0, 0);
+ local_irq_restore(flags);
+ }
+ return ret;
+}
+
+/* returns 0 if the page was successfully gotten from frontswap, -1 if
+ * it was not present (should never happen!) */
+static int zcache_frontswap_get_page(unsigned type, pgoff_t offset,
+ struct page *page)
+{
+ u64 ind64 = (u64)offset;
+ u32 ind = (u32)offset;
+ struct tmem_oid oid = oswiz(type, ind);
+ int ret = -1;
+
+ preempt_disable(); /* FIXME, remove this? */
+ BUG_ON(!PageLocked(page));
+ if (likely(ind64 == ind)) {
+ char *kva = page_address(page);
+ size_t size = PAGE_SIZE;
+
+ ret = zcache_get(LOCAL_CLIENT, zcache_frontswap_poolid,
+ &oid, iswiz(ind), kva, &size, 0, -1);
+ }
+ preempt_enable(); /* FIXME, remove this? */
+ return ret;
+}
+
+/* flush a single page from frontswap */
+static void zcache_frontswap_flush_page(unsigned type, pgoff_t offset)
+{
+ u64 ind64 = (u64)offset;
+ u32 ind = (u32)offset;
+ struct tmem_oid oid = oswiz(type, ind);
+
+ if (likely(ind64 == ind))
+ (void)zcache_flush(LOCAL_CLIENT, zcache_frontswap_poolid,
+ &oid, iswiz(ind));
+}
+
+/* flush all pages from the passed swaptype */
+static void zcache_frontswap_flush_area(unsigned type)
+{
+ struct tmem_oid oid;
+ int ind;
+
+ for (ind = SWIZ_MASK; ind >= 0; ind--) {
+ oid = oswiz(type, ind);
+ (void)zcache_flush_object(LOCAL_CLIENT,
+ zcache_frontswap_poolid, &oid);
+ }
+}
+
+static void zcache_frontswap_init(unsigned ignored)
+{
+ /* a single tmem poolid is used for all frontswap "types" (swapfiles) */
+ if (zcache_frontswap_poolid < 0)
+ zcache_frontswap_poolid =
+ zcache_local_new_pool(TMEM_POOL_PERSIST);
+}
+
+static struct frontswap_ops zcache_frontswap_ops = {
+ .put_page = zcache_frontswap_put_page,
+ .get_page = zcache_frontswap_get_page,
+ .invalidate_page = zcache_frontswap_flush_page,
+ .invalidate_area = zcache_frontswap_flush_area,
+ .init = zcache_frontswap_init
+};
+
+struct frontswap_ops zcache_frontswap_register_ops(void)
+{
+ struct frontswap_ops old_ops =
+ frontswap_register_ops(&zcache_frontswap_ops);
+
+ return old_ops;
+}
+#endif
+
+/*
+ * frontswap selfshrinking
+ */
+
+#ifdef CONFIG_FRONTSWAP
+/* In HZ, controls frequency of worker invocation. */
+static unsigned int selfshrink_interval __read_mostly = 5;
+
+static void selfshrink_process(struct work_struct *work);
+static DECLARE_DELAYED_WORK(selfshrink_worker, selfshrink_process);
+
+/* Enable/disable with sysfs. */
+static bool frontswap_selfshrinking __read_mostly;
+
+/* Enable/disable with kernel boot option. */
+static bool use_frontswap_selfshrink __initdata = true;
+
+/*
+ * The default values for the following parameters were deemed reasonable
+ * by experimentation, may be workload-dependent, and can all be
+ * adjusted via sysfs.
+ */
+
+/* Control rate for frontswap shrinking. Higher hysteresis is slower. */
+static unsigned int frontswap_hysteresis __read_mostly = 20;
+
+/*
+ * Number of selfshrink worker invocations to wait before observing that
+ * frontswap selfshrinking should commence. Note that selfshrinking does
+ * not use a separate worker thread.
+ */
+static unsigned int frontswap_inertia __read_mostly = 3;
+
+/* Countdown to next invocation of frontswap_shrink() */
+static unsigned long frontswap_inertia_counter;
+
+/*
+ * Invoked by the selfshrink worker thread, uses current number of pages
+ * in frontswap (frontswap_curr_pages()), previous status, and control
+ * values (hysteresis and inertia) to determine if frontswap should be
+ * shrunk and what the new frontswap size should be. Note that
+ * frontswap_shrink is essentially a partial swapoff that immediately
+ * transfers pages from the "swap device" (frontswap) back into kernel
+ * RAM; despite the name, frontswap "shrinking" is very different from
+ * the "shrinker" interface used by the kernel MM subsystem to reclaim
+ * memory.
+ */
+static void frontswap_selfshrink(void)
+{
+ static unsigned long cur_frontswap_pages;
+ static unsigned long last_frontswap_pages;
+ static unsigned long tgt_frontswap_pages;
+
+ last_frontswap_pages = cur_frontswap_pages;
+ cur_frontswap_pages = frontswap_curr_pages();
+ if (!cur_frontswap_pages ||
+ (cur_frontswap_pages > last_frontswap_pages)) {
+ frontswap_inertia_counter = frontswap_inertia;
+ return;
+ }
+ if (frontswap_inertia_counter && --frontswap_inertia_counter)
+ return;
+ if (cur_frontswap_pages <= frontswap_hysteresis)
+ tgt_frontswap_pages = 0;
+ else
+ tgt_frontswap_pages = cur_frontswap_pages -
+ (cur_frontswap_pages / frontswap_hysteresis);
+ frontswap_shrink(tgt_frontswap_pages);
+}
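As a worked example of the control law above: with frontswap_hysteresis = 20, 10000 pages currently in frontswap, and the inertia counter expired, the target becomes 10000 - 10000/20 = 9500, i.e. roughly 5% is shrunk per interval; once the count falls to or below the hysteresis threshold, the target drops to 0. A standalone model (illustrative):

	#include <stdio.h>

	static unsigned long shrink_target(unsigned long cur, unsigned int hysteresis)
	{
		if (cur <= hysteresis)
			return 0;
		return cur - cur / hysteresis;
	}

	int main(void)
	{
		printf("%lu\n", shrink_target(10000, 20)); /* 9500 */
		printf("%lu\n", shrink_target(15, 20));    /* 0 */
		return 0;
	}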
+
+static int __init ramster_nofrontswap_selfshrink_setup(char *s)
+{
+ use_frontswap_selfshrink = false;
+ return 1;
+}
+
+__setup("noselfshrink", ramster_nofrontswap_selfshrink_setup);
+
+static void selfshrink_process(struct work_struct *work)
+{
+ if (frontswap_selfshrinking && frontswap_enabled) {
+ frontswap_selfshrink();
+ schedule_delayed_work(&selfshrink_worker,
+ selfshrink_interval * HZ);
+ }
+}
+
+static int ramster_enabled; /* tentative definition; set via the "ramster" boot param below */
+
+static int __init ramster_selfshrink_init(void)
+{
+ frontswap_selfshrinking = ramster_enabled && use_frontswap_selfshrink;
+ if (frontswap_selfshrinking)
+ pr_info("ramster: Initializing frontswap "
+ "selfshrinking driver.\n");
+ else
+ return -ENODEV;
+
+ schedule_delayed_work(&selfshrink_worker, selfshrink_interval * HZ);
+
+ return 0;
+}
+
+subsys_initcall(ramster_selfshrink_init);
+#endif
+
+/*
+ * zcache initialization
+ * NOTE FOR NOW ramster MUST BE PROVIDED AS A KERNEL BOOT PARAMETER OR
+ * NOTHING HAPPENS!
+ */
+
+static int ramster_enabled;
+
+static int __init enable_ramster(char *s)
+{
+ ramster_enabled = 1;
+ return 1;
+}
+__setup("ramster", enable_ramster);
+
+/* allow independent dynamic disabling of cleancache and frontswap */
+
+static int use_cleancache = 1;
+
+static int __init no_cleancache(char *s)
+{
+ pr_info("INIT no_cleancache called\n");
+ use_cleancache = 0;
+ return 1;
+}
+
+/*
+ * FIXME: need to guarantee this gets checked before zcache_init is called
+ * What is the correct way to achieve this?
+ */
+early_param("nocleancache", no_cleancache);
+
+static int use_frontswap = 1;
+
+static int __init no_frontswap(char *s)
+{
+ pr_info("INIT no_frontswap called\n");
+ use_frontswap = 0;
+ return 1;
+}
+
+__setup("nofrontswap", no_frontswap);
+
+static int __init zcache_init(void)
+{
+ int ret = 0;
+
+#ifdef CONFIG_SYSFS
+ ret = sysfs_create_group(mm_kobj, &zcache_attr_group);
+ ret = sysfs_create_group(mm_kobj, &ramster_attr_group);
+ if (ret) {
+ pr_err("ramster: can't create sysfs\n");
+ goto out;
+ }
+#endif /* CONFIG_SYSFS */
+#if defined(CONFIG_CLEANCACHE) || defined(CONFIG_FRONTSWAP)
+ if (ramster_enabled) {
+ unsigned int cpu;
+
+ (void)r2net_register_handlers();
+ tmem_register_hostops(&zcache_hostops);
+ tmem_register_pamops(&zcache_pamops);
+ ret = register_cpu_notifier(&zcache_cpu_notifier_block);
+ if (ret) {
+ pr_err("ramster: can't register cpu notifier\n");
+ goto out;
+ }
+ for_each_online_cpu(cpu) {
+ void *pcpu = (void *)(long)cpu;
+ zcache_cpu_notifier(&zcache_cpu_notifier_block,
+ CPU_UP_PREPARE, pcpu);
+ }
+ }
+ zcache_objnode_cache = kmem_cache_create("zcache_objnode",
+ sizeof(struct tmem_objnode), 0, 0, NULL);
+ zcache_obj_cache = kmem_cache_create("zcache_obj",
+ sizeof(struct tmem_obj), 0, 0, NULL);
+ ramster_flnode_cache = kmem_cache_create("ramster_flnode",
+ sizeof(struct flushlist_node), 0, 0, NULL);
+#endif
+#ifdef CONFIG_CLEANCACHE
+ pr_info("INIT ramster_enabled=%d use_cleancache=%d\n",
+ ramster_enabled, use_cleancache);
+ if (ramster_enabled && use_cleancache) {
+ struct cleancache_ops old_ops;
+
+ zbud_init();
+ register_shrinker(&zcache_shrinker);
+ old_ops = zcache_cleancache_register_ops();
+ pr_info("ramster: cleancache enabled using kernel "
+ "transcendent memory and compression buddies\n");
+ if (old_ops.init_fs != NULL)
+ pr_warning("ramster: cleancache_ops overridden");
+ }
+#endif
+#ifdef CONFIG_FRONTSWAP
+ pr_info("INIT ramster_enabled=%d use_frontswap=%d\n",
+ ramster_enabled, use_frontswap);
+ if (ramster_enabled && use_frontswap) {
+ struct frontswap_ops old_ops;
+
+ zcache_new_client(LOCAL_CLIENT);
+ old_ops = zcache_frontswap_register_ops();
+ pr_info("ramster: frontswap enabled using kernel "
+ "transcendent memory and xvmalloc\n");
+ if (old_ops.init != NULL)
+ pr_warning("ramster: frontswap_ops overridden");
+ }
+ if (ramster_enabled && (use_frontswap || use_cleancache))
+ ramster_remotify_init();
+#endif
+out:
+ return ret;
+}
+
+module_init(zcache_init)
diff --git a/drivers/staging/ramster/zcache.h b/drivers/staging/ramster/zcache.h
new file mode 100644
index 00000000000..250b121c22e
--- /dev/null
+++ b/drivers/staging/ramster/zcache.h
@@ -0,0 +1,22 @@
+/*
+ * zcache.h
+ *
+ * External zcache functions
+ *
+ * Copyright (c) 2009-2012, Dan Magenheimer, Oracle Corp.
+ */
+
+#ifndef _ZCACHE_H_
+#define _ZCACHE_H_
+
+extern int zcache_put(int, int, struct tmem_oid *, uint32_t,
+ char *, size_t, bool, int);
+extern int zcache_autocreate_pool(int, int, bool);
+extern int zcache_get(int, int, struct tmem_oid *, uint32_t,
+ char *, size_t *, bool, int);
+extern int zcache_flush(int, int, struct tmem_oid *, uint32_t);
+extern int zcache_flush_object(int, int, struct tmem_oid *);
+extern int zcache_localify(int, struct tmem_oid *, uint32_t,
+ char *, size_t, void *);
+
+#endif /* _ZCACHE_H_ */
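A minimal sketch of an in-kernel caller of this API (hypothetical caller; LOCAL_CLIENT, the pool id, and the tmem_oid layout are as used in zcache.c, and zcache_put() requires interrupts disabled):

	static int example_store_and_load(int pool_id, char *page_kva)
	{
		struct tmem_oid oid = { .oid = { 42 } };
		size_t size = PAGE_SIZE;
		unsigned long flags;
		int ret;

		local_irq_save(flags); /* zcache_put() BUGs if irqs are enabled */
		/* raw=false, ephemeral=1: store a compressed copy of the page */
		ret = zcache_put(LOCAL_CLIENT, pool_id, &oid, 0,
				 page_kva, PAGE_SIZE, false, 1);
		local_irq_restore(flags);
		if (ret < 0)
			return ret;
		/* get_and_free=0: leave the stored copy in place */
		return zcache_get(LOCAL_CLIENT, pool_id, &oid, 0,
				  page_kva, &size, false, 0);
	}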
diff --git a/drivers/staging/rtl8187se/r8180_core.c b/drivers/staging/rtl8187se/r8180_core.c
index 04c23919f4d..e4ade550cfe 100644
--- a/drivers/staging/rtl8187se/r8180_core.c
+++ b/drivers/staging/rtl8187se/r8180_core.c
@@ -439,8 +439,7 @@ void buffer_free(struct net_device *dev, struct buffer **buffer, int len, short
}
kfree(tmp);
tmp = next;
- }
- while (next != *buffer);
+ } while (next != *buffer);
*buffer = NULL;
}
@@ -1392,11 +1391,13 @@ void PerformUndecoratedSignalSmoothing8185(struct r8180_priv *priv,
priv->bCurCCKPkt = bCckRate;
if (priv->UndecoratedSmoothedSS >= 0)
- priv->UndecoratedSmoothedSS = ((priv->UndecoratedSmoothedSS * 5) + (priv->SignalStrength * 10)) / 6;
+ priv->UndecoratedSmoothedSS = ((priv->UndecoratedSmoothedSS * 5) +
+ (priv->SignalStrength * 10)) / 6;
else
priv->UndecoratedSmoothedSS = priv->SignalStrength * 10;
- priv->UndercorateSmoothedRxPower = ((priv->UndercorateSmoothedRxPower * 50) + (priv->RxPower * 11)) / 60;
+ priv->UndercorateSmoothedRxPower = ((priv->UndercorateSmoothedRxPower * 50) +
+ (priv->RxPower * 11)) / 60;
if (bCckRate)
priv->CurCCKRSSI = priv->RSSI;
@@ -1607,43 +1608,50 @@ void rtl8180_rx(struct net_device *dev)
/* printk("==========================>rx : RXAGC is %d,signalstrength is %d\n",RXAGC,stats.signalstrength); */
stats.rssi = priv->wstats.qual.qual = priv->SignalQuality;
stats.noise = priv->wstats.qual.noise = 100 - priv->wstats.qual.qual;
- bHwError = (((*(priv->rxringtail)) & (0x00000fff)) == 4080) | (((*(priv->rxringtail)) & (0x04000000)) != 0)
- | (((*(priv->rxringtail)) & (0x08000000)) != 0) | (((~(*(priv->rxringtail))) & (0x10000000)) != 0) | (((~(*(priv->rxringtail))) & (0x20000000)) != 0);
+ bHwError = (((*(priv->rxringtail)) & (0x00000fff)) == 4080) |
+ (((*(priv->rxringtail)) & (0x04000000)) != 0) |
+ (((*(priv->rxringtail)) & (0x08000000)) != 0) |
+ (((~(*(priv->rxringtail))) & (0x10000000)) != 0) |
+ (((~(*(priv->rxringtail))) & (0x20000000)) != 0);
bCRC = ((*(priv->rxringtail)) & (0x00002000)) >> 13;
bICV = ((*(priv->rxringtail)) & (0x00001000)) >> 12;
hdr = (struct ieee80211_hdr_4addr *)priv->rxbuffer->buf;
fc = le16_to_cpu(hdr->frame_ctl);
type = WLAN_FC_GET_TYPE(fc);
- if ((IEEE80211_FTYPE_CTL != type) &&
- (eqMacAddr(priv->ieee80211->current_network.bssid, (fc & IEEE80211_FCTL_TODS) ? hdr->addr1 : (fc & IEEE80211_FCTL_FROMDS) ? hdr->addr2 : hdr->addr3))
- && (!bHwError) && (!bCRC) && (!bICV)) {
- /* Perform signal smoothing for dynamic
- * mechanism on demand. This is different
- * with PerformSignalSmoothing8185 in smoothing
- * fomula. No dramatic adjustion is apply
- * because dynamic mechanism need some degree
- * of correctness. */
- PerformUndecoratedSignalSmoothing8185(priv, bCckRate);
-
- /* For good-looking singal strength. */
- SignalStrengthIndex = NetgearSignalStrengthTranslate(
- priv->LastSignalStrengthInPercent,
- priv->SignalStrength);
-
- priv->LastSignalStrengthInPercent = SignalStrengthIndex;
- priv->Stats_SignalStrength = TranslateToDbm8185((u8)SignalStrengthIndex);
+ if (IEEE80211_FTYPE_CTL != type &&
+ !bHwError && !bCRC && !bICV &&
+ eqMacAddr(priv->ieee80211->current_network.bssid,
+ fc & IEEE80211_FCTL_TODS ? hdr->addr1 :
+ fc & IEEE80211_FCTL_FROMDS ? hdr->addr2 :
+ hdr->addr3)) {
+
+ /* Perform signal smoothing for dynamic
+ * mechanism on demand. This is different
+ * with PerformSignalSmoothing8185 in smoothing
+ * fomula. No dramatic adjustion is apply
+ * because dynamic mechanism need some degree
+ * of correctness. */
+ PerformUndecoratedSignalSmoothing8185(priv, bCckRate);
+
+ /* For good-looking singal strength. */
+ SignalStrengthIndex = NetgearSignalStrengthTranslate(
+ priv->LastSignalStrengthInPercent,
+ priv->SignalStrength);
+
+ priv->LastSignalStrengthInPercent = SignalStrengthIndex;
+ priv->Stats_SignalStrength = TranslateToDbm8185((u8)SignalStrengthIndex);
/*
* We need more correct power of received packets and the "SignalStrength" of RxStats is beautified,
* so we record the correct power here.
*/
- priv->Stats_SignalQuality = (long)(priv->Stats_SignalQuality * 5 + (long)priv->SignalQuality + 5) / 6;
- priv->Stats_RecvSignalPower = (long)(priv->Stats_RecvSignalPower * 5 + priv->RecvSignalPower - 1) / 6;
+ priv->Stats_SignalQuality = (long)(priv->Stats_SignalQuality * 5 + (long)priv->SignalQuality + 5) / 6;
+ priv->Stats_RecvSignalPower = (long)(priv->Stats_RecvSignalPower * 5 + priv->RecvSignalPower - 1) / 6;
/* Figure out which antenna that received the lasted packet. */
- priv->LastRxPktAntenna = Antenna ? 1 : 0; /* 0: aux, 1: main. */
- SwAntennaDiversityRxOk8185(dev, priv->SignalStrength);
- }
+ priv->LastRxPktAntenna = Antenna ? 1 : 0; /* 0: aux, 1: main. */
+ SwAntennaDiversityRxOk8185(dev, priv->SignalStrength);
+ }
if (first) {
if (!priv->rx_skb_complete) {
@@ -1654,7 +1662,7 @@ void rtl8180_rx(struct net_device *dev)
}
/* support for prism header has been originally added by Christian */
if (priv->prism_hdr && priv->ieee80211->iw_mode == IW_MODE_MONITOR) {
-
+
} else {
priv->rx_skb = dev_alloc_skb(len+2);
if (!priv->rx_skb)
@@ -1766,7 +1774,7 @@ void rtl8180_data_hard_resume(struct net_device *dev)
rtl8180_set_mode(dev, EPROM_CMD_NORMAL);
}
-/*
+/*
* This function TX data frames when the ieee80211 stack requires this.
* It checks also if we need to stop the ieee tx queue, eventually do it
*/
@@ -1810,7 +1818,7 @@ rate) {
spin_unlock_irqrestore(&priv->tx_lock, flags);
}
-/*
+/*
* This is a rough attempt to TX a frame
* This is called by the ieee 80211 stack to TX management frames.
* If the ring is full packet are dropped (for data frame the queue
@@ -1916,7 +1924,7 @@ void rtl8180_prepare_beacon(struct net_device *dev)
}
}
-/*
+/*
* This function do the real dirty work: it enqueues a TX command
* descriptor in the ring buffer, copyes the frame in a TX buffer
* and kicks the NIC to ensure it does the DMA transfer.
@@ -2002,7 +2010,8 @@ short rtl8180_tx(struct net_device *dev, u8* txbuf, int len, int priority,
bRTSEnable = 0;
bCTSEnable = 0;
- ThisFrameTime = ComputeTxTime(len + sCrcLng, rtl8180_rate2rate(rate), 0, bUseShortPreamble);
+ ThisFrameTime = ComputeTxTime(len + sCrcLng, rtl8180_rate2rate(rate),
+ 0, bUseShortPreamble);
TxDescDuration = ThisFrameTime;
} else { /* Unicast packet */
u16 AckTime;
@@ -2040,7 +2049,8 @@ short rtl8180_tx(struct net_device *dev, u8* txbuf, int len, int priority,
bRTSEnable = 0;
RtsDur = 0;
- ThisFrameTime = ComputeTxTime(len + sCrcLng, rtl8180_rate2rate(rate), 0, bUseShortPreamble);
+ ThisFrameTime = ComputeTxTime(len + sCrcLng, rtl8180_rate2rate(rate),
+ 0, bUseShortPreamble);
TxDescDuration = ThisFrameTime + aSifsTime + AckTime;
}
@@ -2184,7 +2194,7 @@ short rtl8180_tx(struct net_device *dev, u8* txbuf, int len, int priority,
priv->txhpbufstail = buflist;
break;
case BEACON_PRIORITY:
- /*
+ /*
* The HW seems to be happy with the 1st
* descriptor filled and the 2nd empty...
* So always update descriptor 1 and never
@@ -2304,13 +2314,13 @@ void rtl8180_hw_sleep(struct net_device *dev, u32 th, u32 tl)
spin_lock_irqsave(&priv->ps_lock, flags);
- /*
+ /*
* Writing HW register with 0 equals to disable
* the timer, that is not really what we want
*/
tl -= MSECS(4+16+7);
- /*
+ /*
* If the interval in witch we are requested to sleep is too
* short then give up and remain awake
*/
@@ -2325,10 +2335,10 @@ void rtl8180_hw_sleep(struct net_device *dev, u32 th, u32 tl)
u32 tmp = (tl > rb) ? (tl-rb) : (rb-tl);
priv->DozePeriodInPast2Sec += jiffies_to_msecs(tmp);
-
- queue_delayed_work(priv->ieee80211->wq, &priv->ieee80211->hw_wakeup_wq, tmp); /* as tl may be less than rb */
+ /* as tl may be less than rb */
+ queue_delayed_work(priv->ieee80211->wq, &priv->ieee80211->hw_wakeup_wq, tmp);
}
- /*
+ /*
* If we suspect the TimerInt is gone beyond tl
* while setting it, then give up
*/
@@ -3086,7 +3096,8 @@ void rtl8185_set_rate(struct net_device *dev)
max_rr_rate = ieeerate2rtlrate(240);
write_nic_byte(dev, RESP_RATE,
- max_rr_rate<<MAX_RESP_RATE_SHIFT | min_rr_rate<<MIN_RESP_RATE_SHIFT);
+ max_rr_rate<<MAX_RESP_RATE_SHIFT |
+ min_rr_rate<<MIN_RESP_RATE_SHIFT);
word = read_nic_word(dev, BRSR);
word &= ~BRSR_MBR_8185;
@@ -3168,7 +3179,7 @@ void rtl8180_adapter_start(struct net_device *dev)
netif_start_queue(dev);
}
-/*
+/*
* This configures registers for beacon tx and enables it via
* rtl8180_beacon_tx_enable(). rtl8180_beacon_tx_disable() might
* be used to stop beacon transmission
@@ -3227,7 +3238,8 @@ void LeisurePSEnter(struct r8180_priv *priv)
{
if (priv->bLeisurePs) {
if (priv->ieee80211->ps == IEEE80211_PS_DISABLED)
- MgntActSet_802_11_PowerSaveMode(priv, IEEE80211_PS_MBCAST|IEEE80211_PS_UNICAST); /* IEEE80211_PS_ENABLE */
+ /* IEEE80211_PS_ENABLE */
+ MgntActSet_802_11_PowerSaveMode(priv, IEEE80211_PS_MBCAST|IEEE80211_PS_UNICAST);
}
}
@@ -3299,7 +3311,10 @@ void rtl8180_watch_dog(struct net_device *dev)
u16 SlotIndex = 0;
u16 i = 0;
if (priv->ieee80211->actscanning == false) {
- if ((priv->ieee80211->iw_mode != IW_MODE_ADHOC) && (priv->ieee80211->state == IEEE80211_NOLINK) && (priv->ieee80211->beinretry == false) && (priv->eRFPowerState == eRfOn))
+ if ((priv->ieee80211->iw_mode != IW_MODE_ADHOC) &&
+ (priv->ieee80211->state == IEEE80211_NOLINK) &&
+ (priv->ieee80211->beinretry == false) &&
+ (priv->eRFPowerState == eRfOn))
IPSEnter(dev);
}
/* YJ,add,080828,for link state check */
@@ -3732,7 +3747,7 @@ static int __init rtl8180_pci_module_init(void)
DMESG("Wireless extensions version %d", WIRELESS_EXT);
rtl8180_proc_module_init();
- if (pci_register_driver(&rtl8180_pci_driver)) {
+ if (pci_register_driver(&rtl8180_pci_driver)) {
DMESG("No device found");
return -ENODEV;
}
@@ -3839,7 +3854,7 @@ void rtl8180_tx_isr(struct net_device *dev, int pri, short error)
return;
}
- /*
+ /*
* We check all the descriptors between the head and the nic,
* but not the currently pointed by the nic (the next to be txed)
* and the previous of the pointed (might be in process ??)
@@ -3877,7 +3892,7 @@ void rtl8180_tx_isr(struct net_device *dev, int pri, short error)
head += 8;
}
- /*
+ /*
* The head has been moved to the last certainly TXed
* (or at least processed by the nic) packet.
* The driver take forcefully owning of all these packets
diff --git a/drivers/staging/rtl8187se/r8180_dm.c b/drivers/staging/rtl8187se/r8180_dm.c
index 261085d4b74..4d7a5951486 100644
--- a/drivers/staging/rtl8187se/r8180_dm.c
+++ b/drivers/staging/rtl8187se/r8180_dm.c
@@ -1,14 +1,8 @@
-//#include "r8180.h"
#include "r8180_dm.h"
#include "r8180_hw.h"
#include "r8180_93cx6.h"
-//{by amy 080312
-//
-// Description:
-// Return TRUE if we shall perform High Power Mecahnism, FALSE otherwise.
-//
-//+by amy 080312
+ /* Return TRUE if we shall perform High Power Mechanism, FALSE otherwise. */
#define RATE_ADAPTIVE_TIMER_PERIOD 300
bool CheckHighPower(struct net_device *dev)
@@ -17,33 +11,26 @@ bool CheckHighPower(struct net_device *dev)
struct ieee80211_device *ieee = priv->ieee80211;
if(!priv->bRegHighPowerMechanism)
- {
return false;
- }
if(ieee->state == IEEE80211_LINKED_SCANNING)
- {
return false;
- }
return true;
}
-//
-// Description:
-// Update Tx power level if necessary.
-// See also DoRxHighPower() and SetTxPowerLevel8185() for reference.
-//
-// Note:
-// The reason why we udpate Tx power level here instead of DoRxHighPower()
-// is the number of IO to change Tx power is much more than channel TR switch
-// and they are related to OFDM and MAC registers.
-// So, we don't want to update it so frequently in per-Rx packet base.
-//
-void
-DoTxHighPower(
- struct net_device *dev
- )
+/*
+ * Description:
+ * Update Tx power level if necessary.
+ * See also DoRxHighPower() and SetTxPowerLevel8185() for reference.
+ *
+ * Note:
+ * The reason why we udpate Tx power level here instead of DoRxHighPower()
+ * is the number of IO to change Tx power is much more than channel TR switch
+ * and they are related to OFDM and MAC registers.
+ * So, we don't want to update it so frequently in per-Rx packet base.
+ */
+void DoTxHighPower(struct net_device *dev)
{
struct r8180_priv *priv = ieee80211_priv(dev);
u16 HiPwrUpperTh = 0;
@@ -53,8 +40,6 @@ DoTxHighPower(
u8 u1bTmp;
char OfdmTxPwrIdx, CckTxPwrIdx;
- //printk("----> DoTxHighPower()\n");
-
HiPwrUpperTh = priv->RegHiPwrUpperTh;
HiPwrLowerTh = priv->RegHiPwrLowerTh;
@@ -63,526 +48,411 @@ DoTxHighPower(
RSSIHiPwrUpperTh = priv->RegRSSIHiPwrUpperTh;
RSSIHiPwrLowerTh = priv->RegRSSIHiPwrLowerTh;
- //lzm add 080826
+ /* lzm add 080826 */
OfdmTxPwrIdx = priv->chtxpwr_ofdm[priv->ieee80211->current_network.channel];
CckTxPwrIdx = priv->chtxpwr[priv->ieee80211->current_network.channel];
- // printk("DoTxHighPower() - UndecoratedSmoothedSS:%d, CurCCKRSSI = %d , bCurCCKPkt= %d \n", priv->UndecoratedSmoothedSS, priv->CurCCKRSSI, priv->bCurCCKPkt );
+ if ((priv->UndecoratedSmoothedSS > HiPwrUpperTh) ||
+ (priv->bCurCCKPkt && (priv->CurCCKRSSI > RSSIHiPwrUpperTh))) {
+ /* Stevenl suggested we degrade 8dBm in the high power state. 2007-12-04 Isaiah */
- if((priv->UndecoratedSmoothedSS > HiPwrUpperTh) ||
- (priv->bCurCCKPkt && (priv->CurCCKRSSI > RSSIHiPwrUpperTh)))
- {
- // Stevenl suggested that degrade 8dbm in high power sate. 2007-12-04 Isaiah
-
- // printk("=====>DoTxHighPower() - High Power - UndecoratedSmoothedSS:%d, HiPwrUpperTh = %d \n", priv->UndecoratedSmoothedSS, HiPwrUpperTh );
priv->bToUpdateTxPwr = true;
u1bTmp= read_nic_byte(dev, CCK_TXAGC);
- // If it never enter High Power.
- if( CckTxPwrIdx == u1bTmp)
- {
- u1bTmp = (u1bTmp > 16) ? (u1bTmp -16): 0; // 8dbm
- write_nic_byte(dev, CCK_TXAGC, u1bTmp);
+ /* If it never enter High Power. */
+ if (CckTxPwrIdx == u1bTmp) {
+ u1bTmp = (u1bTmp > 16) ? (u1bTmp -16): 0; /* 8dbm */
+ write_nic_byte(dev, CCK_TXAGC, u1bTmp);
- u1bTmp= read_nic_byte(dev, OFDM_TXAGC);
- u1bTmp = (u1bTmp > 16) ? (u1bTmp -16): 0; // 8dbm
- write_nic_byte(dev, OFDM_TXAGC, u1bTmp);
+ u1bTmp = read_nic_byte(dev, OFDM_TXAGC);
+ u1bTmp = (u1bTmp > 16) ? (u1bTmp - 16) : 0; /* 8dBm */
+ write_nic_byte(dev, OFDM_TXAGC, u1bTmp);
}
- }
- else if((priv->UndecoratedSmoothedSS < HiPwrLowerTh) &&
- (!priv->bCurCCKPkt || priv->CurCCKRSSI < RSSIHiPwrLowerTh))
- {
- // printk("DoTxHighPower() - lower Power - UndecoratedSmoothedSS:%d, HiPwrUpperTh = %d \n", priv->UndecoratedSmoothedSS, HiPwrLowerTh );
- if(priv->bToUpdateTxPwr)
- {
+ } else if ((priv->UndecoratedSmoothedSS < HiPwrLowerTh) &&
+ (!priv->bCurCCKPkt || priv->CurCCKRSSI < RSSIHiPwrLowerTh)) {
+ if (priv->bToUpdateTxPwr) {
priv->bToUpdateTxPwr = false;
- //SD3 required.
+ /* SD3 required. */
u1bTmp= read_nic_byte(dev, CCK_TXAGC);
- if(u1bTmp < CckTxPwrIdx)
- {
- //u1bTmp = ((u1bTmp+16) > 35) ? 35: (u1bTmp+16); // 8dbm
- //write_nic_byte(dev, CCK_TXAGC, u1bTmp);
- write_nic_byte(dev, CCK_TXAGC, CckTxPwrIdx);
+ if (u1bTmp < CckTxPwrIdx) {
+ write_nic_byte(dev, CCK_TXAGC, CckTxPwrIdx);
}
u1bTmp= read_nic_byte(dev, OFDM_TXAGC);
- if(u1bTmp < OfdmTxPwrIdx)
- {
- //u1bTmp = ((u1bTmp+16) > 35) ? 35: (u1bTmp+16); // 8dbm
- //write_nic_byte(dev, OFDM_TXAGC, u1bTmp);
- write_nic_byte(dev, OFDM_TXAGC, OfdmTxPwrIdx);
+ if (u1bTmp < OfdmTxPwrIdx) {
+ write_nic_byte(dev, OFDM_TXAGC, OfdmTxPwrIdx);
}
}
}
-
- //printk("<---- DoTxHighPower()\n");
}
-//
-// Description:
-// Callback function of UpdateTxPowerWorkItem.
-// Because of some event happened, e.g. CCX TPC, High Power Mechanism,
-// We update Tx power of current channel again.
-//
-void rtl8180_tx_pw_wq (struct work_struct *work)
+/*
+ * Description:
+ * Callback function of UpdateTxPowerWorkItem.
+ * Because of some event happened, e.g. CCX TPC, High Power Mechanism,
+ * We update Tx power of current channel again.
+ */
+void rtl8180_tx_pw_wq(struct work_struct *work)
{
-// struct r8180_priv *priv = container_of(work, struct r8180_priv, watch_dog_wq);
-// struct ieee80211_device * ieee = (struct ieee80211_device*)
-// container_of(work, struct ieee80211_device, watch_dog_wq);
struct delayed_work *dwork = to_delayed_work(work);
- struct ieee80211_device *ieee = container_of(dwork,struct ieee80211_device,tx_pw_wq);
- struct net_device *dev = ieee->dev;
-
-// printk("----> UpdateTxPowerWorkItemCallback()\n");
+ struct ieee80211_device *ieee = container_of(dwork, struct ieee80211_device, tx_pw_wq);
+ struct net_device *dev = ieee->dev;
DoTxHighPower(dev);
-
-// printk("<---- UpdateTxPowerWorkItemCallback()\n");
}
-//
-// Description:
-// Return TRUE if we shall perform DIG Mecahnism, FALSE otherwise.
-//
-bool
-CheckDig(
- struct net_device *dev
- )
+/*
+ * Return TRUE if we shall perform DIG Mecahnism, FALSE otherwise.
+ */
+bool CheckDig(struct net_device *dev)
{
struct r8180_priv *priv = ieee80211_priv(dev);
struct ieee80211_device *ieee = priv->ieee80211;
- if(!priv->bDigMechanism)
+ if (!priv->bDigMechanism)
return false;
- if(ieee->state != IEEE80211_LINKED)
+ if (ieee->state != IEEE80211_LINKED)
return false;
- //if(priv->CurrentOperaRate < 36) // Schedule Dig under all OFDM rates. By Bruce, 2007-06-01.
- if((priv->ieee80211->rate/5) < 36) // Schedule Dig under all OFDM rates. By Bruce, 2007-06-01.
+ if ((priv->ieee80211->rate / 5) < 36) /* Schedule Dig under all OFDM rates. By Bruce, 2007-06-01. */
return false;
return true;
}
-//
-// Description:
-// Implementation of DIG for Zebra and Zebra2.
-//
-void
-DIG_Zebra(
- struct net_device *dev
- )
+/*
+ * Implementation of DIG for Zebra and Zebra2.
+ */
+void DIG_Zebra(struct net_device *dev)
{
struct r8180_priv *priv = ieee80211_priv(dev);
u16 CCKFalseAlarm, OFDMFalseAlarm;
u16 OfdmFA1, OfdmFA2;
- int InitialGainStep = 7; // The number of initial gain stages.
- int LowestGainStage = 4; // The capable lowest stage of performing dig workitem.
- u32 AwakePeriodIn2Sec=0;
-
- //printk("---------> DIG_Zebra()\n");
+ int InitialGainStep = 7; /* The number of initial gain stages. */
+ int LowestGainStage = 4; /* The capable lowest stage of performing dig workitem. */
+ u32 AwakePeriodIn2Sec = 0;
CCKFalseAlarm = (u16)(priv->FalseAlarmRegValue & 0x0000ffff);
OFDMFalseAlarm = (u16)((priv->FalseAlarmRegValue >> 16) & 0x0000ffff);
OfdmFA1 = 0x15;
OfdmFA2 = ((u16)(priv->RegDigOfdmFaUpTh)) << 8;
-// printk("DIG**********CCK False Alarm: %#X \n",CCKFalseAlarm);
-// printk("DIG**********OFDM False Alarm: %#X \n",OFDMFalseAlarm);
-
- // The number of initial gain steps is different, by Bruce, 2007-04-13.
- if (priv->InitialGain == 0 ) //autoDIG
- { // Advised from SD3 DZ
- priv->InitialGain = 4; // In 87B, m74dBm means State 4 (m82dBm)
- }
- { // Advised from SD3 DZ
- OfdmFA1 = 0x20;
+ /* The number of initial gain steps is different, by Bruce, 2007-04-13. */
+ if (priv->InitialGain == 0) { /* autoDIG */
+ /* Advised from SD3 DZ */
+ priv->InitialGain = 4; /* In 87B, m74dBm means State 4 (m82dBm) */
}
-
-#if 1 //lzm reserved 080826
- AwakePeriodIn2Sec = (2000-priv ->DozePeriodInPast2Sec);
- //printk("&&& DozePeriod=%d AwakePeriod=%d\n", priv->DozePeriodInPast2Sec, AwakePeriodIn2Sec);
- priv ->DozePeriodInPast2Sec=0;
-
- if(AwakePeriodIn2Sec)
- {
- //RT_TRACE(COMP_DIG, DBG_TRACE, ("DIG: AwakePeriodIn2Sec(%d) - FATh(0x%X , 0x%X) ->",AwakePeriodIn2Sec, OfdmFA1, OfdmFA2));
- // adjuest DIG threshold.
- OfdmFA1 = (u16)((OfdmFA1*AwakePeriodIn2Sec) / 2000) ;
- OfdmFA2 = (u16)((OfdmFA2*AwakePeriodIn2Sec) / 2000) ;
- //RT_TRACE(COMP_DIG, DBG_TRACE, ("( 0x%X , 0x%X)\n", OfdmFA1, OfdmFA2));
- }
- else
- {
- ;//RT_TRACE(COMP_DIG, DBG_WARNING, ("ERROR!! AwakePeriodIn2Sec should not be ZERO!!\n"));
+ /* Advised from SD3 DZ */
+ OfdmFA1 = 0x20;
+
+#if 1 /* lzm reserved 080826 */
+ AwakePeriodIn2Sec = (2000 - priv->DozePeriodInPast2Sec);
+ priv->DozePeriodInPast2Sec = 0;
+
+ if (AwakePeriodIn2Sec) {
+ OfdmFA1 = (u16)((OfdmFA1 * AwakePeriodIn2Sec) / 2000);
+ OfdmFA2 = (u16)((OfdmFA2 * AwakePeriodIn2Sec) / 2000);
+ }
#endif
InitialGainStep = 8;
- LowestGainStage = priv->RegBModeGainStage; // Lowest gain stage.
+ LowestGainStage = priv->RegBModeGainStage; /* Lowest gain stage. */
- if (OFDMFalseAlarm > OfdmFA1)
- {
- if (OFDMFalseAlarm > OfdmFA2)
- {
+ if (OFDMFalseAlarm > OfdmFA1) {
+ if (OFDMFalseAlarm > OfdmFA2) {
priv->DIG_NumberFallbackVote++;
- if (priv->DIG_NumberFallbackVote >1)
- {
- //serious OFDM False Alarm, need fallback
- if (priv->InitialGain < InitialGainStep)
- {
- priv->InitialGainBackUp= priv->InitialGain;
+ if (priv->DIG_NumberFallbackVote > 1) {
+ /* serious OFDM False Alarm, need fallback */
+ if (priv->InitialGain < InitialGainStep) {
+ priv->InitialGainBackUp = priv->InitialGain;
priv->InitialGain = (priv->InitialGain + 1);
-// printk("DIG**********OFDM False Alarm: %#X, OfdmFA1: %#X, OfdmFA2: %#X\n", OFDMFalseAlarm, OfdmFA1, OfdmFA2);
-// printk("DIG+++++++ fallback OFDM:%d \n", priv->InitialGain);
UpdateInitialGain(dev);
}
priv->DIG_NumberFallbackVote = 0;
- priv->DIG_NumberUpgradeVote=0;
+ priv->DIG_NumberUpgradeVote = 0;
}
- }
- else
- {
+ } else {
if (priv->DIG_NumberFallbackVote)
priv->DIG_NumberFallbackVote--;
}
- priv->DIG_NumberUpgradeVote=0;
- }
- else
- {
+ priv->DIG_NumberUpgradeVote = 0;
+ } else {
if (priv->DIG_NumberFallbackVote)
priv->DIG_NumberFallbackVote--;
priv->DIG_NumberUpgradeVote++;
- if (priv->DIG_NumberUpgradeVote>9)
- {
- if (priv->InitialGain > LowestGainStage) // In 87B, m78dBm means State 4 (m864dBm)
- {
- priv->InitialGainBackUp= priv->InitialGain;
+ if (priv->DIG_NumberUpgradeVote > 9) {
+ if (priv->InitialGain > LowestGainStage) { /* In 87B, m78dBm means State 4 (m864dBm) */
+ priv->InitialGainBackUp = priv->InitialGain;
priv->InitialGain = (priv->InitialGain - 1);
-// printk("DIG**********OFDM False Alarm: %#X, OfdmFA1: %#X, OfdmFA2: %#X\n", OFDMFalseAlarm, OfdmFA1, OfdmFA2);
-// printk("DIG--------- Upgrade OFDM:%d \n", priv->InitialGain);
UpdateInitialGain(dev);
}
priv->DIG_NumberFallbackVote = 0;
- priv->DIG_NumberUpgradeVote=0;
+ priv->DIG_NumberUpgradeVote = 0;
}
}
-
-// printk("DIG+++++++ OFDM:%d\n", priv->InitialGain);
- //printk("<--------- DIG_Zebra()\n");
}
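Restated, the voting scheme above: each pass, OFDM false-alarm counts above OfdmFA2 accumulate fallback votes (two in a row raise InitialGain, i.e. desensitize the receiver), while counts at or below OfdmFA1 accumulate upgrade votes (ten in a row lower InitialGain). A standalone model of that state machine (illustrative, not the driver code):

	static void dig_vote(unsigned fa, unsigned fa1, unsigned fa2,
			     int *gain, int max_stage, int min_stage,
			     int *fallback, int *upgrade)
	{
		if (fa > fa1) {
			if (fa > fa2) {
				if (++(*fallback) > 1) {
					if (*gain < max_stage)
						(*gain)++; /* less sensitive */
					*fallback = 0;
				}
			} else if (*fallback) {
				(*fallback)--;
			}
			*upgrade = 0;
		} else {
			if (*fallback)
				(*fallback)--;
			if (++(*upgrade) > 9) {
				if (*gain > min_stage)
					(*gain)--; /* more sensitive */
				*fallback = 0;
				*upgrade = 0;
			}
		}
	}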
-//
-// Description:
-// Dispatch DIG implementation according to RF.
-//
-void
-DynamicInitGain(struct net_device *dev)
+/*
+ * Dispatch DIG implementation according to RF.
+ */
+void DynamicInitGain(struct net_device *dev)
{
DIG_Zebra(dev);
}
-void rtl8180_hw_dig_wq (struct work_struct *work)
+void rtl8180_hw_dig_wq(struct work_struct *work)
{
struct delayed_work *dwork = to_delayed_work(work);
- struct ieee80211_device *ieee = container_of(dwork,struct ieee80211_device,hw_dig_wq);
- struct net_device *dev = ieee->dev;
+ struct ieee80211_device *ieee = container_of(dwork, struct ieee80211_device, hw_dig_wq);
+ struct net_device *dev = ieee->dev;
struct r8180_priv *priv = ieee80211_priv(dev);
- // Read CCK and OFDM False Alarm.
+ /* Read CCK and OFDM False Alarm. */
priv->FalseAlarmRegValue = read_nic_dword(dev, CCK_FALSE_ALARM);
- // Adjust Initial Gain dynamically.
+ /* Adjust Initial Gain dynamically. */
DynamicInitGain(dev);
}
-int
-IncludedInSupportedRates(
- struct r8180_priv *priv,
- u8 TxRate )
+int IncludedInSupportedRates(struct r8180_priv *priv, u8 TxRate)
{
- u8 rate_len;
- u8 rate_ex_len;
- u8 RateMask = 0x7F;
- u8 idx;
- unsigned short Found = 0;
- u8 NaiveTxRate = TxRate&RateMask;
-
- rate_len = priv->ieee80211->current_network.rates_len;
- rate_ex_len = priv->ieee80211->current_network.rates_ex_len;
- for( idx=0; idx< rate_len; idx++ )
- {
- if( (priv->ieee80211->current_network.rates[idx] & RateMask) == NaiveTxRate )
- {
- Found = 1;
- goto found_rate;
- }
- }
- for( idx=0; idx< rate_ex_len; idx++ )
- {
- if( (priv->ieee80211->current_network.rates_ex[idx] & RateMask) == NaiveTxRate )
- {
- Found = 1;
- goto found_rate;
- }
- }
- return Found;
- found_rate:
- return Found;
+ u8 rate_len;
+ u8 rate_ex_len;
+ u8 RateMask = 0x7F;
+ u8 idx;
+ unsigned short Found = 0;
+ u8 NaiveTxRate = TxRate & RateMask;
+
+ rate_len = priv->ieee80211->current_network.rates_len;
+ rate_ex_len = priv->ieee80211->current_network.rates_ex_len;
+ for (idx = 0; idx < rate_len; idx++) {
+ if ((priv->ieee80211->current_network.rates[idx] & RateMask) == NaiveTxRate) {
+ Found = 1;
+ goto found_rate;
+ }
+ }
+ for (idx = 0; idx < rate_ex_len; idx++) {
+ if ((priv->ieee80211->current_network.rates_ex[idx] & RateMask) == NaiveTxRate) {
+ Found = 1;
+ goto found_rate;
+ }
+ }
+ return Found;
+ found_rate:
+ return Found;
}
-//
-// Description:
-// Get the Tx rate one degree up form the input rate in the supported rates.
-// Return the upgrade rate if it is successed, otherwise return the input rate.
-// By Bruce, 2007-06-05.
-//
-u8
-GetUpgradeTxRate(
- struct net_device *dev,
- u8 rate
- )
+/*
+ * Get the Tx rate one degree up from the input rate in the supported rates.
+ * Return the upgraded rate if successful, otherwise return the input rate.
+ */
+u8 GetUpgradeTxRate(struct net_device *dev, u8 rate)
{
- struct r8180_priv *priv = ieee80211_priv(dev);
- u8 UpRate;
-
- // Upgrade 1 degree.
- switch(rate)
- {
- case 108: // Up to 54Mbps.
- UpRate = 108;
- break;
-
- case 96: // Up to 54Mbps.
- UpRate = 108;
- break;
-
- case 72: // Up to 48Mbps.
- UpRate = 96;
- break;
-
- case 48: // Up to 36Mbps.
- UpRate = 72;
- break;
-
- case 36: // Up to 24Mbps.
- UpRate = 48;
- break;
-
- case 22: // Up to 18Mbps.
- UpRate = 36;
- break;
-
- case 11: // Up to 11Mbps.
- UpRate = 22;
- break;
-
- case 4: // Up to 5.5Mbps.
- UpRate = 11;
- break;
-
- case 2: // Up to 2Mbps.
- UpRate = 4;
- break;
-
- default:
- printk("GetUpgradeTxRate(): Input Tx Rate(%d) is undefined!\n", rate);
- return rate;
- }
- // Check if the rate is valid.
- if(IncludedInSupportedRates(priv, UpRate))
- {
-// printk("GetUpgradeTxRate(): GetUpgrade Tx rate(%d) from %d !\n", UpRate, priv->CurrentOperaRate);
- return UpRate;
- }
- else
- {
- //printk("GetUpgradeTxRate(): Tx rate (%d) is not in supported rates\n", UpRate);
- return rate;
- }
- return rate;
+ struct r8180_priv *priv = ieee80211_priv(dev);
+ u8 UpRate;
+
+ /* Upgrade 1 degree. */
+ switch (rate) {
+ case 108: /* Up to 54Mbps. */
+ UpRate = 108;
+ break;
+
+ case 96: /* Up to 54Mbps. */
+ UpRate = 108;
+ break;
+
+ case 72: /* Up to 48Mbps. */
+ UpRate = 96;
+ break;
+
+ case 48: /* Up to 36Mbps. */
+ UpRate = 72;
+ break;
+
+ case 36: /* Up to 24Mbps. */
+ UpRate = 48;
+ break;
+
+ case 22: /* Up to 18Mbps. */
+ UpRate = 36;
+ break;
+
+ case 11: /* Up to 11Mbps. */
+ UpRate = 22;
+ break;
+
+ case 4: /* Up to 5.5Mbps. */
+ UpRate = 11;
+ break;
+
+ case 2: /* Up to 2Mbps. */
+ UpRate = 4;
+ break;
+
+ default:
+ printk("GetUpgradeTxRate(): Input Tx Rate(%d) is undefined!\n", rate);
+ return rate;
+ }
+ /* Check if the rate is valid. */
+ if (IncludedInSupportedRates(priv, UpRate))
+ return UpRate;
+ else
+ return rate;
}
-//
-// Description:
-// Get the Tx rate one degree down form the input rate in the supported rates.
-// Return the degrade rate if it is successed, otherwise return the input rate.
-// By Bruce, 2007-06-05.
-//
-u8
-GetDegradeTxRate(
- struct net_device *dev,
- u8 rate
- )
+/*
+ * Get the Tx rate one degree down from the input rate in the supported rates.
+ * Return the degraded rate if successful, otherwise return the input rate.
+ */
+
+u8 GetDegradeTxRate(struct net_device *dev, u8 rate)
{
- struct r8180_priv *priv = ieee80211_priv(dev);
- u8 DownRate;
-
- // Upgrade 1 degree.
- switch(rate)
- {
- case 108: // Down to 48Mbps.
- DownRate = 96;
- break;
-
- case 96: // Down to 36Mbps.
- DownRate = 72;
- break;
-
- case 72: // Down to 24Mbps.
- DownRate = 48;
- break;
-
- case 48: // Down to 18Mbps.
- DownRate = 36;
- break;
-
- case 36: // Down to 11Mbps.
- DownRate = 22;
- break;
-
- case 22: // Down to 5.5Mbps.
- DownRate = 11;
- break;
-
- case 11: // Down to 2Mbps.
- DownRate = 4;
- break;
-
- case 4: // Down to 1Mbps.
- DownRate = 2;
- break;
-
- case 2: // Down to 1Mbps.
- DownRate = 2;
- break;
-
- default:
- printk("GetDegradeTxRate(): Input Tx Rate(%d) is undefined!\n", rate);
- return rate;
- }
- // Check if the rate is valid.
- if(IncludedInSupportedRates(priv, DownRate))
- {
-// printk("GetDegradeTxRate(): GetDegrade Tx rate(%d) from %d!\n", DownRate, priv->CurrentOperaRate);
- return DownRate;
- }
- else
- {
- //printk("GetDegradeTxRate(): Tx rate (%d) is not in supported rates\n", DownRate);
- return rate;
- }
- return rate;
+ struct r8180_priv *priv = ieee80211_priv(dev);
+ u8 DownRate;
+
+ /* Degrade 1 degree. */
+ switch (rate) {
+ case 108: /* Down to 48Mbps. */
+ DownRate = 96;
+ break;
+
+ case 96: /* Down to 36Mbps. */
+ DownRate = 72;
+ break;
+
+ case 72: /* Down to 24Mbps. */
+ DownRate = 48;
+ break;
+
+ case 48: /* Down to 18Mbps. */
+ DownRate = 36;
+ break;
+
+ case 36: /* Down to 11Mbps. */
+ DownRate = 22;
+ break;
+
+ case 22: /* Down to 5.5Mbps. */
+ DownRate = 11;
+ break;
+
+ case 11: /* Down to 2Mbps. */
+ DownRate = 4;
+ break;
+
+ case 4: /* Down to 1Mbps. */
+ DownRate = 2;
+ break;
+
+ case 2: /* Down to 1Mbps. */
+ DownRate = 2;
+ break;
+
+ default:
+ printk("GetDegradeTxRate(): Input Tx Rate(%d) is undefined!\n", rate);
+ return rate;
+ }
+ /* Check if the rate is valid. */
+ if (IncludedInSupportedRates(priv, DownRate))
+ return DownRate;
+ else
+ return rate;
}
-//
-// Helper function to determine if specified data rate is
-// CCK rate.
-// 2005.01.25, by rcnjko.
-//
-bool
-MgntIsCckRate(
- u16 rate
- )
+/*
+ * Helper function to determine if specified data rate is
+ * CCK rate.
+ */
+
+bool MgntIsCckRate(u16 rate)
{
- bool bReturn = false;
+ bool bReturn = false;
- if((rate <= 22) && (rate != 12) && (rate != 18))
- {
- bReturn = true;
- }
+ if ((rate <= 22) && (rate != 12) && (rate != 18)) {
+ bReturn = true;
+ }
- return bReturn;
+ return bReturn;
}
-//
-// Description:
-// Tx Power tracking mechanism routine on 87SE.
-// Created by Roger, 2007.12.11.
-//
-void
-TxPwrTracking87SE(
- struct net_device *dev
-)
+/*
+ * Description:
+ * Tx Power tracking mechanism routine on 87SE.
+ */
+void TxPwrTracking87SE(struct net_device *dev)
{
struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
u8 tmpu1Byte, CurrentThermal, Idx;
char CckTxPwrIdx, OfdmTxPwrIdx;
- //u32 u4bRfReg;
tmpu1Byte = read_nic_byte(dev, EN_LPF_CAL);
- CurrentThermal = (tmpu1Byte & 0xf0)>>4; //[ 7:4]: thermal meter indication.
- CurrentThermal = (CurrentThermal>0x0c)? 0x0c:CurrentThermal;//lzm add 080826
-
- //printk("TxPwrTracking87SE(): CurrentThermal(%d)\n", CurrentThermal);
+ CurrentThermal = (tmpu1Byte & 0xf0) >> 4; /* [7:4]: thermal meter indication. */
+ CurrentThermal = (CurrentThermal > 0x0c) ? 0x0c : CurrentThermal; /* lzm add 080826 */
- if( CurrentThermal != priv->ThermalMeter)
- {
-// printk("TxPwrTracking87SE(): Thermal meter changed!!!\n");
-
- // Update Tx Power level on each channel.
- for(Idx = 1; Idx<15; Idx++)
- {
+ if (CurrentThermal != priv->ThermalMeter) {
+ /* Update Tx Power level on each channel. */
+ for (Idx = 1; Idx < 15; Idx++) {
CckTxPwrIdx = priv->chtxpwr[Idx];
OfdmTxPwrIdx = priv->chtxpwr_ofdm[Idx];
- if( CurrentThermal > priv->ThermalMeter )
- { // higher thermal meter.
- CckTxPwrIdx += (CurrentThermal - priv->ThermalMeter)*2;
- OfdmTxPwrIdx += (CurrentThermal - priv->ThermalMeter)*2;
+ if (CurrentThermal > priv->ThermalMeter) {
+ /* higher thermal meter. */
+ CckTxPwrIdx += (CurrentThermal - priv->ThermalMeter) * 2;
+ OfdmTxPwrIdx += (CurrentThermal - priv->ThermalMeter) * 2;
- if(CckTxPwrIdx >35)
- CckTxPwrIdx = 35; // Force TxPower to maximal index.
- if(OfdmTxPwrIdx >35)
+ if (CckTxPwrIdx > 35)
+ CckTxPwrIdx = 35; /* Force TxPower to maximal index. */
+ if (OfdmTxPwrIdx > 35)
OfdmTxPwrIdx = 35;
- }
- else
- { // lower thermal meter.
- CckTxPwrIdx -= (priv->ThermalMeter - CurrentThermal)*2;
- OfdmTxPwrIdx -= (priv->ThermalMeter - CurrentThermal)*2;
+ } else {
+ /* lower thermal meter. */
+ CckTxPwrIdx -= (priv->ThermalMeter - CurrentThermal) * 2;
+ OfdmTxPwrIdx -= (priv->ThermalMeter - CurrentThermal) * 2;
- if(CckTxPwrIdx <0)
+ if (CckTxPwrIdx < 0)
CckTxPwrIdx = 0;
- if(OfdmTxPwrIdx <0)
+ if (OfdmTxPwrIdx < 0)
OfdmTxPwrIdx = 0;
}
- // Update TxPower level on CCK and OFDM resp.
+ /* Update TxPower level on CCK and OFDM resp. */
priv->chtxpwr[Idx] = CckTxPwrIdx;
priv->chtxpwr_ofdm[Idx] = OfdmTxPwrIdx;
}
- // Update TxPower level immediately.
+ /* Update TxPower level immediately. */
rtl8225z2_SetTXPowerLevel(dev, priv->ieee80211->current_network.channel);
}
priv->ThermalMeter = CurrentThermal;
}
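The loop above shifts both power indices by two steps per unit of thermal drift and clamps them to the 0..35 index range. A minimal sketch of that adjustment, with a hypothetical helper name and the driver's u8/char types assumed:

/* Hypothetical helper: apply thermal drift to one Tx power index. */
static char adjust_txpwr_idx(char idx, u8 cur_thermal, u8 last_thermal)
{
	int v = idx + ((int)cur_thermal - (int)last_thermal) * 2;

	if (v > 35)		/* force TxPower to the maximal index */
		v = 35;
	else if (v < 0)
		v = 0;
	return (char)v;
}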
-void
-StaRateAdaptive87SE(
- struct net_device *dev
- )
+void StaRateAdaptive87SE(struct net_device *dev)
{
struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
- unsigned long CurrTxokCnt;
- u16 CurrRetryCnt;
- u16 CurrRetryRate;
- //u16 i,idx;
- unsigned long CurrRxokCnt;
- bool bTryUp = false;
- bool bTryDown = false;
- u8 TryUpTh = 1;
- u8 TryDownTh = 2;
- u32 TxThroughput;
+ unsigned long CurrTxokCnt;
+ u16 CurrRetryCnt;
+ u16 CurrRetryRate;
+ unsigned long CurrRxokCnt;
+ bool bTryUp = false;
+ bool bTryDown = false;
+ u8 TryUpTh = 1;
+ u8 TryDownTh = 2;
+ u32 TxThroughput;
long CurrSignalStrength;
bool bUpdateInitialGain = false;
- u8 u1bOfdm=0, u1bCck = 0;
+ u8 u1bOfdm = 0, u1bCck = 0;
char OfdmTxPwrIdx, CckTxPwrIdx;
- priv->RateAdaptivePeriod= RATE_ADAPTIVE_TIMER_PERIOD;
+ priv->RateAdaptivePeriod = RATE_ADAPTIVE_TIMER_PERIOD;
CurrRetryCnt = priv->CurrRetryCnt;
@@ -591,707 +461,462 @@ StaRateAdaptive87SE(
CurrSignalStrength = priv->Stats_RecvSignalPower;
TxThroughput = (u32)(priv->NumTxOkBytesTotal - priv->LastTxOKBytes);
priv->LastTxOKBytes = priv->NumTxOkBytesTotal;
- priv->CurrentOperaRate = priv->ieee80211->rate/5;
- //printk("priv->CurrentOperaRate is %d\n",priv->CurrentOperaRate);
- //2 Compute retry ratio.
- if (CurrTxokCnt>0)
- {
- CurrRetryRate = (u16)(CurrRetryCnt*100/CurrTxokCnt);
+ priv->CurrentOperaRate = priv->ieee80211->rate / 5;
+ /* 2 Compute retry ratio. */
+ if (CurrTxokCnt > 0) {
+ CurrRetryRate = (u16)(CurrRetryCnt * 100 / CurrTxokCnt);
+ } else {
+		/* It may be serious retry. To distinguish between serious retry and no packets. Modified by Bruce. */
+ CurrRetryRate = (u16)(CurrRetryCnt * 100 / 1);
}
- else
- { // It may be serious retry. To distinguish serious retry or no packets modified by Bruce
- CurrRetryRate = (u16)(CurrRetryCnt*100/1);
- }
-
-
- //
- // Added by Roger, 2007.01.02.
- // For debug information.
- //
- //printk("\n(1) pHalData->LastRetryRate: %d \n",priv->LastRetryRate);
- //printk("(2) RetryCnt = %d \n", CurrRetryCnt);
- //printk("(3) TxokCnt = %d \n", CurrTxokCnt);
- //printk("(4) CurrRetryRate = %d \n", CurrRetryRate);
- //printk("(5) CurrSignalStrength = %d \n",CurrSignalStrength);
- //printk("(6) TxThroughput is %d\n",TxThroughput);
- //printk("priv->NumTxOkBytesTotal is %d\n",priv->NumTxOkBytesTotal);
priv->LastRetryCnt = priv->CurrRetryCnt;
priv->LastTxokCnt = priv->NumTxOkTotal;
priv->LastRxokCnt = priv->ieee80211->NumRxOkTotal;
priv->CurrRetryCnt = 0;
- //2No Tx packets, return to init_rate or not?
- if (CurrRetryRate==0 && CurrTxokCnt == 0)
- {
- //
- //After 9 (30*300ms) seconds in this condition, we try to raise rate.
- //
+ /* 2No Tx packets, return to init_rate or not? */
+ if (CurrRetryRate == 0 && CurrTxokCnt == 0) {
+ /*
+ * After 9 (30*300ms) seconds in this condition, we try to raise rate.
+ */
priv->TryupingCountNoData++;
-// printk("No Tx packets, TryupingCountNoData(%d)\n", priv->TryupingCountNoData);
- //[TRC Dell Lab] Extend raised period from 4.5sec to 9sec, Isaiah 2008-02-15 18:00
- if (priv->TryupingCountNoData>30)
- {
+ /* [TRC Dell Lab] Extend raised period from 4.5sec to 9sec, Isaiah 2008-02-15 18:00 */
+ if (priv->TryupingCountNoData > 30) {
priv->TryupingCountNoData = 0;
- priv->CurrentOperaRate = GetUpgradeTxRate(dev, priv->CurrentOperaRate);
- // Reset Fail Record
+ priv->CurrentOperaRate = GetUpgradeTxRate(dev, priv->CurrentOperaRate);
+ /* Reset Fail Record */
priv->LastFailTxRate = 0;
priv->LastFailTxRateSS = -200;
priv->FailTxRateCount = 0;
}
goto SetInitialGain;
- }
- else
- {
- priv->TryupingCountNoData=0; //Reset trying up times.
+ } else {
+		priv->TryupingCountNoData = 0; /* Reset trying up times. */
}
- //
- // For Netgear case, I comment out the following signal strength estimation,
- // which can results in lower rate to transmit when sample is NOT enough (e.g. PING request).
- // 2007.04.09, by Roger.
- //
-
- //
- // Restructure rate adaptive as the following main stages:
- // (1) Add retry threshold in 54M upgrading condition with signal strength.
- // (2) Add the mechanism to degrade to CCK rate according to signal strength
- // and retry rate.
- // (3) Remove all Initial Gain Updates over OFDM rate. To avoid the complicated
- // situation, Initial Gain Update is upon on DIG mechanism except CCK rate.
- // (4) Add the mehanism of trying to upgrade tx rate.
- // (5) Record the information of upping tx rate to avoid trying upping tx rate constantly.
- // By Bruce, 2007-06-05.
- //
- //
-
- // 11Mbps or 36Mbps
- // Check more times in these rate(key rates).
- //
- if(priv->CurrentOperaRate == 22 || priv->CurrentOperaRate == 72)
- {
+	/*
+	 * For the Netgear case, I comment out the following signal strength estimation,
+	 * which can result in a lower rate to transmit when the sample is NOT enough (e.g. PING request).
+	 *
+	 * Restructure rate adaptive as the following main stages:
+	 * (1) Add retry threshold in 54M upgrading condition with signal strength.
+	 * (2) Add the mechanism to degrade to CCK rate according to signal strength
+	 *     and retry rate.
+	 * (3) Remove all Initial Gain Updates over OFDM rate. To avoid the complicated
+	 *     situation, Initial Gain Update relies on the DIG mechanism except at CCK rates.
+	 * (4) Add the mechanism of trying to upgrade tx rate.
+	 * (5) Record the information of upping tx rate to avoid trying to up the tx rate constantly.
+	 */
+
+ /*
+ * 11Mbps or 36Mbps
+	 * Check more times at these rates (key rates).
+ */
+ if (priv->CurrentOperaRate == 22 || priv->CurrentOperaRate == 72)
TryUpTh += 9;
- }
- //
- // Let these rates down more difficult.
- //
- if(MgntIsCckRate(priv->CurrentOperaRate) || priv->CurrentOperaRate == 36)
- {
- TryDownTh += 1;
- }
-
- //1 Adjust Rate.
- if (priv->bTryuping == true)
- {
- //2 For Test Upgrading mechanism
- // Note:
- // Sometimes the throughput is upon on the capability bwtween the AP and NIC,
- // thus the low data rate does not improve the performance.
- // We randomly upgrade the data rate and check if the retry rate is improved.
-
- // Upgrading rate did not improve the retry rate, fallback to the original rate.
- if ( (CurrRetryRate > 25) && TxThroughput < priv->LastTxThroughput)
- {
- //Not necessary raising rate, fall back rate.
+	/*
+	 * Make these rates harder to step down from.
+	 */
+ if (MgntIsCckRate(priv->CurrentOperaRate) || priv->CurrentOperaRate == 36)
+ TryDownTh += 1;
+
+ /* 1 Adjust Rate. */
+	if (priv->bTryuping) {
+		/* 2 For Test Upgrading mechanism
+		 * Note:
+		 * Sometimes the throughput depends on the capability between the AP and NIC,
+		 * thus the low data rate does not improve the performance.
+		 * We randomly upgrade the data rate and check if the retry rate is improved.
+		 */
+
+ /* Upgrading rate did not improve the retry rate, fallback to the original rate. */
+ if ((CurrRetryRate > 25) && TxThroughput < priv->LastTxThroughput) {
+			/* Not necessary to raise the rate; fall back. */
bTryDown = true;
- //printk("case1-1: Not necessary raising rate, fall back rate....\n");
- //printk("case1-1: pMgntInfo->CurrentOperaRate =%d, TxThroughput = %d, LastThroughput = %d\n",
- // priv->CurrentOperaRate, TxThroughput, priv->LastTxThroughput);
- }
- else
- {
+ } else {
priv->bTryuping = false;
}
- }
- else if (CurrSignalStrength > -47 && (CurrRetryRate < 50))
- {
- //2For High Power
- //
- // Added by Roger, 2007.04.09.
- // Return to highest data rate, if signal strength is good enough.
- // SignalStrength threshold(-50dbm) is for RTL8186.
- // Revise SignalStrength threshold to -51dbm.
- //
- // Also need to check retry rate for safety, by Bruce, 2007-06-05.
- if(priv->CurrentOperaRate != priv->ieee80211->current_network.HighestOperaRate )
- {
+ } else if (CurrSignalStrength > -47 && (CurrRetryRate < 50)) {
+ /*
+ * 2For High Power
+ *
+ * Return to highest data rate, if signal strength is good enough.
+ * SignalStrength threshold(-50dbm) is for RTL8186.
+ * Revise SignalStrength threshold to -51dbm.
+ */
+ /* Also need to check retry rate for safety, by Bruce, 2007-06-05. */
+ if (priv->CurrentOperaRate != priv->ieee80211->current_network.HighestOperaRate) {
bTryUp = true;
- // Upgrade Tx Rate directly.
+ /* Upgrade Tx Rate directly. */
priv->TryupingCount += TryUpTh;
}
-// printk("case2: StaRateAdaptive87SE: Power(%d) is high enough!!. \n", CurrSignalStrength);
- }
- else if(CurrTxokCnt > 9 && CurrTxokCnt< 100 && CurrRetryRate >= 600)
- {
- //2 For Serious Retry
- //
- // Traffic is not busy but our Tx retry is serious.
- //
+ } else if (CurrTxokCnt > 9 && CurrTxokCnt < 100 && CurrRetryRate >= 600) {
+		/*
+		 * 2 For Serious Retry
+		 *
+		 * Traffic is not busy but our Tx retry is serious.
+		 */
bTryDown = true;
- // Let Rate Mechanism to degrade tx rate directly.
+ /* Let Rate Mechanism to degrade tx rate directly. */
priv->TryDownCountLowData += TryDownTh;
-// printk("case3: RA: Tx Retry is serious. Degrade Tx Rate to %d directly...\n", priv->CurrentOperaRate);
- }
- else if ( priv->CurrentOperaRate == 108 )
- {
- //2For 54Mbps
- // Air Link
- if ( (CurrRetryRate>26)&&(priv->LastRetryRate>25))
-// if ( (CurrRetryRate>40)&&(priv->LastRetryRate>39))
- {
- //Down to rate 48Mbps.
+ } else if (priv->CurrentOperaRate == 108) {
+ /* 2For 54Mbps */
+ /* Air Link */
+ if ((CurrRetryRate > 26) && (priv->LastRetryRate > 25)) {
bTryDown = true;
}
- // Cable Link
- else if ( (CurrRetryRate>17)&&(priv->LastRetryRate>16) && (CurrSignalStrength > -72))
-// else if ( (CurrRetryRate>17)&&(priv->LastRetryRate>16) && (CurrSignalStrength > -72))
- {
- //Down to rate 48Mbps.
+ /* Cable Link */
+ else if ((CurrRetryRate > 17) && (priv->LastRetryRate > 16) && (CurrSignalStrength > -72)) {
bTryDown = true;
}
- if(bTryDown && (CurrSignalStrength < -75)) //cable link
- {
+ if (bTryDown && (CurrSignalStrength < -75)) /* cable link */
priv->TryDownCountLowData += TryDownTh;
- }
- //printk("case4---54M \n");
-
}
- else if ( priv->CurrentOperaRate == 96 )
- {
- //2For 48Mbps
- //Air Link
- if ( ((CurrRetryRate>48) && (priv->LastRetryRate>47)))
-// if ( ((CurrRetryRate>65) && (priv->LastRetryRate>64)))
-
- {
- //Down to rate 36Mbps.
+ else if (priv->CurrentOperaRate == 96) {
+ /* 2For 48Mbps */
+ /* Air Link */
+ if (((CurrRetryRate > 48) && (priv->LastRetryRate > 47))) {
bTryDown = true;
- }
- //Cable Link
- else if ( ((CurrRetryRate>21) && (priv->LastRetryRate>20)) && (CurrSignalStrength > -74))
- {
- //Down to rate 36Mbps.
+ } else if (((CurrRetryRate > 21) && (priv->LastRetryRate > 20)) && (CurrSignalStrength > -74)) { /* Cable Link */
+ /* Down to rate 36Mbps. */
bTryDown = true;
- }
- else if((CurrRetryRate> (priv->LastRetryRate + 50 )) && (priv->FailTxRateCount >2 ))
-// else if((CurrRetryRate> (priv->LastRetryRate + 70 )) && (priv->FailTxRateCount >2 ))
- {
+ } else if ((CurrRetryRate > (priv->LastRetryRate + 50)) && (priv->FailTxRateCount > 2)) {
bTryDown = true;
priv->TryDownCountLowData += TryDownTh;
- }
- else if ( (CurrRetryRate<8) && (priv->LastRetryRate<8) ) //TO DO: need to consider (RSSI)
-// else if ( (CurrRetryRate<28) && (priv->LastRetryRate<8) )
- {
+ } else if ((CurrRetryRate < 8) && (priv->LastRetryRate < 8)) { /* TO DO: need to consider (RSSI) */
bTryUp = true;
}
- if(bTryDown && (CurrSignalStrength < -75))
- {
+		if (bTryDown && (CurrSignalStrength < -75)) {
priv->TryDownCountLowData += TryDownTh;
}
- //printk("case5---48M \n");
- }
- else if ( priv->CurrentOperaRate == 72 )
- {
- //2For 36Mbps
- if ( (CurrRetryRate>43) && (priv->LastRetryRate>41))
-// if ( (CurrRetryRate>60) && (priv->LastRetryRate>59))
- {
- //Down to rate 24Mbps.
+ } else if (priv->CurrentOperaRate == 72) {
+ /* 2For 36Mbps */
+ if ((CurrRetryRate > 43) && (priv->LastRetryRate > 41)) {
+ /* Down to rate 24Mbps. */
bTryDown = true;
- }
- else if((CurrRetryRate> (priv->LastRetryRate + 50 )) && (priv->FailTxRateCount >2 ))
-// else if((CurrRetryRate> (priv->LastRetryRate + 70 )) && (priv->FailTxRateCount >2 ))
- {
+ } else if ((CurrRetryRate > (priv->LastRetryRate + 50)) && (priv->FailTxRateCount > 2)) {
bTryDown = true;
priv->TryDownCountLowData += TryDownTh;
- }
- else if ( (CurrRetryRate<15) && (priv->LastRetryRate<16)) //TO DO: need to consider (RSSI)
-// else if ( (CurrRetryRate<35) && (priv->LastRetryRate<36))
- {
+ } else if ((CurrRetryRate < 15) && (priv->LastRetryRate < 16)) { /* TO DO: need to consider (RSSI) */
bTryUp = true;
}
- if(bTryDown && (CurrSignalStrength < -80))
- {
+ if (bTryDown && (CurrSignalStrength < -80))
priv->TryDownCountLowData += TryDownTh;
- }
- //printk("case6---36M \n");
- }
- else if ( priv->CurrentOperaRate == 48 )
- {
- //2For 24Mbps
- // Air Link
- if ( ((CurrRetryRate>63) && (priv->LastRetryRate>62)))
-// if ( ((CurrRetryRate>83) && (priv->LastRetryRate>82)))
- {
- //Down to rate 18Mbps.
+
+ } else if (priv->CurrentOperaRate == 48) {
+ /* 2For 24Mbps */
+ /* Air Link */
+ if (((CurrRetryRate > 63) && (priv->LastRetryRate > 62))) {
bTryDown = true;
- }
- //Cable Link
- else if ( ((CurrRetryRate>33) && (priv->LastRetryRate>32)) && (CurrSignalStrength > -82) )
-// else if ( ((CurrRetryRate>50) && (priv->LastRetryRate>49)) && (CurrSignalStrength > -82) )
- {
- //Down to rate 18Mbps.
+ } else if (((CurrRetryRate > 33) && (priv->LastRetryRate > 32)) && (CurrSignalStrength > -82)) { /* Cable Link */
bTryDown = true;
- }
- else if((CurrRetryRate> (priv->LastRetryRate + 50 )) && (priv->FailTxRateCount >2 ))
-// else if((CurrRetryRate> (priv->LastRetryRate + 70 )) && (priv->FailTxRateCount >2 ))
-
- {
+		} else if ((CurrRetryRate > (priv->LastRetryRate + 50)) && (priv->FailTxRateCount > 2)) {
bTryDown = true;
priv->TryDownCountLowData += TryDownTh;
- }
- else if ( (CurrRetryRate<20) && (priv->LastRetryRate<21)) //TO DO: need to consider (RSSI)
-// else if ( (CurrRetryRate<40) && (priv->LastRetryRate<41))
- {
+ } else if ((CurrRetryRate < 20) && (priv->LastRetryRate < 21)) { /* TO DO: need to consider (RSSI) */
bTryUp = true;
}
- if(bTryDown && (CurrSignalStrength < -82))
- {
+ if (bTryDown && (CurrSignalStrength < -82))
priv->TryDownCountLowData += TryDownTh;
- }
- //printk("case7---24M \n");
- }
- else if ( priv->CurrentOperaRate == 36 )
- {
- //2For 18Mbps
- // original (109, 109)
- //[TRC Dell Lab] (90, 91), Isaiah 2008-02-18 23:24
- // (85, 86), Isaiah 2008-02-18 24:00
- if ( ((CurrRetryRate>85) && (priv->LastRetryRate>86)))
-// if ( ((CurrRetryRate>115) && (priv->LastRetryRate>116)))
- {
- //Down to rate 11Mbps.
+
+ } else if (priv->CurrentOperaRate == 36) {
+ if (((CurrRetryRate > 85) && (priv->LastRetryRate > 86))) {
bTryDown = true;
- }
- //[TRC Dell Lab] Isaiah 2008-02-18 23:24
- else if((CurrRetryRate> (priv->LastRetryRate + 50 )) && (priv->FailTxRateCount >2 ))
-// else if((CurrRetryRate> (priv->LastRetryRate + 70 )) && (priv->FailTxRateCount >2 ))
- {
+ } else if ((CurrRetryRate > (priv->LastRetryRate + 50)) && (priv->FailTxRateCount > 2)) {
bTryDown = true;
priv->TryDownCountLowData += TryDownTh;
- }
- else if ( (CurrRetryRate<22) && (priv->LastRetryRate<23)) //TO DO: need to consider (RSSI)
-// else if ( (CurrRetryRate<42) && (priv->LastRetryRate<43))
- {
+ } else if ((CurrRetryRate < 22) && (priv->LastRetryRate < 23)) { /* TO DO: need to consider (RSSI) */
bTryUp = true;
}
- //printk("case8---18M \n");
- }
- else if ( priv->CurrentOperaRate == 22 )
- {
- //2For 11Mbps
- if (CurrRetryRate>95)
-// if (CurrRetryRate>155)
- {
+ } else if (priv->CurrentOperaRate == 22) {
+ /* 2For 11Mbps */
+ if (CurrRetryRate > 95) {
bTryDown = true;
}
- else if ( (CurrRetryRate<29) && (priv->LastRetryRate <30) )//TO DO: need to consider (RSSI)
-// else if ( (CurrRetryRate<49) && (priv->LastRetryRate <50) )
- {
+		else if ((CurrRetryRate < 29) && (priv->LastRetryRate < 30)) { /* TO DO: need to consider (RSSI) */
bTryUp = true;
- }
- //printk("case9---11M \n");
}
- else if ( priv->CurrentOperaRate == 11 )
- {
- //2For 5.5Mbps
- if (CurrRetryRate>149)
-// if (CurrRetryRate>189)
- {
+ } else if (priv->CurrentOperaRate == 11) {
+ /* 2For 5.5Mbps */
+ if (CurrRetryRate > 149) {
bTryDown = true;
- }
- else if ( (CurrRetryRate<60) && (priv->LastRetryRate < 65))
-// else if ( (CurrRetryRate<80) && (priv->LastRetryRate < 85))
-
- {
+ } else if ((CurrRetryRate < 60) && (priv->LastRetryRate < 65)) {
bTryUp = true;
- }
- //printk("case10---5.5M \n");
}
- else if ( priv->CurrentOperaRate == 4 )
- {
- //2For 2 Mbps
- if((CurrRetryRate>99) && (priv->LastRetryRate>99))
-// if((CurrRetryRate>199) && (priv->LastRetryRate>199))
- {
+ } else if (priv->CurrentOperaRate == 4) {
+ /* 2For 2 Mbps */
+ if ((CurrRetryRate > 99) && (priv->LastRetryRate > 99)) {
bTryDown = true;
- }
- else if ( (CurrRetryRate < 65) && (priv->LastRetryRate < 70))
-// else if ( (CurrRetryRate < 85) && (priv->LastRetryRate < 90))
- {
+ } else if ((CurrRetryRate < 65) && (priv->LastRetryRate < 70)) {
bTryUp = true;
}
- //printk("case11---2M \n");
- }
- else if ( priv->CurrentOperaRate == 2 )
- {
- //2For 1 Mbps
- if( (CurrRetryRate<70) && (priv->LastRetryRate<75))
-// if( (CurrRetryRate<90) && (priv->LastRetryRate<95))
- {
+ } else if (priv->CurrentOperaRate == 2) {
+ /* 2For 1 Mbps */
+ if ((CurrRetryRate < 70) && (priv->LastRetryRate < 75)) {
bTryUp = true;
}
- //printk("case12---1M \n");
}
- if(bTryUp && bTryDown)
- printk("StaRateAdaptive87B(): Tx Rate tried upping and downing simultaneously!\n");
-
- //1 Test Upgrading Tx Rate
- // Sometimes the cause of the low throughput (high retry rate) is the compatibility between the AP and NIC.
- // To test if the upper rate may cause lower retry rate, this mechanism randomly occurs to test upgrading tx rate.
- if(!bTryUp && !bTryDown && (priv->TryupingCount == 0) && (priv->TryDownCountLowData == 0)
- && priv->CurrentOperaRate != priv->ieee80211->current_network.HighestOperaRate && priv->FailTxRateCount < 2)
- {
- if(jiffies% (CurrRetryRate + 101) == 0)
- {
+ if (bTryUp && bTryDown)
+ printk("StaRateAdaptive87B(): Tx Rate tried upping and downing simultaneously!\n");
+
+ /* 1 Test Upgrading Tx Rate
+ * Sometimes the cause of the low throughput (high retry rate) is the compatibility between the AP and NIC.
+	 * To test whether a higher rate may give a lower retry rate, this mechanism randomly tries upgrading the tx rate.
+ */
+ if (!bTryUp && !bTryDown && (priv->TryupingCount == 0) && (priv->TryDownCountLowData == 0)
+ && priv->CurrentOperaRate != priv->ieee80211->current_network.HighestOperaRate && priv->FailTxRateCount < 2) {
+ if (jiffies % (CurrRetryRate + 101) == 0) {
bTryUp = true;
priv->bTryuping = true;
- //printk("StaRateAdaptive87SE(): Randomly try upgrading...\n");
}
}
- //1 Rate Mechanism
- if(bTryUp)
- {
+ /* 1 Rate Mechanism */
+ if (bTryUp) {
priv->TryupingCount++;
priv->TryDownCountLowData = 0;
- {
-// printk("UP: pHalData->TryupingCount = %d\n", priv->TryupingCount);
-// printk("UP: TryUpTh(%d)+ (FailTxRateCount(%d))^2 =%d\n",
-// TryUpTh, priv->FailTxRateCount, (TryUpTh + priv->FailTxRateCount * priv->FailTxRateCount) );
-// printk("UP: pHalData->bTryuping=%d\n", priv->bTryuping);
-
- }
+ /*
+ * Check more times if we need to upgrade indeed.
+ * Because the largest value of pHalData->TryupingCount is 0xFFFF and
+ * the largest value of pHalData->FailTxRateCount is 0x14,
+ * this condition will be satisfied at most every 2 min.
+ */
- //
- // Check more times if we need to upgrade indeed.
- // Because the largest value of pHalData->TryupingCount is 0xFFFF and
- // the largest value of pHalData->FailTxRateCount is 0x14,
- // this condition will be satisfied at most every 2 min.
- //
-
- if((priv->TryupingCount > (TryUpTh + priv->FailTxRateCount * priv->FailTxRateCount)) ||
- (CurrSignalStrength > priv->LastFailTxRateSS) || priv->bTryuping)
- {
+ if ((priv->TryupingCount > (TryUpTh + priv->FailTxRateCount * priv->FailTxRateCount)) ||
+ (CurrSignalStrength > priv->LastFailTxRateSS) || priv->bTryuping) {
priv->TryupingCount = 0;
- //
- // When transferring from CCK to OFDM, DIG is an important issue.
- //
- if(priv->CurrentOperaRate == 22)
+ /*
+ * When transferring from CCK to OFDM, DIG is an important issue.
+ */
+ if (priv->CurrentOperaRate == 22)
bUpdateInitialGain = true;
- // The difference in throughput between 48Mbps and 36Mbps is 8M.
- // So, we must be carefully in this rate scale. Isaiah 2008-02-15.
- //
- if( ((priv->CurrentOperaRate == 72) || (priv->CurrentOperaRate == 48) || (priv->CurrentOperaRate == 36)) &&
- (priv->FailTxRateCount > 2) )
- priv->RateAdaptivePeriod= (RATE_ADAPTIVE_TIMER_PERIOD/2);
+ /*
+ * The difference in throughput between 48Mbps and 36Mbps is 8M.
+			 * So, we must be careful in this rate scale. Isaiah 2008-02-15.
+ */
+ if (((priv->CurrentOperaRate == 72) || (priv->CurrentOperaRate == 48) || (priv->CurrentOperaRate == 36)) &&
+ (priv->FailTxRateCount > 2))
+ priv->RateAdaptivePeriod = (RATE_ADAPTIVE_TIMER_PERIOD / 2);
- // (1)To avoid upgrade frequently to the fail tx rate, add the FailTxRateCount into the threshold.
- // (2)If the signal strength is increased, it may be able to upgrade.
+			/* (1) To avoid upgrading frequently to the failed tx rate, add the FailTxRateCount into the threshold. */
+			/* (2) If the signal strength is increased, it may be able to upgrade. */
priv->CurrentOperaRate = GetUpgradeTxRate(dev, priv->CurrentOperaRate);
-// printk("StaRateAdaptive87SE(): Upgrade Tx Rate to %d\n", priv->CurrentOperaRate);
-
- //[TRC Dell Lab] Bypass 12/9/6, Isaiah 2008-02-18 20:00
- if(priv->CurrentOperaRate ==36)
- {
- priv->bUpdateARFR=true;
- write_nic_word(dev, ARFR, 0x0F8F); //bypass 12/9/6
-// printk("UP: ARFR=0xF8F\n");
- }
- else if(priv->bUpdateARFR)
- {
- priv->bUpdateARFR=false;
- write_nic_word(dev, ARFR, 0x0FFF); //set 1M ~ 54Mbps.
-// printk("UP: ARFR=0xFFF\n");
+
+ if (priv->CurrentOperaRate == 36) {
+ priv->bUpdateARFR = true;
+ write_nic_word(dev, ARFR, 0x0F8F); /* bypass 12/9/6 */
+			} else if (priv->bUpdateARFR) {
+ priv->bUpdateARFR = false;
+ write_nic_word(dev, ARFR, 0x0FFF); /* set 1M ~ 54Mbps. */
}
- // Update Fail Tx rate and count.
- if(priv->LastFailTxRate != priv->CurrentOperaRate)
- {
+ /* Update Fail Tx rate and count. */
+ if (priv->LastFailTxRate != priv->CurrentOperaRate) {
priv->LastFailTxRate = priv->CurrentOperaRate;
priv->FailTxRateCount = 0;
- priv->LastFailTxRateSS = -200; // Set lowest power.
+ priv->LastFailTxRateSS = -200; /* Set lowest power. */
}
}
- }
- else
- {
- if(priv->TryupingCount > 0)
+ } else {
+ if (priv->TryupingCount > 0)
priv->TryupingCount --;
}
- if(bTryDown)
- {
+ if (bTryDown) {
priv->TryDownCountLowData++;
priv->TryupingCount = 0;
- {
-// printk("DN: pHalData->TryDownCountLowData = %d\n",priv->TryDownCountLowData);
-// printk("DN: TryDownTh =%d\n", TryDownTh);
-// printk("DN: pHalData->bTryuping=%d\n", priv->bTryuping);
- }
- //Check if Tx rate can be degraded or Test trying upgrading should fallback.
- if(priv->TryDownCountLowData > TryDownTh || priv->bTryuping)
- {
+ /* Check if Tx rate can be degraded or Test trying upgrading should fallback. */
+ if (priv->TryDownCountLowData > TryDownTh || priv->bTryuping) {
priv->TryDownCountLowData = 0;
priv->bTryuping = false;
- // Update fail information.
- if(priv->LastFailTxRate == priv->CurrentOperaRate)
- {
- priv->FailTxRateCount ++;
- // Record the Tx fail rate signal strength.
- if(CurrSignalStrength > priv->LastFailTxRateSS)
- {
+ /* Update fail information. */
+ if (priv->LastFailTxRate == priv->CurrentOperaRate) {
+ priv->FailTxRateCount++;
+ /* Record the Tx fail rate signal strength. */
+ if (CurrSignalStrength > priv->LastFailTxRateSS)
priv->LastFailTxRateSS = CurrSignalStrength;
- }
- }
- else
- {
+ } else {
priv->LastFailTxRate = priv->CurrentOperaRate;
priv->FailTxRateCount = 1;
priv->LastFailTxRateSS = CurrSignalStrength;
}
priv->CurrentOperaRate = GetDegradeTxRate(dev, priv->CurrentOperaRate);
- // Reduce chariot training time at weak signal strength situation. SD3 ED demand.
- //[TRC Dell Lab] Revise Signal Threshold from -75 to -80 , Isaiah 2008-02-18 20:00
- if( (CurrSignalStrength < -80) && (priv->CurrentOperaRate > 72 ))
- {
+ /* Reduce chariot training time at weak signal strength situation. SD3 ED demand. */
+			if ((CurrSignalStrength < -80) && (priv->CurrentOperaRate > 72)) {
priv->CurrentOperaRate = 72;
-// printk("DN: weak signal strength (%d), degrade to 36Mbps\n", CurrSignalStrength);
}
- //[TRC Dell Lab] Bypass 12/9/6, Isaiah 2008-02-18 20:00
- if(priv->CurrentOperaRate ==36)
- {
- priv->bUpdateARFR=true;
- write_nic_word(dev, ARFR, 0x0F8F); //bypass 12/9/6
-// printk("DN: ARFR=0xF8F\n");
- }
- else if(priv->bUpdateARFR)
- {
- priv->bUpdateARFR=false;
- write_nic_word(dev, ARFR, 0x0FFF); //set 1M ~ 54Mbps.
-// printk("DN: ARFR=0xFFF\n");
+ if (priv->CurrentOperaRate == 36) {
+ priv->bUpdateARFR = true;
+ write_nic_word(dev, ARFR, 0x0F8F); /* bypass 12/9/6 */
+ } else if (priv->bUpdateARFR) {
+ priv->bUpdateARFR = false;
+ write_nic_word(dev, ARFR, 0x0FFF); /* set 1M ~ 54Mbps. */
}
- //
- // When it is CCK rate, it may need to update initial gain to receive lower power packets.
- //
- if(MgntIsCckRate(priv->CurrentOperaRate))
- {
+ /*
+ * When it is CCK rate, it may need to update initial gain to receive lower power packets.
+ */
+ if (MgntIsCckRate(priv->CurrentOperaRate)) {
bUpdateInitialGain = true;
}
-// printk("StaRateAdaptive87SE(): Degrade Tx Rate to %d\n", priv->CurrentOperaRate);
}
- }
- else
- {
- if(priv->TryDownCountLowData > 0)
- priv->TryDownCountLowData --;
+ } else {
+ if (priv->TryDownCountLowData > 0)
+ priv->TryDownCountLowData--;
}
- // Keep the Tx fail rate count to equal to 0x15 at most.
- // Reduce the fail count at least to 10 sec if tx rate is tending stable.
- if(priv->FailTxRateCount >= 0x15 ||
- (!bTryUp && !bTryDown && priv->TryDownCountLowData == 0 && priv->TryupingCount && priv->FailTxRateCount > 0x6))
- {
- priv->FailTxRateCount --;
+ /*
+	 * Keep the Tx fail rate count at 0x15 at most.
+	 * Reduce the fail count at least to 10 sec if the tx rate is tending stable.
+ */
+ if (priv->FailTxRateCount >= 0x15 ||
+ (!bTryUp && !bTryDown && priv->TryDownCountLowData == 0 && priv->TryupingCount && priv->FailTxRateCount > 0x6)) {
+ priv->FailTxRateCount--;
}
OfdmTxPwrIdx = priv->chtxpwr_ofdm[priv->ieee80211->current_network.channel];
CckTxPwrIdx = priv->chtxpwr[priv->ieee80211->current_network.channel];
- //[TRC Dell Lab] Mac0x9e increase 2 level in 36M~18M situation, Isaiah 2008-02-18 24:00
- if((priv->CurrentOperaRate < 96) &&(priv->CurrentOperaRate > 22))
- {
+	/* Mac 0x9e: increase 2 levels in the 36M~18M situation */
+ if ((priv->CurrentOperaRate < 96) && (priv->CurrentOperaRate > 22)) {
u1bCck = read_nic_byte(dev, CCK_TXAGC);
u1bOfdm = read_nic_byte(dev, OFDM_TXAGC);
- // case 1: Never enter High power
- if(u1bCck == CckTxPwrIdx )
- {
- if(u1bOfdm != (OfdmTxPwrIdx+2) )
- {
- priv->bEnhanceTxPwr= true;
- u1bOfdm = ((u1bOfdm+2) > 35) ? 35: (u1bOfdm+2);
+ /* case 1: Never enter High power */
+ if (u1bCck == CckTxPwrIdx) {
+ if (u1bOfdm != (OfdmTxPwrIdx + 2)) {
+ priv->bEnhanceTxPwr = true;
+				u1bOfdm = ((u1bOfdm + 2) > 35) ? 35 : (u1bOfdm + 2);
write_nic_byte(dev, OFDM_TXAGC, u1bOfdm);
-// printk("Enhance OFDM_TXAGC : +++++ u1bOfdm= 0x%x\n", u1bOfdm);
}
- }
- // case 2: enter high power
- else if(u1bCck < CckTxPwrIdx)
- {
- if(!priv->bEnhanceTxPwr)
- {
- priv->bEnhanceTxPwr= true;
- u1bOfdm = ((u1bOfdm+2) > 35) ? 35: (u1bOfdm+2);
+ } else if (u1bCck < CckTxPwrIdx) {
+ /* case 2: enter high power */
+ if (!priv->bEnhanceTxPwr) {
+ priv->bEnhanceTxPwr = true;
+				u1bOfdm = ((u1bOfdm + 2) > 35) ? 35 : (u1bOfdm + 2);
write_nic_byte(dev, OFDM_TXAGC, u1bOfdm);
- //RT_TRACE(COMP_RATE, DBG_TRACE, ("Enhance OFDM_TXAGC(2) : +++++ u1bOfdm= 0x%x\n", u1bOfdm));
}
}
- }
- else if(priv->bEnhanceTxPwr) //54/48/11/5.5/2/1
- {
+ } else if (priv->bEnhanceTxPwr) { /* 54/48/11/5.5/2/1 */
u1bCck = read_nic_byte(dev, CCK_TXAGC);
u1bOfdm = read_nic_byte(dev, OFDM_TXAGC);
- // case 1: Never enter High power
- if(u1bCck == CckTxPwrIdx )
- {
- priv->bEnhanceTxPwr= false;
- write_nic_byte(dev, OFDM_TXAGC, OfdmTxPwrIdx);
- //printk("Recover OFDM_TXAGC : ===== u1bOfdm= 0x%x\n", OfdmTxPwrIdx);
+ /* case 1: Never enter High power */
+ if (u1bCck == CckTxPwrIdx) {
+ priv->bEnhanceTxPwr = false;
+ write_nic_byte(dev, OFDM_TXAGC, OfdmTxPwrIdx);
}
- // case 2: enter high power
- else if(u1bCck < CckTxPwrIdx)
- {
- priv->bEnhanceTxPwr= false;
- u1bOfdm = ((u1bOfdm-2) > 0) ? (u1bOfdm-2): 0;
+ /* case 2: enter high power */
+ else if (u1bCck < CckTxPwrIdx) {
+ priv->bEnhanceTxPwr = false;
+			u1bOfdm = ((u1bOfdm - 2) > 0) ? (u1bOfdm - 2) : 0;
write_nic_byte(dev, OFDM_TXAGC, u1bOfdm);
- //RT_TRACE(COMP_RATE, DBG_TRACE, ("Recover OFDM_TXAGC(2): ===== u1bOfdm= 0x%x\n", u1bOfdm));
-
}
}
- //
- // We need update initial gain when we set tx rate "from OFDM to CCK" or
- // "from CCK to OFDM".
- //
+ /*
+	 * We need to update initial gain when we set tx rate "from OFDM to CCK" or
+ * "from CCK to OFDM".
+ */
SetInitialGain:
- if(bUpdateInitialGain)
- {
- if(MgntIsCckRate(priv->CurrentOperaRate)) // CCK
- {
- if(priv->InitialGain > priv->RegBModeGainStage)
- {
- priv->InitialGainBackUp= priv->InitialGain;
-
- if(CurrSignalStrength < -85) // Low power, OFDM [0x17] = 26.
- {
- //SD3 SYs suggest that CurrSignalStrength < -65, ofdm 0x17=26.
+ if (bUpdateInitialGain) {
+ if (MgntIsCckRate(priv->CurrentOperaRate)) { /* CCK */
+ if (priv->InitialGain > priv->RegBModeGainStage) {
+ priv->InitialGainBackUp = priv->InitialGain;
+
+ if (CurrSignalStrength < -85) /* Low power, OFDM [0x17] = 26. */
+ /* SD3 SYs suggest that CurrSignalStrength < -65, ofdm 0x17=26. */
priv->InitialGain = priv->RegBModeGainStage;
- }
- else if(priv->InitialGain > priv->RegBModeGainStage + 1)
- {
+
+ else if (priv->InitialGain > priv->RegBModeGainStage + 1)
priv->InitialGain -= 2;
- }
+
else
- {
- priv->InitialGain --;
- }
+ priv->InitialGain--;
+
printk("StaRateAdaptive87SE(): update init_gain to index %d for date rate %d\n",priv->InitialGain, priv->CurrentOperaRate);
UpdateInitialGain(dev);
}
- }
- else // OFDM
- {
- if(priv->InitialGain < 4)
- {
- priv->InitialGainBackUp= priv->InitialGain;
+ } else { /* OFDM */
+ if (priv->InitialGain < 4) {
+ priv->InitialGainBackUp = priv->InitialGain;
- priv->InitialGain ++;
+ priv->InitialGain++;
printk("StaRateAdaptive87SE(): update init_gain to index %d for date rate %d\n",priv->InitialGain, priv->CurrentOperaRate);
UpdateInitialGain(dev);
}
}
}
- //Record the related info
+ /* Record the related info */
priv->LastRetryRate = CurrRetryRate;
priv->LastTxThroughput = TxThroughput;
priv->ieee80211->rate = priv->CurrentOperaRate * 5;
}
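All the per-rate thresholds above key off a "retry rate" that is really retries per 100 acknowledged frames, with the divisor floored at one so a period with no successful Tx still yields a number. A hedged restatement (the helper name is ours):

/* Hypothetical restatement of the retry-ratio computation above. */
static u16 retry_ratio(u16 retry_cnt, unsigned long txok_cnt)
{
	if (txok_cnt == 0)	/* no ACKed frames; avoid dividing by zero */
		txok_cnt = 1;
	return (u16)(retry_cnt * 100 / txok_cnt);
}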
-void rtl8180_rate_adapter(struct work_struct * work)
+void rtl8180_rate_adapter(struct work_struct *work)
{
struct delayed_work *dwork = to_delayed_work(work);
- struct ieee80211_device *ieee = container_of(dwork,struct ieee80211_device,rate_adapter_wq);
- struct net_device *dev = ieee->dev;
- //struct r8180_priv *priv = ieee80211_priv(dev);
-// DMESG("---->rtl8180_rate_adapter");
- StaRateAdaptive87SE(dev);
-// DMESG("<----rtl8180_rate_adapter");
+ struct ieee80211_device *ieee = container_of(dwork, struct ieee80211_device, rate_adapter_wq);
+ struct net_device *dev = ieee->dev;
+ StaRateAdaptive87SE(dev);
}
void timer_rate_adaptive(unsigned long data)
{
- struct r8180_priv* priv = ieee80211_priv((struct net_device *)data);
- //DMESG("---->timer_rate_adaptive()\n");
- if(!priv->up)
- {
-// DMESG("<----timer_rate_adaptive():driver is not up!\n");
+ struct r8180_priv *priv = ieee80211_priv((struct net_device *)data);
+ if (!priv->up) {
return;
}
- if((priv->ieee80211->iw_mode != IW_MODE_MASTER)
+ if ((priv->ieee80211->iw_mode != IW_MODE_MASTER)
&& (priv->ieee80211->state == IEEE80211_LINKED) &&
- (priv->ForcedDataRate == 0) )
- {
-// DMESG("timer_rate_adaptive():schedule rate_adapter_wq\n");
+ (priv->ForcedDataRate == 0)) {
queue_work(priv->ieee80211->wq, (void *)&priv->ieee80211->rate_adapter_wq);
-// StaRateAdaptive87SE((struct net_device *)data);
}
priv->rateadapter_timer.expires = jiffies + MSECS(priv->RateAdaptivePeriod);
add_timer(&priv->rateadapter_timer);
- //DMESG("<----timer_rate_adaptive()\n");
}
-//by amy 080312}
-void
-SwAntennaDiversityRxOk8185(
- struct net_device *dev,
- u8 SignalStrength
- )
+
+void SwAntennaDiversityRxOk8185(struct net_device *dev, u8 SignalStrength)
{
struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
-// printk("+SwAntennaDiversityRxOk8185: RxSs: %d\n", SignalStrength);
-
priv->AdRxOkCnt++;
- if( priv->AdRxSignalStrength != -1)
- {
- priv->AdRxSignalStrength = ((priv->AdRxSignalStrength*7) + (SignalStrength*3)) / 10;
- }
- else
- { // Initialization case.
+ if (priv->AdRxSignalStrength != -1) {
+ priv->AdRxSignalStrength = ((priv->AdRxSignalStrength * 7) + (SignalStrength * 3)) / 10;
+ } else { /* Initialization case. */
priv->AdRxSignalStrength = SignalStrength;
}
-//{+by amy 080312
- if( priv->LastRxPktAntenna ) //Main antenna.
+
+ if (priv->LastRxPktAntenna) /* Main antenna. */
priv->AdMainAntennaRxOkCnt++;
- else // Aux antenna.
+ else /* Aux antenna. */
priv->AdAuxAntennaRxOkCnt++;
-//+by amy 080312
-// printk("-SwAntennaDiversityRxOk8185: AdRxOkCnt: %d AdRxSignalStrength: %d\n", priv->AdRxOkCnt, priv->AdRxSignalStrength);
}
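The smoothing above is a fixed-point exponential moving average that weights history against the new sample 7:3. As a standalone sketch (the helper name is ours):

/* 70/30 fixed-point moving average, as used for AdRxSignalStrength above. */
static long ewma_7_3(long avg, u8 sample)
{
	return (avg * 7 + (long)sample * 3) / 10;
}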
-//
-// Description:
-// Change Antenna Switch.
-//
-bool
-SetAntenna8185(
- struct net_device *dev,
- u8 u1bAntennaIndex
- )
+/* Change Antenna Switch. */
+bool SetAntenna8185(struct net_device *dev, u8 u1bAntennaIndex)
{
struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
bool bAntennaSwitched = false;
-// printk("+SetAntenna8185(): Antenna is switching to: %d \n", u1bAntennaIndex);
-
- switch(u1bAntennaIndex)
- {
+ switch (u1bAntennaIndex) {
case 0:
/* Mac register, main antenna */
write_nic_byte(dev, ANTSEL, 0x03);
@@ -1319,64 +944,35 @@ SetAntenna8185(
}
if(bAntennaSwitched)
- {
priv->CurrAntennaIndex = u1bAntennaIndex;
- }
-
-// printk("-SetAntenna8185(): return (%#X)\n", bAntennaSwitched);
return bAntennaSwitched;
}
-//
-// Description:
-// Toggle Antenna switch.
-//
-bool
-SwitchAntenna(
- struct net_device *dev
- )
+/* Toggle Antenna switch. */
+bool SwitchAntenna(struct net_device *dev)
{
struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
bool bResult;
- if(priv->CurrAntennaIndex == 0)
- {
- bResult = SetAntenna8185(dev, 1);
-//by amy 080312
-// printk("SwitchAntenna(): switching to antenna 1 ......\n");
-// bResult = SetAntenna8185(dev, 1);//-by amy 080312
- }
- else
- {
- bResult = SetAntenna8185(dev, 0);
-//by amy 080312
-// printk("SwitchAntenna(): switching to antenna 0 ......\n");
-// bResult = SetAntenna8185(dev, 0);//-by amy 080312
+ if (priv->CurrAntennaIndex == 0) {
+ bResult = SetAntenna8185(dev, 1);
+ } else {
+ bResult = SetAntenna8185(dev, 0);
}
return bResult;
}
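Since SetAntenna8185() takes the target index directly, SwitchAntenna() is a plain toggle; an equivalent one-line form (a sketch, not a proposed change) would be:

	bResult = SetAntenna8185(dev, priv->CurrAntennaIndex == 0 ? 1 : 0);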
-//
-// Description:
-// Engine of SW Antenna Diversity mechanism.
-// Since 8187 has no Tx part information,
-// this implementation is only dependend on Rx part information.
-//
-// 2006.04.17, by rcnjko.
-//
-void
-SwAntennaDiversity(
- struct net_device *dev
- )
+/*
+ * Engine of SW Antenna Diversity mechanism.
+ * Since 8187 has no Tx part information,
+ * this implementation depends only on Rx part information.
+ */
+void SwAntennaDiversity(struct net_device *dev)
{
struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
- bool bSwCheckSS=false;
-// printk("+SwAntennaDiversity(): CurrAntennaIndex: %d\n", priv->CurrAntennaIndex);
-// printk("AdTickCount is %d\n",priv->AdTickCount);
-//by amy 080312
- if(bSwCheckSS)
- {
+ bool bSwCheckSS = false;
+ if (bSwCheckSS) {
priv->AdTickCount++;
printk("(1) AdTickCount: %d, AdCheckPeriod: %d\n",
@@ -1384,246 +980,162 @@ SwAntennaDiversity(
printk("(2) AdRxSignalStrength: %ld, AdRxSsThreshold: %ld\n",
priv->AdRxSignalStrength, priv->AdRxSsThreshold);
}
-// priv->AdTickCount++;//-by amy 080312
-
- // Case 1. No Link.
- if(priv->ieee80211->state != IEEE80211_LINKED)
- {
- // printk("SwAntennaDiversity(): Case 1. No Link.\n");
+ /* Case 1. No Link. */
+ if (priv->ieee80211->state != IEEE80211_LINKED) {
priv->bAdSwitchedChecking = false;
- // I switch antenna here to prevent any one of antenna is broken before link established, 2006.04.18, by rcnjko..
+		/* Switch antenna here in case one of the antennas is broken before the link is established. 2006.04.18, by rcnjko. */
SwitchAntenna(dev);
- }
- // Case 2. Linked but no packet received.
- else if(priv->AdRxOkCnt == 0)
- {
- // printk("SwAntennaDiversity(): Case 2. Linked but no packet received.\n");
+	} else if (priv->AdRxOkCnt == 0) {
+		/* Case 2. Linked but no packet received. */
priv->bAdSwitchedChecking = false;
SwitchAntenna(dev);
- }
- // Case 3. Evaluate last antenna switch action and undo it if necessary.
- else if(priv->bAdSwitchedChecking == true)
- {
- // printk("SwAntennaDiversity(): Case 3. Evaluate last antenna switch action.\n");
+	} else if (priv->bAdSwitchedChecking) {
+		/* Case 3. Evaluate last antenna switch action and undo it if necessary. */
priv->bAdSwitchedChecking = false;
- // Adjust Rx signal strength threshold.
+ /* Adjust Rx signal strength threshold. */
priv->AdRxSsThreshold = (priv->AdRxSignalStrength + priv->AdRxSsBeforeSwitched) / 2;
priv->AdRxSsThreshold = (priv->AdRxSsThreshold > priv->AdMaxRxSsThreshold) ?
priv->AdMaxRxSsThreshold: priv->AdRxSsThreshold;
- if(priv->AdRxSignalStrength < priv->AdRxSsBeforeSwitched)
- { // Rx signal strength is not improved after we swtiched antenna. => Swich back.
-// printk("SwAntennaDiversity(): Rx Signal Strength is not improved, CurrRxSs: %d, LastRxSs: %d\n",
-// priv->AdRxSignalStrength, priv->AdRxSsBeforeSwitched);
-//by amy 080312
- // Increase Antenna Diversity checking period due to bad decision.
+		if (priv->AdRxSignalStrength < priv->AdRxSsBeforeSwitched) {
+			/* Rx signal strength is not improved after we switched antennas. => Switch back. */
+ /* Increase Antenna Diversity checking period due to bad decision. */
priv->AdCheckPeriod *= 2;
-//by amy 080312
- // Increase Antenna Diversity checking period.
- if(priv->AdCheckPeriod > priv->AdMaxCheckPeriod)
+ /* Increase Antenna Diversity checking period. */
+ if (priv->AdCheckPeriod > priv->AdMaxCheckPeriod)
priv->AdCheckPeriod = priv->AdMaxCheckPeriod;
- // Wrong deceision => switch back.
+			/* Wrong decision => switch back. */
SwitchAntenna(dev);
- }
- else
- { // Rx Signal Strength is improved.
-// printk("SwAntennaDiversity(): Rx Signal Strength is improved, CurrRxSs: %d, LastRxSs: %d\n",
-// priv->AdRxSignalStrength, priv->AdRxSsBeforeSwitched);
+ } else {
+ /* Rx Signal Strength is improved. */
- // Reset Antenna Diversity checking period to its min value.
+ /* Reset Antenna Diversity checking period to its min value. */
priv->AdCheckPeriod = priv->AdMinCheckPeriod;
}
-// printk("SwAntennaDiversity(): AdRxSsThreshold: %d, AdCheckPeriod: %d\n",
-// priv->AdRxSsThreshold, priv->AdCheckPeriod);
}
- // Case 4. Evaluate if we shall switch antenna now.
- // Cause Table Speed is very fast in TRC Dell Lab, we check it every time.
- else// if(priv->AdTickCount >= priv->AdCheckPeriod)//-by amy 080312
- {
-// printk("SwAntennaDiversity(): Case 4. Evaluate if we shall switch antenna now.\n");
-
+	else {
+		/* Case 4. Evaluate if we shall switch antenna now. */
+		/* Because Table Speed is very fast in TRC Dell Lab, we check it every time. */
priv->AdTickCount = 0;
- //
- // <Roger_Notes> We evaluate RxOk counts for each antenna first and than
- // evaluate signal strength.
- // The following operation can overcome the disability of CCA on both two antennas
- // When signal strength was extremely low or high.
- // 2008.01.30.
- //
-
- //
- // Evaluate RxOk count from each antenna if we shall switch default antenna now.
- // Added by Roger, 2008.02.21.
-//{by amy 080312
- if((priv->AdMainAntennaRxOkCnt < priv->AdAuxAntennaRxOkCnt)
- && (priv->CurrAntennaIndex == 0))
- { // We set Main antenna as default but RxOk count was less than Aux ones.
-
- // printk("SwAntennaDiversity(): Main antenna RxOK is poor, AdMainAntennaRxOkCnt: %d, AdAuxAntennaRxOkCnt: %d\n",
- // priv->AdMainAntennaRxOkCnt, priv->AdAuxAntennaRxOkCnt);
-
- // Switch to Aux antenna.
+		/*
+		 * <Roger_Notes> We evaluate RxOk counts for each antenna first and then
+		 * evaluate signal strength.
+		 * The following operation can overcome the disability of CCA on both
+		 * antennas when signal strength was extremely low or high.
+		 * 2008.01.30.
+		 */
+
+ /*
+ * Evaluate RxOk count from each antenna if we shall switch default antenna now.
+ */
+ if ((priv->AdMainAntennaRxOkCnt < priv->AdAuxAntennaRxOkCnt)
+ && (priv->CurrAntennaIndex == 0)) {
+ /* We set Main antenna as default but RxOk count was less than Aux ones. */
+
+ /* Switch to Aux antenna. */
SwitchAntenna(dev);
priv->bHWAdSwitched = true;
- }
- else if((priv->AdAuxAntennaRxOkCnt < priv->AdMainAntennaRxOkCnt)
- && (priv->CurrAntennaIndex == 1))
- { // We set Aux antenna as default but RxOk count was less than Main ones.
+ } else if ((priv->AdAuxAntennaRxOkCnt < priv->AdMainAntennaRxOkCnt)
+ && (priv->CurrAntennaIndex == 1)) {
+ /* We set Aux antenna as default but RxOk count was less than Main ones. */
- // printk("SwAntennaDiversity(): Aux antenna RxOK is poor, AdMainAntennaRxOkCnt: %d, AdAuxAntennaRxOkCnt: %d\n",
- // priv->AdMainAntennaRxOkCnt, priv->AdAuxAntennaRxOkCnt);
-
- // Switch to Main antenna.
+ /* Switch to Main antenna. */
SwitchAntenna(dev);
priv->bHWAdSwitched = true;
- }
- else
- {// Default antenna is better.
+ } else {
+ /* Default antenna is better. */
- // printk("SwAntennaDiversity(): Default antenna is better., AdMainAntennaRxOkCnt: %d, AdAuxAntennaRxOkCnt: %d\n",
- // priv->AdMainAntennaRxOkCnt, priv->AdAuxAntennaRxOkCnt);
-
- // Still need to check current signal strength.
+ /* Still need to check current signal strength. */
priv->bHWAdSwitched = false;
}
- //
- // <Roger_Notes> We evaluate Rx signal strength ONLY when default antenna
- // didn't changed by HW evaluation.
- // 2008.02.27.
- //
- // [TRC Dell Lab] SignalStrength is inaccuracy. Isaiah 2008-03-05
- // For example, Throughput of aux is better than main antenna(about 10M v.s 2M),
- // but AdRxSignalStrength is less than main.
- // Our guess is that main antenna have lower throughput and get many change
- // to receive more CCK packets(ex.Beacon) which have stronger SignalStrength.
- //
- if( (!priv->bHWAdSwitched) && (bSwCheckSS))
- {
-//by amy 080312}
- // Evaluate Rx signal strength if we shall switch antenna now.
- if(priv->AdRxSignalStrength < priv->AdRxSsThreshold)
- { // Rx signal strength is weak => Switch Antenna.
-// printk("SwAntennaDiversity(): Rx Signal Strength is weak, CurrRxSs: %d, RxSsThreshold: %d\n",
-// priv->AdRxSignalStrength, priv->AdRxSsThreshold);
-
- priv->AdRxSsBeforeSwitched = priv->AdRxSignalStrength;
- priv->bAdSwitchedChecking = true;
-
- SwitchAntenna(dev);
- }
- else
- { // Rx signal strength is OK.
-// printk("SwAntennaDiversity(): Rx Signal Strength is OK, CurrRxSs: %d, RxSsThreshold: %d\n",
-// priv->AdRxSignalStrength, priv->AdRxSsThreshold);
-
- priv->bAdSwitchedChecking = false;
- // Increase Rx signal strength threshold if necessary.
- if( (priv->AdRxSignalStrength > (priv->AdRxSsThreshold + 10)) && // Signal is much stronger than current threshold
- priv->AdRxSsThreshold <= priv->AdMaxRxSsThreshold) // Current threhold is not yet reach upper limit.
- {
- priv->AdRxSsThreshold = (priv->AdRxSsThreshold + priv->AdRxSignalStrength) / 2;
- priv->AdRxSsThreshold = (priv->AdRxSsThreshold > priv->AdMaxRxSsThreshold) ?
- priv->AdMaxRxSsThreshold: priv->AdRxSsThreshold;//+by amy 080312
- }
+ /*
+ * <Roger_Notes> We evaluate Rx signal strength ONLY when default antenna
+ * didn't changed by HW evaluation.
+ * 2008.02.27.
+ *
+		 * [TRC Dell Lab] SignalStrength is inaccurate. Isaiah 2008-03-05
+		 * For example, throughput of aux is better than the main antenna (about 10M v.s. 2M),
+		 * but AdRxSignalStrength is less than main.
+		 * Our guess is that the main antenna has lower throughput and gets more chances
+		 * to receive CCK packets (e.g. Beacon) which have stronger SignalStrength.
+ */
+ if ((!priv->bHWAdSwitched) && (bSwCheckSS)) {
+ /* Evaluate Rx signal strength if we shall switch antenna now. */
+ if (priv->AdRxSignalStrength < priv->AdRxSsThreshold) {
+ /* Rx signal strength is weak => Switch Antenna. */
+ priv->AdRxSsBeforeSwitched = priv->AdRxSignalStrength;
+ priv->bAdSwitchedChecking = true;
+
+ SwitchAntenna(dev);
+ } else {
+ /* Rx signal strength is OK. */
+ priv->bAdSwitchedChecking = false;
+ /* Increase Rx signal strength threshold if necessary. */
+ if ((priv->AdRxSignalStrength > (priv->AdRxSsThreshold + 10)) && /* Signal is much stronger than current threshold */
+			    priv->AdRxSsThreshold <= priv->AdMaxRxSsThreshold) { /* Current threshold has not yet reached the upper limit. */
+
+ priv->AdRxSsThreshold = (priv->AdRxSsThreshold + priv->AdRxSignalStrength) / 2;
+ priv->AdRxSsThreshold = (priv->AdRxSsThreshold > priv->AdMaxRxSsThreshold) ?
+							priv->AdMaxRxSsThreshold : priv->AdRxSsThreshold;
+ }
- // Reduce Antenna Diversity checking period if possible.
- if( priv->AdCheckPeriod > priv->AdMinCheckPeriod )
- {
- priv->AdCheckPeriod /= 2;
+ /* Reduce Antenna Diversity checking period if possible. */
+ if (priv->AdCheckPeriod > priv->AdMinCheckPeriod)
+ priv->AdCheckPeriod /= 2;
}
}
- }
}
-//by amy 080312
- // Reset antenna diversity Rx related statistics.
+ /* Reset antenna diversity Rx related statistics. */
priv->AdRxOkCnt = 0;
priv->AdMainAntennaRxOkCnt = 0;
priv->AdAuxAntennaRxOkCnt = 0;
-//by amy 080312
-
-// priv->AdRxOkCnt = 0;//-by amy 080312
-
-// printk("-SwAntennaDiversity()\n");
}
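Both AdRxSsThreshold updates in the routine above follow the same average-then-clamp pattern: take the midpoint of the threshold and the current signal, then cap at AdMaxRxSsThreshold. A minimal sketch, assuming that reading:

/* Hypothetical helper matching both AdRxSsThreshold updates above. */
static long update_ss_threshold(long threshold, long signal, long max)
{
	threshold = (threshold + signal) / 2;
	return threshold > max ? max : threshold;
}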
-//
-// Description:
-// Return TRUE if we shall perform Tx Power Tracking Mecahnism, FALSE otherwise.
-//
-bool
-CheckTxPwrTracking( struct net_device *dev)
+/* Return TRUE if we shall perform the Tx Power Tracking Mechanism, FALSE otherwise. */
+bool CheckTxPwrTracking(struct net_device *dev)
{
struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
- if(!priv->bTxPowerTrack)
- {
+ if (!priv->bTxPowerTrack)
return false;
- }
-//lzm reserved 080826
- //if(priv->bScanInProgress)
- //{
- // return false;
- //}
-
- //if 87SE is in High Power , don't do Tx Power Tracking. asked by SD3 ED. 2008-08-08 Isaiah
- if(priv->bToUpdateTxPwr)
- {
+	/* If 87SE is in High Power, don't do Tx Power Tracking. Asked by SD3 ED. 2008-08-08 Isaiah */
+ if (priv->bToUpdateTxPwr)
return false;
- }
return true;
}
-//
-// Description:
-// Timer callback function of SW Antenna Diversity.
-//
-void
-SwAntennaDiversityTimerCallback(
- struct net_device *dev
- )
+/* Timer callback function of SW Antenna Diversity. */
+void SwAntennaDiversityTimerCallback(struct net_device *dev)
{
struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
RT_RF_POWER_STATE rtState;
- //printk("+SwAntennaDiversityTimerCallback()\n");
-
- //
- // We do NOT need to switch antenna while RF is off.
- // 2007.05.09, added by Roger.
- //
+ /* We do NOT need to switch antenna while RF is off. */
rtState = priv->eRFPowerState;
- do{
- if (rtState == eRfOff)
- {
-// printk("SwAntennaDiversityTimer - RF is OFF.\n");
+ do {
+ if (rtState == eRfOff) {
break;
- }
- else if (rtState == eRfSleep)
- {
- // Don't access BB/RF under Disable PLL situation.
- //RT_TRACE((COMP_RF|COMP_ANTENNA), DBG_LOUD, ("SwAntennaDiversityTimerCallback(): RF is Sleep => skip it\n"));
+ } else if (rtState == eRfSleep) {
+ /* Don't access BB/RF under Disable PLL situation. */
break;
}
SwAntennaDiversity(dev);
- }while(false);
+ } while (false);
- if(priv->up)
- {
+ if (priv->up) {
priv->SwAntennaDiversityTimer.expires = jiffies + MSECS(ANTENNA_DIVERSITY_TIMER_PERIOD);
add_timer(&priv->SwAntennaDiversityTimer);
}
-
- //printk("-SwAntennaDiversityTimerCallback()\n");
}
diff --git a/drivers/staging/rtl8187se/r8180_wx.c b/drivers/staging/rtl8187se/r8180_wx.c
index 39ef7e0193f..303ec691262 100644
--- a/drivers/staging/rtl8187se/r8180_wx.c
+++ b/drivers/staging/rtl8187se/r8180_wx.c
@@ -23,24 +23,22 @@
#include "ieee80211/dot11d.h"
-/* #define RATE_COUNT 4 */
u32 rtl8180_rates[] = {1000000, 2000000, 5500000, 11000000,
6000000, 9000000, 12000000, 18000000, 24000000, 36000000, 48000000, 54000000};
#define RATE_COUNT ARRAY_SIZE(rtl8180_rates)
static CHANNEL_LIST DefaultChannelPlan[] = {
-/* {{1,2,3,4,5,6,7,8,9,10,11,12,13,14},14}, */ /*Default channel plan */
- {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 36, 40, 44, 48, 52, 56, 60, 64}, 19}, /*FCC */
- {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}, 11}, /*IC */
- {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 36, 40, 44, 48, 52, 56, 60, 64}, 21}, /*ETSI */
- {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 36, 40, 44, 48, 52, 56, 60, 64}, 21}, /*Spain. Change to ETSI. */
- {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 36, 40, 44, 48, 52, 56, 60, 64}, 21}, /*France. Change to ETSI. */
- {{14, 36, 40, 44, 48, 52, 56, 60, 64}, 9}, /*MKK */
- {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 36, 40, 44, 48, 52, 56, 60, 64}, 22},/*MKK1 */
- {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 36, 40, 44, 48, 52, 56, 60, 64}, 21}, /*Israel. */
- {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 34, 38, 42, 46}, 17}, /*For 11a , TELEC */
- {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14}, 14} /*For Global Domain. 1-11:active scan, 12-14 passive scan.*/ /* +YJ, 080626 */
+ {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 36, 40, 44, 48, 52, 56, 60, 64}, 19}, /* FCC */
+ {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}, 11}, /* IC */
+ {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 36, 40, 44, 48, 52, 56, 60, 64}, 21}, /* ETSI */
+ {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 36, 40, 44, 48, 52, 56, 60, 64}, 21}, /* Spain. Change to ETSI. */
+ {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 36, 40, 44, 48, 52, 56, 60, 64}, 21}, /* France. Change to ETSI. */
+ {{14, 36, 40, 44, 48, 52, 56, 60, 64}, 9}, /* MKK */
+ {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 36, 40, 44, 48, 52, 56, 60, 64}, 22}, /* MKK1 */
+ {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 36, 40, 44, 48, 52, 56, 60, 64}, 21}, /* Israel */
+ {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 34, 38, 42, 46}, 17}, /* For 11a , TELEC */
+	{{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14}, 14}	/* For Global Domain. 1-11: active scan, 12-14: passive scan. */
};
static int r8180_wx_get_freq(struct net_device *dev,
struct iw_request_info *a,
@@ -63,14 +61,7 @@ int r8180_wx_set_key(struct net_device *dev, struct iw_request_info *info,
if (erq->flags & IW_ENCODE_DISABLED)
-/* i = erq->flags & IW_ENCODE_INDEX;
- if (i < 1 || i > 4)
-*/
-
if (erq->length > 0) {
-
- /*int len = erq->length <= 5 ? 5 : 13; */
-
u32* tkey = (u32*) key;
priv->key0[0] = tkey[0];
priv->key0[1] = tkey[1];
@@ -192,33 +183,32 @@ static int r8180_wx_set_mode(struct net_device *dev, struct iw_request_info *a,
return 0;
down(&priv->wx_sem);
-/* printk("set mode ENABLE_IPS\n"); */
if (priv->bInactivePs) {
if (wrqu->mode == IW_MODE_ADHOC)
IPSLeave(dev);
}
ret = ieee80211_wx_set_mode(priv->ieee80211, a, wrqu, b);
-/* rtl8180_commit(dev); */
-
up(&priv->wx_sem);
return ret;
}
/* YJ,add,080819,for hidden ap */
struct iw_range_with_scan_capa {
- /* Informative stuff (to choose between different interface) */
- __u32 throughput; /* To give an idea... */
+ /* Informative stuff (to choose between different interface) */
+
+ __u32 throughput; /* To give an idea... */
+
/* In theory this value should be the maximum benchmarked
- * TCP/IP throughput, because with most of these devices the
- * bit rate is meaningless (overhead an co) to estimate how
- * fast the connection will go and pick the fastest one.
- * I suggest people to play with Netperf or any benchmark...
- */
+ * TCP/IP throughput, because with most of these devices the
+ * bit rate is meaningless (overhead an co) to estimate how
+ * fast the connection will go and pick the fastest one.
+ * I suggest people to play with Netperf or any benchmark...
+ */
/* NWID (or domain id) */
- __u32 min_nwid; /* Minimal NWID we are able to set */
- __u32 max_nwid; /* Maximal NWID we are able to set */
+ __u32 min_nwid; /* Minimal NWID we are able to set */
+ __u32 max_nwid; /* Maximal NWID we are able to set */
/* Old Frequency (backward compat - moved lower ) */
__u16 old_num_channels;
@@ -238,7 +228,6 @@ static int rtl8180_wx_get_range(struct net_device *dev,
struct r8180_priv *priv = ieee80211_priv(dev);
u16 val;
int i;
- /*struct iw_range_with_scan_capa* tmp = (struct iw_range_with_scan_capa*)range; */ /*YJ,add,080819,for hidden ap */
wrqu->data.length = sizeof(*range);
memset(range, 0, sizeof(*range));
@@ -291,14 +280,6 @@ static int rtl8180_wx_get_range(struct net_device *dev,
range->we_version_compiled = WIRELESS_EXT;
range->we_version_source = 16;
-/* range->retry_capa; */ /* What retry options are supported */
-/* range->retry_flags; */ /* How to decode max/min retry limit */
-/* range->r_time_flags;*/ /* How to decode max/min retry life */
-/* range->min_retry; */ /* Minimal number of retries */
-/* range->max_retry; */ /* Maximal number of retries */
-/* range->min_r_time; */ /* Minimal retry lifetime */
-/* range->max_r_time; */ /* Maximal retry lifetime */
-
range->num_channels = 14;
for (i = 0, val = 0; i < 14; i++) {
@@ -310,8 +291,8 @@ static int rtl8180_wx_get_range(struct net_device *dev,
range->freq[val].e = 1;
val++;
} else {
- /* FIXME: do we need to set anything for channels */
- /* we don't use ? */
+ /* FIXME: do we need to set anything for channels */
+ /* we don't use ? */
}
if (val == IW_MAX_FREQUENCIES)
@@ -322,8 +303,6 @@ static int rtl8180_wx_get_range(struct net_device *dev,
range->enc_capa = IW_ENC_CAPA_WPA | IW_ENC_CAPA_WPA2 |
IW_ENC_CAPA_CIPHER_TKIP | IW_ENC_CAPA_CIPHER_CCMP;
- /*tmp->scan_capa = 0x01; */ /*YJ,add,080819,for hidden ap */
-
return 0;
}
@@ -339,50 +318,29 @@ static int r8180_wx_set_scan(struct net_device *dev, struct iw_request_info *a,
if (priv->ieee80211->bHwRadioOff)
return 0;
-/*YJ,add,080819, for hidden ap */
- /*printk("==*&*&*&==>%s in\n", __func__); */
- /*printk("=*&*&*&*===>flag:%x, %x\n", wrqu->data.flags, IW_SCAN_THIS_ESSID); */
if (wrqu->data.flags & IW_SCAN_THIS_ESSID) {
struct iw_scan_req* req = (struct iw_scan_req*)b;
if (req->essid_len) {
- /*printk("==**&*&*&**===>scan set ssid:%s\n", req->essid); */
ieee->current_network.ssid_len = req->essid_len;
memcpy(ieee->current_network.ssid, req->essid, req->essid_len);
- /*printk("=====>network ssid:%s\n", ieee->current_network.ssid); */
}
}
-/*YJ,add,080819, for hidden ap, end */
down(&priv->wx_sem);
if (priv->up) {
-/* printk("set scan ENABLE_IPS\n"); */
priv->ieee80211->actscanning = true;
if (priv->bInactivePs && (priv->ieee80211->state != IEEE80211_LINKED)) {
IPSLeave(dev);
- /*down(&priv->ieee80211->wx_sem); */
-/*
- if (priv->ieee80211->iw_mode == IW_MODE_MONITOR || !(priv->ieee80211->proto_started)){
- ret = -1;
- up(&priv->ieee80211->wx_sem);
- up(&priv->wx_sem);
- return ret;
- }
-*/
- /* queue_work(priv->ieee80211->wq, &priv->ieee80211->wx_sync_scan_wq); */
- /* printk("start scan============================>\n"); */
ieee80211_softmac_ips_scan_syncro(priv->ieee80211);
-/* ieee80211_rtl_start_scan(priv->ieee80211); */
- /* intentionally forget to up sem */
-/* up(&priv->ieee80211->wx_sem); */
ret = 0;
} else {
- /* YJ,add,080828, prevent scan in BusyTraffic */
+ /* prevent scan in BusyTraffic */
/* FIXME: Need to consider last scan time */
if ((priv->link_detect.bBusyTraffic) && (true)) {
ret = 0;
printk("Now traffic is busy, please try later!\n");
} else
- /* YJ,add,080828, prevent scan in BusyTraffic,end */
+			/* prevent scan in BusyTraffic, end */
ret = ieee80211_wx_set_scan(priv->ieee80211, a, wrqu, b);
}
} else
@@ -424,10 +382,8 @@ static int r8180_wx_set_essid(struct net_device *dev,
return 0;
down(&priv->wx_sem);
- /* printk("set essid ENABLE_IPS\n"); */
if (priv->bInactivePs)
IPSLeave(dev);
-/* printk("haha:set essid %s essid_len = %d essid_flgs = %d\n",b, wrqu->essid.length, wrqu->essid.flags); */
ret = ieee80211_wx_set_essid(priv->ieee80211, a, wrqu, b);
@@ -597,28 +553,6 @@ static int r8180_wx_set_scan_type(struct net_device *dev, struct iw_request_info
return 1;
}
-
-/* added by christian */
-/*
-static int r8180_wx_set_monitor_type(struct net_device *dev, struct iw_request_info *aa, union
- iwreq_data *wrqu, char *p){
-
- struct r8180_priv *priv = ieee80211_priv(dev);
- int *parms=(int*)p;
- int mode=parms[0];
-
- if(priv->ieee80211->iw_mode != IW_MODE_MONITOR) return -1;
- priv->prism_hdr = mode;
- if(!mode)dev->type=ARPHRD_IEEE80211;
- else dev->type=ARPHRD_IEEE80211_PRISM;
- DMESG("using %s RX encap", mode ? "AVS":"80211");
- return 0;
-
-}
-*/
-/*of r8180_wx_set_monitor_type */
-/* end added christian */
-
static int r8180_wx_set_retry(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
@@ -661,14 +595,6 @@ static int r8180_wx_set_retry(struct net_device *dev,
*/
rtl8180_commit(dev);
- /*
- if(priv->up){
- rtl8180_rtx_disable(dev);
- rtl8180_rx_enable(dev);
- rtl8180_tx_enable(dev);
-
- }
- */
exit:
up(&priv->wx_sem);
@@ -695,8 +621,6 @@ static int r8180_wx_get_retry(struct net_device *dev,
wrqu->retry.flags = IW_RETRY_LIMIT & IW_RETRY_MIN;
wrqu->retry.value = priv->retry_data;
}
- /* DMESG("returning %d",wrqu->retry.value); */
-
return 0;
}
@@ -726,7 +650,6 @@ static int r8180_wx_set_sens(struct net_device *dev,
return 0;
down(&priv->wx_sem);
- /* DMESG("attempt to set sensivity to %ddb",wrqu->sens.value); */
if (priv->rf_set_sens == NULL) {
err = -1; /* we have not this support for this radio */
goto exit;
@@ -847,58 +770,6 @@ static int dummy(struct net_device *dev, struct iw_request_info *a,
return -1;
}
-/*
-static int r8180_wx_get_psmode(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct r8180_priv *priv = ieee80211_priv(dev);
- struct ieee80211_device *ieee;
- int ret = 0;
-
-
-
- down(&priv->wx_sem);
-
- if(priv) {
- ieee = priv->ieee80211;
- if(ieee->ps == IEEE80211_PS_DISABLED) {
- *((unsigned int *)extra) = IEEE80211_PS_DISABLED;
- goto exit;
- }
- *((unsigned int *)extra) = IW_POWER_TIMEOUT;
- if (ieee->ps & IEEE80211_PS_MBCAST)
- *((unsigned int *)extra) |= IW_POWER_ALL_R;
- else
- *((unsigned int *)extra) |= IW_POWER_UNICAST_R;
- } else
- ret = -1;
-exit:
- up(&priv->wx_sem);
-
- return ret;
-}
-static int r8180_wx_set_psmode(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct r8180_priv *priv = ieee80211_priv(dev);
- //struct ieee80211_device *ieee;
- int ret = 0;
-
-
-
- down(&priv->wx_sem);
-
- ret = ieee80211_wx_set_power(priv->ieee80211, info, wrqu, extra);
-
- up(&priv->wx_sem);
-
- return ret;
-
-}
-*/
-
static int r8180_wx_get_iwmode(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
@@ -964,7 +835,6 @@ static int r8180_wx_set_iwmode(struct net_device *dev,
} else {
ieee->mode = mode;
ieee->modulation = modulation;
-/* ieee80211_start_protocol(ieee); */
}
up(&priv->wx_sem);
@@ -1016,7 +886,6 @@ static int r8180_wx_get_siglevel(struct net_device *dev,
union iwreq_data *wrqu, char *extra)
{
struct r8180_priv *priv = ieee80211_priv(dev);
- /* struct ieee80211_network *network = &(priv->ieee80211->current_network); */
int ret = 0;
@@ -1036,7 +905,6 @@ static int r8180_wx_get_sigqual(struct net_device *dev,
union iwreq_data *wrqu, char *extra)
{
struct r8180_priv *priv = ieee80211_priv(dev);
- /* struct ieee80211_network *network = &(priv->ieee80211->current_network); */
int ret = 0;
@@ -1150,7 +1018,6 @@ static int r8180_wx_set_channelplan(struct net_device *dev,
union iwreq_data *wrqu, char *extra)
{
struct r8180_priv *priv = ieee80211_priv(dev);
- /* struct ieee80211_device *ieee = netdev_priv(dev); */
int *val = (int *)extra;
int i;
printk("-----in fun %s\n", __func__);
@@ -1223,7 +1090,6 @@ static int r8180_wx_set_enc_ext(struct net_device *dev,
{
struct r8180_priv *priv = ieee80211_priv(dev);
- /* printk("===>%s()\n", __func__); */
int ret = 0;
@@ -1240,7 +1106,6 @@ static int r8180_wx_set_auth(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
- /* printk("====>%s()\n", __func__); */
struct r8180_priv *priv = ieee80211_priv(dev);
int ret = 0;
@@ -1257,8 +1122,6 @@ static int r8180_wx_set_mlme(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
- /* printk("====>%s()\n", __func__); */
-
int ret = 0;
struct r8180_priv *priv = ieee80211_priv(dev);
@@ -1278,7 +1141,6 @@ static int r8180_wx_set_gen_ie(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
-/* printk("====>%s(), len:%d\n", __func__, data->length); */
int ret = 0;
struct r8180_priv *priv = ieee80211_priv(dev);
@@ -1291,68 +1153,67 @@ static int r8180_wx_set_gen_ie(struct net_device *dev,
ret = ieee80211_wx_set_gen_ie(priv->ieee80211, extra, wrqu->data.length);
#endif
up(&priv->wx_sem);
- /* printk("<======%s(), ret:%d\n", __func__, ret); */
return ret;
}
static iw_handler r8180_wx_handlers[] = {
- NULL, /* SIOCSIWCOMMIT */
+ NULL, /* SIOCSIWCOMMIT */
r8180_wx_get_name, /* SIOCGIWNAME */
- dummy, /* SIOCSIWNWID */
- dummy, /* SIOCGIWNWID */
+ dummy, /* SIOCSIWNWID */
+ dummy, /* SIOCGIWNWID */
r8180_wx_set_freq, /* SIOCSIWFREQ */
r8180_wx_get_freq, /* SIOCGIWFREQ */
r8180_wx_set_mode, /* SIOCSIWMODE */
r8180_wx_get_mode, /* SIOCGIWMODE */
r8180_wx_set_sens, /* SIOCSIWSENS */
r8180_wx_get_sens, /* SIOCGIWSENS */
- NULL, /* SIOCSIWRANGE */
- rtl8180_wx_get_range, /* SIOCGIWRANGE */
- NULL, /* SIOCSIWPRIV */
- NULL, /* SIOCGIWPRIV */
- NULL, /* SIOCSIWSTATS */
- NULL, /* SIOCGIWSTATS */
- dummy, /* SIOCSIWSPY */
- dummy, /* SIOCGIWSPY */
- NULL, /* SIOCGIWTHRSPY */
- NULL, /* SIOCWIWTHRSPY */
+ NULL, /* SIOCSIWRANGE */
+ rtl8180_wx_get_range, /* SIOCGIWRANGE */
+ NULL, /* SIOCSIWPRIV */
+ NULL, /* SIOCGIWPRIV */
+ NULL, /* SIOCSIWSTATS */
+ NULL, /* SIOCGIWSTATS */
+ dummy, /* SIOCSIWSPY */
+ dummy, /* SIOCGIWSPY */
+ NULL, /* SIOCGIWTHRSPY */
+ NULL, /* SIOCWIWTHRSPY */
r8180_wx_set_wap, /* SIOCSIWAP */
r8180_wx_get_wap, /* SIOCGIWAP */
r8180_wx_set_mlme, /* SIOCSIWMLME*/
- dummy, /* SIOCGIWAPLIST -- depricated */
+	dummy,				/* SIOCGIWAPLIST -- deprecated */
r8180_wx_set_scan, /* SIOCSIWSCAN */
r8180_wx_get_scan, /* SIOCGIWSCAN */
r8180_wx_set_essid, /* SIOCSIWESSID */
r8180_wx_get_essid, /* SIOCGIWESSID */
- dummy, /* SIOCSIWNICKN */
- dummy, /* SIOCGIWNICKN */
- NULL, /* -- hole -- */
- NULL, /* -- hole -- */
+ dummy, /* SIOCSIWNICKN */
+ dummy, /* SIOCGIWNICKN */
+ NULL, /* -- hole -- */
+ NULL, /* -- hole -- */
r8180_wx_set_rate, /* SIOCSIWRATE */
r8180_wx_get_rate, /* SIOCGIWRATE */
r8180_wx_set_rts, /* SIOCSIWRTS */
r8180_wx_get_rts, /* SIOCGIWRTS */
r8180_wx_set_frag, /* SIOCSIWFRAG */
r8180_wx_get_frag, /* SIOCGIWFRAG */
- dummy, /* SIOCSIWTXPOW */
- dummy, /* SIOCGIWTXPOW */
+ dummy, /* SIOCSIWTXPOW */
+ dummy, /* SIOCGIWTXPOW */
r8180_wx_set_retry, /* SIOCSIWRETRY */
r8180_wx_get_retry, /* SIOCGIWRETRY */
r8180_wx_set_enc, /* SIOCSIWENCODE */
r8180_wx_get_enc, /* SIOCGIWENCODE */
r8180_wx_set_power, /* SIOCSIWPOWER */
r8180_wx_get_power, /* SIOCGIWPOWER */
- NULL, /*---hole---*/
- NULL, /*---hole---*/
- r8180_wx_set_gen_ie, /* SIOCSIWGENIE */
- NULL, /* SIOCSIWGENIE */
+ NULL, /*---hole---*/
+ NULL, /*---hole---*/
+ r8180_wx_set_gen_ie, /* SIOCSIWGENIE */
+	NULL,				/* SIOCGIWGENIE */
r8180_wx_set_auth, /* SIOCSIWAUTH */
- NULL, /* SIOCSIWAUTH */
- r8180_wx_set_enc_ext, /* SIOCSIWENCODEEXT */
- NULL, /* SIOCSIWENCODEEXT */
- NULL, /* SIOCSIWPMKSA */
- NULL, /*---hole---*/
+	NULL,				/* SIOCGIWAUTH */
+ r8180_wx_set_enc_ext, /* SIOCSIWENCODEEXT */
+	NULL,				/* SIOCGIWENCODEEXT */
+ NULL, /* SIOCSIWPMKSA */
+ NULL, /*---hole---*/
};
@@ -1373,14 +1234,6 @@ static const struct iw_priv_args r8180_private_args[] = {
0, 0, "dummy"
},
- /* added by christian */
- /*
- {
- SIOCIWFIRSTPRIV + 0x2,
- IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "prismhdr"
- },
- */
- /* end added by christian */
{
SIOCIWFIRSTPRIV + 0x4,
IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "activescan"
@@ -1399,18 +1252,6 @@ static const struct iw_priv_args r8180_private_args[] = {
0, 0, "dummy"
},
-/*
- {
- SIOCIWFIRSTPRIV + 0x5,
- 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getpsmode"
- },
- {
- SIOCIWFIRSTPRIV + 0x6,
- IW_PRIV_SIZE_FIXED, 0, "setpsmode"
- },
-*/
-/* set/get mode have been realized in public handlers */
-
{
SIOCIWFIRSTPRIV + 0x8,
IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "setiwmode"
@@ -1481,7 +1322,7 @@ static const struct iw_priv_args r8180_private_args[] = {
static iw_handler r8180_private_handler[] = {
- r8180_wx_set_crcmon, /*SIOCIWSECONDPRIV*/
+ r8180_wx_set_crcmon, /*SIOCIWSECONDPRIV*/
dummy,
r8180_wx_set_beaconinterval,
dummy,
@@ -1513,16 +1354,15 @@ static inline int is_same_network(struct ieee80211_network *src,
struct ieee80211_network *dst,
struct ieee80211_device *ieee)
{
- /* A network is only a duplicate if the channel, BSSID, ESSID
- * and the capability field (in particular IBSS and BSS) all match.
- * We treat all <hidden> with the same BSSID and channel
- * as one network */
- return (((src->ssid_len == dst->ssid_len) || (ieee->iw_mode == IW_MODE_INFRA)) && /* YJ,mod, 080819,for hidden ap */
- /* ((src->ssid_len == dst->ssid_len) && */
+ /* A network is only a duplicate if the channel, BSSID, ESSID
+ * and the capability field (in particular IBSS and BSS) all match.
+ * We treat all <hidden> with the same BSSID and channel
+ * as one network
+ */
+	return (((src->ssid_len == dst->ssid_len) || (ieee->iw_mode == IW_MODE_INFRA)) && /* hidden ap */
(src->channel == dst->channel) &&
!memcmp(src->bssid, dst->bssid, ETH_ALEN) &&
(!memcmp(src->ssid, dst->ssid, src->ssid_len) || (ieee->iw_mode == IW_MODE_INFRA)) && /* YJ,mod, 080819,for hidden ap */
- /*!memcmp(src->ssid, dst->ssid, src->ssid_len) && */
((src->capability & WLAN_CAPABILITY_IBSS) ==
(dst->capability & WLAN_CAPABILITY_IBSS)) &&
((src->capability & WLAN_CAPABILITY_BSS) ==
@@ -1535,11 +1375,9 @@ static struct iw_statistics *r8180_get_wireless_stats(struct net_device *dev)
struct r8180_priv *priv = ieee80211_priv(dev);
struct ieee80211_device* ieee = priv->ieee80211;
struct iw_statistics* wstats = &priv->wstats;
- /* struct ieee80211_network* target = NULL; */
int tmp_level = 0;
int tmp_qual = 0;
int tmp_noise = 0;
- /* unsigned long flag; */
if (ieee->state < IEEE80211_LINKED) {
wstats->qual.qual = 0;
@@ -1552,9 +1390,7 @@ static struct iw_statistics *r8180_get_wireless_stats(struct net_device *dev)
tmp_level = (&ieee->current_network)->stats.signal;
tmp_qual = (&ieee->current_network)->stats.signalstrength;
tmp_noise = (&ieee->current_network)->stats.noise;
- /* printk("level:%d, qual:%d, noise:%d\n", tmp_level, tmp_qual, tmp_noise); */
-/* printk("level:%d\n", tmp_level); */
wstats->qual.level = tmp_level;
wstats->qual.qual = tmp_qual;
wstats->qual.noise = tmp_noise;
diff --git a/drivers/staging/rtl8192e/rtllib_rx.c b/drivers/staging/rtl8192e/rtllib_rx.c
index 6c5061f12ba..13979b5ea32 100644
--- a/drivers/staging/rtl8192e/rtllib_rx.c
+++ b/drivers/staging/rtl8192e/rtllib_rx.c
@@ -2453,7 +2453,7 @@ static inline void update_network(struct rtllib_network *dst,
if (src->wmm_param[0].ac_aci_acm_aifsn ||
src->wmm_param[1].ac_aci_acm_aifsn ||
src->wmm_param[2].ac_aci_acm_aifsn ||
- src->wmm_param[1].ac_aci_acm_aifsn)
+ src->wmm_param[3].ac_aci_acm_aifsn)
memcpy(dst->wmm_param, src->wmm_param, WME_AC_PRAM_LEN);
dst->SignalStrength = src->SignalStrength;
diff --git a/drivers/staging/rtl8192e/rtllib_softmac.c b/drivers/staging/rtl8192e/rtllib_softmac.c
index 1637f111099..c5a15dba1bf 100644
--- a/drivers/staging/rtl8192e/rtllib_softmac.c
+++ b/drivers/staging/rtl8192e/rtllib_softmac.c
@@ -2234,7 +2234,6 @@ inline int rtllib_rx_assoc_resp(struct rtllib_device *ieee, struct sk_buff *skb,
if (!network)
return 1;
- memset(network, 0, sizeof(*network));
ieee->state = RTLLIB_LINKED;
ieee->assoc_id = aid;
ieee->softmac_stats.rx_ass_ok++;
@@ -2259,8 +2258,8 @@ inline int rtllib_rx_assoc_resp(struct rtllib_device *ieee, struct sk_buff *skb,
ieee->handle_assoc_response(ieee->dev,
(struct rtllib_assoc_response_frame *)header,
network);
- kfree(network);
}
+ kfree(network);
kfree(ieee->assocresp_ies);
ieee->assocresp_ies = NULL;
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
index c9bdc7f6bdc..be2a28cf8ed 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
@@ -237,7 +237,7 @@ ieee80211_rx_frame_mgmt(struct ieee80211_device *ieee, struct sk_buff *skb,
#ifdef NOT_YET
if (ieee->iw_mode == IW_MODE_MASTER) {
- printk(KERN_DEBUG "%s: Master mode not yet suppported.\n",
+ printk(KERN_DEBUG "%s: Master mode not yet supported.\n",
ieee->dev->name);
return 0;
/*
diff --git a/drivers/staging/rtl8192u/r8192U_core.c b/drivers/staging/rtl8192u/r8192U_core.c
index c09be0a6646..9c00865f302 100644
--- a/drivers/staging/rtl8192u/r8192U_core.c
+++ b/drivers/staging/rtl8192u/r8192U_core.c
@@ -105,7 +105,6 @@ u32 rt_global_debug_component = \
static const struct usb_device_id rtl8192_usb_id_tbl[] = {
/* Realtek */
- {USB_DEVICE(0x0bda, 0x8192)},
{USB_DEVICE(0x0bda, 0x8709)},
/* Corega */
{USB_DEVICE(0x07aa, 0x0043)},
diff --git a/drivers/staging/rtl8712/Kconfig b/drivers/staging/rtl8712/Kconfig
index ea37473f71e..6a43312380e 100644
--- a/drivers/staging/rtl8712/Kconfig
+++ b/drivers/staging/rtl8712/Kconfig
@@ -9,13 +9,6 @@ config R8712U
This option adds the Realtek RTL8712 USB device such as the D-Link DWA-130.
If built as a module, it will be called r8712u.
-config R8712_AP
- bool "Realtek RTL8712U AP code"
- depends on R8712U
- default N
- ---help---
- This option allows the Realtek RTL8712 USB device to be an Access Point.
-
config R8712_TX_AGGR
bool "Realtek RTL8712U Transmit Aggregation code"
depends on R8712U && BROKEN
diff --git a/drivers/staging/rtl8712/drv_types.h b/drivers/staging/rtl8712/drv_types.h
index ed85b441520..e83665d0602 100644
--- a/drivers/staging/rtl8712/drv_types.h
+++ b/drivers/staging/rtl8712/drv_types.h
@@ -140,7 +140,6 @@ struct dvobj_priv {
u8 ishighspeed;
uint(*inirp_init)(struct _adapter *adapter);
uint(*inirp_deinit)(struct _adapter *adapter);
- struct semaphore usb_suspend_sema;
struct usb_device *pusbdev;
};
diff --git a/drivers/staging/rtl8712/os_intfs.c b/drivers/staging/rtl8712/os_intfs.c
index 98a3d684f9b..7bbd53a410e 100644
--- a/drivers/staging/rtl8712/os_intfs.c
+++ b/drivers/staging/rtl8712/os_intfs.c
@@ -330,7 +330,6 @@ u8 r8712_init_drv_sw(struct _adapter *padapter)
padapter->stapriv.padapter = padapter;
r8712_init_bcmc_stainfo(padapter);
r8712_init_pwrctrl_priv(padapter);
- sema_init(&(padapter->pwrctrlpriv.pnp_pwr_mgnt_sema), 0);
mp871xinit(padapter);
if (init_default_value(padapter) != _SUCCESS)
return _FAIL;
@@ -476,11 +475,6 @@ static int netdev_close(struct net_device *pnetdev)
r8712_free_assoc_resources(padapter);
/*s2-4.*/
r8712_free_network_queue(padapter);
- /* The interface is no longer Up: */
- padapter->bup = false;
- release_firmware(padapter->fw);
- /* never exit with a firmware callback pending */
- wait_for_completion(&padapter->rtl8712_fw_ready);
return 0;
}
diff --git a/drivers/staging/rtl8712/osdep_service.h b/drivers/staging/rtl8712/osdep_service.h
index 1ee943a58c4..9ba603310fd 100644
--- a/drivers/staging/rtl8712/osdep_service.h
+++ b/drivers/staging/rtl8712/osdep_service.h
@@ -72,18 +72,6 @@ static inline struct list_head *get_list_head(struct __queue *queue)
#define LIST_CONTAINOR(ptr, type, member) \
((type *)((char *)(ptr)-(SIZE_T)(&((type *)0)->member)))
-static inline void _enter_hwio_critical(struct semaphore *prwlock,
- unsigned long *pirqL)
-{
- down(prwlock);
-}
-
-static inline void _exit_hwio_critical(struct semaphore *prwlock,
- unsigned long *pirqL)
-{
- up(prwlock);
-}
-
static inline void list_delete(struct list_head *plist)
{
list_del_init(plist);
@@ -152,11 +140,6 @@ static inline u32 _down_sema(struct semaphore *sema)
return _SUCCESS;
}
-static inline void _rtl_rwlock_init(struct semaphore *prwlock)
-{
- sema_init(prwlock, 1);
-}
-
static inline void _init_listhead(struct list_head *list)
{
INIT_LIST_HEAD(list);
diff --git a/drivers/staging/rtl8712/rtl8712_recv.c b/drivers/staging/rtl8712/rtl8712_recv.c
index 6d692657e78..fa6dc9c09b3 100644
--- a/drivers/staging/rtl8712/rtl8712_recv.c
+++ b/drivers/staging/rtl8712/rtl8712_recv.c
@@ -55,8 +55,6 @@ int r8712_init_recv_priv(struct recv_priv *precvpriv, struct _adapter *padapter)
int alignment = 0;
struct sk_buff *pskb = NULL;
- sema_init(&precvpriv->recv_sema, 0);
- sema_init(&precvpriv->terminate_recvthread_sema, 0);
/*init recv_buf*/
_init_queue(&precvpriv->free_recv_buf_queue);
precvpriv->pallocated_recv_buf = _malloc(NR_RECVBUFF *
diff --git a/drivers/staging/rtl8712/rtl871x_io.c b/drivers/staging/rtl8712/rtl871x_io.c
index ca84ee02eac..abc1c97378f 100644
--- a/drivers/staging/rtl8712/rtl871x_io.c
+++ b/drivers/staging/rtl8712/rtl871x_io.c
@@ -131,7 +131,6 @@ uint r8712_alloc_io_queue(struct _adapter *adapter)
pio_req = (struct io_req *)(pio_queue->free_ioreqs_buf);
for (i = 0; i < NUM_IOREQ; i++) {
_init_listhead(&pio_req->list);
- sema_init(&pio_req->sema, 0);
list_insert_tail(&pio_req->list, &pio_queue->free_ioreqs);
pio_req++;
}
diff --git a/drivers/staging/rtl8712/rtl871x_io.h b/drivers/staging/rtl8712/rtl871x_io.h
index 86308a0093e..d3d8727c2ec 100644
--- a/drivers/staging/rtl8712/rtl871x_io.h
+++ b/drivers/staging/rtl8712/rtl871x_io.h
@@ -117,7 +117,6 @@ struct io_req {
u32 command;
u32 status;
u8 *pbuf;
- struct semaphore sema;
void (*_async_io_callback)(struct _adapter *padater,
struct io_req *pio_req, u8 *cnxt);
u8 *cnxt;
diff --git a/drivers/staging/rtl8712/rtl871x_ioctl_linux.c b/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
index 507584b837c..ef35bc29a3f 100644
--- a/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
+++ b/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
@@ -2380,13 +2380,7 @@ static struct iw_statistics *r871x_get_wireless_stats(struct net_device *dev)
tmp_qual = padapter->recvpriv.signal;
tmp_noise = padapter->recvpriv.noise;
piwstats->qual.level = tmp_level;
- /*piwstats->qual.qual = tmp_qual;
- * The NetworkManager of Fedora 10, 13 will use the link
- * quality for its display.
- * So, use the fw_rssi on link quality variable because
- * fw_rssi will be updated per 2 seconds.
- */
- piwstats->qual.qual = tmp_level;
+ piwstats->qual.qual = tmp_qual;
piwstats->qual.noise = tmp_noise;
}
piwstats->qual.updated = IW_QUAL_ALL_UPDATED;
diff --git a/drivers/staging/rtl8712/rtl871x_pwrctrl.c b/drivers/staging/rtl8712/rtl871x_pwrctrl.c
index 23e72a0401a..9fd2ec7596c 100644
--- a/drivers/staging/rtl8712/rtl871x_pwrctrl.c
+++ b/drivers/staging/rtl8712/rtl871x_pwrctrl.c
@@ -100,7 +100,6 @@ void r8712_cpwm_int_hdl(struct _adapter *padapter,
{
struct pwrctrl_priv *pwrpriv = &(padapter->pwrctrlpriv);
struct cmd_priv *pcmdpriv = &(padapter->cmdpriv);
- struct xmit_priv *pxmitpriv = &(padapter->xmitpriv);
if (pwrpriv->cpwm_tog == ((preportpwrstate->state) & 0x80))
return;
@@ -110,8 +109,6 @@ void r8712_cpwm_int_hdl(struct _adapter *padapter,
if (pwrpriv->cpwm >= PS_STATE_S2) {
if (pwrpriv->alives & CMD_ALIVE)
up(&(pcmdpriv->cmd_queue_sema));
- if (pwrpriv->alives & XMIT_ALIVE)
- up(&(pxmitpriv->xmit_sema));
}
pwrpriv->cpwm_tog = (preportpwrstate->state) & 0x80;
up(&pwrpriv->lock);
@@ -145,12 +142,12 @@ static void SetPSModeWorkItemCallback(struct work_struct *work)
struct pwrctrl_priv, SetPSModeWorkItem);
struct _adapter *padapter = container_of(pwrpriv,
struct _adapter, pwrctrlpriv);
- _enter_pwrlock(&pwrpriv->lock);
if (!pwrpriv->bSleep) {
+ _enter_pwrlock(&pwrpriv->lock);
if (pwrpriv->pwr_mode == PS_MODE_ACTIVE)
r8712_set_rpwm(padapter, PS_STATE_S4);
+ up(&pwrpriv->lock);
}
- up(&pwrpriv->lock);
}
static void rpwm_workitem_callback(struct work_struct *work)
@@ -160,13 +157,13 @@ static void rpwm_workitem_callback(struct work_struct *work)
struct _adapter *padapter = container_of(pwrpriv,
struct _adapter, pwrctrlpriv);
u8 cpwm = pwrpriv->cpwm;
- _enter_pwrlock(&pwrpriv->lock);
if (pwrpriv->cpwm != pwrpriv->rpwm) {
+ _enter_pwrlock(&pwrpriv->lock);
cpwm = r8712_read8(padapter, SDIO_HCPWM);
pwrpriv->rpwm_retry = 1;
r8712_set_rpwm(padapter, pwrpriv->rpwm);
+ up(&pwrpriv->lock);
}
- up(&pwrpriv->lock);
}
static void rpwm_check_handler (void *FunctionContext)
diff --git a/drivers/staging/rtl8712/rtl871x_pwrctrl.h b/drivers/staging/rtl8712/rtl871x_pwrctrl.h
index b41ca2892be..6024c4f63d5 100644
--- a/drivers/staging/rtl8712/rtl871x_pwrctrl.h
+++ b/drivers/staging/rtl8712/rtl871x_pwrctrl.h
@@ -133,7 +133,6 @@ struct pwrctrl_priv {
u8 rpwm_retry;
uint bSetPSModeWorkItemInProgress;
- struct semaphore pnp_pwr_mgnt_sema;
spinlock_t pnp_pwr_mgnt_lock;
s32 pnp_current_pwr_state;
u8 pnp_bstop_trx;
diff --git a/drivers/staging/rtl8712/rtl871x_recv.c b/drivers/staging/rtl8712/rtl871x_recv.c
index 7069f06d9b5..5b03b405883 100644
--- a/drivers/staging/rtl8712/rtl871x_recv.c
+++ b/drivers/staging/rtl8712/rtl871x_recv.c
@@ -93,7 +93,6 @@ sint _r8712_init_recv_priv(struct recv_priv *precvpriv,
precvframe++;
}
precvpriv->rx_pending_cnt = 1;
- sema_init(&precvpriv->allrxreturnevt, 0);
return r8712_init_recv_priv(precvpriv, padapter);
}
diff --git a/drivers/staging/rtl8712/rtl871x_recv.h b/drivers/staging/rtl8712/rtl871x_recv.h
index cc7a72fee1c..e42e6f0a15e 100644
--- a/drivers/staging/rtl8712/rtl871x_recv.h
+++ b/drivers/staging/rtl8712/rtl871x_recv.h
@@ -85,8 +85,6 @@ using enter_critical section to protect
*/
struct recv_priv {
spinlock_t lock;
- struct semaphore recv_sema;
- struct semaphore terminate_recvthread_sema;
struct __queue free_recv_queue;
struct __queue recv_pending_queue;
u8 *pallocated_frame_buf;
@@ -100,7 +98,6 @@ struct recv_priv {
uint rx_largepacket_crcerr;
uint rx_smallpacket_crcerr;
uint rx_middlepacket_crcerr;
- struct semaphore allrxreturnevt;
u8 rx_pending_cnt;
uint ff_hwaddr;
struct tasklet_struct recv_tasklet;
diff --git a/drivers/staging/rtl8712/rtl871x_sta_mgt.c b/drivers/staging/rtl8712/rtl871x_sta_mgt.c
index 81bde803c59..1247b3d9719 100644
--- a/drivers/staging/rtl8712/rtl871x_sta_mgt.c
+++ b/drivers/staging/rtl8712/rtl871x_sta_mgt.c
@@ -42,10 +42,8 @@ static void _init_stainfo(struct sta_info *psta)
_init_listhead(&psta->hash_list);
_r8712_init_sta_xmit_priv(&psta->sta_xmitpriv);
_r8712_init_sta_recv_priv(&psta->sta_recvpriv);
-#ifdef CONFIG_R8712_AP
_init_listhead(&psta->asoc_list);
_init_listhead(&psta->auth_list);
-#endif
}
u32 _r8712_init_sta_priv(struct sta_priv *pstapriv)
@@ -72,10 +70,8 @@ u32 _r8712_init_sta_priv(struct sta_priv *pstapriv)
get_list_head(&pstapriv->free_sta_queue));
psta++;
}
-#ifdef CONFIG_R8712_AP
_init_listhead(&pstapriv->asoc_list);
_init_listhead(&pstapriv->auth_list);
-#endif
return _SUCCESS;
}
diff --git a/drivers/staging/rtl8712/rtl871x_xmit.c b/drivers/staging/rtl8712/rtl871x_xmit.c
index 8bbdee70f86..aa57e7754f0 100644
--- a/drivers/staging/rtl8712/rtl871x_xmit.c
+++ b/drivers/staging/rtl8712/rtl871x_xmit.c
@@ -71,8 +71,6 @@ sint _r8712_init_xmit_priv(struct xmit_priv *pxmitpriv,
memset((unsigned char *)pxmitpriv, 0, sizeof(struct xmit_priv));
spin_lock_init(&pxmitpriv->lock);
- sema_init(&pxmitpriv->xmit_sema, 0);
- sema_init(&pxmitpriv->terminate_xmitthread_sema, 0);
/*
Please insert all the queue initializaiton using _init_queue below
*/
@@ -121,7 +119,6 @@ sint _r8712_init_xmit_priv(struct xmit_priv *pxmitpriv,
_r8712_init_hw_txqueue(&pxmitpriv->bmc_txqueue, BMC_QUEUE_INX);
pxmitpriv->frag_len = MAX_FRAG_THRESHOLD;
pxmitpriv->txirp_cnt = 1;
- sema_init(&(pxmitpriv->tx_retevt), 0);
/*per AC pending irp*/
pxmitpriv->beq_cnt = 0;
pxmitpriv->bkq_cnt = 0;
diff --git a/drivers/staging/rtl8712/rtl871x_xmit.h b/drivers/staging/rtl8712/rtl871x_xmit.h
index a034c0fec71..638b79b4c5a 100644
--- a/drivers/staging/rtl8712/rtl871x_xmit.h
+++ b/drivers/staging/rtl8712/rtl871x_xmit.h
@@ -202,8 +202,6 @@ struct hw_txqueue {
struct xmit_priv {
spinlock_t lock;
- struct semaphore xmit_sema;
- struct semaphore terminate_xmitthread_sema;
struct __queue be_pending;
struct __queue bk_pending;
struct __queue vi_pending;
@@ -233,7 +231,6 @@ struct xmit_priv {
uint tx_drop;
struct hw_xmit *hwxmits;
u8 hwxmit_entry;
- struct semaphore tx_retevt;/*all tx return event;*/
u8 txirp_cnt;
struct tasklet_struct xmit_tasklet;
_workitem xmit_pipe4_reset_wi;
diff --git a/drivers/staging/rtl8712/sta_info.h b/drivers/staging/rtl8712/sta_info.h
index 48d6a14c8f5..f8016e9abff 100644
--- a/drivers/staging/rtl8712/sta_info.h
+++ b/drivers/staging/rtl8712/sta_info.h
@@ -90,7 +90,6 @@ struct sta_info {
* curr_network(mlme_priv/security_priv/qos/ht) : AP CAP/INFO
* sta_info: (AP & STA) CAP/INFO
*/
-#ifdef CONFIG_R8712_AP
struct list_head asoc_list;
struct list_head auth_list;
unsigned int expire_to;
@@ -98,7 +97,6 @@ struct sta_info {
unsigned int authalg;
unsigned char chg_txt[128];
unsigned int tx_ra_bitmap;
-#endif
};
struct sta_priv {
@@ -111,13 +109,11 @@ struct sta_priv {
struct __queue sleep_q;
struct __queue wakeup_q;
struct _adapter *padapter;
-#ifdef CONFIG_R8712_AP
struct list_head asoc_list;
struct list_head auth_list;
unsigned int auth_to; /* sec, time to expire in authenticating. */
unsigned int assoc_to; /* sec, time to expire before associating. */
unsigned int expire_to; /* sec , time to expire after associated. */
-#endif
};
static inline u32 wifi_mac_hash(u8 *mac)
diff --git a/drivers/staging/rtl8712/usb_intf.c b/drivers/staging/rtl8712/usb_intf.c
index 9bade184883..e419b4fd82b 100644
--- a/drivers/staging/rtl8712/usb_intf.c
+++ b/drivers/staging/rtl8712/usb_intf.c
@@ -30,6 +30,7 @@
#include <linux/usb.h>
#include <linux/module.h>
+#include <linux/firmware.h>
#include "osdep_service.h"
#include "drv_types.h"
@@ -105,10 +106,10 @@ static struct usb_device_id rtl871x_usb_id_tbl[] = {
/* RTL8191SU */
/* Realtek */
{USB_DEVICE(0x0BDA, 0x8172)},
+ {USB_DEVICE(0x0BDA, 0x8192)},
/* Amigo */
{USB_DEVICE(0x0EB0, 0x9061)},
/* ASUS/EKB */
- {USB_DEVICE(0x0BDA, 0x8172)},
{USB_DEVICE(0x13D3, 0x3323)},
{USB_DEVICE(0x13D3, 0x3311)}, /* 11n mode disable */
{USB_DEVICE(0x13D3, 0x3342)},
@@ -160,7 +161,6 @@ static struct usb_device_id rtl871x_usb_id_tbl[] = {
/* RTL8192SU */
/* Realtek */
{USB_DEVICE(0x0BDA, 0x8174)},
- {USB_DEVICE(0x0BDA, 0x8174)},
/* Belkin */
{USB_DEVICE(0x050D, 0x845A)},
/* Corega */
@@ -281,7 +281,6 @@ static uint r8712_usb_dvobj_init(struct _adapter *padapter)
}
if ((r8712_alloc_io_queue(padapter)) == _FAIL)
status = _FAIL;
- sema_init(&(padapter->dvobjpriv.usb_suspend_sema), 0);
return status;
}
@@ -623,6 +622,10 @@ static void r871xu_dev_remove(struct usb_interface *pusb_intf)
usb_set_intfdata(pusb_intf, NULL);
if (padapter) {
+ if (padapter->fw_found)
+ release_firmware(padapter->fw);
+ /* never exit with a firmware callback pending */
+ wait_for_completion(&padapter->rtl8712_fw_ready);
if (drvpriv.drv_registered == true)
padapter->bSurpriseRemoved = true;
if (pnetdev != NULL) {
diff --git a/drivers/staging/rts5139/TODO b/drivers/staging/rts5139/TODO
index 4bde726ea5f..dd5fabb8ea7 100644
--- a/drivers/staging/rts5139/TODO
+++ b/drivers/staging/rts5139/TODO
@@ -2,4 +2,8 @@ TODO:
- support more USB card reader of Realtek family
- use kernel coding style
- checkpatch.pl fixes
-
+- stop having thousands of lines of code duplicated with staging/rts_pstor
+- This driver contains an entire SD/MMC stack -- it should use the stack in
+ drivers/mmc instead, as a host driver e.g. drivers/mmc/host/realtek-usb.c;
+ see drivers/mmc/host/ushc.c as an example.
+- This driver presents cards as SCSI devices, but they should be MMC devices.
diff --git a/drivers/staging/rts5139/ms.h b/drivers/staging/rts5139/ms.h
index f9d46d210f2..3ce1dc90f19 100644
--- a/drivers/staging/rts5139/ms.h
+++ b/drivers/staging/rts5139/ms.h
@@ -249,9 +249,9 @@ int ms_delay_write(struct rts51x_chip *chip);
#ifdef SUPPORT_MAGIC_GATE
int ms_switch_clock(struct rts51x_chip *chip);
-int ms_write_bytes(struct rts51x_chip *chip, u8 tpc, u8 cnt, u8 cfg, u8 * data,
+int ms_write_bytes(struct rts51x_chip *chip, u8 tpc, u8 cnt, u8 cfg, u8 *data,
int data_len);
-int ms_read_bytes(struct rts51x_chip *chip, u8 tpc, u8 cnt, u8 cfg, u8 * data,
+int ms_read_bytes(struct rts51x_chip *chip, u8 tpc, u8 cnt, u8 cfg, u8 *data,
int data_len);
int ms_set_rw_reg_addr(struct rts51x_chip *chip, u8 read_start, u8 read_cnt,
u8 write_start, u8 write_cnt);
diff --git a/drivers/staging/rts5139/rts51x_chip.c b/drivers/staging/rts5139/rts51x_chip.c
index adc0d000573..b3e0bb22b0f 100644
--- a/drivers/staging/rts5139/rts51x_chip.c
+++ b/drivers/staging/rts5139/rts51x_chip.c
@@ -541,7 +541,7 @@ int rts51x_get_rsp(struct rts51x_chip *chip, int rsp_len, int timeout)
return STATUS_SUCCESS;
}
-int rts51x_get_card_status(struct rts51x_chip *chip, u16 * status)
+int rts51x_get_card_status(struct rts51x_chip *chip, u16 *status)
{
int retval;
u16 val;
@@ -577,7 +577,7 @@ int rts51x_write_register(struct rts51x_chip *chip, u16 addr, u8 mask, u8 data)
return STATUS_SUCCESS;
}
-int rts51x_read_register(struct rts51x_chip *chip, u16 addr, u8 * data)
+int rts51x_read_register(struct rts51x_chip *chip, u16 addr, u8 *data)
{
int retval;
@@ -620,7 +620,7 @@ int rts51x_ep0_write_register(struct rts51x_chip *chip, u16 addr, u8 mask,
return STATUS_SUCCESS;
}
-int rts51x_ep0_read_register(struct rts51x_chip *chip, u16 addr, u8 * data)
+int rts51x_ep0_read_register(struct rts51x_chip *chip, u16 addr, u8 *data)
{
int retval;
u16 value = 0;
@@ -720,7 +720,7 @@ int rts51x_seq_read_register(struct rts51x_chip *chip, u16 addr, u16 len,
return STATUS_SUCCESS;
}
-int rts51x_read_ppbuf(struct rts51x_chip *chip, u8 * buf, int buf_len)
+int rts51x_read_ppbuf(struct rts51x_chip *chip, u8 *buf, int buf_len)
{
int retval;
@@ -735,7 +735,7 @@ int rts51x_read_ppbuf(struct rts51x_chip *chip, u8 * buf, int buf_len)
return STATUS_SUCCESS;
}
-int rts51x_write_ppbuf(struct rts51x_chip *chip, u8 * buf, int buf_len)
+int rts51x_write_ppbuf(struct rts51x_chip *chip, u8 *buf, int buf_len)
{
int retval;
@@ -776,7 +776,7 @@ int rts51x_write_phy_register(struct rts51x_chip *chip, u8 addr, u8 val)
return STATUS_SUCCESS;
}
-int rts51x_read_phy_register(struct rts51x_chip *chip, u8 addr, u8 * val)
+int rts51x_read_phy_register(struct rts51x_chip *chip, u8 addr, u8 *val)
{
int retval;
@@ -921,7 +921,7 @@ void rts51x_trace_msg(struct rts51x_chip *chip, unsigned char *buf, int clear)
}
#endif
-void rts51x_pp_status(struct rts51x_chip *chip, unsigned int lun, u8 * status,
+void rts51x_pp_status(struct rts51x_chip *chip, unsigned int lun, u8 *status,
u8 status_len)
{
struct sd_info *sd_card = &(chip->sd_card);
diff --git a/drivers/staging/rts5139/rts51x_chip.h b/drivers/staging/rts5139/rts51x_chip.h
index 321ece750ed..13fc2a410d9 100644
--- a/drivers/staging/rts5139/rts51x_chip.h
+++ b/drivers/staging/rts5139/rts51x_chip.h
@@ -857,12 +857,12 @@ static inline u8 *rts51x_get_rsp_data(struct rts51x_chip *chip)
return chip->rsp_buf;
}
-int rts51x_get_card_status(struct rts51x_chip *chip, u16 * status);
+int rts51x_get_card_status(struct rts51x_chip *chip, u16 *status);
int rts51x_write_register(struct rts51x_chip *chip, u16 addr, u8 mask, u8 data);
-int rts51x_read_register(struct rts51x_chip *chip, u16 addr, u8 * data);
+int rts51x_read_register(struct rts51x_chip *chip, u16 addr, u8 *data);
int rts51x_ep0_write_register(struct rts51x_chip *chip, u16 addr, u8 mask,
u8 data);
-int rts51x_ep0_read_register(struct rts51x_chip *chip, u16 addr, u8 * data);
+int rts51x_ep0_read_register(struct rts51x_chip *chip, u16 addr, u8 *data);
int rts51x_seq_write_register(struct rts51x_chip *chip, u16 addr, u16 len,
u8 *data);
int rts51x_seq_read_register(struct rts51x_chip *chip, u16 addr, u16 len,
diff --git a/drivers/staging/rts5139/rts51x_fop.h b/drivers/staging/rts5139/rts51x_fop.h
index 0453f57d1a8..94d75f08d25 100644
--- a/drivers/staging/rts5139/rts51x_fop.h
+++ b/drivers/staging/rts5139/rts51x_fop.h
@@ -48,7 +48,7 @@ int rts51x_open(struct inode *inode, struct file *filp);
int rts51x_release(struct inode *inode, struct file *filp);
ssize_t rts51x_read(struct file *filp, char __user *buf, size_t count,
loff_t *f_pos);
-ssize_t rts51x_write(struct file *filp, const char __user * buf, size_t count,
+ssize_t rts51x_write(struct file *filp, const char __user *buf, size_t count,
loff_t *f_pos);
#if 0 /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 36) */
int rts51x_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
diff --git a/drivers/staging/rts5139/rts51x_transport.c b/drivers/staging/rts5139/rts51x_transport.c
index e11467acc57..da9c83b4942 100644
--- a/drivers/staging/rts5139/rts51x_transport.c
+++ b/drivers/staging/rts5139/rts51x_transport.c
@@ -883,7 +883,7 @@ int rts51x_transfer_data_partial(struct rts51x_chip *chip, unsigned int pipe,
return result;
}
-int rts51x_get_epc_status(struct rts51x_chip *chip, u16 * status)
+int rts51x_get_epc_status(struct rts51x_chip *chip, u16 *status)
{
unsigned int pipe = RCV_INTR_PIPE(chip);
struct usb_host_endpoint *ep;
diff --git a/drivers/staging/rts5139/rts51x_transport.h b/drivers/staging/rts5139/rts51x_transport.h
index 8464c4836d5..9dd556ea9c0 100644
--- a/drivers/staging/rts5139/rts51x_transport.h
+++ b/drivers/staging/rts5139/rts51x_transport.h
@@ -73,7 +73,7 @@ int rts51x_start_epc_transfer(struct rts51x_chip *chip);
void rts51x_cancel_epc_transfer(struct rts51x_chip *chip);
#endif
-int rts51x_get_epc_status(struct rts51x_chip *chip, u16 * status);
+int rts51x_get_epc_status(struct rts51x_chip *chip, u16 *status);
void rts51x_invoke_transport(struct scsi_cmnd *srb, struct rts51x_chip *chip);
#endif /* __RTS51X_TRANSPORT_H */
diff --git a/drivers/staging/rts5139/sd_cprm.c b/drivers/staging/rts5139/sd_cprm.c
index 407cd43ad3b..d5969d992d8 100644
--- a/drivers/staging/rts5139/sd_cprm.c
+++ b/drivers/staging/rts5139/sd_cprm.c
@@ -233,7 +233,7 @@ RTY_SEND_CMD:
return STATUS_SUCCESS;
}
-int ext_sd_get_rsp(struct rts51x_chip *chip, int len, u8 * rsp, u8 rsp_type)
+int ext_sd_get_rsp(struct rts51x_chip *chip, int len, u8 *rsp, u8 rsp_type)
{
int retval, rsp_len;
u16 reg_addr;
diff --git a/drivers/staging/rts_pstor/TODO b/drivers/staging/rts_pstor/TODO
index 2f93a7c1b5a..becb95e4f2c 100644
--- a/drivers/staging/rts_pstor/TODO
+++ b/drivers/staging/rts_pstor/TODO
@@ -2,4 +2,8 @@ TODO:
- support more pcie card reader of Realtek family
- use kernel coding style
- checkpatch.pl fixes
-
+- stop having thousands of lines of code duplicated with staging/rts5139
+- This driver contains an entire SD/MMC stack -- it should use the stack in
+ drivers/mmc instead, as a host driver e.g. drivers/mmc/host/realtek-pci.c;
+ see drivers/mmc/host/via-sdmmc.c as an example.
+- This driver presents cards as SCSI devices, but they should be MMC devices.
diff --git a/drivers/staging/sbe-2t3e3/intr.c b/drivers/staging/sbe-2t3e3/intr.c
index 7ad1a838203..1336aab11bd 100644
--- a/drivers/staging/sbe-2t3e3/intr.c
+++ b/drivers/staging/sbe-2t3e3/intr.c
@@ -188,7 +188,7 @@ void dc_intr_rx(struct channel *sc)
}
if (sc->s.LOS) {
- error_mask &= ~(SBE_2T3E3_RX_DESC_DRIBBLING_BIT ||
+ error_mask &= ~(SBE_2T3E3_RX_DESC_DRIBBLING_BIT |
SBE_2T3E3_RX_DESC_MII_ERROR);
}
diff --git a/drivers/staging/sep/Kconfig b/drivers/staging/sep/Kconfig
index 92bf16667d0..185b676d858 100644
--- a/drivers/staging/sep/Kconfig
+++ b/drivers/staging/sep/Kconfig
@@ -3,7 +3,8 @@ config DX_SEP
depends on PCI
help
Discretix SEP driver; used for the security processor subsystem
- on bard the Intel Mobile Internet Device.
+ on board the Intel Mobile Internet Device and adds SEP availability
+	  to the kernel crypto infrastructure.
The driver's name is sep_driver.
diff --git a/drivers/staging/sep/Makefile b/drivers/staging/sep/Makefile
index 628d5f91941..e48a7959289 100644
--- a/drivers/staging/sep/Makefile
+++ b/drivers/staging/sep/Makefile
@@ -1,2 +1,3 @@
-obj-$(CONFIG_DX_SEP) := sep_driver.o
-
+ccflags-y += -I$(srctree)/$(src)
+obj-$(CONFIG_DX_SEP) += sep_driver.o
+sep_driver-objs := sep_crypto.o sep_main.o
diff --git a/drivers/staging/sep/TODO b/drivers/staging/sep/TODO
index 8f3b878ad8a..3524d0cf84b 100644
--- a/drivers/staging/sep/TODO
+++ b/drivers/staging/sep/TODO
@@ -1,4 +1,3 @@
Todo's so far (from Alan Cox)
-- Check whether it can be plugged into any of the kernel crypto API
- interfaces - Crypto API 'glue' is still not ready to submit
-- Clean up un-needed debug prints - Started to work on this
+- Clean up unused ioctls
+- Clean up unused fields in ioctl structures
diff --git a/drivers/staging/sep/sep_crypto.c b/drivers/staging/sep/sep_crypto.c
new file mode 100644
index 00000000000..1cc790e9fa0
--- /dev/null
+++ b/drivers/staging/sep/sep_crypto.c
@@ -0,0 +1,4058 @@
+/*
+ *
+ * sep_crypto.c - Crypto interface structures
+ *
+ * Copyright(c) 2009-2011 Intel Corporation. All rights reserved.
+ * Contributions(c) 2009-2010 Discretix. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * CONTACTS:
+ *
+ * Mark Allyn mark.a.allyn@intel.com
+ * Jayant Mangalampalli jayant.mangalampalli@intel.com
+ *
+ * CHANGES:
+ *
+ * 2009.06.26 Initial publish
+ * 2010.09.14 Upgrade to Medfield
+ * 2011.02.22 Enable Kernel Crypto
+ *
+ */
+
+/* #define DEBUG */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/miscdevice.h>
+#include <linux/fs.h>
+#include <linux/cdev.h>
+#include <linux/kdev_t.h>
+#include <linux/mutex.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/poll.h>
+#include <linux/wait.h>
+#include <linux/pci.h>
+#include <linux/pm_runtime.h>
+#include <linux/err.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/clk.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/list.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/jiffies.h>
+#include <linux/workqueue.h>
+#include <linux/crypto.h>
+#include <crypto/internal/hash.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/sha.h>
+#include <crypto/md5.h>
+#include <crypto/aes.h>
+#include <crypto/des.h>
+#include <crypto/hash.h>
+#include "sep_driver_hw_defs.h"
+#include "sep_driver_config.h"
+#include "sep_driver_api.h"
+#include "sep_dev.h"
+#include "sep_crypto.h"
+
+#if defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE)
+
+/* Globals for queuing */
+static spinlock_t queue_lock;
+static struct crypto_queue sep_queue;
+
+/* Forward declaration of the dequeuer */
+static void sep_dequeuer(void *data);
+
+/* TESTING */
+/**
+ * crypto_sep_dump_message - dump the message that is pending
+ * @sep: SEP device
+ * @msg: message to dump
+ * This will only print the dump if DEBUG is set; it
+ * follows the kernel debug print enabling
+ */
+static void crypto_sep_dump_message(struct sep_device *sep, void *msg)
+{
+#if 0
+ u32 *p;
+ u32 *i;
+ int count;
+
+ p = sep->shared_addr;
+ i = (u32 *)msg;
+ for (count = 0; count < 10 * 4; count += 4)
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] Word %d of the message is %x (local)%x\n",
+ current->pid, count/4, *p++, *i++);
+#endif
+}
+
+/**
+ * sep_do_callback
+ * @work: pointer to work_struct
+ * This is what is called by the queue; it is generic so that it
+ * can be used by any type of operation as each different callback
+ * function can use the data parameter in its own way
+ */
+static void sep_do_callback(struct work_struct *work)
+{
+ struct sep_work_struct *sep_work = container_of(work,
+ struct sep_work_struct, work);
+ if (sep_work != NULL) {
+ (sep_work->callback)(sep_work->data);
+ kfree(sep_work);
+ } else {
+ pr_debug("sep crypto: do callback - NULL container\n");
+ }
+}
+
+/**
+ * sep_submit_work
+ * @work_queue: pointer to struct_workqueue
+ * @funct: pointer to function to execute
+ * @data: pointer to data; function will know
+ * how to use it
+ * This is a generic API to submit something to
+ * the queue. The callback function will depend
+ * on what operation is to be done
+ */
+static int sep_submit_work(struct workqueue_struct *work_queue,
+ void(*funct)(void *),
+ void *data)
+{
+ struct sep_work_struct *sep_work;
+ int result;
+
+ sep_work = kmalloc(sizeof(struct sep_work_struct), GFP_ATOMIC);
+
+ if (sep_work == NULL) {
+		pr_debug("sep crypto: can't allocate work structure\n");
+ return -ENOMEM;
+ }
+
+ sep_work->callback = funct;
+ sep_work->data = data;
+ INIT_WORK(&sep_work->work, sep_do_callback);
+ result = queue_work(work_queue, &sep_work->work);
+ if (!result) {
+ pr_debug("sep_crypto: queue_work failed\n");
+ return -EINVAL;
+ }
+ return 0;
+}
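+
+/*
+ * Usage sketch (hypothetical; the workqueue variable is assumed, not
+ * part of this file): a caller hands sep_submit_work() a queue, a
+ * callback and an opaque pointer, and sep_do_callback() later invokes
+ * the callback with that pointer:
+ *
+ *	if (sep_submit_work(my_workqueue, sep_dequeuer, (void *)&sep_queue))
+ *		pr_debug("sep_crypto: could not queue dequeuer\n");
+ */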
+
+/**
+ * sep_alloc_sg_buf -
+ * @sep: pointer to struct sep_device
+ * @size: total size of area
+ * @block_size: minimum size of chunks;
+ * each page is at least this size or a multiple of it
+ * @returns: pointer to struct scatterlist for new
+ * buffer
+ **/
+static struct scatterlist *sep_alloc_sg_buf(
+ struct sep_device *sep,
+ size_t size,
+ size_t block_size)
+{
+ u32 nbr_pages;
+ u32 ct1;
+ void *buf;
+ size_t current_size;
+ size_t real_page_size;
+
+ struct scatterlist *sg, *sg_temp;
+
+ if (size == 0)
+ return NULL;
+
+ dev_dbg(&sep->pdev->dev, "sep alloc sg buf\n");
+
+ current_size = 0;
+ nbr_pages = 0;
+ real_page_size = PAGE_SIZE - (PAGE_SIZE % block_size);
+	/*
+	 * The size of each page must be a multiple of the operation
+	 * block size; increment by the adjusted page size until
+	 * the total size is reached, then you have the number of
+	 * pages
+	 */
+ while (current_size < size) {
+ current_size += real_page_size;
+ nbr_pages += 1;
+ }
+
+ sg = kmalloc((sizeof(struct scatterlist) * nbr_pages), GFP_ATOMIC);
+ if (!sg) {
+ dev_warn(&sep->pdev->dev, "Cannot allocate page for new sg\n");
+ return NULL;
+ }
+
+ sg_init_table(sg, nbr_pages);
+
+ current_size = 0;
+ sg_temp = sg;
+ for (ct1 = 0; ct1 < nbr_pages; ct1 += 1) {
+ buf = (void *)get_zeroed_page(GFP_ATOMIC);
+ if (!buf) {
+ dev_warn(&sep->pdev->dev,
+ "Cannot allocate page for new buffer\n");
+ kfree(sg);
+ return NULL;
+ }
+
+ sg_set_buf(sg_temp, buf, real_page_size);
+ if ((size - current_size) > real_page_size) {
+ sg_temp->length = real_page_size;
+ current_size += real_page_size;
+ } else {
+ sg_temp->length = (size - current_size);
+ current_size = size;
+ }
+ sg_temp = sg_next(sg);
+ }
+ return sg;
+}
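+
+/*
+ * Sizing example for the helper above (assumed numbers): with
+ * block_size = 16 and 4K pages, real_page_size stays 4096, so
+ *
+ *	sg = sep_alloc_sg_buf(sep, 4100, 16);
+ *
+ * yields nbr_pages = 2: one full 4096-byte entry plus a second entry
+ * whose length is trimmed to the remaining 4 bytes.
+ */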
+
+/**
+ * sep_free_sg_buf -
+ * @sg: pointer to struct scatterlist; points to area to free
+ */
+static void sep_free_sg_buf(struct scatterlist *sg)
+{
+ struct scatterlist *sg_temp = sg;
+ while (sg_temp) {
+ free_page((unsigned long)sg_virt(sg_temp));
+ sg_temp = sg_next(sg_temp);
+ }
+ kfree(sg);
+}
+
+/**
+ * sep_copy_sg -
+ * @sep: pointer to struct sep_device
+ * @sg_src: pointer to struct scatterlist for source
+ * @sg_dst: pointer to struct scatterlist for destination
+ * @size: size (in bytes) of data to copy
+ *
+ * Copy data from one scatterlist to another; both must
+ * be the same size
+ */
+static void sep_copy_sg(
+ struct sep_device *sep,
+ struct scatterlist *sg_src,
+ struct scatterlist *sg_dst,
+ size_t size)
+{
+ u32 seg_size;
+ u32 in_offset, out_offset;
+
+ u32 count = 0;
+ struct scatterlist *sg_src_tmp = sg_src;
+ struct scatterlist *sg_dst_tmp = sg_dst;
+ in_offset = 0;
+ out_offset = 0;
+
+ dev_dbg(&sep->pdev->dev, "sep copy sg\n");
+
+ if ((sg_src == NULL) || (sg_dst == NULL) || (size == 0))
+ return;
+
+ dev_dbg(&sep->pdev->dev, "sep copy sg not null\n");
+
+ while (count < size) {
+ if ((sg_src_tmp->length - in_offset) >
+ (sg_dst_tmp->length - out_offset))
+ seg_size = sg_dst_tmp->length - out_offset;
+ else
+ seg_size = sg_src_tmp->length - in_offset;
+
+ if (seg_size > (size - count))
+			seg_size = size - count;
+
+ memcpy(sg_virt(sg_dst_tmp) + out_offset,
+ sg_virt(sg_src_tmp) + in_offset,
+ seg_size);
+
+ in_offset += seg_size;
+ out_offset += seg_size;
+ count += seg_size;
+
+ if (in_offset >= sg_src_tmp->length) {
+ sg_src_tmp = sg_next(sg_src_tmp);
+ in_offset = 0;
+ }
+
+ if (out_offset >= sg_dst_tmp->length) {
+ sg_dst_tmp = sg_next(sg_dst_tmp);
+ out_offset = 0;
+ }
+ }
+}
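+
+/*
+ * Usage sketch (hypothetical names): the two lists may have different
+ * page geometry as long as each covers at least nbytes, so a
+ * realigned copy of a request buffer could be made with:
+ *
+ *	struct scatterlist *dst;
+ *
+ *	dst = sep_alloc_sg_buf(sep, nbytes, block_size);
+ *	if (dst)
+ *		sep_copy_sg(sep, req->src, dst, nbytes);
+ */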
+
+/**
+ * sep_oddball_pages -
+ * @sep: pointer to struct sep_device
+ * @sg: pointer to struct scatterlist - buffer to check
+ * @data_size: total data size
+ * @block_size: minimum block size; pages must be multiples of this size
+ * @do_copy: 1 means do copy, 0 means do not copy
+ * @new_sg: pointer to location to put pointer to new sg area
+ * @returns: 1 if new scatterlist is needed; 0 if not needed;
+ * error value if operation failed
+ *
+ * The SEP device requires all pages to be multiples of the
+ * minimum block size appropriate for the operation
+ * This function checks all pages; if any are oddball sizes
+ * (not multiples of the block size), it creates a new scatterlist.
+ * If the do_copy parameter is set to 1, then a scatter list
+ * copy is performed. The pointer to the new scatterlist is
+ * put into the address supplied by the new_sg parameter; if
+ * no new scatterlist is needed, then a NULL is put into
+ * the location at new_sg.
+ *
+ */
+static int sep_oddball_pages(
+ struct sep_device *sep,
+ struct scatterlist *sg,
+ size_t data_size,
+ u32 block_size,
+ struct scatterlist **new_sg,
+ u32 do_copy)
+{
+ struct scatterlist *sg_temp;
+ u32 flag;
+ u32 nbr_pages, page_count;
+
+ dev_dbg(&sep->pdev->dev, "sep oddball\n");
+ if ((sg == NULL) || (data_size == 0) || (data_size < block_size))
+ return 0;
+
+ dev_dbg(&sep->pdev->dev, "sep oddball not null\n");
+ flag = 0;
+ nbr_pages = 0;
+ page_count = 0;
+ sg_temp = sg;
+
+ while (sg_temp) {
+ nbr_pages += 1;
+ sg_temp = sg_next(sg_temp);
+ }
+
+ sg_temp = sg;
+ while ((sg_temp) && (flag == 0)) {
+ page_count += 1;
+ if (sg_temp->length % block_size)
+ flag = 1;
+ else
+ sg_temp = sg_next(sg_temp);
+ }
+
+ /* Do not process if last (or only) page is oddball */
+ if (nbr_pages == page_count)
+ flag = 0;
+
+ if (flag) {
+ dev_dbg(&sep->pdev->dev, "sep oddball processing\n");
+ *new_sg = sep_alloc_sg_buf(sep, data_size, block_size);
+ if (*new_sg == NULL) {
+ dev_warn(&sep->pdev->dev, "cannot allocate new sg\n");
+ return -ENOMEM;
+ }
+
+ if (do_copy)
+ sep_copy_sg(sep, sg, *new_sg, data_size);
+
+ return 1;
+ } else {
+ return 0;
+ }
+}
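+
+/*
+ * Caller sketch for the three-way result above (hypothetical caller;
+ * variable names assumed):
+ *
+ *	int r = sep_oddball_pages(sep, req->src, nbytes, block_size,
+ *				  &new_sg, 1);
+ *	if (r < 0)
+ *		return r;			(allocation failed)
+ *	sg = (r == 1) ? new_sg : req->src;	(use copy only if needed)
+ */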
+
+/**
+ * sep_copy_offset_sg -
+ * @sep: pointer to struct sep_device;
+ * @sg: pointer to struct scatterlist
+ * @offset: offset into scatterlist memory
+ * @dst: place to put data
+ * @len: length of data
+ * @returns: number of bytes copied
+ *
+ * This copies data from a scatterlist buffer, starting
+ * at an offset from the beginning; it is needed for
+ * handling tail data in hash
+ */
+static size_t sep_copy_offset_sg(
+ struct sep_device *sep,
+ struct scatterlist *sg,
+ u32 offset,
+ void *dst,
+ u32 len)
+{
+ size_t page_start;
+ size_t page_end;
+ size_t offset_within_page;
+ size_t length_within_page;
+ size_t length_remaining;
+ size_t current_offset;
+
+ /* Find which page is beginning of segment */
+ page_start = 0;
+ page_end = sg->length;
+ while ((sg) && (offset > page_end)) {
+ page_start += sg->length;
+ sg = sg_next(sg);
+ if (sg)
+ page_end += sg->length;
+ }
+
+ if (sg == NULL)
+ return -ENOMEM;
+
+ offset_within_page = offset - page_start;
+ if ((sg->length - offset_within_page) >= len) {
+ /* All within this page */
+ memcpy(dst, sg_virt(sg) + offset_within_page, len);
+ return len;
+ } else {
+ /* Scattered multiple pages */
+ current_offset = 0;
+ length_remaining = len;
+ while ((sg) && (current_offset < len)) {
+ length_within_page = sg->length - offset_within_page;
+ if (length_within_page >= length_remaining) {
+ memcpy(dst+current_offset,
+ sg_virt(sg) + offset_within_page,
+ length_remaining);
+ length_remaining = 0;
+ current_offset = len;
+ } else {
+ memcpy(dst+current_offset,
+ sg_virt(sg) + offset_within_page,
+ length_within_page);
+ length_remaining -= length_within_page;
+ current_offset += length_within_page;
+ offset_within_page = 0;
+ sg = sg_next(sg);
+ }
+ }
+
+ if (sg == NULL)
+ return -ENOMEM;
+ }
+ return len;
+}
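+
+/*
+ * Sketch of the hash tail-data case this helper serves (hypothetical
+ * caller; names assumed): after whole blocks have been processed, the
+ * sub-block remainder is pulled out of the request scatterlist for
+ * buffering until more data arrives:
+ *
+ *	tail = nbytes % block_size;
+ *	copied = sep_copy_offset_sg(sep, req->src, nbytes - tail,
+ *				    tail_buf, tail);
+ */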
+
+/**
+ * partial_overlap -
+ * @src_ptr: source pointer
+ * @dst_ptr: destination pointer
+ * @nbytes: number of bytes
+ * @returns: 0 for success; -EINVAL on partial overlap
+ * We cannot have any partial overlap. Total overlap
+ * where src is the same as dst is okay
+ */
+static int partial_overlap(void *src_ptr, void *dst_ptr, u32 nbytes)
+{
+ /* Check for partial overlap */
+ if (src_ptr != dst_ptr) {
+ if (src_ptr < dst_ptr) {
+ if ((src_ptr + nbytes) > dst_ptr)
+ return -EINVAL;
+ } else {
+ if ((dst_ptr + nbytes) > src_ptr)
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
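+
+/*
+ * Worked example: total overlap (an in-place operation) passes, while
+ * a shifted overlap is rejected:
+ *
+ *	partial_overlap(buf, buf, 64)		returns 0
+ *	partial_overlap(buf, buf + 16, 64)	returns -EINVAL
+ */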
+
+/* Debug - prints only if DEBUG is defined; follows kernel debug model */
+static void sep_dump(struct sep_device *sep, char *stg, void *start, int len)
+{
+#if 0
+ int ct1;
+ u8 *ptt;
+
+ dev_dbg(&sep->pdev->dev,
+ "Dump of %s starting at %08lx for %08x bytes\n",
+ stg, (unsigned long)start, len);
+ for (ct1 = 0; ct1 < len; ct1 += 1) {
+ ptt = (u8 *)(start + ct1);
+ dev_dbg(&sep->pdev->dev, "%02x ", *ptt);
+ if (ct1 % 16 == 15)
+ dev_dbg(&sep->pdev->dev, "\n");
+ }
+ dev_dbg(&sep->pdev->dev, "\n");
+#endif
+}
+
+/* Debug - prints only if DEBUG is defined; follows kernel debug model */
+static void sep_dump_sg(struct sep_device *sep, char *stg,
+ struct scatterlist *sg)
+{
+#if 0
+ int ct1, ct2;
+ u8 *ptt;
+
+ dev_dbg(&sep->pdev->dev, "Dump of scatterlist %s\n", stg);
+
+ ct1 = 0;
+ while (sg) {
+ dev_dbg(&sep->pdev->dev, "page %x\n size %x", ct1,
+ sg->length);
+ dev_dbg(&sep->pdev->dev, "phys addr is %lx",
+ (unsigned long)sg_phys(sg));
+ ptt = sg_virt(sg);
+ for (ct2 = 0; ct2 < sg->length; ct2 += 1) {
+ dev_dbg(&sep->pdev->dev, "byte %x is %02x\n",
+ ct2, (unsigned char)*(ptt + ct2));
+ }
+
+ ct1 += 1;
+ sg = sg_next(sg);
+ }
+ dev_dbg(&sep->pdev->dev, "\n");
+#endif
+}
+
+/* Debug - prints only if DEBUG is defined */
+static void sep_dump_ivs(struct ablkcipher_request *req, char *reason)
+{
+ unsigned char *cptr;
+ struct sep_aes_internal_context *aes_internal;
+ struct sep_des_internal_context *des_internal;
+ int ct1;
+
+ struct this_task_ctx *ta_ctx;
+ struct crypto_ablkcipher *tfm;
+ struct sep_system_ctx *sctx;
+
+ ta_ctx = ablkcipher_request_ctx(req);
+ tfm = crypto_ablkcipher_reqtfm(req);
+ sctx = crypto_ablkcipher_ctx(tfm);
+
+ dev_dbg(&ta_ctx->sep_used->pdev->dev, "IV DUMP - %s\n", reason);
+ if ((ta_ctx->current_request == DES_CBC) &&
+ (ta_ctx->des_opmode == SEP_DES_CBC)) {
+
+ des_internal = (struct sep_des_internal_context *)
+ sctx->des_private_ctx.ctx_buf;
+ /* print vendor */
+ dev_dbg(&ta_ctx->sep_used->pdev->dev,
+ "sep - vendor iv for DES\n");
+ cptr = (unsigned char *)des_internal->iv_context;
+ for (ct1 = 0; ct1 < crypto_ablkcipher_ivsize(tfm); ct1 += 1)
+ dev_dbg(&ta_ctx->sep_used->pdev->dev,
+ "%02x\n", *(cptr + ct1));
+
+ /* print walk */
+ dev_dbg(&ta_ctx->sep_used->pdev->dev,
+ "sep - walk from kernel crypto iv for DES\n");
+ cptr = (unsigned char *)ta_ctx->walk.iv;
+ for (ct1 = 0; ct1 < crypto_ablkcipher_ivsize(tfm); ct1 += 1)
+ dev_dbg(&ta_ctx->sep_used->pdev->dev,
+ "%02x\n", *(cptr + ct1));
+ } else if ((ta_ctx->current_request == AES_CBC) &&
+ (ta_ctx->aes_opmode == SEP_AES_CBC)) {
+
+ aes_internal = (struct sep_aes_internal_context *)
+ sctx->aes_private_ctx.cbuff;
+ /* print vendor */
+ dev_dbg(&ta_ctx->sep_used->pdev->dev,
+ "sep - vendor iv for AES\n");
+ cptr = (unsigned char *)aes_internal->aes_ctx_iv;
+ for (ct1 = 0; ct1 < crypto_ablkcipher_ivsize(tfm); ct1 += 1)
+ dev_dbg(&ta_ctx->sep_used->pdev->dev,
+ "%02x\n", *(cptr + ct1));
+
+ /* print walk */
+ dev_dbg(&ta_ctx->sep_used->pdev->dev,
+ "sep - walk from kernel crypto iv for AES\n");
+ cptr = (unsigned char *)ta_ctx->walk.iv;
+ for (ct1 = 0; ct1 < crypto_ablkcipher_ivsize(tfm); ct1 += 1)
+ dev_dbg(&ta_ctx->sep_used->pdev->dev,
+ "%02x\n", *(cptr + ct1));
+ }
+}
+
+/**
+ * RFC2451: Weak key check
+ * Returns: 1 (weak), 0 (not weak)
+ */
+static int sep_weak_key(const u8 *key, unsigned int keylen)
+{
+ static const u8 parity[] = {
+		8, 1, 0, 8, 0, 8, 8, 0, 0, 8, 8, 0, 8, 0, 2, 8,
+		0, 8, 8, 0, 8, 0, 0, 8, 8, 0, 0, 8, 0, 8, 8, 3,
+		0, 8, 8, 0, 8, 0, 0, 8, 8, 0, 0, 8, 0, 8, 8, 0,
+		8, 0, 0, 8, 0, 8, 8, 0, 0, 8, 8, 0, 8, 0, 0, 8,
+		0, 8, 8, 0, 8, 0, 0, 8, 8, 0, 0, 8, 0, 8, 8, 0,
+		8, 0, 0, 8, 0, 8, 8, 0, 0, 8, 8, 0, 8, 0, 0, 8,
+		8, 0, 0, 8, 0, 8, 8, 0, 0, 8, 8, 0, 8, 0, 0, 8,
+		0, 8, 8, 0, 8, 0, 0, 8, 8, 0, 0, 8, 0, 8, 8, 0,
+		0, 8, 8, 0, 8, 0, 0, 8, 8, 0, 0, 8, 0, 8, 8, 0,
+		8, 0, 0, 8, 0, 8, 8, 0, 0, 8, 8, 0, 8, 0, 0, 8,
+		8, 0, 0, 8, 0, 8, 8, 0, 0, 8, 8, 0, 8, 0, 0, 8,
+		0, 8, 8, 0, 8, 0, 0, 8, 8, 0, 0, 8, 0, 8, 8, 0,
+		8, 0, 0, 8, 0, 8, 8, 0, 0, 8, 8, 0, 8, 0, 0, 8,
+		0, 8, 8, 0, 8, 0, 0, 8, 8, 0, 0, 8, 0, 8, 8, 0,
+		4, 8, 8, 0, 8, 0, 0, 8, 8, 0, 0, 8, 0, 8, 8, 0,
+		8, 5, 0, 8, 0, 8, 8, 0, 0, 8, 8, 0, 8, 0, 6, 8,
+ };
+
+ u32 n, w;
+
+ n = parity[key[0]]; n <<= 4;
+ n |= parity[key[1]]; n <<= 4;
+ n |= parity[key[2]]; n <<= 4;
+ n |= parity[key[3]]; n <<= 4;
+ n |= parity[key[4]]; n <<= 4;
+ n |= parity[key[5]]; n <<= 4;
+ n |= parity[key[6]]; n <<= 4;
+ n |= parity[key[7]];
+ w = 0x88888888L;
+
+ /* 1 in 10^10 keys passes this test */
+ if (!((n - (w >> 3)) & w)) {
+ if (n < 0x41415151) {
+ if (n < 0x31312121) {
+ if (n < 0x14141515) {
+ /* 01 01 01 01 01 01 01 01 */
+ if (n == 0x11111111)
+ goto weak;
+ /* 01 1F 01 1F 01 0E 01 0E */
+ if (n == 0x13131212)
+ goto weak;
+ } else {
+ /* 01 E0 01 E0 01 F1 01 F1 */
+ if (n == 0x14141515)
+ goto weak;
+ /* 01 FE 01 FE 01 FE 01 FE */
+ if (n == 0x16161616)
+ goto weak;
+ }
+ } else {
+ if (n < 0x34342525) {
+ /* 1F 01 1F 01 0E 01 0E 01 */
+ if (n == 0x31312121)
+ goto weak;
+ /* 1F 1F 1F 1F 0E 0E 0E 0E (?) */
+ if (n == 0x33332222)
+ goto weak;
+ } else {
+ /* 1F E0 1F E0 0E F1 0E F1 */
+ if (n == 0x34342525)
+ goto weak;
+ /* 1F FE 1F FE 0E FE 0E FE */
+ if (n == 0x36362626)
+ goto weak;
+ }
+ }
+ } else {
+ if (n < 0x61616161) {
+ if (n < 0x44445555) {
+ /* E0 01 E0 01 F1 01 F1 01 */
+ if (n == 0x41415151)
+ goto weak;
+ /* E0 1F E0 1F F1 0E F1 0E */
+ if (n == 0x43435252)
+ goto weak;
+ } else {
+ /* E0 E0 E0 E0 F1 F1 F1 F1 (?) */
+ if (n == 0x44445555)
+ goto weak;
+ /* E0 FE E0 FE F1 FE F1 FE */
+ if (n == 0x46465656)
+ goto weak;
+ }
+ } else {
+ if (n < 0x64646565) {
+ /* FE 01 FE 01 FE 01 FE 01 */
+ if (n == 0x61616161)
+ goto weak;
+ /* FE 1F FE 1F FE 0E FE 0E */
+ if (n == 0x63636262)
+ goto weak;
+ } else {
+ /* FE E0 FE E0 FE F1 FE F1 */
+ if (n == 0x64646565)
+ goto weak;
+ /* FE FE FE FE FE FE FE FE */
+ if (n == 0x66666666)
+ goto weak;
+ }
+ }
+ }
+ }
+ return 0;
+weak:
+ return 1;
+}
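+
+/*
+ * Worked example: the classic DES weak key 01 01 01 01 01 01 01 01
+ * maps every byte through parity[] to nibble 1, giving n == 0x11111111,
+ * which the decision tree above flags as weak:
+ *
+ *	static const u8 k[8] = { 0x01, 0x01, 0x01, 0x01,
+ *				 0x01, 0x01, 0x01, 0x01 };
+ *	sep_weak_key(k, 8)	returns 1
+ */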
+/**
+ * sep_sg_nents
+ */
+static u32 sep_sg_nents(struct scatterlist *sg)
+{
+ u32 ct1 = 0;
+ while (sg) {
+ ct1 += 1;
+ sg = sg_next(sg);
+ }
+
+ return ct1;
+}
+
+/**
+ * sep_start_msg -
+ * @ta_ctx: pointer to struct this_task_ctx
+ * @returns: offset to place for the next word in the message
+ * Set up pointer in message pool for new message
+ */
+static u32 sep_start_msg(struct this_task_ctx *ta_ctx)
+{
+ u32 *word_ptr;
+ ta_ctx->msg_len_words = 2;
+ ta_ctx->msgptr = ta_ctx->msg;
+ memset(ta_ctx->msg, 0, SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);
+ ta_ctx->msgptr += sizeof(u32) * 2;
+ word_ptr = (u32 *)ta_ctx->msgptr;
+ *word_ptr = SEP_START_MSG_TOKEN;
+ return sizeof(u32) * 2;
+}
+
+/**
+ * sep_end_msg -
+ * @ta_ctx: pointer to struct this_task_ctx
+ * @msg_offset: current message offset
+ * End message; set the message length and the
+ * CRC placeholder
+ */
+static void sep_end_msg(struct this_task_ctx *ta_ctx, u32 msg_offset)
+{
+ u32 *word_ptr;
+ /* Msg size goes into msg after token */
+ ta_ctx->msg_len_words = msg_offset / sizeof(u32) + 1;
+ word_ptr = (u32 *)ta_ctx->msgptr;
+ word_ptr += 1;
+ *word_ptr = ta_ctx->msg_len_words;
+
+ /* CRC (currently 0) goes at end of msg */
+ word_ptr = (u32 *)(ta_ctx->msgptr + msg_offset);
+ *word_ptr = 0;
+}
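+
+/*
+ * Sketch of the message layout implied by sep_start_msg() and
+ * sep_end_msg() above (words relative to ta_ctx->msgptr):
+ *
+ *	word 0	SEP_START_MSG_TOKEN
+ *	word 1	total message length in words (set by sep_end_msg())
+ *	word 2	op code (set by sep_make_header() below)
+ *	...	parameters written via sep_write_msg()
+ *	last	CRC placeholder, currently 0
+ */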
+
+/**
+ * sep_start_inbound_msg -
+ * @ta_ctx: pointer to struct this_task_ctx
+ * @msg_offset: offset to place for the next word in the message
+ * @returns: 0 for success; error value for failure
+ * Set up pointer in message pool for inbound message
+ */
+static u32 sep_start_inbound_msg(struct this_task_ctx *ta_ctx, u32 *msg_offset)
+{
+ u32 *word_ptr;
+ u32 token;
+ u32 error = SEP_OK;
+
+ *msg_offset = sizeof(u32) * 2;
+ word_ptr = (u32 *)ta_ctx->msgptr;
+ token = *word_ptr;
+ ta_ctx->msg_len_words = *(word_ptr + 1);
+
+ if (token != SEP_START_MSG_TOKEN) {
+ error = SEP_INVALID_START;
+ goto end_function;
+ }
+
+end_function:
+
+ return error;
+}
+
+/**
+ * sep_write_msg -
+ * @ta_ctx: pointer to struct this_task_ctx
+ * @in_addr: pointer to start of parameter
+ * @size: size of parameter to copy (in bytes)
+ * @max_size: size to move up offset; SEP message is in word sizes
+ * @msg_offset: pointer to current offset (is updated)
+ * @byte_array: flag to indicate whether endianness must be changed
+ * Copies data into the message area from caller
+ */
+static void sep_write_msg(struct this_task_ctx *ta_ctx, void *in_addr,
+ u32 size, u32 max_size, u32 *msg_offset, u32 byte_array)
+{
+ u32 *word_ptr;
+ void *void_ptr;
+ void_ptr = ta_ctx->msgptr + *msg_offset;
+ word_ptr = (u32 *)void_ptr;
+ memcpy(void_ptr, in_addr, size);
+ *msg_offset += max_size;
+
+ /* Do we need to manipulate endian? */
+ if (byte_array) {
+ u32 i;
+ for (i = 0; i < ((size + 3) / 4); i += 1)
+ *(word_ptr + i) = CHG_ENDIAN(*(word_ptr + i));
+ }
+}
+
+/**
+ * sep_make_header
+ * @ta_ctx: pointer to struct this_task_ctx
+ * @msg_offset: pointer to current offset (is updated)
+ * @op_code: op code to put into message
+ * Puts op code into message and updates offset
+ */
+static void sep_make_header(struct this_task_ctx *ta_ctx, u32 *msg_offset,
+ u32 op_code)
+{
+ u32 *word_ptr;
+
+ *msg_offset = sep_start_msg(ta_ctx);
+ word_ptr = (u32 *)(ta_ctx->msgptr + *msg_offset);
+ *word_ptr = op_code;
+ *msg_offset += sizeof(u32);
+}
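+
+/*
+ * Putting the outbound helpers together -- a minimal sketch of the
+ * pattern the workers below follow (sep_hash_init is the simplest
+ * real instance; the opcode and operand here are only examples):
+ *
+ *	u32 msg_offset;
+ *
+ *	sep_make_header(ta_ctx, &msg_offset, SEP_HASH_INIT_OPCODE);
+ *	sep_write_msg(ta_ctx, &ta_ctx->hash_opmode,
+ *		      sizeof(u32), sizeof(u32), &msg_offset, 0);
+ *	sep_end_msg(ta_ctx, msg_offset);
+ */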
+
+/**
+ * sep_read_msg -
+ * @ta_ctx: pointer to struct this_task_ctx
+ * @in_addr: pointer to start of parameter
+ * @size: size of parameter to copy (in bytes)
+ * @max_size: size to move up offset; the SEP message is in word sizes
+ * @msg_offset: pointer to current offset (is updated)
+ * @byte_array: flag to indicate whether endianness must be changed
+ * Copies data out of the message area to caller
+ */
+static void sep_read_msg(struct this_task_ctx *ta_ctx, void *in_addr,
+ u32 size, u32 max_size, u32 *msg_offset, u32 byte_array)
+{
+ u32 *word_ptr;
+ void *void_ptr;
+ void_ptr = ta_ctx->msgptr + *msg_offset;
+ word_ptr = (u32 *)void_ptr;
+
+ /* Do we need to manipulate endian? */
+ if (byte_array) {
+ u32 i;
+ for (i = 0; i < ((size + 3) / 4); i += 1)
+ *(word_ptr + i) = CHG_ENDIAN(*(word_ptr + i));
+ }
+
+ memcpy(in_addr, void_ptr, size);
+ *msg_offset += max_size;
+}
+
+/**
+ * sep_verify_op -
+ * @ta_ctx: pointer to struct this_task_ctx
+ * @op_code: expected op_code
+ * @msg_offset: pointer to current offset (is updated)
+ * @returns: 0 for success; error for failure
+ */
+static u32 sep_verify_op(struct this_task_ctx *ta_ctx, u32 op_code,
+ u32 *msg_offset)
+{
+ u32 error;
+ u32 in_ary[2];
+
+ struct sep_device *sep = ta_ctx->sep_used;
+
+ dev_dbg(&sep->pdev->dev, "dumping return message\n");
+ error = sep_start_inbound_msg(ta_ctx, msg_offset);
+ if (error) {
+ dev_warn(&sep->pdev->dev,
+ "sep_start_inbound_msg error\n");
+ return error;
+ }
+
+ sep_read_msg(ta_ctx, in_ary, sizeof(u32) * 2, sizeof(u32) * 2,
+ msg_offset, 0);
+
+ if (in_ary[0] != op_code) {
+ dev_warn(&sep->pdev->dev,
+ "sep got back wrong opcode\n");
+ dev_warn(&sep->pdev->dev,
+ "got back %x; expected %x\n",
+ in_ary[0], op_code);
+ return SEP_WRONG_OPCODE;
+ }
+
+ if (in_ary[1] != SEP_OK) {
+ dev_warn(&sep->pdev->dev,
+ "sep execution error\n");
+ dev_warn(&sep->pdev->dev,
+ "got back %x; expected %x\n",
+ in_ary[1], SEP_OK);
+		return in_ary[1];
+ }
+
+	return 0;
+}
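+
+/*
+ * Reply-handling sketch (this mirrors what the *_post_op handlers
+ * below actually do): pull the reply from the shared area into the
+ * local pool, verify the opcode/status words, then read whatever the
+ * SEP returned ("expected_opcode" and "ctx" are placeholders):
+ *
+ *	memcpy(ta_ctx->msg, sep->shared_addr,
+ *	       SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);
+ *	error = sep_verify_op(ta_ctx, expected_opcode, &msg_offset);
+ *	if (!error)
+ *		sep_read_context(ta_ctx, &msg_offset, &ctx, sizeof(ctx));
+ */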
+
+/**
+ * sep_read_context -
+ * @ta_ctx: pointer to struct this_task_ctx
+ * @msg_offset: point to current place in SEP msg; is updated
+ * @dst: pointer to place to put the context
+ * @len: size of the context structure (differs for crypto/hash)
+ * This function reads the context from the msg area
+ * The vendor requires the maximum length to be calculated in a
+ * particular way so that the msg_offset is updated properly;
+ * it skips over some words in the msg area depending on the size
+ * of the context
+ */
+static void sep_read_context(struct this_task_ctx *ta_ctx, u32 *msg_offset,
+ void *dst, u32 len)
+{
+ u32 max_length = ((len + 3) / sizeof(u32)) * sizeof(u32);
+ sep_read_msg(ta_ctx, dst, len, max_length, msg_offset, 0);
+}
+
+/**
+ * sep_write_context -
+ * @ta_ctx: pointer to struct this_task_ctx
+ * @msg_offset: point to current place in SEP msg; is updated
+ * @src: pointer to the current context
+ * @len: size of the context structure (differs for crypto/hash)
+ * This function writes the context to the msg area
+ * The vendor requires the maximum length to be calculated in a
+ * particular way so that the msg_offset is updated properly;
+ * it skips over some words in the msg area depending on the size
+ * of the context
+ */
+static void sep_write_context(struct this_task_ctx *ta_ctx, u32 *msg_offset,
+ void *src, u32 len)
+{
+ u32 max_length = ((len + 3) / sizeof(u32)) * sizeof(u32);
+ sep_write_msg(ta_ctx, src, len, max_length, msg_offset, 0);
+}
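+
+/*
+ * The rounding in the two context helpers pads the length up to a
+ * whole number of message words; e.g. len = 18 gives
+ * ((18 + 3) / 4) * 4 = 20, so msg_offset always advances past any
+ * partial trailing word.
+ */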
+
+/**
+ * sep_clear_out -
+ * @ta_ctx: pointer to struct this_task_ctx
+ * Clear out crypto related values in sep device structure
+ * to enable device to be used by anyone; either kernel
+ * crypto or userspace app via middleware
+ */
+static void sep_clear_out(struct this_task_ctx *ta_ctx)
+{
+ if (ta_ctx->src_sg_hold) {
+ sep_free_sg_buf(ta_ctx->src_sg_hold);
+ ta_ctx->src_sg_hold = NULL;
+ }
+
+ if (ta_ctx->dst_sg_hold) {
+ sep_free_sg_buf(ta_ctx->dst_sg_hold);
+ ta_ctx->dst_sg_hold = NULL;
+ }
+
+ ta_ctx->src_sg = NULL;
+ ta_ctx->dst_sg = NULL;
+
+ sep_free_dma_table_data_handler(ta_ctx->sep_used, &ta_ctx->dma_ctx);
+
+ if (ta_ctx->i_own_sep) {
+ /**
+ * The following unlocks the sep and makes it available
+ * to any other application
+	 * First, null out crypto entries in sep before releasing it
+ */
+ ta_ctx->sep_used->current_hash_req = NULL;
+ ta_ctx->sep_used->current_cypher_req = NULL;
+ ta_ctx->sep_used->current_request = 0;
+ ta_ctx->sep_used->current_hash_stage = 0;
+ ta_ctx->sep_used->ta_ctx = NULL;
+ ta_ctx->sep_used->in_kernel = 0;
+
+ ta_ctx->call_status.status = 0;
+
+		/* Remove anything confidential */
+ memset(ta_ctx->sep_used->shared_addr, 0,
+ SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);
+
+ sep_queue_status_remove(ta_ctx->sep_used, &ta_ctx->queue_elem);
+
+#ifdef SEP_ENABLE_RUNTIME_PM
+ ta_ctx->sep_used->in_use = 0;
+ pm_runtime_mark_last_busy(&ta_ctx->sep_used->pdev->dev);
+ pm_runtime_put_autosuspend(&ta_ctx->sep_used->pdev->dev);
+#endif
+
+ clear_bit(SEP_WORKING_LOCK_BIT,
+ &ta_ctx->sep_used->in_use_flags);
+ ta_ctx->sep_used->pid_doing_transaction = 0;
+
+ dev_dbg(&ta_ctx->sep_used->pdev->dev,
+ "[PID%d] waking up next transaction\n",
+ current->pid);
+
+ clear_bit(SEP_TRANSACTION_STARTED_LOCK_BIT,
+ &ta_ctx->sep_used->in_use_flags);
+ wake_up(&ta_ctx->sep_used->event_transactions);
+
+ ta_ctx->i_own_sep = 0;
+ }
+}
+
+/**
+ * Release crypto infrastructure from EINPROGRESS and
+ * clear sep_dev so that SEP is available to anyone
+ */
+static void sep_crypto_release(struct sep_system_ctx *sctx,
+ struct this_task_ctx *ta_ctx, u32 error)
+{
+ struct ahash_request *hash_req = ta_ctx->current_hash_req;
+ struct ablkcipher_request *cypher_req =
+ ta_ctx->current_cypher_req;
+ struct sep_device *sep = ta_ctx->sep_used;
+
+ sep_clear_out(ta_ctx);
+
+ /**
+	 * This may not yet exist depending on when we
+ * chose to bail out. If it does exist, set
+ * it to 1
+ */
+ if (ta_ctx->are_we_done_yet != NULL)
+ *ta_ctx->are_we_done_yet = 1;
+
+ if (cypher_req != NULL) {
+ if ((sctx->key_sent == 1) ||
+ ((error != 0) && (error != -EINPROGRESS))) {
+ if (cypher_req->base.complete == NULL) {
+ dev_dbg(&sep->pdev->dev,
+ "release is null for cypher!");
+ } else {
+ cypher_req->base.complete(
+ &cypher_req->base, error);
+ }
+ }
+ }
+
+ if (hash_req != NULL) {
+ if (hash_req->base.complete == NULL) {
+ dev_dbg(&sep->pdev->dev,
+ "release is null for hash!");
+ } else {
+ hash_req->base.complete(
+ &hash_req->base, error);
+ }
+ }
+}
+
+/**
+ * This is where we grab the sep itself and tell it to do something.
+ * It will sleep if the sep is currently busy,
+ * and it returns 0 if the sep is now ours, or an error value
+ * if there were problems
+ */
+static int sep_crypto_take_sep(struct this_task_ctx *ta_ctx)
+{
+ struct sep_device *sep = ta_ctx->sep_used;
+ int result;
+ struct sep_msgarea_hdr *my_msg_header;
+
+ my_msg_header = (struct sep_msgarea_hdr *)ta_ctx->msg;
+
+ /* add to status queue */
+ ta_ctx->queue_elem = sep_queue_status_add(sep, my_msg_header->opcode,
+ ta_ctx->nbytes, current->pid,
+ current->comm, sizeof(current->comm));
+
+ if (!ta_ctx->queue_elem) {
+		dev_dbg(&sep->pdev->dev,
+			"[PID%d] updating queue status error\n",
+			current->pid);
+ return -EINVAL;
+ }
+
+ /* get the device; this can sleep */
+ result = sep_wait_transaction(sep);
+ if (result)
+ return result;
+
+ if (sep_dev->power_save_setup == 1)
+ pm_runtime_get_sync(&sep_dev->pdev->dev);
+
+ /* Copy in the message */
+ memcpy(sep->shared_addr, ta_ctx->msg,
+ SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);
+
+ /* Copy in the dcb information if there is any */
+ if (ta_ctx->dcb_region) {
+ result = sep_activate_dcb_dmatables_context(sep,
+ &ta_ctx->dcb_region, &ta_ctx->dmatables_region,
+ ta_ctx->dma_ctx);
+ if (result)
+ return result;
+ }
+
+ /* Mark the device so we know how to finish the job in the tasklet */
+ if (ta_ctx->current_hash_req)
+ sep->current_hash_req = ta_ctx->current_hash_req;
+ else
+ sep->current_cypher_req = ta_ctx->current_cypher_req;
+
+ sep->current_request = ta_ctx->current_request;
+ sep->current_hash_stage = ta_ctx->current_hash_stage;
+ sep->ta_ctx = ta_ctx;
+ sep->in_kernel = 1;
+ ta_ctx->i_own_sep = 1;
+
+ /* need to set bit first to avoid race condition with interrupt */
+ set_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET, &ta_ctx->call_status.status);
+
+ result = sep_send_command_handler(sep);
+
+ dev_dbg(&sep->pdev->dev, "[PID%d]: sending command to the sep\n",
+ current->pid);
+
+ if (!result)
+ dev_dbg(&sep->pdev->dev, "[PID%d]: command sent okay\n",
+ current->pid);
+ else {
+		dev_dbg(&sep->pdev->dev, "[PID%d]: can't send command\n",
+ current->pid);
+ clear_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET,
+ &ta_ctx->call_status.status);
+ }
+
+ return result;
+}
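+
+/*
+ * Caller pattern -- a condensed sketch of what sep_crypto_block and
+ * the hash workers below do around this call: prepare the message,
+ * take the sep, then poll the completion flag with a timeout:
+ *
+ *	int are_we_done_yet = 0;
+ *
+ *	ta_ctx->are_we_done_yet = &are_we_done_yet;
+ *	if (sep_crypto_take_sep(ta_ctx) == 0) {
+ *		end_time = jiffies + (WAIT_TIME * HZ);
+ *		while (time_before(jiffies, end_time) &&
+ *		       (are_we_done_yet == 0))
+ *			schedule();
+ *	}
+ */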
+
+/**
+ * This function sets things up for a crypto data block process.
+ * It does all of the preparation, but does not try to grab the
+ * sep
+ * @req: pointer to struct ablkcipher_request
+ * returns: 0 if all went well, non-zero on error
+ */
+static int sep_crypto_block_data(struct ablkcipher_request *req)
+{
+
+ int int_error;
+ u32 msg_offset;
+ static u32 msg[10];
+ void *src_ptr;
+ void *dst_ptr;
+
+ static char small_buf[100];
+ ssize_t copy_result;
+ int result;
+
+ struct scatterlist *new_sg;
+ struct this_task_ctx *ta_ctx;
+ struct crypto_ablkcipher *tfm;
+ struct sep_system_ctx *sctx;
+
+ struct sep_des_internal_context *des_internal;
+ struct sep_aes_internal_context *aes_internal;
+
+ ta_ctx = ablkcipher_request_ctx(req);
+ tfm = crypto_ablkcipher_reqtfm(req);
+ sctx = crypto_ablkcipher_ctx(tfm);
+
+ /* start the walk on scatterlists */
+ ablkcipher_walk_init(&ta_ctx->walk, req->src, req->dst, req->nbytes);
+ dev_dbg(&ta_ctx->sep_used->pdev->dev, "sep crypto block data size of %x\n",
+ req->nbytes);
+
+ int_error = ablkcipher_walk_phys(req, &ta_ctx->walk);
+ if (int_error) {
+ dev_warn(&ta_ctx->sep_used->pdev->dev, "walk phys error %x\n",
+ int_error);
+ return -ENOMEM;
+ }
+
+ dev_dbg(&ta_ctx->sep_used->pdev->dev,
+ "crypto block: src is %lx dst is %lx\n",
+ (unsigned long)req->src, (unsigned long)req->dst);
+
+ /* Make sure all pages are even block */
+ int_error = sep_oddball_pages(ta_ctx->sep_used, req->src,
+ req->nbytes, ta_ctx->walk.blocksize, &new_sg, 1);
+
+ if (int_error < 0) {
+		dev_warn(&ta_ctx->sep_used->pdev->dev, "oddball page error\n");
+ return -ENOMEM;
+ } else if (int_error == 1) {
+ ta_ctx->src_sg = new_sg;
+ ta_ctx->src_sg_hold = new_sg;
+ } else {
+ ta_ctx->src_sg = req->src;
+ ta_ctx->src_sg_hold = NULL;
+ }
+
+ int_error = sep_oddball_pages(ta_ctx->sep_used, req->dst,
+ req->nbytes, ta_ctx->walk.blocksize, &new_sg, 0);
+
+ if (int_error < 0) {
+ dev_warn(&ta_ctx->sep_used->pdev->dev, "walk phys error %x\n",
+ int_error);
+ return -ENOMEM;
+ } else if (int_error == 1) {
+ ta_ctx->dst_sg = new_sg;
+ ta_ctx->dst_sg_hold = new_sg;
+ } else {
+ ta_ctx->dst_sg = req->dst;
+ ta_ctx->dst_sg_hold = NULL;
+ }
+
+ /* set nbytes for queue status */
+ ta_ctx->nbytes = req->nbytes;
+
+ /* Key already done; this is for data */
+ dev_dbg(&ta_ctx->sep_used->pdev->dev, "sending data\n");
+
+ sep_dump_sg(ta_ctx->sep_used,
+ "block sg in", ta_ctx->src_sg);
+
+ /* check for valid data and proper spacing */
+ src_ptr = sg_virt(ta_ctx->src_sg);
+ dst_ptr = sg_virt(ta_ctx->dst_sg);
+
+ if (!src_ptr || !dst_ptr ||
+ (ta_ctx->current_cypher_req->nbytes %
+ crypto_ablkcipher_blocksize(tfm))) {
+
+ dev_warn(&ta_ctx->sep_used->pdev->dev,
+ "cipher block size odd\n");
+ dev_warn(&ta_ctx->sep_used->pdev->dev,
+ "cipher block size is %x\n",
+ crypto_ablkcipher_blocksize(tfm));
+ dev_warn(&ta_ctx->sep_used->pdev->dev,
+ "cipher data size is %x\n",
+ ta_ctx->current_cypher_req->nbytes);
+ return -EINVAL;
+ }
+
+ if (partial_overlap(src_ptr, dst_ptr,
+ ta_ctx->current_cypher_req->nbytes)) {
+ dev_warn(&ta_ctx->sep_used->pdev->dev,
+ "block partial overlap\n");
+ return -EINVAL;
+ }
+
+ /* Put together the message */
+ sep_make_header(ta_ctx, &msg_offset, ta_ctx->block_opcode);
+
+ /* If des, and size is 1 block, put directly in msg */
+ if ((ta_ctx->block_opcode == SEP_DES_BLOCK_OPCODE) &&
+ (req->nbytes == crypto_ablkcipher_blocksize(tfm))) {
+
+ dev_dbg(&ta_ctx->sep_used->pdev->dev,
+ "writing out one block des\n");
+
+ copy_result = sg_copy_to_buffer(
+ ta_ctx->src_sg, sep_sg_nents(ta_ctx->src_sg),
+ small_buf, crypto_ablkcipher_blocksize(tfm));
+
+ if (copy_result != crypto_ablkcipher_blocksize(tfm)) {
+ dev_warn(&ta_ctx->sep_used->pdev->dev,
+				"des block copy failed\n");
+ return -ENOMEM;
+ }
+
+ /* Put data into message */
+ sep_write_msg(ta_ctx, small_buf,
+ crypto_ablkcipher_blocksize(tfm),
+ crypto_ablkcipher_blocksize(tfm) * 2,
+ &msg_offset, 1);
+
+ /* Put size into message */
+ sep_write_msg(ta_ctx, &req->nbytes,
+ sizeof(u32), sizeof(u32), &msg_offset, 0);
+ } else {
+ /* Otherwise, fill out dma tables */
+ ta_ctx->dcb_input_data.app_in_address = src_ptr;
+ ta_ctx->dcb_input_data.data_in_size = req->nbytes;
+ ta_ctx->dcb_input_data.app_out_address = dst_ptr;
+ ta_ctx->dcb_input_data.block_size =
+ crypto_ablkcipher_blocksize(tfm);
+ ta_ctx->dcb_input_data.tail_block_size = 0;
+ ta_ctx->dcb_input_data.is_applet = 0;
+ ta_ctx->dcb_input_data.src_sg = ta_ctx->src_sg;
+ ta_ctx->dcb_input_data.dst_sg = ta_ctx->dst_sg;
+
+ result = sep_create_dcb_dmatables_context_kernel(
+ ta_ctx->sep_used,
+ &ta_ctx->dcb_region,
+ &ta_ctx->dmatables_region,
+ &ta_ctx->dma_ctx,
+ &ta_ctx->dcb_input_data,
+ 1);
+ if (result) {
+ dev_warn(&ta_ctx->sep_used->pdev->dev,
+ "crypto dma table create failed\n");
+ return -EINVAL;
+ }
+
+ /* Portion of msg is nulled (no data) */
+ msg[0] = (u32)0;
+ msg[1] = (u32)0;
+ msg[2] = (u32)0;
+ msg[3] = (u32)0;
+ msg[4] = (u32)0;
+ sep_write_msg(ta_ctx, (void *)msg, sizeof(u32) * 5,
+ sizeof(u32) * 5, &msg_offset, 0);
+ }
+
+ /**
+ * Before we write the message, we need to overwrite the
+ * vendor's IV with the one from our own ablkcipher walk
+ * iv because this is needed for dm-crypt
+ */
+ sep_dump_ivs(req, "sending data block to sep\n");
+ if ((ta_ctx->current_request == DES_CBC) &&
+ (ta_ctx->des_opmode == SEP_DES_CBC)) {
+
+ dev_dbg(&ta_ctx->sep_used->pdev->dev,
+ "overwrite vendor iv on DES\n");
+ des_internal = (struct sep_des_internal_context *)
+ sctx->des_private_ctx.ctx_buf;
+ memcpy((void *)des_internal->iv_context,
+ ta_ctx->walk.iv, crypto_ablkcipher_ivsize(tfm));
+ } else if ((ta_ctx->current_request == AES_CBC) &&
+ (ta_ctx->aes_opmode == SEP_AES_CBC)) {
+
+ dev_dbg(&ta_ctx->sep_used->pdev->dev,
+ "overwrite vendor iv on AES\n");
+ aes_internal = (struct sep_aes_internal_context *)
+ sctx->aes_private_ctx.cbuff;
+ memcpy((void *)aes_internal->aes_ctx_iv,
+ ta_ctx->walk.iv, crypto_ablkcipher_ivsize(tfm));
+ }
+
+ /* Write context into message */
+ if (ta_ctx->block_opcode == SEP_DES_BLOCK_OPCODE) {
+ sep_write_context(ta_ctx, &msg_offset,
+ &sctx->des_private_ctx,
+ sizeof(struct sep_des_private_context));
+ sep_dump(ta_ctx->sep_used, "ctx to block des",
+ &sctx->des_private_ctx, 40);
+ } else {
+ sep_write_context(ta_ctx, &msg_offset,
+ &sctx->aes_private_ctx,
+ sizeof(struct sep_aes_private_context));
+ sep_dump(ta_ctx->sep_used, "ctx to block aes",
+ &sctx->aes_private_ctx, 20);
+ }
+
+ /* conclude message */
+ sep_end_msg(ta_ctx, msg_offset);
+
+	/* Parent (caller) is now ready to tell the sep to go ahead */
+ return 0;
+}
+
+
+/**
+ * This function sets things up for a crypto key submit process.
+ * It does all of the preparation, but does not try to grab the
+ * sep
+ * @req: pointer to struct ablkcipher_request
+ * returns: 0 if all went well, non-zero on error
+ */
+static int sep_crypto_send_key(struct ablkcipher_request *req)
+{
+
+ int int_error;
+ u32 msg_offset;
+ static u32 msg[10];
+
+ u32 max_length;
+ struct this_task_ctx *ta_ctx;
+ struct crypto_ablkcipher *tfm;
+ struct sep_system_ctx *sctx;
+
+ ta_ctx = ablkcipher_request_ctx(req);
+ tfm = crypto_ablkcipher_reqtfm(req);
+ sctx = crypto_ablkcipher_ctx(tfm);
+
+ dev_dbg(&ta_ctx->sep_used->pdev->dev, "sending key\n");
+
+ /* start the walk on scatterlists */
+ ablkcipher_walk_init(&ta_ctx->walk, req->src, req->dst, req->nbytes);
+ dev_dbg(&ta_ctx->sep_used->pdev->dev,
+ "sep crypto block data size of %x\n", req->nbytes);
+
+ int_error = ablkcipher_walk_phys(req, &ta_ctx->walk);
+ if (int_error) {
+ dev_warn(&ta_ctx->sep_used->pdev->dev, "walk phys error %x\n",
+ int_error);
+ return -ENOMEM;
+ }
+
+ /* check iv */
+ if ((ta_ctx->current_request == DES_CBC) &&
+ (ta_ctx->des_opmode == SEP_DES_CBC)) {
+ if (!ta_ctx->walk.iv) {
+ dev_warn(&ta_ctx->sep_used->pdev->dev, "no iv found\n");
+ return -EINVAL;
+ }
+
+ memcpy(ta_ctx->iv, ta_ctx->walk.iv, SEP_DES_IV_SIZE_BYTES);
+ sep_dump(ta_ctx->sep_used, "iv",
+ ta_ctx->iv, SEP_DES_IV_SIZE_BYTES);
+ }
+
+ if ((ta_ctx->current_request == AES_CBC) &&
+ (ta_ctx->aes_opmode == SEP_AES_CBC)) {
+ if (!ta_ctx->walk.iv) {
+ dev_warn(&ta_ctx->sep_used->pdev->dev, "no iv found\n");
+ return -EINVAL;
+ }
+
+ memcpy(ta_ctx->iv, ta_ctx->walk.iv, SEP_AES_IV_SIZE_BYTES);
+ sep_dump(ta_ctx->sep_used, "iv",
+ ta_ctx->iv, SEP_AES_IV_SIZE_BYTES);
+ }
+
+ /* put together message to SEP */
+ /* Start with op code */
+ sep_make_header(ta_ctx, &msg_offset, ta_ctx->init_opcode);
+
+ /* now deal with IV */
+ if (ta_ctx->init_opcode == SEP_DES_INIT_OPCODE) {
+ if (ta_ctx->des_opmode == SEP_DES_CBC) {
+ sep_write_msg(ta_ctx, ta_ctx->iv,
+ SEP_DES_IV_SIZE_BYTES, sizeof(u32) * 4,
+ &msg_offset, 1);
+ sep_dump(ta_ctx->sep_used, "initial IV",
+ ta_ctx->walk.iv, SEP_DES_IV_SIZE_BYTES);
+ } else {
+ /* Skip if ECB */
+ msg_offset += 4 * sizeof(u32);
+ }
+ } else {
+ max_length = ((SEP_AES_IV_SIZE_BYTES + 3) /
+ sizeof(u32)) * sizeof(u32);
+ if (ta_ctx->aes_opmode == SEP_AES_CBC) {
+ sep_write_msg(ta_ctx, ta_ctx->iv,
+ SEP_AES_IV_SIZE_BYTES, max_length,
+ &msg_offset, 1);
+ sep_dump(ta_ctx->sep_used, "initial IV",
+ ta_ctx->walk.iv, SEP_AES_IV_SIZE_BYTES);
+ } else {
+ /* Skip if ECB */
+ msg_offset += max_length;
+ }
+ }
+
+ /* load the key */
+ if (ta_ctx->init_opcode == SEP_DES_INIT_OPCODE) {
+ sep_write_msg(ta_ctx, (void *)&sctx->key.des.key1,
+ sizeof(u32) * 8, sizeof(u32) * 8,
+ &msg_offset, 1);
+
+ msg[0] = (u32)sctx->des_nbr_keys;
+ msg[1] = (u32)ta_ctx->des_encmode;
+ msg[2] = (u32)ta_ctx->des_opmode;
+
+ sep_write_msg(ta_ctx, (void *)msg,
+ sizeof(u32) * 3, sizeof(u32) * 3,
+ &msg_offset, 0);
+ } else {
+ sep_write_msg(ta_ctx, (void *)&sctx->key.aes,
+ sctx->keylen,
+ SEP_AES_MAX_KEY_SIZE_BYTES,
+ &msg_offset, 1);
+
+ msg[0] = (u32)sctx->aes_key_size;
+ msg[1] = (u32)ta_ctx->aes_encmode;
+ msg[2] = (u32)ta_ctx->aes_opmode;
+ msg[3] = (u32)0; /* Secret key is not used */
+ sep_write_msg(ta_ctx, (void *)msg,
+ sizeof(u32) * 4, sizeof(u32) * 4,
+ &msg_offset, 0);
+ }
+
+ /* conclude message */
+ sep_end_msg(ta_ctx, msg_offset);
+
+	/* Parent (caller) is now ready to tell the sep to go ahead */
+ return 0;
+}
+
+
+/* This needs to be run as a work queue as it can be put asleep */
+static void sep_crypto_block(void *data)
+{
+ unsigned long end_time;
+
+ int result;
+
+ struct ablkcipher_request *req;
+ struct this_task_ctx *ta_ctx;
+ struct crypto_ablkcipher *tfm;
+ struct sep_system_ctx *sctx;
+ int are_we_done_yet;
+
+ req = (struct ablkcipher_request *)data;
+ ta_ctx = ablkcipher_request_ctx(req);
+ tfm = crypto_ablkcipher_reqtfm(req);
+ sctx = crypto_ablkcipher_ctx(tfm);
+
+ ta_ctx->are_we_done_yet = &are_we_done_yet;
+
+ pr_debug("sep_crypto_block\n");
+ pr_debug("tfm is %p sctx is %p ta_ctx is %p\n",
+ tfm, sctx, ta_ctx);
+ pr_debug("key_sent is %d\n", sctx->key_sent);
+
+ /* do we need to send the key */
+ if (sctx->key_sent == 0) {
+ are_we_done_yet = 0;
+ result = sep_crypto_send_key(req); /* prep to send key */
+ if (result != 0) {
+ dev_dbg(&ta_ctx->sep_used->pdev->dev,
+ "could not prep key %x\n", result);
+ sep_crypto_release(sctx, ta_ctx, result);
+ return;
+ }
+
+ result = sep_crypto_take_sep(ta_ctx);
+ if (result) {
+ dev_warn(&ta_ctx->sep_used->pdev->dev,
+ "sep_crypto_take_sep for key send failed\n");
+ sep_crypto_release(sctx, ta_ctx, result);
+ return;
+ }
+
+ /* now we sit and wait up to a fixed time for completion */
+ end_time = jiffies + (WAIT_TIME * HZ);
+ while ((time_before(jiffies, end_time)) &&
+ (are_we_done_yet == 0))
+ schedule();
+
+ /* Done waiting; still not done yet? */
+ if (are_we_done_yet == 0) {
+ dev_dbg(&ta_ctx->sep_used->pdev->dev,
+ "Send key job never got done\n");
+ sep_crypto_release(sctx, ta_ctx, -EINVAL);
+ return;
+ }
+
+ /* Set the key sent variable so this can be skipped later */
+ sctx->key_sent = 1;
+ }
+
+ /* Key sent (or maybe not if we did not have to), now send block */
+ are_we_done_yet = 0;
+
+ result = sep_crypto_block_data(req);
+
+ if (result != 0) {
+ dev_dbg(&ta_ctx->sep_used->pdev->dev,
+			"could not prep send block %x\n", result);
+ sep_crypto_release(sctx, ta_ctx, result);
+ return;
+ }
+
+ result = sep_crypto_take_sep(ta_ctx);
+ if (result) {
+ dev_warn(&ta_ctx->sep_used->pdev->dev,
+ "sep_crypto_take_sep for block send failed\n");
+ sep_crypto_release(sctx, ta_ctx, result);
+ return;
+ }
+
+ /* now we sit and wait up to a fixed time for completion */
+ end_time = jiffies + (WAIT_TIME * HZ);
+ while ((time_before(jiffies, end_time)) && (are_we_done_yet == 0))
+ schedule();
+
+ /* Done waiting; still not done yet? */
+ if (are_we_done_yet == 0) {
+ dev_dbg(&ta_ctx->sep_used->pdev->dev,
+ "Send block job never got done\n");
+ sep_crypto_release(sctx, ta_ctx, -EINVAL);
+ return;
+ }
+
+ /* That's it; entire thing done, get out of queue */
+
+ pr_debug("crypto_block leaving\n");
+ pr_debug("tfm is %p sctx is %p ta_ctx is %p\n", tfm, sctx, ta_ctx);
+}
+
+/**
+ * Post operation (after interrupt) for crypto block
+ */
+static u32 crypto_post_op(struct sep_device *sep)
+{
+ u32 u32_error;
+ u32 msg_offset;
+
+ ssize_t copy_result;
+ static char small_buf[100];
+
+ struct ablkcipher_request *req;
+ struct this_task_ctx *ta_ctx;
+ struct sep_system_ctx *sctx;
+ struct crypto_ablkcipher *tfm;
+
+ struct sep_des_internal_context *des_internal;
+ struct sep_aes_internal_context *aes_internal;
+
+ if (!sep->current_cypher_req)
+ return -EINVAL;
+
+ /* hold req since we need to submit work after clearing sep */
+ req = sep->current_cypher_req;
+
+ ta_ctx = ablkcipher_request_ctx(sep->current_cypher_req);
+ tfm = crypto_ablkcipher_reqtfm(sep->current_cypher_req);
+ sctx = crypto_ablkcipher_ctx(tfm);
+
+ pr_debug("crypto_post op\n");
+ pr_debug("key_sent is %d tfm is %p sctx is %p ta_ctx is %p\n",
+ sctx->key_sent, tfm, sctx, ta_ctx);
+
+ dev_dbg(&ta_ctx->sep_used->pdev->dev, "crypto post_op\n");
+ dev_dbg(&ta_ctx->sep_used->pdev->dev, "crypto post_op message dump\n");
+ crypto_sep_dump_message(ta_ctx->sep_used, ta_ctx->msg);
+
+ /* first bring msg from shared area to local area */
+ memcpy(ta_ctx->msg, sep->shared_addr,
+ SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);
+
+	/* Is this the result of performing init (sending key to SEP)? */
+ if (sctx->key_sent == 0) {
+
+ /* Did SEP do it okay */
+ u32_error = sep_verify_op(ta_ctx, ta_ctx->init_opcode,
+ &msg_offset);
+ if (u32_error) {
+ dev_warn(&ta_ctx->sep_used->pdev->dev,
+				"cipher init error %x\n", u32_error);
+ sep_crypto_release(sctx, ta_ctx, u32_error);
+ return u32_error;
+ }
+
+ /* Read Context */
+ if (ta_ctx->init_opcode == SEP_DES_INIT_OPCODE) {
+ sep_read_context(ta_ctx, &msg_offset,
+ &sctx->des_private_ctx,
+ sizeof(struct sep_des_private_context));
+
+ sep_dump(ta_ctx->sep_used, "ctx init des",
+ &sctx->des_private_ctx, 40);
+ } else {
+ sep_read_context(ta_ctx, &msg_offset,
+ &sctx->aes_private_ctx,
+ sizeof(struct sep_aes_private_context));
+
+ sep_dump(ta_ctx->sep_used, "ctx init aes",
+ &sctx->aes_private_ctx, 20);
+ }
+
+ sep_dump_ivs(req, "after sending key to sep\n");
+
+ /* key sent went okay; release sep, and set are_we_done_yet */
+ sctx->key_sent = 1;
+ sep_crypto_release(sctx, ta_ctx, -EINPROGRESS);
+
+ } else {
+
+ /**
+ * This is the result of a block request
+ */
+ dev_dbg(&ta_ctx->sep_used->pdev->dev,
+ "crypto_post_op block response\n");
+
+ u32_error = sep_verify_op(ta_ctx, ta_ctx->block_opcode,
+ &msg_offset);
+
+ if (u32_error) {
+ dev_warn(&ta_ctx->sep_used->pdev->dev,
+ "sep block error %x\n", u32_error);
+ sep_crypto_release(sctx, ta_ctx, u32_error);
+ return -EINVAL;
+ }
+
+ if (ta_ctx->block_opcode == SEP_DES_BLOCK_OPCODE) {
+
+ dev_dbg(&ta_ctx->sep_used->pdev->dev,
+ "post op for DES\n");
+
+ /* special case for 1 block des */
+ if (sep->current_cypher_req->nbytes ==
+ crypto_ablkcipher_blocksize(tfm)) {
+
+ sep_read_msg(ta_ctx, small_buf,
+ crypto_ablkcipher_blocksize(tfm),
+ crypto_ablkcipher_blocksize(tfm) * 2,
+ &msg_offset, 1);
+
+ dev_dbg(&ta_ctx->sep_used->pdev->dev,
+ "reading in block des\n");
+
+ copy_result = sg_copy_from_buffer(
+ ta_ctx->dst_sg,
+ sep_sg_nents(ta_ctx->dst_sg),
+ small_buf,
+ crypto_ablkcipher_blocksize(tfm));
+
+ if (copy_result !=
+ crypto_ablkcipher_blocksize(tfm)) {
+
+ dev_warn(&ta_ctx->sep_used->pdev->dev,
+					"des block copy failed\n");
+ sep_crypto_release(sctx, ta_ctx,
+ -ENOMEM);
+ return -ENOMEM;
+ }
+ }
+
+ /* Read Context */
+ sep_read_context(ta_ctx, &msg_offset,
+ &sctx->des_private_ctx,
+ sizeof(struct sep_des_private_context));
+ } else {
+
+ dev_dbg(&ta_ctx->sep_used->pdev->dev,
+ "post op for AES\n");
+
+ /* Skip the MAC Output */
+ msg_offset += (sizeof(u32) * 4);
+
+ /* Read Context */
+ sep_read_context(ta_ctx, &msg_offset,
+ &sctx->aes_private_ctx,
+ sizeof(struct sep_aes_private_context));
+ }
+
+ sep_dump_sg(ta_ctx->sep_used,
+ "block sg out", ta_ctx->dst_sg);
+
+ /* Copy to correct sg if this block had oddball pages */
+ if (ta_ctx->dst_sg_hold)
+ sep_copy_sg(ta_ctx->sep_used,
+ ta_ctx->dst_sg,
+ ta_ctx->current_cypher_req->dst,
+ ta_ctx->current_cypher_req->nbytes);
+
+ /**
+		 * Copy the IVs back to the walk.iv;
+		 * this is required for dm_crypt
+ */
+ sep_dump_ivs(req, "got data block from sep\n");
+ if ((ta_ctx->current_request == DES_CBC) &&
+ (ta_ctx->des_opmode == SEP_DES_CBC)) {
+
+ dev_dbg(&ta_ctx->sep_used->pdev->dev,
+ "returning result iv to walk on DES\n");
+ des_internal = (struct sep_des_internal_context *)
+ sctx->des_private_ctx.ctx_buf;
+ memcpy(ta_ctx->walk.iv,
+ (void *)des_internal->iv_context,
+ crypto_ablkcipher_ivsize(tfm));
+ } else if ((ta_ctx->current_request == AES_CBC) &&
+ (ta_ctx->aes_opmode == SEP_AES_CBC)) {
+
+ dev_dbg(&ta_ctx->sep_used->pdev->dev,
+ "returning result iv to walk on AES\n");
+ aes_internal = (struct sep_aes_internal_context *)
+ sctx->aes_private_ctx.cbuff;
+ memcpy(ta_ctx->walk.iv,
+ (void *)aes_internal->aes_ctx_iv,
+ crypto_ablkcipher_ivsize(tfm));
+ }
+
+ /* finished, release everything */
+ sep_crypto_release(sctx, ta_ctx, 0);
+ }
+ pr_debug("crypto_post_op done\n");
+ pr_debug("key_sent is %d tfm is %p sctx is %p ta_ctx is %p\n",
+ sctx->key_sent, tfm, sctx, ta_ctx);
+
+ return 0;
+}
+
+static u32 hash_init_post_op(struct sep_device *sep)
+{
+ u32 u32_error;
+ u32 msg_offset;
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(sep->current_hash_req);
+ struct this_task_ctx *ta_ctx = ahash_request_ctx(sep->current_hash_req);
+ struct sep_system_ctx *sctx = crypto_ahash_ctx(tfm);
+ dev_dbg(&ta_ctx->sep_used->pdev->dev,
+ "hash init post op\n");
+
+ /* first bring msg from shared area to local area */
+ memcpy(ta_ctx->msg, sep->shared_addr,
+ SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);
+
+ u32_error = sep_verify_op(ta_ctx, SEP_HASH_INIT_OPCODE,
+ &msg_offset);
+
+ if (u32_error) {
+ dev_warn(&ta_ctx->sep_used->pdev->dev, "hash init error %x\n",
+ u32_error);
+ sep_crypto_release(sctx, ta_ctx, u32_error);
+ return u32_error;
+ }
+
+ /* Read Context */
+ sep_read_context(ta_ctx, &msg_offset,
+ &sctx->hash_private_ctx,
+ sizeof(struct sep_hash_private_context));
+
+ /* Signal to crypto infrastructure and clear out */
+ dev_dbg(&ta_ctx->sep_used->pdev->dev, "hash init post op done\n");
+ sep_crypto_release(sctx, ta_ctx, 0);
+ return 0;
+}
+
+static u32 hash_update_post_op(struct sep_device *sep)
+{
+ u32 u32_error;
+ u32 msg_offset;
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(sep->current_hash_req);
+ struct this_task_ctx *ta_ctx = ahash_request_ctx(sep->current_hash_req);
+ struct sep_system_ctx *sctx = crypto_ahash_ctx(tfm);
+ dev_dbg(&ta_ctx->sep_used->pdev->dev,
+ "hash update post op\n");
+
+ /* first bring msg from shared area to local area */
+ memcpy(ta_ctx->msg, sep->shared_addr,
+ SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);
+
+ u32_error = sep_verify_op(ta_ctx, SEP_HASH_UPDATE_OPCODE,
+ &msg_offset);
+
+ if (u32_error) {
+		dev_warn(&ta_ctx->sep_used->pdev->dev, "hash update error %x\n",
+ u32_error);
+ sep_crypto_release(sctx, ta_ctx, u32_error);
+ return u32_error;
+ }
+
+ /* Read Context */
+ sep_read_context(ta_ctx, &msg_offset,
+ &sctx->hash_private_ctx,
+ sizeof(struct sep_hash_private_context));
+
+ /**
+	 * The following is only for finup; if we just completed the
+ * data portion of finup, we now need to kick off the
+ * finish portion of finup.
+ */
+
+ if (ta_ctx->sep_used->current_hash_stage == HASH_FINUP_DATA) {
+
+ /* first reset stage to HASH_FINUP_FINISH */
+ ta_ctx->sep_used->current_hash_stage = HASH_FINUP_FINISH;
+
+ /* now enqueue the finish operation */
+ spin_lock_irq(&queue_lock);
+ u32_error = crypto_enqueue_request(&sep_queue,
+ &ta_ctx->sep_used->current_hash_req->base);
+ spin_unlock_irq(&queue_lock);
+
+ if ((u32_error != 0) && (u32_error != -EINPROGRESS)) {
+ dev_warn(&ta_ctx->sep_used->pdev->dev,
+				"sep cipher post op can't queue\n");
+ sep_crypto_release(sctx, ta_ctx, u32_error);
+ return u32_error;
+ }
+
+ /* schedule the data send */
+ u32_error = sep_submit_work(ta_ctx->sep_used->workqueue,
+ sep_dequeuer, (void *)&sep_queue);
+
+ if (u32_error) {
+ dev_warn(&ta_ctx->sep_used->pdev->dev,
+				"can't submit work sep_crypto_block\n");
+ sep_crypto_release(sctx, ta_ctx, -EINVAL);
+ return -EINVAL;
+ }
+ }
+
+ /* Signal to crypto infrastructure and clear out */
+ dev_dbg(&ta_ctx->sep_used->pdev->dev, "hash update post op done\n");
+ sep_crypto_release(sctx, ta_ctx, 0);
+ return 0;
+}
+
+static u32 hash_final_post_op(struct sep_device *sep)
+{
+ int max_length;
+ u32 u32_error;
+ u32 msg_offset;
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(sep->current_hash_req);
+ struct sep_system_ctx *sctx = crypto_ahash_ctx(tfm);
+ struct this_task_ctx *ta_ctx = ahash_request_ctx(sep->current_hash_req);
+ dev_dbg(&ta_ctx->sep_used->pdev->dev,
+ "hash final post op\n");
+
+ /* first bring msg from shared area to local area */
+ memcpy(ta_ctx->msg, sep->shared_addr,
+ SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);
+
+ u32_error = sep_verify_op(ta_ctx, SEP_HASH_FINISH_OPCODE,
+ &msg_offset);
+
+ if (u32_error) {
+ dev_warn(&ta_ctx->sep_used->pdev->dev, "hash finish error %x\n",
+ u32_error);
+ sep_crypto_release(sctx, ta_ctx, u32_error);
+ return u32_error;
+ }
+
+ /* Grab the result */
+ if (ta_ctx->current_hash_req->result == NULL) {
+ /* Oops, null buffer; error out here */
+ dev_warn(&ta_ctx->sep_used->pdev->dev,
+ "hash finish null buffer\n");
+ sep_crypto_release(sctx, ta_ctx, (u32)-ENOMEM);
+ return -ENOMEM;
+ }
+
+ max_length = (((SEP_HASH_RESULT_SIZE_WORDS * sizeof(u32)) + 3) /
+ sizeof(u32)) * sizeof(u32);
+
+ sep_read_msg(ta_ctx,
+ ta_ctx->current_hash_req->result,
+ crypto_ahash_digestsize(tfm), max_length,
+ &msg_offset, 0);
+
+ /* Signal to crypto infrastructure and clear out */
+ dev_dbg(&ta_ctx->sep_used->pdev->dev, "hash finish post op done\n");
+ sep_crypto_release(sctx, ta_ctx, 0);
+ return 0;
+}
+
+static u32 hash_digest_post_op(struct sep_device *sep)
+{
+ int max_length;
+ u32 u32_error;
+ u32 msg_offset;
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(sep->current_hash_req);
+ struct sep_system_ctx *sctx = crypto_ahash_ctx(tfm);
+ struct this_task_ctx *ta_ctx = ahash_request_ctx(sep->current_hash_req);
+ dev_dbg(&ta_ctx->sep_used->pdev->dev,
+ "hash digest post op\n");
+
+ /* first bring msg from shared area to local area */
+ memcpy(ta_ctx->msg, sep->shared_addr,
+ SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);
+
+ u32_error = sep_verify_op(ta_ctx, SEP_HASH_SINGLE_OPCODE,
+ &msg_offset);
+
+ if (u32_error) {
+ dev_warn(&ta_ctx->sep_used->pdev->dev,
+ "hash digest finish error %x\n", u32_error);
+
+ sep_crypto_release(sctx, ta_ctx, u32_error);
+ return u32_error;
+ }
+
+ /* Grab the result */
+ if (ta_ctx->current_hash_req->result == NULL) {
+ /* Oops, null buffer; error out here */
+ dev_warn(&ta_ctx->sep_used->pdev->dev,
+ "hash digest finish null buffer\n");
+ sep_crypto_release(sctx, ta_ctx, (u32)-ENOMEM);
+ return -ENOMEM;
+ }
+
+ max_length = (((SEP_HASH_RESULT_SIZE_WORDS * sizeof(u32)) + 3) /
+ sizeof(u32)) * sizeof(u32);
+
+ sep_read_msg(ta_ctx,
+ ta_ctx->current_hash_req->result,
+ crypto_ahash_digestsize(tfm), max_length,
+ &msg_offset, 0);
+
+ /* Signal to crypto infrastructure and clear out */
+ dev_dbg(&ta_ctx->sep_used->pdev->dev,
+ "hash digest finish post op done\n");
+
+ sep_crypto_release(sctx, ta_ctx, 0);
+ return 0;
+}
+
+/**
+ * The sep_finish function is scheduled (via tasklet) by the
+ * interrupt service routine when the SEP sends an interrupt.
+ * This is only called by the interrupt handler as a tasklet.
+ */
+static void sep_finish(unsigned long data)
+{
+ struct sep_device *sep_dev;
+ int res;
+
+ res = 0;
+
+ if (data == 0) {
+ pr_debug("sep_finish called with null data\n");
+ return;
+ }
+
+ sep_dev = (struct sep_device *)data;
+ if (sep_dev == NULL) {
+ pr_debug("sep_finish; sep_dev is NULL\n");
+ return;
+ }
+
+ if (sep_dev->in_kernel == (u32)0) {
+ dev_warn(&sep_dev->pdev->dev,
+ "sep_finish; not in kernel operation\n");
+ return;
+ }
+
+ /* Did we really do a sep command prior to this? */
+ if (0 == test_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET,
+ &sep_dev->ta_ctx->call_status.status)) {
+
+ dev_warn(&sep_dev->pdev->dev, "[PID%d] sendmsg not called\n",
+ current->pid);
+ return;
+ }
+
+ if (sep_dev->send_ct != sep_dev->reply_ct) {
+ dev_warn(&sep_dev->pdev->dev,
+ "[PID%d] poll; no message came back\n",
+ current->pid);
+ return;
+ }
+
+ /* Check for error (In case time ran out) */
+ if ((res != 0x0) && (res != 0x8)) {
+ dev_warn(&sep_dev->pdev->dev,
+ "[PID%d] poll; poll error GPR3 is %x\n",
+ current->pid, res);
+ return;
+ }
+
+ /* What kind of interrupt from sep was this? */
+ res = sep_read_reg(sep_dev, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
+
+ dev_dbg(&sep_dev->pdev->dev, "[PID%d] GPR2 at crypto finish is %x\n",
+ current->pid, res);
+
+ /* Print request? */
+ if ((res >> 30) & 0x1) {
+ dev_dbg(&sep_dev->pdev->dev, "[PID%d] sep print req\n",
+ current->pid);
+ dev_dbg(&sep_dev->pdev->dev, "[PID%d] contents: %s\n",
+ current->pid,
+ (char *)(sep_dev->shared_addr +
+ SEP_DRIVER_PRINTF_OFFSET_IN_BYTES));
+ return;
+ }
+
+ /* Request for daemon (not currently in POR)? */
+ if (res >> 31) {
+ dev_dbg(&sep_dev->pdev->dev,
+ "[PID%d] sep request; ignoring\n",
+ current->pid);
+ return;
+ }
+
+	/* If we got here, then we have a reply to a sep command */
+
+ dev_dbg(&sep_dev->pdev->dev,
+ "[PID%d] sep reply to command; processing request: %x\n",
+ current->pid, sep_dev->current_request);
+
+ switch (sep_dev->current_request) {
+ case AES_CBC:
+ case AES_ECB:
+ case DES_CBC:
+ case DES_ECB:
+ res = crypto_post_op(sep_dev);
+ break;
+ case SHA1:
+ case MD5:
+ case SHA224:
+ case SHA256:
+ switch (sep_dev->current_hash_stage) {
+ case HASH_INIT:
+ res = hash_init_post_op(sep_dev);
+ break;
+ case HASH_UPDATE:
+ case HASH_FINUP_DATA:
+ res = hash_update_post_op(sep_dev);
+ break;
+ case HASH_FINUP_FINISH:
+ case HASH_FINISH:
+ res = hash_final_post_op(sep_dev);
+ break;
+ case HASH_DIGEST:
+ res = hash_digest_post_op(sep_dev);
+ break;
+ default:
+ pr_debug("sep - invalid stage for hash finish\n");
+ }
+ break;
+ default:
+ pr_debug("sep - invalid request for finish\n");
+ }
+
+ if (res)
+ pr_debug("sep - finish returned error %x\n", res);
+}
+
+static int sep_hash_cra_init(struct crypto_tfm *tfm)
+{
+	const char *alg_name = crypto_tfm_alg_name(tfm);
+
+	pr_debug("sep_hash_cra_init name is %s\n", alg_name);
+
+	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+			sizeof(struct this_task_ctx));
+	return 0;
+}
+
+static void sep_hash_cra_exit(struct crypto_tfm *tfm)
+{
+ pr_debug("sep_hash_cra_exit\n");
+}
+
+static void sep_hash_init(void *data)
+{
+ u32 msg_offset;
+ int result;
+ struct ahash_request *req;
+ struct crypto_ahash *tfm;
+ struct this_task_ctx *ta_ctx;
+ struct sep_system_ctx *sctx;
+ unsigned long end_time;
+ int are_we_done_yet;
+
+ req = (struct ahash_request *)data;
+ tfm = crypto_ahash_reqtfm(req);
+ sctx = crypto_ahash_ctx(tfm);
+ ta_ctx = ahash_request_ctx(req);
+ ta_ctx->sep_used = sep_dev;
+
+ ta_ctx->are_we_done_yet = &are_we_done_yet;
+
+ dev_dbg(&ta_ctx->sep_used->pdev->dev,
+ "sep_hash_init\n");
+ ta_ctx->current_hash_stage = HASH_INIT;
+ /* opcode and mode */
+ sep_make_header(ta_ctx, &msg_offset, SEP_HASH_INIT_OPCODE);
+ sep_write_msg(ta_ctx, &ta_ctx->hash_opmode,
+ sizeof(u32), sizeof(u32), &msg_offset, 0);
+ sep_end_msg(ta_ctx, msg_offset);
+
+ are_we_done_yet = 0;
+ result = sep_crypto_take_sep(ta_ctx);
+ if (result) {
+ dev_warn(&ta_ctx->sep_used->pdev->dev,
+ "sep_hash_init take sep failed\n");
+ sep_crypto_release(sctx, ta_ctx, -EINVAL);
+ }
+
+ /* now we sit and wait up to a fixed time for completion */
+ end_time = jiffies + (WAIT_TIME * HZ);
+ while ((time_before(jiffies, end_time)) && (are_we_done_yet == 0))
+ schedule();
+
+ /* Done waiting; still not done yet? */
+ if (are_we_done_yet == 0) {
+ dev_dbg(&ta_ctx->sep_used->pdev->dev,
+ "hash init never got done\n");
+ sep_crypto_release(sctx, ta_ctx, -EINVAL);
+ return;
+ }
+}
+
+static void sep_hash_update(void *data)
+{
+ int int_error;
+ u32 msg_offset;
+ u32 len;
+ struct sep_hash_internal_context *int_ctx;
+ u32 block_size;
+ u32 head_len;
+ u32 tail_len;
+ int are_we_done_yet;
+
+ static u32 msg[10];
+ static char small_buf[100];
+ void *src_ptr;
+ struct scatterlist *new_sg;
+ ssize_t copy_result;
+ struct ahash_request *req;
+ struct crypto_ahash *tfm;
+ struct this_task_ctx *ta_ctx;
+ struct sep_system_ctx *sctx;
+ unsigned long end_time;
+
+ req = (struct ahash_request *)data;
+ tfm = crypto_ahash_reqtfm(req);
+ sctx = crypto_ahash_ctx(tfm);
+ ta_ctx = ahash_request_ctx(req);
+ ta_ctx->sep_used = sep_dev;
+
+ ta_ctx->are_we_done_yet = &are_we_done_yet;
+
+ /* length for queue status */
+ ta_ctx->nbytes = req->nbytes;
+
+ dev_dbg(&ta_ctx->sep_used->pdev->dev,
+ "sep_hash_update\n");
+ ta_ctx->current_hash_stage = HASH_UPDATE;
+ len = req->nbytes;
+
+ block_size = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
+ tail_len = req->nbytes % block_size;
+ dev_dbg(&ta_ctx->sep_used->pdev->dev, "length is %x\n", len);
+ dev_dbg(&ta_ctx->sep_used->pdev->dev, "block_size is %x\n", block_size);
+ dev_dbg(&ta_ctx->sep_used->pdev->dev, "tail len is %x\n", tail_len);
+
+ /* Compute header/tail sizes */
+	int_ctx = (struct sep_hash_internal_context *)
+		&sctx->hash_private_ctx.internal_context;
+ head_len = (block_size - int_ctx->prev_update_bytes) % block_size;
+ tail_len = (req->nbytes - head_len) % block_size;
+
+ /* Make sure all pages are even block */
+ int_error = sep_oddball_pages(ta_ctx->sep_used, req->src,
+ req->nbytes,
+ block_size, &new_sg, 1);
+
+ if (int_error < 0) {
+ dev_warn(&ta_ctx->sep_used->pdev->dev,
+			"oddball pages error in hash update\n");
+ sep_crypto_release(sctx, ta_ctx, -ENOMEM);
+ return;
+ } else if (int_error == 1) {
+ ta_ctx->src_sg = new_sg;
+ ta_ctx->src_sg_hold = new_sg;
+ } else {
+ ta_ctx->src_sg = req->src;
+ ta_ctx->src_sg_hold = NULL;
+ }
+
+ src_ptr = sg_virt(ta_ctx->src_sg);
+
+ if ((!req->nbytes) || (!ta_ctx->src_sg)) {
+ /* null data */
+ src_ptr = NULL;
+ }
+
+ sep_dump_sg(ta_ctx->sep_used, "hash block sg in", ta_ctx->src_sg);
+
+ ta_ctx->dcb_input_data.app_in_address = src_ptr;
+ ta_ctx->dcb_input_data.data_in_size =
+ req->nbytes - (head_len + tail_len);
+ ta_ctx->dcb_input_data.app_out_address = NULL;
+ ta_ctx->dcb_input_data.block_size = block_size;
+ ta_ctx->dcb_input_data.tail_block_size = 0;
+ ta_ctx->dcb_input_data.is_applet = 0;
+ ta_ctx->dcb_input_data.src_sg = ta_ctx->src_sg;
+ ta_ctx->dcb_input_data.dst_sg = NULL;
+
+ int_error = sep_create_dcb_dmatables_context_kernel(
+ ta_ctx->sep_used,
+ &ta_ctx->dcb_region,
+ &ta_ctx->dmatables_region,
+ &ta_ctx->dma_ctx,
+ &ta_ctx->dcb_input_data,
+ 1);
+ if (int_error) {
+ dev_warn(&ta_ctx->sep_used->pdev->dev,
+ "hash update dma table create failed\n");
+ sep_crypto_release(sctx, ta_ctx, -EINVAL);
+ return;
+ }
+
+ /* Construct message to SEP */
+ sep_make_header(ta_ctx, &msg_offset, SEP_HASH_UPDATE_OPCODE);
+
+ msg[0] = (u32)0;
+ msg[1] = (u32)0;
+ msg[2] = (u32)0;
+
+ sep_write_msg(ta_ctx, msg, sizeof(u32) * 3, sizeof(u32) * 3,
+ &msg_offset, 0);
+
+ /* Handle remainders */
+
+ /* Head */
+ sep_write_msg(ta_ctx, &head_len, sizeof(u32),
+ sizeof(u32), &msg_offset, 0);
+
+ if (head_len) {
+ copy_result = sg_copy_to_buffer(
+ req->src,
+ sep_sg_nents(ta_ctx->src_sg),
+ small_buf, head_len);
+
+ if (copy_result != head_len) {
+ dev_warn(&ta_ctx->sep_used->pdev->dev,
+ "sg head copy failure in hash block\n");
+ sep_crypto_release(sctx, ta_ctx, -ENOMEM);
+ return;
+ }
+
+ sep_write_msg(ta_ctx, small_buf, head_len,
+ sizeof(u32) * 32, &msg_offset, 1);
+ } else {
+ msg_offset += sizeof(u32) * 32;
+ }
+
+ /* Tail */
+ sep_write_msg(ta_ctx, &tail_len, sizeof(u32),
+ sizeof(u32), &msg_offset, 0);
+
+ if (tail_len) {
+ copy_result = sep_copy_offset_sg(
+ ta_ctx->sep_used,
+ ta_ctx->src_sg,
+ req->nbytes - tail_len,
+ small_buf, tail_len);
+
+ if (copy_result != tail_len) {
+ dev_warn(&ta_ctx->sep_used->pdev->dev,
+ "sg tail copy failure in hash block\n");
+ sep_crypto_release(sctx, ta_ctx, -ENOMEM);
+ return;
+ }
+
+ sep_write_msg(ta_ctx, small_buf, tail_len,
+ sizeof(u32) * 32, &msg_offset, 1);
+ } else {
+ msg_offset += sizeof(u32) * 32;
+ }
+
+ /* Context */
+ sep_write_context(ta_ctx, &msg_offset, &sctx->hash_private_ctx,
+ sizeof(struct sep_hash_private_context));
+
+ sep_end_msg(ta_ctx, msg_offset);
+ are_we_done_yet = 0;
+ int_error = sep_crypto_take_sep(ta_ctx);
+ if (int_error) {
+ dev_warn(&ta_ctx->sep_used->pdev->dev,
+ "sep_hash_update take sep failed\n");
+ sep_crypto_release(sctx, ta_ctx, -EINVAL);
+ }
+
+ /* now we sit and wait up to a fixed time for completion */
+ end_time = jiffies + (WAIT_TIME * HZ);
+ while ((time_before(jiffies, end_time)) && (are_we_done_yet == 0))
+ schedule();
+
+ /* Done waiting; still not done yet? */
+ if (are_we_done_yet == 0) {
+ dev_dbg(&ta_ctx->sep_used->pdev->dev,
+ "hash update never got done\n");
+ sep_crypto_release(sctx, ta_ctx, -EINVAL);
+ return;
+ }
+}
+
+static void sep_hash_final(void *data)
+{
+ u32 msg_offset;
+ struct ahash_request *req;
+ struct crypto_ahash *tfm;
+ struct this_task_ctx *ta_ctx;
+ struct sep_system_ctx *sctx;
+ int result;
+ unsigned long end_time;
+ int are_we_done_yet;
+
+ req = (struct ahash_request *)data;
+ tfm = crypto_ahash_reqtfm(req);
+ sctx = crypto_ahash_ctx(tfm);
+ ta_ctx = ahash_request_ctx(req);
+ ta_ctx->sep_used = sep_dev;
+
+ dev_dbg(&ta_ctx->sep_used->pdev->dev,
+ "sep_hash_final\n");
+ ta_ctx->current_hash_stage = HASH_FINISH;
+
+ ta_ctx->are_we_done_yet = &are_we_done_yet;
+
+ /* opcode and mode */
+ sep_make_header(ta_ctx, &msg_offset, SEP_HASH_FINISH_OPCODE);
+
+ /* Context */
+ sep_write_context(ta_ctx, &msg_offset, &sctx->hash_private_ctx,
+ sizeof(struct sep_hash_private_context));
+
+ sep_end_msg(ta_ctx, msg_offset);
+ are_we_done_yet = 0;
+ result = sep_crypto_take_sep(ta_ctx);
+ if (result) {
+ dev_warn(&ta_ctx->sep_used->pdev->dev,
+ "sep_hash_final take sep failed\n");
+ sep_crypto_release(sctx, ta_ctx, -EINVAL);
+ }
+
+ /* now we sit and wait up to a fixed time for completion */
+ end_time = jiffies + (WAIT_TIME * HZ);
+ while ((time_before(jiffies, end_time)) && (are_we_done_yet == 0))
+ schedule();
+
+ /* Done waiting; still not done yet? */
+ if (are_we_done_yet == 0) {
+ dev_dbg(&ta_ctx->sep_used->pdev->dev,
+ "hash final job never got done\n");
+ sep_crypto_release(sctx, ta_ctx, -EINVAL);
+ return;
+ }
+}
+
+static void sep_hash_digest(void *data)
+{
+ int int_error;
+ u32 msg_offset;
+ u32 block_size;
+ u32 msg[10];
+ size_t copy_result;
+ int result;
+ int are_we_done_yet;
+ u32 tail_len;
+ static char small_buf[100];
+ struct scatterlist *new_sg;
+ void *src_ptr;
+
+ struct ahash_request *req;
+ struct crypto_ahash *tfm;
+ struct this_task_ctx *ta_ctx;
+ struct sep_system_ctx *sctx;
+ unsigned long end_time;
+
+ req = (struct ahash_request *)data;
+ tfm = crypto_ahash_reqtfm(req);
+ sctx = crypto_ahash_ctx(tfm);
+ ta_ctx = ahash_request_ctx(req);
+ ta_ctx->sep_used = sep_dev;
+
+ dev_dbg(&ta_ctx->sep_used->pdev->dev,
+ "sep_hash_digest\n");
+ ta_ctx->current_hash_stage = HASH_DIGEST;
+
+ ta_ctx->are_we_done_yet = &are_we_done_yet;
+
+ /* length for queue status */
+ ta_ctx->nbytes = req->nbytes;
+
+ block_size = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
+ tail_len = req->nbytes % block_size;
+ dev_dbg(&ta_ctx->sep_used->pdev->dev, "length is %x\n", req->nbytes);
+ dev_dbg(&ta_ctx->sep_used->pdev->dev, "block_size is %x\n", block_size);
+ dev_dbg(&ta_ctx->sep_used->pdev->dev, "tail len is %x\n", tail_len);
+
+ /* Make sure all pages are even block */
+ int_error = sep_oddball_pages(ta_ctx->sep_used, req->src,
+ req->nbytes,
+ block_size, &new_sg, 1);
+
+ if (int_error < 0) {
+ dev_warn(&ta_ctx->sep_used->pdev->dev,
+			"oddball pages error in hash digest\n");
+ sep_crypto_release(sctx, ta_ctx, -ENOMEM);
+ return;
+ } else if (int_error == 1) {
+ ta_ctx->src_sg = new_sg;
+ ta_ctx->src_sg_hold = new_sg;
+ } else {
+ ta_ctx->src_sg = req->src;
+ ta_ctx->src_sg_hold = NULL;
+ }
+
+ src_ptr = sg_virt(ta_ctx->src_sg);
+
+ if ((!req->nbytes) || (!ta_ctx->src_sg)) {
+ /* null data */
+ src_ptr = NULL;
+ }
+
+ sep_dump_sg(ta_ctx->sep_used, "hash block sg in", ta_ctx->src_sg);
+
+ ta_ctx->dcb_input_data.app_in_address = src_ptr;
+ ta_ctx->dcb_input_data.data_in_size = req->nbytes - tail_len;
+ ta_ctx->dcb_input_data.app_out_address = NULL;
+ ta_ctx->dcb_input_data.block_size = block_size;
+ ta_ctx->dcb_input_data.tail_block_size = 0;
+ ta_ctx->dcb_input_data.is_applet = 0;
+ ta_ctx->dcb_input_data.src_sg = ta_ctx->src_sg;
+ ta_ctx->dcb_input_data.dst_sg = NULL;
+
+ int_error = sep_create_dcb_dmatables_context_kernel(
+ ta_ctx->sep_used,
+ &ta_ctx->dcb_region,
+ &ta_ctx->dmatables_region,
+ &ta_ctx->dma_ctx,
+ &ta_ctx->dcb_input_data,
+ 1);
+ if (int_error) {
+ dev_warn(&ta_ctx->sep_used->pdev->dev,
+			"hash digest dma table create failed\n");
+ sep_crypto_release(sctx, ta_ctx, -EINVAL);
+ return;
+ }
+
+ /* Construct message to SEP */
+ sep_make_header(ta_ctx, &msg_offset, SEP_HASH_SINGLE_OPCODE);
+ sep_write_msg(ta_ctx, &ta_ctx->hash_opmode,
+ sizeof(u32), sizeof(u32), &msg_offset, 0);
+
+ msg[0] = (u32)0;
+ msg[1] = (u32)0;
+ msg[2] = (u32)0;
+
+ sep_write_msg(ta_ctx, msg, sizeof(u32) * 3, sizeof(u32) * 3,
+ &msg_offset, 0);
+
+ /* Tail */
+ sep_write_msg(ta_ctx, &tail_len, sizeof(u32),
+ sizeof(u32), &msg_offset, 0);
+
+ if (tail_len) {
+ copy_result = sep_copy_offset_sg(
+ ta_ctx->sep_used,
+ ta_ctx->src_sg,
+ req->nbytes - tail_len,
+ small_buf, tail_len);
+
+ if (copy_result != tail_len) {
+ dev_warn(&ta_ctx->sep_used->pdev->dev,
+ "sg tail copy failure in hash block\n");
+ sep_crypto_release(sctx, ta_ctx, -ENOMEM);
+ return;
+ }
+
+ sep_write_msg(ta_ctx, small_buf, tail_len,
+ sizeof(u32) * 32, &msg_offset, 1);
+ } else {
+ msg_offset += sizeof(u32) * 32;
+ }
+
+ sep_end_msg(ta_ctx, msg_offset);
+
+ are_we_done_yet = 0;
+ result = sep_crypto_take_sep(ta_ctx);
+ if (result) {
+ dev_warn(&ta_ctx->sep_used->pdev->dev,
+ "sep_hash_digest take sep failed\n");
+ sep_crypto_release(sctx, ta_ctx, -EINVAL);
+ }
+
+ /* now we sit and wait up to a fixed time for completion */
+ end_time = jiffies + (WAIT_TIME * HZ);
+ while ((time_before(jiffies, end_time)) && (are_we_done_yet == 0))
+ schedule();
+
+ /* Done waiting; still not done yet? */
+ if (are_we_done_yet == 0) {
+ dev_dbg(&ta_ctx->sep_used->pdev->dev,
+ "hash digest job never got done\n");
+ sep_crypto_release(sctx, ta_ctx, -EINVAL);
+ return;
+ }
+}
+
+/**
+ * This is what is called by each of the API's provided
+ * in the kernel crypto descriptors. It is run in a process
+ * context using the kernel workqueues. Therefore it can
+ * be put to sleep.
+ */
+static void sep_dequeuer(void *data)
+{
+ struct crypto_queue *this_queue;
+ struct crypto_async_request *async_req;
+ struct crypto_async_request *backlog;
+ struct ablkcipher_request *cypher_req;
+ struct ahash_request *hash_req;
+ struct sep_system_ctx *sctx;
+ struct crypto_ahash *hash_tfm;
+ struct this_task_ctx *ta_ctx;
+
+ this_queue = (struct crypto_queue *)data;
+
+ spin_lock_irq(&queue_lock);
+ backlog = crypto_get_backlog(this_queue);
+ async_req = crypto_dequeue_request(this_queue);
+ spin_unlock_irq(&queue_lock);
+
+ if (!async_req) {
+ pr_debug("sep crypto queue is empty\n");
+ return;
+ }
+
+ if (backlog) {
+ pr_debug("sep crypto backlog set\n");
+ if (backlog->complete)
+ backlog->complete(backlog, -EINPROGRESS);
+ backlog = NULL;
+ }
+
+ if (!async_req->tfm) {
+ pr_debug("sep crypto queue null tfm\n");
+ return;
+ }
+
+ if (!async_req->tfm->__crt_alg) {
+ pr_debug("sep crypto queue null __crt_alg\n");
+ return;
+ }
+
+ if (!async_req->tfm->__crt_alg->cra_type) {
+ pr_debug("sep crypto queue null cra_type\n");
+ return;
+ }
+
+ /* we have stuff in the queue */
+ if (async_req->tfm->__crt_alg->cra_type !=
+ &crypto_ahash_type) {
+ /* This is for a cypher */
+ pr_debug("sep crypto queue doing cipher\n");
+ cypher_req = container_of(async_req,
+ struct ablkcipher_request,
+ base);
+ if (!cypher_req) {
+ pr_debug("sep crypto queue null cypher_req\n");
+ return;
+ }
+
+ sep_crypto_block((void *)cypher_req);
+ return;
+ } else {
+ /* This is a hash */
+ pr_debug("sep crypto queue doing hash\n");
+ /**
+ * This is a bit more complex than cipher; we
+		 * need to figure out what type of hash operation it is
+ */
+ hash_req = ahash_request_cast(async_req);
+ if (!hash_req) {
+ pr_debug("sep crypto queue null hash_req\n");
+ return;
+ }
+
+ hash_tfm = crypto_ahash_reqtfm(hash_req);
+ if (!hash_tfm) {
+ pr_debug("sep crypto queue null hash_tfm\n");
+ return;
+ }
+
+ sctx = crypto_ahash_ctx(hash_tfm);
+ if (!sctx) {
+ pr_debug("sep crypto queue null sctx\n");
+ return;
+ }
+
+ ta_ctx = ahash_request_ctx(hash_req);
+
+ if (ta_ctx->current_hash_stage == HASH_INIT) {
+ pr_debug("sep crypto queue hash init\n");
+ sep_hash_init((void *)hash_req);
+ return;
+ } else if (ta_ctx->current_hash_stage == HASH_UPDATE) {
+ pr_debug("sep crypto queue hash update\n");
+ sep_hash_update((void *)hash_req);
+ return;
+ } else if (ta_ctx->current_hash_stage == HASH_FINISH) {
+ pr_debug("sep crypto queue hash final\n");
+ sep_hash_final((void *)hash_req);
+ return;
+ } else if (ta_ctx->current_hash_stage == HASH_DIGEST) {
+ pr_debug("sep crypto queue hash digest\n");
+ sep_hash_digest((void *)hash_req);
+ return;
+ } else if (ta_ctx->current_hash_stage == HASH_FINUP_DATA) {
+			pr_debug("sep crypto queue hash finup data\n");
+ sep_hash_update((void *)hash_req);
+ return;
+ } else if (ta_ctx->current_hash_stage == HASH_FINUP_FINISH) {
+			pr_debug("sep crypto queue hash finup finish\n");
+ sep_hash_final((void *)hash_req);
+ return;
+ } else {
+ pr_debug("sep crypto queue hash oops nothing\n");
+ return;
+ }
+ }
+}
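+
+/*
+ * Enqueue pattern -- every entry point below follows it: stash the
+ * request parameters in the task context, enqueue under queue_lock,
+ * and kick the dequeuer on the workqueue:
+ *
+ *	spin_lock_irq(&queue_lock);
+ *	error = crypto_enqueue_request(&sep_queue, &req->base);
+ *	error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
+ *				 sep_dequeuer, (void *)&sep_queue);
+ *	spin_unlock_irq(&queue_lock);
+ */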
+
+static int sep_sha1_init(struct ahash_request *req)
+{
+ int error;
+ int error1;
+ struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
+
+ pr_debug("sep - doing sha1 init\n");
+
+ /* Clear out task context */
+ memset(ta_ctx, 0, sizeof(struct this_task_ctx));
+
+ ta_ctx->sep_used = sep_dev;
+ ta_ctx->current_request = SHA1;
+ ta_ctx->current_hash_req = req;
+ ta_ctx->current_cypher_req = NULL;
+ ta_ctx->hash_opmode = SEP_HASH_SHA1;
+ ta_ctx->current_hash_stage = HASH_INIT;
+
+ /* lock necessary so that only one entity touches the queues */
+ spin_lock_irq(&queue_lock);
+ error = crypto_enqueue_request(&sep_queue, &req->base);
+
+ if ((error != 0) && (error != -EINPROGRESS))
+ pr_debug(" sep - crypto enqueue failed: %x\n",
+ error);
+ error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
+ sep_dequeuer, (void *)&sep_queue);
+ if (error1)
+ pr_debug(" sep - workqueue submit failed: %x\n",
+ error1);
+ spin_unlock_irq(&queue_lock);
+ /* We return result of crypto enqueue */
+ return error;
+}
+
+static int sep_sha1_update(struct ahash_request *req)
+{
+ int error;
+ int error1;
+ struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
+
+ pr_debug("sep - doing sha1 update\n");
+
+ ta_ctx->sep_used = sep_dev;
+ ta_ctx->current_request = SHA1;
+ ta_ctx->current_hash_req = req;
+ ta_ctx->current_cypher_req = NULL;
+ ta_ctx->hash_opmode = SEP_HASH_SHA1;
+ ta_ctx->current_hash_stage = HASH_UPDATE;
+
+ /* lock necessary so that only one entity touches the queues */
+ spin_lock_irq(&queue_lock);
+ error = crypto_enqueue_request(&sep_queue, &req->base);
+
+ if ((error != 0) && (error != -EINPROGRESS))
+ pr_debug(" sep - crypto enqueue failed: %x\n",
+ error);
+ error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
+ sep_dequeuer, (void *)&sep_queue);
+ if (error1)
+ pr_debug(" sep - workqueue submit failed: %x\n",
+ error1);
+ spin_unlock_irq(&queue_lock);
+ /* We return result of crypto enqueue */
+ return error;
+}
+
+static int sep_sha1_final(struct ahash_request *req)
+{
+ int error;
+ int error1;
+ struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
+ pr_debug("sep - doing sha1 final\n");
+
+ ta_ctx->sep_used = sep_dev;
+ ta_ctx->current_request = SHA1;
+ ta_ctx->current_hash_req = req;
+ ta_ctx->current_cypher_req = NULL;
+ ta_ctx->hash_opmode = SEP_HASH_SHA1;
+ ta_ctx->current_hash_stage = HASH_FINISH;
+
+ /* lock necessary so that only one entity touches the queues */
+ spin_lock_irq(&queue_lock);
+ error = crypto_enqueue_request(&sep_queue, &req->base);
+
+ if ((error != 0) && (error != -EINPROGRESS))
+ pr_debug(" sep - crypto enqueue failed: %x\n",
+ error);
+ error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
+ sep_dequeuer, (void *)&sep_queue);
+ if (error1)
+ pr_debug(" sep - workqueue submit failed: %x\n",
+ error1);
+ spin_unlock_irq(&queue_lock);
+ /* We return result of crypto enqueue */
+ return error;
+}
+
+static int sep_sha1_digest(struct ahash_request *req)
+{
+ int error;
+ int error1;
+ struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
+ pr_debug("sep - doing sha1 digest\n");
+
+ /* Clear out task context */
+ memset(ta_ctx, 0, sizeof(struct this_task_ctx));
+
+ ta_ctx->sep_used = sep_dev;
+ ta_ctx->current_request = SHA1;
+ ta_ctx->current_hash_req = req;
+ ta_ctx->current_cypher_req = NULL;
+ ta_ctx->hash_opmode = SEP_HASH_SHA1;
+ ta_ctx->current_hash_stage = HASH_DIGEST;
+
+ /* lock necessary so that only one entity touches the queues */
+ spin_lock_irq(&queue_lock);
+ error = crypto_enqueue_request(&sep_queue, &req->base);
+
+ if ((error != 0) && (error != -EINPROGRESS))
+ pr_debug(" sep - crypto enqueue failed: %x\n",
+ error);
+ error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
+ sep_dequeuer, (void *)&sep_queue);
+ if (error1)
+ pr_debug(" sep - workqueue submit failed: %x\n",
+ error1);
+ spin_unlock_irq(&queue_lock);
+ /* We return result of crypto enqueue */
+ return error;
+}
+
+static int sep_sha1_finup(struct ahash_request *req)
+{
+ int error;
+ int error1;
+ struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
+ pr_debug("sep - doing sha1 finup\n");
+
+ ta_ctx->sep_used = sep_dev;
+ ta_ctx->current_request = SHA1;
+ ta_ctx->current_hash_req = req;
+ ta_ctx->current_cypher_req = NULL;
+ ta_ctx->hash_opmode = SEP_HASH_SHA1;
+ ta_ctx->current_hash_stage = HASH_FINUP_DATA;
+
+ /* lock necessary so that only one entity touches the queues */
+ spin_lock_irq(&queue_lock);
+ error = crypto_enqueue_request(&sep_queue, &req->base);
+
+ if ((error != 0) && (error != -EINPROGRESS))
+ pr_debug(" sep - crypto enqueue failed: %x\n",
+ error);
+ error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
+ sep_dequeuer, (void *)&sep_queue);
+ if (error1)
+ pr_debug(" sep - workqueue submit failed: %x\n",
+ error1);
+ spin_unlock_irq(&queue_lock);
+ /* We return result of crypto enqueue */
+ return error;
+}
+
+static int sep_md5_init(struct ahash_request *req)
+{
+ int error;
+ int error1;
+ struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
+ pr_debug("sep - doing md5 init\n");
+
+ /* Clear out task context */
+ memset(ta_ctx, 0, sizeof(struct this_task_ctx));
+
+ ta_ctx->sep_used = sep_dev;
+ ta_ctx->current_request = MD5;
+ ta_ctx->current_hash_req = req;
+ ta_ctx->current_cypher_req = NULL;
+ ta_ctx->hash_opmode = SEP_HASH_MD5;
+ ta_ctx->current_hash_stage = HASH_INIT;
+
+ /* lock necessary so that only one entity touches the queues */
+ spin_lock_irq(&queue_lock);
+ error = crypto_enqueue_request(&sep_queue, &req->base);
+
+ if ((error != 0) && (error != -EINPROGRESS))
+ pr_debug(" sep - crypto enqueue failed: %x\n",
+ error);
+ error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
+ sep_dequeuer, (void *)&sep_queue);
+ if (error1)
+ pr_debug(" sep - workqueue submit failed: %x\n",
+ error1);
+ spin_unlock_irq(&queue_lock);
+ /* We return result of crypto enqueue */
+ return error;
+}
+
+static int sep_md5_update(struct ahash_request *req)
+{
+ int error;
+ int error1;
+ struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
+ pr_debug("sep - doing md5 update\n");
+
+ ta_ctx->sep_used = sep_dev;
+ ta_ctx->current_request = MD5;
+ ta_ctx->current_hash_req = req;
+ ta_ctx->current_cypher_req = NULL;
+ ta_ctx->hash_opmode = SEP_HASH_MD5;
+ ta_ctx->current_hash_stage = HASH_UPDATE;
+
+ /* lock necessary so that only one entity touches the queues */
+ spin_lock_irq(&queue_lock);
+ error = crypto_enqueue_request(&sep_queue, &req->base);
+
+ if ((error != 0) && (error != -EINPROGRESS))
+ pr_debug(" sep - crypto enqueue failed: %x\n",
+ error);
+ error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
+ sep_dequeuer, (void *)&sep_queue);
+ if (error1)
+ pr_debug(" sep - workqueue submit failed: %x\n",
+ error1);
+ spin_unlock_irq(&queue_lock);
+ /* We return result of crypto enqueue */
+ return error;
+}
+
+static int sep_md5_final(struct ahash_request *req)
+{
+ int error;
+ int error1;
+ struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
+ pr_debug("sep - doing md5 final\n");
+
+ ta_ctx->sep_used = sep_dev;
+ ta_ctx->current_request = MD5;
+ ta_ctx->current_hash_req = req;
+ ta_ctx->current_cypher_req = NULL;
+ ta_ctx->hash_opmode = SEP_HASH_MD5;
+ ta_ctx->current_hash_stage = HASH_FINISH;
+
+ /* lock necessary so that only one entity touches the queues */
+ spin_lock_irq(&queue_lock);
+ error = crypto_enqueue_request(&sep_queue, &req->base);
+
+ if ((error != 0) && (error != -EINPROGRESS))
+ pr_debug(" sep - crypto enqueue failed: %x\n",
+ error);
+ error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
+ sep_dequeuer, (void *)&sep_queue);
+ if (error1)
+ pr_debug(" sep - workqueue submit failed: %x\n",
+ error1);
+ spin_unlock_irq(&queue_lock);
+ /* We return result of crypto enqueue */
+ return error;
+}
+
+static int sep_md5_digest(struct ahash_request *req)
+{
+ int error;
+ int error1;
+ struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
+
+ pr_debug("sep - doing md5 digest\n");
+
+ /* Clear out task context */
+ memset(ta_ctx, 0, sizeof(struct this_task_ctx));
+
+ ta_ctx->sep_used = sep_dev;
+ ta_ctx->current_request = MD5;
+ ta_ctx->current_hash_req = req;
+ ta_ctx->current_cypher_req = NULL;
+ ta_ctx->hash_opmode = SEP_HASH_MD5;
+ ta_ctx->current_hash_stage = HASH_DIGEST;
+
+ /* lock necessary so that only one entity touches the queues */
+ spin_lock_irq(&queue_lock);
+ error = crypto_enqueue_request(&sep_queue, &req->base);
+
+ if ((error != 0) && (error != -EINPROGRESS))
+ pr_debug(" sep - crypto enqueue failed: %x\n",
+ error);
+ error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
+ sep_dequeuer, (void *)&sep_queue);
+ if (error1)
+ pr_debug(" sep - workqueue submit failed: %x\n",
+ error1);
+ spin_unlock_irq(&queue_lock);
+ /* We return result of crypto enqueue */
+ return error;
+}
+
+static int sep_md5_finup(struct ahash_request *req)
+{
+ int error;
+ int error1;
+ struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
+
+ pr_debug("sep - doing md5 finup\n");
+
+ ta_ctx->sep_used = sep_dev;
+ ta_ctx->current_request = MD5;
+ ta_ctx->current_hash_req = req;
+ ta_ctx->current_cypher_req = NULL;
+ ta_ctx->hash_opmode = SEP_HASH_MD5;
+ ta_ctx->current_hash_stage = HASH_FINUP_DATA;
+
+ /* lock necessary so that only one entity touches the queues */
+ spin_lock_irq(&queue_lock);
+ error = crypto_enqueue_request(&sep_queue, &req->base);
+
+ if ((error != 0) && (error != -EINPROGRESS))
+ pr_debug(" sep - crypto enqueue failed: %x\n",
+ error);
+ error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
+ sep_dequeuer, (void *)&sep_queue);
+ if (error1)
+ pr_debug(" sep - workqueue submit failed: %x\n",
+ error1);
+ spin_unlock_irq(&queue_lock);
+ /* We return result of crypto enqueue */
+ return error;
+}
+
+static int sep_sha224_init(struct ahash_request *req)
+{
+ int error;
+ int error1;
+ struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
+ pr_debug("sep - doing sha224 init\n");
+
+ /* Clear out task context */
+ memset(ta_ctx, 0, sizeof(struct this_task_ctx));
+
+ ta_ctx->sep_used = sep_dev;
+ ta_ctx->current_request = SHA224;
+ ta_ctx->current_hash_req = req;
+ ta_ctx->current_cypher_req = NULL;
+ ta_ctx->hash_opmode = SEP_HASH_SHA224;
+ ta_ctx->current_hash_stage = HASH_INIT;
+
+ /* lock necessary so that only one entity touches the queues */
+ spin_lock_irq(&queue_lock);
+ error = crypto_enqueue_request(&sep_queue, &req->base);
+
+ if ((error != 0) && (error != -EINPROGRESS))
+ pr_debug(" sep - crypto enqueue failed: %x\n",
+ error);
+ error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
+ sep_dequeuer, (void *)&sep_queue);
+ if (error1)
+ pr_debug(" sep - workqueue submit failed: %x\n",
+ error1);
+ spin_unlock_irq(&queue_lock);
+ /* We return result of crypto enqueue */
+ return error;
+}
+
+static int sep_sha224_update(struct ahash_request *req)
+{
+ int error;
+ int error1;
+ struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
+ pr_debug("sep - doing sha224 update\n");
+
+ ta_ctx->sep_used = sep_dev;
+ ta_ctx->current_request = SHA224;
+ ta_ctx->current_hash_req = req;
+ ta_ctx->current_cypher_req = NULL;
+ ta_ctx->hash_opmode = SEP_HASH_SHA224;
+ ta_ctx->current_hash_stage = HASH_UPDATE;
+
+ /* lock necessary so that only one entity touches the queues */
+ spin_lock_irq(&queue_lock);
+ error = crypto_enqueue_request(&sep_queue, &req->base);
+
+ if ((error != 0) && (error != -EINPROGRESS))
+ pr_debug(" sep - crypto enqueue failed: %x\n",
+ error);
+ error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
+ sep_dequeuer, (void *)&sep_queue);
+ if (error1)
+ pr_debug(" sep - workqueue submit failed: %x\n",
+ error1);
+ spin_unlock_irq(&queue_lock);
+ /* We return result of crypto enqueue */
+ return error;
+}
+
+static int sep_sha224_final(struct ahash_request *req)
+{
+ int error;
+ int error1;
+ struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
+ pr_debug("sep - doing sha224 final\n");
+
+ ta_ctx->sep_used = sep_dev;
+ ta_ctx->current_request = SHA224;
+ ta_ctx->current_hash_req = req;
+ ta_ctx->current_cypher_req = NULL;
+ ta_ctx->hash_opmode = SEP_HASH_SHA224;
+ ta_ctx->current_hash_stage = HASH_FINISH;
+
+ /* lock necessary so that only one entity touches the queues */
+ spin_lock_irq(&queue_lock);
+ error = crypto_enqueue_request(&sep_queue, &req->base);
+
+ if ((error != 0) && (error != -EINPROGRESS))
+ pr_debug(" sep - crypto enqueue failed: %x\n",
+ error);
+ error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
+ sep_dequeuer, (void *)&sep_queue);
+ if (error1)
+ pr_debug(" sep - workqueue submit failed: %x\n",
+ error1);
+ spin_unlock_irq(&queue_lock);
+ /* We return result of crypto enqueue */
+ return error;
+}
+
+static int sep_sha224_digest(struct ahash_request *req)
+{
+ int error;
+ int error1;
+ struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
+
+ pr_debug("sep - doing sha224 digest\n");
+
+ /* Clear out task context */
+ memset(ta_ctx, 0, sizeof(struct this_task_ctx));
+
+ ta_ctx->sep_used = sep_dev;
+ ta_ctx->current_request = SHA224;
+ ta_ctx->current_hash_req = req;
+ ta_ctx->current_cypher_req = NULL;
+ ta_ctx->hash_opmode = SEP_HASH_SHA224;
+ ta_ctx->current_hash_stage = HASH_DIGEST;
+
+ /* lock necessary so that only one entity touches the queues */
+ spin_lock_irq(&queue_lock);
+ error = crypto_enqueue_request(&sep_queue, &req->base);
+
+ if ((error != 0) && (error != -EINPROGRESS))
+ pr_debug(" sep - crypto enqueue failed: %x\n",
+ error);
+ error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
+ sep_dequeuer, (void *)&sep_queue);
+ if (error1)
+ pr_debug(" sep - workqueue submit failed: %x\n",
+ error1);
+ spin_unlock_irq(&queue_lock);
+ /* We return result of crypto enqueue */
+ return error;
+}
+
+static int sep_sha224_finup(struct ahash_request *req)
+{
+ int error;
+ int error1;
+ struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
+
+ pr_debug("sep - doing sha224 finup\n");
+
+ ta_ctx->sep_used = sep_dev;
+ ta_ctx->current_request = SHA224;
+ ta_ctx->current_hash_req = req;
+ ta_ctx->current_cypher_req = NULL;
+ ta_ctx->hash_opmode = SEP_HASH_SHA224;
+ ta_ctx->current_hash_stage = HASH_FINUP_DATA;
+
+ /* lock necessary so that only one entity touches the queues */
+ spin_lock_irq(&queue_lock);
+ error = crypto_enqueue_request(&sep_queue, &req->base);
+
+ if ((error != 0) && (error != -EINPROGRESS))
+ pr_debug(" sep - crypto enqueue failed: %x\n",
+ error);
+ error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
+ sep_dequeuer, (void *)&sep_queue);
+ if (error1)
+ pr_debug(" sep - workqueue submit failed: %x\n",
+ error1);
+ spin_unlock_irq(&queue_lock);
+ /* We return result of crypto enqueue */
+ return error;
+}
+
+static int sep_sha256_init(struct ahash_request *req)
+{
+ int error;
+ int error1;
+ struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
+ pr_debug("sep - doing sha256 init\n");
+
+ /* Clear out task context */
+ memset(ta_ctx, 0, sizeof(struct this_task_ctx));
+
+ ta_ctx->sep_used = sep_dev;
+ ta_ctx->current_request = SHA256;
+ ta_ctx->current_hash_req = req;
+ ta_ctx->current_cypher_req = NULL;
+ ta_ctx->hash_opmode = SEP_HASH_SHA256;
+ ta_ctx->current_hash_stage = HASH_INIT;
+
+ /* lock necessary so that only one entity touches the queues */
+ spin_lock_irq(&queue_lock);
+ error = crypto_enqueue_request(&sep_queue, &req->base);
+
+ if ((error != 0) && (error != -EINPROGRESS))
+ pr_debug(" sep - crypto enqueue failed: %x\n",
+ error);
+ error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
+ sep_dequeuer, (void *)&sep_queue);
+ if (error1)
+ pr_debug(" sep - workqueue submit failed: %x\n",
+ error1);
+ spin_unlock_irq(&queue_lock);
+ /* We return result of crypto enqueue */
+ return error;
+}
+
+static int sep_sha256_update(struct ahash_request *req)
+{
+ int error;
+ int error1;
+ struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
+ pr_debug("sep - doing sha256 update\n");
+
+ ta_ctx->sep_used = sep_dev;
+ ta_ctx->current_request = SHA256;
+ ta_ctx->current_hash_req = req;
+ ta_ctx->current_cypher_req = NULL;
+ ta_ctx->hash_opmode = SEP_HASH_SHA256;
+ ta_ctx->current_hash_stage = HASH_UPDATE;
+
+ /* lock necessary so that only one entity touches the queues */
+ spin_lock_irq(&queue_lock);
+ error = crypto_enqueue_request(&sep_queue, &req->base);
+
+ if ((error != 0) && (error != -EINPROGRESS))
+ pr_debug(" sep - crypto enqueue failed: %x\n",
+ error);
+ error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
+ sep_dequeuer, (void *)&sep_queue);
+ if (error1)
+ pr_debug(" sep - workqueue submit failed: %x\n",
+ error1);
+ spin_unlock_irq(&queue_lock);
+ /* We return result of crypto enqueue */
+ return error;
+}
+
+static int sep_sha256_final(struct ahash_request *req)
+{
+ int error;
+ int error1;
+ struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
+ pr_debug("sep - doing sha256 final\n");
+
+ ta_ctx->sep_used = sep_dev;
+ ta_ctx->current_request = SHA256;
+ ta_ctx->current_hash_req = req;
+ ta_ctx->current_cypher_req = NULL;
+ ta_ctx->hash_opmode = SEP_HASH_SHA256;
+ ta_ctx->current_hash_stage = HASH_FINISH;
+
+ /* lock necessary so that only one entity touches the queues */
+ spin_lock_irq(&queue_lock);
+ error = crypto_enqueue_request(&sep_queue, &req->base);
+
+ if ((error != 0) && (error != -EINPROGRESS))
+ pr_debug(" sep - crypto enqueue failed: %x\n",
+ error);
+ error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
+ sep_dequeuer, (void *)&sep_queue);
+ if (error1)
+ pr_debug(" sep - workqueue submit failed: %x\n",
+ error1);
+ spin_unlock_irq(&queue_lock);
+ /* We return result of crypto enqueue */
+ return error;
+}
+
+static int sep_sha256_digest(struct ahash_request *req)
+{
+ int error;
+ int error1;
+ struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
+
+ pr_debug("sep - doing sha256 digest\n");
+
+ /* Clear out task context */
+ memset(ta_ctx, 0, sizeof(struct this_task_ctx));
+
+ ta_ctx->sep_used = sep_dev;
+ ta_ctx->current_request = SHA256;
+ ta_ctx->current_hash_req = req;
+ ta_ctx->current_cypher_req = NULL;
+ ta_ctx->hash_opmode = SEP_HASH_SHA256;
+ ta_ctx->current_hash_stage = HASH_DIGEST;
+
+ /* lock necessary so that only one entity touches the queues */
+ spin_lock_irq(&queue_lock);
+ error = crypto_enqueue_request(&sep_queue, &req->base);
+
+ if ((error != 0) && (error != -EINPROGRESS))
+ pr_debug(" sep - crypto enqueue failed: %x\n",
+ error);
+ error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
+ sep_dequeuer, (void *)&sep_queue);
+ if (error1)
+ pr_debug(" sep - workqueue submit failed: %x\n",
+ error1);
+ spin_unlock_irq(&queue_lock);
+ /* We return result of crypto enqueue */
+ return error;
+}
+
+static int sep_sha256_finup(struct ahash_request *req)
+{
+ int error;
+ int error1;
+ struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
+
+ pr_debug("sep - doing sha256 finup\n");
+
+ ta_ctx->sep_used = sep_dev;
+ ta_ctx->current_request = SHA256;
+ ta_ctx->current_hash_req = req;
+ ta_ctx->current_cypher_req = NULL;
+ ta_ctx->hash_opmode = SEP_HASH_SHA256;
+ ta_ctx->current_hash_stage = HASH_FINUP_DATA;
+
+ /* lock necessary so that only one entity touches the queues */
+ spin_lock_irq(&queue_lock);
+ error = crypto_enqueue_request(&sep_queue, &req->base);
+
+ if ((error != 0) && (error != -EINPROGRESS))
+ pr_debug(" sep - crypto enqueue failed: %x\n",
+ error);
+ error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
+ sep_dequeuer, (void *)&sep_queue);
+ if (error1)
+ pr_debug(" sep - workqueue submit failed: %x\n",
+ error1);
+ spin_unlock_irq(&queue_lock);
+ /* We return result of crypto enqueue */
+ return error;
+}
+
+static int sep_crypto_init(struct crypto_tfm *tfm)
+{
+ const char *alg_name = crypto_tfm_alg_name(tfm);
+
+ if (alg_name == NULL)
+ pr_debug("sep_crypto_init alg is NULL\n");
+ else
+ pr_debug("sep_crypto_init alg is %s\n", alg_name);
+
+ tfm->crt_ablkcipher.reqsize = sizeof(struct this_task_ctx);
+ return 0;
+}
+
+static void sep_crypto_exit(struct crypto_tfm *tfm)
+{
+ pr_debug("sep_crypto_exit\n");
+}
+
+static int sep_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct sep_system_ctx *sctx = crypto_ablkcipher_ctx(tfm);
+
+ pr_debug("sep aes setkey\n");
+
+ pr_debug("tfm is %p sctx is %p\n", tfm, sctx);
+ switch (keylen) {
+ case SEP_AES_KEY_128_SIZE:
+ sctx->aes_key_size = AES_128;
+ break;
+ case SEP_AES_KEY_192_SIZE:
+ sctx->aes_key_size = AES_192;
+ break;
+ case SEP_AES_KEY_256_SIZE:
+ sctx->aes_key_size = AES_256;
+ break;
+ case SEP_AES_KEY_512_SIZE:
+ sctx->aes_key_size = AES_512;
+ break;
+ default:
+ pr_debug("invalid sep aes key size %x\n",
+ keylen);
+ return -EINVAL;
+ }
+
+ memset(&sctx->key.aes, 0, sizeof(u32) *
+ SEP_AES_MAX_KEY_SIZE_WORDS);
+ memcpy(&sctx->key.aes, key, keylen);
+ sctx->keylen = keylen;
+ /* Indicate to encrypt/decrypt function to send key to SEP */
+ sctx->key_sent = 0;
+
+ return 0;
+}
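+
+/*
+ * Note that sep_aes_setkey() only caches the key in the system
+ * context: the key is pushed to the SEP the next time an encrypt or
+ * decrypt request on this tfm is processed, which is why key_sent is
+ * cleared instead of any message being sent to the device.
+ */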
+
+static int sep_aes_ecb_encrypt(struct ablkcipher_request *req)
+{
+ int error;
+ int error1;
+ struct this_task_ctx *ta_ctx = ablkcipher_request_ctx(req);
+
+ pr_debug("sep - doing aes ecb encrypt\n");
+
+ /* Clear out task context */
+ memset(ta_ctx, 0, sizeof(struct this_task_ctx));
+
+ ta_ctx->sep_used = sep_dev;
+ ta_ctx->current_request = AES_ECB;
+ ta_ctx->current_hash_req = NULL;
+ ta_ctx->current_cypher_req = req;
+ ta_ctx->aes_encmode = SEP_AES_ENCRYPT;
+ ta_ctx->aes_opmode = SEP_AES_ECB;
+ ta_ctx->init_opcode = SEP_AES_INIT_OPCODE;
+ ta_ctx->block_opcode = SEP_AES_BLOCK_OPCODE;
+
+ /* lock necessary so that only one entity touches the queues */
+ spin_lock_irq(&queue_lock);
+ error = crypto_enqueue_request(&sep_queue, &req->base);
+
+ if ((error != 0) && (error != -EINPROGRESS))
+ pr_debug(" sep - crypto enqueue failed: %x\n",
+ error);
+ error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
+ sep_dequeuer, (void *)&sep_queue);
+ if (error1)
+ pr_debug(" sep - workqueue submit failed: %x\n",
+ error1);
+ spin_unlock_irq(&queue_lock);
+ /* We return result of crypto enqueue */
+ return error;
+}
+
+static int sep_aes_ecb_decrypt(struct ablkcipher_request *req)
+{
+ int error;
+ int error1;
+ struct this_task_ctx *ta_ctx = ablkcipher_request_ctx(req);
+
+ pr_debug("sep - doing aes ecb decrypt\n");
+
+ /* Clear out task context */
+ memset(ta_ctx, 0, sizeof(struct this_task_ctx));
+
+ ta_ctx->sep_used = sep_dev;
+ ta_ctx->current_request = AES_ECB;
+ ta_ctx->current_hash_req = NULL;
+ ta_ctx->current_cypher_req = req;
+ ta_ctx->aes_encmode = SEP_AES_DECRYPT;
+ ta_ctx->aes_opmode = SEP_AES_ECB;
+ ta_ctx->init_opcode = SEP_AES_INIT_OPCODE;
+ ta_ctx->block_opcode = SEP_AES_BLOCK_OPCODE;
+
+ /* lock necessary so that only one entity touches the queues */
+ spin_lock_irq(&queue_lock);
+ error = crypto_enqueue_request(&sep_queue, &req->base);
+
+ if ((error != 0) && (error != -EINPROGRESS))
+ pr_debug(" sep - crypto enqueue failed: %x\n",
+ error);
+ error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
+ sep_dequeuer, (void *)&sep_queue);
+ if (error1)
+ pr_debug(" sep - workqueue submit failed: %x\n",
+ error1);
+ spin_unlock_irq(&queue_lock);
+ /* We return result of crypto enqueue */
+ return error;
+}
+
+static int sep_aes_cbc_encrypt(struct ablkcipher_request *req)
+{
+ int error;
+ int error1;
+ struct this_task_ctx *ta_ctx = ablkcipher_request_ctx(req);
+ struct sep_system_ctx *sctx = crypto_ablkcipher_ctx(
+ crypto_ablkcipher_reqtfm(req));
+
+ pr_debug("sep - doing aes cbc encrypt\n");
+
+ /* Clear out task context */
+ memset(ta_ctx, 0, sizeof(struct this_task_ctx));
+
+ pr_debug("tfm is %p sctx is %p and ta_ctx is %p\n",
+ crypto_ablkcipher_reqtfm(req), sctx, ta_ctx);
+
+ ta_ctx->sep_used = sep_dev;
+ ta_ctx->current_request = AES_CBC;
+ ta_ctx->current_hash_req = NULL;
+ ta_ctx->current_cypher_req = req;
+ ta_ctx->aes_encmode = SEP_AES_ENCRYPT;
+ ta_ctx->aes_opmode = SEP_AES_CBC;
+ ta_ctx->init_opcode = SEP_AES_INIT_OPCODE;
+ ta_ctx->block_opcode = SEP_AES_BLOCK_OPCODE;
+
+ /* lock necessary so that only one entity touches the queues */
+ spin_lock_irq(&queue_lock);
+ error = crypto_enqueue_request(&sep_queue, &req->base);
+
+ if ((error != 0) && (error != -EINPROGRESS))
+ pr_debug(" sep - crypto enqueue failed: %x\n",
+ error);
+ error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
+ sep_dequeuer, (void *)&sep_queue);
+ if (error1)
+ pr_debug(" sep - workqueue submit failed: %x\n",
+ error1);
+ spin_unlock_irq(&queue_lock);
+ /* We return result of crypto enqueue */
+ return error;
+}
+
+static int sep_aes_cbc_decrypt(struct ablkcipher_request *req)
+{
+ int error;
+ int error1;
+ struct this_task_ctx *ta_ctx = ablkcipher_request_ctx(req);
+ struct sep_system_ctx *sctx = crypto_ablkcipher_ctx(
+ crypto_ablkcipher_reqtfm(req));
+
+ pr_debug("sep - doing aes cbc decrypt\n");
+
+ pr_debug("tfm is %p sctx is %p and ta_ctx is %p\n",
+ crypto_ablkcipher_reqtfm(req), sctx, ta_ctx);
+
+ /* Clear out task context */
+ memset(ta_ctx, 0, sizeof(struct this_task_ctx));
+
+ ta_ctx->sep_used = sep_dev;
+ ta_ctx->current_request = AES_CBC;
+ ta_ctx->current_hash_req = NULL;
+ ta_ctx->current_cypher_req = req;
+ ta_ctx->aes_encmode = SEP_AES_DECRYPT;
+ ta_ctx->aes_opmode = SEP_AES_CBC;
+ ta_ctx->init_opcode = SEP_AES_INIT_OPCODE;
+ ta_ctx->block_opcode = SEP_AES_BLOCK_OPCODE;
+
+ /* lock necessary so that only one entity touches the queues */
+ spin_lock_irq(&queue_lock);
+ error = crypto_enqueue_request(&sep_queue, &req->base);
+
+ if ((error != 0) && (error != -EINPROGRESS))
+ pr_debug(" sep - crypto enqueue failed: %x\n",
+ error);
+ error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
+ sep_dequeuer, (void *)&sep_queue);
+ if (error1)
+ pr_debug(" sep - workqueue submit failed: %x\n",
+ error1);
+ spin_unlock_irq(&queue_lock);
+ /* We return result of crypto enqueue */
+ return error;
+}
+
+static int sep_des_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct sep_system_ctx *sctx = crypto_ablkcipher_ctx(tfm);
+ struct crypto_tfm *ctfm = crypto_ablkcipher_tfm(tfm);
+ u32 *flags = &ctfm->crt_flags;
+
+ pr_debug("sep des setkey\n");
+
+ switch (keylen) {
+ case DES_KEY_SIZE:
+ sctx->des_nbr_keys = DES_KEY_1;
+ break;
+ case DES_KEY_SIZE * 2:
+ sctx->des_nbr_keys = DES_KEY_2;
+ break;
+ case DES_KEY_SIZE * 3:
+ sctx->des_nbr_keys = DES_KEY_3;
+ break;
+ default:
+ pr_debug("invalid key size %x\n",
+ keylen);
+ return -EINVAL;
+ }
+
+ if ((*flags & CRYPTO_TFM_REQ_WEAK_KEY) &&
+ (sep_weak_key(key, keylen))) {
+
+ *flags |= CRYPTO_TFM_RES_WEAK_KEY;
+ pr_debug("weak key\n");
+ return -EINVAL;
+ }
+
+ memset(&sctx->key.des, 0, sizeof(struct sep_des_key));
+ memcpy(&sctx->key.des.key1, key, keylen);
+ sctx->keylen = keylen;
+ /* Indicate to encrypt/decrypt function to send key to SEP */
+ sctx->key_sent = 0;
+
+ return 0;
+}
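+
+/*
+ * sep_des_setkey() serves both the des and des3-ede algorithms
+ * registered below: one, two or three DES keys are accepted, and
+ * known weak keys are rejected when the tfm user requests weak-key
+ * checking via CRYPTO_TFM_REQ_WEAK_KEY.
+ */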
+
+static int sep_des_ecb_encrypt(struct ablkcipher_request *req)
+{
+ int error;
+ int error1;
+ struct this_task_ctx *ta_ctx = ablkcipher_request_ctx(req);
+
+ pr_debug("sep - doing des ecb encrypt\n");
+
+ /* Clear out task context */
+ memset(ta_ctx, 0, sizeof(struct this_task_ctx));
+
+ ta_ctx->sep_used = sep_dev;
+ ta_ctx->current_request = DES_ECB;
+ ta_ctx->current_hash_req = NULL;
+ ta_ctx->current_cypher_req = req;
+ ta_ctx->des_encmode = SEP_DES_ENCRYPT;
+ ta_ctx->des_opmode = SEP_DES_ECB;
+ ta_ctx->init_opcode = SEP_DES_INIT_OPCODE;
+ ta_ctx->block_opcode = SEP_DES_BLOCK_OPCODE;
+
+ /* lock necessary so that only one entity touches the queues */
+ spin_lock_irq(&queue_lock);
+ error = crypto_enqueue_request(&sep_queue, &req->base);
+
+ if ((error != 0) && (error != -EINPROGRESS))
+ pr_debug(" sep - crypto enqueue failed: %x\n",
+ error);
+ error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
+ sep_dequeuer, (void *)&sep_queue);
+ if (error1)
+ pr_debug(" sep - workqueue submit failed: %x\n",
+ error1);
+ spin_unlock_irq(&queue_lock);
+ /* We return result of crypto enqueue */
+ return error;
+}
+
+static int sep_des_ecb_decrypt(struct ablkcipher_request *req)
+{
+ int error;
+ int error1;
+ struct this_task_ctx *ta_ctx = ablkcipher_request_ctx(req);
+
+ pr_debug("sep - doing des ecb decrypt\n");
+
+ /* Clear out task context */
+ memset(ta_ctx, 0, sizeof(struct this_task_ctx));
+
+ ta_ctx->sep_used = sep_dev;
+ ta_ctx->current_request = DES_ECB;
+ ta_ctx->current_hash_req = NULL;
+ ta_ctx->current_cypher_req = req;
+ ta_ctx->des_encmode = SEP_DES_DECRYPT;
+ ta_ctx->des_opmode = SEP_DES_ECB;
+ ta_ctx->init_opcode = SEP_DES_INIT_OPCODE;
+ ta_ctx->block_opcode = SEP_DES_BLOCK_OPCODE;
+
+ /* lock necessary so that only one entity touches the queues */
+ spin_lock_irq(&queue_lock);
+ error = crypto_enqueue_request(&sep_queue, &req->base);
+
+ if ((error != 0) && (error != -EINPROGRESS))
+ pr_debug(" sep - crypto enqueue failed: %x\n",
+ error);
+ error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
+ sep_dequeuer, (void *)&sep_queue);
+ if (error1)
+ pr_debug(" sep - workqueue submit failed: %x\n",
+ error1);
+ spin_unlock_irq(&queue_lock);
+ /* We return result of crypto enqueue */
+ return error;
+}
+
+static int sep_des_cbc_encrypt(struct ablkcipher_request *req)
+{
+ int error;
+ int error1;
+ struct this_task_ctx *ta_ctx = ablkcipher_request_ctx(req);
+
+ pr_debug("sep - doing des cbc encrypt\n");
+
+ /* Clear out task context */
+ memset(ta_ctx, 0, sizeof(struct this_task_ctx));
+
+ ta_ctx->sep_used = sep_dev;
+ ta_ctx->current_request = DES_CBC;
+ ta_ctx->current_hash_req = NULL;
+ ta_ctx->current_cypher_req = req;
+ ta_ctx->des_encmode = SEP_DES_ENCRYPT;
+ ta_ctx->des_opmode = SEP_DES_CBC;
+ ta_ctx->init_opcode = SEP_DES_INIT_OPCODE;
+ ta_ctx->block_opcode = SEP_DES_BLOCK_OPCODE;
+
+ /* lock necessary so that only one entity touches the queues */
+ spin_lock_irq(&queue_lock);
+ error = crypto_enqueue_request(&sep_queue, &req->base);
+
+ if ((error != 0) && (error != -EINPROGRESS))
+ pr_debug(" sep - crypto enqueue failed: %x\n",
+ error);
+ error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
+ sep_dequeuer, (void *)&sep_queue);
+ if (error1)
+ pr_debug(" sep - workqueue submit failed: %x\n",
+ error1);
+ spin_unlock_irq(&queue_lock);
+ /* We return result of crypto enqueue */
+ return error;
+}
+
+static int sep_des_cbc_decrypt(struct ablkcipher_request *req)
+{
+ int error;
+ int error1;
+ struct this_task_ctx *ta_ctx = ablkcipher_request_ctx(req);
+
+ pr_debug("sep - doing des ecb decrypt\n");
+
+ /* Clear out task context */
+ memset(ta_ctx, 0, sizeof(struct this_task_ctx));
+
+ ta_ctx->sep_used = sep_dev;
+ ta_ctx->current_request = DES_CBC;
+ ta_ctx->current_hash_req = NULL;
+ ta_ctx->current_cypher_req = req;
+ ta_ctx->des_encmode = SEP_DES_DECRYPT;
+ ta_ctx->des_opmode = SEP_DES_CBC;
+ ta_ctx->init_opcode = SEP_DES_INIT_OPCODE;
+ ta_ctx->block_opcode = SEP_DES_BLOCK_OPCODE;
+
+ /* lock necessary so that only one entity touches the queues */
+ spin_lock_irq(&queue_lock);
+ error = crypto_enqueue_request(&sep_queue, &req->base);
+
+ if ((error != 0) && (error != -EINPROGRESS))
+ pr_debug(" sep - crypto enqueue failed: %x\n",
+ error);
+ error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
+ sep_dequeuer, (void *)&sep_queue);
+ if (error1)
+ pr_debug(" sep - workqueue submit failed: %x\n",
+ error1);
+ spin_unlock_irq(&queue_lock);
+ /* We return result of crypto enqueue */
+ return error;
+}
+
+static struct ahash_alg hash_algs[] = {
+{
+ .init = sep_sha1_init,
+ .update = sep_sha1_update,
+ .final = sep_sha1_final,
+ .digest = sep_sha1_digest,
+ .finup = sep_sha1_finup,
+ .halg = {
+ .digestsize = SHA1_DIGEST_SIZE,
+ .base = {
+ .cra_name = "sha1",
+ .cra_driver_name = "sha1-sep",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SHA1_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct sep_system_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = sep_hash_cra_init,
+ .cra_exit = sep_hash_cra_exit,
+ }
+ }
+},
+{
+ .init = sep_md5_init,
+ .update = sep_md5_update,
+ .final = sep_md5_final,
+ .digest = sep_md5_digest,
+ .finup = sep_md5_finup,
+ .halg = {
+ .digestsize = MD5_DIGEST_SIZE,
+ .base = {
+ .cra_name = "md5",
+ .cra_driver_name = "md5-sep",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct sep_system_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = sep_hash_cra_init,
+ .cra_exit = sep_hash_cra_exit,
+ }
+ }
+},
+{
+ .init = sep_sha224_init,
+ .update = sep_sha224_update,
+ .final = sep_sha224_final,
+ .digest = sep_sha224_digest,
+ .finup = sep_sha224_finup,
+ .halg = {
+ .digestsize = SHA224_DIGEST_SIZE,
+ .base = {
+ .cra_name = "sha224",
+ .cra_driver_name = "sha224-sep",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SHA224_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct sep_system_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = sep_hash_cra_init,
+ .cra_exit = sep_hash_cra_exit,
+ }
+ }
+},
+{
+ .init = sep_sha256_init,
+ .update = sep_sha256_update,
+ .final = sep_sha256_final,
+ .digest = sep_sha256_digest,
+ .finup = sep_sha256_finup,
+ .halg = {
+ .digestsize = SHA256_DIGEST_SIZE,
+ .base = {
+ .cra_name = "sha256",
+ .cra_driver_name = "sha256-sep",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SHA256_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct sep_system_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = sep_hash_cra_init,
+ .cra_exit = sep_hash_cra_exit,
+ }
+ }
+}
+};
+
+static struct crypto_alg crypto_algs[] = {
+{
+ .cra_name = "ecb(aes)",
+ .cra_driver_name = "ecb-aes-sep",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct sep_system_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = sep_crypto_init,
+ .cra_exit = sep_crypto_exit,
+ .cra_u.ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = sep_aes_setkey,
+ .encrypt = sep_aes_ecb_encrypt,
+ .decrypt = sep_aes_ecb_decrypt,
+ }
+},
+{
+ .cra_name = "cbc(aes)",
+ .cra_driver_name = "cbc-aes-sep",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct sep_system_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = sep_crypto_init,
+ .cra_exit = sep_crypto_exit,
+ .cra_u.ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = sep_aes_setkey,
+ .encrypt = sep_aes_cbc_encrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .decrypt = sep_aes_cbc_decrypt,
+ }
+},
+{
+ .cra_name = "ebc(des)",
+ .cra_driver_name = "ebc-des-sep",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct sep_system_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = sep_crypto_init,
+ .cra_exit = sep_crypto_exit,
+ .cra_u.ablkcipher = {
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .setkey = sep_des_setkey,
+ .encrypt = sep_des_ecb_encrypt,
+ .decrypt = sep_des_ecb_decrypt,
+ }
+},
+{
+ .cra_name = "cbc(des)",
+ .cra_driver_name = "cbc-des-sep",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct sep_system_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = sep_crypto_init,
+ .cra_exit = sep_crypto_exit,
+ .cra_u.ablkcipher = {
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .setkey = sep_des_setkey,
+ .encrypt = sep_des_cbc_encrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .decrypt = sep_des_cbc_decrypt,
+ }
+},
+{
+ .cra_name = "ebc(des3-ede)",
+ .cra_driver_name = "ebc-des3-ede-sep",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct sep_system_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = sep_crypto_init,
+ .cra_exit = sep_crypto_exit,
+ .cra_u.ablkcipher = {
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .setkey = sep_des_setkey,
+ .encrypt = sep_des_ecb_encrypt,
+ .decrypt = sep_des_ecb_decrypt,
+ }
+},
+{
+ .cra_name = "cbc(des3-ede)",
+ .cra_driver_name = "cbc-des3--ede-sep",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct sep_system_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = sep_crypto_init,
+ .cra_exit = sep_crypto_exit,
+ .cra_u.ablkcipher = {
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .setkey = sep_des_setkey,
+ .encrypt = sep_des_cbc_encrypt,
+ .decrypt = sep_des_cbc_decrypt,
+ }
+}
+};
+
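+/*
+ * Register the SEP-backed algorithms with the kernel crypto API.
+ * The completion tasklet, the request queue and the single-threaded
+ * workqueue are set up first; if any registration fails, everything
+ * registered so far is unregistered before the error is returned.
+ */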
+int sep_crypto_setup(void)
+{
+ int err, i, j, k;
+
+ tasklet_init(&sep_dev->finish_tasklet, sep_finish,
+ (unsigned long)sep_dev);
+
+ crypto_init_queue(&sep_queue, SEP_QUEUE_LENGTH);
+
+ sep_dev->workqueue = create_singlethread_workqueue(
+ "sep_crypto_workqueue");
+ if (!sep_dev->workqueue) {
+ dev_warn(&sep_dev->pdev->dev, "cant create workqueue\n");
+ return -ENOMEM;
+ }
+
+ spin_lock_init(&queue_lock);
+
+ for (i = 0; i < ARRAY_SIZE(hash_algs); i++) {
+ err = crypto_register_ahash(&hash_algs[i]);
+ if (err)
+ goto err_algs;
+ }
+
+ for (j = 0; j < ARRAY_SIZE(crypto_algs); j++) {
+ err = crypto_register_alg(&crypto_algs[j]);
+ if (err)
+ goto err_crypto_algs;
+ }
+
+ return err;
+
+err_algs:
+ for (k = 0; k < i; k++)
+ crypto_unregister_ahash(&hash_algs[k]);
+ return err;
+
+err_crypto_algs:
+ for (k = 0; k < j; k++)
+ crypto_unregister_alg(&crypto_algs[k]);
+ goto err_algs;
+}
+
+void sep_crypto_takedown(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(hash_algs); i++)
+ crypto_unregister_ahash(&hash_algs[i]);
+ for (i = 0; i < ARRAY_SIZE(crypto_algs); i++)
+ crypto_unregister_alg(&crypto_algs[i]);
+
+ tasklet_kill(&sep_dev->finish_tasklet);
+}
+
+#endif
diff --git a/drivers/staging/sep/sep_crypto.h b/drivers/staging/sep/sep_crypto.h
new file mode 100644
index 00000000000..155c3c9b87c
--- /dev/null
+++ b/drivers/staging/sep/sep_crypto.h
@@ -0,0 +1,359 @@
+/*
+ *
+ * sep_crypto.h - Crypto interface structures
+ *
+ * Copyright(c) 2009-2011 Intel Corporation. All rights reserved.
+ * Contributions(c) 2009-2010 Discretix. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * CONTACTS:
+ *
+ * Mark Allyn mark.a.allyn@intel.com
+ * Jayant Mangalampalli jayant.mangalampalli@intel.com
+ *
+ * CHANGES:
+ *
+ * 2009.06.26 Initial publish
+ * 2011.02.22 Enable Kernel Crypto
+ *
+ */
+
+/* Constants for SEP (from vendor) */
+#define SEP_START_MSG_TOKEN 0x02558808
+
+#define SEP_DES_IV_SIZE_WORDS 2
+#define SEP_DES_IV_SIZE_BYTES (SEP_DES_IV_SIZE_WORDS * \
+ sizeof(u32))
+#define SEP_DES_KEY_SIZE_WORDS 2
+#define SEP_DES_KEY_SIZE_BYTES (SEP_DES_KEY_SIZE_WORDS * \
+ sizeof(u32))
+#define SEP_DES_BLOCK_SIZE 8
+#define SEP_DES_DUMMY_SIZE 16
+
+#define SEP_DES_INIT_OPCODE 0x10
+#define SEP_DES_BLOCK_OPCODE 0x11
+
+#define SEP_AES_BLOCK_SIZE_WORDS 4
+#define SEP_AES_BLOCK_SIZE_BYTES \
+ (SEP_AES_BLOCK_SIZE_WORDS * sizeof(u32))
+
+#define SEP_AES_DUMMY_BLOCK_SIZE 16
+#define SEP_AES_IV_SIZE_WORDS SEP_AES_BLOCK_SIZE_WORDS
+#define SEP_AES_IV_SIZE_BYTES \
+ (SEP_AES_IV_SIZE_WORDS * sizeof(u32))
+
+#define SEP_AES_KEY_128_SIZE 16
+#define SEP_AES_KEY_192_SIZE 24
+#define SEP_AES_KEY_256_SIZE 32
+#define SEP_AES_KEY_512_SIZE 64
+#define SEP_AES_MAX_KEY_SIZE_WORDS 16
+#define SEP_AES_MAX_KEY_SIZE_BYTES \
+ (SEP_AES_MAX_KEY_SIZE_WORDS * sizeof(u32))
+
+#define SEP_AES_WRAP_MIN_SIZE 8
+#define SEP_AES_WRAP_MAX_SIZE 0x10000000
+
+#define SEP_AES_WRAP_BLOCK_SIZE_WORDS 2
+#define SEP_AES_WRAP_BLOCK_SIZE_BYTES \
+ (SEP_AES_WRAP_BLOCK_SIZE_WORDS * sizeof(u32))
+
+#define SEP_AES_SECRET_RKEK1 0x1
+#define SEP_AES_SECRET_RKEK2 0x2
+
+#define SEP_AES_INIT_OPCODE 0x2
+#define SEP_AES_BLOCK_OPCODE 0x3
+#define SEP_AES_FINISH_OPCODE 0x4
+#define SEP_AES_WRAP_OPCODE 0x6
+#define SEP_AES_UNWRAP_OPCODE 0x7
+#define SEP_AES_XTS_FINISH_OPCODE 0x8
+
+#define SEP_HASH_RESULT_SIZE_WORDS 16
+#define SEP_MD5_DIGEST_SIZE_WORDS 4
+#define SEP_MD5_DIGEST_SIZE_BYTES \
+ (SEP_MD5_DIGEST_SIZE_WORDS * sizeof(u32))
+#define SEP_SHA1_DIGEST_SIZE_WORDS 5
+#define SEP_SHA1_DIGEST_SIZE_BYTES \
+ (SEP_SHA1_DIGEST_SIZE_WORDS * sizeof(u32))
+#define SEP_SHA224_DIGEST_SIZE_WORDS 7
+#define SEP_SHA224_DIGEST_SIZE_BYTES \
+ (SEP_SHA224_DIGEST_SIZE_WORDS * sizeof(u32))
+#define SEP_SHA256_DIGEST_SIZE_WORDS 8
+#define SEP_SHA256_DIGEST_SIZE_BYTES \
+ (SEP_SHA256_DIGEST_SIZE_WORDS * sizeof(u32))
+#define SEP_SHA384_DIGEST_SIZE_WORDS 12
+#define SEP_SHA384_DIGEST_SIZE_BYTES \
+ (SEP_SHA384_DIGEST_SIZE_WORDS * sizeof(u32))
+#define SEP_SHA512_DIGEST_SIZE_WORDS 16
+#define SEP_SHA512_DIGEST_SIZE_BYTES \
+ (SEP_SHA512_DIGEST_SIZE_WORDS * sizeof(u32))
+#define SEP_HASH_BLOCK_SIZE_WORDS 16
+#define SEP_HASH_BLOCK_SIZE_BYTES \
+ (SEP_HASH_BLOCK_SIZE_WORDS * sizeof(u32))
+#define SEP_SHA2_BLOCK_SIZE_WORDS 32
+#define SEP_SHA2_BLOCK_SIZE_BYTES \
+ (SEP_SHA2_BLOCK_SIZE_WORDS * sizeof(u32))
+
+#define SEP_HASH_INIT_OPCODE 0x20
+#define SEP_HASH_UPDATE_OPCODE 0x21
+#define SEP_HASH_FINISH_OPCODE 0x22
+#define SEP_HASH_SINGLE_OPCODE 0x23
+
+#define SEP_HOST_ERROR 0x0b000000
+#define SEP_OK 0x0
+#define SEP_INVALID_START (SEP_HOST_ERROR + 0x3)
+#define SEP_WRONG_OPCODE (SEP_HOST_ERROR + 0x1)
+
+#define SEP_TRANSACTION_WAIT_TIME 5
+
+#define SEP_QUEUE_LENGTH 2
+/* Macros */
+#ifndef __LITTLE_ENDIAN
+#define CHG_ENDIAN(val) \
+ (((val) >> 24) | \
+ (((val) & 0x00FF0000) >> 8) | \
+ (((val) & 0x0000FF00) << 8) | \
+ (((val) & 0x000000FF) << 24))
+#else
+#define CHG_ENDIAN(val) val
+#endif
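+/*
+ * Example: on a big-endian host CHG_ENDIAN(0x12345678) evaluates to
+ * 0x78563412; on a little-endian host the macro is the identity.
+ */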
+/* Enums for SEP (from vendor) */
+enum des_numkey {
+ DES_KEY_1 = 1,
+ DES_KEY_2 = 2,
+ DES_KEY_3 = 3,
+ SEP_NUMKEY_OPTIONS,
+ SEP_NUMKEY_LAST = 0x7fffffff,
+};
+
+enum des_enc_mode {
+ SEP_DES_ENCRYPT = 0,
+ SEP_DES_DECRYPT = 1,
+ SEP_DES_ENC_OPTIONS,
+ SEP_DES_ENC_LAST = 0x7fffffff,
+};
+
+enum des_op_mode {
+ SEP_DES_ECB = 0,
+ SEP_DES_CBC = 1,
+ SEP_OP_OPTIONS,
+ SEP_OP_LAST = 0x7fffffff,
+};
+
+enum aes_keysize {
+ AES_128 = 0,
+ AES_192 = 1,
+ AES_256 = 2,
+ AES_512 = 3,
+ AES_SIZE_OPTIONS,
+ AES_SIZE_LAST = 0x7FFFFFFF,
+};
+
+enum aes_enc_mode {
+ SEP_AES_ENCRYPT = 0,
+ SEP_AES_DECRYPT = 1,
+ SEP_AES_ENC_OPTIONS,
+ SEP_AES_ENC_LAST = 0x7FFFFFFF,
+};
+
+enum aes_op_mode {
+ SEP_AES_ECB = 0,
+ SEP_AES_CBC = 1,
+ SEP_AES_MAC = 2,
+ SEP_AES_CTR = 3,
+ SEP_AES_XCBC = 4,
+ SEP_AES_CMAC = 5,
+ SEP_AES_XTS = 6,
+ SEP_AES_OP_OPTIONS,
+ SEP_AES_OP_LAST = 0x7FFFFFFF,
+};
+
+enum hash_op_mode {
+ SEP_HASH_SHA1 = 0,
+ SEP_HASH_SHA224 = 1,
+ SEP_HASH_SHA256 = 2,
+ SEP_HASH_SHA384 = 3,
+ SEP_HASH_SHA512 = 4,
+ SEP_HASH_MD5 = 5,
+ SEP_HASH_OPTIONS,
+ SEP_HASH_LAST_MODE = 0x7FFFFFFF,
+};
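+
+/*
+ * The *_LAST = 0x7fffffff sentinels force the compiler to give these
+ * enums a full 32-bit representation, which matters because they are
+ * embedded in structures exchanged with the SEP firmware.
+ */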
+
+/* Structures for SEP (from vendor) */
+struct sep_des_internal_key {
+ u32 key1[SEP_DES_KEY_SIZE_WORDS];
+ u32 key2[SEP_DES_KEY_SIZE_WORDS];
+ u32 key3[SEP_DES_KEY_SIZE_WORDS];
+};
+
+struct sep_des_internal_context {
+ u32 iv_context[SEP_DES_IV_SIZE_WORDS];
+ struct sep_des_internal_key context_key;
+ enum des_numkey nbr_keys;
+ enum des_enc_mode encryption;
+ enum des_op_mode operation;
+ u8 dummy_block[SEP_DES_DUMMY_SIZE];
+};
+
+struct sep_des_private_context {
+ u32 valid_tag;
+ u32 iv;
+ u8 ctx_buf[sizeof(struct sep_des_internal_context)];
+};
+
+/* This is the structure passed to SEP via msg area */
+struct sep_des_key {
+ u32 key1[SEP_DES_KEY_SIZE_WORDS];
+ u32 key2[SEP_DES_KEY_SIZE_WORDS];
+ u32 key3[SEP_DES_KEY_SIZE_WORDS];
+ u32 pad[SEP_DES_KEY_SIZE_WORDS];
+};
+
+struct sep_aes_internal_context {
+ u32 aes_ctx_iv[SEP_AES_IV_SIZE_WORDS];
+ u32 aes_ctx_key[SEP_AES_MAX_KEY_SIZE_WORDS / 2];
+ enum aes_keysize keysize;
+ enum aes_enc_mode encmode;
+ enum aes_op_mode opmode;
+ u8 secret_key;
+ u32 no_add_blocks;
+ u32 last_block_size;
+ u32 last_block[SEP_AES_BLOCK_SIZE_WORDS];
+ u32 prev_iv[SEP_AES_BLOCK_SIZE_WORDS];
+ u32 remaining_size;
+ union {
+ struct {
+ u32 dkey1[SEP_AES_BLOCK_SIZE_WORDS];
+ u32 dkey2[SEP_AES_BLOCK_SIZE_WORDS];
+ u32 dkey3[SEP_AES_BLOCK_SIZE_WORDS];
+ } cmac_data;
+ struct {
+ u32 xts_key[SEP_AES_MAX_KEY_SIZE_WORDS / 2];
+ u32 temp1[SEP_AES_BLOCK_SIZE_WORDS];
+ u32 temp2[SEP_AES_BLOCK_SIZE_WORDS];
+ } xtx_data;
+ } s_data;
+ u8 dummy_block[SEP_AES_DUMMY_BLOCK_SIZE];
+};
+
+struct sep_aes_private_context {
+ u32 valid_tag;
+ u32 aes_iv;
+ u32 op_mode;
+ u8 cbuff[sizeof(struct sep_aes_internal_context)];
+};
+
+struct sep_hash_internal_context {
+ u32 hash_result[SEP_HASH_RESULT_SIZE_WORDS];
+ enum hash_op_mode hash_opmode;
+ u32 previous_data[SEP_SHA2_BLOCK_SIZE_WORDS];
+ u16 prev_update_bytes;
+ u32 total_proc_128bit[4];
+ u16 op_mode_block_size;
+ u8 dummy_aes_block[SEP_AES_DUMMY_BLOCK_SIZE];
+};
+
+struct sep_hash_private_context {
+ u32 valid_tag;
+ u32 iv;
+ u8 internal_context[sizeof(struct sep_hash_internal_context)];
+};
+
+union key_t {
+ struct sep_des_key des;
+ u32 aes[SEP_AES_MAX_KEY_SIZE_WORDS];
+};
+
+/* Context structures for crypto API */
+/**
+ * Structure for this current task context
+ * The same structure is used for both hash
+ * and cipher requests in order to avoid
+ * duplicating code for work that is common to
+ * both kinds of operation. We cannot trust that
+ * the system context is not pulled out from
+ * under us between operations, so all critical
+ * state such as data pointers must live in a
+ * context that is exclusive to this particular
+ * task at hand.
+ */
+struct this_task_ctx {
+ struct sep_device *sep_used;
+ u32 done;
+ unsigned char iv[100];
+ enum des_enc_mode des_encmode;
+ enum des_op_mode des_opmode;
+ enum aes_enc_mode aes_encmode;
+ enum aes_op_mode aes_opmode;
+ u32 init_opcode;
+ u32 block_opcode;
+ size_t data_length;
+ size_t ivlen;
+ struct ablkcipher_walk walk;
+ int i_own_sep; /* Do I have custody of the sep? */
+ struct sep_call_status call_status;
+ struct build_dcb_struct_kernel dcb_input_data;
+ struct sep_dma_context *dma_ctx;
+ void *dmatables_region;
+ size_t nbytes;
+ struct sep_dcblock *dcb_region;
+ struct sep_queue_info *queue_elem;
+ int msg_len_words;
+ unsigned char msg[SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES];
+ void *msgptr;
+ struct scatterlist *src_sg;
+ struct scatterlist *dst_sg;
+ struct scatterlist *src_sg_hold;
+ struct scatterlist *dst_sg_hold;
+ struct ahash_request *current_hash_req;
+ struct ablkcipher_request *current_cypher_req;
+ enum type_of_request current_request;
+ int digest_size_words;
+ int digest_size_bytes;
+ int block_size_words;
+ int block_size_bytes;
+ enum hash_op_mode hash_opmode;
+ enum hash_stage current_hash_stage;
+ /**
+ * Note that this is a pointer. The are_we_done_yet variable is
+ * allocated by the task function. This way, even if the kernel
+ * crypto infrastructure has grabbed the task structure out from
+ * under us, the task function can still see this variable.
+ */
+ int *are_we_done_yet;
+ unsigned long end_time;
+};
+
+struct sep_system_ctx {
+ union key_t key;
+ size_t keylen;
+ int key_sent;
+ enum des_numkey des_nbr_keys;
+ enum aes_keysize aes_key_size;
+ unsigned long end_time;
+ struct sep_des_private_context des_private_ctx;
+ struct sep_aes_private_context aes_private_ctx;
+ struct sep_hash_private_context hash_private_ctx;
+};
+
+/* work queue structures */
+struct sep_work_struct {
+ struct work_struct work;
+ void (*callback)(void *);
+ void *data;
+};
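+
+/*
+ * The work item queued via sep_submit_work() is expected to invoke
+ * callback(data); the crypto entry points in sep_crypto.c use it to
+ * run sep_dequeuer() on sep_queue from the driver workqueue.
+ */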
+
+/* Functions */
+int sep_crypto_setup(void);
+void sep_crypto_takedown(void);
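+
+/*
+ * Illustrative sketch only (not part of the driver): once
+ * sep_crypto_setup() has registered the algorithms, a kernel client
+ * reaches the SEP through the regular crypto API. Assuming a
+ * sleepable caller and placeholder names (data, len, my_done_cb,
+ * my_ctx), a one-shot SHA-1 digest looks roughly like:
+ *
+ *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha1", 0, 0);
+ *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
+ *	struct scatterlist sg;
+ *	u8 result[SHA1_DIGEST_SIZE];
+ *
+ *	sg_init_one(&sg, data, len);
+ *	ahash_request_set_callback(req, 0, my_done_cb, my_ctx);
+ *	ahash_request_set_crypt(req, &sg, result, len);
+ *	err = crypto_ahash_digest(req);
+ *
+ * crypto_ahash_digest() normally returns -EINPROGRESS, and my_done_cb
+ * runs when the SEP completes the request.
+ */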
diff --git a/drivers/staging/sep/sep_dev.h b/drivers/staging/sep/sep_dev.h
index 696ab0dd2b7..5f6a07f59dd 100644
--- a/drivers/staging/sep/sep_dev.h
+++ b/drivers/staging/sep/sep_dev.h
@@ -5,8 +5,8 @@
*
* sep_dev.h - Security Processor Device Structures
*
- * Copyright(c) 2009,2010 Intel Corporation. All rights reserved.
- * Contributions(c) 2009,2010 Discretix. All rights reserved.
+ * Copyright(c) 2009-2011 Intel Corporation. All rights reserved.
+ * Contributions(c) 2009-2011 Discretix. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
@@ -28,6 +28,7 @@
*
* CHANGES
* 2010.09.14 upgrade to Medfield
+ * 2011.02.22 enable kernel crypto
*/
struct sep_device {
@@ -36,33 +37,21 @@ struct sep_device {
/* character device file */
struct cdev sep_cdev;
- struct cdev sep_daemon_cdev;
- struct cdev sep_singleton_cdev;
/* devices (using misc dev) */
struct miscdevice miscdev_sep;
- struct miscdevice miscdev_singleton;
- struct miscdevice miscdev_daemon;
/* major / minor numbers of device */
dev_t sep_devno;
- dev_t sep_daemon_devno;
- dev_t sep_singleton_devno;
-
- struct mutex sep_mutex;
- struct mutex ioctl_mutex;
+ /* guards command sent counter */
spinlock_t snd_rply_lck;
+ /* guards driver memory usage in fastcall if */
+ struct semaphore sep_doublebuf;
/* flags to indicate use and lock status of sep */
u32 pid_doing_transaction;
unsigned long in_use_flags;
- /* request daemon alread open */
- unsigned long request_daemon_open;
-
- /* 1 = Moorestown; 0 = Medfield */
- int mrst;
-
/* address of the shared memory allocated during init for SEP driver
(coherent alloc) */
dma_addr_t shared_bus;
@@ -74,36 +63,77 @@ struct sep_device {
dma_addr_t reg_physical_end;
void __iomem *reg_addr;
- /* wait queue head (event) of the driver */
- wait_queue_head_t event;
- wait_queue_head_t event_request_daemon;
- wait_queue_head_t event_mmap;
+ /* wait queue heads of the driver */
+ wait_queue_head_t event_interrupt;
+ wait_queue_head_t event_transactions;
- struct sep_caller_id_entry
- caller_id_table[SEP_CALLER_ID_TABLE_NUM_ENTRIES];
+ struct list_head sep_queue_status;
+ u32 sep_queue_num;
+ spinlock_t sep_queue_lock;
- /* access flag for singleton device */
- unsigned long singleton_access_flag;
+ /* Is this in use? */
+ u32 in_use;
+
+ /* indicates whether power save is set up */
+ u32 power_save_setup;
+
+ /* Power state */
+ u32 power_state;
/* transaction counter that coordinates the
transactions between SEP and HOST */
unsigned long send_ct;
/* counter for the messages from sep */
unsigned long reply_ct;
- /* counter for the number of bytes allocated in the pool for the
- current transaction */
- long data_pool_bytes_allocated;
- u32 num_of_data_allocations;
+ /* The following are used for kernel crypto client requests */
+ u32 in_kernel; /* Set for kernel client request */
+ struct tasklet_struct finish_tasklet;
+ enum type_of_request current_request;
+ enum hash_stage current_hash_stage;
+ struct ahash_request *current_hash_req;
+ struct ablkcipher_request *current_cypher_req;
+ struct this_task_ctx *ta_ctx;
+ struct workqueue_struct *workqueue;
+};
- /* number of the lli tables created in the current transaction */
- u32 num_lli_tables_created;
+extern struct sep_device *sep_dev;
- /* number of data control blocks */
- u32 nr_dcb_creat;
+/**
+ * struct sep_msgarea_hdr - SEP message header for a transaction
+ * @reserved: reserved memory (two words)
+ * @token: SEP message token
+ * @msg_len: message length
+ * @opcode: message opcode
+ */
+struct sep_msgarea_hdr {
+ u32 reserved[2];
+ u32 token;
+ u32 msg_len;
+ u32 opcode;
+};
- struct sep_dma_resource dma_res_arr[SEP_MAX_NUM_SYNC_DMA_OPS];
+/**
+ * struct sep_queue_data - data maintained in the status queue for a transaction
+ * @opcode: transaction opcode
+ * @size: message size
+ * @pid: owner process
+ * @name: owner process name
+ */
+struct sep_queue_data {
+ u32 opcode;
+ u32 size;
+ s32 pid;
+ u8 name[TASK_COMM_LEN];
+};
+/**
+ * struct sep_queue_info - status queue entry for a single transaction
+ * @list: list head
+ * @data: status data for this transaction
+ */
+struct sep_queue_info {
+ struct list_head list;
+ struct sep_queue_data data;
};
static inline void sep_write_reg(struct sep_device *dev, int reg, u32 value)
diff --git a/drivers/staging/sep/sep_driver.c b/drivers/staging/sep/sep_driver.c
deleted file mode 100644
index 6b3d156d414..00000000000
--- a/drivers/staging/sep/sep_driver.c
+++ /dev/null
@@ -1,2932 +0,0 @@
-/*
- *
- * sep_driver.c - Security Processor Driver main group of functions
- *
- * Copyright(c) 2009,2010 Intel Corporation. All rights reserved.
- * Contributions(c) 2009,2010 Discretix. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59
- * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- * CONTACTS:
- *
- * Mark Allyn mark.a.allyn@intel.com
- * Jayant Mangalampalli jayant.mangalampalli@intel.com
- *
- * CHANGES:
- *
- * 2009.06.26 Initial publish
- * 2010.09.14 Upgrade to Medfield
- *
- */
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/miscdevice.h>
-#include <linux/fs.h>
-#include <linux/cdev.h>
-#include <linux/kdev_t.h>
-#include <linux/mutex.h>
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/poll.h>
-#include <linux/wait.h>
-#include <linux/pci.h>
-#include <linux/firmware.h>
-#include <linux/slab.h>
-#include <linux/ioctl.h>
-#include <asm/current.h>
-#include <linux/ioport.h>
-#include <linux/io.h>
-#include <linux/interrupt.h>
-#include <linux/pagemap.h>
-#include <asm/cacheflush.h>
-#include <linux/delay.h>
-#include <linux/jiffies.h>
-#include <linux/rar_register.h>
-
-#include "sep_driver_hw_defs.h"
-#include "sep_driver_config.h"
-#include "sep_driver_api.h"
-#include "sep_dev.h"
-
-/*----------------------------------------
- DEFINES
------------------------------------------*/
-
-#define SEP_RAR_IO_MEM_REGION_SIZE 0x40000
-
-/*--------------------------------------------
- GLOBAL variables
---------------------------------------------*/
-
-/* Keep this a single static object for now to keep the conversion easy */
-
-static struct sep_device *sep_dev;
-
-/**
- * sep_dump_message - dump the message that is pending
- * @sep: SEP device
- */
-static void sep_dump_message(struct sep_device *sep)
-{
- int count;
- u32 *p = sep->shared_addr;
- for (count = 0; count < 12 * 4; count += 4)
- dev_dbg(&sep->pdev->dev, "Word %d of the message is %x\n",
- count, *p++);
-}
-
-/**
- * sep_map_and_alloc_shared_area - allocate shared block
- * @sep: security processor
- * @size: size of shared area
- */
-static int sep_map_and_alloc_shared_area(struct sep_device *sep)
-{
- sep->shared_addr = dma_alloc_coherent(&sep->pdev->dev,
- sep->shared_size,
- &sep->shared_bus, GFP_KERNEL);
-
- if (!sep->shared_addr) {
- dev_warn(&sep->pdev->dev,
- "shared memory dma_alloc_coherent failed\n");
- return -ENOMEM;
- }
- dev_dbg(&sep->pdev->dev,
- "shared_addr %zx bytes @%p (bus %llx)\n",
- sep->shared_size, sep->shared_addr,
- (unsigned long long)sep->shared_bus);
- return 0;
-}
-
-/**
- * sep_unmap_and_free_shared_area - free shared block
- * @sep: security processor
- */
-static void sep_unmap_and_free_shared_area(struct sep_device *sep)
-{
- dma_free_coherent(&sep->pdev->dev, sep->shared_size,
- sep->shared_addr, sep->shared_bus);
-}
-
-/**
- * sep_shared_bus_to_virt - convert bus/virt addresses
- * @sep: pointer to struct sep_device
- * @bus_address: address to convert
- *
- * Returns virtual address inside the shared area according
- * to the bus address.
- */
-static void *sep_shared_bus_to_virt(struct sep_device *sep,
- dma_addr_t bus_address)
-{
- return sep->shared_addr + (bus_address - sep->shared_bus);
-}
-
-/**
- * open function for the singleton driver
- * @inode_ptr struct inode *
- * @file_ptr struct file *
- *
- * Called when the user opens the singleton device interface
- */
-static int sep_singleton_open(struct inode *inode_ptr, struct file *file_ptr)
-{
- struct sep_device *sep;
-
- /*
- * Get the SEP device structure and use it for the
- * private_data field in filp for other methods
- */
- sep = sep_dev;
-
- file_ptr->private_data = sep;
-
- if (test_and_set_bit(0, &sep->singleton_access_flag))
- return -EBUSY;
- return 0;
-}
-
-/**
- * sep_open - device open method
- * @inode: inode of SEP device
- * @filp: file handle to SEP device
- *
- * Open method for the SEP device. Called when userspace opens
- * the SEP device node.
- *
- * Returns zero on success otherwise an error code.
- */
-static int sep_open(struct inode *inode, struct file *filp)
-{
- struct sep_device *sep;
-
- /*
- * Get the SEP device structure and use it for the
- * private_data field in filp for other methods
- */
- sep = sep_dev;
- filp->private_data = sep;
-
- /* Anyone can open; locking takes place at transaction level */
- return 0;
-}
-
-/**
- * sep_singleton_release - close a SEP singleton device
- * @inode: inode of SEP device
- * @filp: file handle being closed
- *
- * Called on the final close of a SEP device. As the open protects against
- * multiple simultaenous opens that means this method is called when the
- * final reference to the open handle is dropped.
- */
-static int sep_singleton_release(struct inode *inode, struct file *filp)
-{
- struct sep_device *sep = filp->private_data;
-
- clear_bit(0, &sep->singleton_access_flag);
- return 0;
-}
-
-/**
- * sep_request_daemon_open - request daemon open method
- * @inode: inode of SEP device
- * @filp: file handle to SEP device
- *
- * Open method for the SEP request daemon. Called when
- * request daemon in userspace opens the SEP device node.
- *
- * Returns zero on success otherwise an error code.
- */
-static int sep_request_daemon_open(struct inode *inode, struct file *filp)
-{
- struct sep_device *sep = sep_dev;
- int error = 0;
-
- filp->private_data = sep;
-
- /* There is supposed to be only one request daemon */
- if (test_and_set_bit(0, &sep->request_daemon_open))
- error = -EBUSY;
- return error;
-}
-
-/**
- * sep_request_daemon_release - close a SEP daemon
- * @inode: inode of SEP device
- * @filp: file handle being closed
- *
- * Called on the final close of a SEP daemon.
- */
-static int sep_request_daemon_release(struct inode *inode, struct file *filp)
-{
- struct sep_device *sep = filp->private_data;
-
- dev_dbg(&sep->pdev->dev, "Request daemon release for pid %d\n",
- current->pid);
-
- /* Clear the request_daemon_open flag */
- clear_bit(0, &sep->request_daemon_open);
- return 0;
-}
-
-/**
- * sep_req_daemon_send_reply_command_handler - poke the SEP
- * @sep: struct sep_device *
- *
- * This function raises interrupt to SEPm that signals that is has a
- * new command from HOST
- */
-static int sep_req_daemon_send_reply_command_handler(struct sep_device *sep)
-{
- unsigned long lck_flags;
-
- sep_dump_message(sep);
-
- /* Counters are lockable region */
- spin_lock_irqsave(&sep->snd_rply_lck, lck_flags);
- sep->send_ct++;
- sep->reply_ct++;
-
- /* Send the interrupt to SEP */
- sep_write_reg(sep, HW_HOST_HOST_SEP_GPR2_REG_ADDR, sep->send_ct);
- sep->send_ct++;
-
- spin_unlock_irqrestore(&sep->snd_rply_lck, lck_flags);
-
- dev_dbg(&sep->pdev->dev,
- "sep_req_daemon_send_reply send_ct %lx reply_ct %lx\n",
- sep->send_ct, sep->reply_ct);
-
- return 0;
-}
-
-
-/**
- * sep_free_dma_table_data_handler - free DMA table
- * @sep: pointere to struct sep_device
- *
- * Handles the request to free DMA table for synchronic actions
- */
-static int sep_free_dma_table_data_handler(struct sep_device *sep)
-{
- int count;
- int dcb_counter;
- /* Pointer to the current dma_resource struct */
- struct sep_dma_resource *dma;
-
- for (dcb_counter = 0; dcb_counter < sep->nr_dcb_creat; dcb_counter++) {
- dma = &sep->dma_res_arr[dcb_counter];
-
- /* Unmap and free input map array */
- if (dma->in_map_array) {
- for (count = 0; count < dma->in_num_pages; count++) {
- dma_unmap_page(&sep->pdev->dev,
- dma->in_map_array[count].dma_addr,
- dma->in_map_array[count].size,
- DMA_TO_DEVICE);
- }
- kfree(dma->in_map_array);
- }
-
- /* Unmap output map array, DON'T free it yet */
- if (dma->out_map_array) {
- for (count = 0; count < dma->out_num_pages; count++) {
- dma_unmap_page(&sep->pdev->dev,
- dma->out_map_array[count].dma_addr,
- dma->out_map_array[count].size,
- DMA_FROM_DEVICE);
- }
- kfree(dma->out_map_array);
- }
-
- /* Free page cache for output */
- if (dma->in_page_array) {
- for (count = 0; count < dma->in_num_pages; count++) {
- flush_dcache_page(dma->in_page_array[count]);
- page_cache_release(dma->in_page_array[count]);
- }
- kfree(dma->in_page_array);
- }
-
- if (dma->out_page_array) {
- for (count = 0; count < dma->out_num_pages; count++) {
- if (!PageReserved(dma->out_page_array[count]))
- SetPageDirty(dma->out_page_array[count]);
- flush_dcache_page(dma->out_page_array[count]);
- page_cache_release(dma->out_page_array[count]);
- }
- kfree(dma->out_page_array);
- }
-
- /* Reset all the values */
- dma->in_page_array = NULL;
- dma->out_page_array = NULL;
- dma->in_num_pages = 0;
- dma->out_num_pages = 0;
- dma->in_map_array = NULL;
- dma->out_map_array = NULL;
- dma->in_map_num_entries = 0;
- dma->out_map_num_entries = 0;
- }
-
- sep->nr_dcb_creat = 0;
- sep->num_lli_tables_created = 0;
-
- return 0;
-}
-
-/**
- * sep_request_daemon_mmap - maps the shared area to user space
- * @filp: pointer to struct file
- * @vma: pointer to vm_area_struct
- *
- * Called by the kernel when the daemon attempts an mmap() syscall
- * using our handle.
- */
-static int sep_request_daemon_mmap(struct file *filp,
- struct vm_area_struct *vma)
-{
- struct sep_device *sep = filp->private_data;
- dma_addr_t bus_address;
- int error = 0;
-
- if ((vma->vm_end - vma->vm_start) > SEP_DRIVER_MMMAP_AREA_SIZE) {
- error = -EINVAL;
- goto end_function;
- }
-
- /* Get physical address */
- bus_address = sep->shared_bus;
-
- if (remap_pfn_range(vma, vma->vm_start, bus_address >> PAGE_SHIFT,
- vma->vm_end - vma->vm_start, vma->vm_page_prot)) {
-
- dev_warn(&sep->pdev->dev, "remap_page_range failed\n");
- error = -EAGAIN;
- goto end_function;
- }
-
-end_function:
- return error;
-}
-
-/**
- * sep_request_daemon_poll - poll implementation
- * @sep: struct sep_device * for current SEP device
- * @filp: struct file * for open file
- * @wait: poll_table * for poll
- *
- * Called when our device is part of a poll() or select() syscall
- */
-static unsigned int sep_request_daemon_poll(struct file *filp,
- poll_table *wait)
-{
- u32 mask = 0;
- /* GPR2 register */
- u32 retval2;
- unsigned long lck_flags;
- struct sep_device *sep = filp->private_data;
-
- poll_wait(filp, &sep->event_request_daemon, wait);
-
- dev_dbg(&sep->pdev->dev, "daemon poll: send_ct is %lx reply ct is %lx\n",
- sep->send_ct, sep->reply_ct);
-
- spin_lock_irqsave(&sep->snd_rply_lck, lck_flags);
- /* Check if the data is ready */
- if (sep->send_ct == sep->reply_ct) {
- spin_unlock_irqrestore(&sep->snd_rply_lck, lck_flags);
-
- retval2 = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
- dev_dbg(&sep->pdev->dev,
- "daemon poll: data check (GPR2) is %x\n", retval2);
-
- /* Check if PRINT request */
- if ((retval2 >> 30) & 0x1) {
- dev_dbg(&sep->pdev->dev, "daemon poll: PRINTF request in\n");
- mask |= POLLIN;
- goto end_function;
- }
- /* Check if NVS request */
- if (retval2 >> 31) {
- dev_dbg(&sep->pdev->dev, "daemon poll: NVS request in\n");
- mask |= POLLPRI | POLLWRNORM;
- }
- } else {
- spin_unlock_irqrestore(&sep->snd_rply_lck, lck_flags);
- dev_dbg(&sep->pdev->dev,
- "daemon poll: no reply received; returning 0\n");
- mask = 0;
- }
-end_function:
- return mask;
-}
-
-/**
- * sep_release - close a SEP device
- * @inode: inode of SEP device
- * @filp: file handle being closed
- *
- * Called on the final close of a SEP device.
- */
-static int sep_release(struct inode *inode, struct file *filp)
-{
- struct sep_device *sep = filp->private_data;
-
- dev_dbg(&sep->pdev->dev, "Release for pid %d\n", current->pid);
-
- mutex_lock(&sep->sep_mutex);
- /* Is this the process that has a transaction open?
- * If so, let's reset pid_doing_transaction to 0 and
- * clear the in use flags, and then wake up sep_event
- * so that other processes can do transactions
- */
- if (sep->pid_doing_transaction == current->pid) {
- clear_bit(SEP_MMAP_LOCK_BIT, &sep->in_use_flags);
- clear_bit(SEP_SEND_MSG_LOCK_BIT, &sep->in_use_flags);
- sep_free_dma_table_data_handler(sep);
- wake_up(&sep->event);
- sep->pid_doing_transaction = 0;
- }
-
- mutex_unlock(&sep->sep_mutex);
- return 0;
-}
-
-/**
- * sep_mmap - maps the shared area to user space
- * @filp: pointer to struct file
- * @vma: pointer to vm_area_struct
- *
- * Called on an mmap of our space via the normal SEP device
- */
-static int sep_mmap(struct file *filp, struct vm_area_struct *vma)
-{
- dma_addr_t bus_addr;
- struct sep_device *sep = filp->private_data;
- int error = 0;
-
- /* Set the transaction busy (own the device) */
- wait_event_interruptible(sep->event,
- test_and_set_bit(SEP_MMAP_LOCK_BIT,
- &sep->in_use_flags) == 0);
-
- if (signal_pending(current)) {
- error = -EINTR;
- goto end_function_with_error;
- }
- /*
- * The pid_doing_transaction indicates that this process
- * now owns the facilities to perform a transaction with
- * the SEP. While this process is performing a transaction,
- * no other process who has the SEP device open can perform
- * any transactions. This method allows more than one process
- * to have the device open at any given time, which provides
- * finer granularity for device utilization by multiple
- * processes.
- */
- mutex_lock(&sep->sep_mutex);
- sep->pid_doing_transaction = current->pid;
- mutex_unlock(&sep->sep_mutex);
-
- /* Zero the pools and the number of data pool allocation pointers */
- sep->data_pool_bytes_allocated = 0;
- sep->num_of_data_allocations = 0;
-
- /*
- * Check that the size of the mapped range does not exceed the size
- * of the message shared area
- */
- if ((vma->vm_end - vma->vm_start) > SEP_DRIVER_MMMAP_AREA_SIZE) {
- error = -EINVAL;
- goto end_function_with_error;
- }
-
- dev_dbg(&sep->pdev->dev, "shared_addr is %p\n", sep->shared_addr);
-
- /* Get bus address */
- bus_addr = sep->shared_bus;
-
- if (remap_pfn_range(vma, vma->vm_start, bus_addr >> PAGE_SHIFT,
- vma->vm_end - vma->vm_start, vma->vm_page_prot)) {
- dev_warn(&sep->pdev->dev, "remap_page_range failed\n");
- error = -EAGAIN;
- goto end_function_with_error;
- }
- goto end_function;
-
-end_function_with_error:
- /* Clear the bit */
- clear_bit(SEP_MMAP_LOCK_BIT, &sep->in_use_flags);
- mutex_lock(&sep->sep_mutex);
- sep->pid_doing_transaction = 0;
- mutex_unlock(&sep->sep_mutex);
-
- /* Raise event for stuck contexts */
- wake_up(&sep->event);
-
-end_function:
- return error;
-}
-
-/**
- * sep_poll - poll handler
- * @filp: pointer to struct file
- * @wait: pointer to poll_table
- *
- * Called by the OS when the kernel is asked to do a poll on
- * a SEP file handle.
- */
-static unsigned int sep_poll(struct file *filp, poll_table *wait)
-{
- u32 mask = 0;
- u32 retval = 0;
- u32 retval2 = 0;
- unsigned long lck_flags;
-
- struct sep_device *sep = filp->private_data;
-
- /* Am I the process that owns the transaction? */
- mutex_lock(&sep->sep_mutex);
- if (current->pid != sep->pid_doing_transaction) {
- dev_dbg(&sep->pdev->dev, "poll; wrong pid\n");
- mask = POLLERR;
- mutex_unlock(&sep->sep_mutex);
- goto end_function;
- }
- mutex_unlock(&sep->sep_mutex);
-
- /* Check if send command or send_reply were activated previously */
- if (!test_bit(SEP_SEND_MSG_LOCK_BIT, &sep->in_use_flags)) {
- mask = POLLERR;
- goto end_function;
- }
-
- /* Add the event to the polling wait table */
- dev_dbg(&sep->pdev->dev, "poll: calling wait sep_event\n");
-
- poll_wait(filp, &sep->event, wait);
-
- dev_dbg(&sep->pdev->dev, "poll: send_ct is %lx reply ct is %lx\n",
- sep->send_ct, sep->reply_ct);
-
- /* Check if error occurred during poll */
- retval2 = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
- if (retval2 != 0x0) {
- dev_warn(&sep->pdev->dev, "poll; poll error %x\n", retval2);
- mask |= POLLERR;
- goto end_function;
- }
-
- spin_lock_irqsave(&sep->snd_rply_lck, lck_flags);
-
- if (sep->send_ct == sep->reply_ct) {
- spin_unlock_irqrestore(&sep->snd_rply_lck, lck_flags);
- retval = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
- dev_dbg(&sep->pdev->dev, "poll: data ready check (GPR2) %x\n",
- retval);
-
- /* Check if printf request */
- if ((retval >> 30) & 0x1) {
- dev_dbg(&sep->pdev->dev, "poll: SEP printf request\n");
- wake_up(&sep->event_request_daemon);
- goto end_function;
- }
-
- /* Check if this is a SEP reply or a SEP request */
- if (retval >> 31) {
- dev_dbg(&sep->pdev->dev, "poll: SEP request\n");
- wake_up(&sep->event_request_daemon);
- } else {
- dev_dbg(&sep->pdev->dev, "poll: normal return\n");
- /* In case it is set again by send_reply_command */
- clear_bit(SEP_SEND_MSG_LOCK_BIT, &sep->in_use_flags);
- sep_dump_message(sep);
- dev_dbg(&sep->pdev->dev,
- "poll; SEP reply POLLIN | POLLRDNORM\n");
- mask |= POLLIN | POLLRDNORM;
- }
- } else {
- spin_unlock_irqrestore(&sep->snd_rply_lck, lck_flags);
- dev_dbg(&sep->pdev->dev,
- "poll; no reply received; returning mask of 0\n");
- mask = 0;
- }
-
-end_function:
- return mask;
-}
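-
-/*
- * GPR2 status decoding, as read by both poll handlers above (a sketch
- * inferred from this file; the helper names are ours, not the
- * driver's): bit 31 marks a SEP-originated request, bit 30 marks a
- * SEP printf request, and anything else is treated as a normal reply.
- *
- *	static bool gpr2_is_printf_req(u32 gpr2)
- *	{
- *		return (gpr2 >> 30) & 0x1;
- *	}
- *
- *	static bool gpr2_is_sep_req(u32 gpr2)
- *	{
- *		return gpr2 >> 31;
- *	}
- */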
-
-/**
- * sep_time_address - address in SEP memory of time
- * @sep: SEP device we want the address from
- *
- * Return the address of the two dwords in memory used for time
- * setting.
- */
-static u32 *sep_time_address(struct sep_device *sep)
-{
- return sep->shared_addr + SEP_DRIVER_SYSTEM_TIME_MEMORY_OFFSET_IN_BYTES;
-}
-
-/**
- * sep_set_time - set the SEP time
- * @sep: the SEP we are setting the time for
- *
- * Calculates time and sets it at the predefined address.
- * Called with the SEP mutex held.
- */
-static unsigned long sep_set_time(struct sep_device *sep)
-{
- struct timeval time;
- u32 *time_addr; /* Address of time as seen by the kernel */
-
-
- do_gettimeofday(&time);
-
- /* Set value in the SYSTEM MEMORY offset */
- time_addr = sep_time_address(sep);
-
- time_addr[0] = SEP_TIME_VAL_TOKEN;
- time_addr[1] = time.tv_sec;
-
- dev_dbg(&sep->pdev->dev, "time.tv_sec is %lu\n", time.tv_sec);
- dev_dbg(&sep->pdev->dev, "time_addr is %p\n", time_addr);
- dev_dbg(&sep->pdev->dev, "sep->shared_addr is %p\n", sep->shared_addr);
-
- return time.tv_sec;
-}
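-
-/*
- * Layout sketch of the time record written above: two 32-bit words at
- * sep_time_address(), per the offsets used in this file:
- *
- *	time_addr[0] = SEP_TIME_VAL_TOKEN;	marker word
- *	time_addr[1] = time.tv_sec;		seconds since the epoch
- */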
-
-/**
- * sep_set_caller_id_handler - insert caller id entry
- * @sep: SEP device
- * @arg: pointer to struct caller_id_struct
- *
- * Inserts the data into the caller id table. Note that this function
- * falls under the ioctl lock
- */
-static int sep_set_caller_id_handler(struct sep_device *sep, unsigned long arg)
-{
- void __user *hash;
- int error = 0;
- int i;
- struct caller_id_struct command_args;
-
- for (i = 0; i < SEP_CALLER_ID_TABLE_NUM_ENTRIES; i++) {
- if (sep->caller_id_table[i].pid == 0)
- break;
- }
-
- if (i == SEP_CALLER_ID_TABLE_NUM_ENTRIES) {
- dev_dbg(&sep->pdev->dev, "no more caller id entries left\n");
- dev_dbg(&sep->pdev->dev, "maximum number is %d\n",
- SEP_CALLER_ID_TABLE_NUM_ENTRIES);
- error = -EUSERS;
- goto end_function;
- }
-
- /* Copy the data */
- if (copy_from_user(&command_args, (void __user *)arg,
- sizeof(command_args))) {
- error = -EFAULT;
- goto end_function;
- }
-
- hash = (void __user *)(unsigned long)command_args.callerIdAddress;
-
- if (!command_args.pid || !command_args.callerIdSizeInBytes) {
- error = -EINVAL;
- goto end_function;
- }
-
- dev_dbg(&sep->pdev->dev, "pid is %x\n", command_args.pid);
- dev_dbg(&sep->pdev->dev, "callerIdSizeInBytes is %x\n",
- command_args.callerIdSizeInBytes);
-
- if (command_args.callerIdSizeInBytes >
- SEP_CALLER_ID_HASH_SIZE_IN_BYTES) {
- error = -EMSGSIZE;
- goto end_function;
- }
-
- sep->caller_id_table[i].pid = command_args.pid;
-
- if (copy_from_user(sep->caller_id_table[i].callerIdHash,
- hash, command_args.callerIdSizeInBytes))
- error = -EFAULT;
-end_function:
- return error;
-}
-
-/**
- * sep_set_current_caller_id - set the caller id
- * @sep: pointer to struct_sep_device
- *
- * Set the caller ID (if it exists) to the SEP. Note that this
- * function falls under the ioctl lock
- */
-static int sep_set_current_caller_id(struct sep_device *sep)
-{
- int i;
- u32 *hash_buf_ptr;
-
- /* Zero the previous value */
- memset(sep->shared_addr + SEP_CALLER_ID_OFFSET_BYTES,
- 0, SEP_CALLER_ID_HASH_SIZE_IN_BYTES);
-
- for (i = 0; i < SEP_CALLER_ID_TABLE_NUM_ENTRIES; i++) {
- if (sep->caller_id_table[i].pid == current->pid) {
- dev_dbg(&sep->pdev->dev, "Caller Id found\n");
-
- memcpy(sep->shared_addr + SEP_CALLER_ID_OFFSET_BYTES,
- (void *)(sep->caller_id_table[i].callerIdHash),
- SEP_CALLER_ID_HASH_SIZE_IN_BYTES);
- break;
- }
- }
- /* Ensure data is in little endian */
- hash_buf_ptr = (u32 *)(sep->shared_addr +
- SEP_CALLER_ID_OFFSET_BYTES);
-
- for (i = 0; i < SEP_CALLER_ID_HASH_SIZE_IN_WORDS; i++)
- hash_buf_ptr[i] = cpu_to_le32(hash_buf_ptr[i]);
-
- return 0;
-}
-
-/**
- * sep_send_command_handler - kick off a command
- * @sep: SEP being signalled
- *
- * This function raises an interrupt to the SEP that signals that it
- * has a new command from the host
- *
- * Note that this function does fall under the ioctl lock
- */
-static int sep_send_command_handler(struct sep_device *sep)
-{
- unsigned long lck_flags;
- int error = 0;
-
- if (test_and_set_bit(SEP_SEND_MSG_LOCK_BIT, &sep->in_use_flags)) {
- error = -EPROTO;
- goto end_function;
- }
- sep_set_time(sep);
-
- sep_set_current_caller_id(sep);
-
- sep_dump_message(sep);
-
- /* Update counter */
- spin_lock_irqsave(&sep->snd_rply_lck, lck_flags);
- sep->send_ct++;
- spin_unlock_irqrestore(&sep->snd_rply_lck, lck_flags);
-
- dev_dbg(&sep->pdev->dev,
- "sep_send_command_handler send_ct %lx reply_ct %lx\n",
- sep->send_ct, sep->reply_ct);
-
- /* Send interrupt to SEP */
- sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x2);
-
-end_function:
- return error;
-}
-
-/**
- * sep_allocate_data_pool_memory_handler -allocate pool memory
- * @sep: pointer to struct sep_device
- * @arg: pointer to struct alloc_struct
- *
- * This function handles the allocate data pool memory request.
- * It calculates the bus address of the allocated memory and the
- * offset of this area from the mapped address, so that the FVOs
- * in user space can calculate the exact virtual address of this
- * allocated memory
- */
-static int sep_allocate_data_pool_memory_handler(struct sep_device *sep,
- unsigned long arg)
-{
- int error = 0;
- struct alloc_struct command_args;
-
- /* Holds the allocated buffer address in the system memory pool */
- u32 *token_addr;
-
- if (copy_from_user(&command_args, (void __user *)arg,
- sizeof(struct alloc_struct))) {
- error = -EFAULT;
- goto end_function;
- }
-
- /* Allocate memory */
- if ((sep->data_pool_bytes_allocated + command_args.num_bytes) >
- SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES) {
- error = -ENOMEM;
- goto end_function;
- }
-
- dev_dbg(&sep->pdev->dev,
- "data pool bytes_allocated: %x\n", (int)sep->data_pool_bytes_allocated);
- dev_dbg(&sep->pdev->dev,
- "offset: %x\n", SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES);
- /* Set the virtual and bus address */
- command_args.offset = SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES +
- sep->data_pool_bytes_allocated;
-
- /* Place in the shared area that is known by the SEP */
- token_addr = (u32 *)(sep->shared_addr +
- SEP_DRIVER_DATA_POOL_ALLOCATION_OFFSET_IN_BYTES +
- (sep->num_of_data_allocations)*2*sizeof(u32));
-
- token_addr[0] = SEP_DATA_POOL_POINTERS_VAL_TOKEN;
- token_addr[1] = (u32)sep->shared_bus +
- SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES +
- sep->data_pool_bytes_allocated;
-
- /* Write the memory back to the user space */
- error = copy_to_user((void __user *)arg, (void *)&command_args,
- sizeof(struct alloc_struct));
- if (error) {
- error = -EFAULT;
- goto end_function;
- }
-
- /* Update the allocation */
- sep->data_pool_bytes_allocated += command_args.num_bytes;
- sep->num_of_data_allocations += 1;
-
-end_function:
- return error;
-}
-
-/**
- * sep_lock_kernel_pages - map kernel pages for DMA
- * @sep: pointer to struct sep_device
- * @kernel_virt_addr: address of data buffer in kernel
- * @data_size: size of data
- * @lli_array_ptr: lli array
- * @in_out_flag: input into device or output from device
- *
- * This function locks all the physical pages of the kernel virtual
- * buffer and constructs a basic lli array, where each entry holds
- * the physical page address and the size of data that the buffer
- * holds in this page.
- * This function is used only during kernel crypto module calls from
- * within the kernel (when ioctl is not used)
- */
-static int sep_lock_kernel_pages(struct sep_device *sep,
- unsigned long kernel_virt_addr,
- u32 data_size,
- struct sep_lli_entry **lli_array_ptr,
- int in_out_flag)
-
-{
- int error = 0;
- /* Array of lli */
- struct sep_lli_entry *lli_array;
- /* Map array */
- struct sep_dma_map *map_array;
-
- dev_dbg(&sep->pdev->dev, "lock kernel pages kernel_virt_addr is %08lx\n",
- (unsigned long)kernel_virt_addr);
- dev_dbg(&sep->pdev->dev, "data_size is %x\n", data_size);
-
- lli_array = kmalloc(sizeof(struct sep_lli_entry), GFP_ATOMIC);
- if (!lli_array) {
- error = -ENOMEM;
- goto end_function;
- }
- map_array = kmalloc(sizeof(struct sep_dma_map), GFP_ATOMIC);
- if (!map_array) {
- error = -ENOMEM;
- goto end_function_with_error;
- }
-
- map_array[0].dma_addr =
- dma_map_single(&sep->pdev->dev, (void *)kernel_virt_addr,
- data_size, DMA_BIDIRECTIONAL);
- map_array[0].size = data_size;
-
-
- /*
- * Set the start address of the first page - application data may
- * not start at the beginning of the page
- */
- lli_array[0].bus_address = (u32)map_array[0].dma_addr;
- lli_array[0].block_size = map_array[0].size;
-
- dev_dbg(&sep->pdev->dev,
- "lli_array[0].bus_address is %08lx, lli_array[0].block_size is %x\n",
- (unsigned long)lli_array[0].bus_address,
- lli_array[0].block_size);
-
- /* Set the output parameters */
- if (in_out_flag == SEP_DRIVER_IN_FLAG) {
- *lli_array_ptr = lli_array;
- sep->dma_res_arr[sep->nr_dcb_creat].in_num_pages = 1;
- sep->dma_res_arr[sep->nr_dcb_creat].in_page_array = NULL;
- sep->dma_res_arr[sep->nr_dcb_creat].in_map_array = map_array;
- sep->dma_res_arr[sep->nr_dcb_creat].in_map_num_entries = 1;
- } else {
- *lli_array_ptr = lli_array;
- sep->dma_res_arr[sep->nr_dcb_creat].out_num_pages = 1;
- sep->dma_res_arr[sep->nr_dcb_creat].out_page_array = NULL;
- sep->dma_res_arr[sep->nr_dcb_creat].out_map_array = map_array;
- sep->dma_res_arr[sep->nr_dcb_creat].out_map_num_entries = 1;
- }
- goto end_function;
-
-end_function_with_error:
- kfree(lli_array);
-
-end_function:
- return error;
-}
-
-/**
- * sep_lock_user_pages - lock and map user pages for DMA
- * @sep: pointer to struct sep_device
- * @app_virt_addr: user memory data buffer
- * @data_size: size of data buffer
- * @lli_array_ptr: lli array
- * @in_out_flag: input or output to device
- *
- * This function locks all the physical pages of the application
- * virtual buffer and constructs a basic lli array, where each entry
- * holds the physical page address and the size of data that the
- * application data holds in these physical pages
- */
-static int sep_lock_user_pages(struct sep_device *sep,
- u32 app_virt_addr,
- u32 data_size,
- struct sep_lli_entry **lli_array_ptr,
- int in_out_flag)
-
-{
- int error = 0;
- u32 count;
- int result;
- /* The page of the end address of the user space buffer */
- u32 end_page;
- /* The page of the start address of the user space buffer */
- u32 start_page;
- /* The range in pages */
- u32 num_pages;
- /* Array of pointers to page */
- struct page **page_array;
- /* Array of lli */
- struct sep_lli_entry *lli_array;
- /* Map array */
- struct sep_dma_map *map_array;
- /* Direction of the DMA mapping for locked pages */
- enum dma_data_direction dir;
-
- /* Set start and end pages and num pages */
- end_page = (app_virt_addr + data_size - 1) >> PAGE_SHIFT;
- start_page = app_virt_addr >> PAGE_SHIFT;
- num_pages = end_page - start_page + 1;
-
- dev_dbg(&sep->pdev->dev, "lock user pages app_virt_addr is %x\n", app_virt_addr);
- dev_dbg(&sep->pdev->dev, "data_size is %x\n", data_size);
- dev_dbg(&sep->pdev->dev, "start_page is %x\n", start_page);
- dev_dbg(&sep->pdev->dev, "end_page is %x\n", end_page);
- dev_dbg(&sep->pdev->dev, "num_pages is %x\n", num_pages);
-
- /* Allocate array of pages structure pointers */
- page_array = kmalloc(sizeof(struct page *) * num_pages, GFP_ATOMIC);
- if (!page_array) {
- error = -ENOMEM;
- goto end_function;
- }
- map_array = kmalloc(sizeof(struct sep_dma_map) * num_pages, GFP_ATOMIC);
- if (!map_array) {
- dev_warn(&sep->pdev->dev, "kmalloc for map_array failed\n");
- error = -ENOMEM;
- goto end_function_with_error1;
- }
-
- lli_array = kmalloc(sizeof(struct sep_lli_entry) * num_pages,
- GFP_ATOMIC);
-
- if (!lli_array) {
- dev_warn(&sep->pdev->dev, "kmalloc for lli_array failed\n");
- error = -ENOMEM;
- goto end_function_with_error2;
- }
-
- /* Convert the application virtual address into a set of physical pages */
- down_read(&current->mm->mmap_sem);
- result = get_user_pages(current, current->mm, app_virt_addr,
- num_pages,
- ((in_out_flag == SEP_DRIVER_IN_FLAG) ? 0 : 1),
- 0, page_array, NULL);
-
- up_read(&current->mm->mmap_sem);
-
- /* Check the number of pages locked - if not all then exit with error */
- if (result != num_pages) {
- dev_warn(&sep->pdev->dev,
- "not all pages locked by get_user_pages\n");
- error = -ENOMEM;
- goto end_function_with_error3;
- }
-
- dev_dbg(&sep->pdev->dev, "get_user_pages succeeded\n");
-
- /* Set direction */
- if (in_out_flag == SEP_DRIVER_IN_FLAG)
- dir = DMA_TO_DEVICE;
- else
- dir = DMA_FROM_DEVICE;
-
- /*
- * Fill the array using page array data and
- * map the pages - this action will also flush the cache as needed
- */
- for (count = 0; count < num_pages; count++) {
- /* Fill the map array */
- map_array[count].dma_addr =
- dma_map_page(&sep->pdev->dev, page_array[count],
- 0, PAGE_SIZE, /*dir*/DMA_BIDIRECTIONAL);
-
- map_array[count].size = PAGE_SIZE;
-
- /* Fill the lli array entry */
- lli_array[count].bus_address = (u32)map_array[count].dma_addr;
- lli_array[count].block_size = PAGE_SIZE;
-
- dev_warn(&sep->pdev->dev, "lli_array[%x].bus_address is %08lx, lli_array[%x].block_size is %x\n",
- count, (unsigned long)lli_array[count].bus_address,
- count, lli_array[count].block_size);
- }
-
- /* Check the offset for the first page */
- lli_array[0].bus_address =
- lli_array[0].bus_address + (app_virt_addr & (~PAGE_MASK));
-
- /* Check if all the data is in the first page only */
- if ((PAGE_SIZE - (app_virt_addr & (~PAGE_MASK))) >= data_size)
- lli_array[0].block_size = data_size;
- else
- lli_array[0].block_size =
- PAGE_SIZE - (app_virt_addr & (~PAGE_MASK));
-
- dev_dbg(&sep->pdev->dev,
- "lli_array[0].bus_address is %08lx, lli_array[0].block_size is %x\n",
- (unsigned long)lli_array[0].bus_address,
- lli_array[0].block_size);
-
- /* Check the size of the last page */
- if (num_pages > 1) {
- lli_array[num_pages - 1].block_size =
- (app_virt_addr + data_size) & (~PAGE_MASK);
- if (lli_array[num_pages - 1].block_size == 0)
- lli_array[num_pages - 1].block_size = PAGE_SIZE;
-
- dev_warn(&sep->pdev->dev,
- "lli_array[%x].bus_address is "
- "%08lx, lli_array[%x].block_size is %x\n",
- num_pages - 1,
- (unsigned long)lli_array[num_pages - 1].bus_address,
- num_pages - 1,
- lli_array[num_pages - 1].block_size);
- }
-
- /* Set output params according to the in_out flag */
- if (in_out_flag == SEP_DRIVER_IN_FLAG) {
- *lli_array_ptr = lli_array;
- sep->dma_res_arr[sep->nr_dcb_creat].in_num_pages = num_pages;
- sep->dma_res_arr[sep->nr_dcb_creat].in_page_array = page_array;
- sep->dma_res_arr[sep->nr_dcb_creat].in_map_array = map_array;
- sep->dma_res_arr[sep->nr_dcb_creat].in_map_num_entries =
- num_pages;
- } else {
- *lli_array_ptr = lli_array;
- sep->dma_res_arr[sep->nr_dcb_creat].out_num_pages = num_pages;
- sep->dma_res_arr[sep->nr_dcb_creat].out_page_array =
- page_array;
- sep->dma_res_arr[sep->nr_dcb_creat].out_map_array = map_array;
- sep->dma_res_arr[sep->nr_dcb_creat].out_map_num_entries =
- num_pages;
- }
- goto end_function;
-
-end_function_with_error3:
- /* Free lli array */
- kfree(lli_array);
-
-end_function_with_error2:
- kfree(map_array);
-
-end_function_with_error1:
- /* Free page array */
- kfree(page_array);
-
-end_function:
- return error;
-}
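-
-/*
- * Worked example of the page arithmetic above (illustrative numbers):
- * with PAGE_SIZE 4096, app_virt_addr 0x1ff0 and data_size 0x30,
- * start_page = 1, end_page = 2, num_pages = 2. The first entry then
- * covers 0x10 bytes (0x1ff0..0x1fff) and the last entry covers the
- * remaining 0x20 bytes:
- *
- *	lli_array[0].bus_address += app_virt_addr & ~PAGE_MASK;
- *	lli_array[0].block_size = PAGE_SIZE - (app_virt_addr & ~PAGE_MASK);
- *	lli_array[num_pages - 1].block_size =
- *		(app_virt_addr + data_size) & ~PAGE_MASK;
- */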
-
-/**
- * sep_calculate_lli_table_max_size - size the LLI table
- * @sep: pointer to struct sep_device
- * @lli_in_array_ptr: pointer to the lli array
- * @num_array_entries: number of entries in the lli array
- * @last_table_flag: set to 1 when this is the last table to build
- *
- * This function calculates the size of data that can be inserted into
- * the lli table from this array, such that either the table is full
- * (all entries are entered), or there are no more entries in the
- * lli array
- */
-static u32 sep_calculate_lli_table_max_size(struct sep_device *sep,
- struct sep_lli_entry *lli_in_array_ptr,
- u32 num_array_entries,
- u32 *last_table_flag)
-{
- u32 counter;
- /* Table data size */
- u32 table_data_size = 0;
- /* Data size for the next table */
- u32 next_table_data_size;
-
- *last_table_flag = 0;
-
- /*
- * Calculate the data in the lli table until we fill the whole
- * table or until the data has ended
- */
- for (counter = 0;
- (counter < (SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP - 1)) &&
- (counter < num_array_entries); counter++)
- table_data_size += lli_in_array_ptr[counter].block_size;
-
- /*
- * Check if we reached the last entry,
- * meaning this is the last table to build,
- * and there is no need to check the block alignment
- */
- if (counter == num_array_entries) {
- /* Set the last table flag */
- *last_table_flag = 1;
- goto end_function;
- }
-
- /*
- * Calculate the data size of the next table. Stop if no entries
- * are left or if the data size exceeds the DMA restriction
- */
- next_table_data_size = 0;
- for (; counter < num_array_entries; counter++) {
- next_table_data_size += lli_in_array_ptr[counter].block_size;
- if (next_table_data_size >= SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE)
- break;
- }
-
- /*
- * Check if the next table data size is less than the DMA
- * restriction. If it is, recalculate the current table size so
- * that the next table data size will be adequate for DMA
- */
- if (next_table_data_size &&
- next_table_data_size < SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE)
-
- table_data_size -= (SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE -
- next_table_data_size);
-
-end_function:
- return table_data_size;
-}
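-
-/*
- * Sizing example (illustrative; the 0x1000 minimum is an assumed
- * value for SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE): if the entries that
- * fit in the current table total 0x7000 bytes but only 0x800 bytes
- * would be left for the next table, the current table is shrunk by
- * 0x1000 - 0x800 = 0x800 bytes so that the next table still meets
- * the DMA restriction:
- *
- *	table_data_size -= SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE -
- *			   next_table_data_size;
- */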
-
-/**
- * sep_build_lli_table - build an lli array for the given table
- * @sep: pointer to struct sep_device
- * @lli_array_ptr: pointer to lli array
- * @lli_table_ptr: pointer to lli table
- * @num_processed_entries_ptr: pointer to number of entries
- * @num_table_entries_ptr: pointer to number of tables
- * @table_data_size: total data size
- *
- * Builds an lli table from the lli_array according to
- * the given size of data
- */
-static void sep_build_lli_table(struct sep_device *sep,
- struct sep_lli_entry *lli_array_ptr,
- struct sep_lli_entry *lli_table_ptr,
- u32 *num_processed_entries_ptr,
- u32 *num_table_entries_ptr,
- u32 table_data_size)
-{
- /* Current table data size */
- u32 curr_table_data_size;
- /* Counter of lli array entry */
- u32 array_counter;
-
- /* Init current table data size and lli array entry counter */
- curr_table_data_size = 0;
- array_counter = 0;
- *num_table_entries_ptr = 1;
-
- dev_dbg(&sep->pdev->dev, "build lli table table_data_size is %x\n", table_data_size);
-
- /* Fill the table till table size reaches the needed amount */
- while (curr_table_data_size < table_data_size) {
- /* Update the number of entries in table */
- (*num_table_entries_ptr)++;
-
- lli_table_ptr->bus_address =
- cpu_to_le32(lli_array_ptr[array_counter].bus_address);
-
- lli_table_ptr->block_size =
- cpu_to_le32(lli_array_ptr[array_counter].block_size);
-
- curr_table_data_size += lli_array_ptr[array_counter].block_size;
-
- dev_dbg(&sep->pdev->dev, "lli_table_ptr is %p\n",
- lli_table_ptr);
- dev_dbg(&sep->pdev->dev, "lli_table_ptr->bus_address is %08lx\n",
- (unsigned long)lli_table_ptr->bus_address);
- dev_dbg(&sep->pdev->dev, "lli_table_ptr->block_size is %x\n",
- lli_table_ptr->block_size);
-
- /* Check for overflow of the table data */
- if (curr_table_data_size > table_data_size) {
- dev_dbg(&sep->pdev->dev,
- "curr_table_data_size too large\n");
-
- /* Update the size of block in the table */
- lli_table_ptr->block_size -=
- cpu_to_le32((curr_table_data_size - table_data_size));
-
- /* Update the physical address in the lli array */
- lli_array_ptr[array_counter].bus_address +=
- cpu_to_le32(lli_table_ptr->block_size);
-
- /* Update the block size left in the lli array */
- lli_array_ptr[array_counter].block_size =
- (curr_table_data_size - table_data_size);
- } else
- /* Advance to the next entry in the lli_array */
- array_counter++;
-
- dev_dbg(&sep->pdev->dev,
- "lli_table_ptr->bus_address is %08lx\n",
- (unsigned long)lli_table_ptr->bus_address);
- dev_dbg(&sep->pdev->dev,
- "lli_table_ptr->block_size is %x\n",
- lli_table_ptr->block_size);
-
- /* Move to the next entry in table */
- lli_table_ptr++;
- }
-
- /* Set the info entry to default */
- lli_table_ptr->bus_address = 0xffffffff;
- lli_table_ptr->block_size = 0;
-
- /* Set the output parameter */
- *num_processed_entries_ptr += array_counter;
-
-}
-
-/**
- * sep_shared_area_virt_to_bus - map shared area to bus address
- * @sep: pointer to struct sep_device
- * @virt_address: virtual address to convert
- *
- * This function returns the physical address inside the shared area
- * that corresponds to the virtual address. It can be either on the
- * external RAM device (ioremapped), or on the system RAM
- * This implementation is for the external RAM
- */
-static dma_addr_t sep_shared_area_virt_to_bus(struct sep_device *sep,
- void *virt_address)
-{
- dev_dbg(&sep->pdev->dev, "sh virt to phys v %p\n", virt_address);
- dev_dbg(&sep->pdev->dev, "sh virt to phys p %08lx\n",
- (unsigned long)
- sep->shared_bus + (virt_address - sep->shared_addr));
-
- return sep->shared_bus + (size_t)(virt_address - sep->shared_addr);
-}
-
-/**
- * sep_shared_area_bus_to_virt - map shared area bus address to kernel
- * @sep: pointer to struct sep_device
- * @bus_address: bus address to convert
- *
- * This function returns the virtual address inside the shared area
- * that corresponds to the physical address. It can be either on the
- * external RAM device (ioremapped), or on the system RAM
- * This implementation is for the external RAM
- */
-static void *sep_shared_area_bus_to_virt(struct sep_device *sep,
- dma_addr_t bus_address)
-{
- dev_dbg(&sep->pdev->dev, "shared bus to virt b=%lx v=%lx\n",
- (unsigned long)bus_address, (unsigned long)(sep->shared_addr +
- (size_t)(bus_address - sep->shared_bus)));
-
- return sep->shared_addr + (size_t)(bus_address - sep->shared_bus);
-}
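-
-/*
- * Both helpers above are plain offset arithmetic within the one
- * contiguous shared area (sketch):
- *
- *	bus  = sep->shared_bus  + (virt - sep->shared_addr);
- *	virt = sep->shared_addr + (bus  - sep->shared_bus);
- */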
-
-/**
- * sep_debug_print_lli_tables - dump LLI table
- * @sep: pointer to struct sep_device
- * @lli_table_ptr: pointer to sep_lli_entry
- * @num_table_entries: number of entries
- * @table_data_size: total data size
- *
- * Walk the list of the created tables and print all the data
- */
-static void sep_debug_print_lli_tables(struct sep_device *sep,
- struct sep_lli_entry *lli_table_ptr,
- unsigned long num_table_entries,
- unsigned long table_data_size)
-{
- unsigned long table_count = 1;
- unsigned long entries_count = 0;
-
- dev_dbg(&sep->pdev->dev, "sep_debug_print_lli_tables start\n");
-
- while ((unsigned long) lli_table_ptr->bus_address != 0xffffffff) {
- dev_dbg(&sep->pdev->dev,
- "lli table %08lx, table_data_size is %lu\n",
- table_count, table_data_size);
- dev_dbg(&sep->pdev->dev, "num_table_entries is %lu\n",
- num_table_entries);
-
- /* Print entries of the table (without info entry) */
- for (entries_count = 0; entries_count < num_table_entries;
- entries_count++, lli_table_ptr++) {
-
- dev_dbg(&sep->pdev->dev,
- "lli_table_ptr address is %08lx\n",
- (unsigned long) lli_table_ptr);
-
- dev_dbg(&sep->pdev->dev,
- "phys address is %08lx block size is %x\n",
- (unsigned long)lli_table_ptr->bus_address,
- lli_table_ptr->block_size);
- }
- /* Point to the info entry */
- lli_table_ptr--;
-
- dev_dbg(&sep->pdev->dev,
- "phys lli_table_ptr->block_size is %x\n",
- lli_table_ptr->block_size);
-
- dev_dbg(&sep->pdev->dev,
- "phys lli_table_ptr->bus_address is %08lx\n",
- (unsigned long)lli_table_ptr->bus_address);
-
-
- table_data_size = lli_table_ptr->block_size & 0xffffff;
- num_table_entries = (lli_table_ptr->block_size >> 24) & 0xff;
-
- dev_dbg(&sep->pdev->dev,
- "phys table_data_size is %lu num_table_entries is"
- " %lu bus_address is %lx\n", table_data_size,
- num_table_entries, (unsigned long)lli_table_ptr->bus_address);
-
- if ((unsigned long)lli_table_ptr->bus_address != 0xffffffff)
- lli_table_ptr = (struct sep_lli_entry *)
- sep_shared_bus_to_virt(sep,
- (unsigned long)lli_table_ptr->bus_address);
-
- table_count++;
- }
- dev_dbg(&sep->pdev->dev, "sep_debug_print_lli_tables end\n");
-}
-
-
-/**
- * sep_prepare_empty_lli_table - create a blank LLI table
- * @sep: pointer to struct sep_device
- * @lli_table_addr_ptr: pointer to lli table
- * @num_entries_ptr: pointer to number of entries
- * @table_data_size_ptr: point to table data size
- *
- * This function creates empty lli tables when there is no data
- */
-static void sep_prepare_empty_lli_table(struct sep_device *sep,
- dma_addr_t *lli_table_addr_ptr,
- u32 *num_entries_ptr,
- u32 *table_data_size_ptr)
-{
- struct sep_lli_entry *lli_table_ptr;
-
- /* Find the area for new table */
- lli_table_ptr =
- (struct sep_lli_entry *)(sep->shared_addr +
- SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
- sep->num_lli_tables_created * sizeof(struct sep_lli_entry) *
- SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);
-
- lli_table_ptr->bus_address = 0;
- lli_table_ptr->block_size = 0;
-
- lli_table_ptr++;
- lli_table_ptr->bus_address = 0xFFFFFFFF;
- lli_table_ptr->block_size = 0;
-
- /* Set the output parameter value */
- *lli_table_addr_ptr = sep->shared_bus +
- SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
- sep->num_lli_tables_created *
- sizeof(struct sep_lli_entry) *
- SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
-
- /* Set the num of entries and table data size for empty table */
- *num_entries_ptr = 2;
- *table_data_size_ptr = 0;
-
- /* Update the number of created tables */
- sep->num_lli_tables_created++;
-}
-
-/**
- * sep_prepare_input_dma_table - prepare input DMA mappings
- * @sep: pointer to struct sep_device
- * @app_virt_addr: virtual address of the data buffer
- * @data_size: size of the data
- * @block_size: block size of the operation
- * @lli_table_ptr: returned bus address of the first lli table
- * @num_entries_ptr: returned number of entries in the first table
- * @table_data_size_ptr: returned data size of the first table
- * @is_kva: set for kernel data (kernel crypto call)
- *
- * This function prepares only the input DMA table for synchronous
- * symmetric operations (HASH)
- * Note that all bus addresses that are passed to the SEP
- * are in 32 bit format; the SEP is a 32 bit device
- */
-static int sep_prepare_input_dma_table(struct sep_device *sep,
- unsigned long app_virt_addr,
- u32 data_size,
- u32 block_size,
- dma_addr_t *lli_table_ptr,
- u32 *num_entries_ptr,
- u32 *table_data_size_ptr,
- bool is_kva)
-{
- int error = 0;
- /* Pointer to the info entry of the table - the last entry */
- struct sep_lli_entry *info_entry_ptr;
- /* Array of pointers to page */
- struct sep_lli_entry *lli_array_ptr;
- /* Points to the first entry to be processed in the lli_in_array */
- u32 current_entry = 0;
- /* Num entries in the virtual buffer */
- u32 sep_lli_entries = 0;
- /* Lli table pointer */
- struct sep_lli_entry *in_lli_table_ptr;
- /* The total data in one table */
- u32 table_data_size = 0;
- /* Flag for last table */
- u32 last_table_flag = 0;
- /* Number of entries in lli table */
- u32 num_entries_in_table = 0;
- /* Next table address */
- void *lli_table_alloc_addr = 0;
-
- dev_dbg(&sep->pdev->dev, "prepare intput dma table data_size is %x\n", data_size);
- dev_dbg(&sep->pdev->dev, "block_size is %x\n", block_size);
-
- /* Initialize the pages pointers */
- sep->dma_res_arr[sep->nr_dcb_creat].in_page_array = NULL;
- sep->dma_res_arr[sep->nr_dcb_creat].in_num_pages = 0;
-
- /* Set the kernel address for first table to be allocated */
- lli_table_alloc_addr = (void *)(sep->shared_addr +
- SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
- sep->num_lli_tables_created * sizeof(struct sep_lli_entry) *
- SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);
-
- if (data_size == 0) {
- /* Special case - create empty table - 2 entries, zero data */
- sep_prepare_empty_lli_table(sep, lli_table_ptr,
- num_entries_ptr, table_data_size_ptr);
- goto update_dcb_counter;
- }
-
- /* Check if the pages are in Kernel Virtual Address layout */
- if (is_kva == true)
- /* Lock the pages in the kernel */
- error = sep_lock_kernel_pages(sep, app_virt_addr,
- data_size, &lli_array_ptr, SEP_DRIVER_IN_FLAG);
- else
- /*
- * Lock the pages of the user buffer
- * and translate them to pages
- */
- error = sep_lock_user_pages(sep, app_virt_addr,
- data_size, &lli_array_ptr, SEP_DRIVER_IN_FLAG);
-
- if (error)
- goto end_function;
-
- dev_dbg(&sep->pdev->dev, "output sep_in_num_pages is %x\n",
- sep->dma_res_arr[sep->nr_dcb_creat].in_num_pages);
-
- current_entry = 0;
- info_entry_ptr = NULL;
-
- sep_lli_entries = sep->dma_res_arr[sep->nr_dcb_creat].in_num_pages;
-
- /* Loop until all the entries in the in array are processed */
- while (current_entry < sep_lli_entries) {
-
- /* Set the new input and output tables */
- in_lli_table_ptr =
- (struct sep_lli_entry *)lli_table_alloc_addr;
-
- lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
- SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
-
- if (lli_table_alloc_addr >
- ((void *)sep->shared_addr +
- SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
- SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES)) {
-
- error = -ENOMEM;
- goto end_function_error;
-
- }
-
- /* Update the number of created tables */
- sep->num_lli_tables_created++;
-
- /* Calculate the maximum size of data for input table */
- table_data_size = sep_calculate_lli_table_max_size(sep,
- &lli_array_ptr[current_entry],
- (sep_lli_entries - current_entry),
- &last_table_flag);
-
- /*
- * If this is not the last table -
- * then align it to the block size
- */
- if (!last_table_flag)
- table_data_size =
- (table_data_size / block_size) * block_size;
-
- dev_dbg(&sep->pdev->dev, "output table_data_size is %x\n",
- table_data_size);
-
- /* Construct input lli table */
- sep_build_lli_table(sep, &lli_array_ptr[current_entry],
- in_lli_table_ptr,
- &current_entry, &num_entries_in_table, table_data_size);
-
- if (info_entry_ptr == NULL) {
-
- /* Set the output parameters to physical addresses */
- *lli_table_ptr = sep_shared_area_virt_to_bus(sep,
- in_lli_table_ptr);
- *num_entries_ptr = num_entries_in_table;
- *table_data_size_ptr = table_data_size;
-
- dev_dbg(&sep->pdev->dev,
- "output lli_table_in_ptr is %08lx\n",
- (unsigned long)*lli_table_ptr);
-
- } else {
- /* Update the info entry of the previous in table */
- info_entry_ptr->bus_address =
- sep_shared_area_virt_to_bus(sep,
- in_lli_table_ptr);
- info_entry_ptr->block_size =
- ((num_entries_in_table) << 24) |
- (table_data_size);
- }
- /* Save the pointer to the info entry of the current tables */
- info_entry_ptr = in_lli_table_ptr + num_entries_in_table - 1;
- }
- /* Print input tables */
- sep_debug_print_lli_tables(sep, (struct sep_lli_entry *)
- sep_shared_area_bus_to_virt(sep, *lli_table_ptr),
- *num_entries_ptr, *table_data_size_ptr);
- /* The array of the pages */
- kfree(lli_array_ptr);
-
-update_dcb_counter:
- /* Update DCB counter */
- sep->nr_dcb_creat++;
- goto end_function;
-
-end_function_error:
- /* Free all the allocated resources */
- kfree(sep->dma_res_arr[sep->nr_dcb_creat].in_map_array);
- kfree(lli_array_ptr);
- kfree(sep->dma_res_arr[sep->nr_dcb_creat].in_page_array);
-
-end_function:
- return error;
-
-}
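-
-/*
- * Table chaining sketch: the last ("info") entry of each lli table
- * points at the next table, packing the entry count into the top
- * byte of block_size and the data size into the low 24 bits, exactly
- * as the table builders above and below do:
- *
- *	info->bus_address = sep_shared_area_virt_to_bus(sep, next_table);
- *	info->block_size  = (num_entries << 24) | table_data_size;
- */
-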
-/**
- * sep_construct_dma_tables_from_lli - prepare AES/DES mappings
- * @sep: pointer to struct sep_device
- * @lli_in_array: array of input lli entries
- * @sep_in_lli_entries: number of entries in lli_in_array
- * @lli_out_array: array of output lli entries
- * @sep_out_lli_entries: number of entries in lli_out_array
- * @block_size: block size of the operation
- * @lli_table_in_ptr: returned bus address of the first input table
- * @lli_table_out_ptr: returned bus address of the first output table
- * @in_num_entries_ptr: returned number of input table entries
- * @out_num_entries_ptr: returned number of output table entries
- * @table_data_size_ptr: returned data size of the first tables
- *
- * This function creates the input and output DMA tables for
- * symmetric operations (AES/DES) according to the block
- * size from LLI arrays
- * Note that all bus addresses that are passed to the SEP
- * are in 32 bit format; the SEP is a 32 bit device
- */
-static int sep_construct_dma_tables_from_lli(
- struct sep_device *sep,
- struct sep_lli_entry *lli_in_array,
- u32 sep_in_lli_entries,
- struct sep_lli_entry *lli_out_array,
- u32 sep_out_lli_entries,
- u32 block_size,
- dma_addr_t *lli_table_in_ptr,
- dma_addr_t *lli_table_out_ptr,
- u32 *in_num_entries_ptr,
- u32 *out_num_entries_ptr,
- u32 *table_data_size_ptr)
-{
- /* Points to the area where next lli table can be allocated */
- void *lli_table_alloc_addr = 0;
- /* Input lli table */
- struct sep_lli_entry *in_lli_table_ptr = NULL;
- /* Output lli table */
- struct sep_lli_entry *out_lli_table_ptr = NULL;
- /* Pointer to the info entry of the table - the last entry */
- struct sep_lli_entry *info_in_entry_ptr = NULL;
- /* Pointer to the info entry of the table - the last entry */
- struct sep_lli_entry *info_out_entry_ptr = NULL;
- /* Points to the first entry to be processed in the lli_in_array */
- u32 current_in_entry = 0;
- /* Points to the first entry to be processed in the lli_out_array */
- u32 current_out_entry = 0;
- /* Max size of the input table */
- u32 in_table_data_size = 0;
- /* Max size of the output table */
- u32 out_table_data_size = 0;
- /* Flag that signifies if this is the last table to build */
- u32 last_table_flag = 0;
- /* The data size that should be in table */
- u32 table_data_size = 0;
- /* Number of entries in the input table */
- u32 num_entries_in_table = 0;
- /* Number of entries in the output table */
- u32 num_entries_out_table = 0;
-
- /* Initiate to point after the message area */
- lli_table_alloc_addr = (void *)(sep->shared_addr +
- SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
- (sep->num_lli_tables_created *
- (sizeof(struct sep_lli_entry) *
- SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP)));
-
- /* Loop until all the entries in the in array are processed */
- while (current_in_entry < sep_in_lli_entries) {
- /* Set the new input and output tables */
- in_lli_table_ptr =
- (struct sep_lli_entry *)lli_table_alloc_addr;
-
- lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
- SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
-
- /* Set the first output tables */
- out_lli_table_ptr =
- (struct sep_lli_entry *)lli_table_alloc_addr;
-
- /* Check if the DMA table area limit was overrun */
- if ((lli_table_alloc_addr + sizeof(struct sep_lli_entry) *
- SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP) >
- ((void *)sep->shared_addr +
- SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
- SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES)) {
-
- dev_warn(&sep->pdev->dev, "dma table limit overrun\n");
- return -ENOMEM;
- }
-
- /* Update the number of the lli tables created */
- sep->num_lli_tables_created += 2;
-
- lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
- SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
-
- /* Calculate the maximum size of data for input table */
- in_table_data_size =
- sep_calculate_lli_table_max_size(sep,
- &lli_in_array[current_in_entry],
- (sep_in_lli_entries - current_in_entry),
- &last_table_flag);
-
- /* Calculate the maximum size of data for output table */
- out_table_data_size =
- sep_calculate_lli_table_max_size(sep,
- &lli_out_array[current_out_entry],
- (sep_out_lli_entries - current_out_entry),
- &last_table_flag);
-
- dev_dbg(&sep->pdev->dev,
- "construct tables from lli in_table_data_size is %x\n",
- in_table_data_size);
-
- dev_dbg(&sep->pdev->dev,
- "construct tables from lli out_table_data_size is %x\n",
- out_table_data_size);
-
- table_data_size = in_table_data_size;
-
- if (!last_table_flag) {
- /*
- * If this is not the last table,
- * we must take the smaller of the two data sizes
- * and then align it to the block size
- */
- if (table_data_size > out_table_data_size)
- table_data_size = out_table_data_size;
-
- /*
- * Now calculate the table size so that
- * it will be a multiple of the block size
- */
- table_data_size = (table_data_size / block_size) *
- block_size;
- }
-
- /* Construct input lli table */
- sep_build_lli_table(sep, &lli_in_array[current_in_entry],
- in_lli_table_ptr,
- &current_in_entry,
- &num_entries_in_table,
- table_data_size);
-
- /* Construct output lli table */
- sep_build_lli_table(sep, &lli_out_array[current_out_entry],
- out_lli_table_ptr,
- &current_out_entry,
- &num_entries_out_table,
- table_data_size);
-
- /* If info entry is null - this is the first table built */
- if (info_in_entry_ptr == NULL) {
- /* Set the output parameters to physical addresses */
- *lli_table_in_ptr =
- sep_shared_area_virt_to_bus(sep, in_lli_table_ptr);
-
- *in_num_entries_ptr = num_entries_in_table;
-
- *lli_table_out_ptr =
- sep_shared_area_virt_to_bus(sep,
- out_lli_table_ptr);
-
- *out_num_entries_ptr = num_entries_out_table;
- *table_data_size_ptr = table_data_size;
-
- dev_dbg(&sep->pdev->dev,
- "output lli_table_in_ptr is %08lx\n",
- (unsigned long)*lli_table_in_ptr);
- dev_dbg(&sep->pdev->dev,
- "output lli_table_out_ptr is %08lx\n",
- (unsigned long)*lli_table_out_ptr);
- } else {
- /* Update the info entry of the previous in table */
- info_in_entry_ptr->bus_address =
- sep_shared_area_virt_to_bus(sep,
- in_lli_table_ptr);
-
- info_in_entry_ptr->block_size =
- ((num_entries_in_table) << 24) |
- (table_data_size);
-
- /* Update the info entry of the previous out table */
- info_out_entry_ptr->bus_address =
- sep_shared_area_virt_to_bus(sep,
- out_lli_table_ptr);
-
- info_out_entry_ptr->block_size =
- ((num_entries_out_table) << 24) |
- (table_data_size);
-
- dev_dbg(&sep->pdev->dev,
- "output lli_table_in_ptr:%08lx %08x\n",
- (unsigned long)info_in_entry_ptr->bus_address,
- info_in_entry_ptr->block_size);
-
- dev_dbg(&sep->pdev->dev,
- "output lli_table_out_ptr:%08lx %08x\n",
- (unsigned long)info_out_entry_ptr->bus_address,
- info_out_entry_ptr->block_size);
- }
-
- /* Save the pointer to the info entry of the current tables */
- info_in_entry_ptr = in_lli_table_ptr +
- num_entries_in_table - 1;
- info_out_entry_ptr = out_lli_table_ptr +
- num_entries_out_table - 1;
-
- dev_dbg(&sep->pdev->dev,
- "output num_entries_out_table is %x\n",
- (u32)num_entries_out_table);
- dev_dbg(&sep->pdev->dev,
- "output info_in_entry_ptr is %lx\n",
- (unsigned long)info_in_entry_ptr);
- dev_dbg(&sep->pdev->dev,
- "output info_out_entry_ptr is %lx\n",
- (unsigned long)info_out_entry_ptr);
- }
-
- /* Print input tables */
- sep_debug_print_lli_tables(sep,
- (struct sep_lli_entry *)
- sep_shared_area_bus_to_virt(sep, *lli_table_in_ptr),
- *in_num_entries_ptr,
- *table_data_size_ptr);
-
- /* Print output tables */
- sep_debug_print_lli_tables(sep,
- (struct sep_lli_entry *)
- sep_shared_area_bus_to_virt(sep, *lli_table_out_ptr),
- *out_num_entries_ptr,
- *table_data_size_ptr);
-
- return 0;
-}
-
-/**
- * sep_prepare_input_output_dma_table - prepare DMA I/O table
- * @sep: pointer to struct sep_device
- * @app_virt_in_addr: virtual address of the input buffer
- * @app_virt_out_addr: virtual address of the output buffer
- * @data_size: size of the data
- * @block_size: block size of the operation
- * @lli_table_in_ptr: returned bus address of the first input table
- * @lli_table_out_ptr: returned bus address of the first output table
- * @in_num_entries_ptr: returned number of input table entries
- * @out_num_entries_ptr: returned number of output table entries
- * @table_data_size_ptr: returned data size of the first tables
- * @is_kva: set for kernel data; used only for kernel crypto module
- *
- * This function builds input and output DMA tables for synchronous
- * symmetric operations (AES, DES, HASH). It also checks that each table
- * is a multiple of the block size
- * Note that all bus addresses that are passed to the SEP
- * are in 32 bit format; the SEP is a 32 bit device
- */
-static int sep_prepare_input_output_dma_table(struct sep_device *sep,
- unsigned long app_virt_in_addr,
- unsigned long app_virt_out_addr,
- u32 data_size,
- u32 block_size,
- dma_addr_t *lli_table_in_ptr,
- dma_addr_t *lli_table_out_ptr,
- u32 *in_num_entries_ptr,
- u32 *out_num_entries_ptr,
- u32 *table_data_size_ptr,
- bool is_kva)
-
-{
- int error = 0;
- /* Array of pointers of page */
- struct sep_lli_entry *lli_in_array;
- /* Array of pointers of page */
- struct sep_lli_entry *lli_out_array;
-
- if (data_size == 0) {
- /* Prepare empty table for input and output */
- sep_prepare_empty_lli_table(sep, lli_table_in_ptr,
- in_num_entries_ptr, table_data_size_ptr);
-
- sep_prepare_empty_lli_table(sep, lli_table_out_ptr,
- out_num_entries_ptr, table_data_size_ptr);
-
- goto update_dcb_counter;
- }
-
- /* Initialize the pages pointers */
- sep->dma_res_arr[sep->nr_dcb_creat].in_page_array = NULL;
- sep->dma_res_arr[sep->nr_dcb_creat].out_page_array = NULL;
-
- /* Lock the pages of the buffer and translate them to pages */
- if (is_kva == true) {
- error = sep_lock_kernel_pages(sep, app_virt_in_addr,
- data_size, &lli_in_array, SEP_DRIVER_IN_FLAG);
-
- if (error) {
- dev_warn(&sep->pdev->dev,
- "lock kernel for in failed\n");
- goto end_function;
- }
-
- error = sep_lock_kernel_pages(sep, app_virt_out_addr,
- data_size, &lli_out_array, SEP_DRIVER_OUT_FLAG);
-
- if (error) {
- dev_warn(&sep->pdev->dev,
- "lock kernel for out failed\n");
- goto end_function;
- }
- }
-
- else {
- error = sep_lock_user_pages(sep, app_virt_in_addr,
- data_size, &lli_in_array, SEP_DRIVER_IN_FLAG);
- if (error) {
- dev_warn(&sep->pdev->dev,
- "sep_lock_user_pages for input virtual buffer failed\n");
- goto end_function;
- }
-
- error = sep_lock_user_pages(sep, app_virt_out_addr,
- data_size, &lli_out_array, SEP_DRIVER_OUT_FLAG);
-
- if (error) {
- dev_warn(&sep->pdev->dev,
- "sep_lock_user_pages for output virtual buffer failed\n");
- goto end_function_free_lli_in;
- }
- }
-
- dev_dbg(&sep->pdev->dev, "prep input output dma table sep_in_num_pages is %x\n",
- sep->dma_res_arr[sep->nr_dcb_creat].in_num_pages);
- dev_dbg(&sep->pdev->dev, "sep_out_num_pages is %x\n",
- sep->dma_res_arr[sep->nr_dcb_creat].out_num_pages);
- dev_dbg(&sep->pdev->dev, "SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP is %x\n",
- SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);
-
- /* Call the function that creates table from the lli arrays */
- error = sep_construct_dma_tables_from_lli(sep, lli_in_array,
- sep->dma_res_arr[sep->nr_dcb_creat].in_num_pages,
- lli_out_array,
- sep->dma_res_arr[sep->nr_dcb_creat].out_num_pages,
- block_size, lli_table_in_ptr, lli_table_out_ptr,
- in_num_entries_ptr, out_num_entries_ptr, table_data_size_ptr);
-
- if (error) {
- dev_warn(&sep->pdev->dev,
- "sep_construct_dma_tables_from_lli failed\n");
- goto end_function_with_error;
- }
-
- kfree(lli_out_array);
- kfree(lli_in_array);
-
-update_dcb_counter:
- /* Update DCB counter */
- sep->nr_dcb_creat++;
-
- goto end_function;
-
-end_function_with_error:
- kfree(sep->dma_res_arr[sep->nr_dcb_creat].out_map_array);
- kfree(sep->dma_res_arr[sep->nr_dcb_creat].out_page_array);
- kfree(lli_out_array);
-
-
-end_function_free_lli_in:
- kfree(sep->dma_res_arr[sep->nr_dcb_creat].in_map_array);
- kfree(sep->dma_res_arr[sep->nr_dcb_creat].in_page_array);
- kfree(lli_in_array);
-
-end_function:
-
- return error;
-
-}
-
-/**
- * sep_prepare_input_output_dma_table_in_dcb - prepare control blocks
- * @app_in_address: unsigned long; for data buffer in (user space)
- * @app_out_address: unsigned long; for data buffer out (user space)
- * @data_in_size: u32; for size of data
- * @block_size: u32; for block size
- * @tail_block_size: u32; for size of tail block
- * @isapplet: bool; to indicate external app
- * @is_kva: bool; kernel buffer; only used for kernel crypto module
- *
- * This function prepares the linked DMA tables and puts the
- * address for the linked list of tables into a DCB (data control
- * block) the address of which is known by the SEP hardware
- * Note that all bus addresses that are passed to the SEP
- * are in 32 bit format; the SEP is a 32 bit device
- */
-static int sep_prepare_input_output_dma_table_in_dcb(struct sep_device *sep,
- unsigned long app_in_address,
- unsigned long app_out_address,
- u32 data_in_size,
- u32 block_size,
- u32 tail_block_size,
- bool isapplet,
- bool is_kva)
-{
- int error = 0;
- /* Size of tail */
- u32 tail_size = 0;
- /* Address of the created DCB table */
- struct sep_dcblock *dcb_table_ptr = NULL;
- /* The physical address of the first input DMA table */
- dma_addr_t in_first_mlli_address = 0;
- /* Number of entries in the first input DMA table */
- u32 in_first_num_entries = 0;
- /* The physical address of the first output DMA table */
- dma_addr_t out_first_mlli_address = 0;
- /* Number of entries in the first output DMA table */
- u32 out_first_num_entries = 0;
- /* Data in the first input/output table */
- u32 first_data_size = 0;
-
- if (sep->nr_dcb_creat == SEP_MAX_NUM_SYNC_DMA_OPS) {
- /* No more DCBs to allocate */
- dev_warn(&sep->pdev->dev, "no more DCBs available\n");
- error = -ENOSPC;
- goto end_function;
- }
-
- /* Allocate new DCB */
- dcb_table_ptr = (struct sep_dcblock *)(sep->shared_addr +
- SEP_DRIVER_SYSTEM_DCB_MEMORY_OFFSET_IN_BYTES +
- (sep->nr_dcb_creat * sizeof(struct sep_dcblock)));
-
- /* Set the default values in the DCB */
- dcb_table_ptr->input_mlli_address = 0;
- dcb_table_ptr->input_mlli_num_entries = 0;
- dcb_table_ptr->input_mlli_data_size = 0;
- dcb_table_ptr->output_mlli_address = 0;
- dcb_table_ptr->output_mlli_num_entries = 0;
- dcb_table_ptr->output_mlli_data_size = 0;
- dcb_table_ptr->tail_data_size = 0;
- dcb_table_ptr->out_vr_tail_pt = 0;
-
- if (isapplet == true) {
-
- /* Check if there is enough data for DMA operation */
- if (data_in_size < SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE) {
- if (is_kva == true) {
- memcpy(dcb_table_ptr->tail_data,
- (void *)app_in_address, data_in_size);
- } else {
- if (copy_from_user(dcb_table_ptr->tail_data,
- (void __user *)app_in_address,
- data_in_size)) {
- error = -EFAULT;
- goto end_function;
- }
- }
-
- dcb_table_ptr->tail_data_size = data_in_size;
-
- /* Set the output user-space address for mem2mem op */
- if (app_out_address)
- dcb_table_ptr->out_vr_tail_pt =
- (aligned_u64)app_out_address;
-
- /*
- * Update both data length parameters in order to avoid
- * second data copy and allow building of empty mlli
- * tables
- */
- tail_size = 0x0;
- data_in_size = 0x0;
-
- } else {
- if (!app_out_address) {
- tail_size = data_in_size % block_size;
- if (!tail_size) {
- if (tail_block_size == block_size)
- tail_size = block_size;
- }
- } else {
- tail_size = 0;
- }
- }
- if (tail_size) {
- if (tail_size > sizeof(dcb_table_ptr->tail_data))
- return -EINVAL;
- if (is_kva == true) {
- memcpy(dcb_table_ptr->tail_data,
- (void *)(app_in_address + data_in_size -
- tail_size), tail_size);
- } else {
- /* We have tail data - copy it to DCB */
- if (copy_from_user(dcb_table_ptr->tail_data,
- (void *)(app_in_address +
- data_in_size - tail_size), tail_size)) {
- error = -EFAULT;
- goto end_function;
- }
- }
- if (app_out_address)
- /*
- * Calculate the output address
- * according to tail data size
- */
- dcb_table_ptr->out_vr_tail_pt =
- (aligned_u64)app_out_address + data_in_size
- - tail_size;
-
- /* Save the real tail data size */
- dcb_table_ptr->tail_data_size = tail_size;
- /*
- * Update the data size without the tail
- * data size AKA data for the dma
- */
- data_in_size = (data_in_size - tail_size);
- }
- }
- /* Check if we need to build only input table or input/output */
- if (app_out_address) {
- /* Prepare input/output tables */
- error = sep_prepare_input_output_dma_table(sep,
- app_in_address,
- app_out_address,
- data_in_size,
- block_size,
- &in_first_mlli_address,
- &out_first_mlli_address,
- &in_first_num_entries,
- &out_first_num_entries,
- &first_data_size,
- is_kva);
- } else {
- /* Prepare input tables */
- error = sep_prepare_input_dma_table(sep,
- app_in_address,
- data_in_size,
- block_size,
- &in_first_mlli_address,
- &in_first_num_entries,
- &first_data_size,
- is_kva);
- }
-
- if (error) {
- dev_warn(&sep->pdev->dev, "prepare DMA table call failed from prepare DCB call\n");
- goto end_function;
- }
-
- /* Set the DCB values */
- dcb_table_ptr->input_mlli_address = in_first_mlli_address;
- dcb_table_ptr->input_mlli_num_entries = in_first_num_entries;
- dcb_table_ptr->input_mlli_data_size = first_data_size;
- dcb_table_ptr->output_mlli_address = out_first_mlli_address;
- dcb_table_ptr->output_mlli_num_entries = out_first_num_entries;
- dcb_table_ptr->output_mlli_data_size = first_data_size;
-
-end_function:
- return error;
-
-}
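-
-/*
- * Tail handling example (illustrative numbers): for an applet call
- * with data_in_size 0x1015 and block_size 0x10, tail_size =
- * 0x1015 % 0x10 = 0x5. Those five bytes are copied into
- * dcb_table_ptr->tail_data, the DMA tables are built for the
- * remaining 0x1010 bytes, and the tail is copied back to
- * out_vr_tail_pt when the DCB is freed.
- */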
-
-/**
- * sep_free_dma_tables_and_dcb - free DMA tables and DCBs
- * @sep: pointer to struct sep_device
- * @isapplet: indicates external application (used for kernel access)
- * @is_kva: indicates kernel addresses (only used for kernel crypto)
- *
- * This function frees the DMA tables and DCB
- */
-static int sep_free_dma_tables_and_dcb(struct sep_device *sep, bool isapplet,
- bool is_kva)
-{
- int i = 0;
- int error = 0;
- int error_temp = 0;
- struct sep_dcblock *dcb_table_ptr;
- unsigned long pt_hold;
- void *tail_pt;
-
- if (isapplet == true) {
- /* Set pointer to first DCB table */
- dcb_table_ptr = (struct sep_dcblock *)
- (sep->shared_addr +
- SEP_DRIVER_SYSTEM_DCB_MEMORY_OFFSET_IN_BYTES);
-
- /* Go over each DCB and see if tail pointer must be updated */
- for (i = 0; i < sep->nr_dcb_creat; i++, dcb_table_ptr++) {
- if (dcb_table_ptr->out_vr_tail_pt) {
- pt_hold = (unsigned long)dcb_table_ptr->out_vr_tail_pt;
- tail_pt = (void *)pt_hold;
- if (is_kva == true) {
- memcpy(tail_pt,
- dcb_table_ptr->tail_data,
- dcb_table_ptr->tail_data_size);
- } else {
- error_temp = copy_to_user(
- tail_pt,
- dcb_table_ptr->tail_data,
- dcb_table_ptr->tail_data_size);
- }
- if (error_temp) {
- /* Release the DMA resource */
- error = -EFAULT;
- break;
- }
- }
- }
- }
- /* Free the output pages, if any */
- sep_free_dma_table_data_handler(sep);
-
- return error;
-}
-
-/**
- * sep_get_static_pool_addr_handler - get static pool address
- * @sep: pointer to struct sep_device
- *
- * This function sets the bus and virtual addresses of the static pool
- */
-static int sep_get_static_pool_addr_handler(struct sep_device *sep)
-{
- u32 *static_pool_addr = NULL;
-
- static_pool_addr = (u32 *)(sep->shared_addr +
- SEP_DRIVER_SYSTEM_RAR_MEMORY_OFFSET_IN_BYTES);
-
- static_pool_addr[0] = SEP_STATIC_POOL_VAL_TOKEN;
- static_pool_addr[1] = (u32)sep->shared_bus +
- SEP_DRIVER_STATIC_AREA_OFFSET_IN_BYTES;
-
- dev_dbg(&sep->pdev->dev, "static pool segment: physical %x\n",
- (u32)static_pool_addr[1]);
-
- return 0;
-}
-
-/**
- * sep_end_transaction_handler - end transaction
- * @sep: pointer to struct sep_device
- *
- * This API handles the end transaction request
- */
-static int sep_end_transaction_handler(struct sep_device *sep)
-{
- /* Clear the data pool pointers Token */
- memset((void *)(sep->shared_addr +
- SEP_DRIVER_DATA_POOL_ALLOCATION_OFFSET_IN_BYTES),
- 0, sep->num_of_data_allocations*2*sizeof(u32));
-
- /* Check that all the DMA resources were freed */
- sep_free_dma_table_data_handler(sep);
-
- clear_bit(SEP_MMAP_LOCK_BIT, &sep->in_use_flags);
-
- /*
- * We are now through with the transaction. Let's
- * allow other processes who have the device open
- * to perform transactions
- */
- mutex_lock(&sep->sep_mutex);
- sep->pid_doing_transaction = 0;
- mutex_unlock(&sep->sep_mutex);
- /* Raise event for stuck contextes */
- wake_up(&sep->event);
-
- return 0;
-}
-
-/**
- * sep_prepare_dcb_handler - prepare a control block
- * @sep: pointer to struct sep_device
- * @arg: pointer to user parameters
- *
- * This function will retrieve the RAR buffer physical addresses, type
- * & size corresponding to the RAR handles provided in the buffers vector.
- */
-static int sep_prepare_dcb_handler(struct sep_device *sep, unsigned long arg)
-{
- int error;
- /* Command arguments */
- struct build_dcb_struct command_args;
-
- /* Get the command arguments */
- if (copy_from_user(&command_args, (void __user *)arg,
- sizeof(struct build_dcb_struct))) {
- error = -EFAULT;
- goto end_function;
- }
-
- dev_dbg(&sep->pdev->dev, "prep dcb handler app_in_address is %08llx\n",
- command_args.app_in_address);
- dev_dbg(&sep->pdev->dev, "app_out_address is %08llx\n",
- command_args.app_out_address);
- dev_dbg(&sep->pdev->dev, "data_size is %x\n",
- command_args.data_in_size);
- dev_dbg(&sep->pdev->dev, "block_size is %x\n",
- command_args.block_size);
- dev_dbg(&sep->pdev->dev, "tail block_size is %x\n",
- command_args.tail_block_size);
-
- error = sep_prepare_input_output_dma_table_in_dcb(sep,
- (unsigned long)command_args.app_in_address,
- (unsigned long)command_args.app_out_address,
- command_args.data_in_size, command_args.block_size,
- command_args.tail_block_size, true, false);
-
-end_function:
- return error;
-
-}
-
-/**
- * sep_free_dcb_handler - free control block resources
- * @sep: pointer to struct sep_device
- *
- * This function frees the DCB resources and updates the needed
- * user-space buffers.
- */
-static int sep_free_dcb_handler(struct sep_device *sep)
-{
- return sep_free_dma_tables_and_dcb(sep, false, false);
-}
-
-/**
- * sep_rar_prepare_output_msg_handler - prepare an output message
- * @sep: pointer to struct sep_device
- * @arg: pointer to user parameters
- *
- * This function will retrieve the RAR buffer physical addresses, type
- * & size corresponding to the RAR handles provided in the buffers vector.
- */
-static int sep_rar_prepare_output_msg_handler(struct sep_device *sep,
- unsigned long arg)
-{
- int error = 0;
- /* Command args */
- struct rar_hndl_to_bus_struct command_args;
- /* Bus address */
- dma_addr_t rar_bus = 0;
- /* Holds the RAR address in the system memory offset */
- u32 *rar_addr;
-
- /* Copy the data */
- if (copy_from_user(&command_args, (void __user *)arg,
- sizeof(command_args))) {
- error = -EFAULT;
- goto end_function;
- }
-
- /* Call to translation function only if user handle is not NULL */
- if (command_args.rar_handle)
- return -EOPNOTSUPP;
- dev_dbg(&sep->pdev->dev, "rar msg; rar_addr_bus = %x\n", (u32)rar_bus);
-
- /* Set value in the SYSTEM MEMORY offset */
- rar_addr = (u32 *)(sep->shared_addr +
- SEP_DRIVER_SYSTEM_RAR_MEMORY_OFFSET_IN_BYTES);
-
- /* Copy the physical address to the System Area for the SEP */
- rar_addr[0] = SEP_RAR_VAL_TOKEN;
- rar_addr[1] = rar_bus;
-
-end_function:
- return error;
-}
-
-/**
- * sep_ioctl - ioctl api
- * @filp: pointer to struct file
- * @cmd: command
- * @arg: pointer to argument structure
- *
- * Implement the ioctl methods available on the SEP device.
- */
-static long sep_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
-{
- int error = 0;
- struct sep_device *sep = filp->private_data;
-
- /* Make sure we own this device */
- mutex_lock(&sep->sep_mutex);
- if ((current->pid != sep->pid_doing_transaction) &&
- (sep->pid_doing_transaction != 0)) {
- dev_dbg(&sep->pdev->dev, "ioctl pid is not owner\n");
- error = -EACCES;
- }
- mutex_unlock(&sep->sep_mutex);
-
- if (error)
- return error;
-
- if (_IOC_TYPE(cmd) != SEP_IOC_MAGIC_NUMBER)
- return -ENOTTY;
-
- /* Lock to prevent the daemon to interfere with operation */
- mutex_lock(&sep->ioctl_mutex);
-
- switch (cmd) {
- case SEP_IOCSENDSEPCOMMAND:
- /* Send command to SEP */
- error = sep_send_command_handler(sep);
- break;
- case SEP_IOCALLOCDATAPOLL:
- /* Allocate data pool */
- error = sep_allocate_data_pool_memory_handler(sep, arg);
- break;
- case SEP_IOCGETSTATICPOOLADDR:
- /* Inform the SEP the bus address of the static pool */
- error = sep_get_static_pool_addr_handler(sep);
- break;
- case SEP_IOCENDTRANSACTION:
- error = sep_end_transaction_handler(sep);
- break;
- case SEP_IOCRARPREPAREMESSAGE:
- error = sep_rar_prepare_output_msg_handler(sep, arg);
- break;
- case SEP_IOCPREPAREDCB:
- error = sep_prepare_dcb_handler(sep, arg);
- break;
- case SEP_IOCFREEDCB:
- error = sep_free_dcb_handler(sep);
- break;
- default:
- error = -ENOTTY;
- break;
- }
-
- mutex_unlock(&sep->ioctl_mutex);
- return error;
-}
-
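For reference, a minimal user-space sketch of driving this legacy ioctl interface (a hypothetical example, not part of the driver; the device node name assumes the misc device registered as SEP_DEV_NAME, and the command values mirror the definitions in sep_driver_api.h):

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <unistd.h>

	#define SEP_IOC_MAGIC_NUMBER	's'
	#define SEP_IOCSENDSEPCOMMAND	_IO(SEP_IOC_MAGIC_NUMBER, 0)
	#define SEP_IOCENDTRANSACTION	_IO(SEP_IOC_MAGIC_NUMBER, 15)

	int main(void)
	{
		int fd = open("/dev/sep_sec_driver", O_RDWR);

		if (fd < 0) {
			perror("open");
			return 1;
		}
		/* mmap the shared area and compose a message here */
		if (ioctl(fd, SEP_IOCSENDSEPCOMMAND) < 0)
			perror("SEP_IOCSENDSEPCOMMAND");
		/* poll() for the reply, then release ownership */
		if (ioctl(fd, SEP_IOCENDTRANSACTION) < 0)
			perror("SEP_IOCENDTRANSACTION");
		close(fd);
		return 0;
	}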
-/**
- * sep_singleton_ioctl - ioctl api for singleton interface
- * @filp: pointer to struct file
- * @cmd: command
- * @arg: pointer to argument structure
- *
- * Implement the additional ioctls for the singleton device
- */
-static long sep_singleton_ioctl(struct file *filp, u32 cmd, unsigned long arg)
-{
- long error = 0;
- struct sep_device *sep = filp->private_data;
-
- /* Check that the command is for the SEP device */
- if (_IOC_TYPE(cmd) != SEP_IOC_MAGIC_NUMBER)
- return -ENOTTY;
-
- /* Make sure we own this device */
- mutex_lock(&sep->sep_mutex);
- if ((current->pid != sep->pid_doing_transaction) &&
- (sep->pid_doing_transaction != 0)) {
- dev_dbg(&sep->pdev->dev, "singleton ioctl pid is not owner\n");
- mutex_unlock(&sep->sep_mutex);
- return -EACCES;
- }
-
- mutex_unlock(&sep->sep_mutex);
-
- switch (cmd) {
- case SEP_IOCTLSETCALLERID:
- mutex_lock(&sep->ioctl_mutex);
- error = sep_set_caller_id_handler(sep, arg);
- mutex_unlock(&sep->ioctl_mutex);
- break;
- default:
- error = sep_ioctl(filp, cmd, arg);
- break;
- }
- return error;
-}
-
-/**
- * sep_request_daemon_ioctl - ioctl for daemon
- * @filp: pointer to struct file
- * @cmd: command
- * @arg: pointer to argument structure
- *
- * Called by the request daemon to perform ioctls on the daemon device
- */
-static long sep_request_daemon_ioctl(struct file *filp, u32 cmd,
- unsigned long arg)
-{
-
- long error;
- struct sep_device *sep = filp->private_data;
-
- /* Check that the command is for SEP device */
- if (_IOC_TYPE(cmd) != SEP_IOC_MAGIC_NUMBER)
- return -ENOTTY;
-
- /* Only one process can access ioctl at any given time */
- mutex_lock(&sep->ioctl_mutex);
-
- switch (cmd) {
- case SEP_IOCSENDSEPRPLYCOMMAND:
- /* Send reply command to SEP */
- error = sep_req_daemon_send_reply_command_handler(sep);
- break;
- case SEP_IOCENDTRANSACTION:
- /*
- * End req daemon transaction, do nothing
- * will be removed upon update in middleware
- * API library
- */
- error = 0;
- break;
- default:
- error = -ENOTTY;
- }
- mutex_unlock(&sep->ioctl_mutex);
- return error;
-}
-
-/**
- * sep_inthandler - interrupt handler
- * @irq: interrupt
- * @dev_id: device id
- */
-static irqreturn_t sep_inthandler(int irq, void *dev_id)
-{
- irqreturn_t int_error = IRQ_HANDLED;
- unsigned long lck_flags;
- u32 reg_val, reg_val2 = 0;
- struct sep_device *sep = dev_id;
-
- /* Read the IRR register to check if this is SEP interrupt */
- reg_val = sep_read_reg(sep, HW_HOST_IRR_REG_ADDR);
-
- if (reg_val & (0x1 << 13)) {
- /* Lock and update the counter of reply messages */
- spin_lock_irqsave(&sep->snd_rply_lck, lck_flags);
- sep->reply_ct++;
- spin_unlock_irqrestore(&sep->snd_rply_lck, lck_flags);
-
- dev_dbg(&sep->pdev->dev, "sep int: send_ct %lx reply_ct %lx\n",
- sep->send_ct, sep->reply_ct);
-
- /* Is this printf or daemon request? */
- reg_val2 = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
- dev_dbg(&sep->pdev->dev,
- "SEP Interrupt - reg2 is %08x\n", reg_val2);
-
- if ((reg_val2 >> 30) & 0x1) {
- dev_dbg(&sep->pdev->dev, "int: printf request\n");
- wake_up(&sep->event_request_daemon);
- } else if (reg_val2 >> 31) {
- dev_dbg(&sep->pdev->dev, "int: daemon request\n");
- wake_up(&sep->event_request_daemon);
- } else {
- dev_dbg(&sep->pdev->dev, "int: SEP reply\n");
- wake_up(&sep->event);
- }
- } else {
- dev_dbg(&sep->pdev->dev, "int: not SEP interrupt\n");
- int_error = IRQ_NONE;
- }
- if (int_error == IRQ_HANDLED)
- sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, reg_val);
-
- return int_error;
-}
-
-/**
- * sep_reconfig_shared_area - reconfigure shared area
- * @sep: pointer to struct sep_device
- *
- * Reconfig the shared area between HOST and SEP - needed in case
- * the DX_CC_Init function was called before OS loading.
- */
-static int sep_reconfig_shared_area(struct sep_device *sep)
-{
- int ret_val;
-
- /* use to limit waiting for SEP */
- unsigned long end_time;
-
- /* Send the new SHARED MESSAGE AREA to the SEP */
- dev_dbg(&sep->pdev->dev, "reconfig shared; sending %08llx to sep\n",
- (unsigned long long)sep->shared_bus);
-
- sep_write_reg(sep, HW_HOST_HOST_SEP_GPR1_REG_ADDR, sep->shared_bus);
-
- /* Poll for SEP response */
- ret_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR1_REG_ADDR);
-
- end_time = jiffies + (WAIT_TIME * HZ);
-
- while ((time_before(jiffies, end_time)) && (ret_val != 0xffffffff) &&
- (ret_val != sep->shared_bus))
- ret_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR1_REG_ADDR);
-
- /* Check the return value (register) */
- if (ret_val != sep->shared_bus) {
- dev_warn(&sep->pdev->dev, "could not reconfig shared area\n");
- dev_warn(&sep->pdev->dev, "result was %x\n", ret_val);
- ret_val = -ENOMEM;
- } else
- ret_val = 0;
-
- dev_dbg(&sep->pdev->dev, "reconfig shared area end\n");
- return ret_val;
-}
-
-/* File operation for singleton SEP operations */
-static const struct file_operations singleton_file_operations = {
- .owner = THIS_MODULE,
- .unlocked_ioctl = sep_singleton_ioctl,
- .poll = sep_poll,
- .open = sep_singleton_open,
- .release = sep_singleton_release,
- .mmap = sep_mmap,
-};
-
-/* File operation for daemon operations */
-static const struct file_operations daemon_file_operations = {
- .owner = THIS_MODULE,
- .unlocked_ioctl = sep_request_daemon_ioctl,
- .poll = sep_request_daemon_poll,
- .open = sep_request_daemon_open,
- .release = sep_request_daemon_release,
- .mmap = sep_request_daemon_mmap,
-};
-
-/* The files operations structure of the driver */
-static const struct file_operations sep_file_operations = {
- .owner = THIS_MODULE,
- .unlocked_ioctl = sep_ioctl,
- .poll = sep_poll,
- .open = sep_open,
- .release = sep_release,
- .mmap = sep_mmap,
-};
-
-/**
- * sep_register_driver_with_fs - register misc devices
- * @sep: pointer to struct sep_device
- *
- * This function registers the driver with the file system
- */
-static int sep_register_driver_with_fs(struct sep_device *sep)
-{
- int ret_val;
-
- sep->miscdev_sep.minor = MISC_DYNAMIC_MINOR;
- sep->miscdev_sep.name = SEP_DEV_NAME;
- sep->miscdev_sep.fops = &sep_file_operations;
-
- sep->miscdev_singleton.minor = MISC_DYNAMIC_MINOR;
- sep->miscdev_singleton.name = SEP_DEV_SINGLETON;
- sep->miscdev_singleton.fops = &singleton_file_operations;
-
- sep->miscdev_daemon.minor = MISC_DYNAMIC_MINOR;
- sep->miscdev_daemon.name = SEP_DEV_DAEMON;
- sep->miscdev_daemon.fops = &daemon_file_operations;
-
- ret_val = misc_register(&sep->miscdev_sep);
- if (ret_val) {
- dev_warn(&sep->pdev->dev, "misc reg fails for SEP %x\n",
- ret_val);
- return ret_val;
- }
-
- ret_val = misc_register(&sep->miscdev_singleton);
- if (ret_val) {
- dev_warn(&sep->pdev->dev, "misc reg fails for sing %x\n",
- ret_val);
- misc_deregister(&sep->miscdev_sep);
- return ret_val;
- }
-
- ret_val = misc_register(&sep->miscdev_daemon);
- if (ret_val) {
- dev_warn(&sep->pdev->dev, "misc reg fails for dmn %x\n",
- ret_val);
- misc_deregister(&sep->miscdev_sep);
- misc_deregister(&sep->miscdev_singleton);
-
- return ret_val;
- }
- return ret_val;
-}
-
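The manual rollback above could also be written with the kernel's usual goto-unwind idiom; a sketch under the same structure (the minor/name/fops setup is elided, and the _alt suffix is purely illustrative):

	static int sep_register_driver_with_fs_alt(struct sep_device *sep)
	{
		int ret_val;

		ret_val = misc_register(&sep->miscdev_sep);
		if (ret_val)
			return ret_val;

		ret_val = misc_register(&sep->miscdev_singleton);
		if (ret_val)
			goto err_sep;

		ret_val = misc_register(&sep->miscdev_daemon);
		if (ret_val)
			goto err_singleton;

		return 0;

	err_singleton:
		misc_deregister(&sep->miscdev_singleton);
	err_sep:
		misc_deregister(&sep->miscdev_sep);
		return ret_val;
	}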
-
-/**
- * sep_probe - probe a matching PCI device
- * @pdev: pci_device
- * @end: pci_device_id
- *
- * Attempt to set up and configure a SEP device that has been
- * discovered by the PCI layer.
- */
-static int __devinit sep_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
-{
- int error = 0;
- struct sep_device *sep;
-
- if (sep_dev != NULL) {
- dev_warn(&pdev->dev, "only one SEP supported.\n");
- return -EBUSY;
- }
-
- /* Enable the device */
- error = pci_enable_device(pdev);
- if (error) {
- dev_warn(&pdev->dev, "error enabling pci device\n");
- goto end_function;
- }
-
- /* Allocate the sep_device structure for this device */
- sep_dev = kzalloc(sizeof(struct sep_device), GFP_ATOMIC);
- if (sep_dev == NULL) {
- dev_warn(&pdev->dev,
- "can't kmalloc the sep_device structure\n");
- error = -ENOMEM;
- goto end_function_disable_device;
- }
-
- /*
- * We're going to use another variable for actually
- * working with the device; this way, if we have
- * multiple devices in the future, it would be easier
- * to make appropriate changes
- */
- sep = sep_dev;
-
- sep->pdev = pci_dev_get(pdev);
-
- init_waitqueue_head(&sep->event);
- init_waitqueue_head(&sep->event_request_daemon);
- spin_lock_init(&sep->snd_rply_lck);
- mutex_init(&sep->sep_mutex);
- mutex_init(&sep->ioctl_mutex);
-
- dev_dbg(&sep->pdev->dev, "sep probe: PCI obtained, device being prepared\n");
- dev_dbg(&sep->pdev->dev, "revision is %d\n", sep->pdev->revision);
-
- /* Set up our register area */
- sep->reg_physical_addr = pci_resource_start(sep->pdev, 0);
- if (!sep->reg_physical_addr) {
- dev_warn(&sep->pdev->dev, "Error getting register start\n");
- error = -ENODEV;
- goto end_function_free_sep_dev;
- }
-
- sep->reg_physical_end = pci_resource_end(sep->pdev, 0);
- if (!sep->reg_physical_end) {
- dev_warn(&sep->pdev->dev, "Error getting register end\n");
- error = -ENODEV;
- goto end_function_free_sep_dev;
- }
-
- sep->reg_addr = ioremap_nocache(sep->reg_physical_addr,
- (size_t)(sep->reg_physical_end - sep->reg_physical_addr + 1));
- if (!sep->reg_addr) {
- dev_warn(&sep->pdev->dev, "Error getting register virtual\n");
- error = -ENODEV;
- goto end_function_free_sep_dev;
- }
-
- dev_dbg(&sep->pdev->dev,
- "Register area start %llx end %llx virtual %p\n",
- (unsigned long long)sep->reg_physical_addr,
- (unsigned long long)sep->reg_physical_end,
- sep->reg_addr);
-
- /* Allocate the shared area */
- sep->shared_size = SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES +
- SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES +
- SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES +
- SEP_DRIVER_STATIC_AREA_SIZE_IN_BYTES +
- SEP_DRIVER_SYSTEM_DATA_MEMORY_SIZE_IN_BYTES;
-
- if (sep_map_and_alloc_shared_area(sep)) {
- error = -ENOMEM;
- /* Allocation failed */
- goto end_function_error;
- }
-
- /* Clear ICR register */
- sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);
-
- /* Set the IMR register - open only GPR 2 */
- sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, (~(0x1 << 13)));
-
- /* Read send/receive counters from SEP */
- sep->reply_ct = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
- sep->reply_ct &= 0x3FFFFFFF;
- sep->send_ct = sep->reply_ct;
-
- /* Get the interrupt line */
- error = request_irq(pdev->irq, sep_inthandler, IRQF_SHARED,
- "sep_driver", sep);
-
- if (error)
- goto end_function_deallocate_sep_shared_area;
-
- /* The new chip requires a shared area reconfigure */
- if (sep->pdev->revision == 4) { /* Only for new chip */
- error = sep_reconfig_shared_area(sep);
- if (error)
- goto end_function_free_irq;
- }
- /* Finally magic up the device nodes */
- /* Register driver with the fs */
- error = sep_register_driver_with_fs(sep);
- if (error == 0)
- /* Success */
- return 0;
-
-end_function_free_irq:
- free_irq(pdev->irq, sep);
-
-end_function_deallocate_sep_shared_area:
- /* De-allocate shared area */
- sep_unmap_and_free_shared_area(sep);
-
-end_function_error:
- iounmap(sep->reg_addr);
-
-end_function_free_sep_dev:
- pci_dev_put(sep_dev->pdev);
- kfree(sep_dev);
- sep_dev = NULL;
-
-end_function_disable_device:
- pci_disable_device(pdev);
-
-end_function:
- return error;
-}
-
-static void sep_remove(struct pci_dev *pdev)
-{
- struct sep_device *sep = sep_dev;
-
- /* Unregister from fs */
- misc_deregister(&sep->miscdev_sep);
- misc_deregister(&sep->miscdev_singleton);
- misc_deregister(&sep->miscdev_daemon);
-
- /* Free the irq */
- free_irq(sep->pdev->irq, sep);
-
- /* Free the shared area */
- sep_unmap_and_free_shared_area(sep_dev);
- iounmap((void *) sep_dev->reg_addr);
-}
-
-static DEFINE_PCI_DEVICE_TABLE(sep_pci_id_tbl) = {
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MFLD_PCI_DEVICE_ID)},
- {0}
-};
-
-MODULE_DEVICE_TABLE(pci, sep_pci_id_tbl);
-
-/* Field for registering driver to PCI device */
-static struct pci_driver sep_pci_driver = {
- .name = "sep_sec_driver",
- .id_table = sep_pci_id_tbl,
- .probe = sep_probe,
- .remove = sep_remove
-};
-
-
-/**
- * sep_init - init function
- *
- * Module load time. Register the PCI device driver.
- */
-static int __init sep_init(void)
-{
- return pci_register_driver(&sep_pci_driver);
-}
-
-
-/**
- * sep_exit - called to unload driver
- *
- * Drop the misc devices then remove and unmap the various resources
- * that are not released by the driver remove method.
- */
-static void __exit sep_exit(void)
-{
- pci_unregister_driver(&sep_pci_driver);
-}
-
-
-module_init(sep_init);
-module_exit(sep_exit);
-
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/sep/sep_driver_api.h b/drivers/staging/sep/sep_driver_api.h
index c3aacfcc8ac..8b797d5388b 100644
--- a/drivers/staging/sep/sep_driver_api.h
+++ b/drivers/staging/sep/sep_driver_api.h
@@ -2,8 +2,8 @@
*
* sep_driver_api.h - Security Processor Driver api definitions
*
- * Copyright(c) 2009,2010 Intel Corporation. All rights reserved.
- * Contributions(c) 2009,2010 Discretix. All rights reserved.
+ * Copyright(c) 2009-2011 Intel Corporation. All rights reserved.
+ * Contributions(c) 2009-2011 Discretix. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
@@ -26,6 +26,7 @@
* CHANGES:
*
* 2010.09.14 Upgrade to Medfield
+ * 2011.02.22 Enable kernel crypto
*
*/
@@ -37,26 +38,32 @@
#define SEP_DRIVER_SRC_REQ 2
#define SEP_DRIVER_SRC_PRINTF 3
-
-/*-------------------------------------------
- TYPEDEFS
-----------------------------------------------*/
-
-struct alloc_struct {
- /* offset from start of shared pool area */
- u32 offset;
- /* number of bytes to allocate */
- u32 num_bytes;
-};
-
-/* command struct for getting caller id value and address */
-struct caller_id_struct {
- /* pid of the process */
- u32 pid;
- /* virtual address of the caller id hash */
- aligned_u64 callerIdAddress;
- /* caller id hash size in bytes */
- u32 callerIdSizeInBytes;
+/* Power state */
+#define SEP_DRIVER_POWERON 1
+#define SEP_DRIVER_POWEROFF 2
+
+/* Following enums are used only for kernel crypto api */
+enum type_of_request {
+ NO_REQUEST,
+ AES_CBC,
+ AES_ECB,
+ DES_CBC,
+ DES_ECB,
+ DES3_ECB,
+ DES3_CBC,
+ SHA1,
+ MD5,
+ SHA224,
+ SHA256
+ };
+
+enum hash_stage {
+ HASH_INIT,
+ HASH_UPDATE,
+ HASH_FINISH,
+ HASH_DIGEST,
+ HASH_FINUP_DATA,
+ HASH_FINUP_FINISH
};
/*
@@ -83,11 +90,6 @@ struct sep_dcblock {
u8 tail_data[68];
};
-struct sep_caller_id_entry {
- int pid;
- unsigned char callerIdHash[SEP_CALLER_ID_HASH_SIZE_IN_BYTES];
-};
-
/*
command structure for building dcb block (currently for ext app only
*/
@@ -104,6 +106,33 @@ struct build_dcb_struct {
/* the size of the block of the operation - if needed,
every table will be modulo this parameter */
u32 tail_block_size;
+
+ /* which application calls the driver DX or applet */
+ u32 is_applet;
+};
+
+/*
+ command structure for building dcb block for kernel crypto
+*/
+struct build_dcb_struct_kernel {
+ /* address value of the data in */
+ void *app_in_address;
+ /* size of data in */
+ ssize_t data_in_size;
+ /* address of the data out */
+ void *app_out_address;
+ /* the size of the block of the operation - if needed,
+ every table will be modulo this parameter */
+ u32 block_size;
+ /* the size of the block of the operation - if needed,
+ every table will be modulo this parameter */
+ u32 tail_block_size;
+
+ /* which application calls the driver DX or applet */
+ u32 is_applet;
+
+ struct scatterlist *src_sg;
+ struct scatterlist *dst_sg;
};
/**
@@ -147,6 +176,10 @@ struct sep_dma_resource {
/* number of entries of the output mapp array */
u32 out_map_num_entries;
+
+ /* Scatter list for kernel operations */
+ struct scatterlist *src_sg;
+ struct scatterlist *dst_sg;
};
@@ -169,47 +202,201 @@ struct sep_lli_entry {
u32 block_size;
};
-/*----------------------------------------------------------------
- IOCTL command defines
- -----------------------------------------------------------------*/
+/*
+ * header format for each fastcall write operation
+ */
+struct sep_fastcall_hdr {
+ u32 magic;
+ u32 secure_dma;
+ u32 msg_len;
+ u32 num_dcbs;
+};
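A plausible layout for one fastcall write buffer built from these fields (an assumption inferred from the header members, not a verified wire format; buf, msg, msg_len, dcb_args and fd are hypothetical):

	struct sep_fastcall_hdr hdr = {
		.magic      = SEP_FC_MAGIC,	/* from sep_driver_config.h */
		.secure_dma = 0,
		.msg_len    = msg_len,		/* message bytes that follow */
		.num_dcbs   = 1,		/* one DCB argument block appended */
	};

	memcpy(buf, &hdr, sizeof(hdr));
	memcpy(buf + sizeof(hdr), msg, msg_len);
	memcpy(buf + sizeof(hdr) + msg_len, &dcb_args, sizeof(dcb_args));
	write(fd, buf, sizeof(hdr) + msg_len + sizeof(dcb_args));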
-/* magic number 1 of the sep IOCTL command */
-#define SEP_IOC_MAGIC_NUMBER 's'
+/*
+ * structure used in file pointer's private data field
+ * to track the status of the calls to the various
+ * driver interface
+ */
+struct sep_call_status {
+ unsigned long status;
+};
-/* sends interrupt to sep that message is ready */
-#define SEP_IOCSENDSEPCOMMAND \
- _IO(SEP_IOC_MAGIC_NUMBER, 0)
+/*
+ * format of dma context buffer used to store all DMA-related
+ * context information of a particular transaction
+ */
+struct sep_dma_context {
+ /* number of data control blocks */
+ u32 nr_dcb_creat;
+ /* number of the lli tables created in the current transaction */
+ u32 num_lli_tables_created;
+ /* size of currently allocated dma tables region */
+ u32 dmatables_len;
+ /* size of input data */
+ u32 input_data_len;
+ /* secure dma use (for IMR memory restricted area in output) */
+ bool secure_dma;
+ struct sep_dma_resource dma_res_arr[SEP_MAX_NUM_SYNC_DMA_OPS];
+ /* Scatter gather for kernel crypto */
+ struct scatterlist *src_sg;
+ struct scatterlist *dst_sg;
+};
-/* sends interrupt to sep that message is ready */
-#define SEP_IOCSENDSEPRPLYCOMMAND \
- _IO(SEP_IOC_MAGIC_NUMBER, 1)
+/*
+ * format for file pointer's private_data field
+ */
+struct sep_private_data {
+ struct sep_queue_info *my_queue_elem;
+ struct sep_device *device;
+ struct sep_call_status call_status;
+ struct sep_dma_context *dma_ctx;
+};
-/* allocate memory in data pool */
-#define SEP_IOCALLOCDATAPOLL \
- _IOW(SEP_IOC_MAGIC_NUMBER, 2, struct alloc_struct)
-/* free dynamic data aalocated during table creation */
-#define SEP_IOCFREEDMATABLEDATA \
- _IO(SEP_IOC_MAGIC_NUMBER, 7)
+/* Functions used by sep_crypto */
-/* get the static pool area addersses (physical and virtual) */
-#define SEP_IOCGETSTATICPOOLADDR \
- _IO(SEP_IOC_MAGIC_NUMBER, 8)
+/**
+ * sep_queue_status_remove - Removes transaction from status queue
+ * @sep: SEP device
+ * @queue_elem: pointer to the queue element to remove
+ *
+ * This function removes information about a transaction from the queue.
+ */
+void sep_queue_status_remove(struct sep_device *sep,
+ struct sep_queue_info **queue_elem);
+/**
+ * sep_queue_status_add - Adds transaction to status queue
+ * @sep: SEP device
+ * @opcode: transaction opcode
+ * @size: input data size
+ * @pid: pid of current process
+ * @name: current process name
+ * @name_len: length of name (current process)
+ *
+ * This function adds information about a started transaction to the status
+ * queue.
+ */
+struct sep_queue_info *sep_queue_status_add(
+ struct sep_device *sep,
+ u32 opcode,
+ u32 size,
+ u32 pid,
+ u8 *name, size_t name_len);
+
+/**
+ * sep_create_dcb_dmatables_context_kernel - Creates DCB & MLLI/DMA table context
+ * for kernel crypto
+ * @sep: SEP device
+ * @dcb_region: DCB region buf to create for current transaction
+ * @dmatables_region: MLLI/DMA tables buf to create for current transaction
+ * @dma_ctx: DMA context buf to create for current transaction
+ * @user_dcb_args: User arguments for DCB/MLLI creation
+ * @num_dcbs: Number of DCBs to create
+ */
+int sep_create_dcb_dmatables_context_kernel(struct sep_device *sep,
+ struct sep_dcblock **dcb_region,
+ void **dmatables_region,
+ struct sep_dma_context **dma_ctx,
+ const struct build_dcb_struct_kernel *dcb_data,
+ const u32 num_dcbs);
+
+/**
+ * sep_activate_dcb_dmatables_context - Takes DCB & DMA tables
+ * contexts into use
+ * @sep: SEP device
+ * @dcb_region: DCB region copy
+ * @dmatables_region: MLLI/DMA tables copy
+ * @dma_ctx: DMA context for current transaction
+ */
+ssize_t sep_activate_dcb_dmatables_context(struct sep_device *sep,
+ struct sep_dcblock **dcb_region,
+ void **dmatables_region,
+ struct sep_dma_context *dma_ctx);
+
+/**
+ * sep_prepare_input_output_dma_table_in_dcb - prepare control blocks
+ * @app_in_address: unsigned long; for data buffer in (user space)
+ * @app_out_address: unsigned long; for data buffer out (user space)
+ * @data_in_size: u32; for size of data
+ * @block_size: u32; for block size
+ * @tail_block_size: u32; for size of tail block
+ * @isapplet: bool; to indicate external app
+ * @is_kva: bool; kernel buffer; only used for kernel crypto module
+ * @secure_dma: indicates whether this is secure_dma using IMR
+ *
+ * This function prepares the linked DMA tables and puts the
+ * address of the linked list of tables into a DCB (data control
+ * block), the address of which is known to the SEP hardware.
+ * Note that all bus addresses that are passed to the SEP
+ * are in 32 bit format; the SEP is a 32 bit device
+ */
+int sep_prepare_input_output_dma_table_in_dcb(struct sep_device *sep,
+ unsigned long app_in_address,
+ unsigned long app_out_address,
+ u32 data_in_size,
+ u32 block_size,
+ u32 tail_block_size,
+ bool isapplet,
+ bool is_kva,
+ bool secure_dma,
+ struct sep_dcblock *dcb_region,
+ void **dmatables_region,
+ struct sep_dma_context **dma_ctx,
+ struct scatterlist *src_sg,
+ struct scatterlist *dst_sg);
+
+/**
+ * sep_free_dma_table_data_handler - free DMA table
+ * @sep: pointer to struct sep_device
+ * @dma_ctx: dma context
+ *
+ * Handles the request to free DMA table for synchronic actions
+ */
+int sep_free_dma_table_data_handler(struct sep_device *sep,
+ struct sep_dma_context **dma_ctx);
+/**
+ * sep_send_command_handler - kick off a command
+ * @sep: SEP being signalled
+ *
+ * This function raises an interrupt to the SEP, signalling that it has a new
+ * command from the host
+ *
+ * Note that this function does fall under the ioctl lock
+ */
+int sep_send_command_handler(struct sep_device *sep);
+
+/**
+ * sep_wait_transaction - Used for synchronizing transactions
+ * @sep: SEP device
+ */
+int sep_wait_transaction(struct sep_device *sep);
+
+/**
+ * IOCTL command defines
+ */
+/* magic number 1 of the sep IOCTL command */
+#define SEP_IOC_MAGIC_NUMBER 's'
+
+/* sends interrupt to sep that message is ready */
+#define SEP_IOCSENDSEPCOMMAND \
+ _IO(SEP_IOC_MAGIC_NUMBER, 0)
/* end transaction command */
#define SEP_IOCENDTRANSACTION \
_IO(SEP_IOC_MAGIC_NUMBER, 15)
-#define SEP_IOCRARPREPAREMESSAGE \
- _IOW(SEP_IOC_MAGIC_NUMBER, 20, struct rar_hndl_to_bus_struct)
-
-#define SEP_IOCTLSETCALLERID \
- _IOW(SEP_IOC_MAGIC_NUMBER, 34, struct caller_id_struct)
-
#define SEP_IOCPREPAREDCB \
_IOW(SEP_IOC_MAGIC_NUMBER, 35, struct build_dcb_struct)
#define SEP_IOCFREEDCB \
_IO(SEP_IOC_MAGIC_NUMBER, 36)
+struct sep_device;
+
+#define SEP_IOCPREPAREDCB_SECURE_DMA \
+ _IOW(SEP_IOC_MAGIC_NUMBER, 38, struct build_dcb_struct)
+
+#define SEP_IOCFREEDCB_SECURE_DMA \
+ _IO(SEP_IOC_MAGIC_NUMBER, 39)
+
#endif
diff --git a/drivers/staging/sep/sep_driver_config.h b/drivers/staging/sep/sep_driver_config.h
index d6bfd245522..fa7c0d09bfa 100644
--- a/drivers/staging/sep/sep_driver_config.h
+++ b/drivers/staging/sep/sep_driver_config.h
@@ -2,8 +2,8 @@
*
* sep_driver_config.h - Security Processor Driver configuration
*
- * Copyright(c) 2009,2010 Intel Corporation. All rights reserved.
- * Contributions(c) 2009,2010 Discretix. All rights reserved.
+ * Copyright(c) 2009-2011 Intel Corporation. All rights reserved.
+ * Contributions(c) 2009-2011 Discretix. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
@@ -26,6 +26,7 @@
* CHANGES:
*
* 2010.06.26 Upgrade to Medfield
+ * 2011.02.22 Enable kernel crypto
*
*/
@@ -48,6 +49,8 @@
/* the mode for running on the ARM1172 Evaluation platform (flag is 1) */
#define SEP_DRIVER_ARM_DEBUG_MODE 0
+/* Critical message area contents for sanity checking */
+#define SEP_START_MSG_TOKEN 0x02558808
/*-------------------------------------------
INTERNAL DATA CONFIGURATION
-------------------------------------------*/
@@ -65,21 +68,17 @@
#define SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE 16
/* flag that signifies that the lock is
-currently held by the process (struct file) */
+currently held by the process (struct file) */
#define SEP_DRIVER_OWN_LOCK_FLAG 1
/* flag that signifies that the lock is currently NOT
-held by the process (struct file) */
+held by the process (struct file) */
#define SEP_DRIVER_DISOWN_LOCK_FLAG 0
/* indicates whether driver has mapped/unmapped shared area */
#define SEP_REQUEST_DAEMON_MAPPED 1
#define SEP_REQUEST_DAEMON_UNMAPPED 0
-#define SEP_DEV_NAME "sep_sec_driver"
-#define SEP_DEV_SINGLETON "sep_sec_singleton_driver"
-#define SEP_DEV_DAEMON "sep_req_daemon_driver"
-
/*--------------------------------------------------------
SHARED AREA memory total size is 36K
it is divided is following:
@@ -90,7 +89,7 @@ held by the process (struct file) */
}
DATA_POOL_AREA 12K }
- SYNCHRONIC_DMA_TABLES_AREA 5K
+ SYNCHRONIC_DMA_TABLES_AREA 29K
placeholder until driver changes
FLOW_DMA_TABLES_AREA 4K
@@ -109,6 +108,12 @@ held by the process (struct file) */
/*
+ the minimum length of the message - includes 2 reserved fields
+ at the start, then token, message size and opcode fields. all dwords
+*/
+#define SEP_DRIVER_MIN_MESSAGE_SIZE_IN_BYTES (5*sizeof(u32))
+
+/*
the maximum length of the message - the rest of the message shared
area will be dedicated to the dma lli tables
*/
@@ -124,7 +129,7 @@ held by the process (struct file) */
#define SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES (16 * 1024)
/* the size of the message shared area in pages */
-#define SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES (1024 * 5)
+#define SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES (1024 * 29)
/* Placeholder until driver changes */
#define SEP_DRIVER_FLOW_DMA_TABLES_AREA_SIZE_IN_BYTES (1024 * 4)
@@ -132,6 +137,9 @@ held by the process (struct file) */
/* system data (time, caller id etc') pool */
#define SEP_DRIVER_SYSTEM_DATA_MEMORY_SIZE_IN_BYTES (1024 * 3)
+/* Offset of the sep printf buffer in the message area */
+#define SEP_DRIVER_PRINTF_OFFSET_IN_BYTES (5888)
+
/* the size in bytes of the time memory */
#define SEP_DRIVER_TIME_MEMORY_SIZE_IN_BYTES 8
@@ -223,10 +231,10 @@ held by the process (struct file) */
#define SEP_ALREADY_INITIALIZED_ERR 12
/* bit that locks access to the shared area */
-#define SEP_MMAP_LOCK_BIT 0
+#define SEP_TRANSACTION_STARTED_LOCK_BIT 0
/* bit that lock access to the poll - after send_command */
-#define SEP_SEND_MSG_LOCK_BIT 1
+#define SEP_WORKING_LOCK_BIT 1
/* the token that defines the static pool address address */
#define SEP_STATIC_POOL_VAL_TOKEN 0xABBAABBA
@@ -240,4 +248,51 @@ held by the process (struct file) */
/* Time limit for SEP to finish */
#define WAIT_TIME 10
+/* Delay for pm runtime suspend (reduces pm thrashing with bursty traffic) */
+#define SUSPEND_DELAY 10
+
+/* Number of delays to wait until scu boots after runtime resume */
+#define SCU_DELAY_MAX 50
+
+/* Delay for each iteration (usec) wait for scu boots after runtime resume */
+#define SCU_DELAY_ITERATION 10
+
+
+/*
+ * Bits used in struct sep_call_status to check that
+ * driver's APIs are called in valid order
+ */
+
+/* Bit offset which indicates status of sep_write() */
+#define SEP_FASTCALL_WRITE_DONE_OFFSET 0
+
+/* Bit offset which indicates status of sep_mmap() */
+#define SEP_LEGACY_MMAP_DONE_OFFSET 1
+
+/* Bit offset which indicates status of the SEP_IOCSENDSEPCOMMAND ioctl */
+#define SEP_LEGACY_SENDMSG_DONE_OFFSET 2
+
+/* Bit offset which indicates status of sep_poll() */
+#define SEP_LEGACY_POLL_DONE_OFFSET 3
+
+/* Bit offset which indicates status of the SEP_IOCENDTRANSACTION ioctl */
+#define SEP_LEGACY_ENDTRANSACTION_DONE_OFFSET 4
+
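These offsets pair with the standard bit helpers on the status word in struct sep_call_status; a minimal sketch of the assumed usage (private_data is a hypothetical local):

	struct sep_call_status *call_status = &private_data->call_status;

	/* record that sep_write() completed for this file handle */
	set_bit(SEP_FASTCALL_WRITE_DONE_OFFSET, &call_status->status);

	/* refuse to send a message before the shared area was mmapped */
	if (!test_bit(SEP_LEGACY_MMAP_DONE_OFFSET, &call_status->status))
		return -EPROTO;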
+/*
+ * Used to limit the number of concurrent processes
+ * allowed to allocate dynamic buffers in the fastcall
+ * interface.
+ */
+#define SEP_DOUBLEBUF_USERS_LIMIT 3
+
+/* Identifier for valid fastcall header */
+#define SEP_FC_MAGIC 0xFFAACCAA
+
+/*
+ * Used for enabling driver runtime power management.
+ * Useful for enabling/disabling it during performance
+ * testing
+ */
+#define SEP_ENABLE_RUNTIME_PM
+
#endif /* SEP DRIVER CONFIG */
diff --git a/drivers/staging/sep/sep_driver_hw_defs.h b/drivers/staging/sep/sep_driver_hw_defs.h
index 300f90963de..a6a44817038 100644
--- a/drivers/staging/sep/sep_driver_hw_defs.h
+++ b/drivers/staging/sep/sep_driver_hw_defs.h
@@ -2,8 +2,8 @@
*
* sep_driver_hw_defs.h - Security Processor Driver hardware definitions
*
- * Copyright(c) 2009,2010 Intel Corporation. All rights reserved.
- * Contributions(c) 2009,2010 Discretix. All rights reserved.
+ * Copyright(c) 2009-2011 Intel Corporation. All rights reserved.
+ * Contributions(c) 2009-2011 Discretix. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
@@ -26,15 +26,13 @@
* CHANGES:
*
* 2010.09.20 Upgrade to Medfield
+ * 2011.02.22 Enable kernel crypto
*
*/
#ifndef SEP_DRIVER_HW_DEFS__H
#define SEP_DRIVER_HW_DEFS__H
-/* PCI ID's */
-#define MFLD_PCI_DEVICE_ID 0x0826
-
/*----------------------- */
/* HW Registers Defines. */
/* */
@@ -42,181 +40,9 @@
/* cf registers */
-#define HW_R0B_ADDR_0_REG_ADDR 0x0000UL
-#define HW_R0B_ADDR_1_REG_ADDR 0x0004UL
-#define HW_R0B_ADDR_2_REG_ADDR 0x0008UL
-#define HW_R0B_ADDR_3_REG_ADDR 0x000cUL
-#define HW_R0B_ADDR_4_REG_ADDR 0x0010UL
-#define HW_R0B_ADDR_5_REG_ADDR 0x0014UL
-#define HW_R0B_ADDR_6_REG_ADDR 0x0018UL
-#define HW_R0B_ADDR_7_REG_ADDR 0x001cUL
-#define HW_R0B_ADDR_8_REG_ADDR 0x0020UL
-#define HW_R2B_ADDR_0_REG_ADDR 0x0080UL
-#define HW_R2B_ADDR_1_REG_ADDR 0x0084UL
-#define HW_R2B_ADDR_2_REG_ADDR 0x0088UL
-#define HW_R2B_ADDR_3_REG_ADDR 0x008cUL
-#define HW_R2B_ADDR_4_REG_ADDR 0x0090UL
-#define HW_R2B_ADDR_5_REG_ADDR 0x0094UL
-#define HW_R2B_ADDR_6_REG_ADDR 0x0098UL
-#define HW_R2B_ADDR_7_REG_ADDR 0x009cUL
-#define HW_R2B_ADDR_8_REG_ADDR 0x00a0UL
-#define HW_R3B_REG_ADDR 0x00C0UL
-#define HW_R4B_REG_ADDR 0x0100UL
-#define HW_CSA_ADDR_0_REG_ADDR 0x0140UL
-#define HW_CSA_ADDR_1_REG_ADDR 0x0144UL
-#define HW_CSA_ADDR_2_REG_ADDR 0x0148UL
-#define HW_CSA_ADDR_3_REG_ADDR 0x014cUL
-#define HW_CSA_ADDR_4_REG_ADDR 0x0150UL
-#define HW_CSA_ADDR_5_REG_ADDR 0x0154UL
-#define HW_CSA_ADDR_6_REG_ADDR 0x0158UL
-#define HW_CSA_ADDR_7_REG_ADDR 0x015cUL
-#define HW_CSA_ADDR_8_REG_ADDR 0x0160UL
-#define HW_CSA_REG_ADDR 0x0140UL
-#define HW_SINB_REG_ADDR 0x0180UL
-#define HW_SOUTB_REG_ADDR 0x0184UL
-#define HW_PKI_CONTROL_REG_ADDR 0x01C0UL
-#define HW_PKI_STATUS_REG_ADDR 0x01C4UL
-#define HW_PKI_BUSY_REG_ADDR 0x01C8UL
-#define HW_PKI_A_1025_REG_ADDR 0x01CCUL
-#define HW_PKI_SDMA_CTL_REG_ADDR 0x01D0UL
-#define HW_PKI_SDMA_OFFSET_REG_ADDR 0x01D4UL
-#define HW_PKI_SDMA_POINTERS_REG_ADDR 0x01D8UL
-#define HW_PKI_SDMA_DLENG_REG_ADDR 0x01DCUL
-#define HW_PKI_SDMA_EXP_POINTERS_REG_ADDR 0x01E0UL
-#define HW_PKI_SDMA_RES_POINTERS_REG_ADDR 0x01E4UL
-#define HW_PKI_CLR_REG_ADDR 0x01E8UL
-#define HW_PKI_SDMA_BUSY_REG_ADDR 0x01E8UL
-#define HW_PKI_SDMA_FIRST_EXP_N_REG_ADDR 0x01ECUL
-#define HW_PKI_SDMA_MUL_BY1_REG_ADDR 0x01F0UL
-#define HW_PKI_SDMA_RMUL_SEL_REG_ADDR 0x01F4UL
-#define HW_DES_KEY_0_REG_ADDR 0x0208UL
-#define HW_DES_KEY_1_REG_ADDR 0x020CUL
-#define HW_DES_KEY_2_REG_ADDR 0x0210UL
-#define HW_DES_KEY_3_REG_ADDR 0x0214UL
-#define HW_DES_KEY_4_REG_ADDR 0x0218UL
-#define HW_DES_KEY_5_REG_ADDR 0x021CUL
-#define HW_DES_CONTROL_0_REG_ADDR 0x0220UL
-#define HW_DES_CONTROL_1_REG_ADDR 0x0224UL
-#define HW_DES_IV_0_REG_ADDR 0x0228UL
-#define HW_DES_IV_1_REG_ADDR 0x022CUL
-#define HW_AES_KEY_0_ADDR_0_REG_ADDR 0x0400UL
-#define HW_AES_KEY_0_ADDR_1_REG_ADDR 0x0404UL
-#define HW_AES_KEY_0_ADDR_2_REG_ADDR 0x0408UL
-#define HW_AES_KEY_0_ADDR_3_REG_ADDR 0x040cUL
-#define HW_AES_KEY_0_ADDR_4_REG_ADDR 0x0410UL
-#define HW_AES_KEY_0_ADDR_5_REG_ADDR 0x0414UL
-#define HW_AES_KEY_0_ADDR_6_REG_ADDR 0x0418UL
-#define HW_AES_KEY_0_ADDR_7_REG_ADDR 0x041cUL
-#define HW_AES_KEY_0_REG_ADDR 0x0400UL
-#define HW_AES_IV_0_ADDR_0_REG_ADDR 0x0440UL
-#define HW_AES_IV_0_ADDR_1_REG_ADDR 0x0444UL
-#define HW_AES_IV_0_ADDR_2_REG_ADDR 0x0448UL
-#define HW_AES_IV_0_ADDR_3_REG_ADDR 0x044cUL
-#define HW_AES_IV_0_REG_ADDR 0x0440UL
-#define HW_AES_CTR1_ADDR_0_REG_ADDR 0x0460UL
-#define HW_AES_CTR1_ADDR_1_REG_ADDR 0x0464UL
-#define HW_AES_CTR1_ADDR_2_REG_ADDR 0x0468UL
-#define HW_AES_CTR1_ADDR_3_REG_ADDR 0x046cUL
-#define HW_AES_CTR1_REG_ADDR 0x0460UL
-#define HW_AES_SK_REG_ADDR 0x0478UL
-#define HW_AES_MAC_OK_REG_ADDR 0x0480UL
-#define HW_AES_PREV_IV_0_ADDR_0_REG_ADDR 0x0490UL
-#define HW_AES_PREV_IV_0_ADDR_1_REG_ADDR 0x0494UL
-#define HW_AES_PREV_IV_0_ADDR_2_REG_ADDR 0x0498UL
-#define HW_AES_PREV_IV_0_ADDR_3_REG_ADDR 0x049cUL
-#define HW_AES_PREV_IV_0_REG_ADDR 0x0490UL
-#define HW_AES_CONTROL_REG_ADDR 0x04C0UL
-#define HW_HASH_H0_REG_ADDR 0x0640UL
-#define HW_HASH_H1_REG_ADDR 0x0644UL
-#define HW_HASH_H2_REG_ADDR 0x0648UL
-#define HW_HASH_H3_REG_ADDR 0x064CUL
-#define HW_HASH_H4_REG_ADDR 0x0650UL
-#define HW_HASH_H5_REG_ADDR 0x0654UL
-#define HW_HASH_H6_REG_ADDR 0x0658UL
-#define HW_HASH_H7_REG_ADDR 0x065CUL
-#define HW_HASH_H8_REG_ADDR 0x0660UL
-#define HW_HASH_H9_REG_ADDR 0x0664UL
-#define HW_HASH_H10_REG_ADDR 0x0668UL
-#define HW_HASH_H11_REG_ADDR 0x066CUL
-#define HW_HASH_H12_REG_ADDR 0x0670UL
-#define HW_HASH_H13_REG_ADDR 0x0674UL
-#define HW_HASH_H14_REG_ADDR 0x0678UL
-#define HW_HASH_H15_REG_ADDR 0x067CUL
-#define HW_HASH_CONTROL_REG_ADDR 0x07C0UL
-#define HW_HASH_PAD_EN_REG_ADDR 0x07C4UL
-#define HW_HASH_PAD_CFG_REG_ADDR 0x07C8UL
-#define HW_HASH_CUR_LEN_0_REG_ADDR 0x07CCUL
-#define HW_HASH_CUR_LEN_1_REG_ADDR 0x07D0UL
-#define HW_HASH_CUR_LEN_2_REG_ADDR 0x07D4UL
-#define HW_HASH_CUR_LEN_3_REG_ADDR 0x07D8UL
-#define HW_HASH_PARAM_REG_ADDR 0x07DCUL
-#define HW_HASH_INT_BUSY_REG_ADDR 0x07E0UL
-#define HW_HASH_SW_RESET_REG_ADDR 0x07E4UL
-#define HW_HASH_ENDIANESS_REG_ADDR 0x07E8UL
-#define HW_HASH_DATA_REG_ADDR 0x07ECUL
-#define HW_DRNG_CONTROL_REG_ADDR 0x0800UL
-#define HW_DRNG_VALID_REG_ADDR 0x0804UL
-#define HW_DRNG_DATA_REG_ADDR 0x0808UL
-#define HW_RND_SRC_EN_REG_ADDR 0x080CUL
-#define HW_AES_CLK_ENABLE_REG_ADDR 0x0810UL
-#define HW_DES_CLK_ENABLE_REG_ADDR 0x0814UL
-#define HW_HASH_CLK_ENABLE_REG_ADDR 0x0818UL
-#define HW_PKI_CLK_ENABLE_REG_ADDR 0x081CUL
-#define HW_CLK_STATUS_REG_ADDR 0x0824UL
-#define HW_CLK_ENABLE_REG_ADDR 0x0828UL
-#define HW_DRNG_SAMPLE_REG_ADDR 0x0850UL
-#define HW_RND_SRC_CTL_REG_ADDR 0x0858UL
-#define HW_CRYPTO_CTL_REG_ADDR 0x0900UL
-#define HW_CRYPTO_STATUS_REG_ADDR 0x090CUL
-#define HW_CRYPTO_BUSY_REG_ADDR 0x0910UL
-#define HW_AES_BUSY_REG_ADDR 0x0914UL
-#define HW_DES_BUSY_REG_ADDR 0x0918UL
-#define HW_HASH_BUSY_REG_ADDR 0x091CUL
-#define HW_CONTENT_REG_ADDR 0x0924UL
-#define HW_VERSION_REG_ADDR 0x0928UL
-#define HW_CONTEXT_ID_REG_ADDR 0x0930UL
-#define HW_DIN_BUFFER_REG_ADDR 0x0C00UL
-#define HW_DIN_MEM_DMA_BUSY_REG_ADDR 0x0c20UL
-#define HW_SRC_LLI_MEM_ADDR_REG_ADDR 0x0c24UL
-#define HW_SRC_LLI_WORD0_REG_ADDR 0x0C28UL
-#define HW_SRC_LLI_WORD1_REG_ADDR 0x0C2CUL
-#define HW_SRAM_SRC_ADDR_REG_ADDR 0x0c30UL
-#define HW_DIN_SRAM_BYTES_LEN_REG_ADDR 0x0c34UL
-#define HW_DIN_SRAM_DMA_BUSY_REG_ADDR 0x0C38UL
-#define HW_WRITE_ALIGN_REG_ADDR 0x0C3CUL
-#define HW_OLD_DATA_REG_ADDR 0x0C48UL
-#define HW_WRITE_ALIGN_LAST_REG_ADDR 0x0C4CUL
-#define HW_DOUT_BUFFER_REG_ADDR 0x0C00UL
-#define HW_DST_LLI_WORD0_REG_ADDR 0x0D28UL
-#define HW_DST_LLI_WORD1_REG_ADDR 0x0D2CUL
-#define HW_DST_LLI_MEM_ADDR_REG_ADDR 0x0D24UL
-#define HW_DOUT_MEM_DMA_BUSY_REG_ADDR 0x0D20UL
-#define HW_SRAM_DEST_ADDR_REG_ADDR 0x0D30UL
-#define HW_DOUT_SRAM_BYTES_LEN_REG_ADDR 0x0D34UL
-#define HW_DOUT_SRAM_DMA_BUSY_REG_ADDR 0x0D38UL
-#define HW_READ_ALIGN_REG_ADDR 0x0D3CUL
-#define HW_READ_LAST_DATA_REG_ADDR 0x0D44UL
-#define HW_RC4_THRU_CPU_REG_ADDR 0x0D4CUL
-#define HW_AHB_SINGLE_REG_ADDR 0x0E00UL
-#define HW_SRAM_DATA_REG_ADDR 0x0F00UL
-#define HW_SRAM_ADDR_REG_ADDR 0x0F04UL
-#define HW_SRAM_DATA_READY_REG_ADDR 0x0F08UL
#define HW_HOST_IRR_REG_ADDR 0x0A00UL
#define HW_HOST_IMR_REG_ADDR 0x0A04UL
#define HW_HOST_ICR_REG_ADDR 0x0A08UL
-#define HW_HOST_SEP_SRAM_THRESHOLD_REG_ADDR 0x0A10UL
-#define HW_HOST_SEP_BUSY_REG_ADDR 0x0A14UL
-#define HW_HOST_SEP_LCS_REG_ADDR 0x0A18UL
-#define HW_HOST_CC_SW_RST_REG_ADDR 0x0A40UL
-#define HW_HOST_SEP_SW_RST_REG_ADDR 0x0A44UL
-#define HW_HOST_FLOW_DMA_SW_INT0_REG_ADDR 0x0A80UL
-#define HW_HOST_FLOW_DMA_SW_INT1_REG_ADDR 0x0A84UL
-#define HW_HOST_FLOW_DMA_SW_INT2_REG_ADDR 0x0A88UL
-#define HW_HOST_FLOW_DMA_SW_INT3_REG_ADDR 0x0A8cUL
-#define HW_HOST_FLOW_DMA_SW_INT4_REG_ADDR 0x0A90UL
-#define HW_HOST_FLOW_DMA_SW_INT5_REG_ADDR 0x0A94UL
-#define HW_HOST_FLOW_DMA_SW_INT6_REG_ADDR 0x0A98UL
-#define HW_HOST_FLOW_DMA_SW_INT7_REG_ADDR 0x0A9cUL
#define HW_HOST_SEP_HOST_GPR0_REG_ADDR 0x0B00UL
#define HW_HOST_SEP_HOST_GPR1_REG_ADDR 0x0B04UL
#define HW_HOST_SEP_HOST_GPR2_REG_ADDR 0x0B08UL
@@ -225,9 +51,6 @@
#define HW_HOST_HOST_SEP_GPR1_REG_ADDR 0x0B84UL
#define HW_HOST_HOST_SEP_GPR2_REG_ADDR 0x0B88UL
#define HW_HOST_HOST_SEP_GPR3_REG_ADDR 0x0B8CUL
-#define HW_HOST_HOST_ENDIAN_REG_ADDR 0x0B90UL
-#define HW_HOST_HOST_COMM_CLK_EN_REG_ADDR 0x0B94UL
-#define HW_CLR_SRAM_BUSY_REG_REG_ADDR 0x0F0CUL
-#define HW_CC_SRAM_BASE_ADDRESS 0x5800UL
+#define HW_SRAM_DATA_READY_REG_ADDR 0x0F08UL
#endif /* ifndef HW_DEFS */
diff --git a/drivers/staging/sep/sep_main.c b/drivers/staging/sep/sep_main.c
new file mode 100644
index 00000000000..ad54c2e5c93
--- /dev/null
+++ b/drivers/staging/sep/sep_main.c
@@ -0,0 +1,4518 @@
+/*
+ *
+ * sep_main.c - Security Processor Driver main group of functions
+ *
+ * Copyright(c) 2009-2011 Intel Corporation. All rights reserved.
+ * Contributions(c) 2009-2011 Discretix. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * CONTACTS:
+ *
+ * Mark Allyn mark.a.allyn@intel.com
+ * Jayant Mangalampalli jayant.mangalampalli@intel.com
+ *
+ * CHANGES:
+ *
+ * 2009.06.26 Initial publish
+ * 2010.09.14 Upgrade to Medfield
+ * 2011.01.21 Move to sep_main.c to allow for sep_crypto.c
+ * 2011.02.22 Enable kernel crypto operation
+ *
+ * Please note that this driver is based on information in the Discretix
+ * CryptoCell 5.2 Driver Implementation Guide; the Discretix CryptoCell 5.2
+ * Integration Intel Medfield appendix; the Discretix CryptoCell 5.2
+ * Linux Driver Integration Guide; and the Discretix CryptoCell 5.2 System
+ * Overview and Integration Guide.
+ */
+/* #define DEBUG */
+/* #define SEP_PERF_DEBUG */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/miscdevice.h>
+#include <linux/fs.h>
+#include <linux/cdev.h>
+#include <linux/kdev_t.h>
+#include <linux/mutex.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/poll.h>
+#include <linux/wait.h>
+#include <linux/pci.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/ioctl.h>
+#include <asm/current.h>
+#include <linux/ioport.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/pagemap.h>
+#include <asm/cacheflush.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/jiffies.h>
+#include <linux/async.h>
+#include <linux/crypto.h>
+#include <crypto/internal/hash.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/sha.h>
+#include <crypto/md5.h>
+#include <crypto/aes.h>
+#include <crypto/des.h>
+#include <crypto/hash.h>
+
+#include "sep_driver_hw_defs.h"
+#include "sep_driver_config.h"
+#include "sep_driver_api.h"
+#include "sep_dev.h"
+#include "sep_crypto.h"
+
+#define CREATE_TRACE_POINTS
+#include "sep_trace_events.h"
+
+/*
+ * Let's not spend cycles iterating over message
+ * area contents if debugging is not enabled
+ */
+#ifdef DEBUG
+#define sep_dump_message(sep) _sep_dump_message(sep)
+#else
+#define sep_dump_message(sep)
+#endif
+
+/**
+ * Currently, there is only one SEP device per platform;
+ * if platforms in the future have more than one SEP
+ * device, this will become a linked list
+ */
+
+struct sep_device *sep_dev;
+
+/**
+ * sep_queue_status_remove - Removes transaction from status queue
+ * @sep: SEP device
+ * @queue_elem: pointer to the queue element to remove
+ *
+ * This function removes information about a transaction from the queue.
+ */
+void sep_queue_status_remove(struct sep_device *sep,
+ struct sep_queue_info **queue_elem)
+{
+ unsigned long lck_flags;
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] sep_queue_status_remove\n",
+ current->pid);
+
+ if (!queue_elem || !(*queue_elem)) {
+ dev_dbg(&sep->pdev->dev, "PID%d %s null\n",
+ current->pid, __func__);
+ return;
+ }
+
+ spin_lock_irqsave(&sep->sep_queue_lock, lck_flags);
+ list_del(&(*queue_elem)->list);
+ sep->sep_queue_num--;
+ spin_unlock_irqrestore(&sep->sep_queue_lock, lck_flags);
+
+ kfree(*queue_elem);
+ *queue_elem = NULL;
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] sep_queue_status_remove return\n",
+ current->pid);
+ return;
+}
+
+/**
+ * sep_queue_status_add - Adds transaction to status queue
+ * @sep: SEP device
+ * @opcode: transaction opcode
+ * @size: input data size
+ * @pid: pid of current process
+ * @name: current process name
+ * @name_len: length of name (current process)
+ *
+ * This function adds information about a started transaction to the status
+ * queue.
+ */
+struct sep_queue_info *sep_queue_status_add(
+ struct sep_device *sep,
+ u32 opcode,
+ u32 size,
+ u32 pid,
+ u8 *name, size_t name_len)
+{
+ unsigned long lck_flags;
+ struct sep_queue_info *my_elem = NULL;
+
+ my_elem = kzalloc(sizeof(struct sep_queue_info), GFP_KERNEL);
+
+ if (!my_elem)
+ return NULL;
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] kzalloc ok\n", current->pid);
+
+ my_elem->data.opcode = opcode;
+ my_elem->data.size = size;
+ my_elem->data.pid = pid;
+
+ if (name_len > TASK_COMM_LEN)
+ name_len = TASK_COMM_LEN;
+
+ memcpy(&my_elem->data.name, name, name_len);
+
+ spin_lock_irqsave(&sep->sep_queue_lock, lck_flags);
+
+ list_add_tail(&my_elem->list, &sep->sep_queue_status);
+ sep->sep_queue_num++;
+
+ spin_unlock_irqrestore(&sep->sep_queue_lock, lck_flags);
+
+ return my_elem;
+}
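A sketch of how the two status-queue helpers are meant to pair around a transaction (error handling elided; the cast reflects the u8 * parameter type):

	struct sep_queue_info *elem;

	elem = sep_queue_status_add(sep, opcode, size, current->pid,
				    (u8 *)current->comm, strlen(current->comm));
	if (!elem)
		return -ENOMEM;

	/* ... perform the transaction ... */

	sep_queue_status_remove(sep, &elem);	/* also NULLs elem */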
+
+/**
+ * sep_allocate_dmatables_region - Allocates buf for the MLLI/DMA tables
+ * @sep: SEP device
+ * @dmatables_region: Destination pointer for the buffer
+ * @dma_ctx: DMA context for the transaction
+ * @table_count: Number of MLLI/DMA tables to create
+ * The buffer created will not work as-is for DMA operations;
+ * it needs to be copied over to the appropriate place in the
+ * shared area.
+ */
+static int sep_allocate_dmatables_region(struct sep_device *sep,
+ void **dmatables_region,
+ struct sep_dma_context *dma_ctx,
+ const u32 table_count)
+{
+ const size_t new_len =
+ SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES - 1;
+
+ void *tmp_region = NULL;
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] dma_ctx = 0x%p\n",
+ current->pid, dma_ctx);
+ dev_dbg(&sep->pdev->dev, "[PID%d] dmatables_region = 0x%p\n",
+ current->pid, dmatables_region);
+
+ if (!dma_ctx || !dmatables_region) {
+ dev_warn(&sep->pdev->dev,
+ "[PID%d] dma context/region uninitialized\n",
+ current->pid);
+ return -EINVAL;
+ }
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] newlen = 0x%08zX\n",
+ current->pid, new_len);
+ dev_dbg(&sep->pdev->dev, "[PID%d] oldlen = 0x%08X\n", current->pid,
+ dma_ctx->dmatables_len);
+ tmp_region = kzalloc(new_len + dma_ctx->dmatables_len, GFP_KERNEL);
+ if (!tmp_region) {
+ dev_warn(&sep->pdev->dev,
+ "[PID%d] no mem for dma tables region\n",
+ current->pid);
+ return -ENOMEM;
+ }
+
+ /* Were there any previous tables that need to be preserved ? */
+ if (*dmatables_region) {
+ memcpy(tmp_region, *dmatables_region, dma_ctx->dmatables_len);
+ kfree(*dmatables_region);
+ *dmatables_region = NULL;
+ }
+
+ *dmatables_region = tmp_region;
+
+ dma_ctx->dmatables_len += new_len;
+
+ return 0;
+}
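The grow-and-copy above could also be expressed with krealloc, sketched below; one behavioral difference to note is that kzalloc zeroes the newly extended region while krealloc leaves it uninitialized:

	void *tmp = krealloc(*dmatables_region,
			     dma_ctx->dmatables_len + new_len, GFP_KERNEL);

	if (!tmp)
		return -ENOMEM;
	/* old contents are preserved by krealloc itself */
	*dmatables_region = tmp;
	dma_ctx->dmatables_len += new_len;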
+
+/**
+ * sep_wait_transaction - Used for synchronizing transactions
+ * @sep: SEP device
+ */
+int sep_wait_transaction(struct sep_device *sep)
+{
+ int error = 0;
+ DEFINE_WAIT(wait);
+
+ if (0 == test_and_set_bit(SEP_TRANSACTION_STARTED_LOCK_BIT,
+ &sep->in_use_flags)) {
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] no transactions, returning\n",
+ current->pid);
+ goto end_function_setpid;
+ }
+
+ /*
+ * Looping needed even for exclusive waitq entries
+ * due to process wakeup latencies, previous process
+ * might have already created another transaction.
+ */
+ for (;;) {
+ /*
+ * Exclusive waitq entry, so that only one process is
+ * woken up from the queue at a time.
+ */
+ prepare_to_wait_exclusive(&sep->event_transactions,
+ &wait,
+ TASK_INTERRUPTIBLE);
+ if (0 == test_and_set_bit(SEP_TRANSACTION_STARTED_LOCK_BIT,
+ &sep->in_use_flags)) {
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] no transactions, breaking\n",
+ current->pid);
+ break;
+ }
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] transactions ongoing, sleeping\n",
+ current->pid);
+ schedule();
+ dev_dbg(&sep->pdev->dev, "[PID%d] woken up\n", current->pid);
+
+ if (signal_pending(current)) {
+ dev_dbg(&sep->pdev->dev, "[PID%d] received signal\n",
+ current->pid);
+ error = -EINTR;
+ goto end_function;
+ }
+ }
+end_function_setpid:
+ /*
+ * The pid_doing_transaction indicates that this process
+ * now owns the facilities to perform a transaction with
+ * the SEP. While this process is performing a transaction,
+ * no other process who has the SEP device open can perform
+ * any transactions. This method allows more than one process
+ * to have the device open at any given time, which provides
+ * finer granularity for device utilization by multiple
+ * processes.
+ */
+ /* Only one process is able to progress here at a time */
+ sep->pid_doing_transaction = current->pid;
+
+end_function:
+ finish_wait(&sep->event_transactions, &wait);
+
+ return error;
+}
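The counterpart on the release side is to clear the lock bit and wake one sleeper from the exclusive queue; a sketch of the assumed pairing (the real release path lives in sep_end_transaction_handler):

	/* transaction is over; hand the SEP to the next process */
	sep->pid_doing_transaction = 0;
	clear_bit(SEP_TRANSACTION_STARTED_LOCK_BIT, &sep->in_use_flags);
	/* exclusive waiters: wake_up() rouses only one of them */
	wake_up(&sep->event_transactions);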
+
+/**
+ * sep_check_transaction_owner - Checks if current process owns transaction
+ * @sep: SEP device
+ */
+static inline int sep_check_transaction_owner(struct sep_device *sep)
+{
+ dev_dbg(&sep->pdev->dev, "[PID%d] transaction pid = %d\n",
+ current->pid,
+ sep->pid_doing_transaction);
+
+ if ((sep->pid_doing_transaction == 0) ||
+ (current->pid != sep->pid_doing_transaction)) {
+ return -EACCES;
+ }
+
+ /* We own the transaction */
+ return 0;
+}
+
+#ifdef DEBUG
+
+/**
+ * sep_dump_message - dump the message that is pending
+ * @sep: SEP device
+ * This will only print a dump if DEBUG is set; it does
+ * follow kernel debug print enabling
+ */
+static void _sep_dump_message(struct sep_device *sep)
+{
+ int count;
+
+ u32 *p = sep->shared_addr;
+
+ for (count = 0; count < 10 * 4; count += 4)
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] Word %d of the message is %x\n",
+ current->pid, count/4, *p++);
+}
+
+#endif
+
+/**
+ * sep_map_and_alloc_shared_area - allocate shared block
+ * @sep: security processor
+ * @size: size of shared area
+ */
+static int sep_map_and_alloc_shared_area(struct sep_device *sep)
+{
+ sep->shared_addr = dma_alloc_coherent(&sep->pdev->dev,
+ sep->shared_size,
+ &sep->shared_bus, GFP_KERNEL);
+
+ if (!sep->shared_addr) {
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] shared memory dma_alloc_coherent failed\n",
+ current->pid);
+ return -ENOMEM;
+ }
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] shared_addr %zx bytes @%p (bus %llx)\n",
+ current->pid,
+ sep->shared_size, sep->shared_addr,
+ (unsigned long long)sep->shared_bus);
+ return 0;
+}
+
+/**
+ * sep_unmap_and_free_shared_area - free shared block
+ * @sep: security processor
+ */
+static void sep_unmap_and_free_shared_area(struct sep_device *sep)
+{
+ dma_free_coherent(&sep->pdev->dev, sep->shared_size,
+ sep->shared_addr, sep->shared_bus);
+}
+
+#ifdef DEBUG
+
+/**
+ * sep_shared_bus_to_virt - convert bus/virt addresses
+ * @sep: pointer to struct sep_device
+ * @bus_address: address to convert
+ *
+ * Returns virtual address inside the shared area according
+ * to the bus address.
+ */
+static void *sep_shared_bus_to_virt(struct sep_device *sep,
+ dma_addr_t bus_address)
+{
+ return sep->shared_addr + (bus_address - sep->shared_bus);
+}
+
+#endif
+
+/**
+ * sep_open - device open method
+ * @inode: inode of SEP device
+ * @filp: file handle to SEP device
+ *
+ * Open method for the SEP device. Called when userspace opens
+ * the SEP device node.
+ *
+ * Returns zero on success otherwise an error code.
+ */
+static int sep_open(struct inode *inode, struct file *filp)
+{
+ struct sep_device *sep;
+ struct sep_private_data *priv;
+
+ dev_dbg(&sep_dev->pdev->dev, "[PID%d] open\n", current->pid);
+
+ if (filp->f_flags & O_NONBLOCK)
+ return -ENOTSUPP;
+
+ /*
+ * Get the SEP device structure and use it for the
+ * private_data field in filp for other methods
+ */
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ sep = sep_dev;
+ priv->device = sep;
+ filp->private_data = priv;
+
+ dev_dbg(&sep_dev->pdev->dev, "[PID%d] priv is 0x%p\n",
+ current->pid, priv);
+
+ /* Anyone can open; locking takes place at transaction level */
+ return 0;
+}
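Subsequent file operations recover this context from filp->private_data; a small illustrative accessor (the helper name is hypothetical):

	static inline struct sep_device *sep_dev_from_file(struct file *filp)
	{
		struct sep_private_data *priv = filp->private_data;

		return priv ? priv->device : NULL;
	}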
+
+/**
+ * sep_free_dma_table_data_handler - free DMA table
+ * @sep: pointer to struct sep_device
+ * @dma_ctx: dma context
+ *
+ * Handles the request to free DMA table for synchronic actions
+ */
+int sep_free_dma_table_data_handler(struct sep_device *sep,
+ struct sep_dma_context **dma_ctx)
+{
+ int count;
+ int dcb_counter;
+ /* Pointer to the current dma_resource struct */
+ struct sep_dma_resource *dma;
+
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] sep_free_dma_table_data_handler\n",
+ current->pid);
+
+ if (!dma_ctx || !(*dma_ctx)) {
+ /* No context or context already freed */
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] no DMA context or context already freed\n",
+ current->pid);
+
+ return 0;
+ }
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] (*dma_ctx)->nr_dcb_creat 0x%x\n",
+ current->pid,
+ (*dma_ctx)->nr_dcb_creat);
+
+ for (dcb_counter = 0;
+ dcb_counter < (*dma_ctx)->nr_dcb_creat; dcb_counter++) {
+ dma = &(*dma_ctx)->dma_res_arr[dcb_counter];
+
+ /* Unmap and free input map array */
+ if (dma->in_map_array) {
+ for (count = 0; count < dma->in_num_pages; count++) {
+ dma_unmap_page(&sep->pdev->dev,
+ dma->in_map_array[count].dma_addr,
+ dma->in_map_array[count].size,
+ DMA_TO_DEVICE);
+ }
+ kfree(dma->in_map_array);
+ }
+
+ /**
+ * Output is handled differently. If
+ * this was a secure dma into restricted memory,
+ * then we skip this step altogether as restricted
+ * memory is not available to the o/s at all.
+ */
+ if (((*dma_ctx)->secure_dma == false) &&
+ (dma->out_map_array)) {
+
+ for (count = 0; count < dma->out_num_pages; count++) {
+ dma_unmap_page(&sep->pdev->dev,
+ dma->out_map_array[count].dma_addr,
+ dma->out_map_array[count].size,
+ DMA_FROM_DEVICE);
+ }
+ kfree(dma->out_map_array);
+ }
+
+ /* Free page cache for input */
+ if (dma->in_page_array) {
+ for (count = 0; count < dma->in_num_pages; count++) {
+ flush_dcache_page(dma->in_page_array[count]);
+ page_cache_release(dma->in_page_array[count]);
+ }
+ kfree(dma->in_page_array);
+ }
+
+ /* Again, we do this only for non secure dma */
+ if (((*dma_ctx)->secure_dma == false) &&
+ (dma->out_page_array)) {
+
+ for (count = 0; count < dma->out_num_pages; count++) {
+ if (!PageReserved(dma->out_page_array[count]))
+ SetPageDirty(dma->out_page_array[count]);
+
+ flush_dcache_page(dma->out_page_array[count]);
+ page_cache_release(dma->out_page_array[count]);
+ }
+ kfree(dma->out_page_array);
+ }
+
+ /*
+ * Note that here we use in_map_num_entries because we
+ * don't have a page array; the page array is generated
+ * only in lock_user_pages, which is not called
+ * for kernel crypto, which is the exclusive user of the
+ * sg (scatter-gather) lists
+ */
+ if (dma->src_sg) {
+ dma_unmap_sg(&sep->pdev->dev, dma->src_sg,
+ dma->in_map_num_entries, DMA_TO_DEVICE);
+ dma->src_sg = NULL;
+ }
+
+ if (dma->dst_sg) {
+ dma_unmap_sg(&sep->pdev->dev, dma->dst_sg,
+ dma->in_map_num_entries, DMA_FROM_DEVICE);
+ dma->dst_sg = NULL;
+ }
+
+ /* Reset all the values */
+ dma->in_page_array = NULL;
+ dma->out_page_array = NULL;
+ dma->in_num_pages = 0;
+ dma->out_num_pages = 0;
+ dma->in_map_array = NULL;
+ dma->out_map_array = NULL;
+ dma->in_map_num_entries = 0;
+ dma->out_map_num_entries = 0;
+ }
+
+ (*dma_ctx)->nr_dcb_creat = 0;
+ (*dma_ctx)->num_lli_tables_created = 0;
+
+ kfree(*dma_ctx);
+ *dma_ctx = NULL;
+
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] sep_free_dma_table_data_handler end\n",
+ current->pid);
+
+ return 0;
+}
+
+/**
+ * sep_end_transaction_handler - end transaction
+ * @sep: pointer to struct sep_device
+ * @dma_ctx: DMA context
+ * @call_status: Call status
+ *
+ * This API handles the end transaction request.
+ */
+static int sep_end_transaction_handler(struct sep_device *sep,
+ struct sep_dma_context **dma_ctx,
+ struct sep_call_status *call_status,
+ struct sep_queue_info **my_queue_elem)
+{
+ dev_dbg(&sep->pdev->dev, "[PID%d] ending transaction\n", current->pid);
+
+ /*
+ * Extraneous transaction clearing would mess up PM
+ * device usage counters and SEP would get suspended
+ * just before we send a command to SEP in the next
+ * transaction
+ */
+ if (sep_check_transaction_owner(sep)) {
+ dev_dbg(&sep->pdev->dev, "[PID%d] not transaction owner\n",
+ current->pid);
+ return 0;
+ }
+
+ /* Update queue status */
+ sep_queue_status_remove(sep, my_queue_elem);
+
+ /* Check that all the DMA resources were freed */
+ if (dma_ctx)
+ sep_free_dma_table_data_handler(sep, dma_ctx);
+
+ /* Reset call status for next transaction */
+ if (call_status)
+ call_status->status = 0;
+
+ /* Clear the message area to avoid next transaction reading
+ * sensitive results from previous transaction */
+ memset(sep->shared_addr, 0,
+ SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);
+
+ /* start suspend delay */
+#ifdef SEP_ENABLE_RUNTIME_PM
+ if (sep->in_use) {
+ sep->in_use = 0;
+ pm_runtime_mark_last_busy(&sep->pdev->dev);
+ pm_runtime_put_autosuspend(&sep->pdev->dev);
+ }
+#endif
+
+ clear_bit(SEP_WORKING_LOCK_BIT, &sep->in_use_flags);
+ sep->pid_doing_transaction = 0;
+
+ /* Now it's safe for next process to proceed */
+ dev_dbg(&sep->pdev->dev, "[PID%d] waking up next transaction\n",
+ current->pid);
+ clear_bit(SEP_TRANSACTION_STARTED_LOCK_BIT, &sep->in_use_flags);
+ wake_up(&sep->event_transactions);
+
+ return 0;
+}
+
+/**
+ * sep_release - close a SEP device
+ * @inode: inode of SEP device
+ * @filp: file handle being closed
+ *
+ * Called on the final close of a SEP device.
+ */
+static int sep_release(struct inode *inode, struct file *filp)
+{
+ struct sep_private_data * const private_data = filp->private_data;
+ struct sep_call_status *call_status = &private_data->call_status;
+ struct sep_device *sep = private_data->device;
+ struct sep_dma_context **dma_ctx = &private_data->dma_ctx;
+ struct sep_queue_info **my_queue_elem = &private_data->my_queue_elem;
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] release\n", current->pid);
+
+ sep_end_transaction_handler(sep, dma_ctx, call_status,
+ my_queue_elem);
+
+ kfree(filp->private_data);
+
+ return 0;
+}
+
+/**
+ * sep_mmap - maps the shared area to user space
+ * @filp: pointer to struct file
+ * @vma: pointer to vm_area_struct
+ *
+ * Called on an mmap of our space via the normal SEP device
+ */
+static int sep_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct sep_private_data * const private_data = filp->private_data;
+ struct sep_call_status *call_status = &private_data->call_status;
+ struct sep_device *sep = private_data->device;
+ struct sep_queue_info **my_queue_elem = &private_data->my_queue_elem;
+ dma_addr_t bus_addr;
+ unsigned long error = 0;
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] sep_mmap\n", current->pid);
+
+ /* Set the transaction busy (own the device) */
+ /*
+ * Problem for multithreaded applications is that here we're
+ * possibly going to sleep while holding a write lock on
+ * current->mm->mmap_sem, which will cause deadlock for ongoing
+ * transaction trying to create DMA tables
+ */
+ error = sep_wait_transaction(sep);
+ if (error)
+ /* Interrupted by signal, don't clear transaction */
+ goto end_function;
+
+ /* Clear the message area to avoid next transaction reading
+ * sensitive results from previous transaction */
+ memset(sep->shared_addr, 0,
+ SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);
+
+ /*
+ * Check that the size of the mapped range is as the size of the message
+ * shared area
+ */
+ if ((vma->vm_end - vma->vm_start) > SEP_DRIVER_MMMAP_AREA_SIZE) {
+ error = -EINVAL;
+ goto end_function_with_error;
+ }
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] shared_addr is %p\n",
+ current->pid, sep->shared_addr);
+
+ /* Get bus address */
+ bus_addr = sep->shared_bus;
+
+ if (remap_pfn_range(vma, vma->vm_start, bus_addr >> PAGE_SHIFT,
+ vma->vm_end - vma->vm_start, vma->vm_page_prot)) {
+ dev_dbg(&sep->pdev->dev, "[PID%d] remap_page_range failed\n",
+ current->pid);
+ error = -EAGAIN;
+ goto end_function_with_error;
+ }
+
+ /* Update call status */
+ set_bit(SEP_LEGACY_MMAP_DONE_OFFSET, &call_status->status);
+
+ goto end_function;
+
+end_function_with_error:
+ /* Clear our transaction */
+ sep_end_transaction_handler(sep, NULL, call_status,
+ my_queue_elem);
+
+end_function:
+ return error;
+}
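+
+/*
+ * Editor's usage sketch (assumes the fd from the open() sketch above):
+ * the mapping length may not exceed SEP_DRIVER_MMMAP_AREA_SIZE, and a
+ * successful call makes the caller the transaction owner.
+ *
+ * void *shared = mmap(NULL, SEP_DRIVER_MMMAP_AREA_SIZE,
+ * PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+ */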
+
+/**
+ * sep_poll - poll handler
+ * @filp: pointer to struct file
+ * @wait: pointer to poll_table
+ *
+ * Called by the OS when the kernel is asked to do a poll on
+ * a SEP file handle.
+ */
+static unsigned int sep_poll(struct file *filp, poll_table *wait)
+{
+ struct sep_private_data * const private_data = filp->private_data;
+ struct sep_call_status *call_status = &private_data->call_status;
+ struct sep_device *sep = private_data->device;
+ u32 mask = 0;
+ u32 retval = 0;
+ u32 retval2 = 0;
+ unsigned long lock_irq_flag;
+
+ /* Am I the process that owns the transaction? */
+ if (sep_check_transaction_owner(sep)) {
+ dev_dbg(&sep->pdev->dev, "[PID%d] poll pid not owner\n",
+ current->pid);
+ mask = POLLERR;
+ goto end_function;
+ }
+
+ /* Check if send command or send_reply were activated previously */
+ if (0 == test_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET,
+ &call_status->status)) {
+ dev_warn(&sep->pdev->dev, "[PID%d] sendmsg not called\n",
+ current->pid);
+ mask = POLLERR;
+ goto end_function;
+ }
+
+ /* Add the event to the polling wait table */
+ dev_dbg(&sep->pdev->dev, "[PID%d] poll: calling wait sep_event\n",
+ current->pid);
+
+ poll_wait(filp, &sep->event_interrupt, wait);
+
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] poll: send_ct is %lx reply ct is %lx\n",
+ current->pid, sep->send_ct, sep->reply_ct);
+
+ /* Check if an error occurred during poll */
+ retval2 = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
+ if ((retval2 != 0x0) && (retval2 != 0x8)) {
+ dev_dbg(&sep->pdev->dev, "[PID%d] poll; poll error %x\n",
+ current->pid, retval2);
+ mask |= POLLERR;
+ goto end_function;
+ }
+
+ spin_lock_irqsave(&sep->snd_rply_lck, lock_irq_flag);
+
+ if (sep->send_ct == sep->reply_ct) {
+ spin_unlock_irqrestore(&sep->snd_rply_lck, lock_irq_flag);
+ retval = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] poll: data ready check (GPR2) %x\n",
+ current->pid, retval);
+
+ /* Check if printf request */
+ if ((retval >> 30) & 0x1) {
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] poll: SEP printf request\n",
+ current->pid);
+ goto end_function;
+ }
+
+ /* Check if this is a SEP reply or a request */
+ if (retval >> 31) {
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] poll: SEP request\n",
+ current->pid);
+ } else {
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] poll: normal return\n",
+ current->pid);
+ sep_dump_message(sep);
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] poll; SEP reply POLLIN|POLLRDNORM\n",
+ current->pid);
+ mask |= POLLIN | POLLRDNORM;
+ }
+ set_bit(SEP_LEGACY_POLL_DONE_OFFSET, &call_status->status);
+ } else {
+ spin_unlock_irqrestore(&sep->snd_rply_lck, lock_irq_flag);
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] poll; no reply; returning mask of 0\n",
+ current->pid);
+ mask = 0;
+ }
+
+end_function:
+ return mask;
+}
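+
+/*
+ * Editor's sketch (not part of the original driver): the GPR2 decoding
+ * done in sep_poll() above: bit 30 flags a SEP printf request and
+ * bit 31 distinguishes a SEP-initiated request from a normal reply.
+ * Hypothetical helpers:
+ */
+static inline bool sep_gpr2_is_printf_request(u32 gpr2)
+{
+ return (gpr2 >> 30) & 0x1;
+}
+
+static inline bool sep_gpr2_is_sep_request(u32 gpr2)
+{
+ return gpr2 >> 31;
+}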
+
+/**
+ * sep_time_address - address in SEP memory of time
+ * @sep: SEP device we want the address from
+ *
+ * Return the address of the two dwords in memory used for time
+ * setting.
+ */
+static u32 *sep_time_address(struct sep_device *sep)
+{
+ return sep->shared_addr +
+ SEP_DRIVER_SYSTEM_TIME_MEMORY_OFFSET_IN_BYTES;
+}
+
+/**
+ * sep_set_time - set the SEP time
+ * @sep: the SEP we are setting the time for
+ *
+ * Calculates time and sets it at the predefined address.
+ * Called with the SEP mutex held.
+ */
+static unsigned long sep_set_time(struct sep_device *sep)
+{
+ struct timeval time;
+ u32 *time_addr; /* Address of time as seen by the kernel */
+
+ do_gettimeofday(&time);
+
+ /* Set value in the SYSTEM MEMORY offset */
+ time_addr = sep_time_address(sep);
+
+ time_addr[0] = SEP_TIME_VAL_TOKEN;
+ time_addr[1] = time.tv_sec;
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] time.tv_sec is %lu\n",
+ current->pid, time.tv_sec);
+ dev_dbg(&sep->pdev->dev, "[PID%d] time_addr is %p\n",
+ current->pid, time_addr);
+ dev_dbg(&sep->pdev->dev, "[PID%d] sep->shared_addr is %p\n",
+ current->pid, sep->shared_addr);
+
+ return time.tv_sec;
+}
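+
+/*
+ * Editor's sketch (not part of the original driver): the time area
+ * written by sep_set_time() holds two 32-bit words, SEP_TIME_VAL_TOKEN
+ * followed by the seconds value. A hypothetical read-back helper:
+ */
+static inline u32 sep_get_time_seconds(struct sep_device *sep)
+{
+ u32 *time_addr = sep_time_address(sep);
+
+ /* time_addr[0] holds SEP_TIME_VAL_TOKEN; time_addr[1] the seconds */
+ return time_addr[1];
+}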
+
+/**
+ * sep_send_command_handler - kick off a command
+ * @sep: SEP being signalled
+ *
+ * This function raises an interrupt to SEP, signalling that it has a new
+ * command from the host
+ *
+ * Note that this function does fall under the ioctl lock
+ */
+int sep_send_command_handler(struct sep_device *sep)
+{
+ unsigned long lock_irq_flag;
+ u32 *msg_pool;
+ int error = 0;
+
+ /* Basic sanity check; set msg pool to start of shared area */
+ msg_pool = (u32 *)sep->shared_addr;
+ msg_pool += 2;
+
+ /* Look for start msg token */
+ if (*msg_pool != SEP_START_MSG_TOKEN) {
+ dev_warn(&sep->pdev->dev, "start message token not present\n");
+ error = -EPROTO;
+ goto end_function;
+ }
+
+ /* Do we have a reasonable size? */
+ msg_pool += 1;
+ if ((*msg_pool < 2) ||
+ (*msg_pool > SEP_DRIVER_MAX_MESSAGE_SIZE_IN_BYTES)) {
+
+ dev_warn(&sep->pdev->dev, "invalid message size\n");
+ error = -EPROTO;
+ goto end_function;
+ }
+
+ /* Does the command look reasonable? */
+ msg_pool += 1;
+ if (*msg_pool < 2) {
+ dev_warn(&sep->pdev->dev, "invalid message opcode\n");
+ error = -EPROTO;
+ goto end_function;
+ }
+
+#if defined(CONFIG_PM_RUNTIME) && defined(SEP_ENABLE_RUNTIME_PM)
+ dev_dbg(&sep->pdev->dev, "[PID%d] before pm sync status 0x%X\n",
+ current->pid,
+ sep->pdev->dev.power.runtime_status);
+ sep->in_use = 1; /* device is about to be used */
+ pm_runtime_get_sync(&sep->pdev->dev);
+#endif
+
+ if (test_and_set_bit(SEP_WORKING_LOCK_BIT, &sep->in_use_flags)) {
+ error = -EPROTO;
+ goto end_function;
+ }
+ sep->in_use = 1; /* device is about to be used */
+ sep_set_time(sep);
+
+ sep_dump_message(sep);
+
+ /* Update counter */
+ spin_lock_irqsave(&sep->snd_rply_lck, lock_irq_flag);
+ sep->send_ct++;
+ spin_unlock_irqrestore(&sep->snd_rply_lck, lock_irq_flag);
+
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] sep_send_command_handler send_ct %lx reply_ct %lx\n",
+ current->pid, sep->send_ct, sep->reply_ct);
+
+ /* Send interrupt to SEP */
+ sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x2);
+
+end_function:
+ return error;
+}
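+
+/*
+ * Editor's sketch (not part of the original driver): the message header
+ * layout validated in sep_send_command_handler() above: word 2 of the
+ * shared area carries SEP_START_MSG_TOKEN, word 3 the message size in
+ * bytes and word 4 the opcode. A hypothetical stand-alone check:
+ */
+static inline bool sep_msg_header_valid(u32 *shared)
+{
+ return shared[2] == SEP_START_MSG_TOKEN &&
+ shared[3] >= 2 &&
+ shared[3] <= SEP_DRIVER_MAX_MESSAGE_SIZE_IN_BYTES &&
+ shared[4] >= 2;
+}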
+
+/**
+ * sep_crypto_dma - DMA map a scatterlist
+ * @sep: pointer to struct sep_device
+ * @sg: pointer to struct scatterlist
+ * @direction: DMA direction (to or from the device)
+ * @dma_maps: pointer to place a pointer to array of dma maps
+ * This is filled in; anything previously there will be lost
+ * The structure for dma maps is sep_dma_map
+ * @returns number of dma maps on success; negative on error
+ *
+ * This creates the dma table from the scatterlist
+ * It is used only for kernel crypto as it works with scatterlists
+ * representation of data buffers
+ *
+ */
+static int sep_crypto_dma(
+ struct sep_device *sep,
+ struct scatterlist *sg,
+ struct sep_dma_map **dma_maps,
+ enum dma_data_direction direction)
+{
+ struct scatterlist *temp_sg;
+
+ u32 count_segment;
+ u32 count_mapped;
+ struct sep_dma_map *sep_dma;
+ int ct1;
+
+ if (sg->length == 0)
+ return 0;
+
+ /* Count the segments */
+ temp_sg = sg;
+ count_segment = 0;
+ while (temp_sg) {
+ count_segment += 1;
+ temp_sg = scatterwalk_sg_next(temp_sg);
+ }
+ dev_dbg(&sep->pdev->dev,
+ "There are (hex) %x segments in sg\n", count_segment);
+
+ /* DMA map segments */
+ count_mapped = dma_map_sg(&sep->pdev->dev, sg,
+ count_segment, direction);
+
+ dev_dbg(&sep->pdev->dev,
+ "There are (hex) %x maps in sg\n", count_mapped);
+
+ if (count_mapped == 0) {
+ dev_dbg(&sep->pdev->dev, "Cannot dma_map_sg\n");
+ return -ENOMEM;
+ }
+
+ sep_dma = kmalloc(sizeof(struct sep_dma_map) *
+ count_mapped, GFP_ATOMIC);
+
+ if (sep_dma == NULL) {
+ dev_dbg(&sep->pdev->dev, "Cannot allocate dma_maps\n");
+ return -ENOMEM;
+ }
+
+ for_each_sg(sg, temp_sg, count_mapped, ct1) {
+ sep_dma[ct1].dma_addr = sg_dma_address(temp_sg);
+ sep_dma[ct1].size = sg_dma_len(temp_sg);
+ dev_dbg(&sep->pdev->dev, "(all hex) map %x dma %lx len %lx\n",
+ ct1, (unsigned long)sep_dma[ct1].dma_addr,
+ (unsigned long)sep_dma[ct1].size);
+ }
+
+ *dma_maps = sep_dma;
+ return count_mapped;
+
+}
+
+/**
+ * sep_crypto_lli - build an LLI array from a scatterlist
+ * @sep: pointer to struct sep_device
+ * @sg: pointer to struct scatterlist
+ * @data_size: total data size
+ * @direction: DMA direction (to or from the device)
+ * @dma_maps: pointer to place a pointer to array of dma maps
+ * This is filled in; anything previously there will be lost
+ * The structure for dma maps is sep_dma_map
+ * @lli_maps: pointer to place a pointer to array of lli maps
+ * This is filled in; anything previously there will be lost
+ * The structure for lli maps is sep_lli_entry
+ * @returns number of dma maps on success; negative on error
+ *
+ * This creates the LLI table from the scatterlist
+ * It is only used for kernel crypto as it works exclusively
+ * with scatterlists (struct scatterlist) representation of
+ * data buffers
+ */
+static int sep_crypto_lli(
+ struct sep_device *sep,
+ struct scatterlist *sg,
+ struct sep_dma_map **maps,
+ struct sep_lli_entry **llis,
+ u32 data_size,
+ enum dma_data_direction direction)
+{
+
+ int ct1;
+ struct sep_lli_entry *sep_lli;
+ struct sep_dma_map *sep_map;
+
+ int nbr_ents;
+
+ nbr_ents = sep_crypto_dma(sep, sg, maps, direction);
+ if (nbr_ents <= 0) {
+ dev_dbg(&sep->pdev->dev, "crypto_dma failed %x\n",
+ nbr_ents);
+ return nbr_ents;
+ }
+
+ sep_map = *maps;
+
+ sep_lli = kmalloc(sizeof(struct sep_lli_entry) * nbr_ents, GFP_ATOMIC);
+
+ if (sep_lli == NULL) {
+ dev_dbg(&sep->pdev->dev, "Cannot allocate lli_maps\n");
+
+ kfree(*maps);
+ *maps = NULL;
+ return -ENOMEM;
+ }
+
+ for (ct1 = 0; ct1 < nbr_ents; ct1 += 1) {
+ sep_lli[ct1].bus_address = (u32)sep_map[ct1].dma_addr;
+
+ /* Maximum for page is total data size */
+ if (sep_map[ct1].size > data_size)
+ sep_map[ct1].size = data_size;
+
+ sep_lli[ct1].block_size = (u32)sep_map[ct1].size;
+ }
+
+ *llis = sep_lli;
+ return nbr_ents;
+}
+
+/**
+ * sep_lock_kernel_pages - map kernel pages for DMA
+ * @sep: pointer to struct sep_device
+ * @kernel_virt_addr: address of data buffer in kernel
+ * @data_size: size of data
+ * @lli_array_ptr: lli array
+ * @in_out_flag: input into device or output from device
+ *
+ * This function locks all the physical pages of the kernel virtual buffer
+ * and constructs a basic lli array, where each entry holds the physical
+ * page address and the size that application data holds in this page
+ * This function is used only during kernel crypto module calls from within
+ * the kernel (when ioctl is not used)
+ *
+ * This is used only for kernel crypto. Kernel pages
+ * are handled differently as they are done via
+ * scatter gather lists (struct scatterlist)
+ */
+static int sep_lock_kernel_pages(struct sep_device *sep,
+ unsigned long kernel_virt_addr,
+ u32 data_size,
+ struct sep_lli_entry **lli_array_ptr,
+ int in_out_flag,
+ struct sep_dma_context *dma_ctx)
+
+{
+ int num_pages; /* signed: sep_crypto_lli() may return an error */
+ struct scatterlist *sg;
+
+ /* Array of lli */
+ struct sep_lli_entry *lli_array;
+ /* Map array */
+ struct sep_dma_map *map_array;
+
+ enum dma_data_direction direction;
+
+ lli_array = NULL;
+ map_array = NULL;
+
+ if (in_out_flag == SEP_DRIVER_IN_FLAG) {
+ direction = DMA_TO_DEVICE;
+ sg = dma_ctx->src_sg;
+ } else {
+ direction = DMA_FROM_DEVICE;
+ sg = dma_ctx->dst_sg;
+ }
+
+ num_pages = sep_crypto_lli(sep, sg, &map_array, &lli_array,
+ data_size, direction);
+
+ if (num_pages <= 0) {
+ dev_dbg(&sep->pdev->dev, "sep_crypto_lli returned error %x\n",
+ num_pages);
+ return -ENOMEM;
+ }
+
+ /* Put mapped kernel sg into kernel resource array */
+
+ /* Set output params according to the in_out flag */
+ if (in_out_flag == SEP_DRIVER_IN_FLAG) {
+ *lli_array_ptr = lli_array;
+ dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_num_pages =
+ num_pages;
+ dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array =
+ NULL;
+ dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_array =
+ map_array;
+ dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_num_entries =
+ num_pages;
+ dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].src_sg =
+ dma_ctx->src_sg;
+ } else {
+ *lli_array_ptr = lli_array;
+ dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_num_pages =
+ num_pages;
+ dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_page_array =
+ NULL;
+ dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_map_array =
+ map_array;
+ dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].
+ out_map_num_entries = num_pages;
+ dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].dst_sg =
+ dma_ctx->dst_sg;
+ }
+
+ return 0;
+}
+
+/**
+ * sep_lock_user_pages - lock and map user pages for DMA
+ * @sep: pointer to struct sep_device
+ * @app_virt_addr: user memory data buffer
+ * @data_size: size of data buffer
+ * @lli_array_ptr: lli array
+ * @in_out_flag: input or output to device
+ *
+ * This function locks all the physical pages of the application
+ * virtual buffer and constructs a basic lli array, where each entry
+ * holds the physical page address and the size that application
+ * data holds in these physical pages
+ */
+static int sep_lock_user_pages(struct sep_device *sep,
+ u32 app_virt_addr,
+ u32 data_size,
+ struct sep_lli_entry **lli_array_ptr,
+ int in_out_flag,
+ struct sep_dma_context *dma_ctx)
+
+{
+ int error = 0;
+ u32 count;
+ int result;
+ /* The page of the end address of the user space buffer */
+ u32 end_page;
+ /* The page of the start address of the user space buffer */
+ u32 start_page;
+ /* The range in pages */
+ u32 num_pages;
+ /* Array of pointers to page */
+ struct page **page_array;
+ /* Array of lli */
+ struct sep_lli_entry *lli_array;
+ /* Map array */
+ struct sep_dma_map *map_array;
+
+ /* Set start and end pages and num pages */
+ end_page = (app_virt_addr + data_size - 1) >> PAGE_SHIFT;
+ start_page = app_virt_addr >> PAGE_SHIFT;
+ num_pages = end_page - start_page + 1;
+
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] lock user pages app_virt_addr is %x\n",
+ current->pid, app_virt_addr);
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] data_size is (hex) %x\n",
+ current->pid, data_size);
+ dev_dbg(&sep->pdev->dev, "[PID%d] start_page is (hex) %x\n",
+ current->pid, start_page);
+ dev_dbg(&sep->pdev->dev, "[PID%d] end_page is (hex) %x\n",
+ current->pid, end_page);
+ dev_dbg(&sep->pdev->dev, "[PID%d] num_pages is (hex) %x\n",
+ current->pid, num_pages);
+
+ /* Allocate array of pages structure pointers */
+ page_array = kmalloc(sizeof(struct page *) * num_pages, GFP_ATOMIC);
+ if (!page_array) {
+ error = -ENOMEM;
+ goto end_function;
+ }
+ map_array = kmalloc(sizeof(struct sep_dma_map) * num_pages, GFP_ATOMIC);
+ if (!map_array) {
+ dev_warn(&sep->pdev->dev,
+ "[PID%d] kmalloc for map_array failed\n",
+ current->pid);
+ error = -ENOMEM;
+ goto end_function_with_error1;
+ }
+
+ lli_array = kmalloc(sizeof(struct sep_lli_entry) * num_pages,
+ GFP_ATOMIC);
+
+ if (!lli_array) {
+ dev_warn(&sep->pdev->dev,
+ "[PID%d] kmalloc for lli_array failed\n",
+ current->pid);
+ error = -ENOMEM;
+ goto end_function_with_error2;
+ }
+
+ /* Convert the application virtual address into a set of physical */
+ down_read(&current->mm->mmap_sem);
+ result = get_user_pages(current, current->mm, app_virt_addr,
+ num_pages,
+ ((in_out_flag == SEP_DRIVER_IN_FLAG) ? 0 : 1),
+ 0, page_array, NULL);
+
+ up_read(&current->mm->mmap_sem);
+
+ /* Check the number of pages locked - if not all then exit with error */
+ if (result != num_pages) {
+ dev_warn(&sep->pdev->dev,
+ "[PID%d] not all pages locked by get_user_pages, "
+ "result 0x%X, num_pages 0x%X\n",
+ current->pid, result, num_pages);
+ error = -ENOMEM;
+ goto end_function_with_error3;
+ }
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] get_user_pages succeeded\n",
+ current->pid);
+
+ /*
+ * Fill the array using page array data and
+ * map the pages - this action will also flush the cache as needed
+ */
+ for (count = 0; count < num_pages; count++) {
+ /* Fill the map array */
+ map_array[count].dma_addr =
+ dma_map_page(&sep->pdev->dev, page_array[count],
+ 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
+
+ map_array[count].size = PAGE_SIZE;
+
+ /* Fill the lli array entry */
+ lli_array[count].bus_address = (u32)map_array[count].dma_addr;
+ lli_array[count].block_size = PAGE_SIZE;
+
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] lli_array[%x].bus_address is %08lx, "
+ "lli_array[%x].block_size is (hex) %x\n", current->pid,
+ count, (unsigned long)lli_array[count].bus_address,
+ count, lli_array[count].block_size);
+ }
+
+ /* Check the offset for the first page */
+ lli_array[0].bus_address =
+ lli_array[0].bus_address + (app_virt_addr & (~PAGE_MASK));
+
+ /* Check that not all the data is in the first page only */
+ if ((PAGE_SIZE - (app_virt_addr & (~PAGE_MASK))) >= data_size)
+ lli_array[0].block_size = data_size;
+ else
+ lli_array[0].block_size =
+ PAGE_SIZE - (app_virt_addr & (~PAGE_MASK));
+
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] After check if page 0 has all data\n",
+ current->pid);
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] lli_array[0].bus_address is (hex) %08lx, "
+ "lli_array[0].block_size is (hex) %x\n",
+ current->pid,
+ (unsigned long)lli_array[0].bus_address,
+ lli_array[0].block_size);
+
+ /* Check the size of the last page */
+ if (num_pages > 1) {
+ lli_array[num_pages - 1].block_size =
+ (app_virt_addr + data_size) & (~PAGE_MASK);
+ if (lli_array[num_pages - 1].block_size == 0)
+ lli_array[num_pages - 1].block_size = PAGE_SIZE;
+
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] After last page size adjustment\n",
+ current->pid);
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] lli_array[%x].bus_address is (hex) %08lx, "
+ "lli_array[%x].block_size is (hex) %x\n",
+ current->pid,
+ num_pages - 1,
+ (unsigned long)lli_array[num_pages - 1].bus_address,
+ num_pages - 1,
+ lli_array[num_pages - 1].block_size);
+ }
+
+ /* Set output params according to the in_out flag */
+ if (in_out_flag == SEP_DRIVER_IN_FLAG) {
+ *lli_array_ptr = lli_array;
+ dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_num_pages =
+ num_pages;
+ dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array =
+ page_array;
+ dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_array =
+ map_array;
+ dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_num_entries =
+ num_pages;
+ dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].src_sg = NULL;
+ } else {
+ *lli_array_ptr = lli_array;
+ dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_num_pages =
+ num_pages;
+ dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_page_array =
+ page_array;
+ dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_map_array =
+ map_array;
+ dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].
+ out_map_num_entries = num_pages;
+ dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].dst_sg = NULL;
+ }
+ goto end_function;
+
+end_function_with_error3:
+ /* Free lli array */
+ kfree(lli_array);
+
+end_function_with_error2:
+ kfree(map_array);
+
+end_function_with_error1:
+ /* Free page array */
+ kfree(page_array);
+
+end_function:
+ return error;
+}
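+
+/*
+ * Editor's worked example for the first/last page adjustment above
+ * (hypothetical values, PAGE_SIZE 0x1000): for app_virt_addr 0x1003
+ * and data_size 0x2000, num_pages is 3; the first entry starts at page
+ * offset 0x003 with block_size 0xffd, the middle page is a full
+ * 0x1000, and the last block_size is (0x1003 + 0x2000) & ~PAGE_MASK =
+ * 0x003; the three blocks sum back to 0x2000.
+ */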
+
+/**
+ * sep_lli_table_secure_dma - get lli array for IMR addresses
+ * @sep: pointer to struct sep_device
+ * @app_virt_addr: user memory data buffer
+ * @data_size: size of data buffer
+ * @lli_array_ptr: lli array
+ * @in_out_flag: not used
+ * @dma_ctx: pointer to struct sep_dma_context
+ *
+ * This function creates lli tables for outputting data to
+ * IMR memory, which is memory that cannot be accessed by
+ * the x86 processor.
+ */
+static int sep_lli_table_secure_dma(struct sep_device *sep,
+ u32 app_virt_addr,
+ u32 data_size,
+ struct sep_lli_entry **lli_array_ptr,
+ int in_out_flag,
+ struct sep_dma_context *dma_ctx)
+
+{
+ int error = 0;
+ u32 count;
+ /* The page of the end address of the user space buffer */
+ u32 end_page;
+ /* The page of the start address of the user space buffer */
+ u32 start_page;
+ /* The range in pages */
+ u32 num_pages;
+ /* Array of lli */
+ struct sep_lli_entry *lli_array;
+
+ /* Set start and end pages and num pages */
+ end_page = (app_virt_addr + data_size - 1) >> PAGE_SHIFT;
+ start_page = app_virt_addr >> PAGE_SHIFT;
+ num_pages = end_page - start_page + 1;
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] lock user pages"
+ " app_virt_addr is %x\n", current->pid, app_virt_addr);
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] data_size is (hex) %x\n",
+ current->pid, data_size);
+ dev_dbg(&sep->pdev->dev, "[PID%d] start_page is (hex) %x\n",
+ current->pid, start_page);
+ dev_dbg(&sep->pdev->dev, "[PID%d] end_page is (hex) %x\n",
+ current->pid, end_page);
+ dev_dbg(&sep->pdev->dev, "[PID%d] num_pages is (hex) %x\n",
+ current->pid, num_pages);
+
+ lli_array = kmalloc(sizeof(struct sep_lli_entry) * num_pages,
+ GFP_ATOMIC);
+
+ if (!lli_array) {
+ dev_warn(&sep->pdev->dev,
+ "[PID%d] kmalloc for lli_array failed\n",
+ current->pid);
+ return -ENOMEM;
+ }
+
+ /*
+ * Fill the lli_array
+ */
+ start_page = start_page << PAGE_SHIFT;
+ for (count = 0; count < num_pages; count++) {
+ /* Fill the lli array entry */
+ lli_array[count].bus_address = start_page;
+ lli_array[count].block_size = PAGE_SIZE;
+
+ start_page += PAGE_SIZE;
+
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] lli_array[%x].bus_address is %08lx, "
+ "lli_array[%x].block_size is (hex) %x\n",
+ current->pid,
+ count, (unsigned long)lli_array[count].bus_address,
+ count, lli_array[count].block_size);
+ }
+
+ /* Check the offset for the first page */
+ lli_array[0].bus_address =
+ lli_array[0].bus_address + (app_virt_addr & (~PAGE_MASK));
+
+ /* Check that not all the data is in the first page only */
+ if ((PAGE_SIZE - (app_virt_addr & (~PAGE_MASK))) >= data_size)
+ lli_array[0].block_size = data_size;
+ else
+ lli_array[0].block_size =
+ PAGE_SIZE - (app_virt_addr & (~PAGE_MASK));
+
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] After check if page 0 has all data\n"
+ "lli_array[0].bus_address is (hex) %08lx, "
+ "lli_array[0].block_size is (hex) %x\n",
+ current->pid,
+ (unsigned long)lli_array[0].bus_address,
+ lli_array[0].block_size);
+
+ /* Check the size of the last page */
+ if (num_pages > 1) {
+ lli_array[num_pages - 1].block_size =
+ (app_virt_addr + data_size) & (~PAGE_MASK);
+ if (lli_array[num_pages - 1].block_size == 0)
+ lli_array[num_pages - 1].block_size = PAGE_SIZE;
+
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] After last page size adjustment\n"
+ "lli_array[%x].bus_address is (hex) %08lx, "
+ "lli_array[%x].block_size is (hex) %x\n",
+ current->pid, num_pages - 1,
+ (unsigned long)lli_array[num_pages - 1].bus_address,
+ num_pages - 1,
+ lli_array[num_pages - 1].block_size);
+ }
+ *lli_array_ptr = lli_array;
+ dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_num_pages = num_pages;
+ dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_page_array = NULL;
+ dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_map_array = NULL;
+ dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_map_num_entries = 0;
+
+ return error;
+}
+
+/**
+ * sep_calculate_lli_table_max_size - size the LLI table
+ * @sep: pointer to struct sep_device
+ * @lli_in_array_ptr: pointer to the input lli array
+ * @num_array_entries: number of entries in the lli array
+ * @last_table_flag: set to 1 when the last table is reached
+ *
+ * This function calculates the size of data that can be inserted into
+ * the lli table from this array, such that either the table is full
+ * (all entries are entered), or there are no more entries in the
+ * lli array
+ */
+static u32 sep_calculate_lli_table_max_size(struct sep_device *sep,
+ struct sep_lli_entry *lli_in_array_ptr,
+ u32 num_array_entries,
+ u32 *last_table_flag)
+{
+ u32 counter;
+ /* Table data size */
+ u32 table_data_size = 0;
+ /* Data size for the next table */
+ u32 next_table_data_size;
+
+ *last_table_flag = 0;
+
+ /*
+ * Calculate the data in the out lli table till we fill the whole
+ * table or till the data has ended
+ */
+ for (counter = 0;
+ (counter < (SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP - 1)) &&
+ (counter < num_array_entries); counter++)
+ table_data_size += lli_in_array_ptr[counter].block_size;
+
+ /*
+ * Check if we reached the last entry,
+ * meaning this is the last table to build,
+ * and no need to check the block alignment
+ */
+ if (counter == num_array_entries) {
+ /* Set the last table flag */
+ *last_table_flag = 1;
+ goto end_function;
+ }
+
+ /*
+ * Calculate the data size of the next table.
+ * Stop if no entries left or if data size is more than the DMA restriction
+ */
+ next_table_data_size = 0;
+ for (; counter < num_array_entries; counter++) {
+ next_table_data_size += lli_in_array_ptr[counter].block_size;
+ if (next_table_data_size >= SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE)
+ break;
+ }
+
+ /*
+ * Check if the next table data size is less than the DMA restriction.
+ * If it is, recalculate the current table size so that the next
+ * table data size will be adequate for DMA
+ */
+ if (next_table_data_size &&
+ next_table_data_size < SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE)
+
+ table_data_size -= (SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE -
+ next_table_data_size);
+
+end_function:
+ return table_data_size;
+}
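+
+/*
+ * Editor's worked example for the trimming rule above (hypothetical
+ * values): if the current table accumulates 0x4000 bytes but only
+ * 0x200 bytes would remain for the next table while
+ * SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE is 0x400, the current table gives
+ * back 0x400 - 0x200 = 0x200 bytes so that the next table reaches the
+ * minimum DMA size.
+ */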
+
+/**
+ * sep_build_lli_table - build an lli array for the given table
+ * @sep: pointer to struct sep_device
+ * @lli_array_ptr: pointer to lli array
+ * @lli_table_ptr: pointer to lli table
+ * @num_processed_entries_ptr: pointer to number of array entries processed
+ * @num_table_entries_ptr: pointer to number of entries in the built table
+ * @table_data_size: total data size
+ *
+ * Builds an lli table from the lli_array according to
+ * the given size of data
+ */
+static void sep_build_lli_table(struct sep_device *sep,
+ struct sep_lli_entry *lli_array_ptr,
+ struct sep_lli_entry *lli_table_ptr,
+ u32 *num_processed_entries_ptr,
+ u32 *num_table_entries_ptr,
+ u32 table_data_size)
+{
+ /* Current table data size */
+ u32 curr_table_data_size;
+ /* Counter of lli array entry */
+ u32 array_counter;
+
+ /* Init current table data size and lli array entry counter */
+ curr_table_data_size = 0;
+ array_counter = 0;
+ *num_table_entries_ptr = 1;
+
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] build lli table table_data_size: (hex) %x\n",
+ current->pid, table_data_size);
+
+ /* Fill the table till table size reaches the needed amount */
+ while (curr_table_data_size < table_data_size) {
+ /* Update the number of entries in table */
+ (*num_table_entries_ptr)++;
+
+ lli_table_ptr->bus_address =
+ cpu_to_le32(lli_array_ptr[array_counter].bus_address);
+
+ lli_table_ptr->block_size =
+ cpu_to_le32(lli_array_ptr[array_counter].block_size);
+
+ curr_table_data_size += lli_array_ptr[array_counter].block_size;
+
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] lli_table_ptr is %p\n",
+ current->pid, lli_table_ptr);
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] lli_table_ptr->bus_address: %08lx\n",
+ current->pid,
+ (unsigned long)lli_table_ptr->bus_address);
+
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] lli_table_ptr->block_size is (hex) %x\n",
+ current->pid, lli_table_ptr->block_size);
+
+ /* Check for overflow of the table data */
+ if (curr_table_data_size > table_data_size) {
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] curr_table_data_size too large\n",
+ current->pid);
+
+ /* Update the size of block in the table */
+ lli_table_ptr->block_size =
+ cpu_to_le32(lli_table_ptr->block_size) -
+ (curr_table_data_size - table_data_size);
+
+ /* Update the physical address in the lli array */
+ lli_array_ptr[array_counter].bus_address +=
+ cpu_to_le32(lli_table_ptr->block_size);
+
+ /* Update the block size left in the lli array */
+ lli_array_ptr[array_counter].block_size =
+ (curr_table_data_size - table_data_size);
+ } else
+ /* Advance to the next entry in the lli_array */
+ array_counter++;
+
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] lli_table_ptr->bus_address is %08lx\n",
+ current->pid,
+ (unsigned long)lli_table_ptr->bus_address);
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] lli_table_ptr->block_size is (hex) %x\n",
+ current->pid,
+ lli_table_ptr->block_size);
+
+ /* Move to the next entry in table */
+ lli_table_ptr++;
+ }
+
+ /* Set the info entry to default */
+ lli_table_ptr->bus_address = 0xffffffff;
+ lli_table_ptr->block_size = 0;
+
+ /* Set the output parameter */
+ *num_processed_entries_ptr += array_counter;
+
+}
+
+/**
+ * sep_shared_area_virt_to_bus - map shared area to bus address
+ * @sep: pointer to struct sep_device
+ * @virt_address: virtual address to convert
+ *
+ * This function returns the physical address inside the shared area according
+ * to the virtual address. It can be either on the external RAM device
+ * (ioremapped), or on the system RAM
+ * This implementation is for the external RAM
+ */
+static dma_addr_t sep_shared_area_virt_to_bus(struct sep_device *sep,
+ void *virt_address)
+{
+ dev_dbg(&sep->pdev->dev, "[PID%d] sh virt to phys v %p\n",
+ current->pid, virt_address);
+ dev_dbg(&sep->pdev->dev, "[PID%d] sh virt to phys p %08lx\n",
+ current->pid,
+ (unsigned long)
+ sep->shared_bus + (virt_address - sep->shared_addr));
+
+ return sep->shared_bus + (size_t)(virt_address - sep->shared_addr);
+}
+
+/**
+ * sep_shared_area_bus_to_virt - map shared area bus address to kernel
+ * @sep: pointer to struct sep_device
+ * @bus_address: bus address to convert
+ *
+ * This function returns the virtual address inside the shared area
+ * according to the physical address. It can be either on the
+ * external RAM device (ioremapped), or on the system RAM
+ * This implementation is for the external RAM
+ */
+static void *sep_shared_area_bus_to_virt(struct sep_device *sep,
+ dma_addr_t bus_address)
+{
+ dev_dbg(&sep->pdev->dev, "[PID%d] shared bus to virt b=%lx v=%lx\n",
+ current->pid,
+ (unsigned long)bus_address, (unsigned long)(sep->shared_addr +
+ (size_t)(bus_address - sep->shared_bus)));
+
+ return sep->shared_addr + (size_t)(bus_address - sep->shared_bus);
+}
+
+/**
+ * sep_debug_print_lli_tables - dump LLI table
+ * @sep: pointer to struct sep_device
+ * @lli_table_ptr: pointer to sep_lli_entry
+ * @num_table_entries: number of entries
+ * @table_data_size: total data size
+ *
+ * Walk the list of the created tables and print all the data
+ */
+static void sep_debug_print_lli_tables(struct sep_device *sep,
+ struct sep_lli_entry *lli_table_ptr,
+ unsigned long num_table_entries,
+ unsigned long table_data_size)
+{
+#ifdef DEBUG
+ unsigned long table_count = 1;
+ unsigned long entries_count = 0;
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] sep_debug_print_lli_tables start\n",
+ current->pid);
+ if (num_table_entries == 0) {
+ dev_dbg(&sep->pdev->dev, "[PID%d] no table to print\n",
+ current->pid);
+ return;
+ }
+
+ while ((unsigned long) lli_table_ptr->bus_address != 0xffffffff) {
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] lli table %08lx, "
+ "table_data_size is (hex) %lx\n",
+ current->pid, table_count, table_data_size);
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] num_table_entries is (hex) %lx\n",
+ current->pid, num_table_entries);
+
+ /* Print entries of the table (without info entry) */
+ for (entries_count = 0; entries_count < num_table_entries;
+ entries_count++, lli_table_ptr++) {
+
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] lli_table_ptr address is %08lx\n",
+ current->pid,
+ (unsigned long) lli_table_ptr);
+
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] phys address is %08lx "
+ "block size is (hex) %x\n", current->pid,
+ (unsigned long)lli_table_ptr->bus_address,
+ lli_table_ptr->block_size);
+ }
+
+ /* Point to the info entry */
+ lli_table_ptr--;
+
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] phys lli_table_ptr->block_size "
+ "is (hex) %x\n",
+ current->pid,
+ lli_table_ptr->block_size);
+
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] phys lli_table_ptr->physical_address "
+ "is %08lx\n",
+ current->pid,
+ (unsigned long)lli_table_ptr->bus_address);
+
+ table_data_size = lli_table_ptr->block_size & 0xffffff;
+ num_table_entries = (lli_table_ptr->block_size >> 24) & 0xff;
+
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] phys table_data_size is "
+ "(hex) %lx num_table_entries is"
+ " %lx bus_address is%lx\n",
+ current->pid,
+ table_data_size,
+ num_table_entries,
+ (unsigned long)lli_table_ptr->bus_address);
+
+ if ((unsigned long)lli_table_ptr->bus_address != 0xffffffff)
+ lli_table_ptr = (struct sep_lli_entry *)
+ sep_shared_bus_to_virt(sep,
+ (unsigned long)lli_table_ptr->bus_address);
+
+ table_count++;
+ }
+ dev_dbg(&sep->pdev->dev, "[PID%d] sep_debug_print_lli_tables end\n",
+ current->pid);
+#endif
+}
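+
+/*
+ * Editor's sketch (not part of the original driver): the info-entry
+ * packing walked by the printer above: the low 24 bits of block_size
+ * carry the table data size and the high 8 bits the entry count.
+ * Hypothetical helpers:
+ */
+static inline u32 sep_info_entry_pack(u32 num_entries, u32 data_size)
+{
+ return (num_entries << 24) | (data_size & 0xffffff);
+}
+
+static inline void sep_info_entry_unpack(u32 block_size,
+ u32 *num_entries, u32 *data_size)
+{
+ *num_entries = (block_size >> 24) & 0xff;
+ *data_size = block_size & 0xffffff;
+}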
+
+/**
+ * sep_prepare_empty_lli_table - create a blank LLI table
+ * @sep: pointer to struct sep_device
+ * @lli_table_addr_ptr: pointer to lli table
+ * @num_entries_ptr: pointer to number of entries
+ * @table_data_size_ptr: point to table data size
+ * @dmatables_region: Optional buffer for DMA tables
+ * @dma_ctx: DMA context
+ *
+ * This function creates empty lli tables when there is no data
+ */
+static void sep_prepare_empty_lli_table(struct sep_device *sep,
+ dma_addr_t *lli_table_addr_ptr,
+ u32 *num_entries_ptr,
+ u32 *table_data_size_ptr,
+ void **dmatables_region,
+ struct sep_dma_context *dma_ctx)
+{
+ struct sep_lli_entry *lli_table_ptr;
+
+ /* Find the area for new table */
+ lli_table_ptr =
+ (struct sep_lli_entry *)(sep->shared_addr +
+ SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
+ dma_ctx->num_lli_tables_created * sizeof(struct sep_lli_entry) *
+ SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);
+
+ if (dmatables_region && *dmatables_region)
+ lli_table_ptr = *dmatables_region;
+
+ lli_table_ptr->bus_address = 0;
+ lli_table_ptr->block_size = 0;
+
+ lli_table_ptr++;
+ lli_table_ptr->bus_address = 0xFFFFFFFF;
+ lli_table_ptr->block_size = 0;
+
+ /* Set the output parameter value */
+ *lli_table_addr_ptr = sep->shared_bus +
+ SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
+ dma_ctx->num_lli_tables_created *
+ sizeof(struct sep_lli_entry) *
+ SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
+
+ /* Set the num of entries and table data size for empty table */
+ *num_entries_ptr = 2;
+ *table_data_size_ptr = 0;
+
+ /* Update the number of created tables */
+ dma_ctx->num_lli_tables_created++;
+}
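+
+/*
+ * Editor's note: the empty table built above is exactly two entries, a
+ * null data entry {bus_address 0, block_size 0} followed by the
+ * terminating info entry {0xFFFFFFFF, 0}, reported to the caller as
+ * two entries covering zero data bytes.
+ */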
+
+/**
+ * sep_prepare_input_dma_table - prepare input DMA mappings
+ * @sep: pointer to struct sep_device
+ * @app_virt_addr: virtual address of the data buffer
+ * @data_size: size of the data buffer
+ * @block_size: block size of the operation
+ * @lli_table_ptr: returned bus address of the first lli table
+ * @num_entries_ptr: returned number of entries in the first table
+ * @table_data_size_ptr: returned data size of the first table
+ * @is_kva: set for kernel data (kernel crypto call)
+ * @dmatables_region: optional buffer for DMA tables
+ * @dma_ctx: DMA context
+ *
+ * This function prepares only the input DMA table for synchronous
+ * symmetric operations (HASH)
+ * Note that all bus addresses that are passed to the SEP
+ * are in 32 bit format; the SEP is a 32 bit device
+ */
+static int sep_prepare_input_dma_table(struct sep_device *sep,
+ unsigned long app_virt_addr,
+ u32 data_size,
+ u32 block_size,
+ dma_addr_t *lli_table_ptr,
+ u32 *num_entries_ptr,
+ u32 *table_data_size_ptr,
+ bool is_kva,
+ void **dmatables_region,
+ struct sep_dma_context *dma_ctx
+)
+{
+ int error = 0;
+ /* Pointer to the info entry of the table - the last entry */
+ struct sep_lli_entry *info_entry_ptr;
+ /* Array of pointers to page */
+ struct sep_lli_entry *lli_array_ptr;
+ /* Points to the first entry to be processed in the lli_in_array */
+ u32 current_entry = 0;
+ /* Num entries in the virtual buffer */
+ u32 sep_lli_entries = 0;
+ /* Lli table pointer */
+ struct sep_lli_entry *in_lli_table_ptr;
+ /* The total data in one table */
+ u32 table_data_size = 0;
+ /* Flag for last table */
+ u32 last_table_flag = 0;
+ /* Number of entries in lli table */
+ u32 num_entries_in_table = 0;
+ /* Next table address */
+ void *lli_table_alloc_addr = NULL;
+ void *dma_lli_table_alloc_addr = NULL;
+ void *dma_in_lli_table_ptr = NULL;
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] prepare intput dma "
+ "tbl data size: (hex) %x\n",
+ current->pid, data_size);
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] block_size is (hex) %x\n",
+ current->pid, block_size);
+
+ /* Initialize the pages pointers */
+ dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array = NULL;
+ dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_num_pages = 0;
+
+ /* Set the kernel address for first table to be allocated */
+ lli_table_alloc_addr = (void *)(sep->shared_addr +
+ SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
+ dma_ctx->num_lli_tables_created * sizeof(struct sep_lli_entry) *
+ SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);
+
+ if (data_size == 0) {
+ if (dmatables_region) {
+ error = sep_allocate_dmatables_region(sep,
+ dmatables_region,
+ dma_ctx,
+ 1);
+ if (error)
+ return error;
+ }
+ /* Special case - create empty table - 2 entries, zero data */
+ sep_prepare_empty_lli_table(sep, lli_table_ptr,
+ num_entries_ptr, table_data_size_ptr,
+ dmatables_region, dma_ctx);
+ goto update_dcb_counter;
+ }
+
+ /* Check if the pages are in Kernel Virtual Address layout */
+ if (is_kva == true)
+ error = sep_lock_kernel_pages(sep, app_virt_addr,
+ data_size, &lli_array_ptr, SEP_DRIVER_IN_FLAG,
+ dma_ctx);
+ else
+ /*
+ * Lock the pages of the user buffer
+ * and translate them to pages
+ */
+ error = sep_lock_user_pages(sep, app_virt_addr,
+ data_size, &lli_array_ptr, SEP_DRIVER_IN_FLAG,
+ dma_ctx);
+
+ if (error)
+ goto end_function;
+
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] output sep_in_num_pages is (hex) %x\n",
+ current->pid,
+ dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_num_pages);
+
+ current_entry = 0;
+ info_entry_ptr = NULL;
+
+ sep_lli_entries =
+ dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_num_pages;
+
+ dma_lli_table_alloc_addr = lli_table_alloc_addr;
+ if (dmatables_region) {
+ error = sep_allocate_dmatables_region(sep,
+ dmatables_region,
+ dma_ctx,
+ sep_lli_entries);
+ if (error)
+ return error;
+ lli_table_alloc_addr = *dmatables_region;
+ }
+
+ /* Loop until all the entries in the input array are processed */
+ while (current_entry < sep_lli_entries) {
+
+ /* Set the new input and output tables */
+ in_lli_table_ptr =
+ (struct sep_lli_entry *)lli_table_alloc_addr;
+ dma_in_lli_table_ptr =
+ (struct sep_lli_entry *)dma_lli_table_alloc_addr;
+
+ lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
+ SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
+ dma_lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
+ SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
+
+ if (dma_lli_table_alloc_addr >
+ ((void *)sep->shared_addr +
+ SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
+ SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES)) {
+
+ error = -ENOMEM;
+ goto end_function_error;
+
+ }
+
+ /* Update the number of created tables */
+ dma_ctx->num_lli_tables_created++;
+
+ /* Calculate the maximum size of data for input table */
+ table_data_size = sep_calculate_lli_table_max_size(sep,
+ &lli_array_ptr[current_entry],
+ (sep_lli_entries - current_entry),
+ &last_table_flag);
+
+ /*
+ * If this is not the last table -
+ * then align it to the block size
+ */
+ if (!last_table_flag)
+ table_data_size =
+ (table_data_size / block_size) * block_size;
+
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] output table_data_size is (hex) %x\n",
+ current->pid,
+ table_data_size);
+
+ /* Construct input lli table */
+ sep_build_lli_table(sep, &lli_array_ptr[current_entry],
+ in_lli_table_ptr,
+ &current_entry, &num_entries_in_table, table_data_size);
+
+ if (info_entry_ptr == NULL) {
+
+ /* Set the output parameters to physical addresses */
+ *lli_table_ptr = sep_shared_area_virt_to_bus(sep,
+ dma_in_lli_table_ptr);
+ *num_entries_ptr = num_entries_in_table;
+ *table_data_size_ptr = table_data_size;
+
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] output lli_table_in_ptr is %08lx\n",
+ current->pid,
+ (unsigned long)*lli_table_ptr);
+
+ } else {
+ /* Update the info entry of the previous in table */
+ info_entry_ptr->bus_address =
+ sep_shared_area_virt_to_bus(sep,
+ dma_in_lli_table_ptr);
+ info_entry_ptr->block_size =
+ ((num_entries_in_table) << 24) |
+ (table_data_size);
+ }
+ /* Save the pointer to the info entry of the current tables */
+ info_entry_ptr = in_lli_table_ptr + num_entries_in_table - 1;
+ }
+ /* Print input tables */
+ if (!dmatables_region) {
+ sep_debug_print_lli_tables(sep, (struct sep_lli_entry *)
+ sep_shared_area_bus_to_virt(sep, *lli_table_ptr),
+ *num_entries_ptr, *table_data_size_ptr);
+ }
+
+ /* The array of the pages */
+ kfree(lli_array_ptr);
+
+update_dcb_counter:
+ /* Update DCB counter */
+ dma_ctx->nr_dcb_creat++;
+ goto end_function;
+
+end_function_error:
+ /* Free all the allocated resources */
+ kfree(dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_array);
+ dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_array = NULL;
+ kfree(lli_array_ptr);
+ kfree(dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array);
+ dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array = NULL;
+
+end_function:
+ return error;
+
+}
+
+/**
+ * sep_construct_dma_tables_from_lli - prepare AES/DES mappings
+ * @sep: pointer to struct sep_device
+ * @lli_in_array: pointer to the input lli array
+ * @sep_in_lli_entries: number of entries in the input lli array
+ * @lli_out_array: pointer to the output lli array
+ * @sep_out_lli_entries: number of entries in the output lli array
+ * @block_size: block size of the operation
+ * @lli_table_in_ptr: returned bus address of the first input table
+ * @lli_table_out_ptr: returned bus address of the first output table
+ * @in_num_entries_ptr: returned number of entries in the first input table
+ * @out_num_entries_ptr: returned number of entries in the first output table
+ * @table_data_size_ptr: returned data size of the first tables
+ *
+ * This function creates the input and output DMA tables for
+ * symmetric operations (AES/DES) according to the block
+ * size from LLI arrays
+ * Note that all bus addresses that are passed to the SEP
+ * are in 32 bit format; the SEP is a 32 bit device
+ */
+static int sep_construct_dma_tables_from_lli(
+ struct sep_device *sep,
+ struct sep_lli_entry *lli_in_array,
+ u32 sep_in_lli_entries,
+ struct sep_lli_entry *lli_out_array,
+ u32 sep_out_lli_entries,
+ u32 block_size,
+ dma_addr_t *lli_table_in_ptr,
+ dma_addr_t *lli_table_out_ptr,
+ u32 *in_num_entries_ptr,
+ u32 *out_num_entries_ptr,
+ u32 *table_data_size_ptr,
+ void **dmatables_region,
+ struct sep_dma_context *dma_ctx)
+{
+ /* Points to the area where next lli table can be allocated */
+ void *lli_table_alloc_addr = NULL;
+ /*
+ * Points to the area in shared region where next lli table
+ * can be allocated
+ */
+ void *dma_lli_table_alloc_addr = NULL;
+ /* Input lli table in dmatables_region or shared region */
+ struct sep_lli_entry *in_lli_table_ptr = NULL;
+ /* Input lli table location in the shared region */
+ struct sep_lli_entry *dma_in_lli_table_ptr = NULL;
+ /* Output lli table in dmatables_region or shared region */
+ struct sep_lli_entry *out_lli_table_ptr = NULL;
+ /* Output lli table location in the shared region */
+ struct sep_lli_entry *dma_out_lli_table_ptr = NULL;
+ /* Pointer to the info entry of the table - the last entry */
+ struct sep_lli_entry *info_in_entry_ptr = NULL;
+ /* Pointer to the info entry of the table - the last entry */
+ struct sep_lli_entry *info_out_entry_ptr = NULL;
+ /* Points to the first entry to be processed in the lli_in_array */
+ u32 current_in_entry = 0;
+ /* Points to the first entry to be processed in the lli_out_array */
+ u32 current_out_entry = 0;
+ /* Max size of the input table */
+ u32 in_table_data_size = 0;
+ /* Max size of the output table */
+ u32 out_table_data_size = 0;
+ /* Flag that signifies whether this is the last table to build */
+ u32 last_table_flag = 0;
+ /* The data size that should be in table */
+ u32 table_data_size = 0;
+ /* Number of entries in the input table */
+ u32 num_entries_in_table = 0;
+ /* Number of entries in the output table */
+ u32 num_entries_out_table = 0;
+
+ if (!dma_ctx) {
+ dev_warn(&sep->pdev->dev, "DMA context uninitialized\n");
+ return -EINVAL;
+ }
+
+ /* Initialize to point past the message area */
+ lli_table_alloc_addr = (void *)(sep->shared_addr +
+ SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
+ (dma_ctx->num_lli_tables_created *
+ (sizeof(struct sep_lli_entry) *
+ SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP)));
+ dma_lli_table_alloc_addr = lli_table_alloc_addr;
+
+ if (dmatables_region) {
+ /* 2 for both in+out table */
+ if (sep_allocate_dmatables_region(sep,
+ dmatables_region,
+ dma_ctx,
+ 2*sep_in_lli_entries))
+ return -ENOMEM;
+ lli_table_alloc_addr = *dmatables_region;
+ }
+
+ /* Loop until all the entries in the input array are processed */
+ while (current_in_entry < sep_in_lli_entries) {
+ /* Set the new input and output tables */
+ in_lli_table_ptr =
+ (struct sep_lli_entry *)lli_table_alloc_addr;
+ dma_in_lli_table_ptr =
+ (struct sep_lli_entry *)dma_lli_table_alloc_addr;
+
+ lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
+ SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
+ dma_lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
+ SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
+
+ /* Set the first output tables */
+ out_lli_table_ptr =
+ (struct sep_lli_entry *)lli_table_alloc_addr;
+ dma_out_lli_table_ptr =
+ (struct sep_lli_entry *)dma_lli_table_alloc_addr;
+
+ /* Check if the DMA table area limit was overrun */
+ if ((dma_lli_table_alloc_addr + sizeof(struct sep_lli_entry) *
+ SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP) >
+ ((void *)sep->shared_addr +
+ SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
+ SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES)) {
+
+ dev_warn(&sep->pdev->dev, "dma table limit overrun\n");
+ return -ENOMEM;
+ }
+
+ /* Update the number of the lli tables created */
+ dma_ctx->num_lli_tables_created += 2;
+
+ lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
+ SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
+ dma_lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
+ SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
+
+ /* Calculate the maximum size of data for input table */
+ in_table_data_size =
+ sep_calculate_lli_table_max_size(sep,
+ &lli_in_array[current_in_entry],
+ (sep_in_lli_entries - current_in_entry),
+ &last_table_flag);
+
+ /* Calculate the maximum size of data for output table */
+ out_table_data_size =
+ sep_calculate_lli_table_max_size(sep,
+ &lli_out_array[current_out_entry],
+ (sep_out_lli_entries - current_out_entry),
+ &last_table_flag);
+
+ if (!last_table_flag) {
+ in_table_data_size = (in_table_data_size /
+ block_size) * block_size;
+ out_table_data_size = (out_table_data_size /
+ block_size) * block_size;
+ }
+
+ table_data_size = in_table_data_size;
+ if (table_data_size > out_table_data_size)
+ table_data_size = out_table_data_size;
+
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] construct tables from lli"
+ " in_table_data_size is (hex) %x\n", current->pid,
+ in_table_data_size);
+
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] construct tables from lli"
+ "out_table_data_size is (hex) %x\n", current->pid,
+ out_table_data_size);
+
+ /* Construct input lli table */
+ sep_build_lli_table(sep, &lli_in_array[current_in_entry],
+ in_lli_table_ptr,
+ &current_in_entry,
+ &num_entries_in_table,
+ table_data_size);
+
+ /* Construct output lli table */
+ sep_build_lli_table(sep, &lli_out_array[current_out_entry],
+ out_lli_table_ptr,
+ &current_out_entry,
+ &num_entries_out_table,
+ table_data_size);
+
+ /* If info entry is null - this is the first table built */
+ if (info_in_entry_ptr == NULL) {
+ /* Set the output parameters to physical addresses */
+ *lli_table_in_ptr =
+ sep_shared_area_virt_to_bus(sep, dma_in_lli_table_ptr);
+
+ *in_num_entries_ptr = num_entries_in_table;
+
+ *lli_table_out_ptr =
+ sep_shared_area_virt_to_bus(sep,
+ dma_out_lli_table_ptr);
+
+ *out_num_entries_ptr = num_entries_out_table;
+ *table_data_size_ptr = table_data_size;
+
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] output lli_table_in_ptr is %08lx\n",
+ current->pid,
+ (unsigned long)*lli_table_in_ptr);
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] output lli_table_out_ptr is %08lx\n",
+ current->pid,
+ (unsigned long)*lli_table_out_ptr);
+ } else {
+ /* Update the info entry of the previous in table */
+ info_in_entry_ptr->bus_address =
+ sep_shared_area_virt_to_bus(sep,
+ dma_in_lli_table_ptr);
+
+ info_in_entry_ptr->block_size =
+ ((num_entries_in_table) << 24) |
+ (table_data_size);
+
+ /* Update the info entry of the previous out table */
+ info_out_entry_ptr->bus_address =
+ sep_shared_area_virt_to_bus(sep,
+ dma_out_lli_table_ptr);
+
+ info_out_entry_ptr->block_size =
+ ((num_entries_out_table) << 24) |
+ (table_data_size);
+
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] output lli_table_in_ptr:%08lx %08x\n",
+ current->pid,
+ (unsigned long)info_in_entry_ptr->bus_address,
+ info_in_entry_ptr->block_size);
+
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] output lli_table_out_ptr:"
+ "%08lx %08x\n",
+ current->pid,
+ (unsigned long)info_out_entry_ptr->bus_address,
+ info_out_entry_ptr->block_size);
+ }
+
+ /* Save the pointer to the info entry of the current tables */
+ info_in_entry_ptr = in_lli_table_ptr +
+ num_entries_in_table - 1;
+ info_out_entry_ptr = out_lli_table_ptr +
+ num_entries_out_table - 1;
+
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] output num_entries_out_table is %x\n",
+ current->pid,
+ (u32)num_entries_out_table);
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] output info_in_entry_ptr is %lx\n",
+ current->pid,
+ (unsigned long)info_in_entry_ptr);
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] output info_out_entry_ptr is %lx\n",
+ current->pid,
+ (unsigned long)info_out_entry_ptr);
+ }
+
+ /* Print input tables */
+ if (!dmatables_region) {
+ sep_debug_print_lli_tables(
+ sep,
+ (struct sep_lli_entry *)
+ sep_shared_area_bus_to_virt(sep, *lli_table_in_ptr),
+ *in_num_entries_ptr,
+ *table_data_size_ptr);
+ }
+
+ /* Print output tables */
+ if (!dmatables_region) {
+ sep_debug_print_lli_tables(
+ sep,
+ (struct sep_lli_entry *)
+ sep_shared_area_bus_to_virt(sep, *lli_table_out_ptr),
+ *out_num_entries_ptr,
+ *table_data_size_ptr);
+ }
+
+ return 0;
+}
+
+/**
+ * sep_prepare_input_output_dma_table - prepare DMA I/O table
+ * @sep: pointer to struct sep_device
+ * @app_virt_in_addr: virtual address of the input buffer
+ * @app_virt_out_addr: virtual address of the output buffer
+ * @data_size: size of the data
+ * @block_size: block size of the operation
+ * @lli_table_in_ptr: returned bus address of the first input table
+ * @lli_table_out_ptr: returned bus address of the first output table
+ * @in_num_entries_ptr: returned number of entries in the first input table
+ * @out_num_entries_ptr: returned number of entries in the first output table
+ * @table_data_size_ptr: returned data size of the first tables
+ * @is_kva: set for kernel data; used only for kernel crypto module
+ * @dmatables_region: optional buffer for DMA tables
+ * @dma_ctx: DMA context
+ *
+ * This function builds input and output DMA tables for synchronous
+ * symmetric operations (AES, DES, HASH). It also checks that each table
+ * is of the modular block size
+ * Note that all bus addresses that are passed to the SEP
+ * are in 32 bit format; the SEP is a 32 bit device
+ */
+static int sep_prepare_input_output_dma_table(struct sep_device *sep,
+ unsigned long app_virt_in_addr,
+ unsigned long app_virt_out_addr,
+ u32 data_size,
+ u32 block_size,
+ dma_addr_t *lli_table_in_ptr,
+ dma_addr_t *lli_table_out_ptr,
+ u32 *in_num_entries_ptr,
+ u32 *out_num_entries_ptr,
+ u32 *table_data_size_ptr,
+ bool is_kva,
+ void **dmatables_region,
+ struct sep_dma_context *dma_ctx)
+
+{
+ int error = 0;
+ /* Array of pointers of page */
+ struct sep_lli_entry *lli_in_array;
+ /* Array of pointers of page */
+ struct sep_lli_entry *lli_out_array;
+
+ if (!dma_ctx) {
+ error = -EINVAL;
+ goto end_function;
+ }
+
+ if (data_size == 0) {
+ /* Prepare empty table for input and output */
+ if (dmatables_region) {
+ error = sep_allocate_dmatables_region(
+ sep,
+ dmatables_region,
+ dma_ctx,
+ 2);
+ if (error)
+ goto end_function;
+ }
+ sep_prepare_empty_lli_table(sep, lli_table_in_ptr,
+ in_num_entries_ptr, table_data_size_ptr,
+ dmatables_region, dma_ctx);
+
+ sep_prepare_empty_lli_table(sep, lli_table_out_ptr,
+ out_num_entries_ptr, table_data_size_ptr,
+ dmatables_region, dma_ctx);
+
+ goto update_dcb_counter;
+ }
+
+ /* Initialize the pages pointers */
+ dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array = NULL;
+ dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_page_array = NULL;
+
+ /* Lock the pages of the buffers and build the lli arrays */
+ if (is_kva == true) {
+ dev_dbg(&sep->pdev->dev, "[PID%d] Locking kernel input pages\n",
+ current->pid);
+ error = sep_lock_kernel_pages(sep, app_virt_in_addr,
+ data_size, &lli_in_array, SEP_DRIVER_IN_FLAG,
+ dma_ctx);
+ if (error) {
+ dev_warn(&sep->pdev->dev,
+ "[PID%d] sep_lock_kernel_pages for input "
+ "virtual buffer failed\n", current->pid);
+
+ goto end_function;
+ }
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] Locking kernel output pages\n",
+ current->pid);
+ error = sep_lock_kernel_pages(sep, app_virt_out_addr,
+ data_size, &lli_out_array, SEP_DRIVER_OUT_FLAG,
+ dma_ctx);
+
+ if (error) {
+ dev_warn(&sep->pdev->dev,
+ "[PID%d] sep_lock_kernel_pages for output "
+ "virtual buffer failed\n", current->pid);
+
+ goto end_function_free_lli_in;
+ }
+
+ }
+
+ else {
+ dev_dbg(&sep->pdev->dev, "[PID%d] Locking user input pages\n",
+ current->pid);
+ error = sep_lock_user_pages(sep, app_virt_in_addr,
+ data_size, &lli_in_array, SEP_DRIVER_IN_FLAG,
+ dma_ctx);
+ if (error) {
+ dev_warn(&sep->pdev->dev,
+ "[PID%d] sep_lock_user_pages for input "
+ "virtual buffer failed\n", current->pid);
+
+ goto end_function;
+ }
+
+ if (dma_ctx->secure_dma == true) {
+ /* secure_dma requires use of non-accessible memory */
+ dev_dbg(&sep->pdev->dev, "[PID%d] in secure_dma\n",
+ current->pid);
+ error = sep_lli_table_secure_dma(sep,
+ app_virt_out_addr, data_size, &lli_out_array,
+ SEP_DRIVER_OUT_FLAG, dma_ctx);
+ if (error) {
+ dev_warn(&sep->pdev->dev,
+ "[PID%d] secure dma table setup "
+ " for output virtual buffer failed\n",
+ current->pid);
+
+ goto end_function_free_lli_in;
+ }
+ } else {
+ /* For normal, non-secure dma */
+ dev_dbg(&sep->pdev->dev, "[PID%d] not in secure_dma\n",
+ current->pid);
+
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] Locking user output pages\n",
+ current->pid);
+
+ error = sep_lock_user_pages(sep, app_virt_out_addr,
+ data_size, &lli_out_array, SEP_DRIVER_OUT_FLAG,
+ dma_ctx);
+
+ if (error) {
+ dev_warn(&sep->pdev->dev,
+ "[PID%d] sep_lock_user_pages"
+ " for output virtual buffer failed\n",
+ current->pid);
+
+ goto end_function_free_lli_in;
+ }
+ }
+ }
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] After lock; prep input output dma "
+ "table sep_in_num_pages is (hex) %x\n", current->pid,
+ dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_num_pages);
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] sep_out_num_pages is (hex) %x\n",
+ current->pid,
+ dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_num_pages);
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP"
+ " is (hex) %x\n", current->pid,
+ SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);
+
+ /* Call the function that creates the tables from the lli arrays */
+ dev_dbg(&sep->pdev->dev, "[PID%d] calling create table from lli\n",
+ current->pid);
+ error = sep_construct_dma_tables_from_lli(
+ sep, lli_in_array,
+ dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].
+ in_num_pages,
+ lli_out_array,
+ dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].
+ out_num_pages,
+ block_size, lli_table_in_ptr, lli_table_out_ptr,
+ in_num_entries_ptr, out_num_entries_ptr,
+ table_data_size_ptr, dmatables_region, dma_ctx);
+
+ if (error) {
+ dev_warn(&sep->pdev->dev,
+ "[PID%d] sep_construct_dma_tables_from_lli failed\n",
+ current->pid);
+ goto end_function_with_error;
+ }
+
+ kfree(lli_out_array);
+ kfree(lli_in_array);
+
+update_dcb_counter:
+ /* Update DCB counter */
+ dma_ctx->nr_dcb_creat++;
+
+ goto end_function;
+
+end_function_with_error:
+ kfree(dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_map_array);
+ dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_map_array = NULL;
+ kfree(dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_page_array);
+ dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_page_array = NULL;
+ kfree(lli_out_array);
+
+
+end_function_free_lli_in:
+ kfree(dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_array);
+ dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_array = NULL;
+ kfree(dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array);
+ dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array = NULL;
+ kfree(lli_in_array);
+
+end_function:
+
+ return error;
+
+}
+
+/**
+ * sep_prepare_input_output_dma_table_in_dcb - prepare control blocks
+ * @sep: pointer to struct sep_device
+ * @app_in_address: unsigned long; for data buffer in (user space)
+ * @app_out_address: unsigned long; for data buffer out (user space)
+ * @data_in_size: u32; for size of data
+ * @block_size: u32; for block size
+ * @tail_block_size: u32; for size of tail block
+ * @isapplet: bool; to indicate external app
+ * @is_kva: bool; kernel buffer; only used for kernel crypto module
+ * @secure_dma: indicates whether this is secure_dma using IMR
+ * @dcb_region: DCB region to prepare, or NULL to use the shared area
+ * @dmatables_region: DMA tables region, or NULL to use the shared area
+ * @dma_ctx: DMA context for the transaction
+ * @src_sg: source scatterlist (kernel crypto only)
+ * @dst_sg: destination scatterlist (kernel crypto only)
+ *
+ * This function prepares the linked DMA tables and puts the
+ * address of the linked list of tables into a DCB (data control
+ * block), the address of which is known to the SEP hardware
+ * Note that all bus addresses that are passed to the SEP
+ * are in 32 bit format; the SEP is a 32 bit device
+ */
+int sep_prepare_input_output_dma_table_in_dcb(struct sep_device *sep,
+ unsigned long app_in_address,
+ unsigned long app_out_address,
+ u32 data_in_size,
+ u32 block_size,
+ u32 tail_block_size,
+ bool isapplet,
+ bool is_kva,
+ bool secure_dma,
+ struct sep_dcblock *dcb_region,
+ void **dmatables_region,
+ struct sep_dma_context **dma_ctx,
+ struct scatterlist *src_sg,
+ struct scatterlist *dst_sg)
+{
+ int error = 0;
+ /* Size of tail */
+ u32 tail_size = 0;
+ /* Address of the created DCB table */
+ struct sep_dcblock *dcb_table_ptr = NULL;
+ /* The physical address of the first input DMA table */
+ dma_addr_t in_first_mlli_address = 0;
+ /* Number of entries in the first input DMA table */
+ u32 in_first_num_entries = 0;
+ /* The physical address of the first output DMA table */
+ dma_addr_t out_first_mlli_address = 0;
+ /* Number of entries in the first output DMA table */
+ u32 out_first_num_entries = 0;
+ /* Data in the first input/output table */
+ u32 first_data_size = 0;
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] app_in_address %lx\n",
+ current->pid, app_in_address);
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] app_out_address %lx\n",
+ current->pid, app_out_address);
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] data_in_size %x\n",
+ current->pid, data_in_size);
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] block_size %x\n",
+ current->pid, block_size);
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] tail_block_size %x\n",
+ current->pid, tail_block_size);
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] isapplet %x\n",
+ current->pid, isapplet);
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] is_kva %x\n",
+ current->pid, is_kva);
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] src_sg %p\n",
+ current->pid, src_sg);
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] dst_sg %p\n",
+ current->pid, dst_sg);
+
+ if (!dma_ctx) {
+ dev_warn(&sep->pdev->dev, "[PID%d] no DMA context pointer\n",
+ current->pid);
+ error = -EINVAL;
+ goto end_function;
+ }
+
+ if (*dma_ctx) {
+ /* In case there are multiple DCBs for this transaction */
+ dev_dbg(&sep->pdev->dev, "[PID%d] DMA context already set\n",
+ current->pid);
+ } else {
+ *dma_ctx = kzalloc(sizeof(**dma_ctx), GFP_KERNEL);
+ if (!(*dma_ctx)) {
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] Not enough memory for DMA context\n",
+ current->pid);
+ error = -ENOMEM;
+ goto end_function;
+ }
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] Created DMA context addr at 0x%p\n",
+ current->pid, *dma_ctx);
+ }
+
+ (*dma_ctx)->secure_dma = secure_dma;
+
+ /* these are for kernel crypto only */
+ (*dma_ctx)->src_sg = src_sg;
+ (*dma_ctx)->dst_sg = dst_sg;
+
+ if ((*dma_ctx)->nr_dcb_creat == SEP_MAX_NUM_SYNC_DMA_OPS) {
+ /* No more DCBs to allocate */
+ dev_dbg(&sep->pdev->dev, "[PID%d] no more DCBs available\n",
+ current->pid);
+ error = -ENOSPC;
+ goto end_function_error;
+ }
+
+ /* Allocate new DCB */
+ if (dcb_region) {
+ dcb_table_ptr = dcb_region;
+ } else {
+ dcb_table_ptr = (struct sep_dcblock *)(sep->shared_addr +
+ SEP_DRIVER_SYSTEM_DCB_MEMORY_OFFSET_IN_BYTES +
+ ((*dma_ctx)->nr_dcb_creat *
+ sizeof(struct sep_dcblock)));
+ }
+
+ /* Set the default values in the DCB */
+ dcb_table_ptr->input_mlli_address = 0;
+ dcb_table_ptr->input_mlli_num_entries = 0;
+ dcb_table_ptr->input_mlli_data_size = 0;
+ dcb_table_ptr->output_mlli_address = 0;
+ dcb_table_ptr->output_mlli_num_entries = 0;
+ dcb_table_ptr->output_mlli_data_size = 0;
+ dcb_table_ptr->tail_data_size = 0;
+ dcb_table_ptr->out_vr_tail_pt = 0;
+
+ if (isapplet == true) {
+
+ /* Check if there is enough data for DMA operation */
+ if (data_in_size < SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE) {
+ if (is_kva == true) {
+ error = -ENODEV;
+ goto end_function_error;
+ } else {
+ if (copy_from_user(dcb_table_ptr->tail_data,
+ (void __user *)app_in_address,
+ data_in_size)) {
+ error = -EFAULT;
+ goto end_function_error;
+ }
+ }
+
+ dcb_table_ptr->tail_data_size = data_in_size;
+
+ /* Set the output user-space address for mem2mem op */
+ if (app_out_address)
+ dcb_table_ptr->out_vr_tail_pt =
+ (aligned_u64)app_out_address;
+
+ /*
+ * Update both data length parameters in order to avoid
+ * second data copy and allow building of empty mlli
+ * tables
+ */
+ tail_size = 0x0;
+ data_in_size = 0x0;
+
+ } else {
+ if (!app_out_address) {
+ tail_size = data_in_size % block_size;
+ if (!tail_size) {
+ if (tail_block_size == block_size)
+ tail_size = block_size;
+ }
+ } else {
+ tail_size = 0;
+ }
+ }
+ if (tail_size) {
+ if (tail_size > sizeof(dcb_table_ptr->tail_data)) {
+ error = -EINVAL;
+ goto end_function_error;
+ }
+ if (is_kva == true) {
+ error = -ENODEV;
+ goto end_function_error;
+ } else {
+ /* We have tail data - copy it to DCB */
+ if (copy_from_user(dcb_table_ptr->tail_data,
+ (void __user *)(app_in_address +
+ data_in_size - tail_size), tail_size)) {
+ error = -EFAULT;
+ goto end_function_error;
+ }
+ }
+ if (app_out_address)
+ /*
+ * Calculate the output address
+ * according to tail data size
+ */
+ dcb_table_ptr->out_vr_tail_pt =
+ (aligned_u64)app_out_address +
+ data_in_size - tail_size;
+
+ /* Save the real tail data size */
+ dcb_table_ptr->tail_data_size = tail_size;
+ /*
+ * Update the data size without the tail
+ * data size, i.e. the data handled by the DMA
+ */
+ data_in_size = (data_in_size - tail_size);
+ }
+ }
+ /* Check if we need to build only input table or input/output */
+ if (app_out_address) {
+ /* Prepare input/output tables */
+ error = sep_prepare_input_output_dma_table(sep,
+ app_in_address,
+ app_out_address,
+ data_in_size,
+ block_size,
+ &in_first_mlli_address,
+ &out_first_mlli_address,
+ &in_first_num_entries,
+ &out_first_num_entries,
+ &first_data_size,
+ is_kva,
+ dmatables_region,
+ *dma_ctx);
+ } else {
+ /* Prepare input tables */
+ error = sep_prepare_input_dma_table(sep,
+ app_in_address,
+ data_in_size,
+ block_size,
+ &in_first_mlli_address,
+ &in_first_num_entries,
+ &first_data_size,
+ is_kva,
+ dmatables_region,
+ *dma_ctx);
+ }
+
+ if (error) {
+ dev_warn(&sep->pdev->dev,
+ "prepare DMA table call failed "
+ "from prepare DCB call\n");
+ goto end_function_error;
+ }
+
+ /* Set the DCB values */
+ dcb_table_ptr->input_mlli_address = in_first_mlli_address;
+ dcb_table_ptr->input_mlli_num_entries = in_first_num_entries;
+ dcb_table_ptr->input_mlli_data_size = first_data_size;
+ dcb_table_ptr->output_mlli_address = out_first_mlli_address;
+ dcb_table_ptr->output_mlli_num_entries = out_first_num_entries;
+ dcb_table_ptr->output_mlli_data_size = first_data_size;
+
+ goto end_function;
+
+end_function_error:
+ kfree(*dma_ctx);
+ *dma_ctx = NULL;
+
+end_function:
+ return error;
+
+}
+
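+/*
+ * Worked example of the applet tail handling above, with illustrative
+ * numbers only: when no output address is given, data_in_size = 100
+ * and block_size = 16 give tail_size = 100 % 16 = 4, so the last 4
+ * bytes are copied into dcb_table_ptr->tail_data and the DMA tables
+ * are built for the remaining 96 bytes. If data_in_size is an exact
+ * multiple of block_size and tail_block_size == block_size, one full
+ * block is treated as the tail instead.
+ */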
+
+/**
+ * sep_free_dma_tables_and_dcb - free DMA tables and DCBs
+ * @sep: pointer to struct sep_device
+ * @isapplet: indicates external application (used for kernel access)
+ * @is_kva: indicates kernel addresses (only used for kernel crypto)
+ * @dma_ctx: DMA context whose tables and DCBs are freed
+ *
+ * This function frees the DMA tables and DCB
+ */
+static int sep_free_dma_tables_and_dcb(struct sep_device *sep, bool isapplet,
+ bool is_kva, struct sep_dma_context **dma_ctx)
+{
+ struct sep_dcblock *dcb_table_ptr;
+ unsigned long pt_hold;
+ void *tail_pt;
+
+ int i = 0;
+ int error = 0;
+ int error_temp = 0;
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] sep_free_dma_tables_and_dcb\n",
+ current->pid);
+
+ if (dma_ctx && *dma_ctx &&
+ ((*dma_ctx)->secure_dma == false) && (isapplet == true)) {
+ dev_dbg(&sep->pdev->dev, "[PID%d] handling applet\n",
+ current->pid);
+
+ /* Tail stuff is only for non secure_dma */
+ /* Set pointer to first DCB table */
+ dcb_table_ptr = (struct sep_dcblock *)
+ (sep->shared_addr +
+ SEP_DRIVER_SYSTEM_DCB_MEMORY_OFFSET_IN_BYTES);
+
+ /*
+ * Go over each DCB and see if
+ * tail pointer must be updated
+ */
+ for (i = 0; dma_ctx && *dma_ctx &&
+ i < (*dma_ctx)->nr_dcb_creat; i++, dcb_table_ptr++) {
+ if (dcb_table_ptr->out_vr_tail_pt) {
+ pt_hold = (unsigned long)dcb_table_ptr->
+ out_vr_tail_pt;
+ tail_pt = (void *)pt_hold;
+ if (is_kva == true) {
+ error = -ENODEV;
+ break;
+ } else {
+ error_temp = copy_to_user(
+ (void __user *)tail_pt,
+ dcb_table_ptr->tail_data,
+ dcb_table_ptr->tail_data_size);
+ }
+ if (error_temp) {
+ /* Release the DMA resource */
+ error = -EFAULT;
+ break;
+ }
+ }
+ }
+ }
+
+ /* Free the output pages, if any */
+ sep_free_dma_table_data_handler(sep, dma_ctx);
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] sep_free_dma_tables_and_dcb end\n",
+ current->pid);
+
+ return error;
+}
+
+/**
+ * sep_prepare_dcb_handler - prepare a control block
+ * @sep: pointer to struct sep_device
+ * @arg: pointer to user parameters
+ * @secure_dma: indicates whether we are using secure_dma on the IMR
+ * @dma_ctx: DMA context for the transaction
+ *
+ * This function copies the DCB build parameters from user space and
+ * prepares the DCB and the DMA tables for the transaction.
+ */
+static int sep_prepare_dcb_handler(struct sep_device *sep, unsigned long arg,
+ bool secure_dma,
+ struct sep_dma_context **dma_ctx)
+{
+ int error;
+ /* Command arguments */
+ static struct build_dcb_struct command_args;
+
+ /* Get the command arguments */
+ if (copy_from_user(&command_args, (void __user *)arg,
+ sizeof(struct build_dcb_struct))) {
+ error = -EFAULT;
+ goto end_function;
+ }
+
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] prep dcb handler app_in_address is %08llx\n",
+ current->pid, command_args.app_in_address);
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] app_out_address is %08llx\n",
+ current->pid, command_args.app_out_address);
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] data_size is %x\n",
+ current->pid, command_args.data_in_size);
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] block_size is %x\n",
+ current->pid, command_args.block_size);
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] tail block_size is %x\n",
+ current->pid, command_args.tail_block_size);
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] is_applet is %x\n",
+ current->pid, command_args.is_applet);
+
+ if (!command_args.app_in_address) {
+ dev_warn(&sep->pdev->dev,
+ "[PID%d] null app_in_address\n", current->pid);
+ error = -EINVAL;
+ goto end_function;
+ }
+
+ error = sep_prepare_input_output_dma_table_in_dcb(sep,
+ (unsigned long)command_args.app_in_address,
+ (unsigned long)command_args.app_out_address,
+ command_args.data_in_size, command_args.block_size,
+ command_args.tail_block_size,
+ command_args.is_applet, false,
+ secure_dma, NULL, NULL, dma_ctx, NULL, NULL);
+
+end_function:
+ return error;
+
+}
+
+/**
+ * sep_free_dcb_handler - free control block resources
+ * @sep: pointer to struct sep_device
+ *
+ * This function frees the DCB resources and updates the needed
+ * user-space buffers.
+ */
+static int sep_free_dcb_handler(struct sep_device *sep,
+ struct sep_dma_context **dma_ctx)
+{
+ if (!dma_ctx || !(*dma_ctx)) {
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] no dma context defined, nothing to free\n",
+ current->pid);
+ return -EINVAL;
+ }
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] free dcbs num of DCBs %x\n",
+ current->pid,
+ (*dma_ctx)->nr_dcb_creat);
+
+ return sep_free_dma_tables_and_dcb(sep, false, false, dma_ctx);
+}
+
+/**
+ * sep_ioctl - ioctl handler for sep device
+ * @filp: pointer to struct file
+ * @cmd: command
+ * @arg: pointer to argument structure
+ *
+ * Implement the ioctl methods available on the SEP device.
+ */
+static long sep_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ struct sep_private_data * const private_data = filp->private_data;
+ struct sep_call_status *call_status = &private_data->call_status;
+ struct sep_device *sep = private_data->device;
+ struct sep_dma_context **dma_ctx = &private_data->dma_ctx;
+ struct sep_queue_info **my_queue_elem = &private_data->my_queue_elem;
+ int error = 0;
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] ioctl cmd 0x%x\n",
+ current->pid, cmd);
+ dev_dbg(&sep->pdev->dev, "[PID%d] dma context addr 0x%p\n",
+ current->pid, *dma_ctx);
+
+ /* Make sure we own this device */
+ error = sep_check_transaction_owner(sep);
+ if (error) {
+ dev_dbg(&sep->pdev->dev, "[PID%d] ioctl pid is not owner\n",
+ current->pid);
+ goto end_function;
+ }
+
+ /* Check that sep_mmap has been called before */
+ if (0 == test_bit(SEP_LEGACY_MMAP_DONE_OFFSET,
+ &call_status->status)) {
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] mmap not called\n", current->pid);
+ error = -EPROTO;
+ goto end_function;
+ }
+
+ /* Check that the command is for SEP device */
+ if (_IOC_TYPE(cmd) != SEP_IOC_MAGIC_NUMBER) {
+ error = -ENOTTY;
+ goto end_function;
+ }
+
+ switch (cmd) {
+ case SEP_IOCSENDSEPCOMMAND:
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] SEP_IOCSENDSEPCOMMAND start\n",
+ current->pid);
+ if (1 == test_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET,
+ &call_status->status)) {
+ dev_warn(&sep->pdev->dev,
+ "[PID%d] send msg already done\n",
+ current->pid);
+ error = -EPROTO;
+ goto end_function;
+ }
+ /* Send command to SEP */
+ error = sep_send_command_handler(sep);
+ if (!error)
+ set_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET,
+ &call_status->status);
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] SEP_IOCSENDSEPCOMMAND end\n",
+ current->pid);
+ break;
+ case SEP_IOCENDTRANSACTION:
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] SEP_IOCENDTRANSACTION start\n",
+ current->pid);
+ error = sep_end_transaction_handler(sep, dma_ctx, call_status,
+ my_queue_elem);
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] SEP_IOCENDTRANSACTION end\n",
+ current->pid);
+ break;
+ case SEP_IOCPREPAREDCB:
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] SEP_IOCPREPAREDCB start\n",
+ current->pid);
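+ /* Fall through - the shared code below checks cmd */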
+ case SEP_IOCPREPAREDCB_SECURE_DMA:
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] SEP_IOCPREPAREDCB_SECURE_DMA start\n",
+ current->pid);
+ if (1 == test_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET,
+ &call_status->status)) {
+ dev_warn(&sep->pdev->dev,
+ "[PID%d] dcb prep needed before send msg\n",
+ current->pid);
+ error = -EPROTO;
+ goto end_function;
+ }
+
+ if (!arg) {
+ dev_warn(&sep->pdev->dev,
+ "[PID%d] dcb null arg\n", current->pid);
+ error = -EINVAL;
+ goto end_function;
+ }
+
+ if (cmd == SEP_IOCPREPAREDCB) {
+ /* No secure dma */
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] SEP_IOCPREPAREDCB (no secure_dma)\n",
+ current->pid);
+
+ error = sep_prepare_dcb_handler(sep, arg, false,
+ dma_ctx);
+ } else {
+ /* Secure dma */
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] SEP_IOC_POC (with secure_dma)\n",
+ current->pid);
+
+ error = sep_prepare_dcb_handler(sep, arg, true,
+ dma_ctx);
+ }
+ dev_dbg(&sep->pdev->dev, "[PID%d] dcb's end\n",
+ current->pid);
+ break;
+ case SEP_IOCFREEDCB:
+ dev_dbg(&sep->pdev->dev, "[PID%d] SEP_IOCFREEDCB start\n",
+ current->pid);
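+ /* Fall through - both commands free the DCBs the same way */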
+ case SEP_IOCFREEDCB_SECURE_DMA:
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] SEP_IOCFREEDCB_SECURE_DMA start\n",
+ current->pid);
+ error = sep_free_dcb_handler(sep, dma_ctx);
+ dev_dbg(&sep->pdev->dev, "[PID%d] SEP_IOCFREEDCB end\n",
+ current->pid);
+ break;
+ default:
+ error = -ENOTTY;
+ dev_dbg(&sep->pdev->dev, "[PID%d] default end\n",
+ current->pid);
+ break;
+ }
+
+end_function:
+ dev_dbg(&sep->pdev->dev, "[PID%d] ioctl end\n", current->pid);
+
+ return error;
+}
+
+/**
+ * sep_inthandler - interrupt handler for sep device
+ * @irq: interrupt
+ * @dev_id: device id
+ */
+static irqreturn_t sep_inthandler(int irq, void *dev_id)
+{
+ unsigned long lock_irq_flag;
+ u32 reg_val, reg_val2 = 0;
+ struct sep_device *sep = dev_id;
+ irqreturn_t int_error = IRQ_HANDLED;
+
+ /* Are we in power save? */
+#if defined(CONFIG_PM_RUNTIME) && defined(SEP_ENABLE_RUNTIME_PM)
+ if (sep->pdev->dev.power.runtime_status != RPM_ACTIVE) {
+ dev_dbg(&sep->pdev->dev, "interrupt during pwr save\n");
+ return IRQ_NONE;
+ }
+#endif
+
+ if (test_bit(SEP_WORKING_LOCK_BIT, &sep->in_use_flags) == 0) {
+ dev_dbg(&sep->pdev->dev, "interrupt while nobody using sep\n");
+ return IRQ_NONE;
+ }
+
+ /* Read the IRR register to check if this is SEP interrupt */
+ reg_val = sep_read_reg(sep, HW_HOST_IRR_REG_ADDR);
+
+ dev_dbg(&sep->pdev->dev, "sep int: IRR REG val: %x\n", reg_val);
+
+ if (reg_val & (0x1 << 13)) {
+
+ /* Lock and update the counter of reply messages */
+ spin_lock_irqsave(&sep->snd_rply_lck, lock_irq_flag);
+ sep->reply_ct++;
+ spin_unlock_irqrestore(&sep->snd_rply_lck, lock_irq_flag);
+
+ dev_dbg(&sep->pdev->dev, "sep int: send_ct %lx reply_ct %lx\n",
+ sep->send_ct, sep->reply_ct);
+
+ /* Is this a kernel client request */
+ if (sep->in_kernel) {
+ tasklet_schedule(&sep->finish_tasklet);
+ goto finished_interrupt;
+ }
+
+ /* Is this printf or daemon request? */
+ reg_val2 = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
+ dev_dbg(&sep->pdev->dev,
+ "SEP Interrupt - GPR2 is %08x\n", reg_val2);
+
+ clear_bit(SEP_WORKING_LOCK_BIT, &sep->in_use_flags);
+
+ if ((reg_val2 >> 30) & 0x1) {
+ dev_dbg(&sep->pdev->dev, "int: printf request\n");
+ } else if (reg_val2 >> 31) {
+ dev_dbg(&sep->pdev->dev, "int: daemon request\n");
+ } else {
+ dev_dbg(&sep->pdev->dev, "int: SEP reply\n");
+ wake_up(&sep->event_interrupt);
+ }
+ } else {
+ dev_dbg(&sep->pdev->dev, "int: not SEP interrupt\n");
+ int_error = IRQ_NONE;
+ }
+
+finished_interrupt:
+
+ if (int_error == IRQ_HANDLED)
+ sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, reg_val);
+
+ return int_error;
+}
+
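+/*
+ * A reading aid for the register decoding in sep_inthandler() (not
+ * driver code): bit 13 of the IRR register is the SEP-to-host
+ * interrupt line that the IMR leaves open; in SEP_HOST_GPR2, bit 31
+ * flags a daemon request, bit 30 a printf request, and the low 30
+ * bits carry the reply counter that is masked with 0x3FFFFFFF.
+ */
+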
+/**
+ * sep_reconfig_shared_area - reconfigure shared area
+ * @sep: pointer to struct sep_device
+ *
+ * Reconfigure the shared area between HOST and SEP - needed in case
+ * the DX_CC_Init function was called before the OS was loaded.
+ */
+static int sep_reconfig_shared_area(struct sep_device *sep)
+{
+ int ret_val;
+
+ /* used to limit waiting for SEP */
+ unsigned long end_time;
+
+ /* Send the new SHARED MESSAGE AREA to the SEP */
+ dev_dbg(&sep->pdev->dev, "reconfig shared; sending %08llx to sep\n",
+ (unsigned long long)sep->shared_bus);
+
+ sep_write_reg(sep, HW_HOST_HOST_SEP_GPR1_REG_ADDR, sep->shared_bus);
+
+ /* Poll for SEP response */
+ ret_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR1_REG_ADDR);
+
+ end_time = jiffies + (WAIT_TIME * HZ);
+
+ while ((time_before(jiffies, end_time)) && (ret_val != 0xffffffff) &&
+ (ret_val != sep->shared_bus))
+ ret_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR1_REG_ADDR);
+
+ /* Check the return value (register) */
+ if (ret_val != sep->shared_bus) {
+ dev_warn(&sep->pdev->dev, "could not reconfig shared area\n");
+ dev_warn(&sep->pdev->dev, "result was %x\n", ret_val);
+ ret_val = -ENOMEM;
+ } else
+ ret_val = 0;
+
+ dev_dbg(&sep->pdev->dev, "reconfig shared area end\n");
+
+ return ret_val;
+}
+
+/**
+ * sep_activate_dcb_dmatables_context - Takes DCB & DMA tables
+ * contexts into use
+ * @sep: SEP device
+ * @dcb_region: DCB region copy
+ * @dmatables_region: MLLI/DMA tables copy
+ * @dma_ctx: DMA context for current transaction
+ */
+ssize_t sep_activate_dcb_dmatables_context(struct sep_device *sep,
+ struct sep_dcblock **dcb_region,
+ void **dmatables_region,
+ struct sep_dma_context *dma_ctx)
+{
+ void *dmaregion_free_start = NULL;
+ void *dmaregion_free_end = NULL;
+ void *dcbregion_free_start = NULL;
+ void *dcbregion_free_end = NULL;
+ ssize_t error = 0;
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] activating dcb/dma region\n",
+ current->pid);
+
+ if (1 > dma_ctx->nr_dcb_creat) {
+ dev_warn(&sep->pdev->dev,
+ "[PID%d] invalid number of dcbs to activate 0x%08X\n",
+ current->pid, dma_ctx->nr_dcb_creat);
+ error = -EINVAL;
+ goto end_function;
+ }
+
+ dmaregion_free_start = sep->shared_addr
+ + SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES;
+ dmaregion_free_end = dmaregion_free_start
+ + SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES - 1;
+
+ if (dmaregion_free_start
+ + dma_ctx->dmatables_len > dmaregion_free_end) {
+ error = -ENOMEM;
+ goto end_function;
+ }
+ memcpy(dmaregion_free_start,
+ *dmatables_region,
+ dma_ctx->dmatables_len);
+ /* Free MLLI table copy */
+ kfree(*dmatables_region);
+ *dmatables_region = NULL;
+
+ /* Copy thread's DCB table copy to DCB table region */
+ dcbregion_free_start = sep->shared_addr +
+ SEP_DRIVER_SYSTEM_DCB_MEMORY_OFFSET_IN_BYTES;
+ dcbregion_free_end = dcbregion_free_start +
+ (SEP_MAX_NUM_SYNC_DMA_OPS *
+ sizeof(struct sep_dcblock)) - 1;
+
+ if (dcbregion_free_start
+ + (dma_ctx->nr_dcb_creat * sizeof(struct sep_dcblock))
+ > dcbregion_free_end) {
+ error = -ENOMEM;
+ goto end_function;
+ }
+
+ memcpy(dcbregion_free_start,
+ *dcb_region,
+ dma_ctx->nr_dcb_creat * sizeof(struct sep_dcblock));
+
+ /* Print the tables */
+ dev_dbg(&sep->pdev->dev, "activate: input table\n");
+ sep_debug_print_lli_tables(sep,
+ (struct sep_lli_entry *)sep_shared_area_bus_to_virt(sep,
+ (*dcb_region)->input_mlli_address),
+ (*dcb_region)->input_mlli_num_entries,
+ (*dcb_region)->input_mlli_data_size);
+
+ dev_dbg(&sep->pdev->dev, "activate: output table\n");
+ sep_debug_print_lli_tables(sep,
+ (struct sep_lli_entry *)sep_shared_area_bus_to_virt(sep,
+ (*dcb_region)->output_mlli_address),
+ (*dcb_region)->output_mlli_num_entries,
+ (*dcb_region)->output_mlli_data_size);
+
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] printing activated tables\n", current->pid);
+
+end_function:
+ kfree(*dmatables_region);
+ *dmatables_region = NULL;
+
+ kfree(*dcb_region);
+ *dcb_region = NULL;
+
+ return error;
+}
+
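+/*
+ * Sketch of the shared-area regions touched above (the offsets are
+ * the driver's own constants; the layout shown is a reading aid):
+ *
+ *	shared_addr + SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES
+ *		MLLI/DMA tables, up to SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES
+ *	shared_addr + SEP_DRIVER_SYSTEM_DCB_MEMORY_OFFSET_IN_BYTES
+ *		up to SEP_MAX_NUM_SYNC_DMA_OPS DCBs
+ */
+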
+/**
+ * sep_create_dcb_dmatables_context - Creates DCB & MLLI/DMA table context
+ * @sep: SEP device
+ * @dcb_region: DCB region buf to create for current transaction
+ * @dmatables_region: MLLI/DMA tables buf to create for current transaction
+ * @dma_ctx: DMA context buf to create for current transaction
+ * @user_dcb_args: User arguments for DCB/MLLI creation
+ * @num_dcbs: Number of DCBs to create
+ * @secure_dma: Indicate use of IMR restricted memory secure dma
+ */
+static ssize_t sep_create_dcb_dmatables_context(struct sep_device *sep,
+ struct sep_dcblock **dcb_region,
+ void **dmatables_region,
+ struct sep_dma_context **dma_ctx,
+ const struct build_dcb_struct __user *user_dcb_args,
+ const u32 num_dcbs, bool secure_dma)
+{
+ int error = 0;
+ int i = 0;
+ struct build_dcb_struct *dcb_args = NULL;
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] creating dcb/dma region\n",
+ current->pid);
+
+ if (!dcb_region || !dma_ctx || !dmatables_region || !user_dcb_args) {
+ error = -EINVAL;
+ goto end_function;
+ }
+
+ if (SEP_MAX_NUM_SYNC_DMA_OPS < num_dcbs) {
+ dev_warn(&sep->pdev->dev,
+ "[PID%d] invalid number of dcbs 0x%08X\n",
+ current->pid, num_dcbs);
+ error = -EINVAL;
+ goto end_function;
+ }
+
+ dcb_args = kzalloc(num_dcbs * sizeof(struct build_dcb_struct),
+ GFP_KERNEL);
+ if (!dcb_args) {
+ dev_warn(&sep->pdev->dev, "[PID%d] no memory for dcb args\n",
+ current->pid);
+ error = -ENOMEM;
+ goto end_function;
+ }
+
+ if (copy_from_user(dcb_args,
+ user_dcb_args,
+ num_dcbs * sizeof(struct build_dcb_struct))) {
+ error = -EINVAL;
+ goto end_function;
+ }
+
+ /* Allocate thread-specific memory for DCB */
+ *dcb_region = kzalloc(num_dcbs * sizeof(struct sep_dcblock),
+ GFP_KERNEL);
+ if (!(*dcb_region)) {
+ error = -ENOMEM;
+ goto end_function;
+ }
+
+ /* Prepare DCB and MLLI table into the allocated regions */
+ for (i = 0; i < num_dcbs; i++) {
+ error = sep_prepare_input_output_dma_table_in_dcb(sep,
+ (unsigned long)dcb_args[i].app_in_address,
+ (unsigned long)dcb_args[i].app_out_address,
+ dcb_args[i].data_in_size,
+ dcb_args[i].block_size,
+ dcb_args[i].tail_block_size,
+ dcb_args[i].is_applet,
+ false, secure_dma,
+ *dcb_region, dmatables_region,
+ dma_ctx,
+ NULL,
+ NULL);
+ if (error) {
+ dev_warn(&sep->pdev->dev,
+ "[PID%d] dma table creation failed\n",
+ current->pid);
+ goto end_function;
+ }
+
+ if (dcb_args[i].app_in_address != 0)
+ (*dma_ctx)->input_data_len += dcb_args[i].data_in_size;
+ }
+
+end_function:
+ kfree(dcb_args);
+ return error;
+
+}
+
+/**
+ * sep_create_dcb_dmatables_context_kernel - Creates DCB & MLLI/DMA table context
+ * for kernel crypto
+ * @sep: SEP device
+ * @dcb_region: DCB region buf to create for current transaction
+ * @dmatables_region: MLLI/DMA tables buf to create for current transaction
+ * @dma_ctx: DMA context buf to create for current transaction
+ * @dcb_data: Kernel-space arguments for DCB/MLLI creation
+ * @num_dcbs: Number of DCBs to create
+ * This does the same thing as sep_create_dcb_dmatables_context
+ * except that it is used only for the kernel crypto operation. It is
+ * separate because there is no user data involved; the dcb data structure
+ * is specific for kernel crypto (build_dcb_struct_kernel)
+ */
+int sep_create_dcb_dmatables_context_kernel(struct sep_device *sep,
+ struct sep_dcblock **dcb_region,
+ void **dmatables_region,
+ struct sep_dma_context **dma_ctx,
+ const struct build_dcb_struct_kernel *dcb_data,
+ const u32 num_dcbs)
+{
+ int error = 0;
+ int i = 0;
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] creating dcb/dma region\n",
+ current->pid);
+
+ if (!dcb_region || !dma_ctx || !dmatables_region || !dcb_data) {
+ error = -EINVAL;
+ goto end_function;
+ }
+
+ if (SEP_MAX_NUM_SYNC_DMA_OPS < num_dcbs) {
+ dev_warn(&sep->pdev->dev,
+ "[PID%d] invalid number of dcbs 0x%08X\n",
+ current->pid, num_dcbs);
+ error = -EINVAL;
+ goto end_function;
+ }
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] num_dcbs is %d\n",
+ current->pid, num_dcbs);
+
+ /* Allocate thread-specific memory for DCB */
+ *dcb_region = kzalloc(num_dcbs * sizeof(struct sep_dcblock),
+ GFP_KERNEL);
+ if (!(*dcb_region)) {
+ error = -ENOMEM;
+ goto end_function;
+ }
+
+ /* Prepare DCB and MLLI table into the allocated regions */
+ for (i = 0; i < num_dcbs; i++) {
+ error = sep_prepare_input_output_dma_table_in_dcb(sep,
+ (unsigned long)dcb_data->app_in_address,
+ (unsigned long)dcb_data->app_out_address,
+ dcb_data->data_in_size,
+ dcb_data->block_size,
+ dcb_data->tail_block_size,
+ dcb_data->is_applet,
+ true,
+ false,
+ *dcb_region, dmatables_region,
+ dma_ctx,
+ dcb_data->src_sg,
+ dcb_data->dst_sg);
+ if (error) {
+ dev_warn(&sep->pdev->dev,
+ "[PID%d] dma table creation failed\n",
+ current->pid);
+ goto end_function;
+ }
+ }
+
+end_function:
+ return error;
+
+}
+
+/**
+ * sep_activate_msgarea_context - Takes the message area context into use
+ * @sep: SEP device
+ * @msg_region: Message area context buf
+ * @msg_len: Message area context buffer size
+ */
+static ssize_t sep_activate_msgarea_context(struct sep_device *sep,
+ void **msg_region,
+ const size_t msg_len)
+{
+ dev_dbg(&sep->pdev->dev, "[PID%d] activating msg region\n",
+ current->pid);
+
+ if (!msg_region || !(*msg_region) ||
+ SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES < msg_len) {
+ dev_warn(&sep->pdev->dev,
+ "[PID%d] invalid act msgarea len 0x%08zX\n",
+ current->pid, msg_len);
+ return -EINVAL;
+ }
+
+ memcpy(sep->shared_addr, *msg_region, msg_len);
+
+ return 0;
+}
+
+/**
+ * sep_create_msgarea_context - Creates message area context
+ * @sep: SEP device
+ * @msg_region: Msg area region buf to create for current transaction
+ * @msg_user: Content for msg area region from user
+ * @msg_len: Message area size
+ */
+static ssize_t sep_create_msgarea_context(struct sep_device *sep,
+ void **msg_region,
+ const void __user *msg_user,
+ const size_t msg_len)
+{
+ int error = 0;
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] creating msg region\n",
+ current->pid);
+
+ if (!msg_region ||
+ !msg_user ||
+ SEP_DRIVER_MAX_MESSAGE_SIZE_IN_BYTES < msg_len ||
+ SEP_DRIVER_MIN_MESSAGE_SIZE_IN_BYTES > msg_len) {
+ dev_warn(&sep->pdev->dev,
+ "[PID%d] invalid creat msgarea len 0x%08zX\n",
+ current->pid, msg_len);
+ error = -EINVAL;
+ goto end_function;
+ }
+
+ /* Allocate thread-specific memory for message buffer */
+ *msg_region = kzalloc(msg_len, GFP_KERNEL);
+ if (!(*msg_region)) {
+ dev_warn(&sep->pdev->dev,
+ "[PID%d] no mem for msgarea context\n",
+ current->pid);
+ error = -ENOMEM;
+ goto end_function;
+ }
+
+ /* Copy input data to write() to allocated message buffer */
+ if (copy_from_user(*msg_region, msg_user, msg_len)) {
+ error = -EINVAL;
+ goto end_function;
+ }
+
+end_function:
+ if (error && msg_region) {
+ kfree(*msg_region);
+ *msg_region = NULL;
+ }
+
+ return error;
+}
+
+
+/**
+ * sep_read - Returns results of an operation for fastcall interface
+ * @filp: File pointer
+ * @buf_user: User buffer for storing results
+ * @count_user: User buffer size
+ * @offset: File offset, not supported
+ *
+ * The implementation does not support reading in chunks; all data must be
+ * consumed during a single read system call.
+ */
+static ssize_t sep_read(struct file *filp,
+ char __user *buf_user, size_t count_user,
+ loff_t *offset)
+{
+ struct sep_private_data * const private_data = filp->private_data;
+ struct sep_call_status *call_status = &private_data->call_status;
+ struct sep_device *sep = private_data->device;
+ struct sep_dma_context **dma_ctx = &private_data->dma_ctx;
+ struct sep_queue_info **my_queue_elem = &private_data->my_queue_elem;
+ ssize_t error = 0, error_tmp = 0;
+
+ /* Am I the process that owns the transaction? */
+ error = sep_check_transaction_owner(sep);
+ if (error) {
+ dev_dbg(&sep->pdev->dev, "[PID%d] read pid is not owner\n",
+ current->pid);
+ goto end_function;
+ }
+
+ /* Check that the user has called the necessary APIs */
+ if (0 == test_bit(SEP_FASTCALL_WRITE_DONE_OFFSET,
+ &call_status->status)) {
+ dev_warn(&sep->pdev->dev,
+ "[PID%d] fastcall write not called\n",
+ current->pid);
+ error = -EPROTO;
+ goto end_function_error;
+ }
+
+ if (!buf_user) {
+ dev_warn(&sep->pdev->dev,
+ "[PID%d] null user buffer\n",
+ current->pid);
+ error = -EINVAL;
+ goto end_function_error;
+ }
+
+
+ /* Wait for SEP to finish */
+ wait_event(sep->event_interrupt,
+ test_bit(SEP_WORKING_LOCK_BIT,
+ &sep->in_use_flags) == 0);
+
+ sep_dump_message(sep);
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] count_user = 0x%08zX\n",
+ current->pid, count_user);
+
+ /* In case user has allocated bigger buffer */
+ if (count_user > SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES)
+ count_user = SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES;
+
+ if (copy_to_user(buf_user, sep->shared_addr, count_user)) {
+ error = -EFAULT;
+ goto end_function_error;
+ }
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] read succeeded\n", current->pid);
+ error = count_user;
+
+end_function_error:
+ /* Copy possible tail data to user and free DCB and MLLIs */
+ error_tmp = sep_free_dcb_handler(sep, dma_ctx);
+ if (error_tmp)
+ dev_warn(&sep->pdev->dev, "[PID%d] dcb free failed\n",
+ current->pid);
+
+ /* End the transaction, wakeup pending ones */
+ error_tmp = sep_end_transaction_handler(sep, dma_ctx, call_status,
+ my_queue_elem);
+ if (error_tmp)
+ dev_warn(&sep->pdev->dev,
+ "[PID%d] ending transaction failed\n",
+ current->pid);
+
+end_function:
+ return error;
+}
+
+/**
+ * sep_fastcall_args_get - Gets fastcall params from user
+ * @sep: SEP device
+ * @args: Parameters buffer
+ * @buf_user: User buffer for operation parameters
+ * @count_user: User buffer size
+ */
+static inline ssize_t sep_fastcall_args_get(struct sep_device *sep,
+ struct sep_fastcall_hdr *args,
+ const char __user *buf_user,
+ const size_t count_user)
+{
+ ssize_t error = 0;
+ size_t actual_count = 0;
+
+ if (!buf_user) {
+ dev_warn(&sep->pdev->dev,
+ "[PID%d] null user buffer\n",
+ current->pid);
+ error = -EINVAL;
+ goto end_function;
+ }
+
+ if (count_user < sizeof(struct sep_fastcall_hdr)) {
+ dev_warn(&sep->pdev->dev,
+ "[PID%d] too small message size 0x%08zX\n",
+ current->pid, count_user);
+ error = -EINVAL;
+ goto end_function;
+ }
+
+
+ if (copy_from_user(args, buf_user, sizeof(struct sep_fastcall_hdr))) {
+ error = -EFAULT;
+ goto end_function;
+ }
+
+ if (SEP_FC_MAGIC != args->magic) {
+ dev_warn(&sep->pdev->dev,
+ "[PID%d] invalid fastcall magic 0x%08X\n",
+ current->pid, args->magic);
+ error = -EINVAL;
+ goto end_function;
+ }
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] fastcall hdr num of DCBs 0x%08X\n",
+ current->pid, args->num_dcbs);
+ dev_dbg(&sep->pdev->dev, "[PID%d] fastcall hdr msg len 0x%08X\n",
+ current->pid, args->msg_len);
+
+ if (SEP_DRIVER_MAX_MESSAGE_SIZE_IN_BYTES < args->msg_len ||
+ SEP_DRIVER_MIN_MESSAGE_SIZE_IN_BYTES > args->msg_len) {
+ dev_warn(&sep->pdev->dev,
+ "[PID%d] invalid message length\n",
+ current->pid);
+ error = -EINVAL;
+ goto end_function;
+ }
+
+ actual_count = sizeof(struct sep_fastcall_hdr)
+ + args->msg_len
+ + (args->num_dcbs * sizeof(struct build_dcb_struct));
+
+ if (actual_count != count_user) {
+ dev_warn(&sep->pdev->dev,
+ "[PID%d] inconsistent message "
+ "sizes 0x%08zX vs 0x%08zX\n",
+ current->pid, actual_count, count_user);
+ error = -EMSGSIZE;
+ goto end_function;
+ }
+
+end_function:
+ return error;
+}
+
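+/*
+ * Wire format implied by the size check in sep_fastcall_args_get()
+ * (a sketch, not a definition from a hardware spec):
+ *
+ *	struct sep_fastcall_hdr	hdr;
+ *	struct build_dcb_struct	dcb[hdr.num_dcbs];
+ *	u8			msg[hdr.msg_len];
+ */
+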
+/**
+ * sep_write - Starts an operation for fastcall interface
+ * @filp: File pointer
+ * @buf_user: User buffer for operation parameters
+ * @count_user: User buffer size
+ * @offset: File offset, not supported
+ *
+ * The implementation does not support writing in chunks;
+ * all data must be given during a single write system call.
+ */
+static ssize_t sep_write(struct file *filp,
+ const char __user *buf_user, size_t count_user,
+ loff_t *offset)
+{
+ struct sep_private_data * const private_data = filp->private_data;
+ struct sep_call_status *call_status = &private_data->call_status;
+ struct sep_device *sep = private_data->device;
+ struct sep_dma_context *dma_ctx = NULL;
+ struct sep_fastcall_hdr call_hdr = {0};
+ void *msg_region = NULL;
+ void *dmatables_region = NULL;
+ struct sep_dcblock *dcb_region = NULL;
+ ssize_t error = 0;
+ struct sep_queue_info *my_queue_elem = NULL;
+ bool my_secure_dma; /* are we using secure_dma (IMR)? */
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] sep dev is 0x%p\n",
+ current->pid, sep);
+ dev_dbg(&sep->pdev->dev, "[PID%d] private_data is 0x%p\n",
+ current->pid, private_data);
+
+ error = sep_fastcall_args_get(sep, &call_hdr, buf_user, count_user);
+ if (error)
+ goto end_function;
+
+ buf_user += sizeof(struct sep_fastcall_hdr);
+
+ my_secure_dma = (call_hdr.secure_dma != 0);
+
+ /*
+ * Control driver memory usage by limiting the number of
+ * buffers created. Only SEP_DOUBLEBUF_USERS_LIMIT threads
+ * can progress further at a time
+ */
+ dev_dbg(&sep->pdev->dev, "[PID%d] waiting for double buffering "
+ "region access\n", current->pid);
+ error = down_interruptible(&sep->sep_doublebuf);
+ dev_dbg(&sep->pdev->dev, "[PID%d] double buffering region start\n",
+ current->pid);
+ if (error) {
+ /* Signal received */
+ goto end_function_error;
+ }
+
+
+ /*
+ * Prepare contents of the shared area regions for
+ * the operation into temporary buffers
+ */
+ if (0 < call_hdr.num_dcbs) {
+ error = sep_create_dcb_dmatables_context(sep,
+ &dcb_region,
+ &dmatables_region,
+ &dma_ctx,
+ (const struct build_dcb_struct __user *)
+ buf_user,
+ call_hdr.num_dcbs, my_secure_dma);
+ if (error)
+ goto end_function_error_doublebuf;
+
+ buf_user += call_hdr.num_dcbs * sizeof(struct build_dcb_struct);
+ }
+
+ error = sep_create_msgarea_context(sep,
+ &msg_region,
+ buf_user,
+ call_hdr.msg_len);
+ if (error)
+ goto end_function_error_doublebuf;
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] updating queue status\n",
+ current->pid);
+ my_queue_elem = sep_queue_status_add(sep,
+ ((struct sep_msgarea_hdr *)msg_region)->opcode,
+ (dma_ctx) ? dma_ctx->input_data_len : 0,
+ current->pid,
+ current->comm, sizeof(current->comm));
+
+ if (!my_queue_elem) {
+ dev_dbg(&sep->pdev->dev, "[PID%d] updating queue"
+ "status error\n", current->pid);
+ error = -ENOMEM;
+ goto end_function_error_doublebuf;
+ }
+
+ /* Wait until current process gets the transaction */
+ error = sep_wait_transaction(sep);
+
+ if (error) {
+ /* Interrupted by signal, don't clear transaction */
+ dev_dbg(&sep->pdev->dev, "[PID%d] interrupted by signal\n",
+ current->pid);
+ sep_queue_status_remove(sep, &my_queue_elem);
+ goto end_function_error_doublebuf;
+ }
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] saving queue element\n",
+ current->pid);
+ private_data->my_queue_elem = my_queue_elem;
+
+ /* Activate shared area regions for the transaction */
+ error = sep_activate_msgarea_context(sep, &msg_region,
+ call_hdr.msg_len);
+ if (error)
+ goto end_function_error_clear_transact;
+
+ sep_dump_message(sep);
+
+ if (0 < call_hdr.num_dcbs) {
+ error = sep_activate_dcb_dmatables_context(sep,
+ &dcb_region,
+ &dmatables_region,
+ dma_ctx);
+ if (error)
+ goto end_function_error_clear_transact;
+ }
+
+ /* Send command to SEP */
+ error = sep_send_command_handler(sep);
+ if (error)
+ goto end_function_error_clear_transact;
+
+ /* Store DMA context for the transaction */
+ private_data->dma_ctx = dma_ctx;
+ /* Update call status */
+ set_bit(SEP_FASTCALL_WRITE_DONE_OFFSET, &call_status->status);
+ error = count_user;
+
+ up(&sep->sep_doublebuf);
+ dev_dbg(&sep->pdev->dev, "[PID%d] double buffering region end\n",
+ current->pid);
+
+ goto end_function;
+
+end_function_error_clear_transact:
+ sep_end_transaction_handler(sep, &dma_ctx, call_status,
+ &private_data->my_queue_elem);
+
+end_function_error_doublebuf:
+ up(&sep->sep_doublebuf);
+ dev_dbg(&sep->pdev->dev, "[PID%d] double buffering region end\n",
+ current->pid);
+
+end_function_error:
+ if (dma_ctx)
+ sep_free_dma_table_data_handler(sep, &dma_ctx);
+
+end_function:
+ kfree(dcb_region);
+ kfree(dmatables_region);
+ kfree(msg_region);
+
+ return error;
+}
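+
+/*
+ * Fastcall usage from user space, as a hypothetical sketch: a single
+ * write() carrying the header, DCB array and message starts the
+ * operation, and a single read() then blocks until the SEP replies,
+ * returns the reply message and copies back any tail data. sep_read()
+ * above enforces that write() was called first.
+ */
+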
+/**
+ * sep_seek - Handler for seek system call
+ * @filp: File pointer
+ * @offset: File offset
+ * @origin: Options for offset
+ *
+ * Fastcall interface does not support seeking; all reads
+ * and writes are from/to offset zero
+ */
+static loff_t sep_seek(struct file *filp, loff_t offset, int origin)
+{
+ return -ENOSYS;
+}
+
+
+
+/**
+ * sep_file_operations - file operation on sep device
+ * @sep_ioctl: ioctl handler from user space call
+ * @sep_poll: poll handler
+ * @sep_open: handles sep device open request
+ * @sep_release:handles sep device release request
+ * @sep_mmap: handles memory mapping requests
+ * @sep_read: handles read request on sep device
+ * @sep_write: handles write request on sep device
+ * @sep_seek: handles seek request on sep device
+ */
+static const struct file_operations sep_file_operations = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = sep_ioctl,
+ .poll = sep_poll,
+ .open = sep_open,
+ .release = sep_release,
+ .mmap = sep_mmap,
+ .read = sep_read,
+ .write = sep_write,
+ .llseek = sep_seek,
+};
+
+/**
+ * sep_sysfs_read - read sysfs entry per given arguments
+ * @filp: file pointer
+ * @kobj: kobject pointer
+ * @attr: binary file attributes
+ * @buf: read to this buffer
+ * @pos: offset to read
+ * @count: amount of data to read
+ *
+ * This function reads sysfs entries for the sep driver per the given arguments.
+ */
+static ssize_t
+sep_sysfs_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t pos, size_t count)
+{
+ unsigned long lck_flags;
+ size_t nleft = count;
+ struct sep_device *sep = sep_dev;
+ struct sep_queue_info *queue_elem = NULL;
+ u32 queue_num = 0;
+ u32 i = 1;
+
+ spin_lock_irqsave(&sep->sep_queue_lock, lck_flags);
+
+ queue_num = sep->sep_queue_num;
+ if (queue_num > SEP_DOUBLEBUF_USERS_LIMIT)
+ queue_num = SEP_DOUBLEBUF_USERS_LIMIT;
+
+
+ if (count < sizeof(queue_num)
+ + (queue_num * sizeof(struct sep_queue_data))) {
+ spin_unlock_irqrestore(&sep->sep_queue_lock, lck_flags);
+ return -EINVAL;
+ }
+
+ memcpy(buf, &queue_num, sizeof(queue_num));
+ buf += sizeof(queue_num);
+ nleft -= sizeof(queue_num);
+
+ list_for_each_entry(queue_elem, &sep->sep_queue_status, list) {
+ if (i++ > queue_num)
+ break;
+
+ memcpy(buf, &queue_elem->data, sizeof(queue_elem->data));
+ nleft -= sizeof(queue_elem->data);
+ buf += sizeof(queue_elem->data);
+ }
+ spin_unlock_irqrestore(&sep->sep_queue_lock, lck_flags);
+
+ return count - nleft;
+}
+
+/**
+ * bin_attributes - defines attributes for queue_status
+ * @attr: attributes (name & permissions)
+ * @read: function pointer to read this file
+ * @size: maximum size of the binary attribute
+ */
+static const struct bin_attribute queue_status = {
+ .attr = {.name = "queue_status", .mode = 0444},
+ .read = sep_sysfs_read,
+ .size = sizeof(u32)
+ + (SEP_DOUBLEBUF_USERS_LIMIT * sizeof(struct sep_queue_data)),
+};
+
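+/*
+ * The queue_status blob produced by sep_sysfs_read() is, as a sketch:
+ * a u32 count (capped at SEP_DOUBLEBUF_USERS_LIMIT) followed by that
+ * many struct sep_queue_data records. A reader should size its buffer
+ * from queue_status.size below.
+ */
+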
+/**
+ * sep_register_driver_with_fs - register misc devices
+ * @sep: pointer to struct sep_device
+ *
+ * This function registers the driver with the file system
+ */
+static int sep_register_driver_with_fs(struct sep_device *sep)
+{
+ int ret_val;
+
+ sep->miscdev_sep.minor = MISC_DYNAMIC_MINOR;
+ sep->miscdev_sep.name = SEP_DEV_NAME;
+ sep->miscdev_sep.fops = &sep_file_operations;
+
+ ret_val = misc_register(&sep->miscdev_sep);
+ if (ret_val) {
+ dev_warn(&sep->pdev->dev, "misc reg fails for SEP %x\n",
+ ret_val);
+ return ret_val;
+ }
+
+ ret_val = device_create_bin_file(sep->miscdev_sep.this_device,
+ &queue_status);
+ if (ret_val) {
+ dev_warn(&sep->pdev->dev, "sysfs attribute1 fails for SEP %x\n",
+ ret_val);
+ return ret_val;
+ }
+
+ return ret_val;
+}
+
+
+/**
+ * sep_probe - probe a matching PCI device
+ * @pdev: pci_device
+ * @ent: pci_device_id
+ *
+ * Attempt to set up and configure a SEP device that has been
+ * discovered by the PCI layer. Allocates all required resources.
+ */
+static int __devinit sep_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ int error = 0;
+ struct sep_device *sep = NULL;
+
+ if (sep_dev != NULL) {
+ dev_dbg(&pdev->dev, "only one SEP supported.\n");
+ return -EBUSY;
+ }
+
+ /* Enable the device */
+ error = pci_enable_device(pdev);
+ if (error) {
+ dev_warn(&pdev->dev, "error enabling pci device\n");
+ goto end_function;
+ }
+
+ /* Allocate the sep_device structure for this device */
+ sep_dev = kzalloc(sizeof(struct sep_device), GFP_ATOMIC);
+ if (sep_dev == NULL) {
+ dev_warn(&pdev->dev,
+ "can't kmalloc the sep_device structure\n");
+ error = -ENOMEM;
+ goto end_function_disable_device;
+ }
+
+ /*
+ * We're going to use another variable for actually
+ * working with the device; this way, if we have
+ * multiple devices in the future, it would be easier
+ * to make appropriate changes
+ */
+ sep = sep_dev;
+
+ sep->pdev = pci_dev_get(pdev);
+
+ init_waitqueue_head(&sep->event_transactions);
+ init_waitqueue_head(&sep->event_interrupt);
+ spin_lock_init(&sep->snd_rply_lck);
+ spin_lock_init(&sep->sep_queue_lock);
+ sema_init(&sep->sep_doublebuf, SEP_DOUBLEBUF_USERS_LIMIT);
+
+ INIT_LIST_HEAD(&sep->sep_queue_status);
+
+ dev_dbg(&sep->pdev->dev, "sep probe: PCI obtained, "
+ "device being prepared\n");
+
+ /* Set up our register area */
+ sep->reg_physical_addr = pci_resource_start(sep->pdev, 0);
+ if (!sep->reg_physical_addr) {
+ dev_warn(&sep->pdev->dev, "Error getting register start\n");
+ error = -ENODEV;
+ goto end_function_free_sep_dev;
+ }
+
+ sep->reg_physical_end = pci_resource_end(sep->pdev, 0);
+ if (!sep->reg_physical_end) {
+ dev_warn(&sep->pdev->dev, "Error getting register end\n");
+ error = -ENODEV;
+ goto end_function_free_sep_dev;
+ }
+
+ sep->reg_addr = ioremap_nocache(sep->reg_physical_addr,
+ (size_t)(sep->reg_physical_end - sep->reg_physical_addr + 1));
+ if (!sep->reg_addr) {
+ dev_warn(&sep->pdev->dev, "Error getting register virtual\n");
+ error = -ENODEV;
+ goto end_function_free_sep_dev;
+ }
+
+ dev_dbg(&sep->pdev->dev,
+ "Register area start %llx end %llx virtual %p\n",
+ (unsigned long long)sep->reg_physical_addr,
+ (unsigned long long)sep->reg_physical_end,
+ sep->reg_addr);
+
+ /* Allocate the shared area */
+ sep->shared_size = SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES +
+ SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES +
+ SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES +
+ SEP_DRIVER_STATIC_AREA_SIZE_IN_BYTES +
+ SEP_DRIVER_SYSTEM_DATA_MEMORY_SIZE_IN_BYTES;
+
+ if (sep_map_and_alloc_shared_area(sep)) {
+ error = -ENOMEM;
+ /* Allocation failed */
+ goto end_function_error;
+ }
+
+ /* Clear ICR register */
+ sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);
+
+ /* Set the IMR register - open only GPR 2 */
+ sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, (~(0x1 << 13)));
+
+ /* Read send/receive counters from SEP */
+ sep->reply_ct = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
+ sep->reply_ct &= 0x3FFFFFFF;
+ sep->send_ct = sep->reply_ct;
+
+ /* Get the interrupt line */
+ error = request_irq(pdev->irq, sep_inthandler, IRQF_SHARED,
+ "sep_driver", sep);
+
+ if (error)
+ goto end_function_deallocate_sep_shared_area;
+
+ /* The new chip requires a shared area reconfigure */
+ error = sep_reconfig_shared_area(sep);
+ if (error)
+ goto end_function_free_irq;
+
+ sep->in_use = 1;
+
+ /* Finally magic up the device nodes */
+ /* Register driver with the fs */
+ error = sep_register_driver_with_fs(sep);
+
+ if (error) {
+ dev_err(&sep->pdev->dev, "error registering dev file\n");
+ goto end_function_free_irq;
+ }
+
+ sep->in_use = 0; /* done touching the device */
+#ifdef SEP_ENABLE_RUNTIME_PM
+ pm_runtime_put_noidle(&sep->pdev->dev);
+ pm_runtime_allow(&sep->pdev->dev);
+ pm_runtime_set_autosuspend_delay(&sep->pdev->dev,
+ SUSPEND_DELAY);
+ pm_runtime_use_autosuspend(&sep->pdev->dev);
+ pm_runtime_mark_last_busy(&sep->pdev->dev);
+ sep->power_save_setup = 1;
+#endif
+ /* register kernel crypto driver */
+#if defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE)
+ error = sep_crypto_setup();
+ if (error) {
+ dev_err(&sep->pdev->dev, "crypto setup failed\n");
+ goto end_function_free_irq;
+ }
+#endif
+ goto end_function;
+
+end_function_free_irq:
+ free_irq(pdev->irq, sep);
+
+end_function_deallocate_sep_shared_area:
+ /* De-allocate shared area */
+ sep_unmap_and_free_shared_area(sep);
+
+end_function_error:
+ iounmap(sep->reg_addr);
+
+end_function_free_sep_dev:
+ pci_dev_put(sep_dev->pdev);
+ kfree(sep_dev);
+ sep_dev = NULL;
+
+end_function_disable_device:
+ pci_disable_device(pdev);
+
+end_function:
+ return error;
+}
+
+/**
+ * sep_remove - handles removing device from pci subsystem
+ * @pdev: pointer to pci device
+ *
+ * This function will handle removing our sep device from pci subsystem on exit
+ * or unloading this module. It should free up all used resources, and unmap if
+ * any memory regions mapped.
+ */
+static void sep_remove(struct pci_dev *pdev)
+{
+ struct sep_device *sep = sep_dev;
+
+ /* Unregister from fs */
+ misc_deregister(&sep->miscdev_sep);
+
+ /* Unregister from kernel crypto */
+#if defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE)
+ sep_crypto_takedown();
+#endif
+ /* Free the irq */
+ free_irq(sep->pdev->irq, sep);
+
+ /* Free the shared area */
+ sep_unmap_and_free_shared_area(sep_dev);
+ iounmap(sep_dev->reg_addr);
+
+#ifdef SEP_ENABLE_RUNTIME_PM
+ if (sep->in_use) {
+ sep->in_use = 0;
+ pm_runtime_forbid(&sep->pdev->dev);
+ pm_runtime_get_noresume(&sep->pdev->dev);
+ }
+#endif
+ pci_dev_put(sep_dev->pdev);
+ kfree(sep_dev);
+ sep_dev = NULL;
+}
+
+/* Initialize struct pci_device_id for our driver */
+static DEFINE_PCI_DEVICE_TABLE(sep_pci_id_tbl) = {
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0826)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x08e9)},
+ {0}
+};
+
+/* Export our pci_device_id structure to user space */
+MODULE_DEVICE_TABLE(pci, sep_pci_id_tbl);
+
+#ifdef SEP_ENABLE_RUNTIME_PM
+
+/**
+ * sep_pci_resume - resume routine while waking up from S3 state
+ * @dev: pointer to sep device
+ *
+ * This function is used to wake up the sep driver while the system awakes from
+ * the S3 state, i.e. suspend to RAM. The RAM is intact.
+ * Notes - revisit with more understanding of pm, ICR/IMR & counters.
+ */
+static int sep_pci_resume(struct device *dev)
+{
+ struct sep_device *sep = sep_dev;
+
+ dev_dbg(&sep->pdev->dev, "pci resume called\n");
+
+ if (sep->power_state == SEP_DRIVER_POWERON)
+ return 0;
+
+ /* Clear ICR register */
+ sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);
+
+ /* Set the IMR register - open only GPR 2 */
+ sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, (~(0x1 << 13)));
+
+ /* Read send/receive counters from SEP */
+ sep->reply_ct = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
+ sep->reply_ct &= 0x3FFFFFFF;
+ sep->send_ct = sep->reply_ct;
+
+ sep->power_state = SEP_DRIVER_POWERON;
+
+ return 0;
+}
+
+/**
+ * sep_pci_suspend - suspend routine while going to S3 state
+ * @dev: pointer to sep device
+ *
+ * This function is used to suspend the sep driver while the system goes to the
+ * S3 state, i.e. suspend to RAM. The RAM is intact and ON during this suspend.
+ * Notes - revisit with more understanding of pm, ICR/IMR
+ */
+static int sep_pci_suspend(struct device *dev)
+{
+ struct sep_device *sep = sep_dev;
+
+ dev_dbg(&sep->pdev->dev, "pci suspend called\n");
+ if (sep->in_use == 1)
+ return -EAGAIN;
+
+ sep->power_state = SEP_DRIVER_POWEROFF;
+
+ /* Clear ICR register */
+ sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);
+
+ /* Set the IMR to block all */
+ sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, 0xFFFFFFFF);
+
+ return 0;
+}
+
+/**
+ * sep_pm_runtime_resume - runtime resume routine
+ * @dev: pointer to sep device
+ *
+ * Notes - revisit with more understanding of pm, ICR/IMR & counters
+ */
+static int sep_pm_runtime_resume(struct device *dev)
+{
+
+ u32 retval2;
+ u32 delay_count;
+ struct sep_device *sep = sep_dev;
+
+ dev_dbg(&sep->pdev->dev, "pm runtime resume called\n");
+
+ /*
+ * Wait until the SCU boot is ready
+ * This is done by iterating SCU_DELAY_ITERATION (10
+ * microseconds each) up to SCU_DELAY_MAX (50) times.
+ * This bit can be set in a random time that is less
+ * than 500 microseconds after each power resume
+ */
+ retval2 = 0;
+ delay_count = 0;
+ while ((!retval2) && (delay_count < SCU_DELAY_MAX)) {
+ retval2 = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
+ retval2 &= 0x00000008;
+ if (!retval2) {
+ udelay(SCU_DELAY_ITERATION);
+ delay_count += 1;
+ }
+ }
+
+ if (!retval2) {
+ dev_warn(&sep->pdev->dev, "scu boot bit not set at resume\n");
+ return -EINVAL;
+ }
+
+ /* Clear ICR register */
+ sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);
+
+ /* Set the IMR register - open only GPR 2 */
+ sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, (~(0x1 << 13)));
+
+ /* Read send/receive counters from SEP */
+ sep->reply_ct = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
+ sep->reply_ct &= 0x3FFFFFFF;
+ sep->send_ct = sep->reply_ct;
+
+ return 0;
+}
+
+/**
+ * sep_pm_runtime_suspend - runtime suspend routine
+ * @dev: pointer to sep device
+ *
+ * Notes - revisit with more understanding of pm
+ */
+static int sep_pm_runtime_suspend(struct device *dev)
+{
+ struct sep_device *sep = sep_dev;
+
+ dev_dbg(&sep->pdev->dev, "pm runtime suspend called\n");
+
+ /* Clear ICR register */
+ sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);
+ return 0;
+}
+
+/**
+ * sep_pm - power management for sep driver
+ * @sep_pm_runtime_resume: resume - no communication with cpu & main memory
+ * @sep_pm_runtime_suspend: suspend - no communication with cpu & main memory
+ * @sep_pci_suspend: suspend - main memory is still ON
+ * @sep_pci_resume: resume - main memory is still ON
+ */
+static const struct dev_pm_ops sep_pm = {
+ .runtime_resume = sep_pm_runtime_resume,
+ .runtime_suspend = sep_pm_runtime_suspend,
+ .resume = sep_pci_resume,
+ .suspend = sep_pci_suspend,
+};
+#endif /* SEP_ENABLE_RUNTIME_PM */
+
+/**
+ * sep_pci_driver - registers this device with pci subsystem
+ * @name: name identifier for this driver
+ * @sep_pci_id_tbl: pointer to struct pci_device_id table
+ * @sep_probe: pointer to probe function in PCI driver
+ * @sep_remove: pointer to remove function in PCI driver
+ */
+static struct pci_driver sep_pci_driver = {
+#ifdef SEP_ENABLE_RUNTIME_PM
+ .driver = {
+ .pm = &sep_pm,
+ },
+#endif
+ .name = "sep_sec_driver",
+ .id_table = sep_pci_id_tbl,
+ .probe = sep_probe,
+ .remove = sep_remove
+};
+
+/**
+ * sep_init - init function
+ *
+ * Module load time. Register the PCI device driver.
+ */
+static int __init sep_init(void)
+{
+ return pci_register_driver(&sep_pci_driver);
+}
+
+
+/**
+ * sep_exit - called to unload driver
+ *
+ * Unregister the driver. The device will perform all the required cleanup.
+ */
+static void __exit sep_exit(void)
+{
+ pci_unregister_driver(&sep_pci_driver);
+}
+
+
+module_init(sep_init);
+module_exit(sep_exit);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/sep/sep_trace_events.h b/drivers/staging/sep/sep_trace_events.h
new file mode 100644
index 00000000000..2b053a93afe
--- /dev/null
+++ b/drivers/staging/sep/sep_trace_events.h
@@ -0,0 +1,188 @@
+/*
+ * If TRACE_SYSTEM is defined, that will be the directory created
+ * in the ftrace directory under /sys/kernel/debug/tracing/events/<system>
+ *
+ * The define_trace.h below will also look for a file name of
+ * TRACE_SYSTEM.h where TRACE_SYSTEM is what is defined here.
+ * In this case, it would look for sep.h
+ *
+ * If the header name will be different than the system name
+ * (as in this case), then you can override the header name that
+ * define_trace.h will look up by defining TRACE_INCLUDE_FILE
+ *
+ * This file is called sep_trace_events.h but we want the system
+ * to be called "sep". Therefore we must define the name of this
+ * file:
+ *
+ * #define TRACE_INCLUDE_FILE sep_trace_events
+ *
+ * As we do at the bottom of this file.
+ *
+ * Notice that TRACE_SYSTEM should be defined outside of #if
+ * protection, just like TRACE_INCLUDE_FILE.
+ */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM sep
+
+/*
+ * Notice that this file is not protected like a normal header.
+ * We also must allow for rereading of this file. The
+ *
+ * || defined(TRACE_HEADER_MULTI_READ)
+ *
+ * serves this purpose.
+ */
+#if !defined(_TRACE_SEP_EVENTS_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_SEP_EVENTS_H
+
+#ifdef SEP_PERF_DEBUG
+#define SEP_TRACE_FUNC_IN() trace_sep_func_start(__func__, 0)
+#define SEP_TRACE_FUNC_OUT(branch) trace_sep_func_end(__func__, branch)
+#define SEP_TRACE_EVENT(branch) trace_sep_misc_event(__func__, branch)
+#else
+#define SEP_TRACE_FUNC_IN()
+#define SEP_TRACE_FUNC_OUT(branch)
+#define SEP_TRACE_EVENT(branch)
+#endif
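+
+/*
+ * Illustrative usage only (assumed, not defined by this header): a driver
+ * function brackets its body with these macros, which compile away to
+ * nothing unless SEP_PERF_DEBUG is defined:
+ *
+ *	static int sep_do_work(struct sep_device *sep)
+ *	{
+ *		SEP_TRACE_FUNC_IN();
+ *		... real work, with SEP_TRACE_EVENT(n) at points of interest ...
+ *		SEP_TRACE_FUNC_OUT(0);
+ *		return 0;
+ *	}
+ */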
+
+
+/*
+ * All trace headers should include tracepoint.h, until we finally
+ * make it into a standard header.
+ */
+#include <linux/tracepoint.h>
+
+/*
+ * The TRACE_EVENT macro is broken up into 5 parts.
+ *
+ * name: name of the trace point. This is also how to enable the tracepoint.
+ *   A function called trace_sep_func_start() will be created.
+ *
+ * proto: the prototype of the function trace_sep_func_start()
+ *   Here it is trace_sep_func_start(const char *name, int branch).
+ *
+ * args: must match the arguments in the prototype.
+ *   Here it is simply "name, branch".
+ *
+ * struct: This defines the way the data will be stored in the ring buffer.
+ *   There are currently two types of elements: __field and __array.
+ *   A __field is broken up into (type, name), where type can be any
+ *   type but an array.
+ *   For an array, there are three fields: (type, name, size), giving the
+ *   type of the elements in the array, the name of the field and the
+ *   size of the array.
+ *
+ *   __array(char, foo, 10) is the same as saying char foo[10].
+ *
+ * fast_assign: This is a C-like function that is used to store the items
+ *   into the ring buffer.
+ *
+ * printk: This is a way to print out the data in pretty print. This is
+ *   useful if the system crashes and you are logging via a serial line;
+ *   the data can then be printed to the console using this "printk" method.
+ *
+ * Note that for both the assign and the printk, __entry is the handle
+ * to the data structure in the ring buffer, and is defined by
+ * TP_STRUCT__entry.
+ */
+TRACE_EVENT(sep_func_start,
+
+ TP_PROTO(const char *name, int branch),
+
+ TP_ARGS(name, branch),
+
+ TP_STRUCT__entry(
+ __array(char, name, 20)
+ __field(int, branch)
+ ),
+
+ TP_fast_assign(
+ strncpy(__entry->name, name, 20);
+ __entry->name[19] = '\0'; /* strncpy() does not NUL-terminate on overflow */
+ __entry->branch = branch;
+ ),
+
+ TP_printk("func_start %s %d", __entry->name, __entry->branch)
+);
+
+TRACE_EVENT(sep_func_end,
+
+ TP_PROTO(const char *name, int branch),
+
+ TP_ARGS(name, branch),
+
+ TP_STRUCT__entry(
+ __array(char, name, 20)
+ __field(int, branch)
+ ),
+
+ TP_fast_assign(
+ strncpy(__entry->name, name, 20);
+ __entry->name[19] = '\0'; /* strncpy() does not NUL-terminate on overflow */
+ __entry->branch = branch;
+ ),
+
+ TP_printk("func_end %s %d", __entry->name, __entry->branch)
+);
+
+TRACE_EVENT(sep_misc_event,
+
+ TP_PROTO(const char *name, int branch),
+
+ TP_ARGS(name, branch),
+
+ TP_STRUCT__entry(
+ __array(char, name, 20)
+ __field(int, branch)
+ ),
+
+ TP_fast_assign(
+ strncpy(__entry->name, name, 20);
+ __entry->name[19] = '\0'; /* strncpy() does not NUL-terminate on overflow */
+ __entry->branch = branch;
+ ),
+
+ TP_printk("misc_event %s %d", __entry->name, __entry->branch)
+);
+
+
+#endif
+
+/***** NOTICE! The #if protection ends here. *****/
+
+
+/*
+ * There are several ways I could have done this. If I left out the
+ * TRACE_INCLUDE_PATH, then it would default to the kernel source
+ * include/trace/events directory.
+ *
+ * I could specify a path from the define_trace.h file back to this
+ * file:
+ *
+ * #define TRACE_INCLUDE_PATH ../../drivers/staging/sep
+ *
+ * But the safest and easiest way to make it use the directory
+ * that the file is in is to add, in the Makefile:
+ *
+ * CFLAGS_<object>.o := -I$(src)
+ *
+ * This will make sure the current path is part of the include
+ * structure for our file so that define_trace.h can find it.
+ *
+ * I could have made only the top level directory the include:
+ *
+ * CFLAGS_<object>.o := -I$(PWD)
+ *
+ * And then let the path to this directory be the TRACE_INCLUDE_PATH:
+ *
+ * #define TRACE_INCLUDE_PATH drivers/staging/sep
+ *
+ * But then if something defines "drivers" or "staging" as a macro
+ * then we could risk that being converted too, giving us an unexpected
+ * result.
+ */
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_PATH .
+/*
+ * TRACE_INCLUDE_FILE is not needed if the filename and TRACE_SYSTEM are equal
+ */
+#define TRACE_INCLUDE_FILE sep_trace_events
+#include <trace/define_trace.h>
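
Note that for these tracepoints to be instantiated, exactly one C file in the
driver must define CREATE_TRACE_POINTS before including this header, and the
Makefile must put this directory on the include path as the comment above
describes. A sketch of the expected glue (the sep_main.o object name is an
assumption):

	/* In one (and only one) .c file of the sep driver: */
	#define CREATE_TRACE_POINTS
	#include "sep_trace_events.h"

	/* And in drivers/staging/sep/Makefile (object name assumed): */
	CFLAGS_sep_main.o := -I$(src)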
diff --git a/drivers/staging/slicoss/README b/drivers/staging/slicoss/README
index b83bba19b7f..cb04a87b201 100644
--- a/drivers/staging/slicoss/README
+++ b/drivers/staging/slicoss/README
@@ -42,7 +42,7 @@ TODO:
Please send patches to:
- Greg Kroah-Hartman <gregkh@suse.de>
+ Greg Kroah-Hartman <gregkh@linuxfoundation.org>
and Cc: Lior Dotan <liodot@gmail.com> and Christopher Harrer
<charrer@alacritech.com> as well as they are also able to test out any
changes.
diff --git a/drivers/staging/sm7xx/smtcfb.c b/drivers/staging/sm7xx/smtcfb.c
index ae0035f327e..83c582ed12e 100644
--- a/drivers/staging/sm7xx/smtcfb.c
+++ b/drivers/staging/sm7xx/smtcfb.c
@@ -41,7 +41,6 @@
#ifdef CONFIG_PM
#include <linux/pm.h>
-#include <linux/module.h>
#endif
#include "smtcfb.h"
@@ -443,7 +442,7 @@ static int smtc_setcolreg(unsigned regno, unsigned red, unsigned green,
}
#ifdef __BIG_ENDIAN
-static ssize_t smtcfb_read(struct fb_info *info, char __user * buf, size_t
+static ssize_t smtcfb_read(struct fb_info *info, char __user *buf, size_t
count, loff_t *ppos)
{
unsigned long p = *ppos;
diff --git a/drivers/staging/sm7xx/smtcfb.h b/drivers/staging/sm7xx/smtcfb.h
index c5e6989e65a..ab95af2b9c0 100644
--- a/drivers/staging/sm7xx/smtcfb.h
+++ b/drivers/staging/sm7xx/smtcfb.h
@@ -38,7 +38,7 @@
#define dac_reg (0x3c8)
#define dac_val (0x3c9)
-extern char *smtc_RegBaseAddress;
+extern char __iomem *smtc_RegBaseAddress;
#define smtc_mmiowb(dat, reg) writeb(dat, smtc_RegBaseAddress + reg)
#define smtc_mmioww(dat, reg) writew(dat, smtc_RegBaseAddress + reg)
#define smtc_mmiowl(dat, reg) writel(dat, smtc_RegBaseAddress + reg)
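
The __iomem annotation above lets sparse verify that the smtc_mmio* wrappers
only ever dereference MMIO mappings obtained from an ioremap-family call. A
minimal sketch of the pattern the annotation expects (the smtc_map_regs()
helper and the BAR index are assumptions):

	#include <linux/io.h>
	#include <linux/pci.h>

	char __iomem *smtc_RegBaseAddress;

	static int smtc_map_regs(struct pci_dev *pdev)
	{
		/* __iomem pointers must come from ioremap(), not plain kmalloc() */
		smtc_RegBaseAddress = ioremap(pci_resource_start(pdev, 0),
					      pci_resource_len(pdev, 0));
		if (!smtc_RegBaseAddress)
			return -ENOMEM;
		return 0;
	}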
diff --git a/drivers/telephony/Kconfig b/drivers/staging/telephony/Kconfig
index b5f78b6ed2b..b5f78b6ed2b 100644
--- a/drivers/telephony/Kconfig
+++ b/drivers/staging/telephony/Kconfig
diff --git a/drivers/telephony/Makefile b/drivers/staging/telephony/Makefile
index 1206615d69e..1206615d69e 100644
--- a/drivers/telephony/Makefile
+++ b/drivers/staging/telephony/Makefile
diff --git a/drivers/staging/telephony/TODO b/drivers/staging/telephony/TODO
new file mode 100644
index 00000000000..d47dec3508d
--- /dev/null
+++ b/drivers/staging/telephony/TODO
@@ -0,0 +1,10 @@
+TODO
+. Determine if the boards are still in use
+ and move this module back to drivers/telephony if necessary
+. Coding style cleanups
+
+Please send patches to Greg Kroah-Hartman <greg@kroah.com> and
+cc Joe Perches <joe@perches.com> if the module should be reactivated.
+
+If no module activity occurs before version 3.6 is released, this
+module should be removed.
diff --git a/drivers/telephony/ixj-ver.h b/drivers/staging/telephony/ixj-ver.h
index 2031ac6c888..2031ac6c888 100644
--- a/drivers/telephony/ixj-ver.h
+++ b/drivers/staging/telephony/ixj-ver.h
diff --git a/drivers/telephony/ixj.c b/drivers/staging/telephony/ixj.c
index d5f923bcdff..d5f923bcdff 100644
--- a/drivers/telephony/ixj.c
+++ b/drivers/staging/telephony/ixj.c
diff --git a/drivers/telephony/ixj.h b/drivers/staging/telephony/ixj.h
index 2c841134f61..2c841134f61 100644
--- a/drivers/telephony/ixj.h
+++ b/drivers/staging/telephony/ixj.h
diff --git a/drivers/telephony/ixj_pcmcia.c b/drivers/staging/telephony/ixj_pcmcia.c
index 05032e2cc95..05032e2cc95 100644
--- a/drivers/telephony/ixj_pcmcia.c
+++ b/drivers/staging/telephony/ixj_pcmcia.c
diff --git a/drivers/telephony/phonedev.c b/drivers/staging/telephony/phonedev.c
index 1915af20117..1915af20117 100644
--- a/drivers/telephony/phonedev.c
+++ b/drivers/staging/telephony/phonedev.c
diff --git a/drivers/staging/tidspbridge/Kconfig b/drivers/staging/tidspbridge/Kconfig
index 21a559ecbbb..0dd479f5638 100644
--- a/drivers/staging/tidspbridge/Kconfig
+++ b/drivers/staging/tidspbridge/Kconfig
@@ -31,12 +31,6 @@ config TIDSPBRIDGE_MEMPOOL_SIZE
Allocate specified size of memory at booting time to avoid allocation
failure under heavy memory fragmentation after some use time.
-config TIDSPBRIDGE_DEBUG
- bool "Debug Support"
- depends on TIDSPBRIDGE
- help
- Say Y to enable Bridge debugging capabilities
-
config TIDSPBRIDGE_RECOVERY
bool "Recovery Support"
depends on TIDSPBRIDGE
@@ -58,22 +52,6 @@ config TIDSPBRIDGE_CACHE_LINE_CHECK
This can lead to heap corruption. Say Y, to enforce the check for 128
byte alignment, buffers failing this check will be rejected.
-config TIDSPBRIDGE_WDT3
- bool "Enable watchdog timer"
- depends on TIDSPBRIDGE
- help
- WTD3 is managed by DSP and once it is enabled, DSP side bridge is in
- charge of refreshing the timer before overflow, if the DSP hangs MPU
- will caught the interrupt and try to recover DSP.
-
-config TIDSPBRIDGE_WDT_TIMEOUT
- int "Watchdog timer timeout (in secs)"
- depends on TIDSPBRIDGE && TIDSPBRIDGE_WDT3
- default 5
- help
- Watchdog timer timeout value, after that time if the watchdog timer
- counter is not reset the wdt overflow interrupt will be triggered
-
config TIDSPBRIDGE_NTFY_PWRERR
bool "Notify power errors"
depends on TIDSPBRIDGE
diff --git a/drivers/staging/tidspbridge/Makefile b/drivers/staging/tidspbridge/Makefile
index fd6a2761cc3..8c8c92a9083 100644
--- a/drivers/staging/tidspbridge/Makefile
+++ b/drivers/staging/tidspbridge/Makefile
@@ -1,4 +1,4 @@
-obj-$(CONFIG_TIDSPBRIDGE) += bridgedriver.o
+obj-$(CONFIG_TIDSPBRIDGE) += tidspbridge.o
libgen = gen/gh.o gen/uuidutil.o
libcore = core/chnl_sm.o core/msg_sm.o core/io_sm.o core/tiomap3430.o \
@@ -13,7 +13,7 @@ libdload = dynload/cload.o dynload/getsection.o dynload/reloc.o \
dynload/tramp.o
libhw = hw/hw_mmu.o
-bridgedriver-y := $(libgen) $(libservices) $(libcore) $(libpmgr) $(librmgr) \
+tidspbridge-y := $(libgen) $(libservices) $(libcore) $(libpmgr) $(librmgr) \
$(libdload) $(libhw)
#Machine dependent
diff --git a/drivers/staging/tidspbridge/core/chnl_sm.c b/drivers/staging/tidspbridge/core/chnl_sm.c
index 6d66e7d0fba..e0c7e4c470c 100644
--- a/drivers/staging/tidspbridge/core/chnl_sm.c
+++ b/drivers/staging/tidspbridge/core/chnl_sm.c
@@ -50,9 +50,6 @@
/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>
-/* ----------------------------------- Trace & Debug */
-#include <dspbridge/dbc.h>
-
/* ----------------------------------- OS Adaptation Layer */
#include <dspbridge/sync.h>
@@ -123,7 +120,6 @@ int bridge_chnl_add_io_req(struct chnl_object *chnl_obj, void *host_buf,
CHNL_IS_OUTPUT(pchnl->chnl_mode))
return -EPIPE;
/* No other possible states left */
- DBC_ASSERT(0);
}
dev_obj = dev_get_first();
@@ -190,7 +186,6 @@ func_cont:
* Note: for dma chans dw_dsp_addr contains dsp address
* of SM buffer.
*/
- DBC_ASSERT(chnl_mgr_obj->word_size != 0);
/* DSP address */
chnl_packet_obj->dsp_tx_addr = dw_dsp_addr / chnl_mgr_obj->word_size;
chnl_packet_obj->byte_size = byte_size;
@@ -201,7 +196,6 @@ func_cont:
CHNL_IOCSTATCOMPLETE);
list_add_tail(&chnl_packet_obj->link, &pchnl->io_requests);
pchnl->cio_reqs++;
- DBC_ASSERT(pchnl->cio_reqs <= pchnl->chnl_packets);
/*
* If end of stream, update the channel state to prevent
* more IOR's.
@@ -209,8 +203,6 @@ func_cont:
if (is_eos)
pchnl->state |= CHNL_STATEEOS;
- /* Legacy DSM Processor-Copy */
- DBC_ASSERT(pchnl->chnl_type == CHNL_PCPY);
/* Request IO from the DSP */
io_request_chnl(chnl_mgr_obj->iomgr, pchnl,
(CHNL_IS_INPUT(pchnl->chnl_mode) ? IO_INPUT :
@@ -283,7 +275,6 @@ int bridge_chnl_cancel_io(struct chnl_object *chnl_obj)
list_add_tail(&chirp->link, &pchnl->io_completions);
pchnl->cio_cs++;
pchnl->cio_reqs--;
- DBC_ASSERT(pchnl->cio_reqs >= 0);
}
spin_unlock_bh(&chnl_mgr_obj->chnl_mgr_lock);
@@ -311,8 +302,6 @@ int bridge_chnl_close(struct chnl_object *chnl_obj)
status = bridge_chnl_cancel_io(chnl_obj);
if (status)
return status;
- /* Assert I/O on this channel is now cancelled: Protects from io_dpc */
- DBC_ASSERT((pchnl->state & CHNL_STATECANCEL));
/* Invalidate channel object: Protects from CHNL_GetIOCompletion() */
/* Free the slot in the channel manager: */
pchnl->chnl_mgr_obj->channels[pchnl->chnl_id] = NULL;
@@ -358,13 +347,6 @@ int bridge_chnl_create(struct chnl_mgr **channel_mgr,
struct chnl_mgr *chnl_mgr_obj = NULL;
u8 max_channels;
- /* Check DBC requirements: */
- DBC_REQUIRE(channel_mgr != NULL);
- DBC_REQUIRE(mgr_attrts != NULL);
- DBC_REQUIRE(mgr_attrts->max_channels > 0);
- DBC_REQUIRE(mgr_attrts->max_channels <= CHNL_MAXCHANNELS);
- DBC_REQUIRE(mgr_attrts->word_size != 0);
-
/* Allocate channel manager object */
chnl_mgr_obj = kzalloc(sizeof(struct chnl_mgr), GFP_KERNEL);
if (chnl_mgr_obj) {
@@ -374,7 +356,6 @@ int bridge_chnl_create(struct chnl_mgr **channel_mgr,
* mgr_attrts->max_channels = CHNL_MAXCHANNELS =
* DDMA_MAXDDMACHNLS = DDMA_MAXZCPYCHNLS.
*/
- DBC_ASSERT(mgr_attrts->max_channels == CHNL_MAXCHANNELS);
max_channels = CHNL_MAXCHANNELS + CHNL_MAXCHANNELS * CHNL_PCPY;
/* Create array of channels */
chnl_mgr_obj->channels = kzalloc(sizeof(struct chnl_object *)
@@ -491,7 +472,6 @@ int bridge_chnl_flush_io(struct chnl_object *chnl_obj, u32 timeout)
pchnl->state &= ~CHNL_STATECANCEL;
}
}
- DBC_ENSURE(status || list_empty(&pchnl->io_requests));
return status;
}
@@ -592,7 +572,6 @@ int bridge_chnl_get_ioc(struct chnl_object *chnl_obj, u32 timeout,
omap_mbox_disable_irq(dev_ctxt->mbox, IRQ_RX);
if (dequeue_ioc) {
/* Dequeue IOC and set chan_ioc; */
- DBC_ASSERT(!list_empty(&pchnl->io_completions));
chnl_packet_obj = list_first_entry(&pchnl->io_completions,
struct chnl_irp, link);
list_del(&chnl_packet_obj->link);
@@ -705,8 +684,6 @@ int bridge_chnl_idle(struct chnl_object *chnl_obj, u32 timeout,
struct chnl_mgr *chnl_mgr_obj;
int status = 0;
- DBC_REQUIRE(chnl_obj);
-
chnl_mode = chnl_obj->chnl_mode;
chnl_mgr_obj = chnl_obj->chnl_mgr_obj;
@@ -736,10 +713,7 @@ int bridge_chnl_open(struct chnl_object **chnl,
struct chnl_mgr *chnl_mgr_obj = hchnl_mgr;
struct chnl_object *pchnl = NULL;
struct sync_object *sync_event = NULL;
- /* Ensure DBC requirements: */
- DBC_REQUIRE(chnl != NULL);
- DBC_REQUIRE(pattrs != NULL);
- DBC_REQUIRE(hchnl_mgr != NULL);
+
*chnl = NULL;
/* Validate Args: */
@@ -761,7 +735,6 @@ int bridge_chnl_open(struct chnl_object **chnl,
return status;
}
- DBC_ASSERT(ch_id < chnl_mgr_obj->max_channels);
/* Create channel object: */
pchnl = kzalloc(sizeof(struct chnl_object), GFP_KERNEL);
@@ -850,7 +823,6 @@ int bridge_chnl_register_notify(struct chnl_object *chnl_obj,
{
int status = 0;
- DBC_ASSERT(!(event_mask & ~(DSP_STREAMDONE | DSP_STREAMIOCOMPLETION)));
if (event_mask)
status = ntfy_register(chnl_obj->ntfy_obj, hnotification,
@@ -906,8 +878,6 @@ static void free_chirp_list(struct list_head *chirp_list)
{
struct chnl_irp *chirp, *tmp;
- DBC_REQUIRE(chirp_list != NULL);
-
list_for_each_entry_safe(chirp, tmp, chirp_list, link) {
list_del(&chirp->link);
kfree(chirp);
@@ -924,8 +894,6 @@ static int search_free_channel(struct chnl_mgr *chnl_mgr_obj,
int status = -ENOSR;
u32 i;
- DBC_REQUIRE(chnl_mgr_obj);
-
for (i = 0; i < chnl_mgr_obj->max_channels; i++) {
if (chnl_mgr_obj->channels[i] == NULL) {
status = 0;
diff --git a/drivers/staging/tidspbridge/core/dsp-clock.c b/drivers/staging/tidspbridge/core/dsp-clock.c
index 7eb56178fb6..c7df34e6b60 100644
--- a/drivers/staging/tidspbridge/core/dsp-clock.c
+++ b/drivers/staging/tidspbridge/core/dsp-clock.c
@@ -29,9 +29,6 @@
#include <dspbridge/dev.h>
#include "_tiomap.h"
-/* ----------------------------------- Trace & Debug */
-#include <dspbridge/dbc.h>
-
/* ----------------------------------- This */
#include <dspbridge/clk.h>
diff --git a/drivers/staging/tidspbridge/core/io_sm.c b/drivers/staging/tidspbridge/core/io_sm.c
index 694c0e5e55c..9b50b5bd4ed 100644
--- a/drivers/staging/tidspbridge/core/io_sm.c
+++ b/drivers/staging/tidspbridge/core/io_sm.c
@@ -33,9 +33,6 @@
/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>
-/* Trace & Debug */
-#include <dspbridge/dbc.h>
-
/* Services Layer */
#include <dspbridge/ntfy.h>
#include <dspbridge/sync.h>
@@ -114,7 +111,7 @@ struct io_mgr {
struct mgr_processorextinfo ext_proc_info;
struct cmm_object *cmm_mgr; /* Shared Mem Mngr */
struct work_struct io_workq; /* workqueue */
-#if defined(CONFIG_TIDSPBRIDGE_BACKTRACE) || defined(CONFIG_TIDSPBRIDGE_DEBUG)
+#if defined(CONFIG_TIDSPBRIDGE_BACKTRACE)
u32 trace_buffer_begin; /* Trace message start address */
u32 trace_buffer_end; /* Trace message end address */
u32 trace_buffer_current; /* Trace message current address */
@@ -246,7 +243,7 @@ int bridge_io_destroy(struct io_mgr *hio_mgr)
/* Free IO DPC object */
tasklet_kill(&hio_mgr->dpc_tasklet);
-#if defined(CONFIG_TIDSPBRIDGE_BACKTRACE) || defined(CONFIG_TIDSPBRIDGE_DEBUG)
+#if defined(CONFIG_TIDSPBRIDGE_BACKTRACE)
kfree(hio_mgr->msg);
#endif
dsp_wdt_exit();
@@ -386,7 +383,7 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr)
status = -EFAULT;
}
if (!status) {
-#if defined(CONFIG_TIDSPBRIDGE_BACKTRACE) || defined(CONFIG_TIDSPBRIDGE_DEBUG)
+#if defined(CONFIG_TIDSPBRIDGE_BACKTRACE)
status =
cod_get_sym_value(cod_man, DSP_TRACESEC_END, &shm0_end);
#else
@@ -731,7 +728,7 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr)
hmsg_mgr->max_msgs);
memset((void *)hio_mgr->shared_mem, 0, sizeof(struct shm));
-#if defined(CONFIG_TIDSPBRIDGE_BACKTRACE) || defined(CONFIG_TIDSPBRIDGE_DEBUG)
+#if defined(CONFIG_TIDSPBRIDGE_BACKTRACE)
/* Get the start address of trace buffer */
status = cod_get_sym_value(cod_man, SYS_PUTCBEG,
&hio_mgr->trace_buffer_begin);
@@ -910,7 +907,7 @@ void io_dpc(unsigned long ref_data)
}
#endif
-#ifdef CONFIG_TIDSPBRIDGE_DEBUG
+#ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
if (pio_mgr->intr_val & MBX_DBG_SYSPRINTF) {
/* Notify DSP Trace message */
print_dsp_debug_trace(pio_mgr);
@@ -973,29 +970,16 @@ void io_request_chnl(struct io_mgr *io_manager, struct chnl_object *pchnl,
chnl_mgr_obj = io_manager->chnl_mgr;
sm = io_manager->shared_mem;
if (io_mode == IO_INPUT) {
- /*
- * Assertion fires if CHNL_AddIOReq() called on a stream
- * which was cancelled, or attached to a dead board.
- */
- DBC_ASSERT((pchnl->state == CHNL_STATEREADY) ||
- (pchnl->state == CHNL_STATEEOS));
/* Indicate to the DSP we have a buffer available for input */
set_chnl_busy(sm, pchnl->chnl_id);
*mbx_val = MBX_PCPY_CLASS;
} else if (io_mode == IO_OUTPUT) {
/*
- * This assertion fails if CHNL_AddIOReq() was called on a
- * stream which was cancelled, or attached to a dead board.
- */
- DBC_ASSERT((pchnl->state & ~CHNL_STATEEOS) ==
- CHNL_STATEREADY);
- /*
* Record the fact that we have a buffer available for
* output.
*/
chnl_mgr_obj->output_mask |= (1 << pchnl->chnl_id);
} else {
- DBC_ASSERT(io_mode); /* Shouldn't get here. */
}
func_end:
return;
@@ -1087,7 +1071,6 @@ static void input_chnl(struct io_mgr *pio_mgr, struct chnl_object *pchnl,
dw_arg = sm->arg;
if (chnl_id >= CHNL_MAXCHANNELS) {
/* Shouldn't be here: would indicate corrupted shm. */
- DBC_ASSERT(chnl_id);
goto func_end;
}
pchnl = chnl_mgr_obj->channels[chnl_id];
@@ -1683,7 +1666,7 @@ int bridge_io_get_proc_load(struct io_mgr *hio_mgr,
}
-#if defined(CONFIG_TIDSPBRIDGE_BACKTRACE) || defined(CONFIG_TIDSPBRIDGE_DEBUG)
+#if defined(CONFIG_TIDSPBRIDGE_BACKTRACE)
void print_dsp_debug_trace(struct io_mgr *hio_mgr)
{
u32 ul_new_message_length = 0, ul_gpp_cur_pointer;
diff --git a/drivers/staging/tidspbridge/core/msg_sm.c b/drivers/staging/tidspbridge/core/msg_sm.c
index 94d9e04a22f..ce9557e16eb 100644
--- a/drivers/staging/tidspbridge/core/msg_sm.c
+++ b/drivers/staging/tidspbridge/core/msg_sm.c
@@ -20,9 +20,6 @@
/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>
-/* ----------------------------------- Trace & Debug */
-#include <dspbridge/dbc.h>
-
/* ----------------------------------- OS Adaptation Layer */
#include <dspbridge/sync.h>
diff --git a/drivers/staging/tidspbridge/core/tiomap3430.c b/drivers/staging/tidspbridge/core/tiomap3430.c
index dde559d06c4..7862513cc29 100644
--- a/drivers/staging/tidspbridge/core/tiomap3430.c
+++ b/drivers/staging/tidspbridge/core/tiomap3430.c
@@ -27,9 +27,6 @@
/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>
-/* ----------------------------------- Trace & Debug */
-#include <dspbridge/dbc.h>
-
/* ----------------------------------- OS Adaptation Layer */
#include <dspbridge/drv.h>
#include <dspbridge/sync.h>
@@ -256,9 +253,6 @@ static void bad_page_dump(u32 pa, struct page *pg)
void bridge_drv_entry(struct bridge_drv_interface **drv_intf,
const char *driver_file_name)
{
-
- DBC_REQUIRE(driver_file_name != NULL);
-
if (strcmp(driver_file_name, "UMA") == 0)
*drv_intf = &drv_interface_fxns;
else
@@ -389,6 +383,7 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
u32 clk_cmd;
struct io_mgr *hio_mgr;
u32 ul_load_monitor_timer;
+ u32 wdt_en = 0;
struct omap_dsp_platform_data *pdata =
omap_dspbridge_dev->dev.platform_data;
@@ -399,16 +394,13 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
(void)dev_get_symbol(dev_context->dev_obj, SHMBASENAME,
&ul_shm_base_virt);
ul_shm_base_virt *= DSPWORDSIZE;
- DBC_ASSERT(ul_shm_base_virt != 0);
/* DSP Virtual address */
ul_tlb_base_virt = dev_context->atlb_entry[0].dsp_va;
- DBC_ASSERT(ul_tlb_base_virt <= ul_shm_base_virt);
ul_shm_offset_virt =
ul_shm_base_virt - (ul_tlb_base_virt * DSPWORDSIZE);
/* Kernel logical address */
ul_shm_base = dev_context->atlb_entry[0].gpp_va + ul_shm_offset_virt;
- DBC_ASSERT(ul_shm_base != 0);
/* 2nd wd is used as sync field */
dw_sync_addr = ul_shm_base + SHMSYNCOFFSET;
/* Write a signature into the shm base + offset; this will
@@ -603,9 +595,12 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
if (!wait_for_start(dev_context, dw_sync_addr))
status = -ETIMEDOUT;
- /* Start wdt */
- dsp_wdt_sm_set((void *)ul_shm_base);
- dsp_wdt_enable(true);
+ dev_get_symbol(dev_context->dev_obj, "_WDT_enable", &wdt_en);
+ if (wdt_en) {
+ /* Start wdt */
+ dsp_wdt_sm_set((void *)ul_shm_base);
+ dsp_wdt_enable(true);
+ }
status = dev_get_io_mgr(dev_context->dev_obj, &hio_mgr);
if (hio_mgr) {
diff --git a/drivers/staging/tidspbridge/core/tiomap3430_pwr.c b/drivers/staging/tidspbridge/core/tiomap3430_pwr.c
index 02dd4391309..16a4aafa86a 100644
--- a/drivers/staging/tidspbridge/core/tiomap3430_pwr.c
+++ b/drivers/staging/tidspbridge/core/tiomap3430_pwr.c
@@ -303,7 +303,6 @@ int dsp_peripheral_clk_ctrl(struct bridge_dev_context *dev_context,
}
/* TODO -- Assert may be a too hard restriction here.. May be we should
* just return with failure when the CLK ID does not match */
- /* DBC_ASSERT(clk_id_index < MBX_PM_MAX_RESOURCES); */
if (clk_id_index == MBX_PM_MAX_RESOURCES) {
/* return with a more meaningfull error code */
return -EPERM;
diff --git a/drivers/staging/tidspbridge/core/tiomap_io.c b/drivers/staging/tidspbridge/core/tiomap_io.c
index dfb356eb672..7fda10c3686 100644
--- a/drivers/staging/tidspbridge/core/tiomap_io.c
+++ b/drivers/staging/tidspbridge/core/tiomap_io.c
@@ -21,9 +21,6 @@
/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>
-/* ----------------------------------- Trace & Debug */
-#include <dspbridge/dbc.h>
-
/* ----------------------------------- Platform Manager */
#include <dspbridge/dev.h>
#include <dspbridge/drv.h>
@@ -68,20 +65,17 @@ int read_ext_dsp_data(struct bridge_dev_context *dev_ctxt,
status = dev_get_symbol(dev_context->dev_obj,
SHMBASENAME, &ul_shm_base_virt);
}
- DBC_ASSERT(ul_shm_base_virt != 0);
/* Check if it is a read of Trace section */
if (!status && !ul_trace_sec_beg) {
status = dev_get_symbol(dev_context->dev_obj,
DSP_TRACESEC_BEG, &ul_trace_sec_beg);
}
- DBC_ASSERT(ul_trace_sec_beg != 0);
if (!status && !ul_trace_sec_end) {
status = dev_get_symbol(dev_context->dev_obj,
DSP_TRACESEC_END, &ul_trace_sec_end);
}
- DBC_ASSERT(ul_trace_sec_end != 0);
if (!status) {
if ((dsp_addr <= ul_trace_sec_end) &&
@@ -105,19 +99,16 @@ int read_ext_dsp_data(struct bridge_dev_context *dev_ctxt,
status = dev_get_symbol(dev_context->dev_obj,
DYNEXTBASE, &ul_dyn_ext_base);
}
- DBC_ASSERT(ul_dyn_ext_base != 0);
if (!status) {
status = dev_get_symbol(dev_context->dev_obj,
EXTBASE, &ul_ext_base);
}
- DBC_ASSERT(ul_ext_base != 0);
if (!status) {
status = dev_get_symbol(dev_context->dev_obj,
EXTEND, &ul_ext_end);
}
- DBC_ASSERT(ul_ext_end != 0);
/* Trace buffer is right after the shm SEG0,
* so set the base address to SHMBASE */
@@ -126,8 +117,6 @@ int read_ext_dsp_data(struct bridge_dev_context *dev_ctxt,
ul_ext_end = ul_trace_sec_end;
}
- DBC_ASSERT(ul_ext_end != 0);
- DBC_ASSERT(ul_ext_end > ul_ext_base);
if (ul_ext_end < ul_ext_base)
status = -EPERM;
@@ -135,7 +124,6 @@ int read_ext_dsp_data(struct bridge_dev_context *dev_ctxt,
if (!status) {
ul_tlb_base_virt =
dev_context->atlb_entry[0].dsp_va * DSPWORDSIZE;
- DBC_ASSERT(ul_tlb_base_virt <= ul_shm_base_virt);
dw_ext_prog_virt_mem =
dev_context->atlb_entry[0].gpp_va;
@@ -271,7 +259,6 @@ int write_ext_dsp_data(struct bridge_dev_context *dev_context,
/* Get SHM_BEG EXT_BEG and EXT_END. */
ret = dev_get_symbol(dev_context->dev_obj,
SHMBASENAME, &ul_shm_base_virt);
- DBC_ASSERT(ul_shm_base_virt != 0);
if (dynamic_load) {
if (!ret) {
if (symbols_reloaded)
@@ -280,7 +267,6 @@ int write_ext_dsp_data(struct bridge_dev_context *dev_context,
(dev_context->dev_obj, DYNEXTBASE,
&ul_ext_base);
}
- DBC_ASSERT(ul_ext_base != 0);
if (!ret) {
/* DR OMAPS00013235 : DLModules array may be
* in EXTMEM. It is expected that DYNEXTMEM and
@@ -299,7 +285,6 @@ int write_ext_dsp_data(struct bridge_dev_context *dev_context,
dev_get_symbol
(dev_context->dev_obj, EXTBASE,
&ul_ext_base);
- DBC_ASSERT(ul_ext_base != 0);
if (!ret)
ret =
dev_get_symbol
@@ -312,15 +297,12 @@ int write_ext_dsp_data(struct bridge_dev_context *dev_context,
if (trace_load)
ul_ext_base = ul_shm_base_virt;
- DBC_ASSERT(ul_ext_end != 0);
- DBC_ASSERT(ul_ext_end > ul_ext_base);
if (ul_ext_end < ul_ext_base)
ret = -EPERM;
if (!ret) {
ul_tlb_base_virt =
dev_context->atlb_entry[0].dsp_va * DSPWORDSIZE;
- DBC_ASSERT(ul_tlb_base_virt <= ul_shm_base_virt);
if (symbols_reloaded) {
ret = dev_get_symbol
diff --git a/drivers/staging/tidspbridge/core/wdt.c b/drivers/staging/tidspbridge/core/wdt.c
index 2126f597753..70055c8111e 100644
--- a/drivers/staging/tidspbridge/core/wdt.c
+++ b/drivers/staging/tidspbridge/core/wdt.c
@@ -25,8 +25,6 @@
#include <dspbridge/host_os.h>
-#ifdef CONFIG_TIDSPBRIDGE_WDT3
-
#define OMAP34XX_WDT3_BASE (L4_PER_34XX_BASE + 0x30000)
static struct dsp_wdt_setting dsp_wdt;
@@ -84,7 +82,7 @@ int dsp_wdt_init(void)
void dsp_wdt_sm_set(void *data)
{
dsp_wdt.sm_wdt = data;
- dsp_wdt.sm_wdt->wdt_overflow = CONFIG_TIDSPBRIDGE_WDT_TIMEOUT;
+ dsp_wdt.sm_wdt->wdt_overflow = 5; /* in seconds */
}
@@ -128,23 +126,3 @@ void dsp_wdt_enable(bool enable)
clk_disable(dsp_wdt.fclk);
}
}
-
-#else
-void dsp_wdt_enable(bool enable)
-{
-}
-
-void dsp_wdt_sm_set(void *data)
-{
-}
-
-int dsp_wdt_init(void)
-{
- return 0;
-}
-
-void dsp_wdt_exit(void)
-{
-}
-#endif
-
diff --git a/drivers/staging/tidspbridge/gen/gh.c b/drivers/staging/tidspbridge/gen/gh.c
index 60aa7b063c9..25eaef782aa 100644
--- a/drivers/staging/tidspbridge/gen/gh.c
+++ b/drivers/staging/tidspbridge/gen/gh.c
@@ -95,15 +95,6 @@ void gh_delete(struct gh_t_hash_tab *hash_tab)
}
/*
- * ======== gh_exit ========
- */
-
-void gh_exit(void)
-{
- /* Do nothing */
-}
-
-/*
* ======== gh_find ========
*/
@@ -122,15 +113,6 @@ void *gh_find(struct gh_t_hash_tab *hash_tab, void *key)
}
/*
- * ======== gh_init ========
- */
-
-void gh_init(void)
-{
- /* Do nothing */
-}
-
-/*
* ======== gh_insert ========
*/
diff --git a/drivers/staging/tidspbridge/gen/uuidutil.c b/drivers/staging/tidspbridge/gen/uuidutil.c
index ff6ebadf98f..b44656cf785 100644
--- a/drivers/staging/tidspbridge/gen/uuidutil.c
+++ b/drivers/staging/tidspbridge/gen/uuidutil.c
@@ -23,9 +23,6 @@
/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>
-/* ----------------------------------- Trace & Debug */
-#include <dspbridge/dbc.h>
-
/* ----------------------------------- This */
#include <dspbridge/uuidutil.h>
@@ -41,8 +38,6 @@ void uuid_uuid_to_string(struct dsp_uuid *uuid_obj, char *sz_uuid,
{
s32 i; /* return result from snprintf. */
- DBC_REQUIRE(uuid_obj && sz_uuid);
-
i = snprintf(sz_uuid, size,
"%.8X_%.4X_%.4X_%.2X%.2X_%.2X%.2X%.2X%.2X%.2X%.2X",
uuid_obj->data1, uuid_obj->data2, uuid_obj->data3,
@@ -50,8 +45,6 @@ void uuid_uuid_to_string(struct dsp_uuid *uuid_obj, char *sz_uuid,
uuid_obj->data6[0], uuid_obj->data6[1],
uuid_obj->data6[2], uuid_obj->data6[3],
uuid_obj->data6[4], uuid_obj->data6[5]);
-
- DBC_ENSURE(i != -1);
}
static s32 uuid_hex_to_bin(char *buf, s32 len)
diff --git a/drivers/staging/tidspbridge/include/dspbridge/_chnl_sm.h b/drivers/staging/tidspbridge/include/dspbridge/_chnl_sm.h
index 6e7ab4fd8c3..cc95a18f1db 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/_chnl_sm.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/_chnl_sm.h
@@ -99,14 +99,10 @@ struct shm {
struct opp_rqst_struct opp_request;
/* load monitor information structure */
struct load_mon_struct load_mon_info;
-#ifdef CONFIG_TIDSPBRIDGE_WDT3
/* Flag for WDT enable/disable F/I clocks */
u32 wdt_setclocks;
u32 wdt_overflow; /* WDT overflow time */
char dummy[176]; /* padding to 256 byte boundary */
-#else
- char dummy[184]; /* padding to 256 byte boundary */
-#endif
u32 shm_dbg_var[64]; /* shared memory debug variables */
};
diff --git a/drivers/staging/tidspbridge/include/dspbridge/chnl.h b/drivers/staging/tidspbridge/include/dspbridge/chnl.h
index 92f6a13424f..9b018b1f9bf 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/chnl.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/chnl.h
@@ -48,7 +48,6 @@
* -ECHRNG: This manager cannot handle this many channels.
* -EEXIST: Channel manager already exists for this device.
* Requires:
- * chnl_init(void) called.
* channel_mgr != NULL.
* mgr_attrts != NULL.
* Ensures:
@@ -70,7 +69,6 @@ extern int chnl_create(struct chnl_mgr **channel_mgr,
* 0: Success.
* -EFAULT: hchnl_mgr was invalid.
* Requires:
- * chnl_init(void) called.
* Ensures:
* 0: Cancels I/O on each open channel.
* Closes each open channel.
@@ -79,31 +77,4 @@ extern int chnl_create(struct chnl_mgr **channel_mgr,
*/
extern int chnl_destroy(struct chnl_mgr *hchnl_mgr);
-/*
- * ======== chnl_exit ========
- * Purpose:
- * Discontinue usage of the CHNL module.
- * Parameters:
- * Returns:
- * Requires:
- * chnl_init(void) previously called.
- * Ensures:
- * Resources, if any acquired in chnl_init(void), are freed when the last
- * client of CHNL calls chnl_exit(void).
- */
-extern void chnl_exit(void);
-
-/*
- * ======== chnl_init ========
- * Purpose:
- * Initialize the CHNL module's private state.
- * Parameters:
- * Returns:
- * TRUE if initialized; FALSE if error occurred.
- * Requires:
- * Ensures:
- * A requirement for each of the other public CHNL functions.
- */
-extern bool chnl_init(void);
-
#endif /* CHNL_ */
diff --git a/drivers/staging/tidspbridge/include/dspbridge/cmm.h b/drivers/staging/tidspbridge/include/dspbridge/cmm.h
index aff22051cf5..c66bcf7ea90 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/cmm.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/cmm.h
@@ -79,7 +79,6 @@ extern void *cmm_calloc_buf(struct cmm_object *hcmm_mgr,
* -EPERM: Failed to initialize critical sect sync object.
*
* Requires:
- * cmm_init(void) called.
* ph_cmm_mgr != NULL.
* mgr_attrts->min_block_size >= 4 bytes.
* Ensures:
@@ -111,20 +110,6 @@ extern int cmm_create(struct cmm_object **ph_cmm_mgr,
extern int cmm_destroy(struct cmm_object *hcmm_mgr, bool force);
/*
- * ======== cmm_exit ========
- * Purpose:
- * Discontinue usage of module. Cleanup CMM module if CMM cRef reaches zero.
- * Parameters:
- * n/a
- * Returns:
- * n/a
- * Requires:
- * CMM is initialized.
- * Ensures:
- */
-extern void cmm_exit(void);
-
-/*
* ======== cmm_free_buf ========
* Purpose:
* Free the given buffer.
@@ -185,19 +170,6 @@ extern int cmm_get_info(struct cmm_object *hcmm_mgr,
struct cmm_info *cmm_info_obj);
/*
- * ======== cmm_init ========
- * Purpose:
- * Initializes private state of CMM module.
- * Parameters:
- * Returns:
- * TRUE if initialized; FALSE if error occurred.
- * Requires:
- * Ensures:
- * CMM initialized.
- */
-extern bool cmm_init(void);
-
-/*
* ======== cmm_register_gppsm_seg ========
* Purpose:
* Register a block of SM with the CMM.
@@ -333,7 +305,6 @@ extern int cmm_xlator_free_buf(struct cmm_xlatorobject *xlator,
* 0: Success.
* -EFAULT: Bad translator handle.
* Requires:
- * (refs > 0)
* (paddr != NULL)
* (ul_size > 0)
* Ensures:
@@ -355,7 +326,6 @@ extern int cmm_xlator_info(struct cmm_xlatorobject *xlator,
* Returns:
* Valid address on success, else NULL.
* Requires:
- * refs > 0
* paddr != NULL
* xtype >= CMM_VA2PA) && (xtype <= CMM_DSPPA2PA)
* Ensures:
diff --git a/drivers/staging/tidspbridge/include/dspbridge/cod.h b/drivers/staging/tidspbridge/include/dspbridge/cod.h
index cb684c11b30..ba2005d0242 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/cod.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/cod.h
@@ -100,21 +100,6 @@ extern int cod_create(struct cod_manager **mgr,
extern void cod_delete(struct cod_manager *cod_mgr_obj);
/*
- * ======== cod_exit ========
- * Purpose:
- * Discontinue usage of the COD module.
- * Parameters:
- * None.
- * Returns:
- * None.
- * Requires:
- * COD initialized.
- * Ensures:
- * Resources acquired in cod_init(void) are freed.
- */
-extern void cod_exit(void);
-
-/*
* ======== cod_get_base_lib ========
* Purpose:
* Get handle to the base image DBL library.
@@ -243,20 +228,6 @@ extern int cod_get_sym_value(struct cod_manager *cod_mgr_obj,
char *str_sym, u32 * pul_value);
/*
- * ======== cod_init ========
- * Purpose:
- * Initialize the COD module's private state.
- * Parameters:
- * None.
- * Returns:
- * TRUE if initialized; FALSE if error occurred.
- * Requires:
- * Ensures:
- * A requirement for each of the other public COD functions.
- */
-extern bool cod_init(void);
-
-/*
* ======== cod_load_base ========
* Purpose:
* Load the initial program image, optionally with command-line arguments,
diff --git a/drivers/staging/tidspbridge/include/dspbridge/dbc.h b/drivers/staging/tidspbridge/include/dspbridge/dbc.h
deleted file mode 100644
index 463760f499a..00000000000
--- a/drivers/staging/tidspbridge/include/dspbridge/dbc.h
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * dbc.h
- *
- * DSP-BIOS Bridge driver support functions for TI OMAP processors.
- *
- * "Design by Contract" programming macros.
- *
- * Notes:
- * Requires that the GT->ERROR function has been defaulted to a valid
- * error handler for the given execution environment.
- *
- * Does not require that GT_init() be called.
- *
- * Copyright (C) 2008 Texas Instruments, Inc.
- *
- * This package is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
- * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
- */
-
-#ifndef DBC_
-#define DBC_
-
-/* Assertion Macros: */
-#ifdef CONFIG_TIDSPBRIDGE_DEBUG
-
-#define DBC_ASSERT(exp) \
- if (!(exp)) \
- pr_err("%s, line %d: Assertion (" #exp ") failed.\n", \
- __FILE__, __LINE__)
-#define DBC_REQUIRE DBC_ASSERT /* Function Precondition. */
-#define DBC_ENSURE DBC_ASSERT /* Function Postcondition. */
-
-#else
-
-#define DBC_ASSERT(exp) {}
-#define DBC_REQUIRE(exp) {}
-#define DBC_ENSURE(exp) {}
-
-#endif /* DEBUG */
-
-#endif /* DBC_ */
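
With dbc.h gone, the checks that still carry value are expected to turn into
explicit error paths rather than debug-only assertions. A sketch of the
idiomatic replacement for a former DBC_REQUIRE on arguments (hypothetical
function, modeled on the call sites cleaned up above):

	int example_create(struct chnl_mgr **channel_mgr, void *mgr_attrts)
	{
		/* Formerly DBC_REQUIRE(channel_mgr && mgr_attrts): now a hard check */
		if (!channel_mgr || !mgr_attrts)
			return -EFAULT;

		/* ... */
		return 0;
	}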
diff --git a/drivers/staging/tidspbridge/include/dspbridge/dev.h b/drivers/staging/tidspbridge/include/dspbridge/dev.h
index f92b4be0b41..fa2d79ef6cc 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/dev.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/dev.h
@@ -478,33 +478,6 @@ extern int dev_get_bridge_context(struct dev_object *hdev_obj,
**phbridge_context);
/*
- * ======== dev_exit ========
- * Purpose:
- * Decrement reference count, and free resources when reference count is
- * 0.
- * Parameters:
- * Returns:
- * Requires:
- * DEV is initialized.
- * Ensures:
- * When reference count == 0, DEV's private resources are freed.
- */
-extern void dev_exit(void);
-
-/*
- * ======== dev_init ========
- * Purpose:
- * Initialize DEV's private state, keeping a reference count on each call.
- * Parameters:
- * Returns:
- * TRUE if initialized; FALSE if error occurred.
- * Requires:
- * Ensures:
- * TRUE: A requirement for the other public DEV functions.
- */
-extern bool dev_init(void);
-
-/*
* ======== dev_insert_proc_object ========
* Purpose:
* Inserts the Processor Object into the List of PROC Objects
diff --git a/drivers/staging/tidspbridge/include/dspbridge/disp.h b/drivers/staging/tidspbridge/include/dspbridge/disp.h
index 5dfdc8cfb93..39d3cea9ca8 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/disp.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/disp.h
@@ -53,7 +53,6 @@ struct disp_attr {
* -ENOMEM: Insufficient memory for requested resources.
* -EPERM: Unable to create dispatcher.
* Requires:
- * disp_init(void) called.
* disp_attrs != NULL.
* hdev_obj != NULL.
* dispatch_obj != NULL.
@@ -73,7 +72,6 @@ extern int disp_create(struct disp_object **dispatch_obj,
* disp_obj: Node Dispatcher object.
* Returns:
* Requires:
- * disp_init(void) called.
* Valid disp_obj.
* Ensures:
* disp_obj is invalid.
@@ -81,31 +79,6 @@ extern int disp_create(struct disp_object **dispatch_obj,
extern void disp_delete(struct disp_object *disp_obj);
/*
- * ======== disp_exit ========
- * Discontinue usage of DISP module.
- *
- * Parameters:
- * Returns:
- * Requires:
- * disp_init(void) previously called.
- * Ensures:
- * Any resources acquired in disp_init(void) will be freed when last DISP
- * client calls disp_exit(void).
- */
-extern void disp_exit(void);
-
-/*
- * ======== disp_init ========
- * Initialize the DISP module.
- *
- * Parameters:
- * Returns:
- * TRUE if initialization succeeded, FALSE otherwise.
- * Ensures:
- */
-extern bool disp_init(void);
-
-/*
* ======== disp_node_change_priority ========
* Change the priority of a node currently running on the target.
*
@@ -120,7 +93,6 @@ extern bool disp_init(void);
* 0: Success.
* -ETIME: A timeout occurred before the DSP responded.
* Requires:
- * disp_init(void) called.
* Valid disp_obj.
* hnode != NULL.
* Ensures:
@@ -148,7 +120,6 @@ extern int disp_node_change_priority(struct disp_object
* -ETIME: A timeout occurred before the DSP responded.
* -EPERM: A failure occurred, unable to create node.
* Requires:
- * disp_init(void) called.
* Valid disp_obj.
* pargs != NULL.
* hnode != NULL.
@@ -178,7 +149,6 @@ extern int disp_node_create(struct disp_object *disp_obj,
* 0: Success.
* -ETIME: A timeout occurred before the DSP responded.
* Requires:
- * disp_init(void) called.
* Valid disp_obj.
* hnode != NULL.
* Ensures:
@@ -204,7 +174,6 @@ extern int disp_node_delete(struct disp_object *disp_obj,
* 0: Success.
* -ETIME: A timeout occurred before the DSP responded.
* Requires:
- * disp_init(void) called.
* Valid disp_obj.
* hnode != NULL.
* Ensures:
diff --git a/drivers/staging/tidspbridge/include/dspbridge/dmm.h b/drivers/staging/tidspbridge/include/dspbridge/dmm.h
index 6c58335c5f6..c3487be8fcf 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/dmm.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/dmm.h
@@ -61,10 +61,6 @@ extern int dmm_create(struct dmm_object **dmm_manager,
struct dev_object *hdev_obj,
const struct dmm_mgrattrs *mgr_attrts);
-extern bool dmm_init(void);
-
-extern void dmm_exit(void);
-
extern int dmm_create_tables(struct dmm_object *dmm_mgr,
u32 addr, u32 size);
diff --git a/drivers/staging/tidspbridge/include/dspbridge/drv.h b/drivers/staging/tidspbridge/include/dspbridge/drv.h
index 9cdbd955dce..b0c7708321b 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/drv.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/drv.h
@@ -199,17 +199,6 @@ extern int drv_create(struct drv_object **drv_obj);
extern int drv_destroy(struct drv_object *driver_obj);
/*
- * ======== drv_exit ========
- * Purpose:
- * Exit the DRV module, freeing any modules initialized in drv_init.
- * Parameters:
- * Returns:
- * Requires:
- * Ensures:
- */
-extern void drv_exit(void);
-
-/*
* ======== drv_get_first_dev_object ========
* Purpose:
* Returns the Ptr to the FirstDev Object in the List
@@ -294,18 +283,6 @@ extern u32 drv_get_next_dev_object(u32 hdev_obj);
extern u32 drv_get_next_dev_extension(u32 dev_extension);
/*
- * ======== drv_init ========
- * Purpose:
- * Initialize the DRV module.
- * Parameters:
- * Returns:
- * TRUE if success; FALSE otherwise.
- * Requires:
- * Ensures:
- */
-extern int drv_init(void);
-
-/*
* ======== drv_insert_dev_object ========
* Purpose:
* Insert a DeviceObject into the list of Driver object.
diff --git a/drivers/staging/tidspbridge/include/dspbridge/gh.h b/drivers/staging/tidspbridge/include/dspbridge/gh.h
index 9de291d1f56..da85079dbfb 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/gh.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/gh.h
@@ -23,9 +23,7 @@ extern struct gh_t_hash_tab *gh_create(u16 max_bucket, u16 val_size,
bool(*match) (void *, void *),
void (*delete) (void *));
extern void gh_delete(struct gh_t_hash_tab *hash_tab);
-extern void gh_exit(void);
extern void *gh_find(struct gh_t_hash_tab *hash_tab, void *key);
-extern void gh_init(void);
extern void *gh_insert(struct gh_t_hash_tab *hash_tab, void *key, void *value);
#ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
void gh_iterate(struct gh_t_hash_tab *hash_tab,
diff --git a/drivers/staging/tidspbridge/include/dspbridge/io.h b/drivers/staging/tidspbridge/include/dspbridge/io.h
index 500bbd71684..75057185690 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/io.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/io.h
@@ -55,7 +55,6 @@ struct io_attrs {
* -EINVAL: Invalid DSP word size (must be > 0).
* Invalid base address for DSP communications.
* Requires:
- * io_init(void) called.
* io_man != NULL.
* mgr_attrts != NULL.
* Ensures:
@@ -74,36 +73,8 @@ extern int io_create(struct io_mgr **io_man,
* 0: Success.
* -EFAULT: hio_mgr was invalid.
* Requires:
- * io_init(void) called.
* Ensures:
*/
extern int io_destroy(struct io_mgr *hio_mgr);
-/*
- * ======== io_exit ========
- * Purpose:
- * Discontinue usage of the IO module.
- * Parameters:
- * Returns:
- * Requires:
- * io_init(void) previously called.
- * Ensures:
- * Resources, if any acquired in io_init(void), are freed when the last
- * client of IO calls io_exit(void).
- */
-extern void io_exit(void);
-
-/*
- * ======== io_init ========
- * Purpose:
- * Initialize the IO module's private state.
- * Parameters:
- * Returns:
- * TRUE if initialized; FALSE if error occurred.
- * Requires:
- * Ensures:
- * A requirement for each of the other public CHNL functions.
- */
-extern bool io_init(void);
-
#endif /* CHNL_ */
diff --git a/drivers/staging/tidspbridge/include/dspbridge/io_sm.h b/drivers/staging/tidspbridge/include/dspbridge/io_sm.h
index a054dad2133..903ff12b14d 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/io_sm.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/io_sm.h
@@ -154,8 +154,6 @@ int dump_dsp_stack(struct bridge_dev_context *bridge_context);
void dump_dl_modules(struct bridge_dev_context *bridge_context);
-#endif
-#if defined(CONFIG_TIDSPBRIDGE_BACKTRACE) || defined(CONFIG_TIDSPBRIDGE_DEBUG)
void print_dsp_debug_trace(struct io_mgr *hio_mgr);
#endif
diff --git a/drivers/staging/tidspbridge/include/dspbridge/msg.h b/drivers/staging/tidspbridge/include/dspbridge/msg.h
index 95778bcb5aa..2c8712c933f 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/msg.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/msg.h
@@ -34,7 +34,6 @@
* msg_callback: Called whenever an RMS_EXIT message is received.
* Returns:
* Requires:
- * msg_mod_init(void) called.
* msg_man != NULL.
* hdev_obj != NULL.
* msg_callback != NULL.
@@ -52,35 +51,9 @@ extern int msg_create(struct msg_mgr **msg_man,
* hmsg_mgr: Handle returned from msg_create().
* Returns:
* Requires:
- * msg_mod_init(void) called.
* Valid hmsg_mgr.
* Ensures:
*/
extern void msg_delete(struct msg_mgr *hmsg_mgr);
-/*
- * ======== msg_exit ========
- * Purpose:
- * Discontinue usage of msg_ctrl module.
- * Parameters:
- * Returns:
- * Requires:
- * msg_mod_init(void) successfully called before.
- * Ensures:
- * Any resources acquired in msg_mod_init(void) will be freed when last
- * msg_ctrl client calls msg_exit(void).
- */
-extern void msg_exit(void);
-
-/*
- * ======== msg_mod_init ========
- * Purpose:
- * Initialize the msg_ctrl module.
- * Parameters:
- * Returns:
- * TRUE if initialization succeeded, FALSE otherwise.
- * Ensures:
- */
-extern bool msg_mod_init(void);
-
#endif /* MSG_ */
diff --git a/drivers/staging/tidspbridge/include/dspbridge/nldr.h b/drivers/staging/tidspbridge/include/dspbridge/nldr.h
index d9653ee667e..c5e48ca6c54 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/nldr.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/nldr.h
@@ -36,7 +36,6 @@ extern int nldr_create(struct nldr_object **nldr,
const struct nldr_attrs *pattrs);
extern void nldr_delete(struct nldr_object *nldr_obj);
-extern void nldr_exit(void);
extern int nldr_get_fxn_addr(struct nldr_nodeobject *nldr_node_obj,
char *str_fxn, u32 * addr);
@@ -44,7 +43,6 @@ extern int nldr_get_fxn_addr(struct nldr_nodeobject *nldr_node_obj,
extern int nldr_get_rmm_manager(struct nldr_object *nldr,
struct rmm_target_obj **rmm_mgr);
-extern bool nldr_init(void);
extern int nldr_load(struct nldr_nodeobject *nldr_node_obj,
enum nldr_phase phase);
extern int nldr_unload(struct nldr_nodeobject *nldr_node_obj,
diff --git a/drivers/staging/tidspbridge/include/dspbridge/nldrdefs.h b/drivers/staging/tidspbridge/include/dspbridge/nldrdefs.h
index ee3a85f08fc..7e3c7f58b49 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/nldrdefs.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/nldrdefs.h
@@ -119,7 +119,6 @@ enum nldr_phase {
* 0: Success.
* -ENOMEM: Insufficient memory on GPP.
* Requires:
- * nldr_init(void) called.
* Valid nldr_obj.
* node_props != NULL.
* nldr_nodeobj != NULL.
@@ -148,7 +147,6 @@ typedef int(*nldr_allocatefxn) (struct nldr_object *nldr_obj,
* 0: Success;
* -ENOMEM: Insufficient memory for requested resources.
* Requires:
- * nldr_init(void) called.
* nldr != NULL.
* hdev_obj != NULL.
* pattrs != NULL.
@@ -168,7 +166,6 @@ typedef int(*nldr_createfxn) (struct nldr_object **nldr,
* nldr_obj: Node manager object.
* Returns:
* Requires:
- * nldr_init(void) called.
* Valid nldr_obj.
* Ensures:
* nldr_obj invalid
@@ -176,20 +173,6 @@ typedef int(*nldr_createfxn) (struct nldr_object **nldr,
typedef void (*nldr_deletefxn) (struct nldr_object *nldr_obj);
/*
- * ======== nldr_exit ========
- * Discontinue usage of NLDR module.
- *
- * Parameters:
- * Returns:
- * Requires:
- * nldr_init(void) successfully called before.
- * Ensures:
- * Any resources acquired in nldr_init(void) will be freed when last NLDR
- * client calls nldr_exit(void).
- */
-typedef void (*nldr_exitfxn) (void);
-
-/*
* ======== NLDR_Free ========
* Free resources allocated in nldr_allocate.
*
@@ -197,7 +180,6 @@ typedef void (*nldr_exitfxn) (void);
* nldr_node_obj: Handle returned from nldr_allocate().
* Returns:
* Requires:
- * nldr_init(void) called.
* Valid nldr_node_obj.
* Ensures:
*/
@@ -216,7 +198,6 @@ typedef void (*nldr_freefxn) (struct nldr_nodeobject *nldr_node_obj);
* 0: Success.
* -ESPIPE: Address of function not found.
* Requires:
- * nldr_init(void) called.
* Valid nldr_node_obj.
* addr != NULL;
* str_fxn != NULL;
@@ -227,17 +208,6 @@ typedef int(*nldr_getfxnaddrfxn) (struct nldr_nodeobject
char *str_fxn, u32 * addr);
/*
- * ======== nldr_init ========
- * Initialize the NLDR module.
- *
- * Parameters:
- * Returns:
- * TRUE if initialization succeeded, FALSE otherwise.
- * Ensures:
- */
-typedef bool(*nldr_initfxn) (void);
-
-/*
* ======== nldr_load ========
* Load create, delete, or execute phase function of a node on the DSP.
*
@@ -251,7 +221,6 @@ typedef bool(*nldr_initfxn) (void);
* is already in use.
* -EILSEQ: Failure in dynamic loader library.
* Requires:
- * nldr_init(void) called.
* Valid nldr_node_obj.
* Ensures:
*/
@@ -269,7 +238,6 @@ typedef int(*nldr_loadfxn) (struct nldr_nodeobject *nldr_node_obj,
* 0: Success.
* -ENOMEM: Insufficient memory on GPP.
* Requires:
- * nldr_init(void) called.
* Valid nldr_node_obj.
* Ensures:
*/
@@ -283,9 +251,7 @@ struct node_ldr_fxns {
nldr_allocatefxn allocate;
nldr_createfxn create;
nldr_deletefxn delete;
- nldr_exitfxn exit;
nldr_getfxnaddrfxn get_fxn_addr;
- nldr_initfxn init;
nldr_loadfxn load;
nldr_unloadfxn unload;
};
diff --git a/drivers/staging/tidspbridge/include/dspbridge/node.h b/drivers/staging/tidspbridge/include/dspbridge/node.h
index 16371d818e3..7397b7a12f7 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/node.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/node.h
@@ -47,7 +47,6 @@
* -EPERM: A failure occurred, unable to allocate node.
* -EBADR: Proccessor is not in the running state.
* Requires:
- * node_init(void) called.
* hprocessor != NULL.
* node_uuid != NULL.
* noderes != NULL.
@@ -81,7 +80,6 @@ extern int node_allocate(struct proc_object *hprocessor,
* -EPERM: General Failure.
* -EINVAL: Invalid Size.
* Requires:
- * node_init(void) called.
* pbuffer != NULL.
* Ensures:
*/
@@ -106,7 +104,6 @@ extern int node_alloc_msg_buf(struct node_object *hnode,
* or NODE_RUNNING state.
* -ETIME: A timeout occurred before the DSP responded.
* Requires:
- * node_init(void) called.
* Ensures:
* 0 && (Node's current priority == prio)
*/
@@ -157,7 +154,6 @@ extern int node_change_priority(struct node_object *hnode, s32 prio);
* Device node to device node, or device node to
* GPP), the two nodes are on different DSPs.
* Requires:
- * node_init(void) called.
* Ensures:
*/
extern int node_connect(struct node_object *node1,
@@ -185,7 +181,6 @@ extern int node_connect(struct node_object *node1,
* -ETIME: A timeout occurred before the DSP responded.
* -EPERM: A failure occurred, unable to create node.
* Requires:
- * node_init(void) called.
* Ensures:
*/
extern int node_create(struct node_object *hnode);
@@ -206,7 +201,6 @@ extern int node_create(struct node_object *hnode);
* -ENOMEM: Insufficient memory for requested resources.
* -EPERM: General failure.
* Requires:
- * node_init(void) called.
* node_man != NULL.
* hdev_obj != NULL.
* Ensures:
@@ -234,7 +228,6 @@ extern int node_create_mgr(struct node_mgr **node_man,
* -EPERM: A failure occurred in deleting the node.
* -ESPIPE: Delete function not found in the COFF file.
* Requires:
- * node_init(void) called.
* Ensures:
* 0: hnode is invalid.
*/
@@ -250,7 +243,6 @@ extern int node_delete(struct node_res_object *noderes,
* Returns:
* 0: Success.
* Requires:
- * node_init(void) called.
* Valid hnode_mgr.
* Ensures:
*/
@@ -287,20 +279,6 @@ extern int node_enum_nodes(struct node_mgr *hnode_mgr,
u32 *pu_allocated);
/*
- * ======== node_exit ========
- * Purpose:
- * Discontinue usage of NODE module.
- * Parameters:
- * Returns:
- * Requires:
- * node_init(void) successfully called before.
- * Ensures:
- * Any resources acquired in node_init(void) will be freed when last NODE
- * client calls node_exit(void).
- */
-extern void node_exit(void);
-
-/*
* ======== node_free_msg_buf ========
* Purpose:
* Free a message buffer previously allocated with node_alloc_msg_buf.
@@ -313,7 +291,6 @@ extern void node_exit(void);
* -EFAULT: Invalid node handle.
* -EPERM: Failure to free the buffer.
* Requires:
- * node_init(void) called.
* pbuffer != NULL.
* Ensures:
*/
@@ -336,7 +313,6 @@ extern int node_free_msg_buf(struct node_object *hnode,
* 0: Success.
* -EFAULT: Invalid hnode.
* Requires:
- * node_init(void) called.
* pattr != NULL.
* Ensures:
* 0: *pattrs contains the node's current attributes.
@@ -363,7 +339,6 @@ extern int node_get_attr(struct node_object *hnode,
* Error occurred while trying to retrieve a message.
* -ETIME: Timeout occurred and no message is available.
* Requires:
- * node_init(void) called.
* message != NULL.
* Ensures:
*/
@@ -386,17 +361,6 @@ extern int node_get_nldr_obj(struct node_mgr *hnode_mgr,
struct nldr_object **nldr_ovlyobj);
/*
- * ======== node_init ========
- * Purpose:
- * Initialize the NODE module.
- * Parameters:
- * Returns:
- * TRUE if initialization succeeded, FALSE otherwise.
- * Ensures:
- */
-extern bool node_init(void);
-
-/*
* ======== node_on_exit ========
* Purpose:
* Gets called when RMS_EXIT is received for a node. PROC needs to pass
@@ -425,7 +389,6 @@ void node_on_exit(struct node_object *hnode, s32 node_status);
* -ETIME: A timeout occurred before the DSP responded.
* DSP_EWRONGSTSATE: Node is not in NODE_RUNNING state.
* Requires:
- * node_init(void) called.
* Ensures:
*/
extern int node_pause(struct node_object *hnode);
@@ -449,7 +412,6 @@ extern int node_pause(struct node_object *hnode);
* -ETIME: Timeout occurred before message could be set.
* -EBADR: Node is in invalid state for sending messages.
* Requires:
- * node_init(void) called.
* pmsg != NULL.
* Ensures:
*/
@@ -473,7 +435,6 @@ extern int node_put_message(struct node_object *hnode,
* -ENOSYS: Notification type specified by notify_type is not
* supported.
* Requires:
- * node_init(void) called.
* hnotification != NULL.
* Ensures:
*/
@@ -500,7 +461,6 @@ extern int node_register_notify(struct node_object *hnode,
* DSP_EWRONGSTSATE: Node is not in NODE_PAUSED or NODE_CREATED state.
* -ESPIPE: Execute function not found in the COFF file.
* Requires:
- * node_init(void) called.
* Ensures:
*/
extern int node_run(struct node_object *hnode);
@@ -523,7 +483,6 @@ extern int node_run(struct node_object *hnode);
* Unable to terminate the node.
* -EBADR: Operation not valid for the current node state.
* Requires:
- * node_init(void) called.
* pstatus != NULL.
* Ensures:
*/
diff --git a/drivers/staging/tidspbridge/include/dspbridge/nodepriv.h b/drivers/staging/tidspbridge/include/dspbridge/nodepriv.h
index 9c1e06758c8..d5b54bb81e8 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/nodepriv.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/nodepriv.h
@@ -96,7 +96,6 @@ struct node_createargs {
* -EINVAL: The node's stream corresponding to index and dir
* is not a stream to or from the host.
* Requires:
- * node_init(void) called.
* Valid dir.
* chan_id != NULL.
* Ensures:
diff --git a/drivers/staging/tidspbridge/include/dspbridge/proc.h b/drivers/staging/tidspbridge/include/dspbridge/proc.h
index f00dffd5198..a82380ebc04 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/proc.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/proc.h
@@ -189,20 +189,6 @@ extern int proc_get_resource_info(void *hprocessor,
u32 resource_info_size);
/*
- * ======== proc_exit ========
- * Purpose:
- * Decrement reference count, and free resources when reference count is
- * 0.
- * Parameters:
- * Returns:
- * Requires:
- * PROC is initialized.
- * Ensures:
- * When reference count == 0, PROC's private resources are freed.
- */
-extern void proc_exit(void);
-
-/*
* ======== proc_get_dev_object =========
* Purpose:
* Returns the DEV Hanlde for a given Processor handle
@@ -223,20 +209,6 @@ extern int proc_get_dev_object(void *hprocessor,
struct dev_object **device_obj);
/*
- * ======== proc_init ========
- * Purpose:
- * Initialize PROC's private state, keeping a reference count on each
- * call.
- * Parameters:
- * Returns:
- * TRUE if initialized; FALSE if error occurred.
- * Requires:
- * Ensures:
- * TRUE: A requirement for the other public PROC functions.
- */
-extern bool proc_init(void);
-
-/*
* ======== proc_get_state ========
* Purpose:
* Report the state of the specified DSP processor.
diff --git a/drivers/staging/tidspbridge/include/dspbridge/rmm.h b/drivers/staging/tidspbridge/include/dspbridge/rmm.h
index baea536681e..f7a4dc8ecb4 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/rmm.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/rmm.h
@@ -115,18 +115,6 @@ extern int rmm_create(struct rmm_target_obj **target_obj,
extern void rmm_delete(struct rmm_target_obj *target);
/*
- * ======== rmm_exit ========
- * Exit the RMM module
- *
- * Parameters:
- * Returns:
- * Requires:
- * rmm_init successfully called.
- * Ensures:
- */
-extern void rmm_exit(void);
-
-/*
* ======== rmm_free ========
* Free or unreserve memory allocated through rmm_alloc().
*
@@ -148,19 +136,6 @@ extern bool rmm_free(struct rmm_target_obj *target, u32 segid, u32 dsp_addr,
u32 size, bool reserved);
/*
- * ======== rmm_init ========
- * Initialize the RMM module
- *
- * Parameters:
- * Returns:
- * TRUE: Success.
- * FALSE: Failure.
- * Requires:
- * Ensures:
- */
-extern bool rmm_init(void);
-
-/*
* ======== rmm_stat ========
* Obtain memory segment status
*
diff --git a/drivers/staging/tidspbridge/include/dspbridge/strm.h b/drivers/staging/tidspbridge/include/dspbridge/strm.h
index 613fe53dd23..dacf0c234fd 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/strm.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/strm.h
@@ -40,7 +40,6 @@
* -EPERM: Failure occurred, unable to allocate buffers.
* -EINVAL: usize must be > 0 bytes.
* Requires:
- * strm_init(void) called.
* ap_buffer != NULL.
* Ensures:
*/
@@ -63,7 +62,6 @@ extern int strm_allocate_buffer(struct strm_res_object *strmres,
* been reclaimed.
* -EPERM: Failure to close stream.
* Requires:
- * strm_init(void) called.
* Ensures:
*/
extern int strm_close(struct strm_res_object *strmres,
@@ -83,7 +81,6 @@ extern int strm_close(struct strm_res_object *strmres,
* -ENOMEM: Insufficient memory for requested resources.
* -EPERM: General failure.
* Requires:
- * strm_init(void) called.
* strm_man != NULL.
* dev_obj != NULL.
* Ensures:
@@ -101,7 +98,6 @@ extern int strm_create(struct strm_mgr **strm_man,
* strm_mgr_obj: Handle to STRM manager object from strm_create.
* Returns:
* Requires:
- * strm_init(void) called.
* Valid strm_mgr_obj.
* Ensures:
* strm_mgr_obj is not valid.
@@ -109,18 +105,6 @@ extern int strm_create(struct strm_mgr **strm_man,
extern void strm_delete(struct strm_mgr *strm_mgr_obj);
/*
- * ======== strm_exit ========
- * Purpose:
- * Discontinue usage of STRM module.
- * Parameters:
- * Returns:
- * Requires:
- * strm_init(void) successfully called before.
- * Ensures:
- */
-extern void strm_exit(void);
-
-/*
* ======== strm_free_buffer ========
* Purpose:
* Free buffer(s) allocated with strm_allocate_buffer.
@@ -133,7 +117,6 @@ extern void strm_exit(void);
* -EFAULT: Invalid stream handle.
* -EPERM: Failure occurred, unable to free buffers.
* Requires:
- * strm_init(void) called.
* ap_buffer != NULL.
* Ensures:
*/
@@ -156,7 +139,6 @@ extern int strm_free_buffer(struct strm_res_object *strmres,
* -EINVAL: stream_info_size < sizeof(dsp_streaminfo).
* -EPERM: Unable to get stream info.
* Requires:
- * strm_init(void) called.
* stream_info != NULL.
* Ensures:
*/
@@ -184,24 +166,11 @@ extern int strm_get_info(struct strm_object *stream_obj,
* -ETIME: A timeout occurred before the stream could be idled.
* -EPERM: Unable to idle stream.
* Requires:
- * strm_init(void) called.
* Ensures:
*/
extern int strm_idle(struct strm_object *stream_obj, bool flush_data);
/*
- * ======== strm_init ========
- * Purpose:
- * Initialize the STRM module.
- * Parameters:
- * Returns:
- * TRUE if initialization succeeded, FALSE otherwise.
- * Requires:
- * Ensures:
- */
-extern bool strm_init(void);
-
-/*
* ======== strm_issue ========
* Purpose:
* Send a buffer of data to a stream.
@@ -217,8 +186,7 @@ extern bool strm_init(void);
* -ENOSR: The stream is full.
* -EPERM: Failure occurred, unable to issue buffer.
* Requires:
- * strm_init(void) called.
- * pbuf != NULL.
+ * pbuf != NULL.
* Ensures:
*/
extern int strm_issue(struct strm_object *stream_obj, u8 * pbuf,
@@ -244,7 +212,6 @@ extern int strm_issue(struct strm_object *stream_obj, u8 * pbuf,
* Unable to open stream.
* -EINVAL: Invalid index.
* Requires:
- * strm_init(void) called.
* strmres != NULL.
* pattr != NULL.
* Ensures:
@@ -275,7 +242,6 @@ extern int strm_open(struct node_object *hnode, u32 dir,
* retrieved.
* -EPERM: Failure occurred, unable to reclaim buffer.
* Requires:
- * strm_init(void) called.
* buf_ptr != NULL.
* nbytes != NULL.
* pdw_arg != NULL.
@@ -302,7 +268,6 @@ extern int strm_reclaim(struct strm_object *stream_obj,
* -ENOSYS: Notification type specified by notify_type is not
* supported.
* Requires:
- * strm_init(void) called.
* hnotification != NULL.
* Ensures:
*/
@@ -328,7 +293,6 @@ extern int strm_register_notify(struct strm_object *stream_obj,
* -ETIME: A timeout occurred before a stream became ready.
* -EPERM: Failure occurred, unable to select a stream.
* Requires:
- * strm_init(void) called.
* strm_tab != NULL.
* strms > 0.
* pmask != NULL.
diff --git a/drivers/staging/tidspbridge/pmgr/chnl.c b/drivers/staging/tidspbridge/pmgr/chnl.c
index 245de82e2d6..4bd8686f235 100644
--- a/drivers/staging/tidspbridge/pmgr/chnl.c
+++ b/drivers/staging/tidspbridge/pmgr/chnl.c
@@ -24,9 +24,6 @@
/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>
-/* ----------------------------------- Trace & Debug */
-#include <dspbridge/dbc.h>
-
/* ----------------------------------- OS Adaptation Layer */
#include <dspbridge/sync.h>
@@ -41,9 +38,6 @@
/* ----------------------------------- This */
#include <dspbridge/chnl.h>
-/* ----------------------------------- Globals */
-static u32 refs;
-
/*
* ======== chnl_create ========
* Purpose:
@@ -58,10 +52,6 @@ int chnl_create(struct chnl_mgr **channel_mgr,
struct chnl_mgr *hchnl_mgr;
struct chnl_mgr_ *chnl_mgr_obj = NULL;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(channel_mgr != NULL);
- DBC_REQUIRE(mgr_attrts != NULL);
-
*channel_mgr = NULL;
/* Validate args: */
@@ -99,8 +89,6 @@ int chnl_create(struct chnl_mgr **channel_mgr,
}
}
- DBC_ENSURE(status || chnl_mgr_obj);
-
return status;
}
@@ -115,8 +103,6 @@ int chnl_destroy(struct chnl_mgr *hchnl_mgr)
struct bridge_drv_interface *intf_fxns;
int status;
- DBC_REQUIRE(refs > 0);
-
if (chnl_mgr_obj) {
intf_fxns = chnl_mgr_obj->intf_fxns;
/* Let Bridge channel module destroy the chnl_mgr: */
@@ -127,36 +113,3 @@ int chnl_destroy(struct chnl_mgr *hchnl_mgr)
return status;
}
-
-/*
- * ======== chnl_exit ========
- * Purpose:
- * Discontinue usage of the CHNL module.
- */
-void chnl_exit(void)
-{
- DBC_REQUIRE(refs > 0);
-
- refs--;
-
- DBC_ENSURE(refs >= 0);
-}
-
-/*
- * ======== chnl_init ========
- * Purpose:
- * Initialize the CHNL module's private state.
- */
-bool chnl_init(void)
-{
- bool ret = true;
-
- DBC_REQUIRE(refs >= 0);
-
- if (ret)
- refs++;
-
- DBC_ENSURE((ret && (refs > 0)) || (!ret && (refs >= 0)));
-
- return ret;
-}
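
For reference: the DBC_REQUIRE/DBC_ENSURE/DBC_ASSERT calls stripped throughout this series came from <dspbridge/dbc.h>, whose include is also dropped at the top of each file. The header itself is not part of this diff, so the following is a reconstruction inferred from the pr_err() substitution made in cmm.c below, not a verbatim copy:

    /* Sketch of <dspbridge/dbc.h>; the CONFIG_TIDSPBRIDGE_DEBUG gate is assumed. */
    #ifdef CONFIG_TIDSPBRIDGE_DEBUG
    #define DBC_ASSERT(exp) \
            do { \
                    if (!(exp)) \
                            pr_err("%s, line %d: Assertion (" #exp ") failed.\n", \
                                   __FILE__, __LINE__); \
            } while (0)
    #define DBC_REQUIRE DBC_ASSERT  /* precondition check */
    #define DBC_ENSURE  DBC_ASSERT  /* postcondition check */
    #else
    #define DBC_ASSERT(exp)
    #define DBC_REQUIRE(exp)
    #define DBC_ENSURE(exp)
    #endif

Under that reading the macros compile to nothing in release builds and only log, never halt, in debug builds, so removing them takes away no error handling.
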
diff --git a/drivers/staging/tidspbridge/pmgr/cmm.c b/drivers/staging/tidspbridge/pmgr/cmm.c
index e6b2c8962f8..4a800dadd70 100644
--- a/drivers/staging/tidspbridge/pmgr/cmm.c
+++ b/drivers/staging/tidspbridge/pmgr/cmm.c
@@ -35,9 +35,6 @@
/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>
-/* ----------------------------------- Trace & Debug */
-#include <dspbridge/dbc.h>
-
/* ----------------------------------- OS Adaptation Layer */
#include <dspbridge/sync.h>
@@ -134,9 +131,6 @@ struct cmm_mnode {
u32 client_proc; /* Process that allocated this mem block */
};
-/* ----------------------------------- Globals */
-static u32 refs; /* module reference count */
-
/* ----------------------------------- Function Prototypes */
static void add_to_free_list(struct cmm_allocator *allocator,
struct cmm_mnode *pnode);
@@ -244,9 +238,6 @@ int cmm_create(struct cmm_object **ph_cmm_mgr,
struct cmm_object *cmm_obj = NULL;
int status = 0;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(ph_cmm_mgr != NULL);
-
*ph_cmm_mgr = NULL;
/* create, zero, and tag a cmm mgr object */
cmm_obj = kzalloc(sizeof(struct cmm_object), GFP_KERNEL);
@@ -256,8 +247,6 @@ int cmm_create(struct cmm_object **ph_cmm_mgr,
if (mgr_attrts == NULL)
mgr_attrts = &cmm_dfltmgrattrs; /* set defaults */
- /* 4 bytes minimum */
- DBC_ASSERT(mgr_attrts->min_block_size >= 4);
/* save away smallest block allocation for this cmm mgr */
cmm_obj->min_block_size = mgr_attrts->min_block_size;
cmm_obj->page_size = PAGE_SIZE;
@@ -283,7 +272,6 @@ int cmm_destroy(struct cmm_object *hcmm_mgr, bool force)
s32 slot_seg;
struct cmm_mnode *node, *tmp;
- DBC_REQUIRE(refs > 0);
if (!hcmm_mgr) {
status = -EFAULT;
return status;
@@ -326,19 +314,6 @@ int cmm_destroy(struct cmm_object *hcmm_mgr, bool force)
}
/*
- * ======== cmm_exit ========
- * Purpose:
- * Discontinue usage of module; free resources when reference count
- * reaches 0.
- */
-void cmm_exit(void)
-{
- DBC_REQUIRE(refs > 0);
-
- refs--;
-}
-
-/*
* ======== cmm_free_buf ========
* Purpose:
* Free the given buffer.
@@ -351,9 +326,6 @@ int cmm_free_buf(struct cmm_object *hcmm_mgr, void *buf_pa, u32 ul_seg_id)
struct cmm_allocator *allocator;
struct cmm_attrs *pattrs;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(buf_pa != NULL);
-
if (ul_seg_id == 0) {
pattrs = &cmm_dfltalctattrs;
ul_seg_id = pattrs->seg_id;
@@ -392,8 +364,6 @@ int cmm_get_handle(void *hprocessor, struct cmm_object ** ph_cmm_mgr)
int status = 0;
struct dev_object *hdev_obj;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(ph_cmm_mgr != NULL);
if (hprocessor != NULL)
status = proc_get_dev_object(hprocessor, &hdev_obj);
else
@@ -419,8 +389,6 @@ int cmm_get_info(struct cmm_object *hcmm_mgr,
struct cmm_allocator *altr;
struct cmm_mnode *curr;
- DBC_REQUIRE(cmm_info_obj != NULL);
-
if (!hcmm_mgr) {
status = -EFAULT;
return status;
@@ -464,24 +432,6 @@ int cmm_get_info(struct cmm_object *hcmm_mgr,
}
/*
- * ======== cmm_init ========
- * Purpose:
- * Initializes private state of CMM module.
- */
-bool cmm_init(void)
-{
- bool ret = true;
-
- DBC_REQUIRE(refs >= 0);
- if (ret)
- refs++;
-
- DBC_ENSURE((ret && (refs > 0)) || (!ret && (refs >= 0)));
-
- return ret;
-}
-
-/*
* ======== cmm_register_gppsm_seg ========
* Purpose:
* Register a block of SM with the CMM to be used for later GPP SM
@@ -499,13 +449,6 @@ int cmm_register_gppsm_seg(struct cmm_object *hcmm_mgr,
struct cmm_mnode *new_node;
s32 slot_seg;
- DBC_REQUIRE(ul_size > 0);
- DBC_REQUIRE(sgmt_id != NULL);
- DBC_REQUIRE(dw_gpp_base_pa != 0);
- DBC_REQUIRE(gpp_base_va != 0);
- DBC_REQUIRE((c_factor <= CMM_ADDTODSPPA) &&
- (c_factor >= CMM_SUBFROMDSPPA));
-
dev_dbg(bridge, "%s: dw_gpp_base_pa %x ul_size %x dsp_addr_offset %x "
"dw_dsp_base %x ul_dsp_size %x gpp_base_va %x\n",
__func__, dw_gpp_base_pa, ul_size, dsp_addr_offset,
@@ -589,7 +532,6 @@ int cmm_un_register_gppsm_seg(struct cmm_object *hcmm_mgr,
struct cmm_allocator *psma;
u32 ul_id = ul_seg_id;
- DBC_REQUIRE(ul_seg_id > 0);
if (!hcmm_mgr)
return -EFAULT;
@@ -635,8 +577,6 @@ static void un_register_gppsm_seg(struct cmm_allocator *psma)
{
struct cmm_mnode *curr, *tmp;
- DBC_REQUIRE(psma != NULL);
-
/* free nodes on free list */
list_for_each_entry_safe(curr, tmp, &psma->free_list, link) {
list_del(&curr->link);
@@ -664,7 +604,6 @@ static void un_register_gppsm_seg(struct cmm_allocator *psma)
static s32 get_slot(struct cmm_object *cmm_mgr_obj)
{
s32 slot_seg = -1; /* neg on failure */
- DBC_REQUIRE(cmm_mgr_obj != NULL);
/* get first available slot in cmm mgr SMSegTab[] */
for (slot_seg = 0; slot_seg < CMM_MAXGPPSEGS; slot_seg++) {
if (cmm_mgr_obj->pa_gppsm_seg_tab[slot_seg] == NULL)
@@ -687,11 +626,6 @@ static struct cmm_mnode *get_node(struct cmm_object *cmm_mgr_obj, u32 dw_pa,
{
struct cmm_mnode *pnode;
- DBC_REQUIRE(cmm_mgr_obj != NULL);
- DBC_REQUIRE(dw_pa != 0);
- DBC_REQUIRE(dw_va != 0);
- DBC_REQUIRE(ul_size != 0);
-
/* Check cmm mgr's node freelist */
if (list_empty(&cmm_mgr_obj->node_free_list)) {
pnode = kzalloc(sizeof(struct cmm_mnode), GFP_KERNEL);
@@ -719,7 +653,6 @@ static struct cmm_mnode *get_node(struct cmm_object *cmm_mgr_obj, u32 dw_pa,
*/
static void delete_node(struct cmm_object *cmm_mgr_obj, struct cmm_mnode *pnode)
{
- DBC_REQUIRE(pnode != NULL);
list_add_tail(&pnode->link, &cmm_mgr_obj->node_free_list);
}
@@ -794,9 +727,6 @@ static void add_to_free_list(struct cmm_allocator *allocator,
static struct cmm_allocator *get_allocator(struct cmm_object *cmm_mgr_obj,
u32 ul_seg_id)
{
- DBC_REQUIRE(cmm_mgr_obj != NULL);
- DBC_REQUIRE((ul_seg_id > 0) && (ul_seg_id <= CMM_MAXGPPSEGS));
-
return cmm_mgr_obj->pa_gppsm_seg_tab[ul_seg_id - 1];
}
@@ -818,10 +748,6 @@ int cmm_xlator_create(struct cmm_xlatorobject **xlator,
struct cmm_xlator *xlator_object = NULL;
int status = 0;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(xlator != NULL);
- DBC_REQUIRE(hcmm_mgr != NULL);
-
*xlator = NULL;
if (xlator_attrs == NULL)
xlator_attrs = &cmm_dfltxlatorattrs; /* set defaults */
@@ -851,13 +777,6 @@ void *cmm_xlator_alloc_buf(struct cmm_xlatorobject *xlator, void *va_buf,
void *tmp_va_buff;
struct cmm_attrs attrs;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(xlator != NULL);
- DBC_REQUIRE(xlator_obj->cmm_mgr != NULL);
- DBC_REQUIRE(va_buf != NULL);
- DBC_REQUIRE(pa_size > 0);
- DBC_REQUIRE(xlator_obj->seg_id > 0);
-
if (xlator_obj) {
attrs.seg_id = xlator_obj->seg_id;
__raw_writel(0, va_buf);
@@ -887,10 +806,6 @@ int cmm_xlator_free_buf(struct cmm_xlatorobject *xlator, void *buf_va)
int status = -EPERM;
void *buf_pa = NULL;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(buf_va != NULL);
- DBC_REQUIRE(xlator_obj->seg_id > 0);
-
if (xlator_obj) {
/* convert Va to Pa so we can free it. */
buf_pa = cmm_xlator_translate(xlator, buf_va, CMM_VA2PA);
@@ -900,7 +815,8 @@ int cmm_xlator_free_buf(struct cmm_xlatorobject *xlator, void *buf_va)
if (status) {
/* Uh oh, this shouldn't happen. Descriptor
* gone! */
- DBC_ASSERT(false); /* CMM is leaking mem */
+ pr_err("%s, line %d: Assertion failed\n",
+ __FILE__, __LINE__);
}
}
}
@@ -918,10 +834,6 @@ int cmm_xlator_info(struct cmm_xlatorobject *xlator, u8 ** paddr,
struct cmm_xlator *xlator_obj = (struct cmm_xlator *)xlator;
int status = 0;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(paddr != NULL);
- DBC_REQUIRE((segm_id > 0) && (segm_id <= CMM_MAXGPPSEGS));
-
if (xlator_obj) {
if (set_info) {
/* set translators virtual address range */
@@ -948,16 +860,11 @@ void *cmm_xlator_translate(struct cmm_xlatorobject *xlator, void *paddr,
struct cmm_allocator *allocator = NULL;
u32 dw_offset = 0;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(paddr != NULL);
- DBC_REQUIRE((xtype >= CMM_VA2PA) && (xtype <= CMM_DSPPA2PA));
-
if (!xlator_obj)
goto loop_cont;
cmm_mgr_obj = (struct cmm_object *)xlator_obj->cmm_mgr;
/* get this translator's default SM allocator */
- DBC_ASSERT(xlator_obj->seg_id > 0);
allocator = cmm_mgr_obj->pa_gppsm_seg_tab[xlator_obj->seg_id - 1];
if (!allocator)
goto loop_cont;
diff --git a/drivers/staging/tidspbridge/pmgr/cod.c b/drivers/staging/tidspbridge/pmgr/cod.c
index 1a29264b585..4007826f7ab 100644
--- a/drivers/staging/tidspbridge/pmgr/cod.c
+++ b/drivers/staging/tidspbridge/pmgr/cod.c
@@ -30,9 +30,6 @@
/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>
-/* ----------------------------------- Trace & Debug */
-#include <dspbridge/dbc.h>
-
/* ----------------------------------- Platform Manager */
/* Include appropriate loader header file */
#include <dspbridge/dbll.h>
@@ -61,8 +58,6 @@ struct cod_libraryobj {
struct cod_manager *cod_mgr;
};
-static u32 refs = 0L;
-
static struct dbll_fxns ldr_fxns = {
(dbll_close_fxn) dbll_close,
(dbll_create_fxn) dbll_create,
@@ -183,10 +178,6 @@ void cod_close(struct cod_libraryobj *lib)
{
struct cod_manager *hmgr;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(lib != NULL);
- DBC_REQUIRE(lib->cod_mgr);
-
hmgr = lib->cod_mgr;
hmgr->fxns.close_fxn(lib->dbll_lib);
@@ -208,9 +199,6 @@ int cod_create(struct cod_manager **mgr, char *str_zl_file)
struct dbll_attrs zl_attrs;
int status = 0;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(mgr != NULL);
-
/* assume failure */
*mgr = NULL;
@@ -263,9 +251,6 @@ int cod_create(struct cod_manager **mgr, char *str_zl_file)
*/
void cod_delete(struct cod_manager *cod_mgr_obj)
{
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(cod_mgr_obj);
-
if (cod_mgr_obj->base_lib) {
if (cod_mgr_obj->loaded)
cod_mgr_obj->fxns.unload_fxn(cod_mgr_obj->base_lib,
@@ -281,21 +266,6 @@ void cod_delete(struct cod_manager *cod_mgr_obj)
}
/*
- * ======== cod_exit ========
- * Purpose:
- * Discontinue usage of the COD module.
- *
- */
-void cod_exit(void)
-{
- DBC_REQUIRE(refs > 0);
-
- refs--;
-
- DBC_ENSURE(refs >= 0);
-}
-
-/*
* ======== cod_get_base_lib ========
* Purpose:
* Get handle to the base image DBL library.
@@ -305,10 +275,6 @@ int cod_get_base_lib(struct cod_manager *cod_mgr_obj,
{
int status = 0;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(cod_mgr_obj);
- DBC_REQUIRE(plib != NULL);
-
*plib = (struct dbll_library_obj *)cod_mgr_obj->base_lib;
return status;
@@ -322,10 +288,6 @@ int cod_get_base_name(struct cod_manager *cod_mgr_obj, char *sz_name,
{
int status = 0;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(cod_mgr_obj);
- DBC_REQUIRE(sz_name != NULL);
-
if (usize <= COD_MAXPATHLENGTH)
strncpy(sz_name, cod_mgr_obj->sz_zl_file, usize);
else
@@ -342,10 +304,6 @@ int cod_get_base_name(struct cod_manager *cod_mgr_obj, char *sz_name,
*/
int cod_get_entry(struct cod_manager *cod_mgr_obj, u32 *entry_pt)
{
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(cod_mgr_obj);
- DBC_REQUIRE(entry_pt != NULL);
-
*entry_pt = cod_mgr_obj->entry;
return 0;
@@ -361,10 +319,6 @@ int cod_get_loader(struct cod_manager *cod_mgr_obj,
{
int status = 0;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(cod_mgr_obj);
- DBC_REQUIRE(loader != NULL);
-
*loader = (struct dbll_tar_obj *)cod_mgr_obj->target;
return status;
@@ -382,13 +336,6 @@ int cod_get_section(struct cod_libraryobj *lib, char *str_sect,
struct cod_manager *cod_mgr_obj;
int status = 0;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(lib != NULL);
- DBC_REQUIRE(lib->cod_mgr);
- DBC_REQUIRE(str_sect != NULL);
- DBC_REQUIRE(addr != NULL);
- DBC_REQUIRE(len != NULL);
-
*addr = 0;
*len = 0;
if (lib != NULL) {
@@ -399,8 +346,6 @@ int cod_get_section(struct cod_libraryobj *lib, char *str_sect,
status = -ESPIPE;
}
- DBC_ENSURE(!status || ((*addr == 0) && (*len == 0)));
-
return status;
}
@@ -417,11 +362,6 @@ int cod_get_sym_value(struct cod_manager *cod_mgr_obj, char *str_sym,
{
struct dbll_sym_val *dbll_sym;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(cod_mgr_obj);
- DBC_REQUIRE(str_sym != NULL);
- DBC_REQUIRE(pul_value != NULL);
-
dev_dbg(bridge, "%s: cod_mgr_obj: %p str_sym: %s pul_value: %p\n",
__func__, cod_mgr_obj, str_sym, pul_value);
if (cod_mgr_obj->base_lib) {
@@ -442,25 +382,6 @@ int cod_get_sym_value(struct cod_manager *cod_mgr_obj, char *str_sym,
}
/*
- * ======== cod_init ========
- * Purpose:
- * Initialize the COD module's private state.
- *
- */
-bool cod_init(void)
-{
- bool ret = true;
-
- DBC_REQUIRE(refs >= 0);
-
- if (ret)
- refs++;
-
- DBC_ENSURE((ret && refs > 0) || (!ret && refs >= 0));
- return ret;
-}
-
-/*
* ======== cod_load_base ========
* Purpose:
* Load the initial program image, optionally with command-line arguments,
@@ -482,14 +403,6 @@ int cod_load_base(struct cod_manager *cod_mgr_obj, u32 num_argc, char *args[],
int status;
u32 i;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(cod_mgr_obj);
- DBC_REQUIRE(num_argc > 0);
- DBC_REQUIRE(args != NULL);
- DBC_REQUIRE(args[0] != NULL);
- DBC_REQUIRE(pfn_write != NULL);
- DBC_REQUIRE(cod_mgr_obj->base_lib != NULL);
-
/*
 * Make sure every argv[] stated in argc has a value, or change argc to
 * reflect the true count in the NULL-terminated argv array.
@@ -538,12 +451,6 @@ int cod_open(struct cod_manager *hmgr, char *sz_coff_path,
int status = 0;
struct cod_libraryobj *lib = NULL;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(hmgr);
- DBC_REQUIRE(sz_coff_path != NULL);
- DBC_REQUIRE(flags == COD_NOLOAD || flags == COD_SYMB);
- DBC_REQUIRE(lib_obj != NULL);
-
*lib_obj = NULL;
lib = kzalloc(sizeof(struct cod_libraryobj), GFP_KERNEL);
@@ -575,10 +482,6 @@ int cod_open_base(struct cod_manager *hmgr, char *sz_coff_path,
int status = 0;
struct dbll_library_obj *lib;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(hmgr);
- DBC_REQUIRE(sz_coff_path != NULL);
-
/* if we previously opened a base image, close it now */
if (hmgr->base_lib) {
if (hmgr->loaded) {
@@ -612,12 +515,6 @@ int cod_read_section(struct cod_libraryobj *lib, char *str_sect,
{
int status = 0;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(lib != NULL);
- DBC_REQUIRE(lib->cod_mgr);
- DBC_REQUIRE(str_sect != NULL);
- DBC_REQUIRE(str_content != NULL);
-
if (lib != NULL)
status =
lib->cod_mgr->fxns.read_sect_fxn(lib->dbll_lib, str_sect,
diff --git a/drivers/staging/tidspbridge/pmgr/dbll.c b/drivers/staging/tidspbridge/pmgr/dbll.c
index 31da62b14bc..9f07036cd41 100644
--- a/drivers/staging/tidspbridge/pmgr/dbll.c
+++ b/drivers/staging/tidspbridge/pmgr/dbll.c
@@ -21,8 +21,6 @@
/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>
-/* ----------------------------------- Trace & Debug */
-#include <dspbridge/dbc.h>
#include <dspbridge/gh.h>
/* ----------------------------------- OS Adaptation Layer */
@@ -189,8 +187,6 @@ static u16 name_hash(void *key, u16 max_bucket);
static bool name_match(void *key, void *sp);
static void sym_delete(void *value);
-static u32 refs; /* module reference count */
-
/* Symbol Redefinition */
static int redefined_symbol;
static int gbl_search = 1;
@@ -202,9 +198,6 @@ void dbll_close(struct dbll_library_obj *zl_lib)
{
struct dbll_tar_obj *zl_target;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(zl_lib);
- DBC_REQUIRE(zl_lib->open_ref > 0);
zl_target = zl_lib->target_obj;
zl_lib->open_ref--;
if (zl_lib->open_ref == 0) {
@@ -241,10 +234,6 @@ int dbll_create(struct dbll_tar_obj **target_obj,
struct dbll_tar_obj *pzl_target;
int status = 0;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(pattrs != NULL);
- DBC_REQUIRE(target_obj != NULL);
-
/* Allocate DBL target object */
pzl_target = kzalloc(sizeof(struct dbll_tar_obj), GFP_KERNEL);
if (target_obj != NULL) {
@@ -255,8 +244,6 @@ int dbll_create(struct dbll_tar_obj **target_obj,
pzl_target->attrs = *pattrs;
*target_obj = (struct dbll_tar_obj *)pzl_target;
}
- DBC_ENSURE((!status && *target_obj) ||
- (status && *target_obj == NULL));
}
return status;
@@ -269,9 +256,6 @@ void dbll_delete(struct dbll_tar_obj *target)
{
struct dbll_tar_obj *zl_target = (struct dbll_tar_obj *)target;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(zl_target);
-
kfree(zl_target);
}
@@ -282,14 +266,7 @@ void dbll_delete(struct dbll_tar_obj *target)
*/
void dbll_exit(void)
{
- DBC_REQUIRE(refs > 0);
-
- refs--;
-
- if (refs == 0)
- gh_exit();
-
- DBC_ENSURE(refs >= 0);
+ /* do nothing */
}
/*
@@ -302,12 +279,6 @@ bool dbll_get_addr(struct dbll_library_obj *zl_lib, char *name,
struct dbll_symbol *sym;
bool status = false;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(zl_lib);
- DBC_REQUIRE(name != NULL);
- DBC_REQUIRE(sym_val != NULL);
- DBC_REQUIRE(zl_lib->sym_tab != NULL);
-
sym = (struct dbll_symbol *)gh_find(zl_lib->sym_tab, name);
if (sym != NULL) {
*sym_val = &sym->value;
@@ -327,10 +298,6 @@ void dbll_get_attrs(struct dbll_tar_obj *target, struct dbll_attrs *pattrs)
{
struct dbll_tar_obj *zl_target = (struct dbll_tar_obj *)target;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(zl_target);
- DBC_REQUIRE(pattrs != NULL);
-
if ((pattrs != NULL) && (zl_target != NULL))
*pattrs = zl_target->attrs;
@@ -347,12 +314,6 @@ bool dbll_get_c_addr(struct dbll_library_obj *zl_lib, char *name,
char cname[MAXEXPR + 1];
bool status = false;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(zl_lib);
- DBC_REQUIRE(sym_val != NULL);
- DBC_REQUIRE(zl_lib->sym_tab != NULL);
- DBC_REQUIRE(name != NULL);
-
cname[0] = '_';
strncpy(cname + 1, name, sizeof(cname) - 2);
@@ -382,12 +343,6 @@ int dbll_get_sect(struct dbll_library_obj *lib, char *name, u32 *paddr,
struct dbll_library_obj *zl_lib = (struct dbll_library_obj *)lib;
int status = 0;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(name != NULL);
- DBC_REQUIRE(paddr != NULL);
- DBC_REQUIRE(psize != NULL);
- DBC_REQUIRE(zl_lib);
-
/* If DOFF file is not open, we open it. */
if (zl_lib != NULL) {
if (zl_lib->fp == NULL) {
@@ -434,12 +389,7 @@ int dbll_get_sect(struct dbll_library_obj *lib, char *name, u32 *paddr,
*/
bool dbll_init(void)
{
- DBC_REQUIRE(refs >= 0);
-
- if (refs == 0)
- gh_init();
-
- refs++;
+ /* do nothing */
return true;
}
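
Note that dbll_init() and dbll_exit() are reduced to stubs here rather than deleted outright like their counterparts in chnl.c, cmm.c and cod.c. The likely reason is visible in cod.c above: COD drives the loader through a function-pointer table, so both symbols must survive for the table to reference. A sketch of that table; only the first two entries appear verbatim in this diff, the rest are assumed from the dbll API:

    static struct dbll_fxns ldr_fxns = {
            (dbll_close_fxn) dbll_close,
            (dbll_create_fxn) dbll_create,
            (dbll_delete_fxn) dbll_delete,
            (dbll_exit_fxn) dbll_exit,      /* must still resolve, hence the stub */
            (dbll_init_fxn) dbll_init,      /* must still resolve, hence the stub */
            /* ...remaining loader entry points... */
    };
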
@@ -456,10 +406,6 @@ int dbll_load(struct dbll_library_obj *lib, dbll_flags flags,
s32 err;
int status = 0;
bool opened_doff = false;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(zl_lib);
- DBC_REQUIRE(entry != NULL);
- DBC_REQUIRE(attrs != NULL);
/*
* Load if not already loaded.
@@ -558,8 +504,6 @@ int dbll_load(struct dbll_library_obj *lib, dbll_flags flags,
if (opened_doff)
dof_close(zl_lib);
- DBC_ENSURE(status || zl_lib->load_ref > 0);
-
dev_dbg(bridge, "%s: lib: %p flags: 0x%x entry: %p, status 0x%x\n",
__func__, lib, flags, entry, status);
@@ -577,12 +521,6 @@ int dbll_open(struct dbll_tar_obj *target, char *file, dbll_flags flags,
s32 err;
int status = 0;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(zl_target);
- DBC_REQUIRE(zl_target->attrs.fopen != NULL);
- DBC_REQUIRE(file != NULL);
- DBC_REQUIRE(lib_obj != NULL);
-
zl_lib = zl_target->head;
while (zl_lib != NULL) {
if (strcmp(zl_lib->file_name, file) == 0) {
@@ -699,8 +637,6 @@ func_cont:
dbll_close((struct dbll_library_obj *)zl_lib);
}
- DBC_ENSURE((!status && (zl_lib->open_ref > 0) && *lib_obj)
- || (status && *lib_obj == NULL));
dev_dbg(bridge, "%s: target: %p file: %s lib_obj: %p, status 0x%x\n",
__func__, target, file, lib_obj, status);
@@ -722,12 +658,6 @@ int dbll_read_sect(struct dbll_library_obj *lib, char *name,
const struct ldr_section_info *sect = NULL;
int status = 0;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(zl_lib);
- DBC_REQUIRE(name != NULL);
- DBC_REQUIRE(buf != NULL);
- DBC_REQUIRE(size != 0);
-
/* If DOFF file is not open, we open it. */
if (zl_lib != NULL) {
if (zl_lib->fp == NULL) {
@@ -788,14 +718,11 @@ void dbll_unload(struct dbll_library_obj *lib, struct dbll_attrs *attrs)
struct dbll_library_obj *zl_lib = (struct dbll_library_obj *)lib;
s32 err = 0;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(zl_lib);
- DBC_REQUIRE(zl_lib->load_ref > 0);
dev_dbg(bridge, "%s: lib: %p\n", __func__, lib);
zl_lib->load_ref--;
/* Unload only if reference count is 0 */
if (zl_lib->load_ref != 0)
- goto func_end;
+ return;
zl_lib->target_obj->attrs = *attrs;
if (zl_lib->dload_mod_obj) {
@@ -814,8 +741,6 @@ void dbll_unload(struct dbll_library_obj *lib, struct dbll_attrs *attrs)
/* delete DOFF desc since it holds *lots* of host OS
* resources */
dof_close(zl_lib);
-func_end:
- DBC_ENSURE(zl_lib->load_ref >= 0);
}
/*
@@ -874,8 +799,6 @@ static u16 name_hash(void *key, u16 max_bucket)
u16 hash;
char *name = (char *)key;
- DBC_REQUIRE(name != NULL);
-
hash = 0;
while (*name) {
@@ -893,9 +816,6 @@ static u16 name_hash(void *key, u16 max_bucket)
*/
static bool name_match(void *key, void *sp)
{
- DBC_REQUIRE(key != NULL);
- DBC_REQUIRE(sp != NULL);
-
if ((key != NULL) && (sp != NULL)) {
if (strcmp((char *)key, ((struct dbll_symbol *)sp)->name) ==
0)
@@ -938,10 +858,7 @@ static int dbll_read_buffer(struct dynamic_loader_stream *this, void *buffer,
struct dbll_library_obj *lib;
int bytes_read = 0;
- DBC_REQUIRE(this != NULL);
lib = pstream->lib;
- DBC_REQUIRE(lib);
-
if (lib != NULL) {
bytes_read =
(*(lib->target_obj->attrs.fread)) (buffer, 1, bufsize,
@@ -960,10 +877,7 @@ static int dbll_set_file_posn(struct dynamic_loader_stream *this,
struct dbll_library_obj *lib;
int status = 0; /* Success */
- DBC_REQUIRE(this != NULL);
lib = pstream->lib;
- DBC_REQUIRE(lib);
-
if (lib != NULL) {
status = (*(lib->target_obj->attrs.fseek)) (lib->fp, (long)pos,
SEEK_SET);
@@ -986,10 +900,7 @@ static struct dynload_symbol *dbll_find_symbol(struct dynamic_loader_sym *this,
struct dbll_sym_val *dbll_sym = NULL;
bool status = false; /* Symbol not found yet */
- DBC_REQUIRE(this != NULL);
lib = ldr_sym->lib;
- DBC_REQUIRE(lib);
-
if (lib != NULL) {
if (lib->target_obj->attrs.sym_lookup) {
/* Check current lib + base lib + dep lib +
@@ -1015,9 +926,6 @@ static struct dynload_symbol *dbll_find_symbol(struct dynamic_loader_sym *this,
if (!status && gbl_search)
dev_dbg(bridge, "%s: Symbol not found: %s\n", __func__, name);
- DBC_ASSERT((status && (dbll_sym != NULL))
- || (!status && (dbll_sym == NULL)));
-
ret_sym = (struct dynload_symbol *)dbll_sym;
return ret_sym;
}
@@ -1034,11 +942,7 @@ static struct dynload_symbol *find_in_symbol_table(struct dynamic_loader_sym
struct dbll_library_obj *lib;
struct dbll_symbol *sym;
- DBC_REQUIRE(this != NULL);
lib = ldr_sym->lib;
- DBC_REQUIRE(lib);
- DBC_REQUIRE(lib->sym_tab != NULL);
-
sym = (struct dbll_symbol *)gh_find(lib->sym_tab, (char *)name);
ret_sym = (struct dynload_symbol *)&sym->value;
@@ -1059,10 +963,7 @@ static struct dynload_symbol *dbll_add_to_symbol_table(struct dynamic_loader_sym
struct dbll_library_obj *lib;
struct dynload_symbol *ret;
- DBC_REQUIRE(this != NULL);
- DBC_REQUIRE(name);
lib = ldr_sym->lib;
- DBC_REQUIRE(lib);
/* Check to see if symbol is already defined in symbol table */
if (!(lib->target_obj->attrs.base_image)) {
@@ -1111,10 +1012,7 @@ static void dbll_purge_symbol_table(struct dynamic_loader_sym *this,
struct ldr_symbol *ldr_sym = (struct ldr_symbol *)this;
struct dbll_library_obj *lib;
- DBC_REQUIRE(this != NULL);
lib = ldr_sym->lib;
- DBC_REQUIRE(lib);
-
/* May not need to do anything */
}
@@ -1127,9 +1025,7 @@ static void *allocate(struct dynamic_loader_sym *this, unsigned memsize)
struct dbll_library_obj *lib;
void *buf;
- DBC_REQUIRE(this != NULL);
lib = ldr_sym->lib;
- DBC_REQUIRE(lib);
buf = kzalloc(memsize, GFP_KERNEL);
@@ -1144,9 +1040,7 @@ static void deallocate(struct dynamic_loader_sym *this, void *mem_ptr)
struct ldr_symbol *ldr_sym = (struct ldr_symbol *)this;
struct dbll_library_obj *lib;
- DBC_REQUIRE(this != NULL);
lib = ldr_sym->lib;
- DBC_REQUIRE(lib);
kfree(mem_ptr);
}
@@ -1161,9 +1055,7 @@ static void dbll_err_report(struct dynamic_loader_sym *this, const char *errstr,
struct dbll_library_obj *lib;
char temp_buf[MAXEXPR];
- DBC_REQUIRE(this != NULL);
lib = ldr_sym->lib;
- DBC_REQUIRE(lib);
vsnprintf((char *)temp_buf, MAXEXPR, (char *)errstr, args);
dev_dbg(bridge, "%s\n", temp_buf);
}
@@ -1195,9 +1087,7 @@ static int dbll_rmm_alloc(struct dynamic_loader_allocate *this,
u32 alloc_size = 0;
u32 run_addr_flag = 0;
- DBC_REQUIRE(this != NULL);
lib = dbll_alloc_obj->lib;
- DBC_REQUIRE(lib);
mem_sect_type =
(stype == DLOAD_TEXT) ? DBLL_CODE : (stype ==
@@ -1206,7 +1096,6 @@ static int dbll_rmm_alloc(struct dynamic_loader_allocate *this,
/* Attempt to extract the segment ID and requirement information from
the name of the section */
- DBC_REQUIRE(info->name);
token_len = strlen((char *)(info->name)) + 1;
sz_sect_name = kzalloc(token_len, GFP_KERNEL);
@@ -1307,9 +1196,7 @@ static void rmm_dealloc(struct dynamic_loader_allocate *this,
(stype == DLOAD_TEXT) ? DBLL_CODE : (stype ==
DLOAD_BSS) ? DBLL_BSS :
DBLL_DATA;
- DBC_REQUIRE(this != NULL);
lib = dbll_alloc_obj->lib;
- DBC_REQUIRE(lib);
/* segid was set by alloc function */
segid = (u32) info->context;
if (mem_sect_type == DBLL_CODE)
@@ -1347,9 +1234,7 @@ static int read_mem(struct dynamic_loader_initialize *this, void *buf,
struct dbll_library_obj *lib;
int bytes_read = 0;
- DBC_REQUIRE(this != NULL);
lib = init_obj->lib;
- DBC_REQUIRE(lib);
/* Need bridge_brd_read function */
return bytes_read;
}
@@ -1368,7 +1253,6 @@ static int write_mem(struct dynamic_loader_initialize *this, void *buf,
u32 mem_sect_type;
bool ret = true;
- DBC_REQUIRE(this != NULL);
lib = init_obj->lib;
if (!lib)
return false;
@@ -1415,7 +1299,6 @@ static int fill_mem(struct dynamic_loader_initialize *this, ldr_addr addr,
struct dbll_library_obj *lib;
struct dbll_init_obj *init_obj = (struct dbll_init_obj *)this;
- DBC_REQUIRE(this != NULL);
lib = init_obj->lib;
pbuf = NULL;
/* Pass the NULL pointer to write_mem to get the start address of Shared
@@ -1439,9 +1322,7 @@ static int execute(struct dynamic_loader_initialize *this, ldr_addr start)
struct dbll_library_obj *lib;
bool ret = true;
- DBC_REQUIRE(this != NULL);
lib = init_obj->lib;
- DBC_REQUIRE(lib);
/* Save entry point */
if (lib != NULL)
lib->entry = (u32) start;
diff --git a/drivers/staging/tidspbridge/pmgr/dev.c b/drivers/staging/tidspbridge/pmgr/dev.c
index 522810bc742..6234ffb5e8a 100644
--- a/drivers/staging/tidspbridge/pmgr/dev.c
+++ b/drivers/staging/tidspbridge/pmgr/dev.c
@@ -24,9 +24,6 @@
/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>
-/* ----------------------------------- Trace & Debug */
-#include <dspbridge/dbc.h>
-
/* ----------------------------------- Platform Manager */
#include <dspbridge/cod.h>
#include <dspbridge/drv.h>
@@ -84,9 +81,6 @@ struct drv_ext {
char sz_string[MAXREGPATHLENGTH];
};
-/* ----------------------------------- Globals */
-static u32 refs; /* Module reference count */
-
/* ----------------------------------- Function Prototypes */
static int fxn_not_implemented(int arg, ...);
static int init_cod_mgr(struct dev_object *dev_obj);
@@ -106,11 +100,8 @@ u32 dev_brd_write_fxn(void *arb, u32 dsp_add, void *host_buf,
u32 ul_written = 0;
int status;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(host_buf != NULL); /* Required of BrdWrite(). */
if (dev_obj) {
/* Require of BrdWrite() */
- DBC_ASSERT(dev_obj->bridge_context != NULL);
status = (*dev_obj->bridge_interface.brd_write) (
dev_obj->bridge_context, host_buf,
dsp_add, ul_num_bytes, mem_space);
@@ -143,9 +134,6 @@ int dev_create_device(struct dev_object **device_obj,
struct drv_object *hdrv_obj = NULL;
struct drv_data *drv_datap = dev_get_drvdata(bridge);
int status = 0;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(device_obj != NULL);
- DBC_REQUIRE(driver_file_name != NULL);
status = drv_request_bridge_res_dsp((void *)&host_res);
@@ -169,7 +157,6 @@ int dev_create_device(struct dev_object **device_obj,
/* Create the device object, and pass a handle to the Bridge driver for
* storage. */
if (!status) {
- DBC_ASSERT(drv_fxns);
dev_obj = kzalloc(sizeof(struct dev_object), GFP_KERNEL);
if (dev_obj) {
/* Fill out the rest of the Dev Object structure: */
@@ -191,9 +178,6 @@ int dev_create_device(struct dev_object **device_obj,
status = (dev_obj->bridge_interface.dev_create)
(&dev_obj->bridge_context, dev_obj,
host_res);
- /* Assert bridge_dev_create()'s ensure clause: */
- DBC_ASSERT(status
- || (dev_obj->bridge_context != NULL));
} else {
status = -ENOMEM;
}
@@ -271,7 +255,6 @@ leave:
*device_obj = NULL;
}
- DBC_ENSURE((!status && *device_obj) || (status && !*device_obj));
return status;
}
@@ -287,17 +270,11 @@ int dev_create2(struct dev_object *hdev_obj)
int status = 0;
struct dev_object *dev_obj = hdev_obj;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(hdev_obj);
-
/* There can be only one Node Manager per DEV object */
- DBC_ASSERT(!dev_obj->node_mgr);
status = node_create_mgr(&dev_obj->node_mgr, hdev_obj);
if (status)
dev_obj->node_mgr = NULL;
- DBC_ENSURE((!status && dev_obj->node_mgr != NULL)
- || (status && dev_obj->node_mgr == NULL));
return status;
}
@@ -311,9 +288,6 @@ int dev_destroy2(struct dev_object *hdev_obj)
int status = 0;
struct dev_object *dev_obj = hdev_obj;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(hdev_obj);
-
if (dev_obj->node_mgr) {
if (node_delete_mgr(dev_obj->node_mgr))
status = -EPERM;
@@ -322,7 +296,6 @@ int dev_destroy2(struct dev_object *hdev_obj)
}
- DBC_ENSURE((!status && dev_obj->node_mgr == NULL) || status);
return status;
}
@@ -337,8 +310,6 @@ int dev_destroy_device(struct dev_object *hdev_obj)
int status = 0;
struct dev_object *dev_obj = hdev_obj;
- DBC_REQUIRE(refs > 0);
-
if (hdev_obj) {
if (dev_obj->cod_mgr) {
cod_delete(dev_obj->cod_mgr);
@@ -415,9 +386,6 @@ int dev_get_chnl_mgr(struct dev_object *hdev_obj,
int status = 0;
struct dev_object *dev_obj = hdev_obj;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(mgr != NULL);
-
if (hdev_obj) {
*mgr = dev_obj->chnl_mgr;
} else {
@@ -425,7 +393,6 @@ int dev_get_chnl_mgr(struct dev_object *hdev_obj,
status = -EFAULT;
}
- DBC_ENSURE(!status || (mgr != NULL && *mgr == NULL));
return status;
}
@@ -441,9 +408,6 @@ int dev_get_cmm_mgr(struct dev_object *hdev_obj,
int status = 0;
struct dev_object *dev_obj = hdev_obj;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(mgr != NULL);
-
if (hdev_obj) {
*mgr = dev_obj->cmm_mgr;
} else {
@@ -451,7 +415,6 @@ int dev_get_cmm_mgr(struct dev_object *hdev_obj,
status = -EFAULT;
}
- DBC_ENSURE(!status || (mgr != NULL && *mgr == NULL));
return status;
}
@@ -467,9 +430,6 @@ int dev_get_dmm_mgr(struct dev_object *hdev_obj,
int status = 0;
struct dev_object *dev_obj = hdev_obj;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(mgr != NULL);
-
if (hdev_obj) {
*mgr = dev_obj->dmm_mgr;
} else {
@@ -477,7 +437,6 @@ int dev_get_dmm_mgr(struct dev_object *hdev_obj,
status = -EFAULT;
}
- DBC_ENSURE(!status || (mgr != NULL && *mgr == NULL));
return status;
}
@@ -492,9 +451,6 @@ int dev_get_cod_mgr(struct dev_object *hdev_obj,
int status = 0;
struct dev_object *dev_obj = hdev_obj;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(cod_mgr != NULL);
-
if (hdev_obj) {
*cod_mgr = dev_obj->cod_mgr;
} else {
@@ -502,7 +458,6 @@ int dev_get_cod_mgr(struct dev_object *hdev_obj,
status = -EFAULT;
}
- DBC_ENSURE(!status || (cod_mgr != NULL && *cod_mgr == NULL));
return status;
}
@@ -514,9 +469,6 @@ int dev_get_deh_mgr(struct dev_object *hdev_obj,
{
int status = 0;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(deh_manager != NULL);
- DBC_REQUIRE(hdev_obj);
if (hdev_obj) {
*deh_manager = hdev_obj->deh_mgr;
} else {
@@ -537,9 +489,6 @@ int dev_get_dev_node(struct dev_object *hdev_obj,
int status = 0;
struct dev_object *dev_obj = hdev_obj;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(dev_nde != NULL);
-
if (hdev_obj) {
*dev_nde = dev_obj->dev_node_obj;
} else {
@@ -547,7 +496,6 @@ int dev_get_dev_node(struct dev_object *hdev_obj,
status = -EFAULT;
}
- DBC_ENSURE(!status || (dev_nde != NULL && *dev_nde == NULL));
return status;
}
@@ -578,9 +526,6 @@ int dev_get_intf_fxns(struct dev_object *hdev_obj,
int status = 0;
struct dev_object *dev_obj = hdev_obj;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(if_fxns != NULL);
-
if (hdev_obj) {
*if_fxns = &dev_obj->bridge_interface;
} else {
@@ -588,7 +533,6 @@ int dev_get_intf_fxns(struct dev_object *hdev_obj,
status = -EFAULT;
}
- DBC_ENSURE(!status || ((if_fxns != NULL) && (*if_fxns == NULL)));
return status;
}
@@ -600,10 +544,6 @@ int dev_get_io_mgr(struct dev_object *hdev_obj,
{
int status = 0;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(io_man != NULL);
- DBC_REQUIRE(hdev_obj);
-
if (hdev_obj) {
*io_man = hdev_obj->iomgr;
} else {
@@ -638,10 +578,6 @@ struct dev_object *dev_get_next(struct dev_object *hdev_obj)
*/
void dev_get_msg_mgr(struct dev_object *hdev_obj, struct msg_mgr **msg_man)
{
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(msg_man != NULL);
- DBC_REQUIRE(hdev_obj);
-
*msg_man = hdev_obj->msg_mgr;
}
@@ -656,9 +592,6 @@ int dev_get_node_manager(struct dev_object *hdev_obj,
int status = 0;
struct dev_object *dev_obj = hdev_obj;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(node_man != NULL);
-
if (hdev_obj) {
*node_man = dev_obj->node_mgr;
} else {
@@ -666,7 +599,6 @@ int dev_get_node_manager(struct dev_object *hdev_obj,
status = -EFAULT;
}
- DBC_ENSURE(!status || (node_man != NULL && *node_man == NULL));
return status;
}
@@ -679,9 +611,6 @@ int dev_get_symbol(struct dev_object *hdev_obj,
int status = 0;
struct cod_manager *cod_mgr;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(str_sym != NULL && pul_value != NULL);
-
if (hdev_obj) {
status = dev_get_cod_mgr(hdev_obj, &cod_mgr);
if (cod_mgr)
@@ -706,9 +635,6 @@ int dev_get_bridge_context(struct dev_object *hdev_obj,
int status = 0;
struct dev_object *dev_obj = hdev_obj;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(phbridge_context != NULL);
-
if (hdev_obj) {
*phbridge_context = dev_obj->bridge_context;
} else {
@@ -716,67 +642,10 @@ int dev_get_bridge_context(struct dev_object *hdev_obj,
status = -EFAULT;
}
- DBC_ENSURE(!status || ((phbridge_context != NULL) &&
- (*phbridge_context == NULL)));
return status;
}
/*
- * ======== dev_exit ========
- * Purpose:
- * Decrement reference count, and free resources when reference count is
- * 0.
- */
-void dev_exit(void)
-{
- DBC_REQUIRE(refs > 0);
-
- refs--;
-
- if (refs == 0) {
- cmm_exit();
- dmm_exit();
- }
-
- DBC_ENSURE(refs >= 0);
-}
-
-/*
- * ======== dev_init ========
- * Purpose:
- * Initialize DEV's private state, keeping a reference count on each call.
- */
-bool dev_init(void)
-{
- bool cmm_ret, dmm_ret, ret = true;
-
- DBC_REQUIRE(refs >= 0);
-
- if (refs == 0) {
- cmm_ret = cmm_init();
- dmm_ret = dmm_init();
-
- ret = cmm_ret && dmm_ret;
-
- if (!ret) {
- if (cmm_ret)
- cmm_exit();
-
- if (dmm_ret)
- dmm_exit();
-
- }
- }
-
- if (ret)
- refs++;
-
- DBC_ENSURE((ret && (refs > 0)) || (!ret && (refs >= 0)));
-
- return ret;
-}
-
-/*
* ======== dev_notify_clients ========
* Purpose:
* Notify all clients of this device of a change in device status.
@@ -841,14 +710,11 @@ int dev_set_chnl_mgr(struct dev_object *hdev_obj,
int status = 0;
struct dev_object *dev_obj = hdev_obj;
- DBC_REQUIRE(refs > 0);
-
if (hdev_obj)
dev_obj->chnl_mgr = hmgr;
else
status = -EFAULT;
- DBC_ENSURE(status || (dev_obj->chnl_mgr == hmgr));
return status;
}
@@ -859,9 +725,6 @@ int dev_set_chnl_mgr(struct dev_object *hdev_obj,
*/
void dev_set_msg_mgr(struct dev_object *hdev_obj, struct msg_mgr *hmgr)
{
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(hdev_obj);
-
hdev_obj->msg_mgr = hmgr;
}
@@ -879,8 +742,6 @@ int dev_start_device(struct cfg_devnode *dev_node_obj)
struct mgr_object *hmgr_obj = NULL;
struct drv_data *drv_datap = dev_get_drvdata(bridge);
- DBC_REQUIRE(refs > 0);
-
/* Given all resources, create a device object. */
status = dev_create_device(&hdev_obj, bridge_file_name,
dev_node_obj);
@@ -944,9 +805,6 @@ static int init_cod_mgr(struct dev_object *dev_obj)
int status = 0;
char *sz_dummy_file = "dummy";
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(!dev_obj || (dev_obj->cod_mgr == NULL));
-
status = cod_create(&dev_obj->cod_mgr, sz_dummy_file);
return status;
@@ -976,10 +834,6 @@ int dev_insert_proc_object(struct dev_object *hdev_obj,
{
struct dev_object *dev_obj = (struct dev_object *)hdev_obj;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(dev_obj);
- DBC_REQUIRE(proc_obj != 0);
- DBC_REQUIRE(already_attached != NULL);
if (!list_empty(&dev_obj->proc_list))
*already_attached = true;
@@ -1017,10 +871,6 @@ int dev_remove_proc_object(struct dev_object *hdev_obj, u32 proc_obj)
struct list_head *cur_elem;
struct dev_object *dev_obj = (struct dev_object *)hdev_obj;
- DBC_REQUIRE(dev_obj);
- DBC_REQUIRE(proc_obj != 0);
- DBC_REQUIRE(!list_empty(&dev_obj->proc_list));
-
/* Search list for dev_obj: */
list_for_each(cur_elem, &dev_obj->proc_list) {
if ((u32) cur_elem == proc_obj) {
@@ -1069,10 +919,6 @@ static void store_interface_fxns(struct bridge_drv_interface *drv_fxns,
(intf_fxns->pfn = ((drv_fxns->pfn != NULL) ? drv_fxns->pfn : \
(cast)fxn_not_implemented))
- DBC_REQUIRE(intf_fxns != NULL);
- DBC_REQUIRE(drv_fxns != NULL);
- DBC_REQUIRE(MAKEVERSION(drv_fxns->brd_api_major_version,
- drv_fxns->brd_api_minor_version) <= BRD_API_VERSION);
bridge_version = MAKEVERSION(drv_fxns->brd_api_major_version,
drv_fxns->brd_api_minor_version);
intf_fxns->brd_api_major_version = drv_fxns->brd_api_major_version;
@@ -1119,33 +965,5 @@ static void store_interface_fxns(struct bridge_drv_interface *drv_fxns,
STORE_FXN(fxn_msg_setqueueid, msg_set_queue_id);
}
 /* Add code for any additional functions in newer Bridge versions here */
- /* Ensure postcondition: */
- DBC_ENSURE(intf_fxns->dev_create != NULL);
- DBC_ENSURE(intf_fxns->dev_destroy != NULL);
- DBC_ENSURE(intf_fxns->dev_cntrl != NULL);
- DBC_ENSURE(intf_fxns->brd_monitor != NULL);
- DBC_ENSURE(intf_fxns->brd_start != NULL);
- DBC_ENSURE(intf_fxns->brd_stop != NULL);
- DBC_ENSURE(intf_fxns->brd_status != NULL);
- DBC_ENSURE(intf_fxns->brd_read != NULL);
- DBC_ENSURE(intf_fxns->brd_write != NULL);
- DBC_ENSURE(intf_fxns->chnl_create != NULL);
- DBC_ENSURE(intf_fxns->chnl_destroy != NULL);
- DBC_ENSURE(intf_fxns->chnl_open != NULL);
- DBC_ENSURE(intf_fxns->chnl_close != NULL);
- DBC_ENSURE(intf_fxns->chnl_add_io_req != NULL);
- DBC_ENSURE(intf_fxns->chnl_get_ioc != NULL);
- DBC_ENSURE(intf_fxns->chnl_cancel_io != NULL);
- DBC_ENSURE(intf_fxns->chnl_flush_io != NULL);
- DBC_ENSURE(intf_fxns->chnl_get_info != NULL);
- DBC_ENSURE(intf_fxns->chnl_get_mgr_info != NULL);
- DBC_ENSURE(intf_fxns->chnl_idle != NULL);
- DBC_ENSURE(intf_fxns->chnl_register_notify != NULL);
- DBC_ENSURE(intf_fxns->io_create != NULL);
- DBC_ENSURE(intf_fxns->io_destroy != NULL);
- DBC_ENSURE(intf_fxns->io_on_loaded != NULL);
- DBC_ENSURE(intf_fxns->io_get_proc_load != NULL);
- DBC_ENSURE(intf_fxns->msg_set_queue_id != NULL);
-
#undef STORE_FXN
}
diff --git a/drivers/staging/tidspbridge/pmgr/dmm.c b/drivers/staging/tidspbridge/pmgr/dmm.c
index 8685233d762..7c9f8391606 100644
--- a/drivers/staging/tidspbridge/pmgr/dmm.c
+++ b/drivers/staging/tidspbridge/pmgr/dmm.c
@@ -28,9 +28,6 @@
/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>
-/* ----------------------------------- Trace & Debug */
-#include <dspbridge/dbc.h>
-
/* ----------------------------------- OS Adaptation Layer */
#include <dspbridge/sync.h>
@@ -54,8 +51,6 @@ struct dmm_object {
spinlock_t dmm_lock; /* Lock to access dmm mgr */
};
-/* ----------------------------------- Globals */
-static u32 refs; /* module reference count */
struct map_page {
u32 region_size:15;
u32 mapped_size:15;
@@ -123,8 +118,6 @@ int dmm_create(struct dmm_object **dmm_manager,
{
struct dmm_object *dmm_obj = NULL;
int status = 0;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(dmm_manager != NULL);
*dmm_manager = NULL;
/* create, zero, and tag a cmm mgr object */
@@ -149,7 +142,6 @@ int dmm_destroy(struct dmm_object *dmm_mgr)
struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr;
int status = 0;
- DBC_REQUIRE(refs > 0);
if (dmm_mgr) {
status = dmm_delete_tables(dmm_obj);
if (!status)
@@ -169,7 +161,6 @@ int dmm_delete_tables(struct dmm_object *dmm_mgr)
{
int status = 0;
- DBC_REQUIRE(refs > 0);
/* Delete all DMM tables */
if (dmm_mgr)
vfree(virtual_mapping_table);
@@ -179,19 +170,6 @@ int dmm_delete_tables(struct dmm_object *dmm_mgr)
}
/*
- * ======== dmm_exit ========
- * Purpose:
- * Discontinue usage of module; free resources when reference count
- * reaches 0.
- */
-void dmm_exit(void)
-{
- DBC_REQUIRE(refs > 0);
-
- refs--;
-}
-
-/*
* ======== dmm_get_handle ========
* Purpose:
* Return the dynamic memory manager object for this device.
@@ -202,8 +180,6 @@ int dmm_get_handle(void *hprocessor, struct dmm_object **dmm_manager)
int status = 0;
struct dev_object *hdev_obj;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(dmm_manager != NULL);
if (hprocessor != NULL)
status = proc_get_dev_object(hprocessor, &hdev_obj);
else
@@ -216,28 +192,6 @@ int dmm_get_handle(void *hprocessor, struct dmm_object **dmm_manager)
}
/*
- * ======== dmm_init ========
- * Purpose:
- * Initializes private state of DMM module.
- */
-bool dmm_init(void)
-{
- bool ret = true;
-
- DBC_REQUIRE(refs >= 0);
-
- if (ret)
- refs++;
-
- DBC_ENSURE((ret && (refs > 0)) || (!ret && (refs >= 0)));
-
- virtual_mapping_table = NULL;
- table_size = 0;
-
- return ret;
-}
-
-/*
* ======== dmm_map_memory ========
* Purpose:
* Add a mapping block to the reserved chunk. DMM assumes that this block
diff --git a/drivers/staging/tidspbridge/pmgr/dspapi.c b/drivers/staging/tidspbridge/pmgr/dspapi.c
index 767ffe270ed..9ef1ad9527a 100644
--- a/drivers/staging/tidspbridge/pmgr/dspapi.c
+++ b/drivers/staging/tidspbridge/pmgr/dspapi.c
@@ -24,9 +24,6 @@
/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>
-/* ----------------------------------- Trace & Debug */
-#include <dspbridge/dbc.h>
-
/* ----------------------------------- OS Adaptation Layer */
#include <dspbridge/ntfy.h>
@@ -266,25 +263,10 @@ err:
*/
void api_exit(void)
{
- DBC_REQUIRE(api_c_refs > 0);
api_c_refs--;
- if (api_c_refs == 0) {
- /* Release all modules initialized in api_init(). */
- cod_exit();
- dev_exit();
- chnl_exit();
- msg_exit();
- io_exit();
- strm_exit();
- disp_exit();
- node_exit();
- proc_exit();
+ if (api_c_refs == 0)
mgr_exit();
- rmm_exit();
- drv_exit();
- }
- DBC_ENSURE(api_c_refs >= 0);
}
/*
@@ -295,64 +277,10 @@ void api_exit(void)
bool api_init(void)
{
bool ret = true;
- bool fdrv, fdev, fcod, fchnl, fmsg, fio;
- bool fmgr, fproc, fnode, fdisp, fstrm, frmm;
-
- if (api_c_refs == 0) {
- /* initialize driver and other modules */
- fdrv = drv_init();
- fmgr = mgr_init();
- fproc = proc_init();
- fnode = node_init();
- fdisp = disp_init();
- fstrm = strm_init();
- frmm = rmm_init();
- fchnl = chnl_init();
- fmsg = msg_mod_init();
- fio = io_init();
- fdev = dev_init();
- fcod = cod_init();
- ret = fdrv && fdev && fchnl && fcod && fmsg && fio;
- ret = ret && fmgr && fproc && frmm;
- if (!ret) {
- if (fdrv)
- drv_exit();
-
- if (fmgr)
- mgr_exit();
-
- if (fstrm)
- strm_exit();
-
- if (fproc)
- proc_exit();
-
- if (fnode)
- node_exit();
-
- if (fdisp)
- disp_exit();
-
- if (fchnl)
- chnl_exit();
-
- if (fmsg)
- msg_exit();
-
- if (fio)
- io_exit();
-
- if (fdev)
- dev_exit();
-
- if (fcod)
- cod_exit();
-
- if (frmm)
- rmm_exit();
- }
- }
+ if (api_c_refs == 0)
+ ret = mgr_init();
+
if (ret)
api_c_refs++;
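
The api_init()/api_exit() rewrite above is the payoff of the series: every *_init()/*_exit() pair that only maintained a private reference count is dropped, and MGR alone keeps a real initialization hook. The surviving path, with the mgr.c link inferred since that file is outside this diff:

    api_init()               /* pmgr/dspapi.c: still reference counted      */
        -> mgr_init()        /* rmgr/mgr.c: assumed to call into DCD        */
            -> dcd_init()    /* rmgr/dbdcd.c below: now only runs           */
                             /* INIT_LIST_HEAD(&reg_key_list) on first init */
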
@@ -382,8 +310,6 @@ int api_init_complete2(void)
struct drv_data *drv_datap;
u8 dev_type;
- DBC_REQUIRE(api_c_refs > 0);
-
 /* Walk the list of DevObjects, get each devnode, and attempt to
* autostart the board. Note that this requires COF loading, which
* requires KFILE. */
diff --git a/drivers/staging/tidspbridge/pmgr/io.c b/drivers/staging/tidspbridge/pmgr/io.c
index 65245f310f8..4073c9c672f 100644
--- a/drivers/staging/tidspbridge/pmgr/io.c
+++ b/drivers/staging/tidspbridge/pmgr/io.c
@@ -23,9 +23,6 @@
/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>
-/* ----------------------------------- Trace & Debug */
-#include <dspbridge/dbc.h>
-
/* ----------------------------------- Platform Manager */
#include <dspbridge/dev.h>
@@ -33,9 +30,6 @@
#include <ioobj.h>
#include <dspbridge/io.h>
-/* ----------------------------------- Globals */
-static u32 refs;
-
/*
* ======== io_create ========
* Purpose:
@@ -50,10 +44,6 @@ int io_create(struct io_mgr **io_man, struct dev_object *hdev_obj,
struct io_mgr_ *pio_mgr = NULL;
int status = 0;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(io_man != NULL);
- DBC_REQUIRE(mgr_attrts != NULL);
-
*io_man = NULL;
/* A memory base of 0 implies no memory base: */
@@ -94,8 +84,6 @@ int io_destroy(struct io_mgr *hio_mgr)
struct io_mgr_ *pio_mgr = (struct io_mgr_ *)hio_mgr;
int status;
- DBC_REQUIRE(refs > 0);
-
intf_fxns = pio_mgr->intf_fxns;
/* Let Bridge channel module destroy the io_mgr: */
@@ -103,36 +91,3 @@ int io_destroy(struct io_mgr *hio_mgr)
return status;
}
-
-/*
- * ======== io_exit ========
- * Purpose:
- * Discontinue usage of the IO module.
- */
-void io_exit(void)
-{
- DBC_REQUIRE(refs > 0);
-
- refs--;
-
- DBC_ENSURE(refs >= 0);
-}
-
-/*
- * ======== io_init ========
- * Purpose:
- * Initialize the IO module's private state.
- */
-bool io_init(void)
-{
- bool ret = true;
-
- DBC_REQUIRE(refs >= 0);
-
- if (ret)
- refs++;
-
- DBC_ENSURE((ret && (refs > 0)) || (!ret && (refs >= 0)));
-
- return ret;
-}
diff --git a/drivers/staging/tidspbridge/pmgr/msg.c b/drivers/staging/tidspbridge/pmgr/msg.c
index a6916039eed..f093cfb51c0 100644
--- a/drivers/staging/tidspbridge/pmgr/msg.c
+++ b/drivers/staging/tidspbridge/pmgr/msg.c
@@ -23,9 +23,6 @@
/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>
-/* ----------------------------------- Trace & Debug */
-#include <dspbridge/dbc.h>
-
/* ----------------------------------- Bridge Driver */
#include <dspbridge/dspdefs.h>
@@ -36,9 +33,6 @@
#include <msgobj.h>
#include <dspbridge/msg.h>
-/* ----------------------------------- Globals */
-static u32 refs; /* module reference count */
-
/*
* ======== msg_create ========
* Purpose:
@@ -53,11 +47,6 @@ int msg_create(struct msg_mgr **msg_man,
struct msg_mgr *hmsg_mgr;
int status = 0;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(msg_man != NULL);
- DBC_REQUIRE(msg_callback != NULL);
- DBC_REQUIRE(hdev_obj != NULL);
-
*msg_man = NULL;
dev_get_intf_fxns(hdev_obj, &intf_fxns);
@@ -90,8 +79,6 @@ void msg_delete(struct msg_mgr *hmsg_mgr)
struct msg_mgr_ *msg_mgr_obj = (struct msg_mgr_ *)hmsg_mgr;
struct bridge_drv_interface *intf_fxns;
- DBC_REQUIRE(refs > 0);
-
if (msg_mgr_obj) {
intf_fxns = msg_mgr_obj->intf_fxns;
@@ -102,28 +89,3 @@ void msg_delete(struct msg_mgr *hmsg_mgr)
__func__, hmsg_mgr);
}
}
-
-/*
- * ======== msg_exit ========
- */
-void msg_exit(void)
-{
- DBC_REQUIRE(refs > 0);
- refs--;
-
- DBC_ENSURE(refs >= 0);
-}
-
-/*
- * ======== msg_mod_init ========
- */
-bool msg_mod_init(void)
-{
- DBC_REQUIRE(refs >= 0);
-
- refs++;
-
- DBC_ENSURE(refs >= 0);
-
- return true;
-}
diff --git a/drivers/staging/tidspbridge/rmgr/dbdcd.c b/drivers/staging/tidspbridge/rmgr/dbdcd.c
index fda240214cd..12a1d34b395 100644
--- a/drivers/staging/tidspbridge/rmgr/dbdcd.c
+++ b/drivers/staging/tidspbridge/rmgr/dbdcd.c
@@ -29,8 +29,6 @@
/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>
-/* ----------------------------------- Trace & Debug */
-#include <dspbridge/dbc.h>
/* ----------------------------------- Platform Manager */
#include <dspbridge/cod.h>
@@ -85,8 +83,6 @@ int dcd_auto_register(struct dcd_manager *hdcd_mgr,
{
int status = 0;
- DBC_REQUIRE(refs > 0);
-
if (hdcd_mgr)
status = dcd_get_objects(hdcd_mgr, sz_coff_path,
(dcd_registerfxn) dcd_register_object,
@@ -107,8 +103,6 @@ int dcd_auto_unregister(struct dcd_manager *hdcd_mgr,
{
int status = 0;
- DBC_REQUIRE(refs > 0);
-
if (hdcd_mgr)
status = dcd_get_objects(hdcd_mgr, sz_coff_path,
(dcd_registerfxn) dcd_register_object,
@@ -131,9 +125,6 @@ int dcd_create_manager(char *sz_zl_dll_name,
struct dcd_manager *dcd_mgr_obj = NULL; /* DCD Manager pointer */
int status = 0;
- DBC_REQUIRE(refs >= 0);
- DBC_REQUIRE(dcd_mgr);
-
status = cod_create(&cod_mgr, sz_zl_dll_name);
if (status)
goto func_end;
@@ -156,9 +147,6 @@ int dcd_create_manager(char *sz_zl_dll_name,
cod_delete(cod_mgr);
}
- DBC_ENSURE((!status) ||
- ((dcd_mgr_obj == NULL) && (status == -ENOMEM)));
-
func_end:
return status;
}
@@ -173,8 +161,6 @@ int dcd_destroy_manager(struct dcd_manager *hdcd_mgr)
struct dcd_manager *dcd_mgr_obj = hdcd_mgr;
int status = -EFAULT;
- DBC_REQUIRE(refs >= 0);
-
if (hdcd_mgr) {
/* Delete the COD manager. */
cod_delete(dcd_mgr_obj->cod_mgr);
@@ -205,10 +191,6 @@ int dcd_enumerate_object(s32 index, enum dsp_dcdobjtype obj_type,
struct dcd_key_elem *dcd_key;
int len;
- DBC_REQUIRE(refs >= 0);
- DBC_REQUIRE(index >= 0);
- DBC_REQUIRE(uuid_obj != NULL);
-
if ((index != 0) && (enum_refs == 0)) {
/*
* If an enumeration is being performed on an index greater
@@ -222,7 +204,6 @@ int dcd_enumerate_object(s32 index, enum dsp_dcdobjtype obj_type,
* "_\0" + length of sz_obj_type string + terminating NULL.
*/
dw_key_len = strlen(DCD_REGKEY) + 1 + sizeof(sz_obj_type) + 1;
- DBC_ASSERT(dw_key_len < DCD_MAXPATHLENGTH);
/* Create proper REG key; concatenate DCD_REGKEY with
* obj_type. */
@@ -294,8 +275,6 @@ int dcd_enumerate_object(s32 index, enum dsp_dcdobjtype obj_type,
}
}
- DBC_ENSURE(uuid_obj || (status == -EPERM));
-
return status;
}
@@ -307,11 +286,9 @@ int dcd_enumerate_object(s32 index, enum dsp_dcdobjtype obj_type,
void dcd_exit(void)
{
struct dcd_key_elem *rv, *rv_tmp;
- DBC_REQUIRE(refs > 0);
refs--;
if (refs == 0) {
- cod_exit();
list_for_each_entry_safe(rv, rv_tmp, &reg_key_list, link) {
list_del(&rv->link);
kfree(rv->path);
@@ -319,7 +296,6 @@ void dcd_exit(void)
}
}
- DBC_ENSURE(refs >= 0);
}
/*
@@ -333,12 +309,6 @@ int dcd_get_dep_libs(struct dcd_manager *hdcd_mgr,
{
int status = 0;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(hdcd_mgr);
- DBC_REQUIRE(uuid_obj != NULL);
- DBC_REQUIRE(dep_lib_uuids != NULL);
- DBC_REQUIRE(prstnt_dep_libs != NULL);
-
status =
get_dep_lib_info(hdcd_mgr, uuid_obj, &num_libs, NULL, dep_lib_uuids,
prstnt_dep_libs, phase);
@@ -356,12 +326,6 @@ int dcd_get_num_dep_libs(struct dcd_manager *hdcd_mgr,
{
int status = 0;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(hdcd_mgr);
- DBC_REQUIRE(num_libs != NULL);
- DBC_REQUIRE(num_pers_libs != NULL);
- DBC_REQUIRE(uuid_obj != NULL);
-
status = get_dep_lib_info(hdcd_mgr, uuid_obj, num_libs, num_pers_libs,
NULL, NULL, phase);
@@ -393,10 +357,6 @@ int dcd_get_object_def(struct dcd_manager *hdcd_mgr,
u32 dw_key_len; /* Len of REG key. */
char sz_obj_type[MAX_INT2CHAR_LENGTH]; /* str. rep. of obj_type. */
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(obj_def != NULL);
- DBC_REQUIRE(obj_uuid != NULL);
-
sz_uuid = kzalloc(MAXUUIDLEN, GFP_KERNEL);
if (!sz_uuid) {
status = -ENOMEM;
@@ -411,7 +371,6 @@ int dcd_get_object_def(struct dcd_manager *hdcd_mgr,
/* Pre-determine final key length. It's length of DCD_REGKEY +
* "_\0" + length of sz_obj_type string + terminating NULL */
dw_key_len = strlen(DCD_REGKEY) + 1 + sizeof(sz_obj_type) + 1;
- DBC_ASSERT(dw_key_len < DCD_MAXPATHLENGTH);
/* Create proper REG key; concatenate DCD_REGKEY with obj_type. */
strncpy(sz_reg_key, DCD_REGKEY, strlen(DCD_REGKEY) + 1);
@@ -470,7 +429,6 @@ int dcd_get_object_def(struct dcd_manager *hdcd_mgr,
}
/* Ensure sz_uuid + 1 is not greater than sizeof sz_sect_name. */
- DBC_ASSERT((strlen(sz_uuid) + 1) < sizeof(sz_sect_name));
/* Create section name based on node UUID. A period is
* pre-pended to the UUID string to form the section name.
@@ -553,7 +511,6 @@ int dcd_get_objects(struct dcd_manager *hdcd_mgr,
struct dsp_uuid dsp_uuid_obj;
s32 object_type;
- DBC_REQUIRE(refs > 0);
if (!hdcd_mgr) {
status = -EFAULT;
goto func_end;
@@ -663,11 +620,6 @@ int dcd_get_library_name(struct dcd_manager *hdcd_mgr,
int status = 0;
struct dcd_key_elem *dcd_key = NULL;
- DBC_REQUIRE(uuid_obj != NULL);
- DBC_REQUIRE(str_lib_name != NULL);
- DBC_REQUIRE(buff_size != NULL);
- DBC_REQUIRE(hdcd_mgr);
-
dev_dbg(bridge, "%s: hdcd_mgr %p, uuid_obj %p, str_lib_name %p,"
" buff_size %p\n", __func__, hdcd_mgr, uuid_obj, str_lib_name,
buff_size);
@@ -677,7 +629,6 @@ int dcd_get_library_name(struct dcd_manager *hdcd_mgr,
* "_\0" + length of sz_obj_type string + terminating NULL.
*/
dw_key_len = strlen(DCD_REGKEY) + 1 + sizeof(sz_obj_type) + 1;
- DBC_ASSERT(dw_key_len < DCD_MAXPATHLENGTH);
/* Create proper REG key; concatenate DCD_REGKEY with obj_type. */
strncpy(sz_reg_key, DCD_REGKEY, strlen(DCD_REGKEY) + 1);
@@ -705,7 +656,6 @@ int dcd_get_library_name(struct dcd_manager *hdcd_mgr,
break;
default:
status = -EINVAL;
- DBC_ASSERT(false);
}
if (!status) {
if ((strlen(sz_reg_key) + strlen(sz_obj_type)) <
@@ -787,30 +737,14 @@ int dcd_get_library_name(struct dcd_manager *hdcd_mgr,
*/
bool dcd_init(void)
{
- bool init_cod;
bool ret = true;
- DBC_REQUIRE(refs >= 0);
-
- if (refs == 0) {
- /* Initialize required modules. */
- init_cod = cod_init();
-
- if (!init_cod) {
- ret = false;
- /* Exit initialized modules. */
- if (init_cod)
- cod_exit();
- }
-
+ if (refs == 0)
INIT_LIST_HEAD(&reg_key_list);
- }
if (ret)
refs++;
- DBC_ENSURE((ret && (refs > 0)) || (!ret && (refs == 0)));
-
return ret;
}
@@ -832,15 +766,6 @@ int dcd_register_object(struct dsp_uuid *uuid_obj,
char sz_obj_type[MAX_INT2CHAR_LENGTH]; /* str. rep. of obj_type. */
struct dcd_key_elem *dcd_key = NULL;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(uuid_obj != NULL);
- DBC_REQUIRE((obj_type == DSP_DCDNODETYPE) ||
- (obj_type == DSP_DCDPROCESSORTYPE) ||
- (obj_type == DSP_DCDLIBRARYTYPE) ||
- (obj_type == DSP_DCDCREATELIBTYPE) ||
- (obj_type == DSP_DCDEXECUTELIBTYPE) ||
- (obj_type == DSP_DCDDELETELIBTYPE));
-
dev_dbg(bridge, "%s: object UUID %p, obj_type %d, szPathName %s\n",
__func__, uuid_obj, obj_type, psz_path_name);
@@ -849,7 +774,6 @@ int dcd_register_object(struct dsp_uuid *uuid_obj,
* "_\0" + length of sz_obj_type string + terminating NULL.
*/
dw_key_len = strlen(DCD_REGKEY) + 1 + sizeof(sz_obj_type) + 1;
- DBC_ASSERT(dw_key_len < DCD_MAXPATHLENGTH);
/* Create proper REG key; concatenate DCD_REGKEY with obj_type. */
strncpy(sz_reg_key, DCD_REGKEY, strlen(DCD_REGKEY) + 1);
@@ -987,15 +911,6 @@ int dcd_unregister_object(struct dsp_uuid *uuid_obj,
{
int status = 0;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(uuid_obj != NULL);
- DBC_REQUIRE((obj_type == DSP_DCDNODETYPE) ||
- (obj_type == DSP_DCDPROCESSORTYPE) ||
- (obj_type == DSP_DCDLIBRARYTYPE) ||
- (obj_type == DSP_DCDCREATELIBTYPE) ||
- (obj_type == DSP_DCDEXECUTELIBTYPE) ||
- (obj_type == DSP_DCDDELETELIBTYPE));
-
/*
* When dcd_register_object is called with NULL as pathname,
* it indicates an unregister object operation.
@@ -1055,12 +970,6 @@ static int get_attrs_from_buf(char *psz_buf, u32 ul_buf_size,
s32 entry_id;
#endif
- DBC_REQUIRE(psz_buf != NULL);
- DBC_REQUIRE(ul_buf_size != 0);
- DBC_REQUIRE((obj_type == DSP_DCDNODETYPE)
- || (obj_type == DSP_DCDPROCESSORTYPE));
- DBC_REQUIRE(gen_obj != NULL);
-
switch (obj_type) {
case DSP_DCDNODETYPE:
/*
@@ -1082,7 +991,6 @@ static int get_attrs_from_buf(char *psz_buf, u32 ul_buf_size,
token = strsep(&psz_cur, seps);
/* ac_name */
- DBC_REQUIRE(token);
token_len = strlen(token);
if (token_len > DSP_MAXNAMELEN - 1)
token_len = DSP_MAXNAMELEN - 1;
@@ -1167,7 +1075,6 @@ static int get_attrs_from_buf(char *psz_buf, u32 ul_buf_size,
token = strsep(&psz_cur, seps);
/* char *str_create_phase_fxn */
- DBC_REQUIRE(token);
token_len = strlen(token);
gen_obj->obj_data.node_obj.str_create_phase_fxn =
kzalloc(token_len + 1, GFP_KERNEL);
@@ -1178,7 +1085,6 @@ static int get_attrs_from_buf(char *psz_buf, u32 ul_buf_size,
token = strsep(&psz_cur, seps);
/* char *str_execute_phase_fxn */
- DBC_REQUIRE(token);
token_len = strlen(token);
gen_obj->obj_data.node_obj.str_execute_phase_fxn =
kzalloc(token_len + 1, GFP_KERNEL);
@@ -1189,7 +1095,6 @@ static int get_attrs_from_buf(char *psz_buf, u32 ul_buf_size,
token = strsep(&psz_cur, seps);
/* char *str_delete_phase_fxn */
- DBC_REQUIRE(token);
token_len = strlen(token);
gen_obj->obj_data.node_obj.str_delete_phase_fxn =
kzalloc(token_len + 1, GFP_KERNEL);
@@ -1421,12 +1326,6 @@ static int get_dep_lib_info(struct dcd_manager *hdcd_mgr,
u16 dep_libs = 0;
int status = 0;
- DBC_REQUIRE(refs > 0);
-
- DBC_REQUIRE(hdcd_mgr);
- DBC_REQUIRE(num_libs != NULL);
- DBC_REQUIRE(uuid_obj != NULL);
-
/* Initialize to 0 dependent libraries, if only counting number of
* dependent libraries */
if (!get_uuids) {
diff --git a/drivers/staging/tidspbridge/rmgr/disp.c b/drivers/staging/tidspbridge/rmgr/disp.c
index a9aa22f3b4f..4af51b75aea 100644
--- a/drivers/staging/tidspbridge/rmgr/disp.c
+++ b/drivers/staging/tidspbridge/rmgr/disp.c
@@ -24,9 +24,6 @@
/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>
-/* ----------------------------------- Trace & Debug */
-#include <dspbridge/dbc.h>
-
/* ----------------------------------- OS Adaptation Layer */
#include <dspbridge/sync.h>
@@ -72,8 +69,6 @@ struct disp_object {
u32 data_mau_size; /* Size of DSP Data MAU */
};
-static u32 refs;
-
static void delete_disp(struct disp_object *disp_obj);
static int fill_stream_def(rms_word *pdw_buf, u32 *ptotal, u32 offset,
struct node_strmdef strm_def, u32 max,
@@ -96,11 +91,6 @@ int disp_create(struct disp_object **dispatch_obj,
int status = 0;
u8 dev_type;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(dispatch_obj != NULL);
- DBC_REQUIRE(disp_attrs != NULL);
- DBC_REQUIRE(hdev_obj != NULL);
-
*dispatch_obj = NULL;
/* Allocate Node Dispatcher object */
@@ -168,8 +158,6 @@ func_cont:
else
delete_disp(disp_obj);
- DBC_ENSURE((status && *dispatch_obj == NULL) ||
- (!status && *dispatch_obj));
return status;
}
@@ -179,43 +167,10 @@ func_cont:
*/
void disp_delete(struct disp_object *disp_obj)
{
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(disp_obj);
-
delete_disp(disp_obj);
}
/*
- * ======== disp_exit ========
- * Discontinue usage of DISP module.
- */
-void disp_exit(void)
-{
- DBC_REQUIRE(refs > 0);
-
- refs--;
-
- DBC_ENSURE(refs >= 0);
-}
-
-/*
- * ======== disp_init ========
- * Initialize the DISP module.
- */
-bool disp_init(void)
-{
- bool ret = true;
-
- DBC_REQUIRE(refs >= 0);
-
- if (ret)
- refs++;
-
- DBC_ENSURE((ret && (refs > 0)) || (!ret && (refs >= 0)));
- return ret;
-}
-
-/*
* ======== disp_node_change_priority ========
* Change the priority of a node currently running on the target.
*/
@@ -227,10 +182,6 @@ int disp_node_change_priority(struct disp_object *disp_obj,
struct rms_command *rms_cmd;
int status = 0;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(disp_obj);
- DBC_REQUIRE(hnode != NULL);
-
/* Send message to RMS to change priority */
rms_cmd = (struct rms_command *)(disp_obj->buf);
rms_cmd->fxn = (rms_word) (rms_fxn);
@@ -276,12 +227,6 @@ int disp_node_create(struct disp_object *disp_obj,
struct dsp_nodeinfo node_info;
u8 dev_type;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(disp_obj);
- DBC_REQUIRE(hnode != NULL);
- DBC_REQUIRE(node_get_type(hnode) != NODE_DEVICE);
- DBC_REQUIRE(node_env != NULL);
-
status = dev_get_dev_type(disp_obj->dev_obj, &dev_type);
if (status)
@@ -292,11 +237,9 @@ int disp_node_create(struct disp_object *disp_obj,
__func__, dev_type);
goto func_end;
}
- DBC_REQUIRE(pargs != NULL);
node_type = node_get_type(hnode);
node_msg_args = pargs->asa.node_msg_args;
max = disp_obj->bufsize_rms; /*Max # of RMS words that can be sent */
- DBC_ASSERT(max == RMS_COMMANDBUFSIZE);
chars_in_rms_word = sizeof(rms_word) / disp_obj->char_size;
/* Number of RMS words needed to hold arg data */
dw_length =
@@ -457,7 +400,6 @@ int disp_node_create(struct disp_object *disp_obj,
}
if (!status) {
ul_bytes = total * sizeof(rms_word);
- DBC_ASSERT(ul_bytes < (RMS_COMMANDBUFSIZE * sizeof(rms_word)));
status = send_message(disp_obj, node_get_timeout(hnode),
ul_bytes, node_env);
}
@@ -480,10 +422,6 @@ int disp_node_delete(struct disp_object *disp_obj,
int status = 0;
u8 dev_type;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(disp_obj);
- DBC_REQUIRE(hnode != NULL);
-
status = dev_get_dev_type(disp_obj->dev_obj, &dev_type);
if (!status) {
@@ -521,9 +459,6 @@ int disp_node_run(struct disp_object *disp_obj,
struct rms_command *rms_cmd;
int status = 0;
u8 dev_type;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(disp_obj);
- DBC_REQUIRE(hnode != NULL);
status = dev_get_dev_type(disp_obj->dev_obj, &dev_type);
@@ -620,7 +555,6 @@ static int fill_stream_def(rms_word *pdw_buf, u32 *ptotal, u32 offset,
* 1 from total.
*/
total += sizeof(struct rms_strm_def) / sizeof(rms_word) - 1;
- DBC_REQUIRE(strm_def.sz_device);
dw_length = strlen(strm_def.sz_device) + 1;
/* Number of RMS_WORDS needed to hold device name */
@@ -659,8 +593,6 @@ static int send_message(struct disp_object *disp_obj, u32 timeout,
struct chnl_ioc chnl_ioc_obj;
int status = 0;
- DBC_REQUIRE(pdw_arg != NULL);
-
*pdw_arg = (u32) NULL;
intf_fxns = disp_obj->intf_fxns;
chnl_obj = disp_obj->chnl_to_dsp;
@@ -703,7 +635,6 @@ static int send_message(struct disp_object *disp_obj, u32 timeout,
status = -EPERM;
} else {
if (CHNL_IS_IO_COMPLETE(chnl_ioc_obj)) {
- DBC_ASSERT(chnl_ioc_obj.buf == pbuf);
if (*((int *)chnl_ioc_obj.buf) < 0) {
/* Translate DSP's to kernel error */
status = -EREMOTEIO;
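
disp.c loses its init/exit pair and the static refs counter outright — the counter guarded no resource, so nothing needs to replace it. One deleted check did encode a real invariant, though: send_message() used to assert that the buffer returned in the I/O completion was the very buffer it submitted. If that invariant were ever in doubt, the conventional kernel replacement would be a loud runtime check rather than a debug-only macro — a hypothetical follow-up, not part of this patch:

if (CHNL_IS_IO_COMPLETE(chnl_ioc_obj)) {
        /* hypothetical: surface the old DBC_ASSERT at runtime */
        WARN_ON_ONCE(chnl_ioc_obj.buf != pbuf);
        if (*((int *)chnl_ioc_obj.buf) < 0)
                status = -EREMOTEIO;    /* translate DSP error */
}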
diff --git a/drivers/staging/tidspbridge/rmgr/drv.c b/drivers/staging/tidspbridge/rmgr/drv.c
index db8215f540d..6795205b015 100644
--- a/drivers/staging/tidspbridge/rmgr/drv.c
+++ b/drivers/staging/tidspbridge/rmgr/drv.c
@@ -24,9 +24,6 @@
/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>
-/* ----------------------------------- Trace & Debug */
-#include <dspbridge/dbc.h>
-
/* ----------------------------------- This */
#include <dspbridge/drv.h>
#include <dspbridge/dev.h>
@@ -54,7 +51,6 @@ struct drv_ext {
};
/* ----------------------------------- Globals */
-static s32 refs;
static bool ext_phys_mem_pool_enabled;
struct ext_phys_mem_pool {
u32 phys_mem_base;
@@ -172,7 +168,6 @@ void drv_proc_node_update_status(void *node_resource, s32 status)
{
struct node_res_object *node_res_obj =
(struct node_res_object *)node_resource;
- DBC_ASSERT(node_resource != NULL);
node_res_obj->node_allocated = status;
}
@@ -181,7 +176,6 @@ void drv_proc_node_update_heap_status(void *node_resource, s32 status)
{
struct node_res_object *node_res_obj =
(struct node_res_object *)node_resource;
- DBC_ASSERT(node_resource != NULL);
node_res_obj->heap_allocated = status;
}
@@ -308,9 +302,6 @@ int drv_create(struct drv_object **drv_obj)
struct drv_object *pdrv_object = NULL;
struct drv_data *drv_datap = dev_get_drvdata(bridge);
- DBC_REQUIRE(drv_obj != NULL);
- DBC_REQUIRE(refs > 0);
-
pdrv_object = kzalloc(sizeof(struct drv_object), GFP_KERNEL);
if (pdrv_object) {
/* Create and Initialize List of device objects */
@@ -336,25 +327,10 @@ int drv_create(struct drv_object **drv_obj)
kfree(pdrv_object);
}
- DBC_ENSURE(status || pdrv_object);
return status;
}
/*
- * ======== drv_exit ========
- * Purpose:
- * Discontinue usage of the DRV module.
- */
-void drv_exit(void)
-{
- DBC_REQUIRE(refs > 0);
-
- refs--;
-
- DBC_ENSURE(refs >= 0);
-}
-
-/*
* ======== = drv_destroy ======== =
* purpose:
* Invoked during bridge de-initialization
@@ -365,9 +341,6 @@ int drv_destroy(struct drv_object *driver_obj)
struct drv_object *pdrv_object = (struct drv_object *)driver_obj;
struct drv_data *drv_datap = dev_get_drvdata(bridge);
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(pdrv_object);
-
kfree(pdrv_object);
/* Update the DRV Object in the driver data */
if (drv_datap) {
@@ -389,17 +362,8 @@ int drv_get_dev_object(u32 index, struct drv_object *hdrv_obj,
struct dev_object **device_obj)
{
int status = 0;
-#ifdef CONFIG_TIDSPBRIDGE_DEBUG
- /* used only for Assertions and debug messages */
- struct drv_object *pdrv_obj = (struct drv_object *)hdrv_obj;
-#endif
struct dev_object *dev_obj;
u32 i;
- DBC_REQUIRE(pdrv_obj);
- DBC_REQUIRE(device_obj != NULL);
- DBC_REQUIRE(index >= 0);
- DBC_REQUIRE(refs > 0);
- DBC_ASSERT(!(list_empty(&pdrv_obj->dev_list)));
dev_obj = (struct dev_object *)drv_get_first_dev_object();
for (i = 0; i < index; i++) {
@@ -524,25 +488,6 @@ u32 drv_get_next_dev_extension(u32 dev_extension)
}
/*
- * ======== drv_init ========
- * Purpose:
- * Initialize DRV module private state.
- */
-int drv_init(void)
-{
- s32 ret = 1; /* function return value */
-
- DBC_REQUIRE(refs >= 0);
-
- if (ret)
- refs++;
-
- DBC_ENSURE((ret && (refs > 0)) || (!ret && (refs >= 0)));
-
- return ret;
-}
-
-/*
* ======== drv_insert_dev_object ========
* Purpose:
* Insert a DevObject into the list of Manager object.
@@ -552,10 +497,6 @@ int drv_insert_dev_object(struct drv_object *driver_obj,
{
struct drv_object *pdrv_object = (struct drv_object *)driver_obj;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(hdev_obj != NULL);
- DBC_REQUIRE(pdrv_object);
-
list_add_tail((struct list_head *)hdev_obj, &pdrv_object->dev_list);
return 0;
@@ -574,12 +515,6 @@ int drv_remove_dev_object(struct drv_object *driver_obj,
struct drv_object *pdrv_object = (struct drv_object *)driver_obj;
struct list_head *cur_elem;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(pdrv_object);
- DBC_REQUIRE(hdev_obj != NULL);
-
- DBC_REQUIRE(!list_empty(&pdrv_object->dev_list));
-
/* Search list for p_proc_object: */
list_for_each(cur_elem, &pdrv_object->dev_list) {
/* If found, remove it. */
@@ -605,9 +540,6 @@ int drv_request_resources(u32 dw_context, u32 *dev_node_strg)
struct drv_ext *pszdev_node;
struct drv_data *drv_datap = dev_get_drvdata(bridge);
- DBC_REQUIRE(dw_context != 0);
- DBC_REQUIRE(dev_node_strg != NULL);
-
/*
* Allocate memory to hold the string. This will live until
* it is freed in the Release resources. Update the driver object
@@ -639,10 +571,6 @@ int drv_request_resources(u32 dw_context, u32 *dev_node_strg)
*dev_node_strg = 0;
}
- DBC_ENSURE((!status && dev_node_strg != NULL &&
- !list_empty(&pdrv_object->dev_node_string)) ||
- (status && *dev_node_strg == 0));
-
return status;
}
@@ -900,8 +828,6 @@ void *mem_alloc_phys_mem(u32 byte_size, u32 align_mask,
void mem_free_phys_mem(void *virtual_address, u32 physical_address,
u32 byte_size)
{
- DBC_REQUIRE(virtual_address != NULL);
-
if (!ext_phys_mem_pool_enabled)
dma_free_coherent(NULL, byte_size, virtual_address,
physical_address);
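
A detail in drv_get_dev_object() is easy to miss: the old code kept a local pdrv_obj alive under #ifdef CONFIG_TIDSPBRIDGE_DEBUG purely so the assertions had something to check, which is why the patch deletes the #ifdef block and the asserts in the same hunk — drop only the asserts and debug builds start warning about an unused variable. The trap, reduced to its shape (function name is illustrative):

static int get_dev(u32 index, struct drv_object *hdrv_obj)
{
#ifdef CONFIG_TIDSPBRIDGE_DEBUG
        /* exists only to feed the assertion below */
        struct drv_object *pdrv_obj = (struct drv_object *)hdrv_obj;
#endif
        DBC_REQUIRE(pdrv_obj);          /* expands to nothing when !DEBUG */

        return 0;                       /* real lookup elided */
}

Debug-only locals and the checks that consume them have to leave together.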
diff --git a/drivers/staging/tidspbridge/rmgr/drv_interface.c b/drivers/staging/tidspbridge/rmgr/drv_interface.c
index 385740bad0d..3cac0149206 100644
--- a/drivers/staging/tidspbridge/rmgr/drv_interface.c
+++ b/drivers/staging/tidspbridge/rmgr/drv_interface.c
@@ -16,11 +16,8 @@
* WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
*/
-/* ----------------------------------- Host OS */
-
#include <plat/dsp.h>
-#include <dspbridge/host_os.h>
#include <linux/types.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
@@ -33,36 +30,25 @@
/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>
-/* ----------------------------------- Trace & Debug */
-#include <dspbridge/dbc.h>
-
/* ----------------------------------- OS Adaptation Layer */
#include <dspbridge/clk.h>
-#include <dspbridge/sync.h>
/* ----------------------------------- Platform Manager */
-#include <dspbridge/dspapi-ioctl.h>
#include <dspbridge/dspapi.h>
#include <dspbridge/dspdrv.h>
/* ----------------------------------- Resource Manager */
#include <dspbridge/pwr.h>
-/* ----------------------------------- This */
-#include <drv_interface.h>
-
#include <dspbridge/resourcecleanup.h>
-#include <dspbridge/chnl.h>
#include <dspbridge/proc.h>
#include <dspbridge/dev.h>
-#include <dspbridge/drv.h>
#ifdef CONFIG_TIDSPBRIDGE_DVFS
#include <mach-omap2/omap3-opp.h>
#endif
/* ----------------------------------- Globals */
-#define DRIVER_NAME "DspBridge"
#define DSPBRIDGE_VERSION "0.3"
s32 dsp_debug;
@@ -131,7 +117,166 @@ MODULE_AUTHOR("Texas Instruments");
MODULE_LICENSE("GPL");
MODULE_VERSION(DSPBRIDGE_VERSION);
-static char *driver_name = DRIVER_NAME;
+/*
+ * This function is called when an application opens a handle to the
+ * bridge driver.
+ */
+static int bridge_open(struct inode *ip, struct file *filp)
+{
+ int status = 0;
+ struct process_context *pr_ctxt = NULL;
+
+ /*
+ * Allocate a new process context and insert it into global
+ * process context list.
+ */
+
+#ifdef CONFIG_TIDSPBRIDGE_RECOVERY
+ if (recover) {
+ if (filp->f_flags & O_NONBLOCK ||
+ wait_for_completion_interruptible(&bridge_open_comp))
+ return -EBUSY;
+ }
+#endif
+ pr_ctxt = kzalloc(sizeof(struct process_context), GFP_KERNEL);
+ if (!pr_ctxt)
+ return -ENOMEM;
+
+ pr_ctxt->res_state = PROC_RES_ALLOCATED;
+ spin_lock_init(&pr_ctxt->dmm_map_lock);
+ INIT_LIST_HEAD(&pr_ctxt->dmm_map_list);
+ spin_lock_init(&pr_ctxt->dmm_rsv_lock);
+ INIT_LIST_HEAD(&pr_ctxt->dmm_rsv_list);
+
+ pr_ctxt->node_id = kzalloc(sizeof(struct idr), GFP_KERNEL);
+ if (!pr_ctxt->node_id) {
+ status = -ENOMEM;
+ goto err1;
+ }
+
+ idr_init(pr_ctxt->node_id);
+
+ pr_ctxt->stream_id = kzalloc(sizeof(struct idr), GFP_KERNEL);
+ if (!pr_ctxt->stream_id) {
+ status = -ENOMEM;
+ goto err2;
+ }
+
+ idr_init(pr_ctxt->stream_id);
+
+ filp->private_data = pr_ctxt;
+
+#ifdef CONFIG_TIDSPBRIDGE_RECOVERY
+ atomic_inc(&bridge_cref);
+#endif
+ return 0;
+
+err2:
+ kfree(pr_ctxt->node_id);
+err1:
+ kfree(pr_ctxt);
+ return status;
+}
+
+/*
+ * This function is called when an application closes a handle to the
+ * bridge driver.
+ */
+static int bridge_release(struct inode *ip, struct file *filp)
+{
+ int status = 0;
+ struct process_context *pr_ctxt;
+
+ if (!filp->private_data) {
+ status = -EIO;
+ goto err;
+ }
+
+ pr_ctxt = filp->private_data;
+ flush_signals(current);
+ drv_remove_all_resources(pr_ctxt);
+ proc_detach(pr_ctxt);
+ kfree(pr_ctxt->node_id);
+ kfree(pr_ctxt->stream_id);
+ kfree(pr_ctxt);
+
+ filp->private_data = NULL;
+
+err:
+#ifdef CONFIG_TIDSPBRIDGE_RECOVERY
+ if (!atomic_dec_return(&bridge_cref))
+ complete(&bridge_comp);
+#endif
+ return status;
+}
+
+/* This function provides IO interface to the bridge driver. */
+static long bridge_ioctl(struct file *filp, unsigned int code,
+ unsigned long args)
+{
+ int status;
+ u32 retval = 0;
+ union trapped_args buf_in;
+
+#ifdef CONFIG_TIDSPBRIDGE_RECOVERY
+ if (recover) {
+ status = -EIO;
+ goto err;
+ }
+#endif
+#ifdef CONFIG_PM
+ status = omap34_xxbridge_suspend_lockout(&bridge_suspend_data, filp);
+ if (status != 0)
+ return status;
+#endif
+
+ if (!filp->private_data) {
+ status = -EIO;
+ goto err;
+ }
+
+ status = copy_from_user(&buf_in, (union trapped_args *)args,
+ sizeof(union trapped_args));
+
+ if (!status) {
+ status = api_call_dev_ioctl(code, &buf_in, &retval,
+ filp->private_data);
+
+ if (!status) {
+ status = retval;
+ } else {
+ dev_dbg(bridge, "%s: IOCTL Failed, code: 0x%x "
+ "status 0x%x\n", __func__, code, status);
+ status = -1;
+ }
+
+ }
+
+err:
+ return status;
+}
+
+/* This function maps kernel space memory to user space memory. */
+static int bridge_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ u32 status;
+
+ vma->vm_flags |= VM_RESERVED | VM_IO;
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+ dev_dbg(bridge, "%s: vm filp %p start %lx end %lx page_prot %lx "
+ "flags %lx\n", __func__, filp,
+ vma->vm_start, vma->vm_end,
+ pgprot_val(vma->vm_page_prot), vma->vm_flags);
+
+ status = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
+ vma->vm_end - vma->vm_start,
+ vma->vm_page_prot);
+ if (status != 0)
+ status = -EAGAIN;
+
+ return status;
+}
static const struct file_operations bridge_fops = {
.open = bridge_open,
@@ -211,10 +356,10 @@ void bridge_recover_schedule(void)
#endif
#ifdef CONFIG_TIDSPBRIDGE_DVFS
static int dspbridge_scale_notification(struct notifier_block *op,
- unsigned long val, void *ptr)
+ unsigned long val, void *ptr)
{
struct omap_dsp_platform_data *pdata =
- omap_dspbridge_dev->dev.platform_data;
+ omap_dspbridge_dev->dev.platform_data;
if (CPUFREQ_POSTCHANGE == val && pdata->dsp_get_opp)
pwr_pm_post_scale(PRCM_VDD1, pdata->dsp_get_opp());
@@ -319,7 +464,7 @@ err2:
err1:
#ifdef CONFIG_TIDSPBRIDGE_DVFS
cpufreq_unregister_notifier(&iva_clk_notifier,
- CPUFREQ_TRANSITION_NOTIFIER);
+ CPUFREQ_TRANSITION_NOTIFIER);
#endif
dsp_clk_exit();
@@ -345,7 +490,7 @@ static int __devinit omap34_xx_bridge_probe(struct platform_device *pdev)
goto err1;
/* use 2.6 device model */
- err = alloc_chrdev_region(&dev, 0, 1, driver_name);
+ err = alloc_chrdev_region(&dev, 0, 1, "DspBridge");
if (err) {
pr_err("%s: Can't get major %d\n", __func__, driver_major);
goto err1;
@@ -385,7 +530,6 @@ err1:
static int __devexit omap34_xx_bridge_remove(struct platform_device *pdev)
{
dev_t devno;
- bool ret;
int status = 0;
struct drv_data *drv_datap = dev_get_drvdata(bridge);
@@ -398,16 +542,15 @@ static int __devexit omap34_xx_bridge_remove(struct platform_device *pdev)
#ifdef CONFIG_TIDSPBRIDGE_DVFS
if (cpufreq_unregister_notifier(&iva_clk_notifier,
- CPUFREQ_TRANSITION_NOTIFIER))
+ CPUFREQ_TRANSITION_NOTIFIER))
pr_err("%s: cpufreq_unregister_notifier failed for iva2_ck\n",
__func__);
#endif /* #ifdef CONFIG_TIDSPBRIDGE_DVFS */
if (driver_context) {
/* Put the DSP in reset state */
- ret = dsp_deinit(driver_context);
+ dsp_deinit(driver_context);
driver_context = 0;
- DBC_ASSERT(ret == true);
}
kfree(drv_datap);
@@ -431,7 +574,7 @@ func_cont:
}
#ifdef CONFIG_PM
-static int BRIDGE_SUSPEND(struct platform_device *pdev, pm_message_t state)
+static int bridge_suspend(struct platform_device *pdev, pm_message_t state)
{
u32 status;
u32 command = PWR_EMERGENCYDEEPSLEEP;
@@ -444,7 +587,7 @@ static int BRIDGE_SUSPEND(struct platform_device *pdev, pm_message_t state)
return 0;
}
-static int BRIDGE_RESUME(struct platform_device *pdev)
+static int bridge_resume(struct platform_device *pdev)
{
u32 status;
@@ -456,9 +599,6 @@ static int BRIDGE_RESUME(struct platform_device *pdev)
wake_up(&bridge_suspend_data.suspend_wq);
return 0;
}
-#else
-#define BRIDGE_SUSPEND NULL
-#define BRIDGE_RESUME NULL
#endif
static struct platform_driver bridge_driver = {
@@ -467,8 +607,10 @@ static struct platform_driver bridge_driver = {
},
.probe = omap34_xx_bridge_probe,
.remove = __devexit_p(omap34_xx_bridge_remove),
- .suspend = BRIDGE_SUSPEND,
- .resume = BRIDGE_RESUME,
+#ifdef CONFIG_PM
+ .suspend = bridge_suspend,
+ .resume = bridge_resume,
+#endif
};
static int __init bridge_init(void)
@@ -481,170 +623,6 @@ static void __exit bridge_exit(void)
platform_driver_unregister(&bridge_driver);
}
-/*
- * This function is called when an application opens handle to the
- * bridge driver.
- */
-static int bridge_open(struct inode *ip, struct file *filp)
-{
- int status = 0;
- struct process_context *pr_ctxt = NULL;
-
- /*
- * Allocate a new process context and insert it into global
- * process context list.
- */
-
-#ifdef CONFIG_TIDSPBRIDGE_RECOVERY
- if (recover) {
- if (filp->f_flags & O_NONBLOCK ||
- wait_for_completion_interruptible(&bridge_open_comp))
- return -EBUSY;
- }
-#endif
- pr_ctxt = kzalloc(sizeof(struct process_context), GFP_KERNEL);
- if (!pr_ctxt)
- return -ENOMEM;
-
- pr_ctxt->res_state = PROC_RES_ALLOCATED;
- spin_lock_init(&pr_ctxt->dmm_map_lock);
- INIT_LIST_HEAD(&pr_ctxt->dmm_map_list);
- spin_lock_init(&pr_ctxt->dmm_rsv_lock);
- INIT_LIST_HEAD(&pr_ctxt->dmm_rsv_list);
-
- pr_ctxt->node_id = kzalloc(sizeof(struct idr), GFP_KERNEL);
- if (!pr_ctxt->node_id) {
- status = -ENOMEM;
- goto err1;
- }
-
- idr_init(pr_ctxt->node_id);
-
- pr_ctxt->stream_id = kzalloc(sizeof(struct idr), GFP_KERNEL);
- if (!pr_ctxt->stream_id) {
- status = -ENOMEM;
- goto err2;
- }
-
- idr_init(pr_ctxt->stream_id);
-
- filp->private_data = pr_ctxt;
-
-#ifdef CONFIG_TIDSPBRIDGE_RECOVERY
- atomic_inc(&bridge_cref);
-#endif
- return 0;
-
-err2:
- kfree(pr_ctxt->node_id);
-err1:
- kfree(pr_ctxt);
- return status;
-}
-
-/*
- * This function is called when an application closes handle to the bridge
- * driver.
- */
-static int bridge_release(struct inode *ip, struct file *filp)
-{
- int status = 0;
- struct process_context *pr_ctxt;
-
- if (!filp->private_data) {
- status = -EIO;
- goto err;
- }
-
- pr_ctxt = filp->private_data;
- flush_signals(current);
- drv_remove_all_resources(pr_ctxt);
- proc_detach(pr_ctxt);
- kfree(pr_ctxt->node_id);
- kfree(pr_ctxt->stream_id);
- kfree(pr_ctxt);
-
- filp->private_data = NULL;
-
-err:
-#ifdef CONFIG_TIDSPBRIDGE_RECOVERY
- if (!atomic_dec_return(&bridge_cref))
- complete(&bridge_comp);
-#endif
- return status;
-}
-
-/* This function provides IO interface to the bridge driver. */
-static long bridge_ioctl(struct file *filp, unsigned int code,
- unsigned long args)
-{
- int status;
- u32 retval = 0;
- union trapped_args buf_in;
-
- DBC_REQUIRE(filp != NULL);
-#ifdef CONFIG_TIDSPBRIDGE_RECOVERY
- if (recover) {
- status = -EIO;
- goto err;
- }
-#endif
-#ifdef CONFIG_PM
- status = omap34_xxbridge_suspend_lockout(&bridge_suspend_data, filp);
- if (status != 0)
- return status;
-#endif
-
- if (!filp->private_data) {
- status = -EIO;
- goto err;
- }
-
- status = copy_from_user(&buf_in, (union trapped_args *)args,
- sizeof(union trapped_args));
-
- if (!status) {
- status = api_call_dev_ioctl(code, &buf_in, &retval,
- filp->private_data);
-
- if (!status) {
- status = retval;
- } else {
- dev_dbg(bridge, "%s: IOCTL Failed, code: 0x%x "
- "status 0x%x\n", __func__, code, status);
- status = -1;
- }
-
- }
-
-err:
- return status;
-}
-
-/* This function maps kernel space memory to user space memory. */
-static int bridge_mmap(struct file *filp, struct vm_area_struct *vma)
-{
- u32 offset = vma->vm_pgoff << PAGE_SHIFT;
- u32 status;
-
- DBC_ASSERT(vma->vm_start < vma->vm_end);
-
- vma->vm_flags |= VM_RESERVED | VM_IO;
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-
- dev_dbg(bridge, "%s: vm filp %p offset %x start %lx end %lx page_prot "
- "%lx flags %lx\n", __func__, filp, offset,
- vma->vm_start, vma->vm_end, vma->vm_page_prot, vma->vm_flags);
-
- status = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
- vma->vm_end - vma->vm_start,
- vma->vm_page_prot);
- if (status != 0)
- status = -EAGAIN;
-
- return status;
-}
-
/* To remove all process resources before removing the process from the
* process context list */
int drv_remove_all_resources(void *process_ctxt)
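
The bulk of the drv_interface.c churn is a straight move: bridge_open/bridge_release/bridge_ioctl/bridge_mmap now sit above the bridge_fops initializer, so the file needs no forward declarations at all — which is what lets the header below be deleted. The idiom in miniature (illustrative names, not from this driver):

/* define the handlers first ... */
static int foo_open(struct inode *ip, struct file *filp)
{
        return 0;
}

/* ... and the ops table can then reference them with no prototypes */
static const struct file_operations foo_fops = {
        .owner = THIS_MODULE,
        .open  = foo_open,
};

The PM hooks get the same cleanup in spirit: instead of ALL-CAPS wrappers #defined to NULL when CONFIG_PM is off, the renamed bridge_suspend()/bridge_resume() and their .suspend/.resume slots now appear only inside #ifdef CONFIG_PM, so the functions and the struct members come and go together.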
diff --git a/drivers/staging/tidspbridge/rmgr/drv_interface.h b/drivers/staging/tidspbridge/rmgr/drv_interface.h
deleted file mode 100644
index ab070602adc..00000000000
--- a/drivers/staging/tidspbridge/rmgr/drv_interface.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * drv_interface.h
- *
- * DSP-BIOS Bridge driver support functions for TI OMAP processors.
- *
- * Copyright (C) 2005-2006 Texas Instruments, Inc.
- *
- * This package is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
- * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
- */
-
-#ifndef _DRV_INTERFACE_H_
-#define _DRV_INTERFACE_H_
-
-/* Prototypes for all functions in this bridge */
-static int __init bridge_init(void); /* Initialize bridge */
-static void __exit bridge_exit(void); /* Opposite of initialize */
-static int bridge_open(struct inode *ip, struct file *filp); /* Open */
-static int bridge_release(struct inode *ip, struct file *filp); /* Release */
-static long bridge_ioctl(struct file *filp, unsigned int code,
- unsigned long args);
-static int bridge_mmap(struct file *filp, struct vm_area_struct *vma);
-#endif /* ifndef _DRV_INTERFACE_H_ */
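
Deleting drv_interface.h is the payoff of the reshuffle above: every prototype in it was static, and a static declaration in a header is only usable by the single .c file that includes it — any other includer gets a compiler warning that the function is never defined, or an outright error if it tries to call one. Roughly:

/* a header like this can have exactly one legitimate includer */
static int bridge_open(struct inode *ip, struct file *filp);
/* elsewhere: warning: 'bridge_open' declared but never defined */

A one-consumer header of static prototypes documents nothing that the .c file's own define-before-use layout cannot.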
diff --git a/drivers/staging/tidspbridge/rmgr/dspdrv.c b/drivers/staging/tidspbridge/rmgr/dspdrv.c
index 7a6fc737872..dc767b183cd 100644
--- a/drivers/staging/tidspbridge/rmgr/dspdrv.c
+++ b/drivers/staging/tidspbridge/rmgr/dspdrv.c
@@ -23,9 +23,6 @@
/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>
-/* ----------------------------------- Trace & Debug */
-#include <dspbridge/dbc.h>
-
/* ----------------------------------- Platform Manager */
#include <dspbridge/drv.h>
#include <dspbridge/dev.h>
@@ -102,8 +99,6 @@ func_cont:
} else {
dev_dbg(bridge, "%s: Failed\n", __func__);
} /* End api_init_complete2 */
- DBC_ENSURE((!status && drv_obj != NULL) ||
- (status && drv_obj == NULL));
*init_status = status;
/* Return the Driver Object */
return (u32) drv_obj;
diff --git a/drivers/staging/tidspbridge/rmgr/mgr.c b/drivers/staging/tidspbridge/rmgr/mgr.c
index d635c01c015..8a1e9287cff 100644
--- a/drivers/staging/tidspbridge/rmgr/mgr.c
+++ b/drivers/staging/tidspbridge/rmgr/mgr.c
@@ -26,9 +26,6 @@
/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>
-/* ----------------------------------- Trace & Debug */
-#include <dspbridge/dbc.h>
-
/* ----------------------------------- OS Adaptation Layer */
#include <dspbridge/sync.h>
@@ -62,9 +59,6 @@ int mgr_create(struct mgr_object **mgr_obj,
struct mgr_object *pmgr_obj = NULL;
struct drv_data *drv_datap = dev_get_drvdata(bridge);
- DBC_REQUIRE(mgr_obj != NULL);
- DBC_REQUIRE(refs > 0);
-
pmgr_obj = kzalloc(sizeof(struct mgr_object), GFP_KERNEL);
if (pmgr_obj) {
status = dcd_create_manager(ZLDLLNAME, &pmgr_obj->dcd_mgr);
@@ -92,7 +86,6 @@ int mgr_create(struct mgr_object **mgr_obj,
status = -ENOMEM;
}
- DBC_ENSURE(status || pmgr_obj);
return status;
}
@@ -106,9 +99,6 @@ int mgr_destroy(struct mgr_object *hmgr_obj)
struct mgr_object *pmgr_obj = (struct mgr_object *)hmgr_obj;
struct drv_data *drv_datap = dev_get_drvdata(bridge);
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(hmgr_obj);
-
/* Free resources */
if (hmgr_obj->dcd_mgr)
dcd_destroy_manager(hmgr_obj->dcd_mgr);
@@ -140,11 +130,6 @@ int mgr_enum_node_info(u32 node_id, struct dsp_ndbprops *pndb_props,
struct mgr_object *pmgr_obj = NULL;
struct drv_data *drv_datap = dev_get_drvdata(bridge);
- DBC_REQUIRE(pndb_props != NULL);
- DBC_REQUIRE(pu_num_nodes != NULL);
- DBC_REQUIRE(undb_props_size >= sizeof(struct dsp_ndbprops));
- DBC_REQUIRE(refs > 0);
-
*pu_num_nodes = 0;
/* Get the Manager Object from the driver data */
if (!drv_datap || !drv_datap->mgr_object) {
@@ -153,7 +138,6 @@ int mgr_enum_node_info(u32 node_id, struct dsp_ndbprops *pndb_props,
}
pmgr_obj = drv_datap->mgr_object;
- DBC_ASSERT(pmgr_obj);
/* Forever loop till we hit failed or no more items in the
* Enumeration. We will exit the loop other than 0; */
while (!status) {
@@ -205,11 +189,6 @@ int mgr_enum_processor_info(u32 processor_id,
struct drv_data *drv_datap = dev_get_drvdata(bridge);
bool proc_detect = false;
- DBC_REQUIRE(processor_info != NULL);
- DBC_REQUIRE(pu_num_procs != NULL);
- DBC_REQUIRE(processor_info_size >= sizeof(struct dsp_processorinfo));
- DBC_REQUIRE(refs > 0);
-
*pu_num_procs = 0;
/* Retrieve the Object handle from the driver data */
@@ -242,7 +221,6 @@ int mgr_enum_processor_info(u32 processor_id,
dev_dbg(bridge, "%s: Failed to get MGR Object\n", __func__);
goto func_end;
}
- DBC_ASSERT(pmgr_obj);
/* Forever loop till we hit no more items in the
* Enumeration. We will exit the loop other than 0; */
while (status1 == 0) {
@@ -310,12 +288,9 @@ func_end:
*/
void mgr_exit(void)
{
- DBC_REQUIRE(refs > 0);
refs--;
if (refs == 0)
dcd_exit();
-
- DBC_ENSURE(refs >= 0);
}
/*
@@ -328,16 +303,11 @@ int mgr_get_dcd_handle(struct mgr_object *mgr_handle,
int status = -EPERM;
struct mgr_object *pmgr_obj = (struct mgr_object *)mgr_handle;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(dcd_handle != NULL);
-
*dcd_handle = (u32) NULL;
if (pmgr_obj) {
*dcd_handle = (u32) pmgr_obj->dcd_mgr;
status = 0;
}
- DBC_ENSURE((!status && *dcd_handle != (u32) NULL) ||
- (status && *dcd_handle == (u32) NULL));
return status;
}
@@ -349,22 +319,13 @@ int mgr_get_dcd_handle(struct mgr_object *mgr_handle,
bool mgr_init(void)
{
bool ret = true;
- bool init_dcd = false;
- DBC_REQUIRE(refs >= 0);
-
- if (refs == 0) {
- init_dcd = dcd_init(); /* DCD Module */
-
- if (!init_dcd)
- ret = false;
- }
+ if (refs == 0)
+ ret = dcd_init(); /* DCD Module */
if (ret)
refs++;
- DBC_ENSURE((ret && (refs > 0)) || (!ret && (refs >= 0)));
-
return ret;
}
@@ -380,8 +341,6 @@ int mgr_wait_for_bridge_events(struct dsp_notification **anotifications,
struct sync_object *sync_events[MAX_EVENTS];
u32 i;
- DBC_REQUIRE(count < MAX_EVENTS);
-
for (i = 0; i < count; i++)
sync_events[i] = anotifications[i]->handle;
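
mgr_init() keeps its reference counter because, unlike disp/drv above, it still guards real work: dcd_init() runs for the first user and dcd_exit() for the last. The simplification just forwards dcd_init()'s verdict instead of going through an intermediate flag. One property worth keeping in mind (a reading of the code, not a documented contract): on failure refs stays at 0, so a later mgr_init() retries the DCD initialization. The surviving pattern, restated:

static u32 refs;

bool mgr_init(void)
{
        bool ret = true;

        if (refs == 0)
                ret = dcd_init();       /* first user does the work */
        if (ret)
                refs++;                 /* only successes are counted */
        return ret;
}

void mgr_exit(void)
{
        refs--;
        if (refs == 0)
                dcd_exit();             /* last user tears down */
}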
diff --git a/drivers/staging/tidspbridge/rmgr/nldr.c b/drivers/staging/tidspbridge/rmgr/nldr.c
index 0e70cba15eb..30d5480fcdc 100644
--- a/drivers/staging/tidspbridge/rmgr/nldr.c
+++ b/drivers/staging/tidspbridge/rmgr/nldr.c
@@ -22,8 +22,6 @@
#include <dspbridge/dbdefs.h>
-#include <dspbridge/dbc.h>
-
/* Platform manager */
#include <dspbridge/cod.h>
#include <dspbridge/dev.h>
@@ -265,8 +263,6 @@ static struct dbll_fxns ldr_fxns = {
(dbll_unload_fxn) dbll_unload,
};
-static u32 refs; /* module reference count */
-
static int add_ovly_info(void *handle, struct dbll_sect_info *sect_info,
u32 addr, u32 bytes);
static int add_ovly_node(struct dsp_uuid *uuid_obj,
@@ -313,11 +309,6 @@ int nldr_allocate(struct nldr_object *nldr_obj, void *priv_ref,
struct nldr_nodeobject *nldr_node_obj = NULL;
int status = 0;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(node_props != NULL);
- DBC_REQUIRE(nldr_nodeobj != NULL);
- DBC_REQUIRE(nldr_obj);
-
/* Initialize handle in case of failure */
*nldr_nodeobj = NULL;
/* Allocate node object */
@@ -398,8 +389,6 @@ int nldr_allocate(struct nldr_object *nldr_obj, void *priv_ref,
if (status && nldr_node_obj)
kfree(nldr_node_obj);
- DBC_ENSURE((!status && *nldr_nodeobj)
- || (status && *nldr_nodeobj == NULL));
return status;
}
@@ -425,12 +414,6 @@ int nldr_create(struct nldr_object **nldr,
struct rmm_segment *rmm_segs = NULL;
u16 i;
int status = 0;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(nldr != NULL);
- DBC_REQUIRE(hdev_obj != NULL);
- DBC_REQUIRE(pattrs != NULL);
- DBC_REQUIRE(pattrs->ovly != NULL);
- DBC_REQUIRE(pattrs->write != NULL);
/* Allocate dynamic loader object */
nldr_obj = kzalloc(sizeof(struct nldr_object), GFP_KERNEL);
@@ -440,13 +423,10 @@ int nldr_create(struct nldr_object **nldr,
dev_get_cod_mgr(hdev_obj, &cod_mgr);
if (cod_mgr) {
status = cod_get_loader(cod_mgr, &nldr_obj->dbll);
- DBC_ASSERT(!status);
status = cod_get_base_lib(cod_mgr, &nldr_obj->base_lib);
- DBC_ASSERT(!status);
status =
cod_get_base_name(cod_mgr, sz_zl_file,
COD_MAXPATHLENGTH);
- DBC_ASSERT(!status);
}
status = 0;
/* end lazy status checking */
@@ -547,7 +527,6 @@ int nldr_create(struct nldr_object **nldr,
status =
cod_get_base_name(cod_mgr, sz_zl_file, COD_MAXPATHLENGTH);
/* lazy check */
- DBC_ASSERT(!status);
/* First count number of overlay nodes */
status =
dcd_get_objects(nldr_obj->dcd_mgr, sz_zl_file,
@@ -583,7 +562,6 @@ int nldr_create(struct nldr_object **nldr,
*nldr = NULL;
}
/* FIXME:Temp. Fix. Must be removed */
- DBC_ENSURE((!status && *nldr) || (status && *nldr == NULL));
return status;
}
@@ -595,8 +573,6 @@ void nldr_delete(struct nldr_object *nldr_obj)
struct ovly_sect *ovly_section;
struct ovly_sect *next;
u16 i;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(nldr_obj);
nldr_obj->ldr_fxns.exit_fxn();
if (nldr_obj->rmm)
@@ -644,22 +620,6 @@ void nldr_delete(struct nldr_object *nldr_obj)
}
/*
- * ======== nldr_exit ========
- * Discontinue usage of NLDR module.
- */
-void nldr_exit(void)
-{
- DBC_REQUIRE(refs > 0);
-
- refs--;
-
- if (refs == 0)
- rmm_exit();
-
- DBC_ENSURE(refs >= 0);
-}
-
-/*
* ======== nldr_get_fxn_addr ========
*/
int nldr_get_fxn_addr(struct nldr_nodeobject *nldr_node_obj,
@@ -671,10 +631,6 @@ int nldr_get_fxn_addr(struct nldr_nodeobject *nldr_node_obj,
bool status1 = false;
s32 i = 0;
struct lib_node root = { NULL, 0, NULL };
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(nldr_node_obj);
- DBC_REQUIRE(addr != NULL);
- DBC_REQUIRE(str_fxn != NULL);
nldr_obj = nldr_node_obj->nldr_obj;
/* Called from node_create(), node_delete(), or node_run(). */
@@ -690,7 +646,6 @@ int nldr_get_fxn_addr(struct nldr_nodeobject *nldr_node_obj,
root = nldr_node_obj->delete_lib;
break;
default:
- DBC_ASSERT(false);
break;
}
} else {
@@ -760,7 +715,6 @@ int nldr_get_rmm_manager(struct nldr_object *nldr,
{
int status = 0;
struct nldr_object *nldr_obj = nldr;
- DBC_REQUIRE(rmm_mgr != NULL);
if (nldr) {
*rmm_mgr = nldr_obj->rmm;
@@ -769,29 +723,10 @@ int nldr_get_rmm_manager(struct nldr_object *nldr,
status = -EFAULT;
}
- DBC_ENSURE(!status || (rmm_mgr != NULL && *rmm_mgr == NULL));
-
return status;
}
/*
- * ======== nldr_init ========
- * Initialize the NLDR module.
- */
-bool nldr_init(void)
-{
- DBC_REQUIRE(refs >= 0);
-
- if (refs == 0)
- rmm_init();
-
- refs++;
-
- DBC_ENSURE(refs > 0);
- return true;
-}
-
-/*
* ======== nldr_load ========
*/
int nldr_load(struct nldr_nodeobject *nldr_node_obj,
@@ -801,9 +736,6 @@ int nldr_load(struct nldr_nodeobject *nldr_node_obj,
struct dsp_uuid lib_uuid;
int status = 0;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(nldr_node_obj);
-
nldr_obj = nldr_node_obj->nldr_obj;
if (nldr_node_obj->dynamic) {
@@ -839,7 +771,6 @@ int nldr_load(struct nldr_nodeobject *nldr_node_obj,
break;
default:
- DBC_ASSERT(false);
break;
}
}
@@ -863,9 +794,6 @@ int nldr_unload(struct nldr_nodeobject *nldr_node_obj,
struct lib_node *root_lib = NULL;
s32 i = 0;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(nldr_node_obj);
-
if (nldr_node_obj != NULL) {
if (nldr_node_obj->dynamic) {
if (*nldr_node_obj->phase_split) {
@@ -889,7 +817,6 @@ int nldr_unload(struct nldr_nodeobject *nldr_node_obj,
nldr_node_obj->pers_libs = 0;
break;
default:
- DBC_ASSERT(false);
break;
}
} else {
@@ -929,7 +856,6 @@ static int add_ovly_info(void *handle, struct dbll_sect_info *sect_info,
/* Find the node it belongs to */
for (i = 0; i < nldr_obj->ovly_nodes; i++) {
node_name = nldr_obj->ovly_table[i].node_name;
- DBC_REQUIRE(node_name);
if (strncmp(node_name, sect_name + 1, strlen(node_name)) == 0) {
/* Found the node */
break;
@@ -1018,8 +944,6 @@ static int add_ovly_node(struct dsp_uuid *uuid_obj,
/* Add node to table */
nldr_obj->ovly_table[nldr_obj->ovly_nid].uuid =
*uuid_obj;
- DBC_REQUIRE(obj_def.obj_data.node_obj.ndb_props.
- ac_name);
len =
strlen(obj_def.obj_data.node_obj.ndb_props.ac_name);
node_name = obj_def.obj_data.node_obj.ndb_props.ac_name;
@@ -1129,7 +1053,6 @@ static void free_sects(struct nldr_object *nldr_obj,
ret =
rmm_free(nldr_obj->rmm, 0, ovly_section->sect_run_addr,
ovly_section->size, true);
- DBC_ASSERT(ret);
ovly_section = ovly_section->next_sect;
i++;
}
@@ -1249,7 +1172,6 @@ static int load_lib(struct nldr_nodeobject *nldr_node_obj,
if (depth > MAXDEPTH) {
/* Error */
- DBC_ASSERT(false);
}
root->lib = NULL;
/* Allocate a buffer for library file name of size DBL_MAXPATHLENGTH */
@@ -1312,7 +1234,6 @@ static int load_lib(struct nldr_nodeobject *nldr_node_obj,
dcd_get_num_dep_libs(nldr_node_obj->nldr_obj->dcd_mgr,
&uuid, &nd_libs, &np_libs, phase);
}
- DBC_ASSERT(nd_libs >= np_libs);
if (!status) {
if (!(*nldr_node_obj->phase_split))
np_libs = 0;
@@ -1474,7 +1395,6 @@ static int load_ovly(struct nldr_nodeobject *nldr_node_obj,
}
}
- DBC_ASSERT(i < nldr_obj->ovly_nodes);
if (!po_node) {
status = -ENOENT;
@@ -1500,7 +1420,6 @@ static int load_ovly(struct nldr_nodeobject *nldr_node_obj,
break;
default:
- DBC_ASSERT(false);
break;
}
@@ -1623,9 +1542,6 @@ static int remote_alloc(void **ref, u16 mem_sect, u32 size,
struct rmm_addr *rmm_addr_obj = (struct rmm_addr *)dsp_address;
bool mem_load_req = false;
int status = -ENOMEM; /* Set to fail */
- DBC_REQUIRE(hnode);
- DBC_REQUIRE(mem_sect == DBLL_CODE || mem_sect == DBLL_DATA ||
- mem_sect == DBLL_BSS);
nldr_obj = hnode->nldr_obj;
rmm = nldr_obj->rmm;
/* Convert size to DSP words */
@@ -1651,7 +1567,6 @@ static int remote_alloc(void **ref, u16 mem_sect, u32 size,
mem_phase_bit = EXECUTEDATAFLAGBIT;
break;
default:
- DBC_ASSERT(false);
break;
}
if (mem_sect == DBLL_CODE)
@@ -1670,11 +1585,9 @@ static int remote_alloc(void **ref, u16 mem_sect, u32 size,
/* Find an appropriate segment based on mem_sect */
if (segid == NULLID) {
/* No memory requirements of preferences */
- DBC_ASSERT(!mem_load_req);
goto func_cont;
}
if (segid <= MAXSEGID) {
- DBC_ASSERT(segid < nldr_obj->dload_segs);
/* Attempt to allocate from segid first. */
rmm_addr_obj->segid = segid;
status =
@@ -1685,7 +1598,6 @@ static int remote_alloc(void **ref, u16 mem_sect, u32 size,
}
} else {
/* segid > MAXSEGID ==> Internal or external memory */
- DBC_ASSERT(segid == MEMINTERNALID || segid == MEMEXTERNALID);
/* Check for any internal or external memory segment,
* depending on segid. */
mem_sect_type |= segid == MEMINTERNALID ?
@@ -1736,8 +1648,6 @@ static int remote_free(void **ref, u16 space, u32 dsp_address,
u32 word_size;
int status = -ENOMEM; /* Set to fail */
- DBC_REQUIRE(nldr_obj);
-
rmm = nldr_obj->rmm;
/* Convert size to DSP words */
@@ -1761,7 +1671,6 @@ static void unload_lib(struct nldr_nodeobject *nldr_node_obj,
struct nldr_object *nldr_obj = nldr_node_obj->nldr_obj;
u16 i;
- DBC_ASSERT(root != NULL);
/* Unload dependent libraries */
for (i = 0; i < root->dep_libs; i++)
@@ -1812,7 +1721,6 @@ static void unload_ovly(struct nldr_nodeobject *nldr_node_obj,
}
}
- DBC_ASSERT(i < nldr_obj->ovly_nodes);
if (!po_node)
/* TODO: Should we print warning here? */
@@ -1839,14 +1747,11 @@ static void unload_ovly(struct nldr_nodeobject *nldr_node_obj,
other_alloc = po_node->other_sects;
break;
default:
- DBC_ASSERT(false);
break;
}
- DBC_ASSERT(ref_count && (*ref_count > 0));
if (ref_count && (*ref_count > 0)) {
*ref_count -= 1;
if (other_ref) {
- DBC_ASSERT(*other_ref > 0);
*other_ref -= 1;
}
}
@@ -1897,9 +1802,6 @@ int nldr_find_addr(struct nldr_nodeobject *nldr_node, u32 sym_addr,
bool status1 = false;
s32 i = 0;
struct lib_node root = { NULL, 0, NULL };
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(offset_output != NULL);
- DBC_REQUIRE(sym_name != NULL);
pr_debug("%s(0x%x, 0x%x, 0x%x, 0x%x, %s)\n", __func__, (u32) nldr_node,
sym_addr, offset_range, (u32) offset_output, sym_name);
@@ -1915,7 +1817,6 @@ int nldr_find_addr(struct nldr_nodeobject *nldr_node, u32 sym_addr,
root = nldr_node->delete_lib;
break;
default:
- DBC_ASSERT(false);
break;
}
} else {
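
Deleting nldr_init()/nldr_exit() ripples into node.c below, because both were published through the node_ldr_fxns function-pointer table — and that table is filled positionally, so the struct definition in the header and every initializer must shrink in the same patch or the remaining pointers silently bind to the wrong slots:

/* node.c, after the patch: entries must stay in struct order */
static struct node_ldr_fxns nldr_fxns = {
        nldr_allocate,
        nldr_create,
        nldr_delete,
        nldr_get_fxn_addr,      /* the exit slot used to sit before this */
        nldr_load,              /* the init slot used to sit before this */
        nldr_unload,
};

Designated initializers (.get_fxn_addr = nldr_get_fxn_addr, ...) would make this class of edit mechanically safe; positional tables rely on review. The rmm_init()/rmm_exit() calls that lived inside the deleted pair disappear with it, which again presumes the RMM module's init/exit are dropped in the same series.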
diff --git a/drivers/staging/tidspbridge/rmgr/node.c b/drivers/staging/tidspbridge/rmgr/node.c
index 5dadaa445ad..7fb426c5251 100644
--- a/drivers/staging/tidspbridge/rmgr/node.c
+++ b/drivers/staging/tidspbridge/rmgr/node.c
@@ -26,9 +26,6 @@
/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>
-/* ----------------------------------- Trace & Debug */
-#include <dspbridge/dbc.h>
-
/* ----------------------------------- OS Adaptation Layer */
#include <dspbridge/memdefs.h>
#include <dspbridge/proc.h>
@@ -162,7 +159,6 @@ struct node_mgr {
/* Loader properties */
struct nldr_object *nldr_obj; /* Handle to loader */
struct node_ldr_fxns nldr_fxns; /* Handle to loader functions */
- bool loader_init; /* Loader Init function succeeded? */
};
/*
@@ -264,16 +260,12 @@ static u32 ovly(void *priv_ref, u32 dsp_run_addr, u32 dsp_load_addr,
static u32 mem_write(void *priv_ref, u32 dsp_add, void *pbuf,
u32 ul_num_bytes, u32 mem_space);
-static u32 refs; /* module reference count */
-
/* Dynamic loader functions. */
static struct node_ldr_fxns nldr_fxns = {
nldr_allocate,
nldr_create,
nldr_delete,
- nldr_exit,
nldr_get_fxn_addr,
- nldr_init,
nldr_load,
nldr_unload,
};
@@ -326,11 +318,6 @@ int node_allocate(struct proc_object *hprocessor,
void *node_res;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(hprocessor != NULL);
- DBC_REQUIRE(noderes != NULL);
- DBC_REQUIRE(node_uuid != NULL);
-
*noderes = NULL;
status = proc_get_processor_id(hprocessor, &proc_id);
@@ -673,7 +660,6 @@ func_cont:
drv_proc_node_update_heap_status(node_res, true);
drv_proc_node_update_status(node_res, true);
}
- DBC_ENSURE((status && *noderes == NULL) || (!status && *noderes));
func_end:
dev_dbg(bridge, "%s: hprocessor: %p pNodeId: %p pargs: %p attr_in: %p "
"node_res: %p status: 0x%x\n", __func__, hprocessor,
@@ -696,11 +682,6 @@ DBAPI node_alloc_msg_buf(struct node_object *hnode, u32 usize,
bool set_info;
u32 proc_id;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(pbuffer != NULL);
-
- DBC_REQUIRE(usize > 0);
-
if (!pnode)
status = -EFAULT;
else if (node_get_type(pnode) == NODE_DEVICE)
@@ -714,7 +695,6 @@ DBAPI node_alloc_msg_buf(struct node_object *hnode, u32 usize,
status = proc_get_processor_id(pnode->processor, &proc_id);
if (proc_id != DSP_UNIT) {
- DBC_ASSERT(NULL);
goto func_end;
}
/* If segment ID includes MEM_SETVIRTUALSEGID then pbuffer is a
@@ -782,8 +762,6 @@ int node_change_priority(struct node_object *hnode, s32 prio)
int status = 0;
u32 proc_id;
- DBC_REQUIRE(refs > 0);
-
if (!hnode || !hnode->node_mgr) {
status = -EFAULT;
} else {
@@ -854,7 +832,6 @@ int node_connect(struct node_object *node1, u32 stream1,
s8 chnl_mode;
u32 dw_length;
int status = 0;
- DBC_REQUIRE(refs > 0);
if (!node1 || !node2)
return -EFAULT;
@@ -903,7 +880,6 @@ int node_connect(struct node_object *node1, u32 stream1,
if (node1_type != NODE_GPP) {
hnode_mgr = node1->node_mgr;
} else {
- DBC_ASSERT(node2 != (struct node_object *)DSP_HGPPNODE);
hnode_mgr = node2->node_mgr;
}
@@ -982,9 +958,6 @@ int node_connect(struct node_object *node1, u32 stream1,
goto out_unlock;
}
- DBC_ASSERT((node1_type == NODE_GPP) ||
- (node2_type == NODE_GPP));
-
chnl_mode = (node1_type == NODE_GPP) ?
CHNL_MODETODSP : CHNL_MODEFROMDSP;
@@ -1139,7 +1112,6 @@ int node_create(struct node_object *hnode)
omap_dspbridge_dev->dev.platform_data;
#endif
- DBC_REQUIRE(refs > 0);
if (!pnode) {
status = -EFAULT;
goto func_end;
@@ -1291,10 +1263,6 @@ int node_create_mgr(struct node_mgr **node_man,
int status = 0;
u8 dev_type;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(node_man != NULL);
- DBC_REQUIRE(hdev_obj != NULL);
-
*node_man = NULL;
/* Allocate Node manager object */
node_mgr_obj = kzalloc(sizeof(struct node_mgr), GFP_KERNEL);
@@ -1366,7 +1334,6 @@ int node_create_mgr(struct node_mgr **node_man,
nldr_attrs_obj.write = mem_write;
nldr_attrs_obj.dsp_word_size = node_mgr_obj->dsp_word_size;
nldr_attrs_obj.dsp_mau_size = node_mgr_obj->dsp_mau_size;
- node_mgr_obj->loader_init = node_mgr_obj->nldr_fxns.init();
status = node_mgr_obj->nldr_fxns.create(&node_mgr_obj->nldr_obj,
hdev_obj,
&nldr_attrs_obj);
@@ -1375,8 +1342,6 @@ int node_create_mgr(struct node_mgr **node_man,
*node_man = node_mgr_obj;
- DBC_ENSURE((status && *node_man == NULL) || (!status && *node_man));
-
return status;
out_err:
delete_node_mgr(node_mgr_obj);
@@ -1409,7 +1374,6 @@ int node_delete(struct node_res_object *noderes,
void *node_res = noderes;
struct dsp_processorstate proc_state;
- DBC_REQUIRE(refs > 0);
if (!pnode) {
status = -EFAULT;
@@ -1554,8 +1518,6 @@ func_end:
*/
int node_delete_mgr(struct node_mgr *hnode_mgr)
{
- DBC_REQUIRE(refs > 0);
-
if (!hnode_mgr)
return -EFAULT;
@@ -1576,10 +1538,6 @@ int node_enum_nodes(struct node_mgr *hnode_mgr, void **node_tab,
struct node_object *hnode;
u32 i = 0;
int status = 0;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(node_tab != NULL || node_tab_size == 0);
- DBC_REQUIRE(pu_num_nodes != NULL);
- DBC_REQUIRE(pu_allocated != NULL);
if (!hnode_mgr) {
status = -EFAULT;
@@ -1605,20 +1563,6 @@ func_end:
}
/*
- * ======== node_exit ========
- * Purpose:
- * Discontinue usage of NODE module.
- */
-void node_exit(void)
-{
- DBC_REQUIRE(refs > 0);
-
- refs--;
-
- DBC_ENSURE(refs >= 0);
-}
-
-/*
* ======== node_free_msg_buf ========
* Purpose:
* Frees the message buffer.
@@ -1629,10 +1573,6 @@ int node_free_msg_buf(struct node_object *hnode, u8 * pbuffer,
struct node_object *pnode = (struct node_object *)hnode;
int status = 0;
u32 proc_id;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(pbuffer != NULL);
- DBC_REQUIRE(pnode != NULL);
- DBC_REQUIRE(pnode->xlator != NULL);
if (!hnode) {
status = -EFAULT;
@@ -1653,7 +1593,6 @@ int node_free_msg_buf(struct node_object *hnode, u8 * pbuffer,
status = cmm_xlator_free_buf(pnode->xlator, pbuffer);
}
} else {
- DBC_ASSERT(NULL); /* BUG */
}
func_end:
return status;
@@ -1669,9 +1608,6 @@ int node_get_attr(struct node_object *hnode,
struct dsp_nodeattr *pattr, u32 attr_size)
{
struct node_mgr *hnode_mgr;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(pattr != NULL);
- DBC_REQUIRE(attr_size >= sizeof(struct dsp_nodeattr));
if (!hnode)
return -EFAULT;
@@ -1713,9 +1649,6 @@ int node_get_channel_id(struct node_object *hnode, u32 dir, u32 index,
{
enum node_type node_type;
int status = -EINVAL;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(dir == DSP_TONODE || dir == DSP_FROMNODE);
- DBC_REQUIRE(chan_id != NULL);
if (!hnode) {
status = -EFAULT;
@@ -1734,7 +1667,6 @@ int node_get_channel_id(struct node_object *hnode, u32 dir, u32 index,
}
}
} else {
- DBC_ASSERT(dir == DSP_FROMNODE);
if (index < MAX_OUTPUTS(hnode)) {
if (hnode->outputs[index].type == HOSTCONNECT) {
*chan_id = hnode->outputs[index].dev_id;
@@ -1761,9 +1693,6 @@ int node_get_message(struct node_object *hnode,
struct dsp_processorstate proc_state;
struct proc_object *hprocessor;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(message != NULL);
-
if (!hnode) {
status = -EFAULT;
goto func_end;
@@ -1831,14 +1760,12 @@ int node_get_nldr_obj(struct node_mgr *hnode_mgr,
{
int status = 0;
struct node_mgr *node_mgr_obj = hnode_mgr;
- DBC_REQUIRE(nldr_ovlyobj != NULL);
if (!hnode_mgr)
status = -EFAULT;
else
*nldr_ovlyobj = node_mgr_obj->nldr_obj;
- DBC_ENSURE(!status || (nldr_ovlyobj != NULL && *nldr_ovlyobj == NULL));
return status;
}
@@ -1852,8 +1779,6 @@ int node_get_strm_mgr(struct node_object *hnode,
{
int status = 0;
- DBC_REQUIRE(refs > 0);
-
if (!hnode)
status = -EFAULT;
else
@@ -1867,8 +1792,6 @@ int node_get_strm_mgr(struct node_object *hnode,
*/
enum nldr_loadtype node_get_load_type(struct node_object *hnode)
{
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(hnode);
if (!hnode) {
dev_dbg(bridge, "%s: Failed. hnode: %p\n", __func__, hnode);
return -1;
@@ -1884,8 +1807,6 @@ enum nldr_loadtype node_get_load_type(struct node_object *hnode)
*/
u32 node_get_timeout(struct node_object *hnode)
{
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(hnode);
if (!hnode) {
dev_dbg(bridge, "%s: failed. hnode: %p\n", __func__, hnode);
return 0;
@@ -1915,20 +1836,6 @@ enum node_type node_get_type(struct node_object *hnode)
}
/*
- * ======== node_init ========
- * Purpose:
- * Initialize the NODE module.
- */
-bool node_init(void)
-{
- DBC_REQUIRE(refs >= 0);
-
- refs++;
-
- return true;
-}
-
-/*
* ======== node_on_exit ========
* Purpose:
* Gets called when RMS_EXIT is received for a node.
@@ -1970,8 +1877,6 @@ int node_pause(struct node_object *hnode)
struct dsp_processorstate proc_state;
struct proc_object *hprocessor;
- DBC_REQUIRE(refs > 0);
-
if (!hnode) {
status = -EFAULT;
} else {
@@ -2054,9 +1959,6 @@ int node_put_message(struct node_object *hnode,
struct dsp_processorstate proc_state;
struct proc_object *hprocessor;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(pmsg != NULL);
-
if (!hnode) {
status = -EFAULT;
goto func_end;
@@ -2146,9 +2048,6 @@ int node_register_notify(struct node_object *hnode, u32 event_mask,
struct bridge_drv_interface *intf_fxns;
int status = 0;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(hnotification != NULL);
-
if (!hnode) {
status = -EFAULT;
} else {
@@ -2207,8 +2106,6 @@ int node_run(struct node_object *hnode)
struct dsp_processorstate proc_state;
struct proc_object *hprocessor;
- DBC_REQUIRE(refs > 0);
-
if (!hnode) {
status = -EFAULT;
goto func_end;
@@ -2287,7 +2184,6 @@ int node_run(struct node_object *hnode)
NODE_GET_PRIORITY(hnode));
} else {
/* We should never get here */
- DBC_ASSERT(false);
}
func_cont1:
/* Update node state. */
@@ -2326,9 +2222,6 @@ int node_terminate(struct node_object *hnode, int *pstatus)
struct deh_mgr *hdeh_mgr;
struct dsp_processorstate proc_state;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(pstatus != NULL);
-
if (!hnode || !hnode->node_mgr) {
status = -EFAULT;
goto func_end;
@@ -2610,9 +2503,6 @@ static void delete_node_mgr(struct node_mgr *hnode_mgr)
if (hnode_mgr->nldr_obj)
hnode_mgr->nldr_fxns.delete(hnode_mgr->nldr_obj);
- if (hnode_mgr->loader_init)
- hnode_mgr->nldr_fxns.exit();
-
kfree(hnode_mgr);
}
}
@@ -2668,7 +2558,6 @@ static void fill_stream_connect(struct node_object *node1,
strm1->connect_type = CONNECTTYPE_GPPOUTPUT;
} else {
/* GPP == > NODE */
- DBC_ASSERT(node2 != (struct node_object *)DSP_HGPPNODE);
strm_index = node2->num_inputs + node2->num_outputs - 1;
strm2 = &(node2->stream_connect[strm_index]);
strm2->cb_struct = sizeof(struct dsp_streamconnect);
@@ -2748,9 +2637,6 @@ static int get_fxn_address(struct node_object *hnode, u32 * fxn_addr,
char *pstr_fxn_name = NULL;
struct node_mgr *hnode_mgr = hnode->node_mgr;
int status = 0;
- DBC_REQUIRE(node_get_type(hnode) == NODE_TASK ||
- node_get_type(hnode) == NODE_DAISSOCKET ||
- node_get_type(hnode) == NODE_MESSAGE);
switch (phase) {
case CREATEPHASE:
@@ -2767,7 +2653,6 @@ static int get_fxn_address(struct node_object *hnode, u32 * fxn_addr,
break;
default:
/* Should never get here */
- DBC_ASSERT(false);
break;
}
@@ -2787,9 +2672,6 @@ void get_node_info(struct node_object *hnode, struct dsp_nodeinfo *node_info)
{
u32 i;
- DBC_REQUIRE(hnode);
- DBC_REQUIRE(node_info != NULL);
-
node_info->cb_struct = sizeof(struct dsp_nodeinfo);
node_info->nb_node_database_props =
hnode->dcd_props.obj_data.node_obj.ndb_props;
@@ -2848,9 +2730,7 @@ static int get_node_props(struct dcd_manager *hdcd_mgr,
pmsg_args->max_msgs);
} else {
/* Copy device name */
- DBC_REQUIRE(pndb_props->ac_name);
len = strlen(pndb_props->ac_name);
- DBC_ASSERT(len < MAXDEVNAMELEN);
hnode->str_dev_name = kzalloc(len + 1, GFP_KERNEL);
if (hnode->str_dev_name == NULL) {
status = -ENOMEM;
@@ -2938,10 +2818,6 @@ int node_get_uuid_props(void *hprocessor,
struct dcd_nodeprops dcd_node_props;
struct dsp_processorstate proc_state;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(hprocessor != NULL);
- DBC_REQUIRE(node_uuid != NULL);
-
if (hprocessor == NULL || node_uuid == NULL) {
status = -EFAULT;
goto func_end;
@@ -3063,8 +2939,6 @@ static u32 ovly(void *priv_ref, u32 dsp_run_addr, u32 dsp_load_addr,
/* Function interface to Bridge driver*/
struct bridge_drv_interface *intf_fxns;
- DBC_REQUIRE(hnode);
-
hnode_mgr = hnode->node_mgr;
ul_size = ul_num_bytes / hnode_mgr->dsp_word_size;
@@ -3106,9 +2980,6 @@ static u32 mem_write(void *priv_ref, u32 dsp_add, void *pbuf,
/* Function interface to Bridge driver */
struct bridge_drv_interface *intf_fxns;
- DBC_REQUIRE(hnode);
- DBC_REQUIRE(mem_space & DBLL_CODE || mem_space & DBLL_DATA);
-
hnode_mgr = hnode->node_mgr;
ul_timeout = hnode->timeout;
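
With the loader's init/exit gone, node.c no longer tracks whether they ran: the loader_init flag leaves struct node_mgr, node_create_mgr() stops invoking nldr_fxns.init(), and delete_node_mgr() drops its conditional nldr_fxns.exit() call. Teardown shrinks to the one call that still releases a resource:

static void delete_node_mgr(struct node_mgr *hnode_mgr)
{
        if (hnode_mgr) {
                /* ... channel/message/strm cleanup elided ... */
                if (hnode_mgr->nldr_obj)
                        hnode_mgr->nldr_fxns.delete(hnode_mgr->nldr_obj);
                kfree(hnode_mgr);
        }
}

One bookkeeping flag fewer is one fewer way for construction and destruction to fall out of sync.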
diff --git a/drivers/staging/tidspbridge/rmgr/proc.c b/drivers/staging/tidspbridge/rmgr/proc.c
index 242dd139999..7e4f12f6be4 100644
--- a/drivers/staging/tidspbridge/rmgr/proc.c
+++ b/drivers/staging/tidspbridge/rmgr/proc.c
@@ -25,9 +25,6 @@
/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>
-/* ----------------------------------- Trace & Debug */
-#include <dspbridge/dbc.h>
-
/* ----------------------------------- OS Adaptation Layer */
#include <dspbridge/ntfy.h>
#include <dspbridge/sync.h>
@@ -101,8 +98,6 @@ struct proc_object {
struct list_head proc_list;
};
-static u32 refs;
-
DEFINE_MUTEX(proc_lock); /* For critical sections */
/* ----------------------------------- Function Prototypes */
@@ -281,9 +276,6 @@ proc_attach(u32 processor_id,
struct drv_data *drv_datap = dev_get_drvdata(bridge);
u8 dev_type;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(ph_processor != NULL);
-
if (pr_ctxt->processor) {
*ph_processor = pr_ctxt->processor;
return status;
@@ -382,10 +374,6 @@ proc_attach(u32 processor_id,
kfree(p_proc_object);
}
func_end:
- DBC_ENSURE((status == -EPERM && *ph_processor == NULL) ||
- (!status && p_proc_object) ||
- (status == 0 && p_proc_object));
-
return status;
}
@@ -445,10 +433,6 @@ int proc_auto_start(struct cfg_devnode *dev_node_obj,
struct drv_data *drv_datap = dev_get_drvdata(bridge);
u8 dev_type;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(dev_node_obj != NULL);
- DBC_REQUIRE(hdev_obj != NULL);
-
/* Create a Dummy PROC Object */
if (!drv_datap || !drv_datap->mgr_object) {
status = -ENODATA;
@@ -516,8 +500,6 @@ int proc_ctrl(void *hprocessor, u32 dw_cmd, struct dsp_cbdata * arg)
struct proc_object *p_proc_object = hprocessor;
u32 timeout = 0;
- DBC_REQUIRE(refs > 0);
-
if (p_proc_object) {
/* intercept PWR deep sleep command */
if (dw_cmd == BRDIOCTL_DEEPSLEEP) {
@@ -565,8 +547,6 @@ int proc_detach(struct process_context *pr_ctxt)
int status = 0;
struct proc_object *p_proc_object = NULL;
- DBC_REQUIRE(refs > 0);
-
p_proc_object = (struct proc_object *)pr_ctxt->processor;
if (p_proc_object) {
@@ -607,11 +587,6 @@ int proc_enum_nodes(void *hprocessor, void **node_tab,
struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
struct node_mgr *hnode_mgr = NULL;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(node_tab != NULL || node_tab_size == 0);
- DBC_REQUIRE(pu_num_nodes != NULL);
- DBC_REQUIRE(pu_allocated != NULL);
-
if (p_proc_object) {
if (!(dev_get_node_manager(p_proc_object->dev_obj,
&hnode_mgr))) {
@@ -768,8 +743,6 @@ int proc_begin_dma(void *hprocessor, void *pmpu_addr, u32 ul_size,
struct process_context *pr_ctxt = (struct process_context *) hprocessor;
struct dmm_map_object *map_obj;
- DBC_REQUIRE(refs > 0);
-
if (!pr_ctxt) {
status = -EFAULT;
goto err_out;
@@ -810,8 +783,6 @@ int proc_end_dma(void *hprocessor, void *pmpu_addr, u32 ul_size,
struct process_context *pr_ctxt = (struct process_context *) hprocessor;
struct dmm_map_object *map_obj;
- DBC_REQUIRE(refs > 0);
-
if (!pr_ctxt) {
status = -EFAULT;
goto err_out;
@@ -884,10 +855,6 @@ int proc_get_resource_info(void *hprocessor, u32 resource_type,
struct rmm_target_obj *rmm = NULL;
struct io_mgr *hio_mgr = NULL; /* IO manager handle */
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(resource_info != NULL);
- DBC_REQUIRE(resource_info_size >= sizeof(struct dsp_resourceinfo));
-
if (!p_proc_object) {
status = -EFAULT;
goto func_end;
@@ -940,21 +907,6 @@ func_end:
}
/*
- * ======== proc_exit ========
- * Purpose:
- * Decrement reference count, and free resources when reference count is
- * 0.
- */
-void proc_exit(void)
-{
- DBC_REQUIRE(refs > 0);
-
- refs--;
-
- DBC_ENSURE(refs >= 0);
-}
-
-/*
* ======== proc_get_dev_object ========
* Purpose:
* Return the Dev Object handle for a given Processor.
@@ -966,9 +918,6 @@ int proc_get_dev_object(void *hprocessor,
int status = -EPERM;
struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(device_obj != NULL);
-
if (p_proc_object) {
*device_obj = p_proc_object->dev_obj;
status = 0;
@@ -977,9 +926,6 @@ int proc_get_dev_object(void *hprocessor,
status = -EFAULT;
}
- DBC_ENSURE((!status && *device_obj != NULL) ||
- (status && *device_obj == NULL));
-
return status;
}
@@ -996,10 +942,6 @@ int proc_get_state(void *hprocessor,
struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
int brd_status;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(proc_state_obj != NULL);
- DBC_REQUIRE(state_info_size >= sizeof(struct dsp_processorstate));
-
if (p_proc_object) {
/* First, retrieve BRD state information */
status = (*p_proc_object->intf_fxns->brd_status)
@@ -1055,25 +997,6 @@ int proc_get_trace(void *hprocessor, u8 * pbuf, u32 max_size)
}
/*
- * ======== proc_init ========
- * Purpose:
- * Initialize PROC's private state, keeping a reference count on each call
- */
-bool proc_init(void)
-{
- bool ret = true;
-
- DBC_REQUIRE(refs >= 0);
-
- if (ret)
- refs++;
-
- DBC_ENSURE((ret && (refs > 0)) || (!ret && (refs >= 0)));
-
- return ret;
-}
-
-/*
* ======== proc_load ========
* Purpose:
* Reset a processor and load a new base program image.
@@ -1111,10 +1034,6 @@ int proc_load(void *hprocessor, const s32 argc_index,
omap_dspbridge_dev->dev.platform_data;
#endif
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(argc_index > 0);
- DBC_REQUIRE(user_args != NULL);
-
#ifdef OPT_LOAD_TIME_INSTRUMENTATION
do_gettimeofday(&tv1);
#endif
@@ -1202,8 +1121,6 @@ int proc_load(void *hprocessor, const s32 argc_index,
if (status) {
status = -EPERM;
} else {
- DBC_ASSERT(p_proc_object->last_coff ==
- NULL);
/* Allocate memory for pszLastCoff */
p_proc_object->last_coff =
kzalloc((strlen(user_args[0]) +
@@ -1226,7 +1143,6 @@ int proc_load(void *hprocessor, const s32 argc_index,
if (!hmsg_mgr) {
status = msg_create(&hmsg_mgr, p_proc_object->dev_obj,
(msg_onexit) node_on_exit);
- DBC_ASSERT(!status);
dev_set_msg_mgr(p_proc_object->dev_obj, hmsg_mgr);
}
}
@@ -1322,7 +1238,6 @@ int proc_load(void *hprocessor, const s32 argc_index,
strlen(pargv0) + 1);
else
status = -ENOMEM;
- DBC_ASSERT(brd_state == BRD_LOADED);
}
}
@@ -1331,9 +1246,6 @@ func_end:
pr_err("%s: Processor failed to load\n", __func__);
proc_stop(p_proc_object);
}
- DBC_ENSURE((!status
- && p_proc_object->proc_state == PROC_LOADED)
- || status);
#ifdef OPT_LOAD_TIME_INSTRUMENTATION
do_gettimeofday(&tv2);
if (tv2.tv_usec < tv1.tv_usec) {
@@ -1443,9 +1355,6 @@ int proc_register_notify(void *hprocessor, u32 event_mask,
struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
struct deh_mgr *hdeh_mgr;
- DBC_REQUIRE(hnotification != NULL);
- DBC_REQUIRE(refs > 0);
-
/* Check processor handle */
if (!p_proc_object) {
status = -EFAULT;
@@ -1567,7 +1476,6 @@ int proc_start(void *hprocessor)
u32 dw_dsp_addr; /* Loaded code's entry point. */
int brd_state;
- DBC_REQUIRE(refs > 0);
if (!p_proc_object) {
status = -EFAULT;
goto func_end;
@@ -1616,7 +1524,6 @@ func_cont:
if (!((*p_proc_object->intf_fxns->brd_status)
(p_proc_object->bridge_context, &brd_state))) {
pr_info("%s: dsp in running state\n", __func__);
- DBC_ASSERT(brd_state != BRD_HIBERNATION);
}
} else {
pr_err("%s: Failed to start the dsp\n", __func__);
@@ -1624,8 +1531,6 @@ func_cont:
}
func_end:
- DBC_ENSURE((!status && p_proc_object->proc_state ==
- PROC_RUNNING) || status);
return status;
}
@@ -1644,9 +1549,7 @@ int proc_stop(void *hprocessor)
u32 node_tab_size = 1;
u32 num_nodes = 0;
u32 nodes_allocated = 0;
- int brd_state;
- DBC_REQUIRE(refs > 0);
if (!p_proc_object) {
status = -EFAULT;
goto func_end;
@@ -1678,11 +1581,6 @@ int proc_stop(void *hprocessor)
msg_delete(hmsg_mgr);
dev_set_msg_mgr(p_proc_object->dev_obj, NULL);
}
- if (!((*p_proc_object->
- intf_fxns->brd_status) (p_proc_object->
- bridge_context,
- &brd_state)))
- DBC_ASSERT(brd_state == BRD_STOPPED);
}
} else {
pr_err("%s: Failed to stop the processor\n", __func__);
@@ -1820,10 +1718,6 @@ static int proc_monitor(struct proc_object *proc_obj)
{
int status = -EPERM;
struct msg_mgr *hmsg_mgr;
- int brd_state;
-
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(proc_obj);
/* This is needed only when Device is loaded when it is
* already 'ACTIVE' */
@@ -1840,13 +1734,8 @@ static int proc_monitor(struct proc_object *proc_obj)
if (!((*proc_obj->intf_fxns->brd_monitor)
(proc_obj->bridge_context))) {
status = 0;
- if (!((*proc_obj->intf_fxns->brd_status)
- (proc_obj->bridge_context, &brd_state)))
- DBC_ASSERT(brd_state == BRD_IDLE);
}
- DBC_ENSURE((!status && brd_state == BRD_IDLE) ||
- status);
return status;
}
@@ -1880,8 +1769,6 @@ static char **prepend_envp(char **new_envp, char **envp, s32 envp_elems,
{
char **pp_envp = new_envp;
- DBC_REQUIRE(new_envp);
-
/* Prepend new environ var=value string */
*new_envp++ = sz_var;
@@ -1906,9 +1793,6 @@ int proc_notify_clients(void *proc, u32 events)
int status = 0;
struct proc_object *p_proc_object = (struct proc_object *)proc;
- DBC_REQUIRE(p_proc_object);
- DBC_REQUIRE(is_valid_proc_event(events));
- DBC_REQUIRE(refs > 0);
if (!p_proc_object) {
status = -EFAULT;
goto func_end;
@@ -1930,9 +1814,6 @@ int proc_notify_all_clients(void *proc, u32 events)
int status = 0;
struct proc_object *p_proc_object = (struct proc_object *)proc;
- DBC_REQUIRE(is_valid_proc_event(events));
- DBC_REQUIRE(refs > 0);
-
if (!p_proc_object) {
status = -EFAULT;
goto func_end;
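With the contract checks gone, the proc_init()/proc_exit() pair removed above (and the matching rmm_*() and strm_*() pairs removed below) had no remaining effect: they only maintained a file-local refs counter that nothing but the DBC_* macros ever read. Reduced to what the compiler actually saw, the removed pattern was:

	static u32 refs;

	bool proc_init(void) { refs++; return true; }	/* cannot fail        */
	void proc_exit(void) { refs--; }		/* refs never tested  */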
diff --git a/drivers/staging/tidspbridge/rmgr/rmm.c b/drivers/staging/tidspbridge/rmgr/rmm.c
index f3dc0ddbfac..52187bd9772 100644
--- a/drivers/staging/tidspbridge/rmgr/rmm.c
+++ b/drivers/staging/tidspbridge/rmgr/rmm.c
@@ -46,9 +46,6 @@
/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>
-/* ----------------------------------- Trace & Debug */
-#include <dspbridge/dbc.h>
-
/* ----------------------------------- This */
#include <dspbridge/rmm.h>
@@ -83,8 +80,6 @@ struct rmm_target_obj {
struct list_head ovly_list; /* List of overlay memory in use */
};
-static u32 refs; /* module reference count */
-
static bool alloc_block(struct rmm_target_obj *target, u32 segid, u32 size,
u32 align, u32 *dsp_address);
static bool free_block(struct rmm_target_obj *target, u32 segid, u32 addr,
@@ -101,12 +96,6 @@ int rmm_alloc(struct rmm_target_obj *target, u32 segid, u32 size,
u32 addr;
int status = 0;
- DBC_REQUIRE(target);
- DBC_REQUIRE(dsp_address != NULL);
- DBC_REQUIRE(size > 0);
- DBC_REQUIRE(reserve || (target->num_segs > 0));
- DBC_REQUIRE(refs > 0);
-
if (!reserve) {
if (!alloc_block(target, segid, size, align, dsp_address)) {
status = -ENOMEM;
@@ -170,9 +159,6 @@ int rmm_create(struct rmm_target_obj **target_obj,
s32 i;
int status = 0;
- DBC_REQUIRE(target_obj != NULL);
- DBC_REQUIRE(num_segs == 0 || seg_tab != NULL);
-
/* Allocate DBL target object */
target = kzalloc(sizeof(struct rmm_target_obj), GFP_KERNEL);
@@ -235,9 +221,6 @@ func_cont:
}
- DBC_ENSURE((!status && *target_obj)
- || (status && *target_obj == NULL));
-
return status;
}
@@ -251,8 +234,6 @@ void rmm_delete(struct rmm_target_obj *target)
struct rmm_header *next;
u32 i;
- DBC_REQUIRE(target);
-
kfree(target->seg_tab);
list_for_each_entry_safe(sect, tmp, &target->ovly_list, list_elem) {
@@ -277,18 +258,6 @@ void rmm_delete(struct rmm_target_obj *target)
}
/*
- * ======== rmm_exit ========
- */
-void rmm_exit(void)
-{
- DBC_REQUIRE(refs > 0);
-
- refs--;
-
- DBC_ENSURE(refs >= 0);
-}
-
-/*
* ======== rmm_free ========
*/
bool rmm_free(struct rmm_target_obj *target, u32 segid, u32 dsp_addr, u32 size,
@@ -297,15 +266,6 @@ bool rmm_free(struct rmm_target_obj *target, u32 segid, u32 dsp_addr, u32 size,
struct rmm_ovly_sect *sect, *tmp;
bool ret = false;
- DBC_REQUIRE(target);
-
- DBC_REQUIRE(reserved || segid < target->num_segs);
- DBC_REQUIRE(reserved || (dsp_addr >= target->seg_tab[segid].base &&
- (dsp_addr + size) <= (target->seg_tab[segid].
- base +
- target->seg_tab[segid].
- length)));
-
/*
* Free or unreserve memory.
*/
@@ -319,7 +279,6 @@ bool rmm_free(struct rmm_target_obj *target, u32 segid, u32 dsp_addr, u32 size,
list_for_each_entry_safe(sect, tmp, &target->ovly_list,
list_elem) {
if (dsp_addr == sect->addr) {
- DBC_ASSERT(size == sect->size);
/* Remove from list */
list_del(&sect->list_elem);
kfree(sect);
@@ -331,18 +290,6 @@ bool rmm_free(struct rmm_target_obj *target, u32 segid, u32 dsp_addr, u32 size,
}
/*
- * ======== rmm_init ========
- */
-bool rmm_init(void)
-{
- DBC_REQUIRE(refs >= 0);
-
- refs++;
-
- return true;
-}
-
-/*
* ======== rmm_stat ========
*/
bool rmm_stat(struct rmm_target_obj *target, enum dsp_memtype segid,
@@ -354,9 +301,6 @@ bool rmm_stat(struct rmm_target_obj *target, enum dsp_memtype segid,
u32 total_free_size = 0;
u32 free_blocks = 0;
- DBC_REQUIRE(mem_stat_buf != NULL);
- DBC_ASSERT(target != NULL);
-
if ((u32) segid < target->num_segs) {
head = target->free_list[segid];
diff --git a/drivers/staging/tidspbridge/rmgr/strm.c b/drivers/staging/tidspbridge/rmgr/strm.c
index 3fae0e9f511..34cc934e0c3 100644
--- a/drivers/staging/tidspbridge/rmgr/strm.c
+++ b/drivers/staging/tidspbridge/rmgr/strm.c
@@ -24,9 +24,6 @@
/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>
-/* ----------------------------------- Trace & Debug */
-#include <dspbridge/dbc.h>
-
/* ----------------------------------- OS Adaptation Layer */
#include <dspbridge/sync.h>
@@ -84,9 +81,6 @@ struct strm_object {
struct cmm_xlatorobject *xlator;
};
-/* ----------------------------------- Globals */
-static u32 refs; /* module reference count */
-
/* ----------------------------------- Function Prototypes */
static int delete_strm(struct strm_object *stream_obj);
@@ -104,9 +98,6 @@ int strm_allocate_buffer(struct strm_res_object *strmres, u32 usize,
u32 i;
struct strm_object *stream_obj = strmres->stream;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(ap_buffer != NULL);
-
if (stream_obj) {
/*
* Allocate from segment specified at time of stream open.
@@ -122,7 +113,6 @@ int strm_allocate_buffer(struct strm_res_object *strmres, u32 usize,
goto func_end;
for (i = 0; i < num_bufs; i++) {
- DBC_ASSERT(stream_obj->xlator != NULL);
(void)cmm_xlator_alloc_buf(stream_obj->xlator, &ap_buffer[i],
usize);
if (ap_buffer[i] == NULL) {
@@ -156,8 +146,6 @@ int strm_close(struct strm_res_object *strmres,
int status = 0;
struct strm_object *stream_obj = strmres->stream;
- DBC_REQUIRE(refs > 0);
-
if (!stream_obj) {
status = -EFAULT;
} else {
@@ -167,7 +155,6 @@ int strm_close(struct strm_res_object *strmres,
status =
(*intf_fxns->chnl_get_info) (stream_obj->chnl_obj,
&chnl_info_obj);
- DBC_ASSERT(!status);
if (chnl_info_obj.cio_cs > 0 || chnl_info_obj.cio_reqs > 0)
status = -EPIPE;
@@ -180,9 +167,6 @@ int strm_close(struct strm_res_object *strmres,
idr_remove(pr_ctxt->stream_id, strmres->id);
func_end:
- DBC_ENSURE(status == 0 || status == -EFAULT ||
- status == -EPIPE || status == -EPERM);
-
dev_dbg(bridge, "%s: stream_obj: %p, status 0x%x\n", __func__,
stream_obj, status);
return status;
@@ -199,10 +183,6 @@ int strm_create(struct strm_mgr **strm_man,
struct strm_mgr *strm_mgr_obj;
int status = 0;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(strm_man != NULL);
- DBC_REQUIRE(dev_obj != NULL);
-
*strm_man = NULL;
/* Allocate STRM manager object */
strm_mgr_obj = kzalloc(sizeof(struct strm_mgr), GFP_KERNEL);
@@ -217,7 +197,6 @@ int strm_create(struct strm_mgr **strm_man,
if (!status) {
(void)dev_get_intf_fxns(dev_obj,
&(strm_mgr_obj->intf_fxns));
- DBC_ASSERT(strm_mgr_obj->intf_fxns != NULL);
}
}
@@ -226,8 +205,6 @@ int strm_create(struct strm_mgr **strm_man,
else
kfree(strm_mgr_obj);
- DBC_ENSURE((!status && *strm_man) || (status && *strm_man == NULL));
-
return status;
}
@@ -238,27 +215,10 @@ int strm_create(struct strm_mgr **strm_man,
*/
void strm_delete(struct strm_mgr *strm_mgr_obj)
{
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(strm_mgr_obj);
-
kfree(strm_mgr_obj);
}
/*
- * ======== strm_exit ========
- * Purpose:
- * Discontinue usage of STRM module.
- */
-void strm_exit(void)
-{
- DBC_REQUIRE(refs > 0);
-
- refs--;
-
- DBC_ENSURE(refs >= 0);
-}
-
-/*
* ======== strm_free_buffer ========
* Purpose:
* Frees the buffers allocated for a stream.
@@ -270,15 +230,11 @@ int strm_free_buffer(struct strm_res_object *strmres, u8 ** ap_buffer,
u32 i = 0;
struct strm_object *stream_obj = strmres->stream;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(ap_buffer != NULL);
-
if (!stream_obj)
status = -EFAULT;
if (!status) {
for (i = 0; i < num_bufs; i++) {
- DBC_ASSERT(stream_obj->xlator != NULL);
status =
cmm_xlator_free_buf(stream_obj->xlator,
ap_buffer[i]);
@@ -306,10 +262,6 @@ int strm_get_info(struct strm_object *stream_obj,
int status = 0;
void *virt_base = NULL; /* NULL if no SM used */
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(stream_info != NULL);
- DBC_REQUIRE(stream_info_size >= sizeof(struct stream_info));
-
if (!stream_obj) {
status = -EFAULT;
} else {
@@ -330,7 +282,6 @@ int strm_get_info(struct strm_object *stream_obj,
if (stream_obj->xlator) {
/* We have a translator */
- DBC_ASSERT(stream_obj->segment_id > 0);
cmm_xlator_info(stream_obj->xlator, (u8 **) &virt_base, 0,
stream_obj->segment_id, false);
}
@@ -370,8 +321,6 @@ int strm_idle(struct strm_object *stream_obj, bool flush_data)
struct bridge_drv_interface *intf_fxns;
int status = 0;
- DBC_REQUIRE(refs > 0);
-
if (!stream_obj) {
status = -EFAULT;
} else {
@@ -388,25 +337,6 @@ int strm_idle(struct strm_object *stream_obj, bool flush_data)
}
/*
- * ======== strm_init ========
- * Purpose:
- * Initialize the STRM module.
- */
-bool strm_init(void)
-{
- bool ret = true;
-
- DBC_REQUIRE(refs >= 0);
-
- if (ret)
- refs++;
-
- DBC_ENSURE((ret && (refs > 0)) || (!ret && (refs >= 0)));
-
- return ret;
-}
-
-/*
* ======== strm_issue ========
* Purpose:
* Issues a buffer on a stream
@@ -418,9 +348,6 @@ int strm_issue(struct strm_object *stream_obj, u8 *pbuf, u32 ul_bytes,
int status = 0;
void *tmp_buf = NULL;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(pbuf != NULL);
-
if (!stream_obj) {
status = -EFAULT;
} else {
@@ -471,9 +398,6 @@ int strm_open(struct node_object *hnode, u32 dir, u32 index,
void *stream_res;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(strmres != NULL);
- DBC_REQUIRE(pattr != NULL);
*strmres = NULL;
if (dir != DSP_TONODE && dir != DSP_FROMNODE) {
status = -EPERM;
@@ -536,14 +460,12 @@ int strm_open(struct node_object *hnode, u32 dir, u32 index,
goto func_cont;
/* No System DMA */
- DBC_ASSERT(strm_obj->strm_mode != STRMMODE_LDMA);
/* Get the shared mem mgr for this streams dev object */
status = dev_get_cmm_mgr(strm_mgr_obj->dev_obj, &hcmm_mgr);
if (!status) {
/*Allocate a SM addr translator for this strm. */
status = cmm_xlator_create(&strm_obj->xlator, hcmm_mgr, NULL);
if (!status) {
- DBC_ASSERT(strm_obj->segment_id > 0);
/* Set translators Virt Addr attributes */
status = cmm_xlator_info(strm_obj->xlator,
(u8 **) &pattr->virt_base,
@@ -575,10 +497,6 @@ func_cont:
* strm_mgr_obj->chnl_mgr better be valid or we
* assert here), and then return -EPERM.
*/
- DBC_ASSERT(status == -ENOSR ||
- status == -ECHRNG ||
- status == -EALREADY ||
- status == -EIO);
status = -EPERM;
}
}
@@ -594,12 +512,6 @@ func_cont:
(void)delete_strm(strm_obj);
}
- /* ensure we return a documented error code */
- DBC_ENSURE((!status && strm_obj) ||
- (*strmres == NULL && (status == -EFAULT ||
- status == -EPERM
- || status == -EINVAL)));
-
dev_dbg(bridge, "%s: hnode: %p dir: 0x%x index: 0x%x pattr: %p "
"strmres: %p status: 0x%x\n", __func__,
hnode, dir, index, pattr, strmres, status);
@@ -619,11 +531,6 @@ int strm_reclaim(struct strm_object *stream_obj, u8 ** buf_ptr,
int status = 0;
void *tmp_buf = NULL;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(buf_ptr != NULL);
- DBC_REQUIRE(nbytes != NULL);
- DBC_REQUIRE(pdw_arg != NULL);
-
if (!stream_obj) {
status = -EFAULT;
goto func_end;
@@ -679,11 +586,6 @@ int strm_reclaim(struct strm_object *stream_obj, u8 ** buf_ptr,
*buf_ptr = chnl_ioc_obj.buf;
}
func_end:
- /* ensure we return a documented return code */
- DBC_ENSURE(!status || status == -EFAULT ||
- status == -ETIME || status == -ESRCH ||
- status == -EPERM);
-
dev_dbg(bridge, "%s: stream_obj: %p buf_ptr: %p nbytes: %p "
"pdw_arg: %p status 0x%x\n", __func__, stream_obj,
buf_ptr, nbytes, pdw_arg, status);
@@ -702,9 +604,6 @@ int strm_register_notify(struct strm_object *stream_obj, u32 event_mask,
struct bridge_drv_interface *intf_fxns;
int status = 0;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(hnotification != NULL);
-
if (!stream_obj) {
status = -EFAULT;
} else if ((event_mask & ~((DSP_STREAMIOCOMPLETION) |
@@ -725,10 +624,7 @@ int strm_register_notify(struct strm_object *stream_obj, u32 event_mask,
notify_type,
hnotification);
}
- /* ensure we return a documented return code */
- DBC_ENSURE(!status || status == -EFAULT ||
- status == -ETIME || status == -ESRCH ||
- status == -ENOSYS || status == -EPERM);
+
return status;
}
@@ -747,11 +643,6 @@ int strm_select(struct strm_object **strm_tab, u32 strms,
u32 i;
int status = 0;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(strm_tab != NULL);
- DBC_REQUIRE(pmask != NULL);
- DBC_REQUIRE(strms > 0);
-
*pmask = 0;
for (i = 0; i < strms; i++) {
if (!strm_tab[i]) {
@@ -811,9 +702,6 @@ int strm_select(struct strm_object **strm_tab, u32 strms,
func_end:
kfree(sync_events);
- DBC_ENSURE((!status && (*pmask != 0 || utimeout == 0)) ||
- (status && *pmask == 0));
-
return status;
}
diff --git a/drivers/staging/usbip/stub.h b/drivers/staging/usbip/stub.h
index d4073684eac..a73e437ec21 100644
--- a/drivers/staging/usbip/stub.h
+++ b/drivers/staging/usbip/stub.h
@@ -35,7 +35,6 @@
struct stub_device {
struct usb_interface *interface;
struct usb_device *udev;
- struct list_head list;
struct usbip_device ud;
__u32 devid;
diff --git a/drivers/staging/usbip/stub_dev.c b/drivers/staging/usbip/stub_dev.c
index 03420e25d9c..fa870e3f7f6 100644
--- a/drivers/staging/usbip/stub_dev.c
+++ b/drivers/staging/usbip/stub_dev.c
@@ -297,7 +297,6 @@ static struct stub_device *stub_device_alloc(struct usb_device *udev,
sdev->devid = (busnum << 16) | devnum;
sdev->ud.side = USBIP_STUB;
sdev->ud.status = SDEV_ST_AVAILABLE;
- /* sdev->ud.lock = SPIN_LOCK_UNLOCKED; */
spin_lock_init(&sdev->ud.lock);
sdev->ud.tcp_socket = NULL;
@@ -306,7 +305,6 @@ static struct stub_device *stub_device_alloc(struct usb_device *udev,
INIT_LIST_HEAD(&sdev->priv_free);
INIT_LIST_HEAD(&sdev->unlink_free);
INIT_LIST_HEAD(&sdev->unlink_tx);
- /* sdev->priv_lock = SPIN_LOCK_UNLOCKED; */
spin_lock_init(&sdev->priv_lock);
init_waitqueue_head(&sdev->tx_waitq);
diff --git a/drivers/staging/usbip/stub_rx.c b/drivers/staging/usbip/stub_rx.c
index 27ac363d1cf..1d5b3fc6216 100644
--- a/drivers/staging/usbip/stub_rx.c
+++ b/drivers/staging/usbip/stub_rx.c
@@ -367,15 +367,6 @@ static int get_pipe(struct stub_device *sdev, int epnum, int dir)
}
epd = &ep->desc;
-#if 0
- /* epnum 0 is always control */
- if (epnum == 0) {
- if (dir == USBIP_DIR_OUT)
- return usb_sndctrlpipe(udev, 0);
- else
- return usb_rcvctrlpipe(udev, 0);
- }
-#endif
if (usb_endpoint_xfer_control(epd)) {
if (dir == USBIP_DIR_OUT)
return usb_sndctrlpipe(udev, epnum);
diff --git a/drivers/staging/usbip/usbip_common.c b/drivers/staging/usbip/usbip_common.c
index d93e7f1f797..70f23026932 100644
--- a/drivers/staging/usbip/usbip_common.c
+++ b/drivers/staging/usbip/usbip_common.c
@@ -735,26 +735,25 @@ EXPORT_SYMBOL_GPL(usbip_recv_iso);
* buffer and iso packets need to be stored and be in proper endian in urb
* before calling this function
*/
-int usbip_pad_iso(struct usbip_device *ud, struct urb *urb)
+void usbip_pad_iso(struct usbip_device *ud, struct urb *urb)
{
int np = urb->number_of_packets;
int i;
- int ret;
int actualoffset = urb->actual_length;
if (!usb_pipeisoc(urb->pipe))
- return 0;
+ return;
/* if no packets or length of data is 0, then nothing to unpack */
if (np == 0 || urb->actual_length == 0)
- return 0;
+ return;
/*
* if actual_length is transfer_buffer_length then no padding is
* present.
*/
if (urb->actual_length == urb->transfer_buffer_length)
- return 0;
+ return;
/*
* loop over all packets from last to first (to prevent overwriting
@@ -766,8 +765,6 @@ int usbip_pad_iso(struct usbip_device *ud, struct urb *urb)
urb->transfer_buffer + actualoffset,
urb->iso_frame_desc[i].actual_length);
}
-
- return ret;
}
EXPORT_SYMBOL_GPL(usbip_pad_iso);
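The int-to-void conversion above is a bug fix, not churn: in the removed version, ret was declared, never assigned on the path that actually restores the padding (the early exits all return 0 explicitly), and then handed back via return ret, so the caller tested an uninitialized value. The shape of the removed bug, as a minimal standalone reproduction (hypothetical, not the driver's code):

	int pad_iso_old_shape(void)
	{
		int ret;	/* never written below                */
		/* ... memmove() each iso frame into place ... */
		return ret;	/* uninitialized value reaches caller */
	}

vhci_rx.c, the only in-tree caller, is adjusted to match further down.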
diff --git a/drivers/staging/usbip/usbip_common.h b/drivers/staging/usbip/usbip_common.h
index b8f8c48b8a7..c7b888ca54f 100644
--- a/drivers/staging/usbip/usbip_common.h
+++ b/drivers/staging/usbip/usbip_common.h
@@ -306,7 +306,7 @@ void usbip_header_correct_endian(struct usbip_header *pdu, int send);
void *usbip_alloc_iso_desc_pdu(struct urb *urb, ssize_t *bufflen);
/* some members of urb must be substituted before. */
int usbip_recv_iso(struct usbip_device *ud, struct urb *urb);
-int usbip_pad_iso(struct usbip_device *ud, struct urb *urb);
+void usbip_pad_iso(struct usbip_device *ud, struct urb *urb);
int usbip_recv_xbuff(struct usbip_device *ud, struct urb *urb);
/* usbip_event.c */
diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c
index 2ee97e2095b..dca9bf11f0c 100644
--- a/drivers/staging/usbip/vhci_hcd.c
+++ b/drivers/staging/usbip/vhci_hcd.c
@@ -386,29 +386,6 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
dum->port_status[rhport] |=
USB_PORT_STAT_ENABLE;
}
-#if 0
- if (dum->driver) {
- dum->port_status[rhport] |=
- USB_PORT_STAT_ENABLE;
- /* give it the best speed we agree on */
- dum->gadget.speed = dum->driver->speed;
- dum->gadget.ep0->maxpacket = 64;
- switch (dum->gadget.speed) {
- case USB_SPEED_HIGH:
- dum->port_status[rhport] |=
- USB_PORT_STAT_HIGH_SPEED;
- break;
- case USB_SPEED_LOW:
- dum->gadget.ep0->maxpacket = 8;
- dum->port_status[rhport] |=
- USB_PORT_STAT_LOW_SPEED;
- break;
- default:
- dum->gadget.speed = USB_SPEED_FULL;
- break;
- }
- }
-#endif
}
((u16 *) buf)[0] = cpu_to_le16(dum->port_status[rhport]);
((u16 *) buf)[1] = cpu_to_le16(dum->port_status[rhport] >> 16);
@@ -425,15 +402,6 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
case USB_PORT_FEAT_SUSPEND:
usbip_dbg_vhci_rh(" SetPortFeature: "
"USB_PORT_FEAT_SUSPEND\n");
-#if 0
- dum->port_status[rhport] |=
- (1 << USB_PORT_FEAT_SUSPEND);
- if (dum->driver->suspend) {
- spin_unlock(&dum->lock);
- dum->driver->suspend(&dum->gadget);
- spin_lock(&dum->lock);
- }
-#endif
break;
case USB_PORT_FEAT_RESET:
usbip_dbg_vhci_rh(" SetPortFeature: "
@@ -444,13 +412,6 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
~(USB_PORT_STAT_ENABLE |
USB_PORT_STAT_LOW_SPEED |
USB_PORT_STAT_HIGH_SPEED);
-#if 0
- if (dum->driver) {
- dev_dbg(hardware, "disconnect\n");
- stop_activity(dum, dum->driver);
- }
-#endif
-
/* FIXME test that code path! */
}
/* 50msec reset signaling */
@@ -934,14 +895,12 @@ static void vhci_device_init(struct vhci_device *vdev)
vdev->ud.side = USBIP_VHCI;
vdev->ud.status = VDEV_ST_NULL;
- /* vdev->ud.lock = SPIN_LOCK_UNLOCKED; */
spin_lock_init(&vdev->ud.lock);
INIT_LIST_HEAD(&vdev->priv_rx);
INIT_LIST_HEAD(&vdev->priv_tx);
INIT_LIST_HEAD(&vdev->unlink_tx);
INIT_LIST_HEAD(&vdev->unlink_rx);
- /* vdev->priv_lock = SPIN_LOCK_UNLOCKED; */
spin_lock_init(&vdev->priv_lock);
init_waitqueue_head(&vdev->waitq_tx);
diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c
index 3f511b47563..f5fba7320c5 100644
--- a/drivers/staging/usbip/vhci_rx.c
+++ b/drivers/staging/usbip/vhci_rx.c
@@ -94,8 +94,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
return;
/* restore the padding in iso packets */
- if (usbip_pad_iso(ud, urb) < 0)
- return;
+ usbip_pad_iso(ud, urb);
if (usbip_dbg_flag_vhci_rx)
usbip_dump_urb(urb);
diff --git a/drivers/staging/vme/devices/vme_pio2.h b/drivers/staging/vme/devices/vme_pio2.h
index 3c593136453..72d9ce0bcb4 100644
--- a/drivers/staging/vme/devices/vme_pio2.h
+++ b/drivers/staging/vme/devices/vme_pio2.h
@@ -243,7 +243,7 @@ struct pio2_card {
int pio2_cntr_reset(struct pio2_card *);
int pio2_gpio_reset(struct pio2_card *);
-int __init pio2_gpio_init(struct pio2_card *);
-void __exit pio2_gpio_exit(struct pio2_card *);
+int __devinit pio2_gpio_init(struct pio2_card *);
+void pio2_gpio_exit(struct pio2_card *);
#endif /* _VME_PIO2_H_ */
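pio2_gpio_init() is called from the card's probe path, and probe can run long after boot (for instance on a late module bind), so marking it __init, whose text is freed once kernel init finishes, risked jumping into freed memory; __devinit keeps the code around whenever hotplug is possible. Likewise pio2_gpio_exit() is reachable from the probe error path, not just module unload, so __exit (discarded when module unloading is disabled) was wrong. A declaration-level sketch of the three annotations (assuming <linux/init.h>):

	int  __init    boot_time_only(void);	/* freed after kernel init           */
	int  __devinit probe_time(void);	/* kept while CONFIG_HOTPLUG is set  */
	void __exit    unload_only(void);	/* dropped if module cannot unload   */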
diff --git a/drivers/staging/vme/devices/vme_pio2_gpio.c b/drivers/staging/vme/devices/vme_pio2_gpio.c
index dc837deb99d..858484915f0 100644
--- a/drivers/staging/vme/devices/vme_pio2_gpio.c
+++ b/drivers/staging/vme/devices/vme_pio2_gpio.c
@@ -187,7 +187,7 @@ int pio2_gpio_reset(struct pio2_card *card)
return 0;
}
-int __init pio2_gpio_init(struct pio2_card *card)
+int __devinit pio2_gpio_init(struct pio2_card *card)
{
int retval = 0;
char *label;
@@ -220,7 +220,7 @@ int __init pio2_gpio_init(struct pio2_card *card)
return retval;
};
-void __exit pio2_gpio_exit(struct pio2_card *card)
+void pio2_gpio_exit(struct pio2_card *card)
{
const char *label = card->gc.label;
diff --git a/drivers/staging/vme/vme.h b/drivers/staging/vme/vme.h
index 9d38ceed60e..c9d65bf14ce 100644
--- a/drivers/staging/vme/vme.h
+++ b/drivers/staging/vme/vme.h
@@ -156,7 +156,7 @@ int vme_irq_request(struct vme_dev *, int, int,
void vme_irq_free(struct vme_dev *, int, int);
int vme_irq_generate(struct vme_dev *, int, int);
-struct vme_resource * vme_lm_request(struct vme_dev *);
+struct vme_resource *vme_lm_request(struct vme_dev *);
int vme_lm_count(struct vme_resource *);
int vme_lm_set(struct vme_resource *, unsigned long long, u32, u32);
int vme_lm_get(struct vme_resource *, unsigned long long *, u32 *, u32 *);
diff --git a/drivers/staging/vt6655/bssdb.c b/drivers/staging/vt6655/bssdb.c
index 577599ed70a..1368e8cc9ad 100644
--- a/drivers/staging/vt6655/bssdb.c
+++ b/drivers/staging/vt6655/bssdb.c
@@ -1327,13 +1327,13 @@ start:
}
if (pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) {
- // if adhoc started which essid is NULL string, rescaning.
+ // if adhoc started which essid is NULL string, rescanning.
if ((pMgmt->eCurrState == WMAC_STATE_STARTED) && (pCurrSSID->len == 0)) {
if (pDevice->uAutoReConnectTime < 10) {
pDevice->uAutoReConnectTime++;
}
else {
- DBG_PRT(MSG_LEVEL_NOTICE, KERN_INFO "Adhoc re-scaning ...\n");
+ DBG_PRT(MSG_LEVEL_NOTICE, KERN_INFO "Adhoc re-scanning ...\n");
pMgmt->eScanType = WMAC_SCAN_ACTIVE;
bScheduleCommand((void *) pDevice, WLAN_CMD_BSSID_SCAN, NULL);
bScheduleCommand((void *) pDevice, WLAN_CMD_SSID, NULL);
diff --git a/drivers/staging/vt6655/ioctl.c b/drivers/staging/vt6655/ioctl.c
index 7fd5cc5a55f..ef197efab04 100644
--- a/drivers/staging/vt6655/ioctl.c
+++ b/drivers/staging/vt6655/ioctl.c
@@ -324,16 +324,16 @@ int private_ioctl(PSDevice pDevice, struct ifreq *rq)
pItemSSID = (PWLAN_IE_SSID)pBSS->abySSID;
memset(pList->sBSSIDList[ii].abySSID, 0, WLAN_SSID_MAXLEN + 1);
memcpy(pList->sBSSIDList[ii].abySSID, pItemSSID->abySSID, pItemSSID->len);
- if (WLAN_GET_CAP_INFO_ESS(pBSS->wCapInfo)) {
+ if (WLAN_GET_CAP_INFO_ESS(pBSS->wCapInfo))
pList->sBSSIDList[ii].byNetType = INFRA;
- } else {
+ else
pList->sBSSIDList[ii].byNetType = ADHOC;
- }
- if (WLAN_GET_CAP_INFO_PRIVACY(pBSS->wCapInfo)) {
+
+ if (WLAN_GET_CAP_INFO_PRIVACY(pBSS->wCapInfo))
pList->sBSSIDList[ii].bWEPOn = true;
- } else {
+ else
pList->sBSSIDList[ii].bWEPOn = false;
- }
+
ii++;
if (ii >= pList->uItem)
break;
@@ -367,9 +367,9 @@ int private_ioctl(PSDevice pDevice, struct ifreq *rq)
netif_stop_queue(pDevice->dev);
spin_lock_irq(&pDevice->lock);
- if (pDevice->bRadioOff == false) {
+ if (pDevice->bRadioOff == false)
CARDbRadioPowerOff(pDevice);
- }
+
pDevice->bLinkPass = false;
memset(pMgmt->abyCurrBSSID, 0, 6);
pMgmt->eCurrState = WMAC_STATE_IDLE;
@@ -489,13 +489,12 @@ int private_ioctl(PSDevice pDevice, struct ifreq *rq)
break;
}
- if (sStartAPCmd.wBBPType == PHY80211g) {
+ if (sStartAPCmd.wBBPType == PHY80211g)
pMgmt->byAPBBType = PHY_TYPE_11G;
- } else if (sStartAPCmd.wBBPType == PHY80211a) {
+ else if (sStartAPCmd.wBBPType == PHY80211a)
pMgmt->byAPBBType = PHY_TYPE_11A;
- } else {
+ else
pMgmt->byAPBBType = PHY_TYPE_11B;
- }
pItemSSID = (PWLAN_IE_SSID)sStartAPCmd.ssid;
if (pItemSSID->len > WLAN_SSID_MAXLEN + 1)
diff --git a/drivers/staging/vt6656/bssdb.c b/drivers/staging/vt6656/bssdb.c
index 32c67ed8435..619c257e877 100644
--- a/drivers/staging/vt6656/bssdb.c
+++ b/drivers/staging/vt6656/bssdb.c
@@ -1195,13 +1195,13 @@ else {
}
if (pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) {
- // if adhoc started which essid is NULL string, rescaning.
+ // if adhoc started which essid is NULL string, rescanning.
if ((pMgmt->eCurrState == WMAC_STATE_STARTED) && (pCurrSSID->len == 0)) {
if (pDevice->uAutoReConnectTime < 10) {
pDevice->uAutoReConnectTime++;
}
else {
- DBG_PRT(MSG_LEVEL_NOTICE, KERN_INFO "Adhoc re-scaning ...\n");
+ DBG_PRT(MSG_LEVEL_NOTICE, KERN_INFO "Adhoc re-scanning ...\n");
pMgmt->eScanType = WMAC_SCAN_ACTIVE;
bScheduleCommand((void *) pDevice, WLAN_CMD_BSSID_SCAN, NULL);
bScheduleCommand((void *) pDevice, WLAN_CMD_SSID, NULL);
diff --git a/drivers/staging/vt6656/iwctl.c b/drivers/staging/vt6656/iwctl.c
index ecfda5272fa..b24e5314a6a 100644
--- a/drivers/staging/vt6656/iwctl.c
+++ b/drivers/staging/vt6656/iwctl.c
@@ -46,9 +46,6 @@
#include <net/iw_handler.h>
-
-/*--------------------- Static Definitions -------------------------*/
-
#ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT
#define SUPPORTED_WIRELESS_EXT 18
#else
@@ -63,19 +60,8 @@ static const long frequency_list[] = {
5700, 5745, 5765, 5785, 5805, 5825
};
-
-/*--------------------- Static Classes ----------------------------*/
-
-
-//static int msglevel =MSG_LEVEL_DEBUG;
static int msglevel =MSG_LEVEL_INFO;
-
-/*--------------------- Static Variables --------------------------*/
-/*--------------------- Static Functions --------------------------*/
-
-/*--------------------- Export Variables --------------------------*/
-
struct iw_statistics *iwctl_get_wireless_stats(struct net_device *dev)
{
PSDevice pDevice = netdev_priv(dev);
@@ -87,7 +73,6 @@ struct iw_statistics *iwctl_get_wireless_stats(struct net_device *dev)
pDevice->wstats.qual.qual =(BYTE) pDevice->scStatistic.LinkQuality;
RFvRSSITodBm(pDevice, (BYTE)(pDevice->uCurrRSSI), &ldBm);
pDevice->wstats.qual.level = ldBm;
- //pDevice->wstats.qual.level = 0x100 - pDevice->uCurrRSSI;
pDevice->wstats.qual.noise = 0;
pDevice->wstats.qual.updated = 1;
pDevice->wstats.discard.nwid = 0;
@@ -100,21 +85,6 @@ struct iw_statistics *iwctl_get_wireless_stats(struct net_device *dev)
return &pDevice->wstats;
}
-
-
-/*------------------------------------------------------------------*/
-
-
-static int iwctl_commit(struct net_device *dev,
- struct iw_request_info *info,
- void *wrq,
- char *extra)
-{
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCSIWCOMMIT\n");
-
- return 0;
-}
-
/*
* Wireless Handler : get protocol name
*/
@@ -197,14 +167,12 @@ if(pDevice->byReAssocCount > 0) { //reject scan when re-associating!
}
pMgmt->eScanType = WMAC_SCAN_PASSIVE;
- //printk("SIOCSIWSCAN:WLAN_CMD_BSSID_SCAN\n");
bScheduleCommand((void *) pDevice, WLAN_CMD_BSSID_SCAN, NULL);
spin_unlock_irq(&pDevice->lock);
return 0;
}
-
/*
* Wireless Handler : get scan results
*/
@@ -503,7 +471,7 @@ int iwctl_siwmode(struct net_device *dev,
* Wireless Handler : get operation mode
*/
-int iwctl_giwmode(struct net_device *dev,
+void iwctl_giwmode(struct net_device *dev,
struct iw_request_info *info,
__u32 *wmode,
char *extra)
@@ -530,8 +498,6 @@ int iwctl_giwmode(struct net_device *dev,
default:
*wmode = IW_MODE_ADHOC;
}
-
- return 0;
}
@@ -539,7 +505,7 @@ int iwctl_giwmode(struct net_device *dev,
* Wireless Handler : get capability range
*/
-int iwctl_giwrange(struct net_device *dev,
+void iwctl_giwrange(struct net_device *dev,
struct iw_request_info *info,
struct iw_point *wrq,
char *extra)
@@ -634,9 +600,6 @@ int iwctl_giwrange(struct net_device *dev,
range->avg_qual.level = 176; // -80 dBm
range->avg_qual.noise = 0;
}
-
-
- return 0;
}
@@ -708,9 +671,7 @@ int iwctl_giwap(struct net_device *dev,
memcpy(wrq->sa_data, pMgmt->abyCurrBSSID, 6);
-//20080123-02,<Modify> by Einsn Liu
if ((pDevice->bLinkPass == FALSE) && (pMgmt->eCurrMode != WMAC_MODE_ESS_AP))
- // if ((pDevice->bLinkPass == FALSE) && (pMgmt->eCurrMode == WMAC_MODE_ESS_STA))
memset(wrq->sa_data, 0, 6);
if (pMgmt->eCurrMode == WMAC_MODE_ESS_AP) {
@@ -895,8 +856,7 @@ int iwctl_siwessid(struct net_device *dev,
/*
* Wireless Handler : get essid
*/
-
-int iwctl_giwessid(struct net_device *dev,
+void iwctl_giwessid(struct net_device *dev,
struct iw_request_info *info,
struct iw_point *wrq,
char *extra)
@@ -913,14 +873,11 @@ int iwctl_giwessid(struct net_device *dev,
// Get the current SSID
pItemSSID = (PWLAN_IE_SSID)pMgmt->abyCurrSSID;
- //pItemSSID = (PWLAN_IE_SSID)pMgmt->abyDesireSSID;
memcpy(extra, pItemSSID->abySSID , pItemSSID->len);
extra[pItemSSID->len] = '\0';
wrq->length = pItemSSID->len;
wrq->flags = 1; // active
-
- return 0;
}
/*
@@ -1008,8 +965,7 @@ int iwctl_siwrate(struct net_device *dev,
/*
* Wireless Handler : get data rate
*/
-
-int iwctl_giwrate(struct net_device *dev,
+void iwctl_giwrate(struct net_device *dev,
struct iw_request_info *info,
struct iw_param *wrq,
char *extra)
@@ -1047,9 +1003,6 @@ int iwctl_giwrate(struct net_device *dev,
if (pDevice->bFixRate == TRUE)
wrq->fixed = TRUE;
}
-
-
- return 0;
}
@@ -1057,27 +1010,19 @@ int iwctl_giwrate(struct net_device *dev,
/*
* Wireless Handler : set rts threshold
*/
-
int iwctl_siwrts(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *wrq,
- char *extra)
+ struct iw_param *wrq)
{
- PSDevice pDevice = (PSDevice)netdev_priv(dev);
- int rc = 0;
+ PSDevice pDevice = (PSDevice)netdev_priv(dev);
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCSIWRTS \n");
+ if ((wrq->value < 0 || wrq->value > 2312) && !wrq->disabled)
+ return -EINVAL;
- {
- int rthr = wrq->value;
- if(wrq->disabled)
- rthr = 2312;
- if((rthr < 0) || (rthr > 2312)) {
- rc = -EINVAL;
- }else {
- pDevice->wRTSThreshold = rthr;
- }
- }
+ else if (wrq->disabled)
+ pDevice->wRTSThreshold = 2312;
+
+ else
+ pDevice->wRTSThreshold = wrq->value;
return 0;
}
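The rewritten handler keeps the old semantics in three flat branches instead of nested blocks and a scratch variable. Its behavior, derived from the hunk above:

	/*
	 *  wrq->disabled | wrq->value     | result
	 *  --------------+----------------+------------------------------
	 *  false         | < 0 or > 2312  | -EINVAL, threshold unchanged
	 *  true          | (ignored)      | wRTSThreshold = 2312
	 *  false         | 0..2312        | wRTSThreshold = wrq->value
	 */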
@@ -1327,55 +1272,6 @@ int iwctl_siwencode(struct net_device *dev,
return rc;
}
-/*
- * Wireless Handler : get encode mode
- */
-//2008-0409-06, <Mark> by Einsn Liu
- /*
-int iwctl_giwencode(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *wrq,
- char *extra)
-{
- PSDevice pDevice = (PSDevice)netdev_priv(dev);
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
- int rc = 0;
- char abyKey[WLAN_WEP232_KEYLEN];
- unsigned int index = (unsigned int)(wrq->flags & IW_ENCODE_INDEX);
- PSKeyItem pKey = NULL;
-
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCGIWENCODE\n");
-
-
- memset(abyKey, 0, sizeof(abyKey));
- // Check encryption mode
- wrq->flags = IW_ENCODE_NOKEY;
- // Is WEP enabled ???
- if (pDevice->bEncryptionEnable)
- wrq->flags |= IW_ENCODE_ENABLED;
- else
- wrq->flags |= IW_ENCODE_DISABLED;
-
- if (pMgmt->bShareKeyAlgorithm)
- wrq->flags |= IW_ENCODE_RESTRICTED;
- else
- wrq->flags |= IW_ENCODE_OPEN;
-
- if (KeybGetKey(&(pDevice->sKey), pDevice->abyBroadcastAddr, (BYTE)index , &pKey)){
- wrq->length = pKey->uKeyLength;
- memcpy(abyKey, pKey->abyKey, pKey->uKeyLength);
- }
- else {
- rc = -EINVAL;
- return rc;
- }
- wrq->flags |= index;
- // Copy the key to the user buffer
- memcpy(extra, abyKey, WLAN_WEP232_KEYLEN);
- return 0;
-}
-*/
-
int iwctl_giwencode(struct net_device *dev,
struct iw_request_info *info,
struct iw_point *wrq,
@@ -1562,7 +1458,6 @@ int iwctl_siwauth(struct net_device *dev,
wpa_version = wrq->value;
if(wrq->value == IW_AUTH_WPA_VERSION_DISABLED) {
PRINT_K("iwctl_siwauth:set WPADEV to disable at 1??????\n");
- //pDevice->bWPADEVUp = FALSE;
}
else if(wrq->value == IW_AUTH_WPA_VERSION_WPA) {
PRINT_K("iwctl_siwauth:set WPADEV to WPA1******\n");
@@ -1570,7 +1465,6 @@ int iwctl_siwauth(struct net_device *dev,
else {
PRINT_K("iwctl_siwauth:set WPADEV to WPA2******\n");
}
- //pDevice->bWPASuppWextEnabled =TRUE;
break;
case IW_AUTH_CIPHER_PAIRWISE:
pairwise = wrq->value;
@@ -1627,11 +1521,6 @@ int iwctl_siwauth(struct net_device *dev,
}
break;
case IW_AUTH_WPA_ENABLED:
- //pDevice->bWPADEVUp = !! wrq->value;
- //if(pDevice->bWPADEVUp==TRUE)
- // printk("iwctl_siwauth:set WPADEV to enable successful*******\n");
- //else
- // printk("iwctl_siwauth:set WPADEV to enable fail?????\n");
break;
case IW_AUTH_RX_UNENCRYPTED_EAPOL:
break;
@@ -1646,7 +1535,6 @@ int iwctl_siwauth(struct net_device *dev,
pDevice->eEncryptionStatus = Ndis802_11EncryptionDisabled;
pMgmt->bShareKeyAlgorithm = FALSE;
pMgmt->eAuthenMode = WMAC_AUTH_OPEN;
- //pDevice->bWPADEVUp = FALSE;
PRINT_K("iwctl_siwauth:set WPADEV to disaable at 2?????\n");
}
@@ -1655,15 +1543,6 @@ int iwctl_siwauth(struct net_device *dev,
ret = -EOPNOTSUPP;
break;
}
-/*
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "wpa_version = %d\n",wpa_version);
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "pairwise = %d\n",pairwise);
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "pDevice->eEncryptionStatus = %d\n",pDevice->eEncryptionStatus);
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "pMgmt->eAuthenMode = %d\n",pMgmt->eAuthenMode);
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "pMgmt->bShareKeyAlgorithm = %s\n",pMgmt->bShareKeyAlgorithm?"TRUE":"FALSE");
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "pDevice->bEncryptionEnable = %s\n",pDevice->bEncryptionEnable?"TRUE":"FALSE");
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "pDevice->bWPADEVUp = %s\n",pDevice->bWPADEVUp?"TRUE":"FALSE");
-*/
return ret;
}
@@ -1752,8 +1631,6 @@ int iwctl_siwencodeext(struct net_device *dev,
u8 seq[IW_ENCODE_SEQ_MAX_SIZE];
u8 key[64];
size_t seq_len=0,key_len=0;
-//
- // int ii;
u8 *buf;
size_t blen;
u8 key_array[64];
@@ -1883,7 +1760,6 @@ int iwctl_siwmlme(struct net_device *dev,
PSDevice pDevice = (PSDevice)netdev_priv(dev);
PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
struct iw_mlme *mlme = (struct iw_mlme *)extra;
- //u16 reason = cpu_to_le16(mlme->reason_code);
int ret = 0;
if(memcmp(pMgmt->abyCurrBSSID, mlme->addr.sa_data, ETH_ALEN)){
@@ -1892,12 +1768,6 @@ int iwctl_siwmlme(struct net_device *dev,
}
switch(mlme->cmd){
case IW_MLME_DEAUTH:
- //this command seems to be not complete,please test it --einsnliu
- //printk("iwctl_siwmlme--->send DEAUTH\n");
- /* bScheduleCommand((void *) pDevice,
- WLAN_CMD_DEAUTH,
- (PBYTE)&reason); */
- //break;
case IW_MLME_DISASSOC:
if(pDevice->bLinkPass == TRUE){
PRINT_K("iwctl_siwmlme--->send DISASSOCIATE\n");
@@ -1916,77 +1786,9 @@ int iwctl_siwmlme(struct net_device *dev,
#endif
-/*------------------------------------------------------------------*/
-/*
- * Structures to export the Wireless Handlers
- */
-
-
-/*
-static const iw_handler iwctl_handler[] =
-{
- (iw_handler) iwctl_commit, // SIOCSIWCOMMIT
- (iw_handler) iwctl_giwname, // SIOCGIWNAME
- (iw_handler) NULL, // SIOCSIWNWID
- (iw_handler) iwctl_siwfreq, // SIOCSIWFREQ
- (iw_handler) iwctl_giwfreq, // SIOCGIWFREQ
- (iw_handler) iwctl_siwmode, // SIOCSIWMODE
- (iw_handler) iwctl_giwmode, // SIOCGIWMODE
- (iw_handler) NULL, // SIOCSIWSENS
- (iw_handler) iwctl_giwsens, // SIOCGIWSENS
- (iw_handler) NULL, // SIOCSIWRANGE
- (iw_handler) iwctl_giwrange, // SIOCGIWRANGE
- (iw_handler) NULL, // SIOCSIWPRIV
- (iw_handler) NULL, // SIOCGIWPRIV
- (iw_handler) NULL, // SIOCSIWSTATS
- (iw_handler) NULL, // SIOCGIWSTATS
- (iw_handler) NULL, // SIOCSIWSPY
- (iw_handler) NULL, // SIOCGIWSPY
- (iw_handler) NULL, // -- hole --
- (iw_handler) NULL, // -- hole --
- (iw_handler) iwctl_siwap, // SIOCSIWAP
- (iw_handler) iwctl_giwap, // SIOCGIWAP
- (iw_handler) NULL, // -- hole -- 0x16
- (iw_handler) iwctl_giwaplist, // SIOCGIWAPLIST
- (iw_handler) iwctl_siwscan, // SIOCSIWSCAN
- (iw_handler) iwctl_giwscan, // SIOCGIWSCAN
- (iw_handler) iwctl_siwessid, // SIOCSIWESSID
- (iw_handler) iwctl_giwessid, // SIOCGIWESSID
- (iw_handler) NULL, // SIOCSIWNICKN
- (iw_handler) NULL, // SIOCGIWNICKN
- (iw_handler) NULL, // -- hole --
- (iw_handler) NULL, // -- hole --
- (iw_handler) iwctl_siwrate, // SIOCSIWRATE 0x20
- (iw_handler) iwctl_giwrate, // SIOCGIWRATE
- (iw_handler) iwctl_siwrts, // SIOCSIWRTS
- (iw_handler) iwctl_giwrts, // SIOCGIWRTS
- (iw_handler) iwctl_siwfrag, // SIOCSIWFRAG
- (iw_handler) iwctl_giwfrag, // SIOCGIWFRAG
- (iw_handler) NULL, // SIOCSIWTXPOW
- (iw_handler) NULL, // SIOCGIWTXPOW
- (iw_handler) iwctl_siwretry, // SIOCSIWRETRY
- (iw_handler) iwctl_giwretry, // SIOCGIWRETRY
- (iw_handler) iwctl_siwencode, // SIOCSIWENCODE
- (iw_handler) iwctl_giwencode, // SIOCGIWENCODE
- (iw_handler) iwctl_siwpower, // SIOCSIWPOWER
- (iw_handler) iwctl_giwpower, // SIOCGIWPOWER
- (iw_handler) NULL, // -- hole --
- (iw_handler) NULL, // -- hole --
- (iw_handler) iwctl_siwgenie, // SIOCSIWGENIE
- (iw_handler) iwctl_giwgenie, // SIOCGIWGENIE
- (iw_handler) iwctl_siwauth, // SIOCSIWAUTH
- (iw_handler) iwctl_giwauth, // SIOCGIWAUTH
- (iw_handler) iwctl_siwencodeext, // SIOCSIWENCODEEXT
- (iw_handler) iwctl_giwencodeext, // SIOCGIWENCODEEXT
- (iw_handler) NULL, // SIOCSIWPMKSA
- (iw_handler) NULL, // -- hole --
-
-};
-*/
-
static const iw_handler iwctl_handler[] =
{
- (iw_handler) iwctl_commit, // SIOCSIWCOMMIT
+ (iw_handler) NULL, /* SIOCSIWCOMMIT */
(iw_handler) NULL, // SIOCGIWNAME
(iw_handler) NULL, // SIOCSIWNWID
(iw_handler) NULL, // SIOCGIWNWID
@@ -2063,13 +1865,9 @@ const struct iw_handler_def iwctl_handler_def =
{
.get_wireless_stats = &iwctl_get_wireless_stats,
.num_standard = sizeof(iwctl_handler)/sizeof(iw_handler),
-// .num_private = sizeof(iwctl_private_handler)/sizeof(iw_handler),
-// .num_private_args = sizeof(iwctl_private_args)/sizeof(struct iw_priv_args),
.num_private = 0,
.num_private_args = 0,
.standard = (iw_handler *) iwctl_handler,
-// .private = (iw_handler *) iwctl_private_handler,
-// .private_args = (struct iw_priv_args *)iwctl_private_args,
.private = NULL,
.private_args = NULL,
};
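Replacing the iwctl_commit slot with NULL is a small behavioral change worth noting: for a NULL entry in the standard handler table, the wireless-extensions core fails the ioctl (with -EOPNOTSUPP, if memory serves) instead of calling a stub that only printed a debug line and returned 0. Since the stub did no work, nothing of substance is lost. Sketch of the convention (assumed wext dispatch behavior):

	static const iw_handler handlers[] = {
		(iw_handler) NULL,	/* SIOCSIWCOMMIT: core rejects the request */
		(iw_handler) NULL,	/* SIOCGIWNAME:   likewise, no handler     */
	};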
diff --git a/drivers/staging/vt6656/iwctl.h b/drivers/staging/vt6656/iwctl.h
index 10a240e6501..0c6e0496779 100644
--- a/drivers/staging/vt6656/iwctl.h
+++ b/drivers/staging/vt6656/iwctl.h
@@ -46,13 +46,13 @@ int iwctl_siwap(struct net_device *dev,
struct sockaddr *wrq,
char *extra);
-int iwctl_giwrange(struct net_device *dev,
+void iwctl_giwrange(struct net_device *dev,
struct iw_request_info *info,
struct iw_point *wrq,
char *extra);
-int iwctl_giwmode(struct net_device *dev,
+void iwctl_giwmode(struct net_device *dev,
struct iw_request_info *info,
__u32 *wmode,
char *extra);
@@ -97,7 +97,7 @@ int iwctl_siwessid(struct net_device *dev,
struct iw_point *wrq,
char *extra);
-int iwctl_giwessid(struct net_device *dev,
+void iwctl_giwessid(struct net_device *dev,
struct iw_request_info *info,
struct iw_point *wrq,
char *extra);
@@ -107,16 +107,13 @@ int iwctl_siwrate(struct net_device *dev,
struct iw_param *wrq,
char *extra);
-int iwctl_giwrate(struct net_device *dev,
+void iwctl_giwrate(struct net_device *dev,
struct iw_request_info *info,
struct iw_param *wrq,
char *extra);
int iwctl_siwrts(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *wrq,
- char *extra);
-
+ struct iw_param *wrq);
int iwctl_giwrts(struct net_device *dev,
struct iw_request_info *info,
diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c
index 6a708f44765..763e028a5cc 100644
--- a/drivers/staging/vt6656/main_usb.c
+++ b/drivers/staging/vt6656/main_usb.c
@@ -1657,8 +1657,8 @@ static int device_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) {
{
char essid[IW_ESSID_MAX_SIZE+1];
if (wrq->u.essid.pointer) {
- rc = iwctl_giwessid(dev, NULL,
- &(wrq->u.essid), essid);
+ iwctl_giwessid(dev, NULL,
+ &(wrq->u.essid), essid);
if (copy_to_user(wrq->u.essid.pointer,
essid,
wrq->u.essid.length) )
@@ -1698,14 +1698,13 @@ static int device_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) {
// Get the current bit-rate
case SIOCGIWRATE:
-
- rc = iwctl_giwrate(dev, NULL, &(wrq->u.bitrate), NULL);
+ iwctl_giwrate(dev, NULL, &(wrq->u.bitrate), NULL);
break;
// Set the desired RTS threshold
case SIOCSIWRTS:
- rc = iwctl_siwrts(dev, NULL, &(wrq->u.rts), NULL);
+ rc = iwctl_siwrts(dev, &(wrq->u.rts));
break;
// Get the current RTS threshold
@@ -1733,7 +1732,7 @@ static int device_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) {
// Get mode of operation
case SIOCGIWMODE:
- rc = iwctl_giwmode(dev, NULL, &(wrq->u.mode), NULL);
+ iwctl_giwmode(dev, NULL, &(wrq->u.mode), NULL);
break;
// Set WEP keys and mode
@@ -1811,7 +1810,7 @@ static int device_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) {
{
struct iw_range range;
- rc = iwctl_giwrange(dev, NULL, &(wrq->u.data), (char *) &range);
+ iwctl_giwrange(dev, NULL, &(wrq->u.data), (char *) &range);
if (copy_to_user(wrq->u.data.pointer, &range, sizeof(struct iw_range)))
rc = -EFAULT;
}
diff --git a/drivers/staging/vt6656/wpactl.c b/drivers/staging/vt6656/wpactl.c
index 2fa4f845a75..5435e8205b2 100644
--- a/drivers/staging/vt6656/wpactl.c
+++ b/drivers/staging/vt6656/wpactl.c
@@ -46,23 +46,18 @@
#define VIAWGET_WPA_MAX_BUF_SIZE 1024
-
-
static const int frequency_list[] = {
2412, 2417, 2422, 2427, 2432, 2437, 2442,
2447, 2452, 2457, 2462, 2467, 2472, 2484
};
+
/*--------------------- Static Classes ----------------------------*/
/*--------------------- Static Variables --------------------------*/
-//static int msglevel =MSG_LEVEL_DEBUG;
-static int msglevel =MSG_LEVEL_INFO;
+static int msglevel = MSG_LEVEL_INFO;
/*--------------------- Static Functions --------------------------*/
-
-
-
/*--------------------- Export Variables --------------------------*/
static void wpadev_setup(struct net_device *dev)
{
@@ -72,9 +67,9 @@ static void wpadev_setup(struct net_device *dev)
dev->addr_len = ETH_ALEN;
dev->tx_queue_len = 1000;
- memset(dev->broadcast,0xFF, ETH_ALEN);
+ memset(dev->broadcast, 0xFF, ETH_ALEN);
- dev->flags = IFF_BROADCAST|IFF_MULTICAST;
+ dev->flags = IFF_BROADCAST | IFF_MULTICAST;
}
/*
@@ -90,45 +85,43 @@ static void wpadev_setup(struct net_device *dev)
* Return Value:
*
*/
-
static int wpa_init_wpadev(PSDevice pDevice)
{
- PSDevice wpadev_priv;
+ PSDevice wpadev_priv;
struct net_device *dev = pDevice->dev;
- int ret=0;
+ int ret = 0;
pDevice->wpadev = alloc_netdev(sizeof(PSDevice), "vntwpa", wpadev_setup);
if (pDevice->wpadev == NULL)
return -ENOMEM;
- wpadev_priv = netdev_priv(pDevice->wpadev);
- *wpadev_priv = *pDevice;
+ wpadev_priv = netdev_priv(pDevice->wpadev);
+ *wpadev_priv = *pDevice;
memcpy(pDevice->wpadev->dev_addr, dev->dev_addr, ETH_ALEN);
- pDevice->wpadev->base_addr = dev->base_addr;
+ pDevice->wpadev->base_addr = dev->base_addr;
pDevice->wpadev->irq = dev->irq;
pDevice->wpadev->mem_start = dev->mem_start;
pDevice->wpadev->mem_end = dev->mem_end;
ret = register_netdev(pDevice->wpadev);
if (ret) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: register_netdev(WPA) failed!\n",
- dev->name);
+ dev->name);
free_netdev(pDevice->wpadev);
return -1;
}
if (pDevice->skb == NULL) {
- pDevice->skb = dev_alloc_skb((int)pDevice->rx_buf_sz);
- if (pDevice->skb == NULL)
- return -ENOMEM;
- }
+ pDevice->skb = dev_alloc_skb((int)pDevice->rx_buf_sz);
+ if (pDevice->skb == NULL)
+ return -ENOMEM;
+ }
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Registered netdev %s for WPA management\n",
- dev->name, pDevice->wpadev->name);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Registered netdev %s for WPA management\n",
+ dev->name, pDevice->wpadev->name);
return 0;
}
-
/*
* Description:
* unregister net_device (wpadev)
@@ -141,29 +134,24 @@ static int wpa_init_wpadev(PSDevice pDevice)
* Return Value:
*
*/
-
static int wpa_release_wpadev(PSDevice pDevice)
{
- if (pDevice->skb) {
- dev_kfree_skb(pDevice->skb);
- pDevice->skb = NULL;
- }
+ if (pDevice->skb) {
+ dev_kfree_skb(pDevice->skb);
+ pDevice->skb = NULL;
+ }
- if (pDevice->wpadev) {
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Netdevice %s unregistered\n",
- pDevice->dev->name, pDevice->wpadev->name);
- unregister_netdev(pDevice->wpadev);
- free_netdev(pDevice->wpadev);
- pDevice->wpadev = NULL;
- }
+ if (pDevice->wpadev) {
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Netdevice %s unregistered\n",
+ pDevice->dev->name, pDevice->wpadev->name);
+ unregister_netdev(pDevice->wpadev);
+ free_netdev(pDevice->wpadev);
+ pDevice->wpadev = NULL;
+ }
return 0;
}
-
-
-
-
/*
* Description:
* Set enable/disable dev for wpa supplicant daemon
@@ -177,13 +165,11 @@ static int wpa_release_wpadev(PSDevice pDevice)
* Return Value:
*
*/
-
int wpa_set_wpadev(PSDevice pDevice, int val)
{
if (val)
return wpa_init_wpadev(pDevice);
- else
- return wpa_release_wpadev(pDevice);
+ return wpa_release_wpadev(pDevice);
}
/*
@@ -199,245 +185,217 @@ int wpa_set_wpadev(PSDevice pDevice, int val)
* Return Value:
*
*/
-
int wpa_set_keys(PSDevice pDevice, void *ctx, BOOL fcpfkernel)
{
- struct viawget_wpa_param *param=ctx;
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
- DWORD dwKeyIndex = 0;
- BYTE abyKey[MAX_KEY_LEN];
- BYTE abySeq[MAX_KEY_LEN];
- QWORD KeyRSC;
-// NDIS_802_11_KEY_RSC KeyRSC;
- BYTE byKeyDecMode = KEY_CTL_WEP;
+ struct viawget_wpa_param *param = ctx;
+ PSMgmtObject pMgmt = &pDevice->sMgmtObj;
+ DWORD dwKeyIndex = 0;
+ BYTE abyKey[MAX_KEY_LEN];
+ BYTE abySeq[MAX_KEY_LEN];
+ QWORD KeyRSC;
+ BYTE byKeyDecMode = KEY_CTL_WEP;
int ret = 0;
- int uu, ii;
-
+ int uu;
+ int ii;
if (param->u.wpa_key.alg_name > WPA_ALG_CCMP)
return -EINVAL;
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "param->u.wpa_key.alg_name = %d \n", param->u.wpa_key.alg_name);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "param->u.wpa_key.alg_name = %d \n",
+ param->u.wpa_key.alg_name);
if (param->u.wpa_key.alg_name == WPA_ALG_NONE) {
- pDevice->eEncryptionStatus = Ndis802_11EncryptionDisabled;
- pDevice->bEncryptionEnable = FALSE;
- pDevice->byKeyIndex = 0;
- pDevice->bTransmitKey = FALSE;
- for (uu=0; uu<MAX_KEY_TABLE; uu++) {
- MACvDisableKeyEntry(pDevice, uu);
- }
- return ret;
- }
+ pDevice->eEncryptionStatus = Ndis802_11EncryptionDisabled;
+ pDevice->bEncryptionEnable = FALSE;
+ pDevice->byKeyIndex = 0;
+ pDevice->bTransmitKey = FALSE;
+ for (uu=0; uu<MAX_KEY_TABLE; uu++) {
+ MACvDisableKeyEntry(pDevice, uu);
+ }
+ return ret;
+ }
if (param->u.wpa_key.key && param->u.wpa_key.key_len > sizeof(abyKey))
return -EINVAL;
- spin_unlock_irq(&pDevice->lock);
- if(param->u.wpa_key.key && fcpfkernel) {
- memcpy(&abyKey[0], param->u.wpa_key.key, param->u.wpa_key.key_len);
- }
- else {
- if (param->u.wpa_key.key &&
- copy_from_user(&abyKey[0], param->u.wpa_key.key, param->u.wpa_key.key_len)) {
- spin_lock_irq(&pDevice->lock);
- return -EINVAL;
+ spin_unlock_irq(&pDevice->lock);
+ if (param->u.wpa_key.key && fcpfkernel) {
+ memcpy(&abyKey[0], param->u.wpa_key.key, param->u.wpa_key.key_len);
+ } else {
+ if (param->u.wpa_key.key &&
+ copy_from_user(&abyKey[0], param->u.wpa_key.key,
+ param->u.wpa_key.key_len)) {
+ spin_lock_irq(&pDevice->lock);
+ return -EINVAL;
+ }
}
- }
- spin_lock_irq(&pDevice->lock);
+ spin_lock_irq(&pDevice->lock);
- dwKeyIndex = (DWORD)(param->u.wpa_key.key_index);
+ dwKeyIndex = (DWORD)(param->u.wpa_key.key_index);
if (param->u.wpa_key.alg_name == WPA_ALG_WEP) {
- if (dwKeyIndex > 3) {
- return -EINVAL;
- }
- else {
- if (param->u.wpa_key.set_tx) {
- pDevice->byKeyIndex = (BYTE)dwKeyIndex;
- pDevice->bTransmitKey = TRUE;
- dwKeyIndex |= (1 << 31);
- }
- KeybSetDefaultKey( pDevice,
- &(pDevice->sKey),
- dwKeyIndex & ~(BIT30 | USE_KEYRSC),
- param->u.wpa_key.key_len,
- NULL,
- abyKey,
- KEY_CTL_WEP
- );
-
- }
- pDevice->eEncryptionStatus = Ndis802_11Encryption1Enabled;
- pDevice->bEncryptionEnable = TRUE;
- return ret;
+ if (dwKeyIndex > 3) {
+ return -EINVAL;
+ } else {
+ if (param->u.wpa_key.set_tx) {
+ pDevice->byKeyIndex = (BYTE)dwKeyIndex;
+ pDevice->bTransmitKey = TRUE;
+ dwKeyIndex |= (1 << 31);
+ }
+ KeybSetDefaultKey( pDevice,
+ &(pDevice->sKey),
+ dwKeyIndex & ~(BIT30 | USE_KEYRSC),
+ param->u.wpa_key.key_len,
+ NULL,
+ abyKey,
+ KEY_CTL_WEP
+ );
+
+ }
+ pDevice->eEncryptionStatus = Ndis802_11Encryption1Enabled;
+ pDevice->bEncryptionEnable = TRUE;
+ return ret;
}
if (param->u.wpa_key.seq && param->u.wpa_key.seq_len > sizeof(abySeq))
return -EINVAL;
- spin_unlock_irq(&pDevice->lock);
- if(param->u.wpa_key.seq && fcpfkernel) {
- memcpy(&abySeq[0], param->u.wpa_key.seq, param->u.wpa_key.seq_len);
- }
- else {
- if (param->u.wpa_key.seq &&
- copy_from_user(&abySeq[0], param->u.wpa_key.seq, param->u.wpa_key.seq_len)) {
- spin_lock_irq(&pDevice->lock);
- return -EINVAL;
- }
+ spin_unlock_irq(&pDevice->lock);
+ if (param->u.wpa_key.seq && fcpfkernel) {
+ memcpy(&abySeq[0], param->u.wpa_key.seq, param->u.wpa_key.seq_len);
+ } else {
+ if (param->u.wpa_key.seq &&
+ copy_from_user(&abySeq[0], param->u.wpa_key.seq,
+ param->u.wpa_key.seq_len)) {
+ spin_lock_irq(&pDevice->lock);
+ return -EINVAL;
+ }
}
spin_lock_irq(&pDevice->lock);
if (param->u.wpa_key.seq_len > 0) {
for (ii = 0 ; ii < param->u.wpa_key.seq_len ; ii++) {
- if (ii < 4)
- LODWORD(KeyRSC) |= (abySeq[ii] << (ii * 8));
- else
- HIDWORD(KeyRSC) |= (abySeq[ii] << ((ii-4) * 8));
- //KeyRSC |= (abySeq[ii] << (ii * 8));
+ if (ii < 4)
+ LODWORD(KeyRSC) |= (abySeq[ii] << (ii * 8));
+ else
+ HIDWORD(KeyRSC) |= (abySeq[ii] << ((ii-4) * 8));
}
dwKeyIndex |= 1 << 29;
}
- if (param->u.wpa_key.key_index >= MAX_GROUP_KEY) {
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "return dwKeyIndex > 3\n");
- return -EINVAL;
- }
+ if (param->u.wpa_key.key_index >= MAX_GROUP_KEY) {
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "return dwKeyIndex > 3\n");
+ return -EINVAL;
+ }
if (param->u.wpa_key.alg_name == WPA_ALG_TKIP) {
- pDevice->eEncryptionStatus = Ndis802_11Encryption2Enabled;
- }
+ pDevice->eEncryptionStatus = Ndis802_11Encryption2Enabled;
+ }
if (param->u.wpa_key.alg_name == WPA_ALG_CCMP) {
- pDevice->eEncryptionStatus = Ndis802_11Encryption3Enabled;
- }
+ pDevice->eEncryptionStatus = Ndis802_11Encryption3Enabled;
+ }
if (param->u.wpa_key.set_tx)
dwKeyIndex |= (1 << 31);
- if (pDevice->eEncryptionStatus == Ndis802_11Encryption3Enabled)
- byKeyDecMode = KEY_CTL_CCMP;
- else if (pDevice->eEncryptionStatus == Ndis802_11Encryption2Enabled)
- byKeyDecMode = KEY_CTL_TKIP;
- else
- byKeyDecMode = KEY_CTL_WEP;
-
- // Fix HCT test that set 256 bits KEY and Ndis802_11Encryption3Enabled
- if (pDevice->eEncryptionStatus == Ndis802_11Encryption3Enabled) {
- if (param->u.wpa_key.key_len == MAX_KEY_LEN)
- byKeyDecMode = KEY_CTL_TKIP;
- else if (param->u.wpa_key.key_len == WLAN_WEP40_KEYLEN)
- byKeyDecMode = KEY_CTL_WEP;
- else if (param->u.wpa_key.key_len == WLAN_WEP104_KEYLEN)
- byKeyDecMode = KEY_CTL_WEP;
- } else if (pDevice->eEncryptionStatus == Ndis802_11Encryption2Enabled) {
- if (param->u.wpa_key.key_len == WLAN_WEP40_KEYLEN)
- byKeyDecMode = KEY_CTL_WEP;
- else if (param->u.wpa_key.key_len == WLAN_WEP104_KEYLEN)
- byKeyDecMode = KEY_CTL_WEP;
- }
-
- // Check TKIP key length
- if ((byKeyDecMode == KEY_CTL_TKIP) &&
- (param->u.wpa_key.key_len != MAX_KEY_LEN)) {
- // TKIP Key must be 256 bits
- //DBG_PRN_WLAN03(("return NDIS_STATUS_INVALID_DATA - TKIP Key must be 256 bits\n"));
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "return- TKIP Key must be 256 bits!\n");
- return -EINVAL;
- }
- // Check AES key length
- if ((byKeyDecMode == KEY_CTL_CCMP) &&
- (param->u.wpa_key.key_len != AES_KEY_LEN)) {
- // AES Key must be 128 bits
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "return - AES Key must be 128 bits\n");
- return -EINVAL;
- }
+ if (pDevice->eEncryptionStatus == Ndis802_11Encryption3Enabled)
+ byKeyDecMode = KEY_CTL_CCMP;
+ else if (pDevice->eEncryptionStatus == Ndis802_11Encryption2Enabled)
+ byKeyDecMode = KEY_CTL_TKIP;
+ else
+ byKeyDecMode = KEY_CTL_WEP;
+
+ // Fix HCT test that set 256 bits KEY and Ndis802_11Encryption3Enabled
+ if (pDevice->eEncryptionStatus == Ndis802_11Encryption3Enabled) {
+ if (param->u.wpa_key.key_len == MAX_KEY_LEN)
+ byKeyDecMode = KEY_CTL_TKIP;
+ else if (param->u.wpa_key.key_len == WLAN_WEP40_KEYLEN)
+ byKeyDecMode = KEY_CTL_WEP;
+ else if (param->u.wpa_key.key_len == WLAN_WEP104_KEYLEN)
+ byKeyDecMode = KEY_CTL_WEP;
+ } else if (pDevice->eEncryptionStatus == Ndis802_11Encryption2Enabled) {
+ if (param->u.wpa_key.key_len == WLAN_WEP40_KEYLEN)
+ byKeyDecMode = KEY_CTL_WEP;
+ else if (param->u.wpa_key.key_len == WLAN_WEP104_KEYLEN)
+ byKeyDecMode = KEY_CTL_WEP;
+ }
- if (is_broadcast_ether_addr(&param->addr[0]) || (param->addr == NULL)) {
- /* if broadcast, set the key as every key entry's group key */
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Groupe Key Assign.\n");
-
- if ((KeybSetAllGroupKey(pDevice,
- &(pDevice->sKey),
- dwKeyIndex,
- param->u.wpa_key.key_len,
- (PQWORD) &(KeyRSC),
- (PBYTE)abyKey,
- byKeyDecMode
- ) == TRUE) &&
- (KeybSetDefaultKey(pDevice,
- &(pDevice->sKey),
- dwKeyIndex,
- param->u.wpa_key.key_len,
- (PQWORD) &(KeyRSC),
- (PBYTE)abyKey,
- byKeyDecMode
- ) == TRUE) ) {
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "GROUP Key Assign.\n");
-
- } else {
- //DBG_PRN_WLAN03(("return NDIS_STATUS_INVALID_DATA -KeybSetDefaultKey Fail.0\n"));
- return -EINVAL;
- }
-
- } else {
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Pairwise Key Assign.\n");
- // BSSID not 0xffffffffffff
- // Pairwise Key can't be WEP
- if (byKeyDecMode == KEY_CTL_WEP) {
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Pairwise Key can't be WEP\n");
- return -EINVAL;
- }
-
- dwKeyIndex |= (1 << 30); // set pairwise key
- if (pMgmt->eConfigMode == WMAC_CONFIG_IBSS_STA) {
- //DBG_PRN_WLAN03(("return NDIS_STATUS_INVALID_DATA - WMAC_CONFIG_IBSS_STA\n"));
- return -EINVAL;
- }
- if (KeybSetKey(pDevice,
- &(pDevice->sKey),
- &param->addr[0],
- dwKeyIndex,
- param->u.wpa_key.key_len,
- (PQWORD) &(KeyRSC),
- (PBYTE)abyKey,
- byKeyDecMode
- ) == TRUE) {
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Pairwise Key Set\n");
-
- } else {
- // Key Table Full
- if (!compare_ether_addr(&param->addr[0], pDevice->abyBSSID)) {
- //DBG_PRN_WLAN03(("return NDIS_STATUS_INVALID_DATA -Key Table Full.2\n"));
- return -EINVAL;
-
- } else {
- // Save Key and configure just before associate/reassociate to BSSID
- // we do not implement now
- return -EINVAL;
- }
- }
- } // BSSID not 0xffffffffffff
- if ((ret == 0) && ((param->u.wpa_key.set_tx) != 0)) {
- pDevice->byKeyIndex = (BYTE)param->u.wpa_key.key_index;
- pDevice->bTransmitKey = TRUE;
+ // Check TKIP key length
+ if ((byKeyDecMode == KEY_CTL_TKIP) &&
+ (param->u.wpa_key.key_len != MAX_KEY_LEN)) {
+ // TKIP Key must be 256 bits
+		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "return - TKIP Key must be 256 bits!\n");
+ return -EINVAL;
}
- pDevice->bEncryptionEnable = TRUE;
+ // Check AES key length
+ if ((byKeyDecMode == KEY_CTL_CCMP) &&
+ (param->u.wpa_key.key_len != AES_KEY_LEN)) {
+ // AES Key must be 128 bits
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "return - AES Key must be 128 bits\n");
+ return -EINVAL;
+ }
-/*
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " key=%x-%x-%x-%x-%x-xxxxx \n",
- pMgmt->sNodeDBTable[iNodeIndex].abyWepKey[byKeyIndex][0],
- pMgmt->sNodeDBTable[iNodeIndex].abyWepKey[byKeyIndex][1],
- pMgmt->sNodeDBTable[iNodeIndex].abyWepKey[byKeyIndex][2],
- pMgmt->sNodeDBTable[iNodeIndex].abyWepKey[byKeyIndex][3],
- pMgmt->sNodeDBTable[iNodeIndex].abyWepKey[byKeyIndex][4]
- );
-*/
+ if (is_broadcast_ether_addr(&param->addr[0]) || (param->addr == NULL)) {
+ /* if broadcast, set the key as every key entry's group key */
+		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Group Key Assign.\n");
+
+ if ((KeybSetAllGroupKey(pDevice, &(pDevice->sKey), dwKeyIndex,
+ param->u.wpa_key.key_len,
+ (PQWORD) &(KeyRSC),
+ (PBYTE)abyKey,
+ byKeyDecMode
+ ) == TRUE) &&
+ (KeybSetDefaultKey(pDevice,
+ &(pDevice->sKey),
+ dwKeyIndex,
+ param->u.wpa_key.key_len,
+ (PQWORD) &(KeyRSC),
+ (PBYTE)abyKey,
+ byKeyDecMode
+					) == TRUE)) {
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "GROUP Key Assign.\n");
+ } else {
+ return -EINVAL;
+ }
+ } else {
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Pairwise Key Assign.\n");
+ // BSSID not 0xffffffffffff
+ // Pairwise Key can't be WEP
+ if (byKeyDecMode == KEY_CTL_WEP) {
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Pairwise Key can't be WEP\n");
+ return -EINVAL;
+ }
+ dwKeyIndex |= (1 << 30); // set pairwise key
+ if (pMgmt->eConfigMode == WMAC_CONFIG_IBSS_STA) {
+ //DBG_PRN_WLAN03(("return NDIS_STATUS_INVALID_DATA - WMAC_CONFIG_IBSS_STA\n"));
+ return -EINVAL;
+ }
+ if (KeybSetKey(pDevice, &(pDevice->sKey), &param->addr[0],
+ dwKeyIndex, param->u.wpa_key.key_len,
+ (PQWORD) &(KeyRSC), (PBYTE)abyKey, byKeyDecMode
+ ) == TRUE) {
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Pairwise Key Set\n");
+ } else {
+ // Key Table Full
+ if (!compare_ether_addr(&param->addr[0], pDevice->abyBSSID)) {
+ //DBG_PRN_WLAN03(("return NDIS_STATUS_INVALID_DATA -Key Table Full.2\n"));
+ return -EINVAL;
+ } else {
+				// Save the key and configure it just before
+				// (re)associating to the BSSID; not implemented yet.
+ return -EINVAL;
+ }
+ }
+ } // BSSID not 0xffffffffffff
+ if ((ret == 0) && ((param->u.wpa_key.set_tx) != 0)) {
+ pDevice->byKeyIndex = (BYTE)param->u.wpa_key.key_index;
+ pDevice->bTransmitKey = TRUE;
+ }
+ pDevice->bEncryptionEnable = TRUE;
return ret;
-
}
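Throughout wpa_set_keys() above, pDevice->lock is dropped around copy_from_user() and re-taken before any return: the copy can fault and sleep, which is not allowed under a spinlock with interrupts disabled. A minimal sketch of that pattern, with hypothetical names (demo_dev, fetch_user_key) and the conventional -EFAULT where this driver happens to return -EINVAL:

#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/uaccess.h>

struct demo_dev {
	spinlock_t lock;
};

/* Called with dev->lock held; returns with it held again. */
static int fetch_user_key(struct demo_dev *dev, const void __user *ukey,
			  u8 *kbuf, size_t len)
{
	spin_unlock_irq(&dev->lock);		/* copy_from_user() may sleep */
	if (copy_from_user(kbuf, ukey, len)) {
		spin_lock_irq(&dev->lock);	/* re-take before erroring out */
		return -EFAULT;
	}
	spin_lock_irq(&dev->lock);
	return 0;
}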
@@ -454,23 +412,17 @@ int wpa_set_wpadev(PSDevice pDevice, int val)
* Return Value:
*
*/
-
-static int wpa_set_wpa(PSDevice pDevice,
- struct viawget_wpa_param *param)
+static int wpa_set_wpa(PSDevice pDevice, struct viawget_wpa_param *param)
{
-
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
+ PSMgmtObject pMgmt = &pDevice->sMgmtObj;
int ret = 0;
- pMgmt->eAuthenMode = WMAC_AUTH_OPEN;
- pMgmt->bShareKeyAlgorithm = FALSE;
+ pMgmt->eAuthenMode = WMAC_AUTH_OPEN;
+ pMgmt->bShareKeyAlgorithm = FALSE;
- return ret;
+ return ret;
}
-
-
-
/*
* Description:
* set disassociate
@@ -484,25 +436,21 @@ static int wpa_set_wpa(PSDevice pDevice,
* Return Value:
*
*/
-
-static int wpa_set_disassociate(PSDevice pDevice,
- struct viawget_wpa_param *param)
+static int wpa_set_disassociate(PSDevice pDevice, struct viawget_wpa_param *param)
{
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
+ PSMgmtObject pMgmt = &pDevice->sMgmtObj;
int ret = 0;
- spin_lock_irq(&pDevice->lock);
- if (pDevice->bLinkPass) {
- if (!memcmp(param->addr, pMgmt->abyCurrBSSID, 6))
- bScheduleCommand((void *) pDevice, WLAN_CMD_DISASSOCIATE, NULL);
- }
- spin_unlock_irq(&pDevice->lock);
+ spin_lock_irq(&pDevice->lock);
+ if (pDevice->bLinkPass) {
+ if (!memcmp(param->addr, pMgmt->abyCurrBSSID, 6))
+ bScheduleCommand((void *)pDevice, WLAN_CMD_DISASSOCIATE, NULL);
+ }
+ spin_unlock_irq(&pDevice->lock);
- return ret;
+ return ret;
}
-
-
/*
* Description:
* enable scan process
@@ -516,36 +464,30 @@ static int wpa_set_disassociate(PSDevice pDevice,
* Return Value:
*
*/
-
-static int wpa_set_scan(PSDevice pDevice,
- struct viawget_wpa_param *param)
+static int wpa_set_scan(PSDevice pDevice, struct viawget_wpa_param *param)
{
int ret = 0;
/**set ap_scan=1&&scan_ssid=1 under hidden ssid mode**/
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
- PWLAN_IE_SSID pItemSSID;
-printk("wpa_set_scan-->desired [ssid=%s,ssid_len=%d]\n",
- param->u.scan_req.ssid,param->u.scan_req.ssid_len);
+ PSMgmtObject pMgmt = &pDevice->sMgmtObj;
+ PWLAN_IE_SSID pItemSSID;
+ printk("wpa_set_scan-->desired [ssid=%s,ssid_len=%d]\n",
+	       param->u.scan_req.ssid, param->u.scan_req.ssid_len);
// Set the SSID
-memset(pMgmt->abyDesireSSID, 0, WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1);
-pItemSSID = (PWLAN_IE_SSID)pMgmt->abyDesireSSID;
-pItemSSID->byElementID = WLAN_EID_SSID;
-memcpy(pItemSSID->abySSID, param->u.scan_req.ssid, param->u.scan_req.ssid_len);
-pItemSSID->len = param->u.scan_req.ssid_len;
-
- spin_lock_irq(&pDevice->lock);
- BSSvClearBSSList((void *) pDevice, pDevice->bLinkPass);
- /* bScheduleCommand((void *) pDevice, WLAN_CMD_BSSID_SCAN, NULL); */
- bScheduleCommand((void *) pDevice,
- WLAN_CMD_BSSID_SCAN,
- pMgmt->abyDesireSSID);
- spin_unlock_irq(&pDevice->lock);
-
- return ret;
-}
+ memset(pMgmt->abyDesireSSID, 0, WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1);
+ pItemSSID = (PWLAN_IE_SSID)pMgmt->abyDesireSSID;
+ pItemSSID->byElementID = WLAN_EID_SSID;
+ memcpy(pItemSSID->abySSID, param->u.scan_req.ssid, param->u.scan_req.ssid_len);
+ pItemSSID->len = param->u.scan_req.ssid_len;
+ spin_lock_irq(&pDevice->lock);
+ BSSvClearBSSList((void *) pDevice, pDevice->bLinkPass);
+ bScheduleCommand((void *) pDevice, WLAN_CMD_BSSID_SCAN,
+ pMgmt->abyDesireSSID);
+ spin_unlock_irq(&pDevice->lock);
+ return ret;
+}
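wpa_set_scan() above builds the desired-SSID information element in place. Per IEEE 802.11 an IE is an element id octet, a length octet, then the payload (an SSID carries at most 32 octets). A sketch of the layout, with a hypothetical struct name:

#include <linux/types.h>

struct demo_ie_ssid {
	u8 id;		/* WLAN_EID_SSID (element id 0) */
	u8 len;		/* number of valid octets in ssid[] */
	u8 ssid[32];	/* payload; not NUL-terminated */
} __packed;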
/*
* Description:
@@ -560,19 +502,15 @@ pItemSSID->len = param->u.scan_req.ssid_len;
* Return Value:
*
*/
-
-static int wpa_get_bssid(PSDevice pDevice,
- struct viawget_wpa_param *param)
+static int wpa_get_bssid(PSDevice pDevice, struct viawget_wpa_param *param)
{
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
- int ret = 0;
- memcpy(param->u.wpa_associate.bssid, pMgmt->abyCurrBSSID , 6);
+ PSMgmtObject pMgmt = &pDevice->sMgmtObj;
+ int ret = 0;
+ memcpy(param->u.wpa_associate.bssid, pMgmt->abyCurrBSSID, 6);
return ret;
-
}
-
/*
* Description:
* get bssid
@@ -586,24 +524,20 @@ static int wpa_get_bssid(PSDevice pDevice,
* Return Value:
*
*/
-
-static int wpa_get_ssid(PSDevice pDevice,
- struct viawget_wpa_param *param)
+static int wpa_get_ssid(PSDevice pDevice, struct viawget_wpa_param *param)
{
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
- PWLAN_IE_SSID pItemSSID;
+ PSMgmtObject pMgmt = &pDevice->sMgmtObj;
+ PWLAN_IE_SSID pItemSSID;
int ret = 0;
- pItemSSID = (PWLAN_IE_SSID)pMgmt->abyCurrSSID;
+ pItemSSID = (PWLAN_IE_SSID)pMgmt->abyCurrSSID;
- memcpy(param->u.wpa_associate.ssid, pItemSSID->abySSID , pItemSSID->len);
+ memcpy(param->u.wpa_associate.ssid, pItemSSID->abySSID, pItemSSID->len);
param->u.wpa_associate.ssid_len = pItemSSID->len;
- return ret;
+ return ret;
}
-
-
/*
* Description:
* get scan results
@@ -617,135 +551,114 @@ static int wpa_get_ssid(PSDevice pDevice,
* Return Value:
*
*/
-
-static int wpa_get_scan(PSDevice pDevice,
- struct viawget_wpa_param *param)
+static int wpa_get_scan(PSDevice pDevice, struct viawget_wpa_param *param)
{
struct viawget_scan_result *scan_buf;
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
- PWLAN_IE_SSID pItemSSID;
- PKnownBSS pBSS;
- PBYTE pBuf;
+ PSMgmtObject pMgmt = &pDevice->sMgmtObj;
+ PWLAN_IE_SSID pItemSSID;
+ PKnownBSS pBSS;
+ PBYTE pBuf;
int ret = 0;
u16 count = 0;
- u16 ii, jj;
- long ldBm;//James //add
+ u16 ii;
+ u16 jj;
+ long ldBm; //James //add
//******mike:bubble sort by stronger RSSI*****//
+ PBYTE ptempBSS;
- PBYTE ptempBSS;
-
+ ptempBSS = kmalloc(sizeof(KnownBSS), GFP_ATOMIC);
+ if (ptempBSS == NULL) {
+		printk("bubble sort kmalloc memory fail\n");
+ ret = -ENOMEM;
+ return ret;
+ }
- ptempBSS = kmalloc(sizeof(KnownBSS), (int)GFP_ATOMIC);
-
- if (ptempBSS == NULL) {
-
- printk("bubble sort kmalloc memory fail@@@\n");
-
- ret = -ENOMEM;
-
- return ret;
-
- }
-
- for (ii = 0; ii < MAX_BSS_NUM; ii++) {
-
- for (jj = 0; jj < MAX_BSS_NUM - ii - 1; jj++) {
-
- if ((pMgmt->sBSSList[jj].bActive != TRUE) ||
-
- ((pMgmt->sBSSList[jj].uRSSI>pMgmt->sBSSList[jj+1].uRSSI) &&(pMgmt->sBSSList[jj+1].bActive!=FALSE))) {
-
- memcpy(ptempBSS,&pMgmt->sBSSList[jj],sizeof(KnownBSS));
-
- memcpy(&pMgmt->sBSSList[jj],&pMgmt->sBSSList[jj+1],sizeof(KnownBSS));
-
- memcpy(&pMgmt->sBSSList[jj+1],ptempBSS,sizeof(KnownBSS));
-
- }
-
- }
-
- }
-
- kfree(ptempBSS);
-
- // printk("bubble sort result:\n");
+ for (ii = 0; ii < MAX_BSS_NUM; ii++) {
+ for (jj = 0; jj < MAX_BSS_NUM - ii - 1; jj++) {
+			if ((pMgmt->sBSSList[jj].bActive != TRUE) ||
+			    ((pMgmt->sBSSList[jj].uRSSI > pMgmt->sBSSList[jj + 1].uRSSI) &&
+			     (pMgmt->sBSSList[jj + 1].bActive != FALSE))) {
+				memcpy(ptempBSS, &pMgmt->sBSSList[jj], sizeof(KnownBSS));
+ memcpy(&pMgmt->sBSSList[jj], &pMgmt->sBSSList[jj + 1],
+ sizeof(KnownBSS));
+ memcpy(&pMgmt->sBSSList[jj + 1], ptempBSS, sizeof(KnownBSS));
+ }
+ }
+ }
+ kfree(ptempBSS);
count = 0;
pBSS = &(pMgmt->sBSSList[0]);
- for (ii = 0; ii < MAX_BSS_NUM; ii++) {
- pBSS = &(pMgmt->sBSSList[ii]);
- if (!pBSS->bActive)
- continue;
- count++;
- }
+ for (ii = 0; ii < MAX_BSS_NUM; ii++) {
+ pBSS = &(pMgmt->sBSSList[ii]);
+ if (!pBSS->bActive)
+ continue;
+ count++;
+ }
- pBuf = kcalloc(count, sizeof(struct viawget_scan_result), (int)GFP_ATOMIC);
+ pBuf = kcalloc(count, sizeof(struct viawget_scan_result), GFP_ATOMIC);
- if (pBuf == NULL) {
- ret = -ENOMEM;
- return ret;
- }
- scan_buf = (struct viawget_scan_result *)pBuf;
+ if (pBuf == NULL) {
+ ret = -ENOMEM;
+ return ret;
+ }
+ scan_buf = (struct viawget_scan_result *)pBuf;
pBSS = &(pMgmt->sBSSList[0]);
- for (ii = 0, jj = 0; ii < MAX_BSS_NUM ; ii++) {
- pBSS = &(pMgmt->sBSSList[ii]);
- if (pBSS->bActive) {
- if (jj >= count)
- break;
- memcpy(scan_buf->bssid, pBSS->abyBSSID, WLAN_BSSID_LEN);
- pItemSSID = (PWLAN_IE_SSID)pBSS->abySSID;
- memcpy(scan_buf->ssid, pItemSSID->abySSID, pItemSSID->len);
- scan_buf->ssid_len = pItemSSID->len;
- scan_buf->freq = frequency_list[pBSS->uChannel-1];
- scan_buf->caps = pBSS->wCapInfo; //DavidWang for sharemode
-
- RFvRSSITodBm(pDevice, (BYTE)(pBSS->uRSSI), &ldBm);
- if(-ldBm<50){
+ for (ii = 0, jj = 0; ii < MAX_BSS_NUM; ii++) {
+ pBSS = &(pMgmt->sBSSList[ii]);
+ if (pBSS->bActive) {
+ if (jj >= count)
+ break;
+ memcpy(scan_buf->bssid, pBSS->abyBSSID, WLAN_BSSID_LEN);
+ pItemSSID = (PWLAN_IE_SSID)pBSS->abySSID;
+ memcpy(scan_buf->ssid, pItemSSID->abySSID, pItemSSID->len);
+ scan_buf->ssid_len = pItemSSID->len;
+ scan_buf->freq = frequency_list[pBSS->uChannel-1];
+ scan_buf->caps = pBSS->wCapInfo; // DavidWang for sharemode
+
+ RFvRSSITodBm(pDevice, (BYTE)(pBSS->uRSSI), &ldBm);
+ if (-ldBm < 50)
scan_buf->qual = 100;
- }else if(-ldBm > 90) {
- scan_buf->qual = 0;
- }else {
+ else if (-ldBm > 90)
+ scan_buf->qual = 0;
+ else
scan_buf->qual=(40-(-ldBm-50))*100/40;
- }
//James
- //scan_buf->caps = pBSS->wCapInfo;
- //scan_buf->qual =
- scan_buf->noise = 0;
- scan_buf->level = ldBm;
-
- //scan_buf->maxrate =
- if (pBSS->wWPALen != 0) {
- scan_buf->wpa_ie_len = pBSS->wWPALen;
- memcpy(scan_buf->wpa_ie, pBSS->byWPAIE, pBSS->wWPALen);
- }
- if (pBSS->wRSNLen != 0) {
- scan_buf->rsn_ie_len = pBSS->wRSNLen;
- memcpy(scan_buf->rsn_ie, pBSS->byRSNIE, pBSS->wRSNLen);
- }
- scan_buf = (struct viawget_scan_result *)((PBYTE)scan_buf + sizeof(struct viawget_scan_result));
- jj ++;
- }
- }
+ //scan_buf->caps = pBSS->wCapInfo;
+ //scan_buf->qual =
+ scan_buf->noise = 0;
+ scan_buf->level = ldBm;
+
+ //scan_buf->maxrate =
+ if (pBSS->wWPALen != 0) {
+ scan_buf->wpa_ie_len = pBSS->wWPALen;
+ memcpy(scan_buf->wpa_ie, pBSS->byWPAIE, pBSS->wWPALen);
+ }
+ if (pBSS->wRSNLen != 0) {
+ scan_buf->rsn_ie_len = pBSS->wRSNLen;
+ memcpy(scan_buf->rsn_ie, pBSS->byRSNIE, pBSS->wRSNLen);
+ }
+ scan_buf = (struct viawget_scan_result *)((PBYTE)scan_buf + sizeof(struct viawget_scan_result));
+			jj++;
+ }
+ }
- if (jj < count)
- count = jj;
+ if (jj < count)
+ count = jj;
- if (copy_to_user(param->u.scan_results.buf, pBuf, sizeof(struct viawget_scan_result) * count)) {
+ if (copy_to_user(param->u.scan_results.buf, pBuf, sizeof(struct viawget_scan_result) * count))
ret = -EFAULT;
- }
+
param->u.scan_results.scan_count = count;
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " param->u.scan_results.scan_count = %d\n", count)
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " param->u.scan_results.scan_count = %d\n", count);
- kfree(pBuf);
- return ret;
+ kfree(pBuf);
+ return ret;
}
-
-
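The quality figure computed in wpa_get_scan() above maps received power linearly onto 0..100: at or above -50 dBm gives 100, at or below -90 dBm gives 0. The same mapping as a standalone helper (hypothetical name rssi_to_qual):

/* -50 dBm or stronger -> 100; -90 dBm or weaker -> 0; linear in between. */
static int rssi_to_qual(long dbm)
{
	if (-dbm < 50)
		return 100;
	if (-dbm > 90)
		return 0;
	return (40 - (-dbm - 50)) * 100 / 40;
}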
/*
* Description:
* set associate with AP
@@ -759,25 +672,23 @@ static int wpa_get_scan(PSDevice pDevice,
* Return Value:
*
*/
-
-static int wpa_set_associate(PSDevice pDevice,
- struct viawget_wpa_param *param)
+static int wpa_set_associate(PSDevice pDevice, struct viawget_wpa_param *param)
{
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
- PWLAN_IE_SSID pItemSSID;
- BYTE abyNullAddr[] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
- BYTE abyWPAIE[64];
- int ret = 0;
- BOOL bwepEnabled=FALSE;
+ PSMgmtObject pMgmt = &pDevice->sMgmtObj;
+ PWLAN_IE_SSID pItemSSID;
+ BYTE abyNullAddr[] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
+ BYTE abyWPAIE[64];
+ int ret = 0;
+	BOOL bwepEnabled = FALSE;
// set key type & algorithm
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "pairwise_suite = %d\n", param->u.wpa_associate.pairwise_suite);
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "group_suite = %d\n", param->u.wpa_associate.group_suite);
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "key_mgmt_suite = %d\n", param->u.wpa_associate.key_mgmt_suite);
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "auth_alg = %d\n", param->u.wpa_associate.auth_alg);
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "mode = %d\n", param->u.wpa_associate.mode);
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "wpa_ie_len = %d\n", param->u.wpa_associate.wpa_ie_len);
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Roaming dBm = %d\n", param->u.wpa_associate.roam_dbm); //Davidwang
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "pairwise_suite = %d\n", param->u.wpa_associate.pairwise_suite);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "group_suite = %d\n", param->u.wpa_associate.group_suite);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "key_mgmt_suite = %d\n", param->u.wpa_associate.key_mgmt_suite);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "auth_alg = %d\n", param->u.wpa_associate.auth_alg);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "mode = %d\n", param->u.wpa_associate.mode);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "wpa_ie_len = %d\n", param->u.wpa_associate.wpa_ie_len);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Roaming dBm = %d\n", param->u.wpa_associate.roam_dbm); // Davidwang
if (param->u.wpa_associate.wpa_ie) {
if (param->u.wpa_associate.wpa_ie_len > sizeof(abyWPAIE))
@@ -789,25 +700,25 @@ static int wpa_set_associate(PSDevice pDevice,
}
if (param->u.wpa_associate.mode == 1)
- pMgmt->eConfigMode = WMAC_CONFIG_IBSS_STA;
+ pMgmt->eConfigMode = WMAC_CONFIG_IBSS_STA;
else
- pMgmt->eConfigMode = WMAC_CONFIG_ESS_STA;
+ pMgmt->eConfigMode = WMAC_CONFIG_ESS_STA;
// set bssid
- if (memcmp(param->u.wpa_associate.bssid, &abyNullAddr[0], 6) != 0)
- memcpy(pMgmt->abyDesireBSSID, param->u.wpa_associate.bssid, 6);
- // set ssid
+ if (memcmp(param->u.wpa_associate.bssid, &abyNullAddr[0], 6) != 0)
+ memcpy(pMgmt->abyDesireBSSID, param->u.wpa_associate.bssid, 6);
+ // set ssid
memset(pMgmt->abyDesireSSID, 0, WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1);
- pItemSSID = (PWLAN_IE_SSID)pMgmt->abyDesireSSID;
- pItemSSID->byElementID = WLAN_EID_SSID;
+ pItemSSID = (PWLAN_IE_SSID)pMgmt->abyDesireSSID;
+ pItemSSID->byElementID = WLAN_EID_SSID;
pItemSSID->len = param->u.wpa_associate.ssid_len;
memcpy(pItemSSID->abySSID, param->u.wpa_associate.ssid, pItemSSID->len);
- if (param->u.wpa_associate.wpa_ie_len == 0) {
- if (param->u.wpa_associate.auth_alg & AUTH_ALG_SHARED_KEY)
- pMgmt->eAuthenMode = WMAC_AUTH_SHAREKEY;
- else
- pMgmt->eAuthenMode = WMAC_AUTH_OPEN;
+ if (param->u.wpa_associate.wpa_ie_len == 0) {
+ if (param->u.wpa_associate.auth_alg & AUTH_ALG_SHARED_KEY)
+ pMgmt->eAuthenMode = WMAC_AUTH_SHAREKEY;
+ else
+ pMgmt->eAuthenMode = WMAC_AUTH_OPEN;
} else if (abyWPAIE[0] == RSN_INFO_ELEM) {
if (param->u.wpa_associate.key_mgmt_suite == KEY_MGMT_PSK)
pMgmt->eAuthenMode = WMAC_AUTH_WPA2PSK;
@@ -817,9 +728,9 @@ static int wpa_set_associate(PSDevice pDevice,
if (param->u.wpa_associate.key_mgmt_suite == KEY_MGMT_WPA_NONE)
pMgmt->eAuthenMode = WMAC_AUTH_WPANONE;
else if (param->u.wpa_associate.key_mgmt_suite == KEY_MGMT_PSK)
- pMgmt->eAuthenMode = WMAC_AUTH_WPAPSK;
+ pMgmt->eAuthenMode = WMAC_AUTH_WPAPSK;
else
- pMgmt->eAuthenMode = WMAC_AUTH_WPA;
+ pMgmt->eAuthenMode = WMAC_AUTH_WPA;
}
switch (param->u.wpa_associate.pairwise_suite) {
@@ -833,7 +744,6 @@ static int wpa_set_associate(PSDevice pDevice,
case CIPHER_WEP104:
pDevice->eEncryptionStatus = Ndis802_11Encryption1Enabled;
bwepEnabled = TRUE;
- // printk("****************wpa_set_associate:set CIPHER_WEP40_104\n");
break;
case CIPHER_NONE:
if (param->u.wpa_associate.group_suite == CIPHER_CCMP)
@@ -845,70 +755,64 @@ static int wpa_set_associate(PSDevice pDevice,
pDevice->eEncryptionStatus = Ndis802_11EncryptionDisabled;
}
- pMgmt->Roam_dbm = param->u.wpa_associate.roam_dbm;
- // if ((pMgmt->Roam_dbm > 40)&&(pMgmt->Roam_dbm<80))
- // pDevice->bEnableRoaming = TRUE;
-
- if (pMgmt->eAuthenMode == WMAC_AUTH_SHAREKEY) { //@wep-sharekey
- pDevice->eEncryptionStatus = Ndis802_11Encryption1Enabled;
- pMgmt->bShareKeyAlgorithm = TRUE;
- }
- else if (pMgmt->eAuthenMode == WMAC_AUTH_OPEN) {
- if(bwepEnabled==TRUE) { //@open-wep
- pDevice->eEncryptionStatus = Ndis802_11Encryption1Enabled;
- }
- else { //@only open
- pDevice->eEncryptionStatus = Ndis802_11EncryptionDisabled;
+ pMgmt->Roam_dbm = param->u.wpa_associate.roam_dbm;
+ if (pMgmt->eAuthenMode == WMAC_AUTH_SHAREKEY) { // @wep-sharekey
+ pDevice->eEncryptionStatus = Ndis802_11Encryption1Enabled;
+ pMgmt->bShareKeyAlgorithm = TRUE;
+ } else if (pMgmt->eAuthenMode == WMAC_AUTH_OPEN) {
+		if (bwepEnabled == TRUE) { // @open-wep
+ pDevice->eEncryptionStatus = Ndis802_11Encryption1Enabled;
+ } else {
+ // @only open
+ pDevice->eEncryptionStatus = Ndis802_11EncryptionDisabled;
}
- }
-//mike save old encryption status
+ }
+ // mike save old encryption status
pDevice->eOldEncryptionStatus = pDevice->eEncryptionStatus;
- if (pDevice->eEncryptionStatus != Ndis802_11EncryptionDisabled)
- pDevice->bEncryptionEnable = TRUE;
- else
- pDevice->bEncryptionEnable = FALSE;
-
- if ((pMgmt->eAuthenMode == WMAC_AUTH_SHAREKEY) ||
- ((pMgmt->eAuthenMode == WMAC_AUTH_OPEN) && (bwepEnabled==TRUE))) {
- //mike re-comment:open-wep && sharekey-wep needn't do initial key!!
-
- }
- else
- KeyvInitTable(pDevice,&pDevice->sKey);
+ if (pDevice->eEncryptionStatus != Ndis802_11EncryptionDisabled)
+ pDevice->bEncryptionEnable = TRUE;
+ else
+ pDevice->bEncryptionEnable = FALSE;
- spin_lock_irq(&pDevice->lock);
- pDevice->bLinkPass = FALSE;
- ControlvMaskByte(pDevice,MESSAGE_REQUEST_MACREG,MAC_REG_PAPEDELAY,LEDSTS_STS,LEDSTS_SLOW);
- memset(pMgmt->abyCurrBSSID, 0, 6);
- pMgmt->eCurrState = WMAC_STATE_IDLE;
- netif_stop_queue(pDevice->dev);
+	if ((pMgmt->eAuthenMode == WMAC_AUTH_SHAREKEY) ||
+	    ((pMgmt->eAuthenMode == WMAC_AUTH_OPEN) && (bwepEnabled == TRUE))) {
+		// mike: open-WEP and sharekey-WEP need no initial key setup
+ } else {
+		KeyvInitTable(pDevice, &pDevice->sKey);
+ }
-/*******search if ap_scan=2 ,which is associating request in hidden ssid mode ****/
-{
- PKnownBSS pCurr = NULL;
- pCurr = BSSpSearchBSSList(pDevice,
- pMgmt->abyDesireBSSID,
- pMgmt->abyDesireSSID,
- pDevice->eConfigPHYMode
- );
-
- if (pCurr == NULL){
- printk("wpa_set_associate---->hidden mode site survey before associate.......\n");
- bScheduleCommand((void *) pDevice,
- WLAN_CMD_BSSID_SCAN,
- pMgmt->abyDesireSSID);
- }
-}
+ spin_lock_irq(&pDevice->lock);
+ pDevice->bLinkPass = FALSE;
+ ControlvMaskByte(pDevice, MESSAGE_REQUEST_MACREG, MAC_REG_PAPEDELAY, LEDSTS_STS, LEDSTS_SLOW);
+ memset(pMgmt->abyCurrBSSID, 0, 6);
+ pMgmt->eCurrState = WMAC_STATE_IDLE;
+ netif_stop_queue(pDevice->dev);
+
+/******* search if ap_scan=2, i.e. an association request in hidden-SSID mode *******/
+ {
+ PKnownBSS pCurr = NULL;
+ pCurr = BSSpSearchBSSList(pDevice,
+ pMgmt->abyDesireBSSID,
+ pMgmt->abyDesireSSID,
+ pDevice->eConfigPHYMode
+ );
+
+	if (pCurr == NULL) {
+ printk("wpa_set_associate---->hidden mode site survey before associate.......\n");
+ bScheduleCommand((void *)pDevice,
+ WLAN_CMD_BSSID_SCAN,
+ pMgmt->abyDesireSSID);
+ }
+ }
/****************************************************************/
- bScheduleCommand((void *) pDevice, WLAN_CMD_SSID, NULL);
- spin_unlock_irq(&pDevice->lock);
+ bScheduleCommand((void *)pDevice, WLAN_CMD_SSID, NULL);
+ spin_unlock_irq(&pDevice->lock);
- return ret;
+ return ret;
}
-
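wpa_set_associate() above selects the authentication mode from the first octet of the caller-supplied IE: the RSN element (standard id 48) selects the WPA2 modes, anything else falls through to the vendor-specific WPA element (standard id 221) modes, and an empty IE means open or shared-key auth. A condensed sketch with hypothetical names and illustrative return codes:

#include <linux/types.h>

static int demo_auth_mode(const u8 *ie, size_t ie_len, bool psk)
{
	if (ie_len == 0)
		return 0;		/* no IE: open / shared-key auth */
	if (ie[0] == 48)		/* RSN element: WPA2 */
		return psk ? 1 : 2;	/* 1 = WPA2-PSK, 2 = WPA2-EAP */
	return psk ? 3 : 4;		/* 3 = WPA-PSK,  4 = WPA-EAP */
}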
/*
* Description:
* wpa_ioctl main function supported for wpa supplicant
@@ -922,7 +826,6 @@ static int wpa_set_associate(PSDevice pDevice,
* Return Value:
*
*/
-
int wpa_ioctl(PSDevice pDevice, struct iw_point *p)
{
struct viawget_wpa_param *param;
@@ -930,10 +833,10 @@ int wpa_ioctl(PSDevice pDevice, struct iw_point *p)
int wpa_ioctl = 0;
if (p->length < sizeof(struct viawget_wpa_param) ||
- p->length > VIAWGET_WPA_MAX_BUF_SIZE || !p->pointer)
+ p->length > VIAWGET_WPA_MAX_BUF_SIZE || !p->pointer)
return -EINVAL;
- param = kmalloc((int)p->length, (int)GFP_KERNEL);
+	param = kmalloc(p->length, GFP_KERNEL);
if (param == NULL)
return -ENOMEM;
@@ -944,63 +847,63 @@ int wpa_ioctl(PSDevice pDevice, struct iw_point *p)
switch (param->cmd) {
case VIAWGET_SET_WPA:
- ret = wpa_set_wpa(pDevice, param);
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_SET_WPA \n");
+ ret = wpa_set_wpa(pDevice, param);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_SET_WPA \n");
break;
case VIAWGET_SET_KEY:
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_SET_KEY \n");
- spin_lock_irq(&pDevice->lock);
- ret = wpa_set_keys(pDevice, param, FALSE);
- spin_unlock_irq(&pDevice->lock);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_SET_KEY \n");
+ spin_lock_irq(&pDevice->lock);
+ ret = wpa_set_keys(pDevice, param, FALSE);
+ spin_unlock_irq(&pDevice->lock);
break;
case VIAWGET_SET_SCAN:
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_SET_SCAN \n");
- ret = wpa_set_scan(pDevice, param);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_SET_SCAN \n");
+ ret = wpa_set_scan(pDevice, param);
break;
case VIAWGET_GET_SCAN:
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_GET_SCAN\n");
- ret = wpa_get_scan(pDevice, param);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_GET_SCAN\n");
+ ret = wpa_get_scan(pDevice, param);
wpa_ioctl = 1;
break;
case VIAWGET_GET_SSID:
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_GET_SSID \n");
- ret = wpa_get_ssid(pDevice, param);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_GET_SSID \n");
+ ret = wpa_get_ssid(pDevice, param);
wpa_ioctl = 1;
break;
case VIAWGET_GET_BSSID:
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_GET_BSSID \n");
- ret = wpa_get_bssid(pDevice, param);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_GET_BSSID \n");
+ ret = wpa_get_bssid(pDevice, param);
wpa_ioctl = 1;
break;
case VIAWGET_SET_ASSOCIATE:
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_SET_ASSOCIATE \n");
- ret = wpa_set_associate(pDevice, param);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_SET_ASSOCIATE \n");
+ ret = wpa_set_associate(pDevice, param);
break;
case VIAWGET_SET_DISASSOCIATE:
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_SET_DISASSOCIATE \n");
- ret = wpa_set_disassociate(pDevice, param);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_SET_DISASSOCIATE \n");
+ ret = wpa_set_disassociate(pDevice, param);
break;
case VIAWGET_SET_DROP_UNENCRYPT:
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_SET_DROP_UNENCRYPT \n");
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_SET_DROP_UNENCRYPT \n");
break;
- case VIAWGET_SET_DEAUTHENTICATE:
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_SET_DEAUTHENTICATE \n");
+ case VIAWGET_SET_DEAUTHENTICATE:
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_SET_DEAUTHENTICATE \n");
break;
default:
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "wpa_ioctl: unknown cmd=%d\n",
- param->cmd);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "wpa_ioctl: unknown cmd=%d\n",
+ param->cmd);
+ kfree(param);
return -EOPNOTSUPP;
- break;
}
if ((ret == 0) && wpa_ioctl) {
@@ -1012,7 +915,5 @@ int wpa_ioctl(PSDevice pDevice, struct iw_point *p)
out:
kfree(param);
-
return ret;
}
-
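The default case in wpa_ioctl() above now frees the parameter block before returning -EOPNOTSUPP; without that kfree() the buffer allocated at the top of the function leaked on unknown commands. The general shape of the pattern, sketched with hypothetical names and an arbitrary sanity bound:

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

struct demo_param { int cmd; };

static int demo_ioctl(void __user *uptr, size_t len)
{
	struct demo_param *param;
	int ret = 0;

	if (len < sizeof(*param) || len > 4096)	/* illustrative bound */
		return -EINVAL;
	param = kmalloc(len, GFP_KERNEL);
	if (!param)
		return -ENOMEM;
	if (copy_from_user(param, uptr, len)) {
		ret = -EFAULT;
		goto out;
	}
	switch (param->cmd) {
	default:
		ret = -EOPNOTSUPP;	/* still reaches the kfree() below */
		break;
	}
out:
	kfree(param);
	return ret;
}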
diff --git a/drivers/staging/wlan-ng/cfg80211.c b/drivers/staging/wlan-ng/cfg80211.c
index fb466f4c92e..4cd3ba5d564 100644
--- a/drivers/staging/wlan-ng/cfg80211.c
+++ b/drivers/staging/wlan-ng/cfg80211.c
@@ -356,7 +356,7 @@ int prism2_scan(struct wiphy *wiphy, struct net_device *dev,
msg1.msgcode = DIDmsg_dot11req_scan;
msg1.bsstype.data = P80211ENUM_bsstype_any;
- memset(&(msg1.bssid.data), 0xFF, sizeof(p80211item_pstr6_t));
+ memset(&msg1.bssid.data.data, 0xFF, sizeof(msg1.bssid.data.data));
msg1.bssid.data.len = 6;
if (request->n_ssids > 0) {
diff --git a/drivers/staging/wlan-ng/p80211netdev.c b/drivers/staging/wlan-ng/p80211netdev.c
index 14bfeb2e704..0f51b4ab363 100644
--- a/drivers/staging/wlan-ng/p80211netdev.c
+++ b/drivers/staging/wlan-ng/p80211netdev.c
@@ -150,7 +150,7 @@ static int p80211knetdev_init(netdevice_t *netdev)
* Returns:
* the address of the statistics structure
----------------------------------------------------------------*/
-static struct net_device_stats *p80211knetdev_get_stats(netdevice_t * netdev)
+static struct net_device_stats *p80211knetdev_get_stats(netdevice_t *netdev)
{
wlandevice_t *wlandev = netdev->ml_priv;
diff --git a/drivers/staging/wlan-ng/prism2mgmt.c b/drivers/staging/wlan-ng/prism2mgmt.c
index 6675c8226ce..c3bb05dd744 100644
--- a/drivers/staging/wlan-ng/prism2mgmt.c
+++ b/drivers/staging/wlan-ng/prism2mgmt.c
@@ -406,6 +406,7 @@ int prism2mgmt_scan_results(wlandevice_t *wlandev, void *msgp)
/* SSID */
req->ssid.status = P80211ENUM_msgitem_status_data_ok;
req->ssid.data.len = le16_to_cpu(item->ssid.len);
+ req->ssid.data.len = min_t(u16, req->ssid.data.len, WLAN_BSSID_LEN);
memcpy(req->ssid.data.data, item->ssid.data, req->ssid.data.len);
/* supported rates */
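The min_t() clamp added above bounds a length that arrives in device data before it is used as a memcpy() size, so a malformed SSID length can no longer overrun the destination. The pattern in isolation (hypothetical names):

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>

static void demo_copy_ssid(u8 *dst, size_t dst_size,
			   const u8 *src, u16 reported_len)
{
	u16 len = min_t(u16, reported_len, dst_size);

	memcpy(dst, src, len);	/* can no longer overrun dst */
}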
diff --git a/drivers/staging/xgifb/XGI_main.h b/drivers/staging/xgifb/XGI_main.h
index 35f7b2a485e..e828fd403c3 100644
--- a/drivers/staging/xgifb/XGI_main.h
+++ b/drivers/staging/xgifb/XGI_main.h
@@ -7,47 +7,32 @@
#include "XGIfb.h"
#include "vb_struct.h"
+#include "../../video/sis/sis.h"
#include "vb_def.h"
#define XGIFAIL(x) do { printk(x "\n"); return -EINVAL; } while (0)
-#ifndef PCI_VENDOR_ID_XG
-#define PCI_VENDOR_ID_XG 0x18CA
+#ifndef PCI_DEVICE_ID_XGI_41
+#define PCI_DEVICE_ID_XGI_41 0x041
#endif
-
-#ifndef PCI_DEVICE_ID_XG_40
-#define PCI_DEVICE_ID_XG_40 0x040
-#endif
-#ifndef PCI_DEVICE_ID_XG_41
-#define PCI_DEVICE_ID_XG_41 0x041
-#endif
-#ifndef PCI_DEVICE_ID_XG_42
-#define PCI_DEVICE_ID_XG_42 0x042
+#ifndef PCI_DEVICE_ID_XGI_42
+#define PCI_DEVICE_ID_XGI_42 0x042
#endif
-#ifndef PCI_DEVICE_ID_XG_20
-#define PCI_DEVICE_ID_XG_20 0x020
-#endif
-#ifndef PCI_DEVICE_ID_XG_27
-#define PCI_DEVICE_ID_XG_27 0x027
+#ifndef PCI_DEVICE_ID_XGI_27
+#define PCI_DEVICE_ID_XGI_27 0x027
#endif
static DEFINE_PCI_DEVICE_TABLE(xgifb_pci_table) = {
- {PCI_DEVICE(PCI_VENDOR_ID_XG, PCI_DEVICE_ID_XG_20)},
- {PCI_DEVICE(PCI_VENDOR_ID_XG, PCI_DEVICE_ID_XG_27)},
- {PCI_DEVICE(PCI_VENDOR_ID_XG, PCI_DEVICE_ID_XG_40)},
- {PCI_DEVICE(PCI_VENDOR_ID_XG, PCI_DEVICE_ID_XG_42)},
+ {PCI_DEVICE(PCI_VENDOR_ID_XGI, PCI_DEVICE_ID_XGI_20)},
+ {PCI_DEVICE(PCI_VENDOR_ID_XGI, PCI_DEVICE_ID_XGI_27)},
+ {PCI_DEVICE(PCI_VENDOR_ID_XGI, PCI_DEVICE_ID_XGI_40)},
+ {PCI_DEVICE(PCI_VENDOR_ID_XGI, PCI_DEVICE_ID_XGI_42)},
{0}
};
MODULE_DEVICE_TABLE(pci, xgifb_pci_table);
/* To be included in fb.h */
-#ifndef FB_ACCEL_XGI_XABRE
-#define FB_ACCEL_XGI_XABRE 41 /* XGI 330 ("Xabre") */
-#endif
-
-#define SEQ_DATA 0x15
-
#define XGISR (xgifb_info->dev_info.P3c4)
#define XGICR (xgifb_info->dev_info.P3d4)
#define XGIDACA (xgifb_info->dev_info.P3c8)
@@ -60,12 +45,6 @@ MODULE_DEVICE_TABLE(pci, xgifb_pci_table);
#define XGIDAC2A XGIPART5
#define XGIDAC2D (XGIPART5 + 1)
-#define IND_XGI_PASSWORD 0x05 /* SRs */
-#define IND_XGI_RAMDAC_CONTROL 0x07
-#define IND_XGI_DRAM_SIZE 0x14
-#define IND_XGI_MODULE_ENABLE 0x1E
-#define IND_XGI_PCI_ADDRESS_SET 0x20
-
#define IND_XGI_SCRATCH_REG_CR30 0x30 /* CRs */
#define IND_XGI_SCRATCH_REG_CR31 0x31
#define IND_XGI_SCRATCH_REG_CR32 0x32
@@ -73,10 +52,6 @@ MODULE_DEVICE_TABLE(pci, xgifb_pci_table);
#define IND_XGI_LCD_PANEL 0x36
#define IND_XGI_SCRATCH_REG_CR37 0x37
-#define IND_XGI_CRT2_WRITE_ENABLE_315 0x2F
-
-#define XGI_PASSWORD 0x86 /* SR05 */
-
#define XGI_DRAM_SIZE_MASK 0xF0 /*SR14 */
#define XGI_DRAM_SIZE_1MB 0x00
#define XGI_DRAM_SIZE_2MB 0x01
@@ -88,37 +63,6 @@ MODULE_DEVICE_TABLE(pci, xgifb_pci_table);
#define XGI_DRAM_SIZE_128MB 0x07
#define XGI_DRAM_SIZE_256MB 0x08
-#define XGI_ENABLE_2D 0x40 /* SR1E */
-
-#define XGI_MEM_MAP_IO_ENABLE 0x01 /* SR20 */
-#define XGI_PCI_ADDR_ENABLE 0x80
-
-#define XGI_SIMULTANEOUS_VIEW_ENABLE 0x01 /* CR30 */
-#define XGI_VB_OUTPUT_COMPOSITE 0x04
-#define XGI_VB_OUTPUT_SVIDEO 0x08
-#define XGI_VB_OUTPUT_SCART 0x10
-#define XGI_VB_OUTPUT_LCD 0x20
-#define XGI_VB_OUTPUT_CRT2 0x40
-#define XGI_VB_OUTPUT_HIVISION 0x80
-
-#define XGI_VB_OUTPUT_DISABLE 0x20 /* CR31 */
-#define XGI_DRIVER_MODE 0x40
-
-#define XGI_VB_COMPOSITE 0x01 /* CR32 */
-#define XGI_VB_SVIDEO 0x02
-#define XGI_VB_SCART 0x04
-#define XGI_VB_LCD 0x08
-#define XGI_VB_CRT2 0x10
-#define XGI_CRT1 0x20
-#define XGI_VB_HIVISION 0x40
-#define XGI_VB_YPBPR 0x80
-#define XGI_VB_TV (XGI_VB_COMPOSITE | XGI_VB_SVIDEO | \
- XGI_VB_SCART | XGI_VB_HIVISION|XGI_VB_YPBPR)
-
-#define XGI_EXTERNAL_CHIP_MASK 0x0E /* CR37 */
-#define XGI310_EXTERNAL_CHIP_LVDS 0x02 /* in CR37 << 1 ! */
-#define XGI310_EXTERNAL_CHIP_LVDS_CHRONTEL 0x03 /* in CR37 << 1 ! */
-
/* ------------------- Global Variables ----------------------------- */
/* display status */
diff --git a/drivers/staging/xgifb/XGI_main_26.c b/drivers/staging/xgifb/XGI_main_26.c
index 2502c49c9c5..21c037827de 100644
--- a/drivers/staging/xgifb/XGI_main_26.c
+++ b/drivers/staging/xgifb/XGI_main_26.c
@@ -4,6 +4,8 @@
* Base on TW's sis fbdev code.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
/* #include <linux/config.h> */
#include <linux/module.h>
#include <linux/moduleparam.h>
@@ -55,7 +57,7 @@ static unsigned int refresh_rate;
#undef XGIFBDEBUG
#ifdef XGIFBDEBUG
-#define DPRINTK(fmt, args...) printk(KERN_DEBUG "%s: " fmt, __FUNCTION__ , ## args)
+#define DPRINTK(fmt, args...) pr_debug("%s: " fmt, __func__ , ## args)
#else
#define DPRINTK(fmt, args...)
#endif
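The printk() conversions below lean on the pr_fmt() definition added at the top of this file: once it is in place, every pr_info()/pr_err()/pr_debug() call is prefixed with the module name automatically, which is why the hand-written "XGIfb: " prefixes disappear from the format strings. A self-contained sketch (KBUILD_MODNAME is supplied by kbuild):

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt	/* must precede the includes */

#include <linux/module.h>
#include <linux/printk.h>

static void demo(void)
{
	/* With KBUILD_MODNAME "xgifb" this logs: "xgifb: chipid = 40" */
	pr_info("chipid = %x\n", 0x40);
}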
@@ -142,7 +144,7 @@ static inline void dumpVGAReg(void)
#if 1
#define DEBUGPRN(x)
#else
-#define DEBUGPRN(x) printk(KERN_INFO x "\n");
+#define DEBUGPRN(x) pr_info(x "\n");
#endif
/* --------------- Hardware Access Routines -------------------------- */
@@ -369,15 +371,15 @@ static void XGIRegInit(struct vb_device_info *XGI_Pr, unsigned long BaseAddr)
XGI_Pr->P3c9 = BaseAddr + 0x19;
XGI_Pr->P3da = BaseAddr + 0x2A;
/* Digital video interface registers (LCD) */
- XGI_Pr->Part1Port = BaseAddr + XGI_CRT2_PORT_04;
+ XGI_Pr->Part1Port = BaseAddr + SIS_CRT2_PORT_04;
/* 301 TV Encoder registers */
- XGI_Pr->Part2Port = BaseAddr + XGI_CRT2_PORT_10;
+ XGI_Pr->Part2Port = BaseAddr + SIS_CRT2_PORT_10;
/* 301 Macrovision registers */
- XGI_Pr->Part3Port = BaseAddr + XGI_CRT2_PORT_12;
+ XGI_Pr->Part3Port = BaseAddr + SIS_CRT2_PORT_12;
/* 301 VGA2 (and LCD) registers */
- XGI_Pr->Part4Port = BaseAddr + XGI_CRT2_PORT_14;
+ XGI_Pr->Part4Port = BaseAddr + SIS_CRT2_PORT_14;
/* 301 palette address port registers */
- XGI_Pr->Part5Port = BaseAddr + XGI_CRT2_PORT_14 + 2;
+ XGI_Pr->Part5Port = BaseAddr + SIS_CRT2_PORT_14 + 2;
}
@@ -424,7 +426,7 @@ static void XGIfb_search_mode(struct xgifb_video_info *xgifb_info,
i++;
}
if (!j)
- printk(KERN_INFO "XGIfb: Invalid mode '%s'\n", name);
+ pr_info("Invalid mode '%s'\n", name);
}
static void XGIfb_search_vesamode(struct xgifb_video_info *xgifb_info,
@@ -449,7 +451,7 @@ static void XGIfb_search_vesamode(struct xgifb_video_info *xgifb_info,
invalid:
if (!j)
- printk(KERN_INFO "XGIfb: Invalid VESA mode 0x%x'\n", vesamode);
+ pr_info("Invalid VESA mode 0x%x'\n", vesamode);
}
static int XGIfb_validate_mode(struct xgifb_video_info *xgifb_info, int myindex)
@@ -526,12 +528,6 @@ static int XGIfb_validate_mode(struct xgifb_video_info *xgifb_info, int myindex)
xres = 1600;
yres = 1200;
break;
- /* case LCD_320x480: */ /* TW: FSTN */
- /*
- xres = 320;
- yres = 480;
- break;
- */
default:
xres = 0;
yres = 0;
@@ -692,7 +688,7 @@ static void XGIfb_search_crt2type(const char *name)
i++;
}
if (XGIfb_crt2type < 0)
- printk(KERN_INFO "XGIfb: Invalid CRT2 type: %s\n", name);
+ pr_info("Invalid CRT2 type: %s\n", name);
}
static u8 XGIfb_search_refresh_rate(struct xgifb_video_info *xgifb_info,
@@ -742,7 +738,7 @@ static u8 XGIfb_search_refresh_rate(struct xgifb_video_info *xgifb_info,
if (xgifb_info->rate_idx > 0) {
return xgifb_info->rate_idx;
} else {
- printk(KERN_INFO "XGIfb: Unsupported rate %d for %dx%d\n",
+ pr_info("Unsupported rate %d for %dx%d\n",
rate, xres, yres);
return 0;
}
@@ -811,27 +807,27 @@ static void XGIfb_pre_setmode(struct xgifb_video_info *xgifb_info)
switch (xgifb_info->display2) {
case XGIFB_DISP_CRT:
- cr30 = (XGI_VB_OUTPUT_CRT2 | XGI_SIMULTANEOUS_VIEW_ENABLE);
- cr31 |= XGI_DRIVER_MODE;
+ cr30 = (SIS_VB_OUTPUT_CRT2 | SIS_SIMULTANEOUS_VIEW_ENABLE);
+ cr31 |= SIS_DRIVER_MODE;
break;
case XGIFB_DISP_LCD:
- cr30 = (XGI_VB_OUTPUT_LCD | XGI_SIMULTANEOUS_VIEW_ENABLE);
- cr31 |= XGI_DRIVER_MODE;
+ cr30 = (SIS_VB_OUTPUT_LCD | SIS_SIMULTANEOUS_VIEW_ENABLE);
+ cr31 |= SIS_DRIVER_MODE;
break;
case XGIFB_DISP_TV:
if (xgifb_info->TV_type == TVMODE_HIVISION)
- cr30 = (XGI_VB_OUTPUT_HIVISION
- | XGI_SIMULTANEOUS_VIEW_ENABLE);
+ cr30 = (SIS_VB_OUTPUT_HIVISION
+ | SIS_SIMULTANEOUS_VIEW_ENABLE);
else if (xgifb_info->TV_plug == TVPLUG_SVIDEO)
- cr30 = (XGI_VB_OUTPUT_SVIDEO
- | XGI_SIMULTANEOUS_VIEW_ENABLE);
+ cr30 = (SIS_VB_OUTPUT_SVIDEO
+ | SIS_SIMULTANEOUS_VIEW_ENABLE);
else if (xgifb_info->TV_plug == TVPLUG_COMPOSITE)
- cr30 = (XGI_VB_OUTPUT_COMPOSITE
- | XGI_SIMULTANEOUS_VIEW_ENABLE);
+ cr30 = (SIS_VB_OUTPUT_COMPOSITE
+ | SIS_SIMULTANEOUS_VIEW_ENABLE);
else if (xgifb_info->TV_plug == TVPLUG_SCART)
- cr30 = (XGI_VB_OUTPUT_SCART
- | XGI_SIMULTANEOUS_VIEW_ENABLE);
- cr31 |= XGI_DRIVER_MODE;
+ cr30 = (SIS_VB_OUTPUT_SCART
+ | SIS_SIMULTANEOUS_VIEW_ENABLE);
+ cr31 |= SIS_DRIVER_MODE;
if (XGIfb_tvmode == 1 || xgifb_info->TV_type == TVMODE_PAL)
cr31 |= 0x01;
@@ -840,7 +836,7 @@ static void XGIfb_pre_setmode(struct xgifb_video_info *xgifb_info)
break;
default: /* disable CRT2 */
cr30 = 0x00;
- cr31 |= (XGI_DRIVER_MODE | XGI_VB_OUTPUT_DISABLE);
+ cr31 |= (SIS_DRIVER_MODE | SIS_VB_OUTPUT_DISABLE);
}
xgifb_reg_set(XGICR, IND_XGI_SCRATCH_REG_CR30, cr30);
@@ -854,7 +850,7 @@ static void XGIfb_post_setmode(struct xgifb_video_info *xgifb_info)
u8 reg;
unsigned char doit = 1;
/*
- xgifb_reg_set(XGISR,IND_XGI_PASSWORD,XGI_PASSWORD);
+ xgifb_reg_set(XGISR,IND_SIS_PASSWORD,SIS_PASSWORD);
xgifb_reg_set(XGICR, 0x13, 0x00);
xgifb_reg_and_or(XGISR,0x0E, 0xF0, 0x01);
*test*
@@ -890,7 +886,7 @@ static void XGIfb_post_setmode(struct xgifb_video_info *xgifb_info)
reg |= 0x80;
xgifb_reg_set(XGICR, 0x17, reg);
- xgifb_reg_and(XGISR, IND_XGI_RAMDAC_CONTROL, ~0x04);
+ xgifb_reg_and(XGISR, IND_SIS_RAMDAC_CONTROL, ~0x04);
if (xgifb_info->display2 == XGIFB_DISP_TV &&
xgifb_info->hasVB == HASVB_301) {
@@ -923,7 +919,7 @@ static void XGIfb_post_setmode(struct xgifb_video_info *xgifb_info)
break;
}
xgifb_reg_or(XGIPART1,
- IND_XGI_CRT2_WRITE_ENABLE_315,
+ SIS_CRT2_WENABLE_315,
0x01);
if (xgifb_info->TV_type == TVMODE_NTSC) {
@@ -1118,7 +1114,7 @@ static int XGIfb_do_set_var(struct fb_var_screeninfo *var, int isactive,
if (!htotal || !vtotal) {
DPRINTK("XGIfb: Invalid 'var' information\n");
return -EINVAL;
- } printk(KERN_DEBUG "XGIfb: var->pixclock=%d, htotal=%d, vtotal=%d\n",
+ } pr_debug("var->pixclock=%d, htotal=%d, vtotal=%d\n",
var->pixclock, htotal, vtotal);
if (var->pixclock && htotal && vtotal) {
@@ -1130,7 +1126,7 @@ static int XGIfb_do_set_var(struct fb_var_screeninfo *var, int isactive,
xgifb_info->refresh_rate = 60;
}
- printk(KERN_DEBUG "XGIfb: Change mode to %dx%dx%d-%dHz\n",
+ pr_debug("Change mode to %dx%dx%d-%dHz\n",
var->xres,
var->yres,
var->bits_per_pixel,
@@ -1158,7 +1154,7 @@ static int XGIfb_do_set_var(struct fb_var_screeninfo *var, int isactive,
xgifb_info->mode_idx = -1;
if (xgifb_info->mode_idx < 0) {
- printk(KERN_ERR "XGIfb: Mode %dx%dx%d not supported\n",
+ pr_err("Mode %dx%dx%d not supported\n",
var->xres, var->yres, var->bits_per_pixel);
xgifb_info->mode_idx = old_mode;
return -EINVAL;
@@ -1177,14 +1173,14 @@ static int XGIfb_do_set_var(struct fb_var_screeninfo *var, int isactive,
if (XGISetModeNew(xgifb_info, hw_info,
XGIbios_mode[xgifb_info->mode_idx].mode_no)
== 0) {
- printk(KERN_ERR "XGIfb: Setting mode[0x%x] failed\n",
+ pr_err("Setting mode[0x%x] failed\n",
XGIbios_mode[xgifb_info->mode_idx].mode_no);
return -EINVAL;
}
info->fix.line_length = ((info->var.xres_virtual
* info->var.bits_per_pixel) >> 6);
- xgifb_reg_set(XGISR, IND_XGI_PASSWORD, XGI_PASSWORD);
+ xgifb_reg_set(XGISR, IND_SIS_PASSWORD, SIS_PASSWORD);
xgifb_reg_set(XGICR, 0x13, (info->fix.line_length & 0x00ff));
xgifb_reg_set(XGISR,
@@ -1239,7 +1235,7 @@ static int XGIfb_do_set_var(struct fb_var_screeninfo *var, int isactive,
break;
default:
xgifb_info->video_cmap_len = 16;
- printk(KERN_ERR "XGIfb: Unsupported depth %d",
+ pr_err("Unsupported depth %d",
xgifb_info->video_bpp);
break;
}
@@ -1273,7 +1269,7 @@ static int XGIfb_pan_var(struct fb_var_screeninfo *var, struct fb_info *info)
break;
}
- xgifb_reg_set(XGISR, IND_XGI_PASSWORD, XGI_PASSWORD);
+ xgifb_reg_set(XGISR, IND_SIS_PASSWORD, SIS_PASSWORD);
xgifb_reg_set(XGICR, 0x0D, base & 0xFF);
xgifb_reg_set(XGICR, 0x0C, (base >> 8) & 0xFF);
@@ -1282,7 +1278,7 @@ static int XGIfb_pan_var(struct fb_var_screeninfo *var, struct fb_info *info)
xgifb_reg_and_or(XGISR, 0x37, 0xDF, (base >> 21) & 0x04);
if (xgifb_info->display2 != XGIFB_DISP_NONE) {
- xgifb_reg_or(XGIPART1, IND_XGI_CRT2_WRITE_ENABLE_315, 0x01);
+ xgifb_reg_or(XGIPART1, SIS_CRT2_WENABLE_315, 0x01);
xgifb_reg_set(XGIPART1, 0x06, (base & 0xFF));
xgifb_reg_set(XGIPART1, 0x05, ((base >> 8) & 0xFF));
xgifb_reg_set(XGIPART1, 0x04, ((base >> 16) & 0xFF));
@@ -1387,7 +1383,7 @@ static int XGIfb_get_fix(struct fb_fix_screeninfo *fix, int con,
fix->line_length = xgifb_info->video_linelength;
fix->mmio_start = xgifb_info->mmio_base;
fix->mmio_len = xgifb_info->mmio_size;
- fix->accel = FB_ACCEL_XGI_XABRE;
+ fix->accel = FB_ACCEL_SIS_XABRE;
DEBUGPRN("end of get_fix");
return 0;
@@ -1441,7 +1437,7 @@ static int XGIfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
hrate = (drate * 1000) / htotal;
xgifb_info->refresh_rate =
(unsigned int) (hrate * 2 / vtotal);
- printk(KERN_DEBUG
+ pr_debug(
"%s: pixclock = %d ,htotal=%d, vtotal=%d\n"
"%s: drate=%d, hrate=%d, refresh_rate=%d\n",
__func__, var->pixclock, htotal, vtotal,
@@ -1479,7 +1475,7 @@ static int XGIfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
if (!found_mode) {
- printk(KERN_ERR "XGIfb: %dx%dx%d is no valid mode\n",
+ pr_err("%dx%dx%d is no valid mode\n",
var->xres, var->yres, var->bits_per_pixel);
search_idx = 0;
while (XGIbios_mode[search_idx].mode_no != 0) {
@@ -1498,11 +1494,11 @@ static int XGIfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
if (found_mode) {
var->xres = XGIbios_mode[search_idx].xres;
var->yres = XGIbios_mode[search_idx].yres;
- printk(KERN_DEBUG "XGIfb: Adapted to mode %dx%dx%d\n",
+ pr_debug("Adapted to mode %dx%dx%d\n",
var->xres, var->yres, var->bits_per_pixel);
} else {
- printk(KERN_ERR "XGIfb: Failed to find similar mode to %dx%dx%d\n",
+ pr_err("Failed to find similar mode to %dx%dx%d\n",
var->xres, var->yres, var->bits_per_pixel);
return -EINVAL;
}
@@ -1634,9 +1630,9 @@ static int XGIfb_get_dram_size(struct xgifb_video_info *xgifb_info)
/* xorg driver sets 32MB * 1 channel */
if (xgifb_info->chip == XG27)
- xgifb_reg_set(XGISR, IND_XGI_DRAM_SIZE, 0x51);
+ xgifb_reg_set(XGISR, IND_SIS_DRAM_SIZE, 0x51);
- reg = xgifb_reg_get(XGISR, IND_XGI_DRAM_SIZE);
+ reg = xgifb_reg_get(XGISR, IND_SIS_DRAM_SIZE);
switch ((reg & XGI_DRAM_SIZE_MASK) >> 4) {
case XGI_DRAM_SIZE_1MB:
xgifb_info->video_size = 0x100000;
@@ -1711,7 +1707,7 @@ static int XGIfb_get_dram_size(struct xgifb_video_info *xgifb_info)
/* xgifb_info->video_size = 0x200000; */ /* 1024x768x16 */
/* xgifb_info->video_size = 0x1000000; */ /* benchmark */
- printk("XGIfb: SR14=%x DramSzie %x ChannelNum %x\n",
+	pr_info("SR14=%x DramSize %x ChannelNum %x\n",
reg,
xgifb_info->video_size, ChannelNum);
return 0;
@@ -1736,7 +1732,7 @@ static void XGIfb_detect_VB(struct xgifb_video_info *xgifb_info)
cr32 = xgifb_reg_get(XGICR, IND_XGI_SCRATCH_REG_CR32);
- if ((cr32 & XGI_CRT1) && !XGIfb_crt1off)
+ if ((cr32 & SIS_CRT1) && !XGIfb_crt1off)
XGIfb_crt1off = 0;
else {
if (cr32 & 0x5F)
@@ -1746,11 +1742,11 @@ static void XGIfb_detect_VB(struct xgifb_video_info *xgifb_info)
}
if (!xgifb_info->display2_force) {
- if (cr32 & XGI_VB_TV)
+ if (cr32 & SIS_VB_TV)
xgifb_info->display2 = XGIFB_DISP_TV;
- else if (cr32 & XGI_VB_LCD)
+ else if (cr32 & SIS_VB_LCD)
xgifb_info->display2 = XGIFB_DISP_LCD;
- else if (cr32 & XGI_VB_CRT2)
+ else if (cr32 & SIS_VB_CRT2)
xgifb_info->display2 = XGIFB_DISP_CRT;
else
xgifb_info->display2 = XGIFB_DISP_NONE;
@@ -1759,14 +1755,14 @@ static void XGIfb_detect_VB(struct xgifb_video_info *xgifb_info)
if (XGIfb_tvplug != -1)
/* PR/TW: Override with option */
xgifb_info->TV_plug = XGIfb_tvplug;
- else if (cr32 & XGI_VB_HIVISION) {
+ else if (cr32 & SIS_VB_HIVISION) {
xgifb_info->TV_type = TVMODE_HIVISION;
xgifb_info->TV_plug = TVPLUG_SVIDEO;
- } else if (cr32 & XGI_VB_SVIDEO)
+ } else if (cr32 & SIS_VB_SVIDEO)
xgifb_info->TV_plug = TVPLUG_SVIDEO;
- else if (cr32 & XGI_VB_COMPOSITE)
+ else if (cr32 & SIS_VB_COMPOSITE)
xgifb_info->TV_plug = TVPLUG_COMPOSITE;
- else if (cr32 & XGI_VB_SCART)
+ else if (cr32 & SIS_VB_SCART)
xgifb_info->TV_plug = TVPLUG_SCART;
if (xgifb_info->TV_type == 0) {
@@ -1811,11 +1807,11 @@ static void XGIfb_get_VB_type(struct xgifb_video_info *xgifb_info)
if (!XGIfb_has_VB(xgifb_info)) {
reg = xgifb_reg_get(XGICR, IND_XGI_SCRATCH_REG_CR37);
- switch ((reg & XGI_EXTERNAL_CHIP_MASK) >> 1) {
- case XGI310_EXTERNAL_CHIP_LVDS:
+ switch ((reg & SIS_EXTERNAL_CHIP_MASK) >> 1) {
+ case SIS_EXTERNAL_CHIP_LVDS:
xgifb_info->hasVB = HASVB_LVDS;
break;
- case XGI310_EXTERNAL_CHIP_LVDS_CHRONTEL:
+ case SIS_EXTERNAL_CHIP_LVDS_CHRONTEL:
xgifb_info->hasVB = HASVB_LVDS_CHRONTEL;
break;
default:
@@ -1917,7 +1913,7 @@ static int __devinit xgifb_probe(struct pci_dev *pdev,
xgifb_info->vga_base = pci_resource_start(pdev, 2) + 0x30;
hw_info->pjIOAddress = (unsigned char *)xgifb_info->vga_base;
/* XGI_Pr.RelIO = ioremap(pci_resource_start(pdev, 2), 128) + 0x30; */
- printk("XGIfb: Relocate IO address: %lx [%08lx]\n",
+ pr_info("Relocate IO address: %lx [%08lx]\n",
(unsigned long)pci_resource_start(pdev, 2),
xgifb_info->dev_info.RelIO);
@@ -1933,17 +1929,17 @@ static int __devinit xgifb_probe(struct pci_dev *pdev,
XGIRegInit(&xgifb_info->dev_info, (unsigned long)hw_info->pjIOAddress);
- xgifb_reg_set(XGISR, IND_XGI_PASSWORD, XGI_PASSWORD);
- reg1 = xgifb_reg_get(XGISR, IND_XGI_PASSWORD);
+ xgifb_reg_set(XGISR, IND_SIS_PASSWORD, SIS_PASSWORD);
+ reg1 = xgifb_reg_get(XGISR, IND_SIS_PASSWORD);
if (reg1 != 0xa1) { /*I/O error */
- printk("\nXGIfb: I/O error!!!");
+		pr_err("I/O error!!!\n");
ret = -EIO;
goto error;
}
switch (xgifb_info->chip_id) {
- case PCI_DEVICE_ID_XG_20:
+ case PCI_DEVICE_ID_XGI_20:
xgifb_reg_or(XGICR, Index_CR_GPIO_Reg3, GPIOG_EN);
CR48 = xgifb_reg_get(XGICR, Index_CR_GPIO_Reg1);
if (CR48&GPIOG_READ)
@@ -1951,16 +1947,16 @@ static int __devinit xgifb_probe(struct pci_dev *pdev,
else
xgifb_info->chip = XG20;
break;
- case PCI_DEVICE_ID_XG_40:
+ case PCI_DEVICE_ID_XGI_40:
xgifb_info->chip = XG40;
break;
- case PCI_DEVICE_ID_XG_41:
+ case PCI_DEVICE_ID_XGI_41:
xgifb_info->chip = XG41;
break;
- case PCI_DEVICE_ID_XG_42:
+ case PCI_DEVICE_ID_XGI_42:
xgifb_info->chip = XG42;
break;
- case PCI_DEVICE_ID_XG_27:
+ case PCI_DEVICE_ID_XGI_27:
xgifb_info->chip = XG27;
break;
default:
@@ -1968,31 +1964,31 @@ static int __devinit xgifb_probe(struct pci_dev *pdev,
goto error;
}
- printk("XGIfb:chipid = %x\n", xgifb_info->chip);
+ pr_info("chipid = %x\n", xgifb_info->chip);
hw_info->jChipType = xgifb_info->chip;
if (XGIfb_get_dram_size(xgifb_info)) {
- printk(KERN_INFO "XGIfb: Fatal error: Unable to determine RAM size.\n");
+ pr_err("Fatal error: Unable to determine RAM size.\n");
ret = -ENODEV;
goto error;
}
/* Enable PCI_LINEAR_ADDRESSING and MMIO_ENABLE */
xgifb_reg_or(XGISR,
- IND_XGI_PCI_ADDRESS_SET,
- (XGI_PCI_ADDR_ENABLE | XGI_MEM_MAP_IO_ENABLE));
+ IND_SIS_PCI_ADDRESS_SET,
+ (SIS_PCI_ADDR_ENABLE | SIS_MEM_MAP_IO_ENABLE));
/* Enable 2D accelerator engine */
- xgifb_reg_or(XGISR, IND_XGI_MODULE_ENABLE, XGI_ENABLE_2D);
+ xgifb_reg_or(XGISR, IND_SIS_MODULE_ENABLE, SIS_ENABLE_2D);
hw_info->ulVideoMemorySize = xgifb_info->video_size;
if (!request_mem_region(xgifb_info->video_base,
xgifb_info->video_size,
"XGIfb FB")) {
- printk("unable request memory size %x",
+		pr_err("unable to request memory region of size %x\n",
xgifb_info->video_size);
- printk(KERN_ERR "XGIfb: Fatal error: Unable to reserve frame buffer memory\n");
- printk(KERN_ERR "XGIfb: Is there another framebuffer driver active?\n");
+ pr_err("Fatal error: Unable to reserve frame buffer memory\n");
+ pr_err("Is there another framebuffer driver active?\n");
ret = -ENODEV;
goto error;
}
@@ -2000,7 +1996,7 @@ static int __devinit xgifb_probe(struct pci_dev *pdev,
if (!request_mem_region(xgifb_info->mmio_base,
xgifb_info->mmio_size,
"XGIfb MMIO")) {
- printk(KERN_ERR "XGIfb: Fatal error: Unable to reserve MMIO region\n");
+ pr_err("Fatal error: Unable to reserve MMIO region\n");
ret = -ENODEV;
goto error_0;
}
@@ -2010,20 +2006,18 @@ static int __devinit xgifb_probe(struct pci_dev *pdev,
xgifb_info->mmio_vbase = ioremap(xgifb_info->mmio_base,
xgifb_info->mmio_size);
- printk(KERN_INFO "XGIfb: Framebuffer at 0x%lx, mapped to 0x%p, size %dk\n",
+ pr_info("Framebuffer at 0x%lx, mapped to 0x%p, size %dk\n",
xgifb_info->video_base,
xgifb_info->video_vbase,
xgifb_info->video_size / 1024);
- printk(KERN_INFO "XGIfb: MMIO at 0x%lx, mapped to 0x%p, size %ldk\n",
+ pr_info("MMIO at 0x%lx, mapped to 0x%p, size %ldk\n",
xgifb_info->mmio_base, xgifb_info->mmio_vbase,
xgifb_info->mmio_size / 1024);
- printk("XGIfb: XGIInitNew() ...");
+
pci_set_drvdata(pdev, xgifb_info);
- if (XGIInitNew(pdev))
- printk("OK\n");
- else
- printk("Fail\n");
+ if (!XGIInitNew(pdev))
+ pr_err("XGIInitNew() failed!\n");
xgifb_info->mtrr = (unsigned int) 0;
@@ -2033,13 +2027,12 @@ static int __devinit xgifb_probe(struct pci_dev *pdev,
xgifb_info->hasVB = HASVB_NONE;
} else if (xgifb_info->chip == XG21) {
CR38 = xgifb_reg_get(XGICR, 0x38);
- if ((CR38&0xE0) == 0xC0) {
+ if ((CR38&0xE0) == 0xC0)
xgifb_info->display2 = XGIFB_DISP_LCD;
- } else if ((CR38&0xE0) == 0x60) {
+ else if ((CR38&0xE0) == 0x60)
xgifb_info->hasVB = HASVB_CHRONTEL;
- } else {
+ else
xgifb_info->hasVB = HASVB_NONE;
- }
} else {
XGIfb_get_VB_type(xgifb_info);
}
@@ -2053,10 +2046,10 @@ static int __devinit xgifb_probe(struct pci_dev *pdev,
reg = xgifb_reg_get(XGIPART4, 0x01);
if (reg >= 0xE0) {
hw_info->ujVBChipID = VB_CHIP_302LV;
- printk(KERN_INFO "XGIfb: XGI302LV bridge detected (revision 0x%02x)\n", reg);
+ pr_info("XGI302LV bridge detected (revision 0x%02x)\n", reg);
} else if (reg >= 0xD0) {
hw_info->ujVBChipID = VB_CHIP_301LV;
- printk(KERN_INFO "XGIfb: XGI301LV bridge detected (revision 0x%02x)\n", reg);
+ pr_info("XGI301LV bridge detected (revision 0x%02x)\n", reg);
}
/* else if (reg >= 0xB0) {
hw_info->ujVBChipID = VB_CHIP_301B;
@@ -2065,17 +2058,17 @@ static int __devinit xgifb_probe(struct pci_dev *pdev,
} */
else {
hw_info->ujVBChipID = VB_CHIP_301;
- printk("XGIfb: XGI301 bridge detected\n");
+ pr_info("XGI301 bridge detected\n");
}
break;
case HASVB_302:
reg = xgifb_reg_get(XGIPART4, 0x01);
if (reg >= 0xE0) {
hw_info->ujVBChipID = VB_CHIP_302LV;
- printk(KERN_INFO "XGIfb: XGI302LV bridge detected (revision 0x%02x)\n", reg);
+ pr_info("XGI302LV bridge detected (revision 0x%02x)\n", reg);
} else if (reg >= 0xD0) {
hw_info->ujVBChipID = VB_CHIP_301LV;
- printk(KERN_INFO "XGIfb: XGI302LV bridge detected (revision 0x%02x)\n", reg);
+ pr_info("XGI302LV bridge detected (revision 0x%02x)\n", reg);
} else if (reg >= 0xB0) {
reg1 = xgifb_reg_get(XGIPART4, 0x23);
@@ -2083,27 +2076,27 @@ static int __devinit xgifb_probe(struct pci_dev *pdev,
} else {
hw_info->ujVBChipID = VB_CHIP_302;
- printk(KERN_INFO "XGIfb: XGI302 bridge detected\n");
+ pr_info("XGI302 bridge detected\n");
}
break;
case HASVB_LVDS:
hw_info->ulExternalChip = 0x1;
- printk(KERN_INFO "XGIfb: LVDS transmitter detected\n");
+ pr_info("LVDS transmitter detected\n");
break;
case HASVB_TRUMPION:
hw_info->ulExternalChip = 0x2;
- printk(KERN_INFO "XGIfb: Trumpion Zurac LVDS scaler detected\n");
+ pr_info("Trumpion Zurac LVDS scaler detected\n");
break;
case HASVB_CHRONTEL:
hw_info->ulExternalChip = 0x4;
- printk(KERN_INFO "XGIfb: Chrontel TV encoder detected\n");
+ pr_info("Chrontel TV encoder detected\n");
break;
case HASVB_LVDS_CHRONTEL:
hw_info->ulExternalChip = 0x5;
- printk(KERN_INFO "XGIfb: LVDS transmitter and Chrontel TV encoder detected\n");
+ pr_info("LVDS transmitter and Chrontel TV encoder detected\n");
break;
default:
- printk(KERN_INFO "XGIfb: No or unknown bridge type detected\n");
+ pr_info("No or unknown bridge type detected\n");
break;
}
@@ -2117,10 +2110,6 @@ static int __devinit xgifb_probe(struct pci_dev *pdev,
reg = xgifb_reg_get(XGICR, IND_XGI_LCD_PANEL);
reg &= 0x0f;
hw_info->ulCRT2LCDType = XGI310paneltype[reg];
-
- } else {
- /* TW: FSTN/DSTN */
- hw_info->ulCRT2LCDType = LCD_320x480;
}
}
@@ -2147,9 +2136,6 @@ static int __devinit xgifb_probe(struct pci_dev *pdev,
if (tmp & 0x20) {
tmp = xgifb_reg_get(
XGIPART1, 0x13);
- if (tmp & 0x04) {
- /* XGI_Pr.XGI_UseLCDA = 1; */
- }
}
}
}
@@ -2222,12 +2208,12 @@ static int __devinit xgifb_probe(struct pci_dev *pdev,
break;
default:
xgifb_info->video_cmap_len = 16;
- printk(KERN_INFO "XGIfb: Unsupported depth %d",
+ pr_info("Unsupported depth %d\n",
xgifb_info->video_bpp);
break;
}
- printk(KERN_INFO "XGIfb: Default mode is %dx%dx%d (%dHz)\n",
+ pr_info("Default mode is %dx%dx%d (%dHz)\n",
xgifb_info->video_width,
xgifb_info->video_height,
xgifb_info->video_bpp,
@@ -2404,7 +2390,7 @@ MODULE_PARM_DESC(filter,
static void __exit xgifb_remove_module(void)
{
pci_unregister_driver(&xgifb_driver);
- printk(KERN_DEBUG "xgifb: Module unloaded\n");
+ pr_debug("Module unloaded\n");
}
module_exit(xgifb_remove_module);
diff --git a/drivers/staging/xgifb/XGIfb.h b/drivers/staging/xgifb/XGIfb.h
index 2c866bb65a0..37bb730de04 100644
--- a/drivers/staging/xgifb/XGIfb.h
+++ b/drivers/staging/xgifb/XGIfb.h
@@ -3,8 +3,8 @@
#include <linux/ioctl.h>
#include <linux/types.h>
-#include "vb_struct.h"
#include "vgatypes.h"
+#include "vb_struct.h"
enum xgifb_display_type {
XGIFB_DISP_NONE = 0,
diff --git a/drivers/staging/xgifb/vb_def.h b/drivers/staging/xgifb/vb_def.h
index 5beeef99bb1..c7317931f67 100644
--- a/drivers/staging/xgifb/vb_def.h
+++ b/drivers/staging/xgifb/vb_def.h
@@ -1,153 +1,48 @@
/* $XFree86: xc/programs/Xserver/hw/xfree86/drivers/xgi/initdef.h
* ,v 1.4 2000/12/02 01:16:17 dawes Exp $*/
-#ifndef _INITDEF_
-#define _INITDEF_
+#ifndef _VB_DEF_
+#define _VB_DEF_
+#include "../../video/sis/initdef.h"
#define VB_XGI301C 0x0020 /* for 301C */
-/*end 301b*/
-
-#define VB_YPbPr525p 0x01
-#define VB_YPbPr750p 0x02
#define VB_YPbPr1080i 0x03
#define LVDSCRT1Len 15
-
-#define SupportCHTV 0x0800
#define SupportCRT2in301C 0x0100 /* for 301C */
#define SetCHTVOverScan 0x8000
-#define PanelRGB18Bit 0x0100
-#define PanelRGB24Bit 0x0000
-#define Panel320x480 0x07 /*fstn*/
+#define Panel_320x480 0x07 /*fstn*/
/* [ycchen] 02/12/03 Modify for Multi-Sync. LCD Support */
#define PanelResInfo 0x1F /* CR36 Panel Type/LCDResInfo */
-#define Panel800x600 0x01
-#define Panel1024x768 0x02
-#define Panel1024x768x75 0x22
-#define Panel1280x1024 0x03
-#define Panel1280x1024x75 0x23
-#define Panel640x480 0x04
-#define Panel1280x960 0x07
-#define Panel1400x1050 0x09
-#define Panel1600x1200 0x0B
+#define Panel_1024x768x75 0x22
+#define Panel_1280x1024x75 0x23
#define PanelRef60Hz 0x00
#define PanelRef75Hz 0x20
-#define CRT2DisplayFlag 0x2000
-
#define YPbPr525iVCLK 0x03B
#define YPbPr525iVCLK_2 0x03A
#define XGI_CRT2_PORT_00 (0x00 - 0x030)
-#define XGI_CRT2_PORT_04 (0x04 - 0x030)
-#define XGI_CRT2_PORT_10 (0x10 - 0x30)
-#define XGI_CRT2_PORT_12 (0x12 - 0x30)
-#define XGI_CRT2_PORT_14 (0x14 - 0x30)
-
-#define _PanelType00 0x00
-#define _PanelType01 0x08
-#define _PanelType02 0x10
-#define _PanelType03 0x18
-#define _PanelType04 0x20
-#define _PanelType05 0x28
-#define _PanelType06 0x30
-#define _PanelType07 0x38
-#define _PanelType08 0x40
-#define _PanelType09 0x48
-#define _PanelType0A 0x50
-#define _PanelType0B 0x58
-#define _PanelType0C 0x60
-#define _PanelType0D 0x68
-#define _PanelType0E 0x70
-#define _PanelType0F 0x78
/* =============================================================
for 310
============================================================== */
-/* add LCDDataList for GetLCDPtr */
-#define LCDDataList (VBIOSTablePointerStart+0x22)
-/* */
-/* Modify from 310.inc */
-/* */
-/* */
-
#define ModeSoftSetting 0x04
-#define BoardTVType 0x02
-
-#define SoftDRAMType 0x80 /* DRAMSetting */
-
/* ---------------- SetMode Stack */
#define CRT1Len 15
#define VCLKLen 4
-#define VGA_XGI340 0x0001 /* 340 series */
-
-#define VB_XGI301 0x0001 /* VB Type Info */
-#define VB_XGI301B 0x0002 /* 301 series */
-#define VB_XGI302B 0x0004
-#define VB_NoLCD 0x8000
-#define VB_XGI301LV 0x0008
-#define VB_XGI302LV 0x0010
-#define VB_LVDS_NS 0x0001 /* 3rd party chip */
-
-#define ModeInfoFlag 0x0007
-#define ModeText 0x0000
-#define ModeEGA 0x0002 /* 16 colors mode */
-#define ModeVGA 0x0003 /* 256 colors mode */
-
-#define DACInfoFlag 0x0018
-
-#define MemoryInfoFlag 0x01e0
-#define MemorySizeShift 5
-
-#define Charx8Dot 0x0200
-#define LineCompareOff 0x0400
-#define CRT2Mode 0x0800
-#define HalfDCLK 0x1000
-#define NoSupportSimuTV 0x2000
-#define DoubleScanMode 0x8000
-
-/* -------------- Ext_InfoFlag */
-#define Support16Bpp 0x0005
-#define Support32Bpp 0x0007
#define SupportAllCRT2 0x0078
-#define SupportTV 0x0008
-#define SupportHiVisionTV 0x0010
-#define SupportLCD 0x0020
-#define SupportRAMDAC2 0x0040
#define NoSupportTV 0x0070
#define NoSupportHiVisionTV 0x0060
#define NoSupportLCD 0x0058
-#define SupportTV1024 0x0800 /* 301btest */
-#define SupportYPbPr 0x1000 /* 301lv */
-#define InterlaceMode 0x0080
-#define SyncPP 0x0000
-#define SyncPN 0x4000
-#define SyncNP 0x8000
-#define SyncNN 0xC000
/* -------------- SetMode Stack/Scratch */
-#define SetSimuScanMode 0x0001 /* VBInfo/CR30 & CR31 */
-#define SwitchToCRT2 0x0002
-#define SetCRT2ToTV 0x089C
-#define SetCRT2ToAVIDEO 0x0004
-#define SetCRT2ToSVIDEO 0x0008
-#define SetCRT2ToSCART 0x0010
-#define SetCRT2ToLCD 0x0020
-#define SetCRT2ToRAMDAC 0x0040
-#define SetCRT2ToHiVisionTV 0x0080
-#define SetCRT2ToLCDA 0x0100
-#define SetInSlaveMode 0x0200
-#define SetNotSimuMode 0x0400
-#define SetCRT2ToYPbPr 0x0800
-#define LoadDACFlag 0x1000
-#define DisableCRT2Display 0x2000
-#define DriverMode 0x4000
+#define XGI_SetCRT2ToLCDA 0x0100
#define SetCRT2ToDualEdge 0x8000
-#define ProgrammingCRT2 0x0001 /* Set Flag */
#define ReserveTVOption 0x0008
#define GatingCRT 0x0800
#define DisableChB 0x1000
@@ -155,23 +50,14 @@
#define DisableChA 0x4000
#define EnableChA 0x8000
-#define SetNTSCTV 0x0000 /* TV Info */
-#define SetPALTV 0x0001
-#define SetNTSCJ 0x0002
-#define SetPALMTV 0x0004
-#define SetPALNTV 0x0008
-#define SetYPbPrMode525i 0x0020
-#define SetYPbPrMode525p 0x0040
-#define SetYPbPrMode750p 0x0080
-#define SetYPbPrMode1080i 0x0100
#define SetTVLowResolution 0x0400
#define TVSimuMode 0x0800
#define RPLLDIV2XO 0x1000
#define NTSC1024x768 0x2000
#define SetTVLockMode 0x4000
-#define LCDVESATiming 0x0001 /* LCD Info/CR37 */
-#define EnableLVDSDDA 0x0002
+#define XGI_LCDVESATiming 0x0001 /* LCD Info/CR37 */
+#define XGI_EnableLVDSDDA 0x0002
#define EnableScalingLCD 0x0008
#define SetPWDEnable 0x0004
#define SetLCDtoNonExpanding 0x0010
@@ -184,7 +70,7 @@
#define EnableLCD24bpp 0x0004 /* default */
#define DisableLCD24bpp 0x0000
#define LCDPolarity 0x00c0 /* default: SyncNN */
-#define LCDDualLink 0x0100
+#define XGI_LCDDualLink 0x0100
#define EnableSpectrum 0x0200
#define PWDEnable 0x0400
#define EnableVBCLKDRVLOW 0x4000
@@ -206,31 +92,21 @@
#define TVSense 0xc7
-#define TVOverScan 0x10 /* CR35 */
-
#define YPbPrMode 0xe0
#define YPbPrMode525i 0x00
#define YPbPrMode525p 0x20
#define YPbPrMode750p 0x40
#define YPbPrMode1080i 0x60
-
-#define LCDRGB18Bit 0x01 /* CR37 */
-#define LCDNonExpanding 0x10
-#define LCDSync 0x20
-#define LCDSyncBit 0xe0 /* H/V polarity & sync ID */
-
#define ScalingLCD 0x08
-#define EnableDualEdge 0x01 /* CR38 */
-#define SetToLCDA 0x02
#define SetYPbPr 0x04
/* ---------------------- VUMA Information */
#define DisplayDeviceFromCMOS 0x10
/* ---------------------- HK Evnet Definition */
-#define ModeSwitchStatus 0xf0
+#define XGI_ModeSwitchStatus 0xf0
#define ActiveCRT1 0x10
#define ActiveLCD 0x0020
#define ActiveTV 0x40
@@ -246,28 +122,13 @@
/* translated from asm code 301def.h */
/* */
/* --------------------------------------------------------- */
-#define LCDDataLen 8
-#define TVDataLen 12
#define LVDSCRT1Len_H 8
#define LVDSCRT1Len_V 7
-#define LVDSDataLen 6
-#define LVDSDesDataLen 6
#define LCDDesDataLen 6
#define LVDSDesDataLen2 8
#define LCDDesDataLen2 8
-#define CHTVRegLen 16
-#define StHiTVHT 892
-#define StHiTVVT 1126
-#define StHiTextTVHT 1000
-#define StHiTextTVVT 1126
-#define ExtHiTVHT 2100
-#define ExtHiTVVT 1125
-#define NTSCHT 1716
-#define NTSCVT 525
#define NTSC1024x768HT 1908
-#define PALHT 1728
-#define PALVT 625
#define YPbPrTV525iHT 1716 /* YPbPr */
#define YPbPrTV525iVT 525
@@ -276,22 +137,16 @@
#define YPbPrTV750pHT 1650
#define YPbPrTV750pVT 750
-#define CRT2Delay1 0x04 /* XGI301 */
-#define CRT2Delay2 0x0A /* 301B,302 */
-
-
#define VCLK25_175 0x00
#define VCLK28_322 0x01
#define VCLK31_5 0x02
#define VCLK36 0x03
-#define VCLK40 0x04
#define VCLK43_163 0x05
#define VCLK44_9 0x06
#define VCLK49_5 0x07
#define VCLK50 0x08
#define VCLK52_406 0x09
#define VCLK56_25 0x0A
-#define VCLK65 0x0B
#define VCLK68_179 0x0D
#define VCLK72_852 0x0E
#define VCLK75 0x0F
@@ -300,7 +155,6 @@
#define VCLK83_95 0x13
#define VCLK86_6 0x15
#define VCLK94_5 0x16
-#define VCLK108_2 0x19
#define VCLK113_309 0x1B
#define VCLK116_406 0x1C
#define VCLK135_5 0x1E
@@ -327,16 +181,10 @@
#define VCLK125_999 0x51
#define VCLK148_5 0x52
#define VCLK217_325 0x55
-#define YPbPr750pVCLK 0x57
+#define XGI_YPbPr750pVCLK 0x57
-#define TVVCLKDIV2 0x3A
-#define TVVCLK 0x3B
-#define HiTVVCLKDIV2 0x3C
-#define HiTVVCLK 0x3D
-#define HiTVSimuVCLK 0x3E
-#define HiTVTextVCLK 0x3F
#define VCLK39_77 0x40
-#define YPbPr525pVCLK 0x3A
+#define YPbPr525pVCLK 0x3A
#define NTSC1024VCLK 0x41
#define VCLK35_2 0x49 /* ; 800x480 */
#define VCLK122_61 0x4A
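/*
 * Net effect of the vb_def.h rewrite above: every constant that the
 * shared SiS header now included at the top of the file
 * ("../../video/sis/initdef.h") already provides is deleted here, and
 * values that stay XGI-specific but would clash with a SiS name of the
 * same meaning get an XGI_ prefix, for example (values taken from the
 * hunks above):
 *
 *     #define XGI_SetCRT2ToLCDA   0x0100
 *     #define XGI_LCDVESATiming   0x0001   (LCD Info/CR37)
 *     #define XGI_YPbPr750pVCLK   0x57
 */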
diff --git a/drivers/staging/xgifb/vb_init.c b/drivers/staging/xgifb/vb_init.c
index 4ccd988ffd7..94d5c35e22f 100644
--- a/drivers/staging/xgifb/vb_init.c
+++ b/drivers/staging/xgifb/vb_init.c
@@ -3,8 +3,8 @@
#include <linux/pci.h>
#include <linux/vmalloc.h>
-#include "vgatypes.h"
#include "XGIfb.h"
+#include "vgatypes.h"
#include "vb_def.h"
#include "vb_struct.h"
@@ -1268,7 +1268,7 @@ static void XGINew_SetModeScratch(struct xgi_hw_device_info *HwDeviceExtension,
if (pVBInfo->IF_DEF_HiVision == 1) {
if ((temp >> 8) & ActiveHiTV)
- tempcl |= SetCRT2ToHiVisionTV;
+ tempcl |= SetCRT2ToHiVision;
}
if (pVBInfo->IF_DEF_YPbPr == 1) {
@@ -1287,7 +1287,7 @@ static void XGINew_SetModeScratch(struct xgi_hw_device_info *HwDeviceExtension,
if (pVBInfo->IF_DEF_HiVision == 1) {
if ((temp >> 8) & ActiveHiTV)
- tempcl |= SetCRT2ToHiVisionTV;
+ tempcl |= SetCRT2ToHiVision;
}
if (pVBInfo->IF_DEF_YPbPr == 1) {
@@ -1299,9 +1299,9 @@ static void XGINew_SetModeScratch(struct xgi_hw_device_info *HwDeviceExtension,
tempcl |= SetSimuScanMode;
if ((!(temp & ActiveCRT1)) && ((temp & ActiveLCD) || (temp & ActiveTV)
|| (temp & ActiveCRT2)))
- tempcl ^= (SetSimuScanMode | SwitchToCRT2);
+ tempcl ^= (SetSimuScanMode | SwitchCRT2);
if ((temp & ActiveLCD) && (temp & ActiveTV))
- tempcl ^= (SetSimuScanMode | SwitchToCRT2);
+ tempcl ^= (SetSimuScanMode | SwitchCRT2);
xgifb_reg_set(pVBInfo->P3d4, 0x30, tempcl);
CR31Data = xgifb_reg_get(pVBInfo->P3d4, 0x31);
@@ -1516,11 +1516,11 @@ unsigned char XGIInitNew(struct pci_dev *pdev)
pVBInfo->P3c9 = pVBInfo->BaseAddr + 0x19;
pVBInfo->P3da = pVBInfo->BaseAddr + 0x2A;
pVBInfo->Part0Port = pVBInfo->BaseAddr + XGI_CRT2_PORT_00;
- pVBInfo->Part1Port = pVBInfo->BaseAddr + XGI_CRT2_PORT_04;
- pVBInfo->Part2Port = pVBInfo->BaseAddr + XGI_CRT2_PORT_10;
- pVBInfo->Part3Port = pVBInfo->BaseAddr + XGI_CRT2_PORT_12;
- pVBInfo->Part4Port = pVBInfo->BaseAddr + XGI_CRT2_PORT_14;
- pVBInfo->Part5Port = pVBInfo->BaseAddr + XGI_CRT2_PORT_14 + 2;
+ pVBInfo->Part1Port = pVBInfo->BaseAddr + SIS_CRT2_PORT_04;
+ pVBInfo->Part2Port = pVBInfo->BaseAddr + SIS_CRT2_PORT_10;
+ pVBInfo->Part3Port = pVBInfo->BaseAddr + SIS_CRT2_PORT_12;
+ pVBInfo->Part4Port = pVBInfo->BaseAddr + SIS_CRT2_PORT_14;
+ pVBInfo->Part5Port = pVBInfo->BaseAddr + SIS_CRT2_PORT_14 + 2;
printk("5");
if (HwDeviceExtension->jChipType < XG20) /* kuku 2004/06/25 */
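/*
 * The Part0..Part5 port setup above now takes its offsets from the
 * shared SiS header. Judging by the one XGI variant left in vb_def.h,
 * #define XGI_CRT2_PORT_00 (0x00 - 0x030), these are relative offsets:
 *
 *     pVBInfo->Part1Port = pVBInfo->BaseAddr + SIS_CRT2_PORT_04;
 *          -> BaseAddr + (0x04 - 0x30), assuming the SIS_CRT2_PORT_*
 *             macros follow the same (reg - 0x30) pattern as the XGI
 *             ones they replace
 */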
diff --git a/drivers/staging/xgifb/vb_setmode.c b/drivers/staging/xgifb/vb_setmode.c
index 67a316c3c10..2919924213c 100644
--- a/drivers/staging/xgifb/vb_setmode.c
+++ b/drivers/staging/xgifb/vb_setmode.c
@@ -61,20 +61,20 @@ static const unsigned short XGINew_VGA_DAC[] = {
void InitTo330Pointer(unsigned char ChipType, struct vb_device_info *pVBInfo)
{
pVBInfo->SModeIDTable = (struct XGI_StStruct *) XGI330_SModeIDTable;
- pVBInfo->StandTable = (struct XGI_StandTableStruct *) XGI330_StandTable;
+ pVBInfo->StandTable = (struct SiS_StandTable_S *) XGI330_StandTable;
pVBInfo->EModeIDTable = (struct XGI_ExtStruct *) XGI330_EModeIDTable;
pVBInfo->RefIndex = (struct XGI_Ext2Struct *) XGI330_RefIndex;
pVBInfo->XGINEWUB_CRT1Table
= (struct XGI_CRT1TableStruct *) XGI_CRT1Table;
- pVBInfo->MCLKData = (struct XGI_MCLKDataStruct *) XGI340New_MCLKData;
+ pVBInfo->MCLKData = (struct SiS_MCLKData *) XGI340New_MCLKData;
pVBInfo->ECLKData = (struct XGI_ECLKDataStruct *) XGI340_ECLKData;
- pVBInfo->VCLKData = (struct XGI_VCLKDataStruct *) XGI_VCLKData;
- pVBInfo->VBVCLKData = (struct XGI_VBVCLKDataStruct *) XGI_VBVCLKData;
+ pVBInfo->VCLKData = (struct SiS_VCLKData *) XGI_VCLKData;
+ pVBInfo->VBVCLKData = (struct SiS_VBVCLKData *) XGI_VBVCLKData;
pVBInfo->ScreenOffset = XGI330_ScreenOffset;
- pVBInfo->StResInfo = (struct XGI_StResInfoStruct *) XGI330_StResInfo;
+ pVBInfo->StResInfo = (struct SiS_StResInfo_S *) XGI330_StResInfo;
pVBInfo->ModeResInfo
- = (struct XGI_ModeResInfoStruct *) XGI330_ModeResInfo;
+ = (struct SiS_ModeResInfo_S *) XGI330_ModeResInfo;
pVBInfo->pOutputSelect = &XGI330_OutputSelect;
pVBInfo->pSoftSetting = &XGI330_SoftSetting;
@@ -138,7 +138,7 @@ void InitTo330Pointer(unsigned char ChipType, struct vb_device_info *pVBInfo)
pVBInfo->UpdateCRT1 = (struct XGI_XG21CRT1Struct *) XGI_UpdateCRT1Table;
/* 310 customization related */
- if ((pVBInfo->VBType & VB_XGI301LV) || (pVBInfo->VBType & VB_XGI302LV))
+ if ((pVBInfo->VBType & VB_SIS301LV) || (pVBInfo->VBType & VB_SIS302LV))
pVBInfo->LCDCapList = XGI_LCDDLCapList;
else
pVBInfo->LCDCapList = XGI_LCDCapList;
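/*
 * The casts in InitTo330Pointer() above trade the driver-private
 * XGI_*Struct table types for the shared SiS ones (SiS_StandTable_S,
 * SiS_MCLKData, SiS_VCLKData, ...). That is only sound if old and new
 * structs are layout-compatible, which the conversion assumes:
 *
 *     pVBInfo->MCLKData = (struct SiS_MCLKData *) XGI340New_MCLKData;
 *          -> valid iff SiS_MCLKData and the former XGI_MCLKDataStruct
 *             agree in member order, types and padding
 */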
@@ -153,7 +153,7 @@ void InitTo330Pointer(unsigned char ChipType, struct vb_device_info *pVBInfo)
if (ChipType == XG27) {
pVBInfo->MCLKData
- = (struct XGI_MCLKDataStruct *) XGI27New_MCLKData;
+ = (struct SiS_MCLKData *) XGI27New_MCLKData;
pVBInfo->CR40 = XGI27_cr41;
pVBInfo->pXGINew_CR97 = &XG27_CR97;
pVBInfo->pSR36 = &XG27_SR36;
@@ -208,8 +208,8 @@ static void XGI_SetSeqRegs(unsigned short ModeNo,
xgifb_reg_set(pVBInfo->P3c4, 0x00, 0x03); /* Set SR0 */
tempah = pVBInfo->StandTable[StandTableIndex].SR[0];
- i = SetCRT2ToLCDA;
- if (pVBInfo->VBInfo & SetCRT2ToLCDA) {
+ i = XGI_SetCRT2ToLCDA;
+ if (pVBInfo->VBInfo & XGI_SetCRT2ToLCDA) {
tempah |= 0x01;
} else {
if (pVBInfo->VBInfo & (SetCRT2ToTV | SetCRT2ToLCD)) {
@@ -263,7 +263,7 @@ static void XGI_SetATTRegs(unsigned short ModeNo,
ARdata = pVBInfo->StandTable[StandTableIndex].ATTR[i];
if (modeflag & Charx8Dot) { /* ifndef Dot9 */
if (i == 0x13) {
- if (pVBInfo->VBInfo & SetCRT2ToLCDA) {
+ if (pVBInfo->VBInfo & XGI_SetCRT2ToLCDA) {
ARdata = 0;
} else {
if (pVBInfo->VBInfo & (SetCRT2ToTV
@@ -356,11 +356,11 @@ static unsigned char XGI_AjustCRT2Rate(unsigned short ModeNo,
}
/* 301b */
- if (pVBInfo->VBInfo & (SetCRT2ToLCD | SetCRT2ToLCDA)) {
+ if (pVBInfo->VBInfo & (SetCRT2ToLCD | XGI_SetCRT2ToLCDA)) {
tempax |= SupportLCD;
- if (pVBInfo->LCDResInfo != Panel1280x1024) {
- if (pVBInfo->LCDResInfo != Panel1280x960) {
+ if (pVBInfo->LCDResInfo != Panel_1280x1024) {
+ if (pVBInfo->LCDResInfo != Panel_1280x960) {
if (pVBInfo->LCDInfo &
LCDNonExpanding) {
if (resinfo >= 9) {
@@ -372,10 +372,10 @@ static unsigned char XGI_AjustCRT2Rate(unsigned short ModeNo,
}
}
- if (pVBInfo->VBInfo & SetCRT2ToHiVisionTV) { /* for HiTV */
- if ((pVBInfo->VBType & VB_XGI301LV) &&
+ if (pVBInfo->VBInfo & SetCRT2ToHiVision) { /* for HiTV */
+ if ((pVBInfo->VBType & VB_SIS301LV) &&
(pVBInfo->VBExtInfo == VB_YPbPr1080i)) {
- tempax |= SupportYPbPr;
+ tempax |= SupportYPbPr750p;
if (pVBInfo->VBInfo & SetInSlaveMode) {
if (resinfo == 4)
return 0;
@@ -387,7 +387,7 @@ static unsigned char XGI_AjustCRT2Rate(unsigned short ModeNo,
return 0;
}
} else {
- tempax |= SupportHiVisionTV;
+ tempax |= SupportHiVision;
if (pVBInfo->VBInfo & SetInSlaveMode) {
if (resinfo == 4)
return 0;
@@ -406,17 +406,17 @@ static unsigned char XGI_AjustCRT2Rate(unsigned short ModeNo,
if (pVBInfo->VBInfo & (SetCRT2ToAVIDEO |
SetCRT2ToSVIDEO |
SetCRT2ToSCART |
- SetCRT2ToYPbPr |
- SetCRT2ToHiVisionTV)) {
+ SetCRT2ToYPbPr525750 |
+ SetCRT2ToHiVision)) {
tempax |= SupportTV;
- if (pVBInfo->VBType & (VB_XGI301B | VB_XGI302B
- | VB_XGI301LV | VB_XGI302LV
+ if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B
+ | VB_SIS301LV | VB_SIS302LV
| VB_XGI301C)) {
tempax |= SupportTV1024;
}
- if (!(pVBInfo->VBInfo & SetPALTV)) {
+ if (!(pVBInfo->VBInfo & TVSetPAL)) {
if (modeflag & NoSupportSimuTV) {
if (pVBInfo->VBInfo &
SetInSlaveMode) {
@@ -436,7 +436,7 @@ static unsigned char XGI_AjustCRT2Rate(unsigned short ModeNo,
if (resinfo > 0x08)
return 0; /* 1024x768 */
- if (pVBInfo->LCDResInfo < Panel1024x768) {
+ if (pVBInfo->LCDResInfo < Panel_1024x768) {
if (resinfo > 0x07)
return 0; /* 800x600 */
@@ -1230,23 +1230,23 @@ static unsigned short XGI_GetVCLK2Ptr(unsigned short ModeNo,
struct xgi_hw_device_info *HwDeviceExtension,
struct vb_device_info *pVBInfo)
{
- unsigned short LCDXlat1VCLK[4] = { VCLK65 + 2,
- VCLK65 + 2,
- VCLK65 + 2,
- VCLK65 + 2 };
- unsigned short LCDXlat2VCLK[4] = { VCLK108_2 + 5,
- VCLK108_2 + 5,
- VCLK108_2 + 5,
- VCLK108_2 + 5 };
+ unsigned short LCDXlat1VCLK[4] = { VCLK65_315 + 2,
+ VCLK65_315 + 2,
+ VCLK65_315 + 2,
+ VCLK65_315 + 2 };
+ unsigned short LCDXlat2VCLK[4] = { VCLK108_2_315 + 5,
+ VCLK108_2_315 + 5,
+ VCLK108_2_315 + 5,
+ VCLK108_2_315 + 5 };
unsigned short LVDSXlat1VCLK[4] = { VCLK40, VCLK40, VCLK40, VCLK40 };
- unsigned short LVDSXlat2VCLK[4] = { VCLK65 + 2,
- VCLK65 + 2,
- VCLK65 + 2,
- VCLK65 + 2 };
- unsigned short LVDSXlat3VCLK[4] = { VCLK65 + 2,
- VCLK65 + 2,
- VCLK65 + 2,
- VCLK65 + 2 };
+ unsigned short LVDSXlat2VCLK[4] = { VCLK65_315 + 2,
+ VCLK65_315 + 2,
+ VCLK65_315 + 2,
+ VCLK65_315 + 2 };
+ unsigned short LVDSXlat3VCLK[4] = { VCLK65_315 + 2,
+ VCLK65_315 + 2,
+ VCLK65_315 + 2,
+ VCLK65_315 + 2 };
unsigned short CRT2Index, VCLKIndex;
unsigned short modeflag, resinfo;
@@ -1266,36 +1266,36 @@ static unsigned short XGI_GetVCLK2Ptr(unsigned short ModeNo,
if (pVBInfo->IF_DEF_LVDS == 0) {
CRT2Index = CRT2Index >> 6; /* for LCD */
- if (pVBInfo->VBInfo & (SetCRT2ToLCD | SetCRT2ToLCDA)) { /*301b*/
- if (pVBInfo->LCDResInfo != Panel1024x768)
+ if (pVBInfo->VBInfo & (SetCRT2ToLCD | XGI_SetCRT2ToLCDA)) { /*301b*/
+ if (pVBInfo->LCDResInfo != Panel_1024x768)
VCLKIndex = LCDXlat2VCLK[CRT2Index];
else
VCLKIndex = LCDXlat1VCLK[CRT2Index];
- } else if (pVBInfo->VBInfo & SetCRT2ToHiVisionTV) {
+ } else if (pVBInfo->VBInfo & SetCRT2ToHiVision) {
if (pVBInfo->SetFlag & RPLLDIV2XO) {
- VCLKIndex = HiTVVCLKDIV2;
+ VCLKIndex = TVCLKBASE_315 + HiTVVCLKDIV2;
VCLKIndex += 25;
} else {
- VCLKIndex = HiTVVCLK;
+ VCLKIndex = TVCLKBASE_315 + HiTVVCLK;
VCLKIndex += 25;
}
if (pVBInfo->SetFlag & TVSimuMode) {
if (modeflag & Charx8Dot) {
- VCLKIndex = HiTVSimuVCLK;
+ VCLKIndex = TVCLKBASE_315 + HiTVSimuVCLK;
VCLKIndex += 25;
} else {
- VCLKIndex = HiTVTextVCLK;
+ VCLKIndex = TVCLKBASE_315 + HiTVTextVCLK;
VCLKIndex += 25;
}
}
/* 301lv */
- if ((pVBInfo->VBType & VB_XGI301LV) &&
+ if ((pVBInfo->VBType & VB_SIS301LV) &&
!(pVBInfo->VBExtInfo == VB_YPbPr1080i)) {
- if (pVBInfo->VBExtInfo == VB_YPbPr750p)
- VCLKIndex = YPbPr750pVCLK;
- else if (pVBInfo->VBExtInfo == VB_YPbPr525p)
+ if (pVBInfo->VBExtInfo == YPbPr750p)
+ VCLKIndex = XGI_YPbPr750pVCLK;
+ else if (pVBInfo->VBExtInfo == YPbPr525p)
VCLKIndex = YPbPr525pVCLK;
else if (pVBInfo->SetFlag & RPLLDIV2XO)
VCLKIndex = YPbPr525iVCLK_2;
@@ -1304,10 +1304,10 @@ static unsigned short XGI_GetVCLK2Ptr(unsigned short ModeNo,
}
} else if (pVBInfo->VBInfo & SetCRT2ToTV) {
if (pVBInfo->SetFlag & RPLLDIV2XO) {
- VCLKIndex = TVVCLKDIV2;
+ VCLKIndex = TVCLKBASE_315 + TVVCLKDIV2;
VCLKIndex += 25;
} else {
- VCLKIndex = TVVCLK;
+ VCLKIndex = TVCLKBASE_315 + TVVCLK;
VCLKIndex += 25;
}
} else { /* for CRT2 */
@@ -1329,11 +1329,11 @@ static unsigned short XGI_GetVCLK2Ptr(unsigned short ModeNo,
VCLKIndex = CRT2Index;
VCLKIndex = VCLKIndex >> 6;
- if ((pVBInfo->LCDResInfo == Panel800x600) ||
- (pVBInfo->LCDResInfo == Panel320x480))
+ if ((pVBInfo->LCDResInfo == Panel_800x600) ||
+ (pVBInfo->LCDResInfo == Panel_320x480))
VCLKIndex = LVDSXlat1VCLK[VCLKIndex];
- else if ((pVBInfo->LCDResInfo == Panel1024x768) ||
- (pVBInfo->LCDResInfo == Panel1024x768x75))
+ else if ((pVBInfo->LCDResInfo == Panel_1024x768) ||
+ (pVBInfo->LCDResInfo == Panel_1024x768x75))
VCLKIndex = LVDSXlat2VCLK[VCLKIndex];
else
VCLKIndex = LVDSXlat3VCLK[VCLKIndex];
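/*
 * On the TV paths above, the deleted absolute clock indices from
 * vb_def.h (TVVCLK 0x3B, HiTVVCLK 0x3D, ...) are rewritten as
 * TVCLKBASE_315 + <offset>; the SiS header is assumed to define the TV
 * clocks as small offsets from a 315-family base so that, e.g.,
 *
 *     VCLKIndex = TVCLKBASE_315 + TVVCLK;
 *     VCLKIndex += 25;
 *
 * resolves to the same VCLK table slot the old absolute value named.
 */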
@@ -1360,9 +1360,9 @@ static void XGI_SetCRT1VCLK(unsigned short ModeNo,
xgifb_reg_set(pVBInfo->P3c4, 0x2C,
pVBInfo->VCLKData[index].SR2C);
xgifb_reg_set(pVBInfo->P3c4, 0x2D, 0x01);
- } else if ((pVBInfo->VBType & (VB_XGI301B | VB_XGI302B | VB_XGI301LV
- | VB_XGI302LV | VB_XGI301C)) && (pVBInfo->VBInfo
- & SetCRT2ToLCDA)) {
+ } else if ((pVBInfo->VBType & (VB_SIS301B | VB_SIS302B | VB_SIS301LV
+ | VB_SIS302LV | VB_XGI301C)) && (pVBInfo->VBInfo
+ & XGI_SetCRT2ToLCDA)) {
vclkindex = XGI_GetVCLK2Ptr(ModeNo, ModeIdIndex,
RefreshRateTableIndex, HwDeviceExtension,
pVBInfo);
@@ -1801,7 +1801,7 @@ static void *XGI_GetLcdPtr(unsigned short BX, unsigned short ModeNo,
Ext_CRT2CRTC;
}
- if (pVBInfo->VBInfo & SetCRT2ToLCDA) {
+ if (pVBInfo->VBInfo & XGI_SetCRT2ToLCDA) {
if (ModeNo <= 0x13)
tempal = pVBInfo->SModeIDTable[ModeIdIndex].
St_CRT2CRTC2;
@@ -2128,30 +2128,30 @@ static void *XGI_GetLcdPtr(unsigned short BX, unsigned short ModeNo,
return &XGI_CetLCDDes1024x768Data[tempal];
break;
case 3:
- if ((pVBInfo->VBType & VB_XGI301LV) ||
- (pVBInfo->VBType & VB_XGI302LV))
+ if ((pVBInfo->VBType & VB_SIS301LV) ||
+ (pVBInfo->VBType & VB_SIS302LV))
return &XGI_ExtLCDDLDes1280x1024Data[tempal];
else
return &XGI_ExtLCDDes1280x1024Data[tempal];
break;
case 4:
- if ((pVBInfo->VBType & VB_XGI301LV) ||
- (pVBInfo->VBType & VB_XGI302LV))
+ if ((pVBInfo->VBType & VB_SIS301LV) ||
+ (pVBInfo->VBType & VB_SIS302LV))
return &XGI_StLCDDLDes1280x1024Data[tempal];
else
return &XGI_StLCDDes1280x1024Data[tempal];
break;
case 5:
- if ((pVBInfo->VBType & VB_XGI301LV) ||
- (pVBInfo->VBType & VB_XGI302LV))
+ if ((pVBInfo->VBType & VB_SIS301LV) ||
+ (pVBInfo->VBType & VB_SIS302LV))
return &XGI_CetLCDDLDes1280x1024Data[tempal];
else
return &XGI_CetLCDDes1280x1024Data[tempal];
break;
case 6:
case 7:
- if ((pVBInfo->VBType & VB_XGI301LV) ||
- (pVBInfo->VBType & VB_XGI302LV))
+ if ((pVBInfo->VBType & VB_SIS301LV) ||
+ (pVBInfo->VBType & VB_SIS302LV))
return &xgifb_lcddldes_1400x1050[tempal];
else
return &xgifb_lcddes_1400x1050[tempal];
@@ -2163,15 +2163,15 @@ static void *XGI_GetLcdPtr(unsigned short BX, unsigned short ModeNo,
return &XGI_CetLCDDes1400x1050Data2[tempal];
break;
case 10:
- if ((pVBInfo->VBType & VB_XGI301LV) ||
- (pVBInfo->VBType & VB_XGI302LV))
+ if ((pVBInfo->VBType & VB_SIS301LV) ||
+ (pVBInfo->VBType & VB_SIS302LV))
return &XGI_ExtLCDDLDes1600x1200Data[tempal];
else
return &XGI_ExtLCDDes1600x1200Data[tempal];
break;
case 11:
- if ((pVBInfo->VBType & VB_XGI301LV) ||
- (pVBInfo->VBType & VB_XGI302LV))
+ if ((pVBInfo->VBType & VB_SIS301LV) ||
+ (pVBInfo->VBType & VB_SIS302LV))
return &XGI_StLCDDLDes1600x1200Data[tempal];
else
return &XGI_StLCDDes1600x1200Data[tempal];
@@ -2188,15 +2188,15 @@ static void *XGI_GetLcdPtr(unsigned short BX, unsigned short ModeNo,
break;
case 16:
case 17:
- if ((pVBInfo->VBType & VB_XGI301LV) ||
- (pVBInfo->VBType & VB_XGI302LV))
+ if ((pVBInfo->VBType & VB_SIS301LV) ||
+ (pVBInfo->VBType & VB_SIS302LV))
return &xgifb_lcddldes_1280x1024x75[tempal];
else
return &xgifb_lcddes_1280x1024x75[tempal];
break;
case 18:
- if ((pVBInfo->VBType & VB_XGI301LV) ||
- (pVBInfo->VBType & VB_XGI302LV))
+ if ((pVBInfo->VBType & VB_SIS301LV) ||
+ (pVBInfo->VBType & VB_SIS302LV))
return &XGI_CetLCDDLDes1280x1024x75Data[tempal];
else
return &XGI_CetLCDDes1280x1024x75Data[tempal];
@@ -2364,7 +2364,7 @@ static void XGI_GetLVDSData(unsigned short ModeNo, unsigned short ModeIdIndex,
tempbx = 2;
- if (pVBInfo->VBInfo & (SetCRT2ToLCD | SetCRT2ToLCDA)) {
+ if (pVBInfo->VBInfo & (SetCRT2ToLCD | XGI_SetCRT2ToLCDA)) {
LCDPtr = (struct XGI330_LVDSDataStruct *) XGI_GetLcdPtr(tempbx,
ModeNo, ModeIdIndex, RefreshRateTableIndex,
pVBInfo);
@@ -2374,18 +2374,18 @@ static void XGI_GetLVDSData(unsigned short ModeNo, unsigned short ModeIdIndex,
pVBInfo->VT = LCDPtr->LCDVT;
}
- if (pVBInfo->VBInfo & (SetCRT2ToLCD | SetCRT2ToLCDA)) {
+ if (pVBInfo->VBInfo & (SetCRT2ToLCD | XGI_SetCRT2ToLCDA)) {
if (!(pVBInfo->LCDInfo & (SetLCDtoNonExpanding
| EnableScalingLCD))) {
- if ((pVBInfo->LCDResInfo == Panel1024x768) ||
- (pVBInfo->LCDResInfo == Panel1024x768x75)) {
+ if ((pVBInfo->LCDResInfo == Panel_1024x768) ||
+ (pVBInfo->LCDResInfo == Panel_1024x768x75)) {
pVBInfo->HDE = 1024;
pVBInfo->VDE = 768;
- } else if ((pVBInfo->LCDResInfo == Panel1280x1024) ||
- (pVBInfo->LCDResInfo == Panel1280x1024x75)) {
+ } else if ((pVBInfo->LCDResInfo == Panel_1280x1024) ||
+ (pVBInfo->LCDResInfo == Panel_1280x1024x75)) {
pVBInfo->HDE = 1280;
pVBInfo->VDE = 1024;
- } else if (pVBInfo->LCDResInfo == Panel1400x1050) {
+ } else if (pVBInfo->LCDResInfo == Panel_1400x1050) {
pVBInfo->HDE = 1400;
pVBInfo->VDE = 1050;
} else {
@@ -2415,7 +2415,7 @@ static void XGI_ModCRT1Regs(unsigned short ModeNo, unsigned short ModeIdIndex,
tempbx = 0;
- if (pVBInfo->VBInfo & (SetCRT2ToLCD | SetCRT2ToLCDA)) {
+ if (pVBInfo->VBInfo & (SetCRT2ToLCD | XGI_SetCRT2ToLCDA)) {
LCDPtr = (struct XGI_LVDSCRT1HDataStruct *)
XGI_GetLcdPtr(tempbx, ModeNo,
ModeIdIndex,
@@ -2430,7 +2430,7 @@ static void XGI_ModCRT1Regs(unsigned short ModeNo, unsigned short ModeIdIndex,
tempbx = 1;
- if (pVBInfo->VBInfo & (SetCRT2ToLCD | SetCRT2ToLCDA)) {
+ if (pVBInfo->VBInfo & (SetCRT2ToLCD | XGI_SetCRT2ToLCDA)) {
LCDPtr1 = (struct XGI_LVDSCRT1VDataStruct *)
XGI_GetLcdPtr(
tempbx,
@@ -2496,7 +2496,7 @@ static unsigned short XGI_GetLCDCapPtr1(struct vb_device_info *pVBInfo)
}
if (tempbl == 0xFF) {
- pVBInfo->LCDResInfo = Panel1024x768;
+ pVBInfo->LCDResInfo = Panel_1024x768;
pVBInfo->LCDTypeInfo = 0;
i = 0;
}
@@ -2556,15 +2556,15 @@ static void XGI_SetLVDSRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
push2 = tempax;
/* GetLCDResInfo */
- if ((pVBInfo->LCDResInfo == Panel1024x768) ||
- (pVBInfo->LCDResInfo == Panel1024x768x75)) {
+ if ((pVBInfo->LCDResInfo == Panel_1024x768) ||
+ (pVBInfo->LCDResInfo == Panel_1024x768x75)) {
tempax = 1024;
tempbx = 768;
- } else if ((pVBInfo->LCDResInfo == Panel1280x1024) ||
- (pVBInfo->LCDResInfo == Panel1280x1024x75)) {
+ } else if ((pVBInfo->LCDResInfo == Panel_1280x1024) ||
+ (pVBInfo->LCDResInfo == Panel_1280x1024x75)) {
tempax = 1280;
tempbx = 1024;
- } else if (pVBInfo->LCDResInfo == Panel1400x1050) {
+ } else if (pVBInfo->LCDResInfo == Panel_1400x1050) {
tempax = 1400;
tempbx = 1050;
} else {
@@ -2682,7 +2682,7 @@ static void XGI_SetLVDSRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
if (tempbx != pVBInfo->VDE)
tempax |= 0x40;
- if (pVBInfo->LCDInfo & EnableLVDSDDA)
+ if (pVBInfo->LCDInfo & XGI_EnableLVDSDDA)
tempax |= 0x40;
xgifb_reg_and_or(pVBInfo->Part1Port, 0x1a, 0x07,
@@ -2768,7 +2768,7 @@ static void XGI_SetLVDSRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
temp1 = temp1 / push3;
tempbx = (unsigned short) (temp1 & 0xffff);
- if (pVBInfo->LCDResInfo == Panel1024x768)
+ if (pVBInfo->LCDResInfo == Panel_1024x768)
tempbx -= 1;
tempax = ((tempbx >> 8) & 0xff) << 3;
@@ -2800,7 +2800,7 @@ static void XGI_GetLCDVCLKPtr(unsigned char *di_0, unsigned char *di_1,
{
unsigned short index;
- if (pVBInfo->VBInfo & (SetCRT2ToLCD | SetCRT2ToLCDA)) {
+ if (pVBInfo->VBInfo & (SetCRT2ToLCD | XGI_SetCRT2ToLCDA)) {
index = XGI_GetLCDCapPtr1(pVBInfo);
if (pVBInfo->VBInfo & SetCRT2ToLCD) { /* LCDB */
@@ -2834,35 +2834,35 @@ static unsigned char XGI_GetVCLKPtr(unsigned short RefreshRateTableIndex,
index = XGI_GetLCDCapPtr(pVBInfo);
tempal = pVBInfo->LCDCapList[index].LCD_VCLK;
- if (pVBInfo->VBInfo & (SetCRT2ToLCD | SetCRT2ToLCDA))
+ if (pVBInfo->VBInfo & (SetCRT2ToLCD | XGI_SetCRT2ToLCDA))
return tempal;
/* {TV} */
if (pVBInfo->VBType &
- (VB_XGI301B |
- VB_XGI302B |
- VB_XGI301LV |
- VB_XGI302LV |
+ (VB_SIS301B |
+ VB_SIS302B |
+ VB_SIS301LV |
+ VB_SIS302LV |
VB_XGI301C)) {
- if (pVBInfo->VBInfo & SetCRT2ToHiVisionTV) {
- tempal = HiTVVCLKDIV2;
+ if (pVBInfo->VBInfo & SetCRT2ToHiVision) {
+ tempal = TVCLKBASE_315 + HiTVVCLKDIV2;
if (!(pVBInfo->TVInfo & RPLLDIV2XO))
- tempal = HiTVVCLK;
+ tempal = TVCLKBASE_315 + HiTVVCLK;
if (pVBInfo->TVInfo & TVSimuMode) {
- tempal = HiTVSimuVCLK;
+ tempal = TVCLKBASE_315 + HiTVSimuVCLK;
if (!(modeflag & Charx8Dot))
- tempal = HiTVTextVCLK;
+ tempal = TVCLKBASE_315 + HiTVTextVCLK;
}
return tempal;
}
- if (pVBInfo->TVInfo & SetYPbPrMode750p) {
- tempal = YPbPr750pVCLK;
+ if (pVBInfo->TVInfo & TVSetYPbPr750p) {
+ tempal = XGI_YPbPr750pVCLK;
return tempal;
}
- if (pVBInfo->TVInfo & SetYPbPrMode525p) {
+ if (pVBInfo->TVInfo & TVSetYPbPr525p) {
tempal = YPbPr525pVCLK;
return tempal;
}
@@ -2870,9 +2870,9 @@ static unsigned char XGI_GetVCLKPtr(unsigned short RefreshRateTableIndex,
tempal = NTSC1024VCLK;
if (!(pVBInfo->TVInfo & NTSC1024x768)) {
- tempal = TVVCLKDIV2;
+ tempal = TVCLKBASE_315 + TVVCLKDIV2;
if (!(pVBInfo->TVInfo & RPLLDIV2XO))
- tempal = TVVCLK;
+ tempal = TVCLKBASE_315 + TVVCLK;
}
if (pVBInfo->VBInfo & SetCRT2ToTV)
@@ -2898,9 +2898,9 @@ static unsigned char XGI_GetVCLKPtr(unsigned short RefreshRateTableIndex,
static void XGI_GetVCLKLen(unsigned char tempal, unsigned char *di_0,
unsigned char *di_1, struct vb_device_info *pVBInfo)
{
- if (pVBInfo->VBType & (VB_XGI301 | VB_XGI301B | VB_XGI302B
- | VB_XGI301LV | VB_XGI302LV | VB_XGI301C)) {
- if ((!(pVBInfo->VBInfo & SetCRT2ToLCDA)) && (pVBInfo->SetFlag
+ if (pVBInfo->VBType & (VB_SIS301 | VB_SIS301B | VB_SIS302B
+ | VB_SIS301LV | VB_SIS302LV | VB_XGI301C)) {
+ if ((!(pVBInfo->VBInfo & XGI_SetCRT2ToLCDA)) && (pVBInfo->SetFlag
& ProgrammingCRT2)) {
*di_0 = (unsigned char) XGI_VBVCLKData[tempal].SR2B;
*di_1 = XGI_VBVCLKData[tempal].SR2C;
@@ -2926,7 +2926,7 @@ static void XGI_SetCRT2ECLK(unsigned short ModeNo, unsigned short ModeIdIndex,
for (i = 0; i < 4; i++) {
xgifb_reg_and_or(pVBInfo->P3d4, 0x31, ~0x30,
(unsigned short) (0x10 * i));
- if ((!(pVBInfo->VBInfo & SetCRT2ToLCDA))
+ if ((!(pVBInfo->VBInfo & XGI_SetCRT2ToLCDA))
&& (!(pVBInfo->VBInfo & SetInSlaveMode))) {
xgifb_reg_set(pVBInfo->P3c4, 0x2e, di_0);
xgifb_reg_set(pVBInfo->P3c4, 0x2f, di_1);
@@ -2942,8 +2942,8 @@ static void XGI_UpdateModeInfo(struct xgi_hw_device_info *HwDeviceExtension,
{
unsigned short tempcl, tempch, temp, tempbl, tempax;
- if (pVBInfo->VBType & (VB_XGI301B | VB_XGI302B | VB_XGI301LV
- | VB_XGI302LV | VB_XGI301C)) {
+ if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B | VB_SIS301LV
+ | VB_SIS302LV | VB_XGI301C)) {
tempcl = 0;
tempch = 0;
temp = xgifb_reg_get(pVBInfo->P3c4, 0x01);
@@ -2987,12 +2987,12 @@ static void XGI_UpdateModeInfo(struct xgi_hw_device_info *HwDeviceExtension,
if (temp & 0x02)
tempch |= ActiveSCART;
- if (pVBInfo->VBInfo & SetCRT2ToHiVisionTV) {
+ if (pVBInfo->VBInfo & SetCRT2ToHiVision) {
if (temp & 0x01)
tempch |= ActiveHiTV;
}
- if (pVBInfo->VBInfo & SetCRT2ToYPbPr) {
+ if (pVBInfo->VBInfo & SetCRT2ToYPbPr525750) {
temp = xgifb_reg_get(
pVBInfo->Part2Port,
0x4d);
@@ -3014,7 +3014,7 @@ static void XGI_UpdateModeInfo(struct xgi_hw_device_info *HwDeviceExtension,
}
}
temp = tempcl;
- tempbl = ~ModeSwitchStatus;
+ tempbl = ~XGI_ModeSwitchStatus;
xgifb_reg_and_or(pVBInfo->P3d4, 0x3d, tempbl, temp);
if (!(pVBInfo->SetFlag & ReserveTVOption))
@@ -3029,19 +3029,19 @@ void XGI_GetVBType(struct vb_device_info *pVBInfo)
unsigned short flag, tempbx, tempah;
if (pVBInfo->IF_DEF_LVDS == 0) {
- tempbx = VB_XGI302B;
+ tempbx = VB_SIS302B;
flag = xgifb_reg_get(pVBInfo->Part4Port, 0x00);
if (flag != 0x02) {
- tempbx = VB_XGI301;
+ tempbx = VB_SIS301;
flag = xgifb_reg_get(pVBInfo->Part4Port, 0x01);
if (flag >= 0xB0) {
- tempbx = VB_XGI301B;
+ tempbx = VB_SIS301B;
if (flag >= 0xC0) {
tempbx = VB_XGI301C;
if (flag >= 0xD0) {
- tempbx = VB_XGI301LV;
+ tempbx = VB_SIS301LV;
if (flag >= 0xE0) {
- tempbx = VB_XGI302LV;
+ tempbx = VB_SIS302LV;
tempah = xgifb_reg_get(
pVBInfo->Part4Port,
0x39);
@@ -3052,7 +3052,7 @@ void XGI_GetVBType(struct vb_device_info *pVBInfo)
}
}
- if (tempbx & (VB_XGI301B | VB_XGI302B)) {
+ if (tempbx & (VB_SIS301B | VB_SIS302B)) {
flag = xgifb_reg_get(
pVBInfo->Part4Port,
0x23);
@@ -3078,7 +3078,7 @@ static void XGI_GetVBInfo(unsigned short ModeNo, unsigned short ModeIdIndex,
modeflag = pVBInfo->EModeIDTable[ModeIdIndex].Ext_ModeFlag;
pVBInfo->SetFlag = 0;
- pVBInfo->ModeType = modeflag & ModeInfoFlag;
+ pVBInfo->ModeType = modeflag & ModeTypeMask;
tempbx = 0;
if (pVBInfo->VBType & 0xFFFF) {
@@ -3090,7 +3090,7 @@ static void XGI_GetVBInfo(unsigned short ModeNo, unsigned short ModeIdIndex,
push = push << 8;
tempax = temp << 8;
tempbx = tempbx | tempax;
- temp = (SetCRT2ToDualEdge | SetCRT2ToYPbPr | SetCRT2ToLCDA
+ temp = (SetCRT2ToDualEdge | SetCRT2ToYPbPr525750 | XGI_SetCRT2ToLCDA
| SetInSlaveMode | DisableCRT2Display);
temp = 0xFFFF ^ temp;
tempbx &= temp;
@@ -3103,9 +3103,9 @@ static void XGI_GetVBInfo(unsigned short ModeNo, unsigned short ModeIdIndex,
(HwDeviceExtension->jChipType >= XG40)) {
if (pVBInfo->IF_DEF_LVDS == 0) {
if (pVBInfo->VBType &
- (VB_XGI302B |
- VB_XGI301LV |
- VB_XGI302LV |
+ (VB_SIS302B |
+ VB_SIS301LV |
+ VB_SIS302LV |
VB_XGI301C)) {
if (temp & EnableDualEdge) {
tempbx |=
@@ -3113,7 +3113,7 @@ static void XGI_GetVBInfo(unsigned short ModeNo, unsigned short ModeIdIndex,
if (temp & SetToLCDA)
tempbx |=
- SetCRT2ToLCDA;
+ XGI_SetCRT2ToLCDA;
}
}
}
@@ -3123,8 +3123,8 @@ static void XGI_GetVBInfo(unsigned short ModeNo, unsigned short ModeIdIndex,
if (pVBInfo->IF_DEF_YPbPr == 1) {
/* [Billy] 07/05/04 */
if (((pVBInfo->IF_DEF_LVDS == 0) &&
- ((pVBInfo->VBType & VB_XGI301LV) ||
- (pVBInfo->VBType & VB_XGI302LV) ||
+ ((pVBInfo->VBType & VB_SIS301LV) ||
+ (pVBInfo->VBType & VB_SIS302LV) ||
(pVBInfo->VBType & VB_XGI301C)))) {
if (temp & SetYPbPr) {
if (pVBInfo->IF_DEF_HiVision == 1) {
@@ -3134,13 +3134,13 @@ static void XGI_GetVBInfo(unsigned short ModeNo, unsigned short ModeIdIndex,
pVBInfo->P3d4,
0x35);
temp &= YPbPrMode;
- tempbx |= SetCRT2ToHiVisionTV;
+ tempbx |= SetCRT2ToHiVision;
if (temp != YPbPrMode1080i) {
tempbx &=
- (~SetCRT2ToHiVisionTV);
+ (~SetCRT2ToHiVision);
tempbx |=
- SetCRT2ToYPbPr;
+ SetCRT2ToYPbPr525750;
}
}
}
@@ -3172,30 +3172,30 @@ static void XGI_GetVBInfo(unsigned short ModeNo, unsigned short ModeIdIndex,
if (pVBInfo->IF_DEF_LCDA == 1) { /* Select Display Device */
if (!(pVBInfo->VBType & VB_NoLCD)) {
- if (tempbx & SetCRT2ToLCDA) {
+ if (tempbx & XGI_SetCRT2ToLCDA) {
if (tempbx & SetSimuScanMode)
tempbx &= (~(SetCRT2ToLCD |
SetCRT2ToRAMDAC |
- SwitchToCRT2));
+ SwitchCRT2));
else
tempbx &= (~(SetCRT2ToLCD |
SetCRT2ToRAMDAC |
SetCRT2ToTV |
- SwitchToCRT2));
+ SwitchCRT2));
}
}
}
/* shampoo add */
/* for driver abnormal */
- if (!(tempbx & (SwitchToCRT2 | SetSimuScanMode))) {
+ if (!(tempbx & (SwitchCRT2 | SetSimuScanMode))) {
if (pVBInfo->IF_DEF_CRT2Monitor == 1) {
if (tempbx & SetCRT2ToRAMDAC) {
tempbx &= (0xFF00 |
SetCRT2ToRAMDAC |
- SwitchToCRT2 |
+ SwitchCRT2 |
SetSimuScanMode);
- tempbx &= (0x00FF | (~SetCRT2ToYPbPr));
+ tempbx &= (0x00FF | (~SetCRT2ToYPbPr525750));
}
} else {
tempbx &= (~(SetCRT2ToRAMDAC |
@@ -3208,37 +3208,37 @@ static void XGI_GetVBInfo(unsigned short ModeNo, unsigned short ModeIdIndex,
if (tempbx & SetCRT2ToLCD) {
tempbx &= (0xFF00 |
SetCRT2ToLCD |
- SwitchToCRT2 |
+ SwitchCRT2 |
SetSimuScanMode);
- tempbx &= (0x00FF | (~SetCRT2ToYPbPr));
+ tempbx &= (0x00FF | (~SetCRT2ToYPbPr525750));
}
}
if (tempbx & SetCRT2ToSCART) {
tempbx &= (0xFF00 |
SetCRT2ToSCART |
- SwitchToCRT2 |
+ SwitchCRT2 |
SetSimuScanMode);
- tempbx &= (0x00FF | (~SetCRT2ToYPbPr));
+ tempbx &= (0x00FF | (~SetCRT2ToYPbPr525750));
}
if (pVBInfo->IF_DEF_YPbPr == 1) {
- if (tempbx & SetCRT2ToYPbPr)
+ if (tempbx & SetCRT2ToYPbPr525750)
tempbx &= (0xFF00 |
- SwitchToCRT2 |
+ SwitchCRT2 |
SetSimuScanMode);
}
if (pVBInfo->IF_DEF_HiVision == 1) {
- if (tempbx & SetCRT2ToHiVisionTV)
+ if (tempbx & SetCRT2ToHiVision)
tempbx &= (0xFF00 |
- SetCRT2ToHiVisionTV |
- SwitchToCRT2 |
+ SetCRT2ToHiVision |
+ SwitchCRT2 |
SetSimuScanMode);
}
if (tempax & DisableCRT2Display) { /* Set Display Device Info */
- if (!(tempbx & (SwitchToCRT2 | SetSimuScanMode)))
+ if (!(tempbx & (SwitchCRT2 | SetSimuScanMode)))
tempbx = DisableCRT2Display;
}
@@ -3246,7 +3246,7 @@ static void XGI_GetVBInfo(unsigned short ModeNo, unsigned short ModeIdIndex,
if ((!(tempbx & DriverMode)) ||
(!(modeflag & CRT2Mode))) {
if (pVBInfo->IF_DEF_LCDA == 1) {
- if (!(tempbx & SetCRT2ToLCDA))
+ if (!(tempbx & XGI_SetCRT2ToLCDA))
tempbx |= (SetInSlaveMode |
SetSimuScanMode);
}
@@ -3255,9 +3255,9 @@ static void XGI_GetVBInfo(unsigned short ModeNo, unsigned short ModeIdIndex,
/* LCD+TV can't support in slave mode
* (Force LCDA+TV->LCDB) */
if ((tempbx & SetInSlaveMode) &&
- (tempbx & SetCRT2ToLCDA)) {
+ (tempbx & XGI_SetCRT2ToLCDA)) {
tempbx ^= (SetCRT2ToLCD |
- SetCRT2ToLCDA |
+ XGI_SetCRT2ToLCDA |
SetCRT2ToDualEdge);
pVBInfo->SetFlag |= ReserveTVOption;
}
@@ -3291,43 +3291,43 @@ static void XGI_GetTVInfo(unsigned short ModeNo, unsigned short ModeIdIndex,
if (pVBInfo->VBInfo & SetCRT2ToTV) {
temp = xgifb_reg_get(pVBInfo->P3d4, 0x35);
tempbx = temp;
- if (tempbx & SetPALTV) {
+ if (tempbx & TVSetPAL) {
tempbx &= (SetCHTVOverScan |
- SetPALMTV |
- SetPALNTV |
- SetPALTV);
- if (tempbx & SetPALMTV)
+ TVSetPALM |
+ TVSetPALN |
+ TVSetPAL);
+ if (tempbx & TVSetPALM)
/* set to NTSC if PAL-M */
- tempbx &= ~SetPALTV;
+ tempbx &= ~TVSetPAL;
} else
tempbx &= (SetCHTVOverScan |
- SetNTSCJ |
- SetPALTV);
+ TVSetNTSCJ |
+ TVSetPAL);
}
if (pVBInfo->IF_DEF_LVDS == 0) {
if (pVBInfo->VBInfo & SetCRT2ToSCART)
- tempbx |= SetPALTV;
+ tempbx |= TVSetPAL;
}
if (pVBInfo->IF_DEF_YPbPr == 1) {
- if (pVBInfo->VBInfo & SetCRT2ToYPbPr) {
+ if (pVBInfo->VBInfo & SetCRT2ToYPbPr525750) {
index1 = xgifb_reg_get(pVBInfo->P3d4, 0x35);
index1 &= YPbPrMode;
if (index1 == YPbPrMode525i)
- tempbx |= SetYPbPrMode525i;
+ tempbx |= TVSetYPbPr525i;
if (index1 == YPbPrMode525p)
- tempbx = tempbx | SetYPbPrMode525p;
+ tempbx = tempbx | TVSetYPbPr525p;
if (index1 == YPbPrMode750p)
- tempbx = tempbx | SetYPbPrMode750p;
+ tempbx = tempbx | TVSetYPbPr750p;
}
}
if (pVBInfo->IF_DEF_HiVision == 1) {
- if (pVBInfo->VBInfo & SetCRT2ToHiVisionTV)
- tempbx = tempbx | SetYPbPrMode1080i | SetPALTV;
+ if (pVBInfo->VBInfo & SetCRT2ToHiVision)
+ tempbx = tempbx | TVSetHiVision | TVSetPAL;
}
if (pVBInfo->IF_DEF_LVDS == 0) { /* shampoo */
@@ -3335,25 +3335,25 @@ static void XGI_GetTVInfo(unsigned short ModeNo, unsigned short ModeIdIndex,
(!(pVBInfo->VBInfo & SetNotSimuMode)))
tempbx |= TVSimuMode;
- if (!(tempbx & SetPALTV) &&
+ if (!(tempbx & TVSetPAL) &&
(modeflag > 13) &&
(resinfo == 8)) /* NTSC 1024x768, */
tempbx |= NTSC1024x768;
tempbx |= RPLLDIV2XO;
- if (pVBInfo->VBInfo & SetCRT2ToHiVisionTV) {
+ if (pVBInfo->VBInfo & SetCRT2ToHiVision) {
if (pVBInfo->VBInfo & SetInSlaveMode)
tempbx &= (~RPLLDIV2XO);
} else {
if (tempbx &
- (SetYPbPrMode525p | SetYPbPrMode750p))
+ (TVSetYPbPr525p | TVSetYPbPr750p))
tempbx &= (~RPLLDIV2XO);
else if (!(pVBInfo->VBType &
- (VB_XGI301B |
- VB_XGI302B |
- VB_XGI301LV |
- VB_XGI302LV |
+ (VB_SIS301B |
+ VB_SIS302B |
+ VB_SIS301LV |
+ VB_SIS302LV |
VB_XGI301C))) {
if (tempbx & TVSimuMode)
tempbx &= (~RPLLDIV2XO);
@@ -3386,13 +3386,13 @@ static unsigned char XGI_GetLCDInfo(unsigned short ModeNo,
tempbx = temp & 0x0F;
if (tempbx == 0)
- tempbx = Panel1024x768; /* default */
+ tempbx = Panel_1024x768; /* default */
/* LCD75 [2003/8/22] Vicent */
- if ((tempbx == Panel1024x768) || (tempbx == Panel1280x1024)) {
+ if ((tempbx == Panel_1024x768) || (tempbx == Panel_1280x1024)) {
if (pVBInfo->VBInfo & DriverMode) {
tempax = xgifb_reg_get(pVBInfo->P3d4, 0x33);
- if (pVBInfo->VBInfo & SetCRT2ToLCDA)
+ if (pVBInfo->VBInfo & XGI_SetCRT2ToLCDA)
tempax &= 0x0F;
else
tempax = tempax >> 4;
@@ -3411,7 +3411,7 @@ static unsigned char XGI_GetLCDInfo(unsigned short ModeNo,
/* End of LCD75 */
- if (!(pVBInfo->VBInfo & (SetCRT2ToLCD | SetCRT2ToLCDA)))
+ if (!(pVBInfo->VBInfo & (SetCRT2ToLCD | XGI_SetCRT2ToLCDA)))
return 0;
tempbx = 0;
@@ -3427,30 +3427,30 @@ static unsigned char XGI_GetLCDInfo(unsigned short ModeNo,
tempax = pVBInfo->LCDCapList[LCDIdIndex].LCD_Capability;
if (pVBInfo->IF_DEF_LVDS == 0) { /* shampoo */
- if (((pVBInfo->VBType & VB_XGI302LV) || (pVBInfo->VBType
- & VB_XGI301C)) && (tempax & LCDDualLink)) {
+ if (((pVBInfo->VBType & VB_SIS302LV) || (pVBInfo->VBType
+ & VB_XGI301C)) && (tempax & XGI_LCDDualLink)) {
tempbx |= SetLCDDualLink;
}
}
if (pVBInfo->IF_DEF_LVDS == 0) {
- if ((pVBInfo->LCDResInfo == Panel1400x1050) && (pVBInfo->VBInfo
+ if ((pVBInfo->LCDResInfo == Panel_1400x1050) && (pVBInfo->VBInfo
& SetCRT2ToLCD) && (ModeNo > 0x13) && (resinfo
== 9) && (!(tempbx & EnableScalingLCD)))
- /* set to center in 1280x1024 LCDB for Panel1400x1050 */
+ /* set to center in 1280x1024 LCDB for Panel_1400x1050 */
tempbx |= SetLCDtoNonExpanding;
}
if (pVBInfo->IF_DEF_ExpLink == 1) {
if (modeflag & HalfDCLK) {
if (!(tempbx & SetLCDtoNonExpanding)) {
- tempbx |= EnableLVDSDDA;
+ tempbx |= XGI_EnableLVDSDDA;
} else {
if (ModeNo > 0x13) {
if (pVBInfo->LCDResInfo
- == Panel1024x768) {
+ == Panel_1024x768) {
if (resinfo == 4) {/* 512x384 */
- tempbx |= EnableLVDSDDA;
+ tempbx |= XGI_EnableLVDSDDA;
}
}
}
@@ -3460,9 +3460,9 @@ static unsigned char XGI_GetLCDInfo(unsigned short ModeNo,
if (pVBInfo->VBInfo & SetInSlaveMode) {
if (pVBInfo->VBInfo & SetNotSimuMode)
- tempbx |= LCDVESATiming;
+ tempbx |= XGI_LCDVESATiming;
} else {
- tempbx |= LCDVESATiming;
+ tempbx |= XGI_LCDVESATiming;
}
pVBInfo->LCDInfo = tempbx;
@@ -3477,7 +3477,7 @@ static unsigned char XGI_GetLCDInfo(unsigned short ModeNo,
SetInSlaveMode |
SetCRT2ToLCD);
pVBInfo->VBInfo |=
- SetCRT2ToLCDA |
+ XGI_SetCRT2ToLCDA |
SetCRT2ToDualEdge;
}
}
@@ -3801,27 +3801,27 @@ static void XGI_GetCRT2ResInfo(unsigned short ModeNo,
if (pVBInfo->VBInfo & SetCRT2ToLCD) {
if (pVBInfo->IF_DEF_LVDS == 0) {
- if (pVBInfo->LCDResInfo == Panel1600x1200) {
- if (!(pVBInfo->LCDInfo & LCDVESATiming)) {
+ if (pVBInfo->LCDResInfo == Panel_1600x1200) {
+ if (!(pVBInfo->LCDInfo & XGI_LCDVESATiming)) {
if (yres == 1024)
yres = 1056;
}
}
- if (pVBInfo->LCDResInfo == Panel1280x1024) {
+ if (pVBInfo->LCDResInfo == Panel_1280x1024) {
if (yres == 400)
yres = 405;
else if (yres == 350)
yres = 360;
- if (pVBInfo->LCDInfo & LCDVESATiming) {
+ if (pVBInfo->LCDInfo & XGI_LCDVESATiming) {
if (yres == 360)
yres = 375;
}
}
- if (pVBInfo->LCDResInfo == Panel1024x768) {
- if (!(pVBInfo->LCDInfo & LCDVESATiming)) {
+ if (pVBInfo->LCDResInfo == Panel_1024x768) {
+ if (!(pVBInfo->LCDInfo & XGI_LCDVESATiming)) {
if (!(pVBInfo->LCDInfo
& LCDNonExpanding)) {
if (yres == 350)
@@ -3848,7 +3848,7 @@ static void XGI_GetCRT2ResInfo(unsigned short ModeNo,
static unsigned char XGI_IsLCDDualLink(struct vb_device_info *pVBInfo)
{
- if ((pVBInfo->VBInfo & (SetCRT2ToLCD | SetCRT2ToLCDA)) &&
+ if ((pVBInfo->VBInfo & (SetCRT2ToLCD | XGI_SetCRT2ToLCDA)) &&
(pVBInfo->LCDInfo & SetLCDDualLink)) /* shampoo0129 */
return 1;
@@ -3918,8 +3918,8 @@ static void XGI_GetCRT2Data(unsigned short ModeNo, unsigned short ModeIdIndex,
{
unsigned short tempax = 0, tempbx, modeflag, resinfo;
- struct XGI_LCDDataStruct *LCDPtr = NULL;
- struct XGI_TVDataStruct *TVPtr = NULL;
+ struct SiS_LCDData *LCDPtr = NULL;
+ struct SiS_TVData *TVPtr = NULL;
if (ModeNo <= 0x13) {
/* si+St_ResInfo */
@@ -3942,8 +3942,8 @@ static void XGI_GetCRT2Data(unsigned short ModeNo, unsigned short ModeIdIndex,
tempbx = 4;
- if (pVBInfo->VBInfo & (SetCRT2ToLCD | SetCRT2ToLCDA)) {
- LCDPtr = (struct XGI_LCDDataStruct *) XGI_GetLcdPtr(tempbx,
+ if (pVBInfo->VBInfo & (SetCRT2ToLCD | XGI_SetCRT2ToLCDA)) {
+ LCDPtr = (struct SiS_LCDData *) XGI_GetLcdPtr(tempbx,
ModeNo, ModeIdIndex, RefreshRateTableIndex,
pVBInfo);
@@ -3954,11 +3954,11 @@ static void XGI_GetCRT2Data(unsigned short ModeNo, unsigned short ModeIdIndex,
pVBInfo->HT = LCDPtr->LCDHT;
pVBInfo->VT = LCDPtr->LCDVT;
- if (pVBInfo->LCDResInfo == Panel1024x768) {
+ if (pVBInfo->LCDResInfo == Panel_1024x768) {
tempax = 1024;
tempbx = 768;
- if (!(pVBInfo->LCDInfo & LCDVESATiming)) {
+ if (!(pVBInfo->LCDInfo & XGI_LCDVESATiming)) {
if (pVBInfo->VGAVDE == 357)
tempbx = 527;
else if (pVBInfo->VGAVDE == 420)
@@ -3971,10 +3971,10 @@ static void XGI_GetCRT2Data(unsigned short ModeNo, unsigned short ModeIdIndex,
tempbx = 768;
} else
tempbx = 768;
- } else if (pVBInfo->LCDResInfo == Panel1024x768x75) {
+ } else if (pVBInfo->LCDResInfo == Panel_1024x768x75) {
tempax = 1024;
tempbx = 768;
- } else if (pVBInfo->LCDResInfo == Panel1280x1024) {
+ } else if (pVBInfo->LCDResInfo == Panel_1280x1024) {
tempax = 1280;
if (pVBInfo->VGAVDE == 360)
tempbx = 768;
@@ -3984,10 +3984,10 @@ static void XGI_GetCRT2Data(unsigned short ModeNo, unsigned short ModeIdIndex,
tempbx = 864;
else
tempbx = 1024;
- } else if (pVBInfo->LCDResInfo == Panel1280x1024x75) {
+ } else if (pVBInfo->LCDResInfo == Panel_1280x1024x75) {
tempax = 1280;
tempbx = 1024;
- } else if (pVBInfo->LCDResInfo == Panel1280x960) {
+ } else if (pVBInfo->LCDResInfo == Panel_1280x960) {
tempax = 1280;
if (pVBInfo->VGAVDE == 350)
tempbx = 700;
@@ -3997,7 +3997,7 @@ static void XGI_GetCRT2Data(unsigned short ModeNo, unsigned short ModeIdIndex,
tempbx = 960;
else
tempbx = 960;
- } else if (pVBInfo->LCDResInfo == Panel1400x1050) {
+ } else if (pVBInfo->LCDResInfo == Panel_1400x1050) {
tempax = 1400;
tempbx = 1050;
@@ -4005,10 +4005,10 @@ static void XGI_GetCRT2Data(unsigned short ModeNo, unsigned short ModeIdIndex,
tempax = 1280;
tempbx = 1024;
}
- } else if (pVBInfo->LCDResInfo == Panel1600x1200) {
+ } else if (pVBInfo->LCDResInfo == Panel_1600x1200) {
tempax = 1600;
tempbx = 1200; /* alan 10/14/2003 */
- if (!(pVBInfo->LCDInfo & LCDVESATiming)) {
+ if (!(pVBInfo->LCDInfo & XGI_LCDVESATiming)) {
if (pVBInfo->VGAVDE == 350)
tempbx = 875;
else if (pVBInfo->VGAVDE == 400)
@@ -4028,7 +4028,7 @@ static void XGI_GetCRT2Data(unsigned short ModeNo, unsigned short ModeIdIndex,
if (pVBInfo->VBInfo & (SetCRT2ToTV)) {
tempbx = 4;
- TVPtr = (struct XGI_TVDataStruct *) XGI_GetTVPtr(tempbx,
+ TVPtr = (struct SiS_TVData *) XGI_GetTVPtr(tempbx,
ModeNo, ModeIdIndex, RefreshRateTableIndex,
pVBInfo);
@@ -4041,7 +4041,7 @@ static void XGI_GetCRT2Data(unsigned short ModeNo, unsigned short ModeIdIndex,
pVBInfo->RVBHRS = TVPtr->RVBHRS;
pVBInfo->NewFlickerMode = TVPtr->FlickerMode;
- if (pVBInfo->VBInfo & SetCRT2ToHiVisionTV) {
+ if (pVBInfo->VBInfo & SetCRT2ToHiVision) {
if (resinfo == 0x08)
pVBInfo->NewFlickerMode = 0x40;
else if (resinfo == 0x09)
@@ -4066,16 +4066,16 @@ static void XGI_GetCRT2Data(unsigned short ModeNo, unsigned short ModeIdIndex,
}
}
}
- } else if (pVBInfo->VBInfo & SetCRT2ToYPbPr) {
- if (pVBInfo->TVInfo & SetYPbPrMode750p) {
+ } else if (pVBInfo->VBInfo & SetCRT2ToYPbPr525750) {
+ if (pVBInfo->TVInfo & TVSetYPbPr750p) {
tempax = YPbPrTV750pHT; /* Ext750pTVHT */
tempbx = YPbPrTV750pVT; /* Ext750pTVVT */
}
- if (pVBInfo->TVInfo & SetYPbPrMode525p) {
+ if (pVBInfo->TVInfo & TVSetYPbPr525p) {
tempax = YPbPrTV525pHT; /* Ext525pTVHT */
tempbx = YPbPrTV525pVT; /* Ext525pTVVT */
- } else if (pVBInfo->TVInfo & SetYPbPrMode525i) {
+ } else if (pVBInfo->TVInfo & TVSetYPbPr525i) {
tempax = YPbPrTV525iHT; /* Ext525iTVHT */
tempbx = YPbPrTV525iVT; /* Ext525iTVVT */
if (pVBInfo->TVInfo & NTSC1024x768)
@@ -4084,7 +4084,7 @@ static void XGI_GetCRT2Data(unsigned short ModeNo, unsigned short ModeIdIndex,
} else {
tempax = PALHT;
tempbx = PALVT;
- if (!(pVBInfo->TVInfo & SetPALTV)) {
+ if (!(pVBInfo->TVInfo & TVSetPAL)) {
tempax = NTSCHT;
tempbx = NTSCVT;
if (pVBInfo->TVInfo & NTSC1024x768)
@@ -4109,7 +4109,7 @@ static void XGI_SetCRT2VCLK(unsigned short ModeNo, unsigned short ModeIdIndex,
XGI_GetVCLKLen(tempal, &di_0, &di_1, pVBInfo);
XGI_GetLCDVCLKPtr(&di_0, &di_1, pVBInfo);
- if (pVBInfo->VBType & VB_XGI301) { /* shampoo 0129 */
+ if (pVBInfo->VBType & VB_SIS301) { /* shampoo 0129 */
/* 301 */
xgifb_reg_set(pVBInfo->Part4Port, 0x0A, 0x10);
xgifb_reg_set(pVBInfo->Part4Port, 0x0B, di_1);
@@ -4139,7 +4139,7 @@ static unsigned short XGI_GetColorDepth(unsigned short ModeNo,
else
modeflag = pVBInfo->EModeIDTable[ModeIdIndex].Ext_ModeFlag;
- index = (modeflag & ModeInfoFlag) - ModeEGA;
+ index = (modeflag & ModeTypeMask) - ModeEGA;
if (index < 0)
index = 0;
@@ -4435,7 +4435,7 @@ static void XGI_SetLockRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
xgifb_reg_set(pVBInfo->Part1Port, 0x03, temp);
tempcx = 0x08;
- if (pVBInfo->VBType & (VB_XGI301LV | VB_XGI302LV | VB_XGI301C))
+ if (pVBInfo->VBType & (VB_SIS301LV | VB_SIS302LV | VB_XGI301C))
modeflag |= Charx8Dot;
tempax = pVBInfo->VGAHDE; /* 0x04 Horizontal Display End */
@@ -4451,12 +4451,12 @@ static void XGI_SetLockRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
temp = (tempbx & 0xFF00) >> 8;
if (pVBInfo->VBInfo & SetCRT2ToTV) {
- if (!(pVBInfo->VBType & (VB_XGI301B | VB_XGI302B | VB_XGI301LV
- | VB_XGI302LV | VB_XGI301C)))
+ if (!(pVBInfo->VBType & (VB_SIS301B | VB_SIS302B | VB_SIS301LV
+ | VB_SIS302LV | VB_XGI301C)))
temp += 2;
- if (pVBInfo->VBInfo & SetCRT2ToHiVisionTV) {
- if (pVBInfo->VBType & VB_XGI301LV) {
+ if (pVBInfo->VBInfo & SetCRT2ToHiVision) {
+ if (pVBInfo->VBType & VB_SIS301LV) {
if (pVBInfo->VBExtInfo == VB_YPbPr1080i) {
if (resinfo == 7)
temp -= 2;
@@ -4487,7 +4487,7 @@ static void XGI_SetLockRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
tempax = (tempax / tempcx) - 5;
tempcx = tempax; /* 20030401 0x07 horizontal Retrace Start */
- if (pVBInfo->VBInfo & SetCRT2ToHiVisionTV) {
+ if (pVBInfo->VBInfo & SetCRT2ToHiVision) {
temp = (tempbx & 0x00FF) - 1;
if (!(modeflag & HalfDCLK)) {
temp -= 6;
@@ -4513,19 +4513,19 @@ static void XGI_SetLockRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
}
} else if (!(modeflag & HalfDCLK)) {
temp -= 4;
- if (pVBInfo->LCDResInfo != Panel1280x960 &&
+ if (pVBInfo->LCDResInfo != Panel_1280x960 &&
pVBInfo->VGAHDE >= 800) {
temp -= 7;
if (pVBInfo->ModeType == ModeEGA &&
pVBInfo->VGAVDE == 1024) {
temp += 15;
if (pVBInfo->LCDResInfo !=
- Panel1280x1024)
+ Panel_1280x1024)
temp += 7;
}
if (pVBInfo->VGAHDE >= 1280 &&
- pVBInfo->LCDResInfo != Panel1280x960 &&
+ pVBInfo->LCDResInfo != Panel_1280x960 &&
(pVBInfo->LCDInfo & LCDNonExpanding))
temp += 28;
}
@@ -4619,8 +4619,8 @@ static void XGI_SetLockRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
push2 = tempbx;
if (pVBInfo->VBInfo & SetCRT2ToLCD) {
- if (pVBInfo->LCDResInfo == Panel1024x768) {
- if (!(pVBInfo->LCDInfo & LCDVESATiming)) {
+ if (pVBInfo->LCDResInfo == Panel_1024x768) {
+ if (!(pVBInfo->LCDInfo & XGI_LCDVESATiming)) {
if (tempbx == 350)
tempbx += 5;
if (tempbx == 480)
@@ -4669,19 +4669,19 @@ static void XGI_SetLockRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
tempbx += tempax;
}
- if (pVBInfo->VBInfo & SetCRT2ToHiVisionTV) {
- if (pVBInfo->VBType & VB_XGI301LV) {
- if (pVBInfo->TVInfo & SetYPbPrMode1080i) {
+ if (pVBInfo->VBInfo & SetCRT2ToHiVision) {
+ if (pVBInfo->VBType & VB_SIS301LV) {
+ if (pVBInfo->TVInfo & TVSetHiVision) {
tempbx -= 10;
} else {
if (pVBInfo->TVInfo & TVSimuMode) {
- if (pVBInfo->TVInfo & SetPALTV) {
+ if (pVBInfo->TVInfo & TVSetPAL) {
if (pVBInfo->VBType &
- VB_XGI301LV) {
+ VB_SIS301LV) {
if (!(pVBInfo->TVInfo &
- (SetYPbPrMode525p |
- SetYPbPrMode750p |
- SetYPbPrMode1080i)))
+ (TVSetYPbPr525p |
+ TVSetYPbPr750p |
+ TVSetHiVision)))
tempbx += 40;
} else {
tempbx += 40;
@@ -4694,12 +4694,12 @@ static void XGI_SetLockRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
}
} else {
if (pVBInfo->TVInfo & TVSimuMode) {
- if (pVBInfo->TVInfo & SetPALTV) {
- if (pVBInfo->VBType & VB_XGI301LV) {
+ if (pVBInfo->TVInfo & TVSetPAL) {
+ if (pVBInfo->VBType & VB_SIS301LV) {
if (!(pVBInfo->TVInfo &
- (SetYPbPrMode525p |
- SetYPbPrMode750p |
- SetYPbPrMode1080i)))
+ (TVSetYPbPr525p |
+ TVSetYPbPr750p |
+ TVSetHiVision)))
tempbx += 40;
} else {
tempbx += 40;
@@ -4713,7 +4713,7 @@ static void XGI_SetLockRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
tempax += tempbx;
push1 = tempax; /* push ax */
- if ((pVBInfo->TVInfo & SetPALTV)) {
+ if ((pVBInfo->TVInfo & TVSetPAL)) {
if (tempbx <= 513) {
if (tempax >= 513)
tempbx = 513;
@@ -4761,7 +4761,7 @@ static void XGI_SetLockRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
temp = (temp >> 1) & 0x09;
- if (pVBInfo->VBType & (VB_XGI301LV | VB_XGI302LV | VB_XGI301C))
+ if (pVBInfo->VBType & (VB_SIS301LV | VB_SIS302LV | VB_XGI301C))
temp |= 0x01;
xgifb_reg_set(pVBInfo->Part1Port, 0x16, temp); /* 0x16 SR01 */
@@ -4813,13 +4813,13 @@ static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex,
if (pVBInfo->VBInfo & SetCRT2ToSCART)
tempax |= 0x0200;
- if (!(pVBInfo->TVInfo & SetPALTV))
+ if (!(pVBInfo->TVInfo & TVSetPAL))
tempax |= 0x1000;
- if (pVBInfo->VBInfo & SetCRT2ToHiVisionTV)
+ if (pVBInfo->VBInfo & SetCRT2ToHiVision)
tempax |= 0x0100;
- if (pVBInfo->TVInfo & (SetYPbPrMode525p | SetYPbPrMode750p))
+ if (pVBInfo->TVInfo & (TVSetYPbPr525p | TVSetYPbPr750p))
tempax &= 0xfe00;
tempax = (tempax & 0xff00) >> 8;
@@ -4827,10 +4827,10 @@ static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex,
xgifb_reg_set(pVBInfo->Part2Port, 0x0, tempax);
TimingPoint = pVBInfo->NTSCTiming;
- if (pVBInfo->TVInfo & SetPALTV)
+ if (pVBInfo->TVInfo & TVSetPAL)
TimingPoint = pVBInfo->PALTiming;
- if (pVBInfo->VBInfo & SetCRT2ToHiVisionTV) {
+ if (pVBInfo->VBInfo & SetCRT2ToHiVision) {
TimingPoint = pVBInfo->HiTVExtTiming;
if (pVBInfo->VBInfo & SetInSlaveMode)
@@ -4843,14 +4843,14 @@ static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex,
TimingPoint = pVBInfo->HiTVTextTiming;
}
- if (pVBInfo->VBInfo & SetCRT2ToYPbPr) {
- if (pVBInfo->TVInfo & SetYPbPrMode525i)
+ if (pVBInfo->VBInfo & SetCRT2ToYPbPr525750) {
+ if (pVBInfo->TVInfo & TVSetYPbPr525i)
TimingPoint = pVBInfo->YPbPr525iTiming;
- if (pVBInfo->TVInfo & SetYPbPrMode525p)
+ if (pVBInfo->TVInfo & TVSetYPbPr525p)
TimingPoint = pVBInfo->YPbPr525pTiming;
- if (pVBInfo->TVInfo & SetYPbPrMode750p)
+ if (pVBInfo->TVInfo & TVSetYPbPr750p)
TimingPoint = pVBInfo->YPbPr750pTiming;
}
@@ -4868,10 +4868,10 @@ static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex,
temp &= 0x80;
xgifb_reg_and_or(pVBInfo->Part2Port, 0x0A, 0xFF, temp);
- if (pVBInfo->VBInfo & SetCRT2ToHiVisionTV)
+ if (pVBInfo->VBInfo & SetCRT2ToHiVision)
tempax = 950;
- if (pVBInfo->TVInfo & SetPALTV)
+ if (pVBInfo->TVInfo & TVSetPAL)
tempax = 520;
else
tempax = 440;
@@ -4884,15 +4884,15 @@ static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex,
temp = (tempax & 0xFF00) >> 8;
temp += (unsigned short) TimingPoint[0];
- if (pVBInfo->VBType & (VB_XGI301B | VB_XGI302B | VB_XGI301LV
- | VB_XGI302LV | VB_XGI301C)) {
+ if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B | VB_SIS301LV
+ | VB_SIS302LV | VB_XGI301C)) {
if (pVBInfo->VBInfo & (SetCRT2ToAVIDEO
| SetCRT2ToSVIDEO | SetCRT2ToSCART
- | SetCRT2ToYPbPr)) {
+ | SetCRT2ToYPbPr525750)) {
tempcx = pVBInfo->VGAHDE;
if (tempcx >= 1024) {
temp = 0x17; /* NTSC */
- if (pVBInfo->TVInfo & SetPALTV)
+ if (pVBInfo->TVInfo & TVSetPAL)
temp = 0x19; /* PAL */
}
}
@@ -4903,15 +4903,15 @@ static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex,
temp = (tempax & 0xFF00) >> 8;
temp += TimingPoint[1];
- if (pVBInfo->VBType & (VB_XGI301B | VB_XGI302B | VB_XGI301LV
- | VB_XGI302LV | VB_XGI301C)) {
+ if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B | VB_SIS301LV
+ | VB_SIS302LV | VB_XGI301C)) {
if ((pVBInfo->VBInfo & (SetCRT2ToAVIDEO
| SetCRT2ToSVIDEO | SetCRT2ToSCART
- | SetCRT2ToYPbPr))) {
+ | SetCRT2ToYPbPr525750))) {
tempcx = pVBInfo->VGAHDE;
if (tempcx >= 1024) {
temp = 0x1D; /* NTSC */
- if (pVBInfo->TVInfo & SetPALTV)
+ if (pVBInfo->TVInfo & TVSetPAL)
temp = 0x52; /* PAL */
}
}
@@ -4936,7 +4936,7 @@ static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex,
push1 = tempcx; /* push cx */
tempcx += 7;
- if (pVBInfo->VBInfo & SetCRT2ToHiVisionTV)
+ if (pVBInfo->VBInfo & SetCRT2ToHiVision)
tempcx -= 4;
temp = tempcx & 0x00FF;
@@ -4954,7 +4954,7 @@ static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex,
tempbx = push2;
tempbx = tempbx + 8;
- if (pVBInfo->VBInfo & SetCRT2ToHiVisionTV) {
+ if (pVBInfo->VBInfo & SetCRT2ToHiVision) {
tempbx = tempbx - 4;
tempcx = tempbx;
}
@@ -4970,7 +4970,7 @@ static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex,
xgifb_reg_and_or(pVBInfo->Part2Port, 0x28, 0x0F, temp);
tempcx += 8;
- if (pVBInfo->VBInfo & SetCRT2ToHiVisionTV)
+ if (pVBInfo->VBInfo & SetCRT2ToHiVision)
tempcx -= 4;
temp = tempcx & 0xFF;
@@ -5005,9 +5005,9 @@ static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex,
if (pVBInfo->VBInfo & SetCRT2ToTV) {
if (pVBInfo->VBType &
- (VB_XGI301LV | VB_XGI302LV | VB_XGI301C)) {
+ (VB_SIS301LV | VB_SIS302LV | VB_XGI301C)) {
if (!(pVBInfo->TVInfo &
- (SetYPbPrMode525p | SetYPbPrMode750p)))
+ (TVSetYPbPr525p | TVSetYPbPr750p)))
tempbx = tempbx >> 1;
} else
tempbx = tempbx >> 1;
@@ -5016,9 +5016,9 @@ static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex,
tempbx -= 2;
temp = tempbx & 0x00FF;
- if (pVBInfo->VBInfo & SetCRT2ToHiVisionTV) {
- if (pVBInfo->VBType & VB_XGI301LV) {
- if (pVBInfo->TVInfo & SetYPbPrMode1080i) {
+ if (pVBInfo->VBInfo & SetCRT2ToHiVision) {
+ if (pVBInfo->VBType & VB_SIS301LV) {
+ if (pVBInfo->TVInfo & TVSetHiVision) {
if (pVBInfo->VBInfo & SetInSlaveMode) {
if (ModeNo == 0x2f)
temp += 1;
@@ -5037,9 +5037,9 @@ static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex,
temp = (tempcx & 0xFF00) >> 8;
temp |= ((tempbx & 0xFF00) >> 8) << 6;
- if (!(pVBInfo->VBInfo & SetCRT2ToHiVisionTV)) {
- if (pVBInfo->VBType & VB_XGI301LV) {
- if (pVBInfo->TVInfo & SetYPbPrMode1080i) {
+ if (!(pVBInfo->VBInfo & SetCRT2ToHiVision)) {
+ if (pVBInfo->VBType & VB_SIS301LV) {
+ if (pVBInfo->TVInfo & TVSetHiVision) {
temp |= 0x10;
if (!(pVBInfo->VBInfo & SetCRT2ToSVIDEO))
@@ -5054,18 +5054,18 @@ static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex,
xgifb_reg_set(pVBInfo->Part2Port, 0x30, temp);
- if (pVBInfo->VBType & (VB_XGI301B | VB_XGI302B | VB_XGI301LV
- | VB_XGI302LV | VB_XGI301C)) { /* TV gatingno */
+ if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B | VB_SIS301LV
+ | VB_SIS302LV | VB_XGI301C)) { /* TV gatingno */
tempbx = pVBInfo->VDE;
tempcx = tempbx - 2;
if (pVBInfo->VBInfo & SetCRT2ToTV) {
- if (!(pVBInfo->TVInfo & (SetYPbPrMode525p
- | SetYPbPrMode750p)))
+ if (!(pVBInfo->TVInfo & (TVSetYPbPr525p
+ | TVSetYPbPr750p)))
tempbx = tempbx >> 1;
}
- if (pVBInfo->VBType & (VB_XGI302LV | VB_XGI301C)) {
+ if (pVBInfo->VBType & (VB_SIS302LV | VB_XGI301C)) {
temp = 0;
if (tempcx & 0x0400)
temp |= 0x20;
@@ -5118,8 +5118,8 @@ static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex,
/* 301b */
tempecx = 8 * 1024;
- if (pVBInfo->VBType & (VB_XGI301B | VB_XGI302B | VB_XGI301LV
- | VB_XGI302LV | VB_XGI301C)) {
+ if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B | VB_SIS301LV
+ | VB_SIS302LV | VB_XGI301C)) {
tempecx = tempecx * 8;
}
@@ -5133,8 +5133,8 @@ static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex,
tempax = (unsigned short) tempeax;
/* 301b */
- if (pVBInfo->VBType & (VB_XGI301B | VB_XGI302B | VB_XGI301LV
- | VB_XGI302LV | VB_XGI301C)) {
+ if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B | VB_SIS301LV
+ | VB_SIS302LV | VB_XGI301C)) {
tempcx = ((tempax & 0xFF00) >> 5) >> 8;
}
/* end 301b */
@@ -5161,7 +5161,7 @@ static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex,
temp |= 0x18;
xgifb_reg_and_or(pVBInfo->Part2Port, 0x46, ~0x1F, temp);
- if (pVBInfo->TVInfo & SetPALTV) {
+ if (pVBInfo->TVInfo & TVSetPAL) {
tempbx = 0x0382;
tempcx = 0x007e;
} else {
@@ -5178,13 +5178,13 @@ static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex,
temp = temp << 2;
temp |= ((tempbx & 0xFF00) >> 8) & 0x03;
- if (pVBInfo->VBInfo & SetCRT2ToYPbPr) {
+ if (pVBInfo->VBInfo & SetCRT2ToYPbPr525750) {
temp |= 0x10;
- if (pVBInfo->TVInfo & SetYPbPrMode525p)
+ if (pVBInfo->TVInfo & TVSetYPbPr525p)
temp |= 0x20;
- if (pVBInfo->TVInfo & SetYPbPrMode750p)
+ if (pVBInfo->TVInfo & TVSetYPbPr750p)
temp |= 0x60;
}
@@ -5192,7 +5192,7 @@ static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex,
temp = xgifb_reg_get(pVBInfo->Part2Port, 0x43); /* 301b change */
xgifb_reg_set(pVBInfo->Part2Port, 0x43, (unsigned short) (temp - 3));
- if (!(pVBInfo->TVInfo & (SetYPbPrMode525p | SetYPbPrMode750p))) {
+ if (!(pVBInfo->TVInfo & (TVSetYPbPr525p | TVSetYPbPr750p))) {
if (pVBInfo->TVInfo & NTSC1024x768) {
TimingPoint = XGI_NTSC1024AdjTime;
for (i = 0x1c, j = 0; i <= 0x30; i++, j++) {
@@ -5205,12 +5205,12 @@ static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex,
/* [ycchen] 01/14/03 Modify for 301C PALM Support */
if (pVBInfo->VBType & VB_XGI301C) {
- if (pVBInfo->TVInfo & SetPALMTV)
+ if (pVBInfo->TVInfo & TVSetPALM)
xgifb_reg_and_or(pVBInfo->Part2Port, 0x4E, ~0x08,
0x08); /* PALM Mode */
}
- if (pVBInfo->TVInfo & SetPALMTV) {
+ if (pVBInfo->TVInfo & TVSetPALM) {
tempax = (unsigned char) xgifb_reg_get(pVBInfo->Part2Port,
0x01);
tempax--;
@@ -5219,7 +5219,7 @@ static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex,
xgifb_reg_and(pVBInfo->Part2Port, 0x00, 0xEF);
}
- if (pVBInfo->VBInfo & SetCRT2ToHiVisionTV) {
+ if (pVBInfo->VBInfo & SetCRT2ToHiVision) {
if (!(pVBInfo->VBInfo & SetInSlaveMode))
xgifb_reg_set(pVBInfo->Part2Port, 0x0B, 0x00);
}
@@ -5267,11 +5267,11 @@ static void XGI_SetLCDRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
xgifb_reg_and_or(pVBInfo->Part2Port, 0x2B, 0x0F, temp);
temp = 0x01;
- if (pVBInfo->LCDResInfo == Panel1280x1024) {
+ if (pVBInfo->LCDResInfo == Panel_1280x1024) {
if (pVBInfo->ModeType == ModeEGA) {
if (pVBInfo->VGAHDE >= 1024) {
temp = 0x02;
- if (pVBInfo->LCDInfo & LCDVESATiming)
+ if (pVBInfo->LCDInfo & XGI_LCDVESATiming)
temp = 0x01;
}
}
@@ -5305,14 +5305,14 @@ static void XGI_SetLCDRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
tempah = pVBInfo->LCDResInfo;
tempah &= PanelResInfo;
- if ((tempah == Panel1024x768) || (tempah == Panel1024x768x75)) {
+ if ((tempah == Panel_1024x768) || (tempah == Panel_1024x768x75)) {
tempbx = 1024;
tempcx = 768;
- } else if ((tempah == Panel1280x1024) ||
- (tempah == Panel1280x1024x75)) {
+ } else if ((tempah == Panel_1280x1024) ||
+ (tempah == Panel_1280x1024x75)) {
tempbx = 1280;
tempcx = 1024;
- } else if (tempah == Panel1400x1050) {
+ } else if (tempah == Panel_1400x1050) {
tempbx = 1400;
tempcx = 1050;
} else {
@@ -5375,7 +5375,7 @@ static void XGI_SetLCDRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
tempcx = tempcx >> 1;
}
- if (pVBInfo->VBType & VB_XGI302LV)
+ if (pVBInfo->VBType & VB_SIS302LV)
tempbx += 1;
if (pVBInfo->VBType & VB_XGI301C) /* tap4 */
@@ -5405,7 +5405,7 @@ static void XGI_SetLCDRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
tempcx = tempcx >> 1;
}
- if (pVBInfo->VBType & VB_XGI302LV)
+ if (pVBInfo->VBType & VB_SIS302LV)
tempbx += 1;
tempcx += tempbx;
@@ -5422,10 +5422,10 @@ static void XGI_SetLCDRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
temp = tempcx & 0x00FF; /* RHSYEXP2S=lcdhre */
xgifb_reg_set(pVBInfo->Part2Port, 0x21, temp);
- if (!(pVBInfo->LCDInfo & LCDVESATiming)) {
+ if (!(pVBInfo->LCDInfo & XGI_LCDVESATiming)) {
if (pVBInfo->VGAVDE == 525) {
- if (pVBInfo->VBType & (VB_XGI301B | VB_XGI302B
- | VB_XGI301LV | VB_XGI302LV
+ if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B
+ | VB_SIS301LV | VB_SIS302LV
| VB_XGI301C)) {
temp = 0xC6;
} else
@@ -5436,8 +5436,8 @@ static void XGI_SetLCDRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
}
if (pVBInfo->VGAVDE == 420) {
- if (pVBInfo->VBType & (VB_XGI301B | VB_XGI302B
- | VB_XGI301LV | VB_XGI302LV
+ if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B
+ | VB_SIS301LV | VB_SIS302LV
| VB_XGI301C)) {
temp = 0x4F;
} else
@@ -5473,18 +5473,18 @@ static struct XGI301C_Tap4TimingStruct *XGI_GetTap4Ptr(unsigned short tempcx,
else
Tap4TimingPtr = xgifb_ntsc_525_tap4_timing; /* NTSC */
- if (pVBInfo->TVInfo & SetPALTV)
+ if (pVBInfo->TVInfo & TVSetPAL)
Tap4TimingPtr = PALTap4Timing;
- if (pVBInfo->VBInfo & SetCRT2ToYPbPr) {
- if ((pVBInfo->TVInfo & SetYPbPrMode525i) ||
- (pVBInfo->TVInfo & SetYPbPrMode525p))
+ if (pVBInfo->VBInfo & SetCRT2ToYPbPr525750) {
+ if ((pVBInfo->TVInfo & TVSetYPbPr525i) ||
+ (pVBInfo->TVInfo & TVSetYPbPr525p))
Tap4TimingPtr = xgifb_ntsc_525_tap4_timing;
- if (pVBInfo->TVInfo & SetYPbPrMode750p)
+ if (pVBInfo->TVInfo & TVSetYPbPr750p)
Tap4TimingPtr = YPbPr750pTap4Timing;
}
- if (pVBInfo->VBInfo & SetCRT2ToHiVisionTV)
+ if (pVBInfo->VBInfo & SetCRT2ToHiVision)
Tap4TimingPtr = xgifb_tap4_timing;
i = 0;
@@ -5510,7 +5510,7 @@ static void XGI_SetTap4Regs(struct vb_device_info *pVBInfo)
xgifb_reg_set(pVBInfo->Part2Port, i, Tap4TimingPtr->Reg[j]);
if ((pVBInfo->VBInfo & SetCRT2ToTV) &&
- (!(pVBInfo->VBInfo & SetCRT2ToHiVisionTV))) {
+ (!(pVBInfo->VBInfo & SetCRT2ToHiVision))) {
/* Set Vertical Scaling */
Tap4TimingPtr = XGI_GetTap4Ptr(1, pVBInfo);
for (i = 0xC0, j = 0; i < 0xFF; i++, j++)
@@ -5520,7 +5520,7 @@ static void XGI_SetTap4Regs(struct vb_device_info *pVBInfo)
}
if ((pVBInfo->VBInfo & SetCRT2ToTV) &&
- (!(pVBInfo->VBInfo & SetCRT2ToHiVisionTV)))
+ (!(pVBInfo->VBInfo & SetCRT2ToHiVision)))
/* Enable V.Scaling */
xgifb_reg_and_or(pVBInfo->Part2Port, 0x4E, ~0x14, 0x04);
else
@@ -5543,7 +5543,7 @@ static void XGI_SetGroup3(unsigned short ModeNo, unsigned short ModeIdIndex,
modeflag = pVBInfo->EModeIDTable[ModeIdIndex].Ext_ModeFlag;
xgifb_reg_set(pVBInfo->Part3Port, 0x00, 0x00);
- if (pVBInfo->TVInfo & SetPALTV) {
+ if (pVBInfo->TVInfo & TVSetPAL) {
xgifb_reg_set(pVBInfo->Part3Port, 0x13, 0xFA);
xgifb_reg_set(pVBInfo->Part3Port, 0x14, 0xC8);
} else {
@@ -5554,15 +5554,15 @@ static void XGI_SetGroup3(unsigned short ModeNo, unsigned short ModeIdIndex,
if (!(pVBInfo->VBInfo & SetCRT2ToTV))
return;
- if (pVBInfo->TVInfo & SetPALMTV) {
+ if (pVBInfo->TVInfo & TVSetPALM) {
xgifb_reg_set(pVBInfo->Part3Port, 0x13, 0xFA);
xgifb_reg_set(pVBInfo->Part3Port, 0x14, 0xC8);
xgifb_reg_set(pVBInfo->Part3Port, 0x3D, 0xA8);
}
- if ((pVBInfo->VBInfo & SetCRT2ToHiVisionTV) || (pVBInfo->VBInfo
- & SetCRT2ToYPbPr)) {
- if (pVBInfo->TVInfo & SetYPbPrMode525i)
+ if ((pVBInfo->VBInfo & SetCRT2ToHiVision) || (pVBInfo->VBInfo
+ & SetCRT2ToYPbPr525750)) {
+ if (pVBInfo->TVInfo & TVSetYPbPr525i)
return;
tempdi = pVBInfo->HiTVGroup3Data;
@@ -5572,17 +5572,17 @@ static void XGI_SetGroup3(unsigned short ModeNo, unsigned short ModeIdIndex,
tempdi = pVBInfo->HiTVGroup3Text;
}
- if (pVBInfo->TVInfo & SetYPbPrMode525p)
+ if (pVBInfo->TVInfo & TVSetYPbPr525p)
tempdi = pVBInfo->Ren525pGroup3;
- if (pVBInfo->TVInfo & SetYPbPrMode750p)
+ if (pVBInfo->TVInfo & TVSetYPbPr750p)
tempdi = pVBInfo->Ren750pGroup3;
for (i = 0; i <= 0x3E; i++)
xgifb_reg_set(pVBInfo->Part3Port, i, tempdi[i]);
if (pVBInfo->VBType & VB_XGI301C) { /* Macrovision */
- if (pVBInfo->TVInfo & SetYPbPrMode525p)
+ if (pVBInfo->TVInfo & TVSetYPbPr525p)
xgifb_reg_set(pVBInfo->Part3Port, 0x28, 0x3f);
}
}
@@ -5637,7 +5637,7 @@ static void XGI_SetGroup4(unsigned short ModeNo, unsigned short ModeIdIndex,
if (XGI_IsLCDDualLink(pVBInfo))
tempbx = tempbx >> 1;
- if (tempcx & SetCRT2ToHiVisionTV) {
+ if (tempcx & SetCRT2ToHiVision) {
temp = 0;
if (tempbx <= 1024)
temp = 0xA0;
@@ -5656,7 +5656,7 @@ static void XGI_SetGroup4(unsigned short ModeNo, unsigned short ModeIdIndex,
}
}
- if (pVBInfo->TVInfo & (SetYPbPrMode525p | SetYPbPrMode750p)) {
+ if (pVBInfo->TVInfo & (TVSetYPbPr525p | TVSetYPbPr750p)) {
temp = 0x00;
if (pVBInfo->VGAHDE == 1280)
temp = 0x40;
@@ -5667,7 +5667,7 @@ static void XGI_SetGroup4(unsigned short ModeNo, unsigned short ModeIdIndex,
tempebx = pVBInfo->VDE;
- if (tempcx & SetCRT2ToHiVisionTV) {
+ if (tempcx & SetCRT2ToHiVision) {
if (!(temp & 0xE000))
tempbx = tempbx >> 1;
}
@@ -5705,8 +5705,8 @@ static void XGI_SetGroup4(unsigned short ModeNo, unsigned short ModeIdIndex,
xgifb_reg_set(pVBInfo->Part4Port, 0x19, temp);
/* 301b */
- if (pVBInfo->VBType & (VB_XGI301B | VB_XGI302B | VB_XGI301LV
- | VB_XGI302LV | VB_XGI301C)) {
+ if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B | VB_SIS301LV
+ | VB_SIS302LV | VB_XGI301C)) {
temp = 0x0028;
xgifb_reg_set(pVBInfo->Part4Port, 0x1C, temp);
tempax = pVBInfo->VGAHDE;
@@ -5735,7 +5735,7 @@ static void XGI_SetGroup4(unsigned short ModeNo, unsigned short ModeIdIndex,
temp = (tempax & 0x00FF);
xgifb_reg_set(pVBInfo->Part4Port, 0x1D, temp);
- if (pVBInfo->VBInfo & (SetCRT2ToTV | SetCRT2ToHiVisionTV)) {
+ if (pVBInfo->VBInfo & (SetCRT2ToTV | SetCRT2ToHiVision)) {
if (pVBInfo->VGAHDE > 800)
xgifb_reg_or(pVBInfo->Part4Port, 0x1E, 0x08);
@@ -5744,8 +5744,8 @@ static void XGI_SetGroup4(unsigned short ModeNo, unsigned short ModeIdIndex,
if (pVBInfo->VBInfo & SetCRT2ToTV) {
if (!(pVBInfo->TVInfo & (NTSC1024x768
- | SetYPbPrMode525p | SetYPbPrMode750p
- | SetYPbPrMode1080i))) {
+ | TVSetYPbPr525p | TVSetYPbPr750p
+ | TVSetHiVision))) {
temp |= 0x0001;
if ((pVBInfo->VBInfo & SetInSlaveMode)
&& (!(pVBInfo->TVInfo
@@ -5785,7 +5785,7 @@ static void XGI_SetGroup5(unsigned short ModeNo, unsigned short ModeIdIndex,
Pdata = pVBInfo->Part5Port + 1;
if (pVBInfo->ModeType == ModeVGA) {
if (!(pVBInfo->VBInfo & (SetInSlaveMode | LoadDACFlag
- | CRT2DisplayFlag))) {
+ | DisableCRT2Display))) {
XGINew_EnableCRT2(pVBInfo);
}
}
@@ -6074,7 +6074,7 @@ static unsigned char XGI_IsLCDON(struct vb_device_info *pVBInfo)
tempax = pVBInfo->VBInfo;
if (tempax & SetCRT2ToDualEdge)
return 0;
- else if (tempax & (DisableCRT2Display | SwitchToCRT2 | SetSimuScanMode))
+ else if (tempax & (DisableCRT2Display | SwitchCRT2 | SetSimuScanMode))
return 1;
return 0;
@@ -6140,15 +6140,15 @@ static void XGI_DisableBridge(struct xgifb_video_info *xgifb_info,
{
unsigned short tempah = 0;
- if (pVBInfo->VBType & (VB_XGI301B | VB_XGI302B | VB_XGI301LV
- | VB_XGI302LV | VB_XGI301C)) {
+ if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B | VB_SIS301LV
+ | VB_SIS302LV | VB_XGI301C)) {
tempah = 0x3F;
if (!(pVBInfo->VBInfo &
(DisableCRT2Display | SetSimuScanMode))) {
- if (pVBInfo->VBInfo & SetCRT2ToLCDA) {
+ if (pVBInfo->VBInfo & XGI_SetCRT2ToLCDA) {
if (pVBInfo->VBInfo & SetCRT2ToDualEdge) {
tempah = 0x7F; /* Disable Channel A */
- if (!(pVBInfo->VBInfo & SetCRT2ToLCDA))
+ if (!(pVBInfo->VBInfo & XGI_SetCRT2ToLCDA))
/* Disable Channel B */
tempah = 0xBF;
@@ -6166,8 +6166,8 @@ static void XGI_DisableBridge(struct xgifb_video_info *xgifb_info,
/* disable part4_1f */
xgifb_reg_and(pVBInfo->Part4Port, 0x1F, tempah);
- if (pVBInfo->VBType & (VB_XGI302LV | VB_XGI301C)) {
- if (((pVBInfo->VBInfo & (SetCRT2ToLCD | SetCRT2ToLCDA)))
+ if (pVBInfo->VBType & (VB_SIS302LV | VB_XGI301C)) {
+ if (((pVBInfo->VBInfo & (SetCRT2ToLCD | XGI_SetCRT2ToLCDA)))
|| (XGI_DisableChISLCD(pVBInfo))
|| (XGI_IsLCDON(pVBInfo)))
/* LVDS Driver power down */
@@ -6175,16 +6175,16 @@ static void XGI_DisableBridge(struct xgifb_video_info *xgifb_info,
}
if ((pVBInfo->SetFlag & DisableChA) || (pVBInfo->VBInfo
- & (DisableCRT2Display | SetCRT2ToLCDA
+ & (DisableCRT2Display | XGI_SetCRT2ToLCDA
| SetSimuScanMode))) {
if (pVBInfo->SetFlag & GatingCRT)
XGI_EnableGatingCRT(HwDeviceExtension, pVBInfo);
XGI_DisplayOff(xgifb_info, HwDeviceExtension, pVBInfo);
}
- if (pVBInfo->VBInfo & SetCRT2ToLCDA) {
+ if (pVBInfo->VBInfo & XGI_SetCRT2ToLCDA) {
if ((pVBInfo->SetFlag & DisableChA) || (pVBInfo->VBInfo
- & SetCRT2ToLCDA))
+ & XGI_SetCRT2ToLCDA))
/* Power down */
xgifb_reg_and(pVBInfo->Part1Port, 0x1e, 0xdf);
}
@@ -6198,7 +6198,7 @@ static void XGI_DisableBridge(struct xgifb_video_info *xgifb_info,
if ((pVBInfo->SetFlag & DisableChB) ||
(pVBInfo->VBInfo &
(DisableCRT2Display | SetSimuScanMode)) ||
- ((!(pVBInfo->VBInfo & SetCRT2ToLCDA)) &&
+ ((!(pVBInfo->VBInfo & XGI_SetCRT2ToLCDA)) &&
(pVBInfo->VBInfo &
(SetCRT2ToRAMDAC | SetCRT2ToLCD | SetCRT2ToTV))))
xgifb_reg_or(pVBInfo->Part1Port, 0x00, 0x80);
@@ -6206,7 +6206,7 @@ static void XGI_DisableBridge(struct xgifb_video_info *xgifb_info,
if ((pVBInfo->SetFlag & DisableChB) ||
(pVBInfo->VBInfo &
(DisableCRT2Display | SetSimuScanMode)) ||
- (!(pVBInfo->VBInfo & SetCRT2ToLCDA)) ||
+ (!(pVBInfo->VBInfo & XGI_SetCRT2ToLCDA)) ||
(pVBInfo->VBInfo &
(SetCRT2ToRAMDAC | SetCRT2ToLCD | SetCRT2ToTV))) {
/* save Part1 index 0 */
@@ -6227,7 +6227,7 @@ static void XGI_DisableBridge(struct xgifb_video_info *xgifb_info,
xgifb_reg_and(pVBInfo->P3c4, 0x32, 0xDF);
}
- if (pVBInfo->VBInfo & (DisableCRT2Display | SetCRT2ToLCDA
+ if (pVBInfo->VBInfo & (DisableCRT2Display | XGI_SetCRT2ToLCDA
| SetSimuScanMode))
XGI_DisplayOff(xgifb_info, HwDeviceExtension, pVBInfo);
}
@@ -6254,15 +6254,15 @@ static unsigned short XGI_GetTVPtrIndex(struct vb_device_info *pVBInfo)
{
unsigned short tempbx = 0;
- if (pVBInfo->TVInfo & SetPALTV)
+ if (pVBInfo->TVInfo & TVSetPAL)
tempbx = 2;
- if (pVBInfo->TVInfo & SetYPbPrMode1080i)
+ if (pVBInfo->TVInfo & TVSetHiVision)
tempbx = 4;
- if (pVBInfo->TVInfo & SetYPbPrMode525i)
+ if (pVBInfo->TVInfo & TVSetYPbPr525i)
tempbx = 6;
- if (pVBInfo->TVInfo & SetYPbPrMode525p)
+ if (pVBInfo->TVInfo & TVSetYPbPr525p)
tempbx = 8;
- if (pVBInfo->TVInfo & SetYPbPrMode750p)
+ if (pVBInfo->TVInfo & TVSetYPbPr750p)
tempbx = 10;
if (pVBInfo->TVInfo & TVSimuMode)
tempbx++;
@@ -6293,23 +6293,23 @@ static void XGI_GetTVPtrIndex2(unsigned short *tempbx, unsigned char *tempcl,
*tempcl = 0;
*tempch = 0;
- if (pVBInfo->TVInfo & SetPALTV)
+ if (pVBInfo->TVInfo & TVSetPAL)
*tempbx = 1;
- if (pVBInfo->TVInfo & SetPALMTV)
+ if (pVBInfo->TVInfo & TVSetPALM)
*tempbx = 2;
- if (pVBInfo->TVInfo & SetPALNTV)
+ if (pVBInfo->TVInfo & TVSetPALN)
*tempbx = 3;
if (pVBInfo->TVInfo & NTSC1024x768) {
*tempbx = 4;
- if (pVBInfo->TVInfo & SetPALMTV)
+ if (pVBInfo->TVInfo & TVSetPALM)
*tempbx = 5;
}
- if (pVBInfo->VBType & (VB_XGI301B | VB_XGI302B | VB_XGI301LV
- | VB_XGI302LV | VB_XGI301C)) {
+ if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B | VB_SIS301LV
+ | VB_SIS302LV | VB_XGI301C)) {
if ((!(pVBInfo->VBInfo & SetInSlaveMode)) || (pVBInfo->TVInfo
& TVSimuMode)) {
*tempbx += 8;
@@ -6317,8 +6317,8 @@ static void XGI_GetTVPtrIndex2(unsigned short *tempbx, unsigned char *tempcl,
}
}
- if (pVBInfo->VBType & (VB_XGI301B | VB_XGI302B | VB_XGI301LV
- | VB_XGI302LV | VB_XGI301C))
+ if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B | VB_SIS301LV
+ | VB_SIS302LV | VB_XGI301C))
(*tempch)++;
}
@@ -6328,9 +6328,9 @@ static void XGI_SetDelayComp(struct vb_device_info *pVBInfo)
unsigned char tempah, tempbl, tempbh;
- if (pVBInfo->VBType & (VB_XGI301B | VB_XGI302B | VB_XGI301LV
- | VB_XGI302LV | VB_XGI301C)) {
- if (pVBInfo->VBInfo & (SetCRT2ToLCD | SetCRT2ToLCDA
+ if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B | VB_SIS301LV
+ | VB_SIS302LV | VB_XGI301C)) {
+ if (pVBInfo->VBInfo & (SetCRT2ToLCD | XGI_SetCRT2ToLCDA
| SetCRT2ToTV | SetCRT2ToRAMDAC)) {
tempbl = 0;
tempbh = 0;
@@ -6338,20 +6338,20 @@ static void XGI_SetDelayComp(struct vb_device_info *pVBInfo)
index = XGI_GetTVPtrIndex(pVBInfo); /* Get TV Delay */
tempbl = pVBInfo->XGI_TVDelayList[index];
- if (pVBInfo->VBType & (VB_XGI301B | VB_XGI302B
- | VB_XGI301LV | VB_XGI302LV
+ if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B
+ | VB_SIS301LV | VB_SIS302LV
| VB_XGI301C))
tempbl = pVBInfo->XGI_TVDelayList2[index];
if (pVBInfo->VBInfo & SetCRT2ToDualEdge)
tempbl = tempbl >> 4;
- if (pVBInfo->VBInfo & (SetCRT2ToLCD | SetCRT2ToLCDA)) {
+ if (pVBInfo->VBInfo & (SetCRT2ToLCD | XGI_SetCRT2ToLCDA)) {
/* Get LCD Delay */
index = XGI_GetLCDCapPtr(pVBInfo);
tempbh = pVBInfo->LCDCapList[index].
LCD_DelayCompensation;
- if (!(pVBInfo->VBInfo & SetCRT2ToLCDA))
+ if (!(pVBInfo->VBInfo & XGI_SetCRT2ToLCDA))
tempbl = tempbh;
}
@@ -6365,7 +6365,7 @@ static void XGI_SetDelayComp(struct vb_device_info *pVBInfo)
tempah |= tempbl;
}
- if (pVBInfo->VBInfo & SetCRT2ToLCDA) { /* Channel A */
+ if (pVBInfo->VBInfo & XGI_SetCRT2ToLCDA) { /* Channel A */
tempah &= 0x0F;
tempah |= tempbh;
}
@@ -6475,13 +6475,13 @@ static void XGI_SetLCDCap(struct vb_device_info *pVBInfo)
tempcx = pVBInfo->LCDCapList[XGI_GetLCDCapPtr(pVBInfo)].LCD_Capability;
if (pVBInfo->VBType &
- (VB_XGI301B |
- VB_XGI302B |
- VB_XGI301LV |
- VB_XGI302LV |
+ (VB_SIS301B |
+ VB_SIS302B |
+ VB_SIS301LV |
+ VB_SIS302LV |
VB_XGI301C)) { /* 301LV/302LV only */
if (pVBInfo->VBType &
- (VB_XGI301LV | VB_XGI302LV | VB_XGI301C)) {
+ (VB_SIS301LV | VB_SIS302LV | VB_XGI301C)) {
/* Set 301LV Capability */
xgifb_reg_set(pVBInfo->Part4Port, 0x24,
(unsigned char) (tempcx & 0x1F));
@@ -6493,14 +6493,14 @@ static void XGI_SetLCDCap(struct vb_device_info *pVBInfo)
| EnablePLLSPLOW)) >> 8));
}
- if (pVBInfo->VBType & (VB_XGI301B | VB_XGI302B | VB_XGI301LV
- | VB_XGI302LV | VB_XGI301C)) {
+ if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B | VB_SIS301LV
+ | VB_SIS302LV | VB_XGI301C)) {
if (pVBInfo->VBInfo & SetCRT2ToLCD)
XGI_SetLCDCap_B(tempcx, pVBInfo);
- else if (pVBInfo->VBInfo & SetCRT2ToLCDA)
+ else if (pVBInfo->VBInfo & XGI_SetCRT2ToLCDA)
XGI_SetLCDCap_A(tempcx, pVBInfo);
- if (pVBInfo->VBType & (VB_XGI302LV | VB_XGI301C)) {
+ if (pVBInfo->VBType & (VB_SIS302LV | VB_XGI301C)) {
if (tempcx & EnableSpectrum)
SetSpectrum(pVBInfo);
}
@@ -6524,7 +6524,7 @@ static void XGI_SetAntiFlicker(unsigned short ModeNo,
unsigned char tempah;
- if (pVBInfo->TVInfo & (SetYPbPrMode525p | SetYPbPrMode750p))
+ if (pVBInfo->TVInfo & (TVSetYPbPr525p | TVSetYPbPr750p))
return;
tempbx = XGI_GetTVPtrIndex(pVBInfo);
@@ -6648,8 +6648,8 @@ static void XGI_SetYFilter(unsigned short ModeNo, unsigned short ModeIdIndex,
xgifb_reg_set(pVBInfo->Part2Port, 0x38, filterPtr[index++]);
}
- if (pVBInfo->VBType & (VB_XGI301B | VB_XGI302B | VB_XGI301LV
- | VB_XGI302LV | VB_XGI301C)) {
+ if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B | VB_SIS301LV
+ | VB_SIS302LV | VB_XGI301C)) {
xgifb_reg_set(pVBInfo->Part2Port, 0x48, filterPtr[index++]);
xgifb_reg_set(pVBInfo->Part2Port, 0x49, filterPtr[index++]);
xgifb_reg_set(pVBInfo->Part2Port, 0x4A, filterPtr[index++]);
@@ -6668,7 +6668,7 @@ static void XGI_OEM310Setting(unsigned short ModeNo,
{
XGI_SetDelayComp(pVBInfo);
- if (pVBInfo->VBInfo & (SetCRT2ToLCD | SetCRT2ToLCDA))
+ if (pVBInfo->VBInfo & (SetCRT2ToLCD | XGI_SetCRT2ToLCDA))
XGI_SetLCDCap(pVBInfo);
if (pVBInfo->VBInfo & SetCRT2ToTV) {
@@ -6676,7 +6676,7 @@ static void XGI_OEM310Setting(unsigned short ModeNo,
XGI_SetYFilter(ModeNo, ModeIdIndex, pVBInfo);
XGI_SetAntiFlicker(ModeNo, ModeIdIndex, pVBInfo);
- if (pVBInfo->VBType & VB_XGI301)
+ if (pVBInfo->VBType & VB_SIS301)
XGI_SetEdgeEnhance(ModeNo, ModeIdIndex, pVBInfo);
}
}
@@ -6732,15 +6732,15 @@ static void XGI_SetCRT2ModeRegs(unsigned short ModeNo,
tempbl = 0xff;
if (pVBInfo->VBInfo & (SetCRT2ToRAMDAC | SetCRT2ToTV
- | SetCRT2ToLCD | SetCRT2ToLCDA)) {
- if ((pVBInfo->VBInfo & SetCRT2ToLCDA) &&
+ | SetCRT2ToLCD | XGI_SetCRT2ToLCDA)) {
+ if ((pVBInfo->VBInfo & XGI_SetCRT2ToLCDA) &&
(!(pVBInfo->VBInfo & SetSimuScanMode))) {
tempbl &= 0xf7;
tempah |= 0x01;
xgifb_reg_and_or(pVBInfo->Part1Port, 0x2e,
tempbl, tempah);
} else {
- if (pVBInfo->VBInfo & SetCRT2ToLCDA) {
+ if (pVBInfo->VBInfo & XGI_SetCRT2ToLCDA) {
tempbl &= 0xf7;
tempah |= 0x01;
}
@@ -6780,7 +6780,7 @@ static void XGI_SetCRT2ModeRegs(unsigned short ModeNo,
}
if (pVBInfo->VBInfo & (SetCRT2ToRAMDAC | SetCRT2ToTV | SetCRT2ToLCD
- | SetCRT2ToLCDA)) {
+ | XGI_SetCRT2ToLCDA)) {
tempah &= (~0x08);
if ((pVBInfo->ModeType == ModeVGA) && (!(pVBInfo->VBInfo
& SetInSlaveMode))) {
@@ -6807,24 +6807,24 @@ static void XGI_SetCRT2ModeRegs(unsigned short ModeNo,
tempah |= 0x40;
}
- if ((pVBInfo->LCDResInfo == Panel1280x1024)
- || (pVBInfo->LCDResInfo == Panel1280x1024x75))
+ if ((pVBInfo->LCDResInfo == Panel_1280x1024)
+ || (pVBInfo->LCDResInfo == Panel_1280x1024x75))
tempah |= 0x80;
- if (pVBInfo->LCDResInfo == Panel1280x960)
+ if (pVBInfo->LCDResInfo == Panel_1280x960)
tempah |= 0x80;
xgifb_reg_set(pVBInfo->Part4Port, 0x0C, tempah);
}
- if (pVBInfo->VBType & (VB_XGI301B | VB_XGI302B | VB_XGI301LV
- | VB_XGI302LV | VB_XGI301C)) {
+ if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B | VB_SIS301LV
+ | VB_SIS302LV | VB_XGI301C)) {
tempah = 0;
tempbl = 0xfb;
if (pVBInfo->VBInfo & SetCRT2ToDualEdge) {
tempbl = 0xff;
- if (pVBInfo->VBInfo & SetCRT2ToLCDA)
+ if (pVBInfo->VBInfo & XGI_SetCRT2ToLCDA)
tempah |= 0x04; /* shampoo 0129 */
}
@@ -6849,7 +6849,7 @@ static void XGI_SetCRT2ModeRegs(unsigned short ModeNo,
tempah = 0;
tempbl = 0x7f;
- if (!(pVBInfo->VBInfo & SetCRT2ToLCDA)) {
+ if (!(pVBInfo->VBInfo & XGI_SetCRT2ToLCDA)) {
tempbl = 0xff;
if (!(pVBInfo->VBInfo & SetCRT2ToDualEdge))
tempah |= 0x80;
@@ -6857,7 +6857,7 @@ static void XGI_SetCRT2ModeRegs(unsigned short ModeNo,
xgifb_reg_and_or(pVBInfo->Part4Port, 0x23, tempbl, tempah);
- if (pVBInfo->VBType & (VB_XGI302LV | VB_XGI301C)) {
+ if (pVBInfo->VBType & (VB_SIS302LV | VB_XGI301C)) {
if (pVBInfo->LCDInfo & SetLCDDualLink) {
xgifb_reg_or(pVBInfo->Part4Port, 0x27, 0x20);
xgifb_reg_or(pVBInfo->Part4Port, 0x34, 0x10);
@@ -6872,7 +6872,7 @@ static void XGI_CloseCRTC(struct xgi_hw_device_info *HwDeviceExtension,
tempbx = 0;
- if (pVBInfo->VBInfo & SetCRT2ToLCDA)
+ if (pVBInfo->VBInfo & XGI_SetCRT2ToLCDA)
tempbx = 0x08A0;
}
@@ -6937,10 +6937,10 @@ unsigned short XGI_GetRatePtrCRT2(struct xgi_hw_device_info *pXGIHWDE,
index--;
if (pVBInfo->SetFlag & ProgrammingCRT2) {
- if (pVBInfo->VBInfo & (SetCRT2ToLCD | SetCRT2ToLCDA)) {
+ if (pVBInfo->VBInfo & (SetCRT2ToLCD | XGI_SetCRT2ToLCDA)) {
if (pVBInfo->IF_DEF_LVDS == 0) {
- if (pVBInfo->VBType & (VB_XGI301B | VB_XGI302B
- | VB_XGI301LV | VB_XGI302LV
+ if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B
+ | VB_SIS301LV | VB_SIS302LV
| VB_XGI301C))
/* 301b */
temp = LCDARefreshIndex[
@@ -6983,7 +6983,7 @@ unsigned short XGI_GetRatePtrCRT2(struct xgi_hw_device_info *pXGIHWDE,
break;
temp = pVBInfo->RefIndex[RefreshRateTableIndex + i].
Ext_InfoFlag;
- temp &= ModeInfoFlag;
+ temp &= ModeTypeMask;
if (temp < pVBInfo->ModeType)
break;
i++;
@@ -7163,8 +7163,8 @@ static void XGI_EnableBridge(struct xgifb_video_info *xgifb_info,
{
unsigned short tempah;
- if (pVBInfo->VBType & (VB_XGI301B | VB_XGI302B | VB_XGI301LV
- | VB_XGI302LV | VB_XGI301C)) {
+ if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B | VB_SIS301LV
+ | VB_SIS302LV | VB_XGI301C)) {
if (!(pVBInfo->SetFlag & DisableChA)) {
if (pVBInfo->SetFlag & EnableChA) {
/* Power on */
@@ -7207,11 +7207,11 @@ static void XGI_EnableBridge(struct xgifb_video_info *xgifb_info,
|| (!(pVBInfo->VBInfo & DisableCRT2Display))) {
xgifb_reg_and_or(pVBInfo->Part2Port, 0x00, ~0xE0,
0x20); /* shampoo 0129 */
- if (pVBInfo->VBType & (VB_XGI302LV | VB_XGI301C)) {
+ if (pVBInfo->VBType & (VB_SIS302LV | VB_XGI301C)) {
if (!XGI_DisableChISLCD(pVBInfo)) {
if (XGI_EnableChISLCD(pVBInfo) ||
(pVBInfo->VBInfo &
- (SetCRT2ToLCD | SetCRT2ToLCDA)))
+ (SetCRT2ToLCD | XGI_SetCRT2ToLCDA)))
/* LVDS PLL power on */
xgifb_reg_and(
pVBInfo->Part4Port,
@@ -7229,12 +7229,12 @@ static void XGI_EnableBridge(struct xgifb_video_info *xgifb_info,
tempah = 0xc0;
if (!(pVBInfo->VBInfo & SetSimuScanMode)) {
- if (pVBInfo->VBInfo & SetCRT2ToLCDA) {
+ if (pVBInfo->VBInfo & XGI_SetCRT2ToLCDA) {
if (pVBInfo->VBInfo &
SetCRT2ToDualEdge) {
tempah = tempah & 0x40;
if (pVBInfo->VBInfo &
- SetCRT2ToLCDA)
+ XGI_SetCRT2ToLCDA)
tempah = tempah ^ 0xC0;
if (pVBInfo->SetFlag &
@@ -7271,7 +7271,7 @@ static void XGI_EnableBridge(struct xgifb_video_info *xgifb_info,
} /* 301 */
else { /* LVDS */
if (pVBInfo->VBInfo & (SetCRT2ToTV | SetCRT2ToLCD
- | SetCRT2ToLCDA))
+ | XGI_SetCRT2ToLCDA))
/* enable CRT2 */
xgifb_reg_or(pVBInfo->Part1Port, 0x1E, 0x20);
@@ -7311,9 +7311,9 @@ static void XGI_SetCRT1Group(struct xgifb_video_info *xgifb_info,
pVBInfo->SetFlag &= temp;
pVBInfo->SelectCRT2Rate = 0;
- if (pVBInfo->VBType & (VB_XGI301B | VB_XGI302B | VB_XGI301LV
- | VB_XGI302LV | VB_XGI301C)) {
- if (pVBInfo->VBInfo & (SetSimuScanMode | SetCRT2ToLCDA
+ if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B | VB_SIS301LV
+ | VB_SIS302LV | VB_XGI301C)) {
+ if (pVBInfo->VBInfo & (SetSimuScanMode | XGI_SetCRT2ToLCDA
| SetInSlaveMode)) {
pVBInfo->SetFlag |= ProgrammingCRT2;
}
@@ -7415,11 +7415,11 @@ unsigned char XGISetModeNew(struct xgifb_video_info *xgifb_info,
pVBInfo->P3c9 = pVBInfo->BaseAddr + 0x19;
pVBInfo->P3da = pVBInfo->BaseAddr + 0x2A;
pVBInfo->Part0Port = pVBInfo->BaseAddr + XGI_CRT2_PORT_00;
- pVBInfo->Part1Port = pVBInfo->BaseAddr + XGI_CRT2_PORT_04;
- pVBInfo->Part2Port = pVBInfo->BaseAddr + XGI_CRT2_PORT_10;
- pVBInfo->Part3Port = pVBInfo->BaseAddr + XGI_CRT2_PORT_12;
- pVBInfo->Part4Port = pVBInfo->BaseAddr + XGI_CRT2_PORT_14;
- pVBInfo->Part5Port = pVBInfo->BaseAddr + XGI_CRT2_PORT_14 + 2;
+ pVBInfo->Part1Port = pVBInfo->BaseAddr + SIS_CRT2_PORT_04;
+ pVBInfo->Part2Port = pVBInfo->BaseAddr + SIS_CRT2_PORT_10;
+ pVBInfo->Part3Port = pVBInfo->BaseAddr + SIS_CRT2_PORT_12;
+ pVBInfo->Part4Port = pVBInfo->BaseAddr + SIS_CRT2_PORT_14;
+ pVBInfo->Part5Port = pVBInfo->BaseAddr + SIS_CRT2_PORT_14 + 2;
/* for x86 Linux, XG21 LVDS */
if (HwDeviceExtension->jChipType == XG21) {
@@ -7452,20 +7452,20 @@ unsigned char XGISetModeNew(struct xgifb_video_info *xgifb_info,
XGI_GetLCDInfo(ModeNo, ModeIdIndex, pVBInfo);
XGI_DisableBridge(xgifb_info, HwDeviceExtension, pVBInfo);
- if (pVBInfo->VBInfo & (SetSimuScanMode | SetCRT2ToLCDA)) {
+ if (pVBInfo->VBInfo & (SetSimuScanMode | XGI_SetCRT2ToLCDA)) {
XGI_SetCRT1Group(xgifb_info, HwDeviceExtension, ModeNo,
ModeIdIndex, pVBInfo);
- if (pVBInfo->VBInfo & SetCRT2ToLCDA) {
+ if (pVBInfo->VBInfo & XGI_SetCRT2ToLCDA) {
XGI_SetLCDAGroup(ModeNo, ModeIdIndex,
HwDeviceExtension, pVBInfo);
}
} else {
- if (!(pVBInfo->VBInfo & SwitchToCRT2)) {
+ if (!(pVBInfo->VBInfo & SwitchCRT2)) {
XGI_SetCRT1Group(xgifb_info,
HwDeviceExtension, ModeNo,
ModeIdIndex, pVBInfo);
- if (pVBInfo->VBInfo & SetCRT2ToLCDA) {
+ if (pVBInfo->VBInfo & XGI_SetCRT2ToLCDA) {
XGI_SetLCDAGroup(ModeNo, ModeIdIndex,
HwDeviceExtension,
pVBInfo);
@@ -7473,7 +7473,7 @@ unsigned char XGISetModeNew(struct xgifb_video_info *xgifb_info,
}
}
- if (pVBInfo->VBInfo & (SetSimuScanMode | SwitchToCRT2)) {
+ if (pVBInfo->VBInfo & (SetSimuScanMode | SwitchCRT2)) {
switch (HwDeviceExtension->ujVBChipID) {
case VB_CHIP_301:
XGI_SetCRT2Group301(ModeNo, HwDeviceExtension,
@@ -7504,10 +7504,10 @@ unsigned char XGISetModeNew(struct xgifb_video_info *xgifb_info,
if (ModeNo <= 0x13) {
pVBInfo->ModeType = pVBInfo->SModeIDTable[ModeIdIndex].
- St_ModeFlag & ModeInfoFlag;
+ St_ModeFlag & ModeTypeMask;
} else {
pVBInfo->ModeType = pVBInfo->EModeIDTable[ModeIdIndex].
- Ext_ModeFlag & ModeInfoFlag;
+ Ext_ModeFlag & ModeTypeMask;
}
pVBInfo->SetFlag = 0;
diff --git a/drivers/staging/xgifb/vb_struct.h b/drivers/staging/xgifb/vb_struct.h
index 6556a0d6ff8..a5bd56af92b 100644
--- a/drivers/staging/xgifb/vb_struct.h
+++ b/drivers/staging/xgifb/vb_struct.h
@@ -1,15 +1,6 @@
#ifndef _VB_STRUCT_
#define _VB_STRUCT_
-
-struct XGI_LCDDataStruct {
- unsigned short RVBHCMAX;
- unsigned short RVBHCFACT;
- unsigned short VGAHT;
- unsigned short VGAVT;
- unsigned short LCDHT;
- unsigned short LCDVT;
-};
-
+#include "../../video/sis/vstruct.h"
struct XGI_LVDSCRT1HDataStruct {
unsigned char Reg[8];
@@ -19,22 +10,6 @@ struct XGI_LVDSCRT1VDataStruct {
unsigned char Reg[7];
};
-struct XGI_TVDataStruct {
- unsigned short RVBHCMAX;
- unsigned short RVBHCFACT;
- unsigned short VGAHT;
- unsigned short VGAVT;
- unsigned short TVHDE;
- unsigned short TVVDE;
- unsigned short RVBHRS;
- unsigned char FlickerMode;
- unsigned short HALFRVBHRS;
- unsigned char RY1COE;
- unsigned char RY2COE;
- unsigned char RY3COE;
- unsigned char RY4COE;
-};
-
struct XGI_StStruct {
unsigned char St_ModeID;
unsigned short St_ModeFlag;
@@ -47,18 +22,6 @@ struct XGI_StStruct {
unsigned char VB_StTVYFilterIndex;
};
-struct XGI_StandTableStruct {
- unsigned char CRT_COLS;
- unsigned char ROWS;
- unsigned char CHAR_HEIGHT;
- unsigned short CRT_LEN;
- unsigned char SR[4];
- unsigned char MISC;
- unsigned char CRTC[0x19];
- unsigned char ATTR[0x14];
- unsigned char GRC[9];
-};
-
struct XGI_ExtStruct {
unsigned char Ext_ModeID;
unsigned short Ext_ModeFlag;
@@ -85,39 +48,11 @@ struct XGI_Ext2Struct {
/* unsigned short ROM_OFFSET; */
};
-
-struct XGI_MCLKDataStruct {
- unsigned char SR28, SR29, SR2A;
- unsigned short CLOCK;
-};
-
struct XGI_ECLKDataStruct {
unsigned char SR2E, SR2F, SR30;
unsigned short CLOCK;
};
-struct XGI_VCLKDataStruct {
- unsigned char SR2B, SR2C;
- unsigned short CLOCK;
-};
-
-struct XGI_VBVCLKDataStruct {
- unsigned char Part4_A, Part4_B;
- unsigned short CLOCK;
-};
-
-struct XGI_StResInfoStruct {
- unsigned short HTotal;
- unsigned short VTotal;
-};
-
-struct XGI_ModeResInfoStruct {
- unsigned short HTotal;
- unsigned short VTotal;
- unsigned char XChar;
- unsigned char YChar;
-};
-
/*add for new UNIVGABIOS*/
struct XGI_LCDDesStruct {
unsigned short LCDHDES;
@@ -350,7 +285,7 @@ struct vb_device_info {
unsigned char *pCRT2Data_4_D;
unsigned char *pCRT2Data_4_E;
unsigned char *pCRT2Data_4_10;
- struct XGI_MCLKDataStruct *MCLKData;
+ struct SiS_MCLKData *MCLKData;
struct XGI_ECLKDataStruct *ECLKData;
unsigned char *XGI_TVDelayList;
@@ -380,15 +315,15 @@ struct vb_device_info {
struct XGI_TimingVStruct *TimingV;
struct XGI_StStruct *SModeIDTable;
- struct XGI_StandTableStruct *StandTable;
+ struct SiS_StandTable_S *StandTable;
struct XGI_ExtStruct *EModeIDTable;
struct XGI_Ext2Struct *RefIndex;
/* XGINew_CRT1TableStruct *CRT1Table; */
struct XGI_CRT1TableStruct *XGINEWUB_CRT1Table;
- struct XGI_VCLKDataStruct *VCLKData;
- struct XGI_VBVCLKDataStruct *VBVCLKData;
- struct XGI_StResInfoStruct *StResInfo;
- struct XGI_ModeResInfoStruct *ModeResInfo;
+ struct SiS_VCLKData *VCLKData;
+ struct SiS_VBVCLKData *VBVCLKData;
+ struct SiS_StResInfo_S *StResInfo;
+ struct SiS_ModeResInfo_S *ModeResInfo;
struct XGI_XG21CRT1Struct *UpdateCRT1;
int ram_type;
diff --git a/drivers/staging/xgifb/vb_table.h b/drivers/staging/xgifb/vb_table.h
index e7946f1c114..dddf261ed53 100644
--- a/drivers/staging/xgifb/vb_table.h
+++ b/drivers/staging/xgifb/vb_table.h
@@ -1,5 +1,5 @@
/* yilin modify for xgi20 */
-static struct XGI_MCLKDataStruct XGI340New_MCLKData[] = {
+static struct SiS_MCLKData XGI340New_MCLKData[] = {
{0x16, 0x01, 0x01, 166},
{0x19, 0x02, 0x01, 124},
{0x7C, 0x08, 0x01, 200},
@@ -10,7 +10,7 @@ static struct XGI_MCLKDataStruct XGI340New_MCLKData[] = {
{0x5c, 0x23, 0x01, 166}
};
-static struct XGI_MCLKDataStruct XGI27New_MCLKData[] = {
+static struct SiS_MCLKData XGI27New_MCLKData[] = {
{0x5c, 0x23, 0x01, 166},
{0x19, 0x02, 0x01, 124},
{0x7C, 0x08, 0x80, 200},
@@ -296,7 +296,7 @@ static struct XGI_ExtStruct XGI330_EModeIDTable[] = {
0x00, 0x00, 0x00, 0x00, 0x00}
};
-static struct XGI_StandTableStruct XGI330_StandTable[] = {
+static struct SiS_StandTable_S XGI330_StandTable[] = {
/* MD_0_200 */
{
0x28, 0x18, 0x08, 0x0800,
@@ -2353,109 +2353,109 @@ static struct XGI_LVDSCRT1VDataStruct XGI_LVDSCRT11280x1024_2_Vx75[] = {
/*add for new UNIVGABIOS*/
static struct XGI330_LCDDataTablStruct XGI_LCDDataTable[] = {
- {Panel1024x768, 0x0019, 0x0001, 0}, /* XGI_ExtLCD1024x768Data */
- {Panel1024x768, 0x0019, 0x0000, 1}, /* XGI_StLCD1024x768Data */
- {Panel1024x768, 0x0018, 0x0010, 2}, /* XGI_CetLCD1024x768Data */
- {Panel1280x1024, 0x0019, 0x0001, 3}, /* XGI_ExtLCD1280x1024Data */
- {Panel1280x1024, 0x0019, 0x0000, 4}, /* XGI_StLCD1280x1024Data */
- {Panel1280x1024, 0x0018, 0x0010, 5}, /* XGI_CetLCD1280x1024Data */
- {Panel1400x1050, 0x0019, 0x0001, 6}, /* XGI_ExtLCD1400x1050Data */
- {Panel1400x1050, 0x0019, 0x0000, 7}, /* XGI_StLCD1400x1050Data */
- {Panel1400x1050, 0x0018, 0x0010, 8}, /* XGI_CetLCD1400x1050Data */
- {Panel1600x1200, 0x0019, 0x0001, 9}, /* XGI_ExtLCD1600x1200Data */
- {Panel1600x1200, 0x0019, 0x0000, 10}, /* XGI_StLCD1600x1200Data */
+ {Panel_1024x768, 0x0019, 0x0001, 0}, /* XGI_ExtLCD1024x768Data */
+ {Panel_1024x768, 0x0019, 0x0000, 1}, /* XGI_StLCD1024x768Data */
+ {Panel_1024x768, 0x0018, 0x0010, 2}, /* XGI_CetLCD1024x768Data */
+ {Panel_1280x1024, 0x0019, 0x0001, 3}, /* XGI_ExtLCD1280x1024Data */
+ {Panel_1280x1024, 0x0019, 0x0000, 4}, /* XGI_StLCD1280x1024Data */
+ {Panel_1280x1024, 0x0018, 0x0010, 5}, /* XGI_CetLCD1280x1024Data */
+ {Panel_1400x1050, 0x0019, 0x0001, 6}, /* XGI_ExtLCD1400x1050Data */
+ {Panel_1400x1050, 0x0019, 0x0000, 7}, /* XGI_StLCD1400x1050Data */
+ {Panel_1400x1050, 0x0018, 0x0010, 8}, /* XGI_CetLCD1400x1050Data */
+ {Panel_1600x1200, 0x0019, 0x0001, 9}, /* XGI_ExtLCD1600x1200Data */
+ {Panel_1600x1200, 0x0019, 0x0000, 10}, /* XGI_StLCD1600x1200Data */
{PanelRef60Hz, 0x0008, 0x0008, 11}, /* XGI_NoScalingData */
- {Panel1024x768x75, 0x0019, 0x0001, 12}, /* XGI_ExtLCD1024x768x75Data */
- {Panel1024x768x75, 0x0019, 0x0000, 13}, /* XGI_StLCD1024x768x75Data */
- {Panel1024x768x75, 0x0018, 0x0010, 14}, /* XGI_CetLCD1024x768x75Data */
- {Panel1280x1024x75, 0x0019, 0x0001, 15}, /* XGI_ExtLCD1280x1024x75Data*/
- {Panel1280x1024x75, 0x0019, 0x0000, 16}, /* XGI_StLCD1280x1024x75Data */
- {Panel1280x1024x75, 0x0018, 0x0010, 17}, /* XGI_CetLCD1280x1024x75Data*/
+ {Panel_1024x768x75, 0x0019, 0x0001, 12}, /* XGI_ExtLCD1024x768x75Data */
+ {Panel_1024x768x75, 0x0019, 0x0000, 13}, /* XGI_StLCD1024x768x75Data */
+ {Panel_1024x768x75, 0x0018, 0x0010, 14}, /* XGI_CetLCD1024x768x75Data */
+ {Panel_1280x1024x75, 0x0019, 0x0001, 15}, /* XGI_ExtLCD1280x1024x75Data*/
+ {Panel_1280x1024x75, 0x0019, 0x0000, 16}, /* XGI_StLCD1280x1024x75Data */
+ {Panel_1280x1024x75, 0x0018, 0x0010, 17}, /* XGI_CetLCD1280x1024x75Data*/
{PanelRef75Hz, 0x0008, 0x0008, 18}, /* XGI_NoScalingDatax75 */
{0xFF, 0x0000, 0x0000, 0} /* End of table */
};
static struct XGI330_LCDDataTablStruct XGI_LCDDesDataTable[] = {
- {Panel1024x768, 0x0019, 0x0001, 0}, /* XGI_ExtLCDDes1024x768Data */
- {Panel1024x768, 0x0019, 0x0000, 1}, /* XGI_StLCDDes1024x768Data */
- {Panel1024x768, 0x0018, 0x0010, 2}, /* XGI_CetLCDDes1024x768Data */
- {Panel1280x1024, 0x0019, 0x0001, 3}, /* XGI_ExtLCDDes1280x1024Data */
- {Panel1280x1024, 0x0019, 0x0000, 4}, /* XGI_StLCDDes1280x1024Data */
- {Panel1280x1024, 0x0018, 0x0010, 5}, /* XGI_CetLCDDes1280x1024Data */
- {Panel1400x1050, 0x0019, 0x0001, 6}, /* XGI_ExtLCDDes1400x1050Data */
- {Panel1400x1050, 0x0019, 0x0000, 7}, /* XGI_StLCDDes1400x1050Data */
- {Panel1400x1050, 0x0418, 0x0010, 8}, /* XGI_CetLCDDes1400x1050Data */
- {Panel1400x1050, 0x0418, 0x0410, 9}, /* XGI_CetLCDDes1400x1050Data2 */
- {Panel1600x1200, 0x0019, 0x0001, 10}, /* XGI_ExtLCDDes1600x1200Data */
- {Panel1600x1200, 0x0019, 0x0000, 11}, /* XGI_StLCDDes1600x1200Data */
+ {Panel_1024x768, 0x0019, 0x0001, 0}, /* XGI_ExtLCDDes1024x768Data */
+ {Panel_1024x768, 0x0019, 0x0000, 1}, /* XGI_StLCDDes1024x768Data */
+ {Panel_1024x768, 0x0018, 0x0010, 2}, /* XGI_CetLCDDes1024x768Data */
+ {Panel_1280x1024, 0x0019, 0x0001, 3}, /* XGI_ExtLCDDes1280x1024Data */
+ {Panel_1280x1024, 0x0019, 0x0000, 4}, /* XGI_StLCDDes1280x1024Data */
+ {Panel_1280x1024, 0x0018, 0x0010, 5}, /* XGI_CetLCDDes1280x1024Data */
+ {Panel_1400x1050, 0x0019, 0x0001, 6}, /* XGI_ExtLCDDes1400x1050Data */
+ {Panel_1400x1050, 0x0019, 0x0000, 7}, /* XGI_StLCDDes1400x1050Data */
+ {Panel_1400x1050, 0x0418, 0x0010, 8}, /* XGI_CetLCDDes1400x1050Data */
+ {Panel_1400x1050, 0x0418, 0x0410, 9}, /* XGI_CetLCDDes1400x1050Data2 */
+ {Panel_1600x1200, 0x0019, 0x0001, 10}, /* XGI_ExtLCDDes1600x1200Data */
+ {Panel_1600x1200, 0x0019, 0x0000, 11}, /* XGI_StLCDDes1600x1200Data */
{PanelRef60Hz, 0x0008, 0x0008, 12}, /* XGI_NoScalingDesData */
- {Panel1024x768x75, 0x0019, 0x0001, 13}, /*XGI_ExtLCDDes1024x768x75Data*/
- {Panel1024x768x75, 0x0019, 0x0000, 14}, /* XGI_StLCDDes1024x768x75Data*/
- {Panel1024x768x75, 0x0018, 0x0010, 15}, /*XGI_CetLCDDes1024x768x75Data*/
+ {Panel_1024x768x75, 0x0019, 0x0001, 13}, /*XGI_ExtLCDDes1024x768x75Data*/
+ {Panel_1024x768x75, 0x0019, 0x0000, 14}, /* XGI_StLCDDes1024x768x75Data*/
+ {Panel_1024x768x75, 0x0018, 0x0010, 15}, /*XGI_CetLCDDes1024x768x75Data*/
/* XGI_ExtLCDDes1280x1024x75Data */
- {Panel1280x1024x75, 0x0019, 0x0001, 16},
+ {Panel_1280x1024x75, 0x0019, 0x0001, 16},
/* XGI_StLCDDes1280x1024x75Data */
- {Panel1280x1024x75, 0x0019, 0x0000, 17},
+ {Panel_1280x1024x75, 0x0019, 0x0000, 17},
/* XGI_CetLCDDes1280x1024x75Data */
- {Panel1280x1024x75, 0x0018, 0x0010, 18},
+ {Panel_1280x1024x75, 0x0018, 0x0010, 18},
{PanelRef75Hz, 0x0008, 0x0008, 19}, /* XGI_NoScalingDesDatax75 */
{0xFF, 0x0000, 0x0000, 0}
};
static struct XGI330_LCDDataTablStruct xgifb_epllcd_crt1[] = {
- {Panel1024x768, 0x0018, 0x0000, 0}, /* XGI_LVDSCRT11024x768_1 */
- {Panel1024x768, 0x0018, 0x0010, 1}, /* XGI_LVDSCRT11024x768_2 */
- {Panel1280x1024, 0x0018, 0x0000, 2}, /* XGI_LVDSCRT11280x1024_1 */
- {Panel1280x1024, 0x0018, 0x0010, 3}, /* XGI_LVDSCRT11280x1024_2 */
- {Panel1400x1050, 0x0018, 0x0000, 4}, /* XGI_LVDSCRT11400x1050_1 */
- {Panel1400x1050, 0x0018, 0x0010, 5}, /* XGI_LVDSCRT11400x1050_2 */
- {Panel1600x1200, 0x0018, 0x0000, 6}, /* XGI_LVDSCRT11600x1200_1 */
- {Panel1024x768x75, 0x0018, 0x0000, 7}, /* XGI_LVDSCRT11024x768_1x75 */
- {Panel1024x768x75, 0x0018, 0x0010, 8}, /* XGI_LVDSCRT11024x768_2x75 */
- {Panel1280x1024x75, 0x0018, 0x0000, 9}, /*XGI_LVDSCRT11280x1024_1x75*/
- {Panel1280x1024x75, 0x0018, 0x0010, 10},/*XGI_LVDSCRT11280x1024_2x75*/
+ {Panel_1024x768, 0x0018, 0x0000, 0}, /* XGI_LVDSCRT11024x768_1 */
+ {Panel_1024x768, 0x0018, 0x0010, 1}, /* XGI_LVDSCRT11024x768_2 */
+ {Panel_1280x1024, 0x0018, 0x0000, 2}, /* XGI_LVDSCRT11280x1024_1 */
+ {Panel_1280x1024, 0x0018, 0x0010, 3}, /* XGI_LVDSCRT11280x1024_2 */
+ {Panel_1400x1050, 0x0018, 0x0000, 4}, /* XGI_LVDSCRT11400x1050_1 */
+ {Panel_1400x1050, 0x0018, 0x0010, 5}, /* XGI_LVDSCRT11400x1050_2 */
+ {Panel_1600x1200, 0x0018, 0x0000, 6}, /* XGI_LVDSCRT11600x1200_1 */
+ {Panel_1024x768x75, 0x0018, 0x0000, 7}, /* XGI_LVDSCRT11024x768_1x75 */
+ {Panel_1024x768x75, 0x0018, 0x0010, 8}, /* XGI_LVDSCRT11024x768_2x75 */
+ {Panel_1280x1024x75, 0x0018, 0x0000, 9}, /*XGI_LVDSCRT11280x1024_1x75*/
+ {Panel_1280x1024x75, 0x0018, 0x0010, 10},/*XGI_LVDSCRT11280x1024_2x75*/
{0xFF, 0x0000, 0x0000, 0}
};
static struct XGI330_LCDDataTablStruct XGI_EPLLCDDataPtr[] = {
- {Panel1024x768, 0x0018, 0x0000, 0}, /* XGI_LVDS1024x768Data_1 */
- {Panel1024x768, 0x0018, 0x0010, 1}, /* XGI_LVDS1024x768Data_2 */
- {Panel1280x1024, 0x0018, 0x0000, 2}, /* XGI_LVDS1280x1024Data_1 */
- {Panel1280x1024, 0x0018, 0x0010, 3}, /* XGI_LVDS1280x1024Data_2 */
- {Panel1400x1050, 0x0018, 0x0000, 4}, /* XGI_LVDS1400x1050Data_1 */
- {Panel1400x1050, 0x0018, 0x0010, 5}, /* XGI_LVDS1400x1050Data_2 */
- {Panel1600x1200, 0x0018, 0x0000, 6}, /* XGI_LVDS1600x1200Data_1 */
+ {Panel_1024x768, 0x0018, 0x0000, 0}, /* XGI_LVDS1024x768Data_1 */
+ {Panel_1024x768, 0x0018, 0x0010, 1}, /* XGI_LVDS1024x768Data_2 */
+ {Panel_1280x1024, 0x0018, 0x0000, 2}, /* XGI_LVDS1280x1024Data_1 */
+ {Panel_1280x1024, 0x0018, 0x0010, 3}, /* XGI_LVDS1280x1024Data_2 */
+ {Panel_1400x1050, 0x0018, 0x0000, 4}, /* XGI_LVDS1400x1050Data_1 */
+ {Panel_1400x1050, 0x0018, 0x0010, 5}, /* XGI_LVDS1400x1050Data_2 */
+ {Panel_1600x1200, 0x0018, 0x0000, 6}, /* XGI_LVDS1600x1200Data_1 */
{PanelRef60Hz, 0x0008, 0x0008, 7}, /* XGI_LVDSNoScalingData */
- {Panel1024x768x75, 0x0018, 0x0000, 8}, /* XGI_LVDS1024x768Data_1x75 */
- {Panel1024x768x75, 0x0018, 0x0010, 9}, /* XGI_LVDS1024x768Data_2x75 */
- {Panel1280x1024x75, 0x0018, 0x0000, 10}, /* XGI_LVDS1280x1024Data_1x75*/
- {Panel1280x1024x75, 0x0018, 0x0010, 11}, /*XGI_LVDS1280x1024Data_2x75*/
+ {Panel_1024x768x75, 0x0018, 0x0000, 8}, /* XGI_LVDS1024x768Data_1x75 */
+ {Panel_1024x768x75, 0x0018, 0x0010, 9}, /* XGI_LVDS1024x768Data_2x75 */
+ {Panel_1280x1024x75, 0x0018, 0x0000, 10}, /* XGI_LVDS1280x1024Data_1x75*/
+ {Panel_1280x1024x75, 0x0018, 0x0010, 11}, /*XGI_LVDS1280x1024Data_2x75*/
{PanelRef75Hz, 0x0008, 0x0008, 12}, /* XGI_LVDSNoScalingDatax75 */
{0xFF, 0x0000, 0x0000, 0}
};
static struct XGI330_LCDDataTablStruct XGI_EPLLCDDesDataPtr[] = {
- {Panel1024x768, 0x0018, 0x0000, 0}, /* XGI_LVDS1024x768Des_1 */
- {Panel1024x768, 0x0618, 0x0410, 1}, /* XGI_LVDS1024x768Des_3 */
- {Panel1024x768, 0x0018, 0x0010, 2}, /* XGI_LVDS1024x768Des_2 */
- {Panel1280x1024, 0x0018, 0x0000, 3}, /* XGI_LVDS1280x1024Des_1 */
- {Panel1280x1024, 0x0018, 0x0010, 4}, /* XGI_LVDS1280x1024Des_2 */
- {Panel1400x1050, 0x0018, 0x0000, 5}, /* XGI_LVDS1400x1050Des_1 */
- {Panel1400x1050, 0x0018, 0x0010, 6}, /* XGI_LVDS1400x1050Des_2 */
- {Panel1600x1200, 0x0018, 0x0000, 7}, /* XGI_LVDS1600x1200Des_1 */
+ {Panel_1024x768, 0x0018, 0x0000, 0}, /* XGI_LVDS1024x768Des_1 */
+ {Panel_1024x768, 0x0618, 0x0410, 1}, /* XGI_LVDS1024x768Des_3 */
+ {Panel_1024x768, 0x0018, 0x0010, 2}, /* XGI_LVDS1024x768Des_2 */
+ {Panel_1280x1024, 0x0018, 0x0000, 3}, /* XGI_LVDS1280x1024Des_1 */
+ {Panel_1280x1024, 0x0018, 0x0010, 4}, /* XGI_LVDS1280x1024Des_2 */
+ {Panel_1400x1050, 0x0018, 0x0000, 5}, /* XGI_LVDS1400x1050Des_1 */
+ {Panel_1400x1050, 0x0018, 0x0010, 6}, /* XGI_LVDS1400x1050Des_2 */
+ {Panel_1600x1200, 0x0018, 0x0000, 7}, /* XGI_LVDS1600x1200Des_1 */
{PanelRef60Hz, 0x0008, 0x0008, 8}, /* XGI_LVDSNoScalingDesData */
- {Panel1024x768x75, 0x0018, 0x0000, 9}, /* XGI_LVDS1024x768Des_1x75 */
- {Panel1024x768x75, 0x0618, 0x0410, 10}, /* XGI_LVDS1024x768Des_3x75 */
- {Panel1024x768x75, 0x0018, 0x0010, 11}, /* XGI_LVDS1024x768Des_2x75 */
- {Panel1280x1024x75, 0x0018, 0x0000, 12}, /* XGI_LVDS1280x1024Des_1x75 */
- {Panel1280x1024x75, 0x0018, 0x0010, 13}, /* XGI_LVDS1280x1024Des_2x75 */
+ {Panel_1024x768x75, 0x0018, 0x0000, 9}, /* XGI_LVDS1024x768Des_1x75 */
+ {Panel_1024x768x75, 0x0618, 0x0410, 10}, /* XGI_LVDS1024x768Des_3x75 */
+ {Panel_1024x768x75, 0x0018, 0x0010, 11}, /* XGI_LVDS1024x768Des_2x75 */
+ {Panel_1280x1024x75, 0x0018, 0x0000, 12}, /* XGI_LVDS1280x1024Des_1x75 */
+ {Panel_1280x1024x75, 0x0018, 0x0010, 13}, /* XGI_LVDS1280x1024Des_2x75 */
{PanelRef75Hz, 0x0008, 0x0008, 14}, /* XGI_LVDSNoScalingDesDatax75 */
{0xFF, 0x0000, 0x0000, 0}
};
static struct XGI330_LCDDataTablStruct XGI_EPLCHLCDRegPtr[] = {
- {Panel1024x768, 0x0000, 0x0000, 0}, /* XGI_CH7017LV1024x768 */
- {Panel1400x1050, 0x0000, 0x0000, 1}, /* XGI_CH7017LV1400x1050 */
+ {Panel_1024x768, 0x0000, 0x0000, 0}, /* XGI_CH7017LV1024x768 */
+ {Panel_1400x1050, 0x0000, 0x0000, 1}, /* XGI_CH7017LV1400x1050 */
{0xFF, 0x0000, 0x0000, 0}
};
@@ -2501,225 +2501,225 @@ static unsigned short LCDLenList[] = {
/* Dual link only */
static struct XGI330_LCDCapStruct XGI_LCDDLCapList[] = {
/* LCDCap1024x768 */
- {Panel1024x768, DefaultLCDCap, 0, 0x012, 0x88, 0x06, VCLK65,
+ {Panel_1024x768, DefaultLCDCap, 0, 0x012, 0x88, 0x06, VCLK65_315,
0x6C, 0xC3, 0x35, 0x62, 0x02, 0x14, 0x0A, 0x02, 0x00,
0x30, 0x10, 0x5A, 0x10, 0x10, 0x0A, 0xC0, 0x28, 0x10},
/* LCDCap1280x1024 */
- {Panel1280x1024, LCDDualLink+DefaultLCDCap, StLCDBToA,
- 0x012, 0x70, 0x03, VCLK108_2,
+ {Panel_1280x1024, XGI_LCDDualLink+DefaultLCDCap, StLCDBToA,
+ 0x012, 0x70, 0x03, VCLK108_2_315,
0x70, 0x44, 0xF8, 0x2F, 0x02, 0x14, 0x0A, 0x02, 0x00,
0x30, 0x10, 0x5A, 0x10, 0x10, 0x0A, 0xC0, 0x30, 0x10},
/* LCDCap1400x1050 */
- {Panel1400x1050, LCDDualLink+DefaultLCDCap, StLCDBToA,
- 0x012, 0x70, 0x03, VCLK108_2,
+ {Panel_1400x1050, XGI_LCDDualLink+DefaultLCDCap, StLCDBToA,
+ 0x012, 0x70, 0x03, VCLK108_2_315,
0x70, 0x44, 0xF8, 0x2F, 0x02, 0x14, 0x0A, 0x02, 0x00,
0x30, 0x10, 0x5A, 0x10, 0x10, 0x0A, 0xC0, 0x30, 0x10},
/* LCDCap1600x1200 */
- {Panel1600x1200, LCDDualLink+DefaultLCDCap, LCDToFull,
+ {Panel_1600x1200, XGI_LCDDualLink+DefaultLCDCap, LCDToFull,
0x012, 0xC0, 0x03, VCLK162,
0x43, 0x22, 0x70, 0x24, 0x02, 0x14, 0x0A, 0x02, 0x00,
0x30, 0x10, 0x5A, 0x10, 0x10, 0x0A, 0xC0, 0x30, 0x10},
/* LCDCap1024x768x75 */
- {Panel1024x768x75, DefaultLCDCap, 0, 0x012, 0x60, 0, VCLK78_75,
+ {Panel_1024x768x75, DefaultLCDCap, 0, 0x012, 0x60, 0, VCLK78_75,
0x2B, 0x61, 0x2B, 0x61, 0x02, 0x14, 0x0A, 0x02, 0x00,
0x30, 0x10, 0x5A, 0x10, 0x10, 0x0A, 0xC0, 0x28, 0x10},
/* LCDCap1280x1024x75 */
- {Panel1280x1024x75, LCDDualLink+DefaultLCDCap, StLCDBToA,
+ {Panel_1280x1024x75, XGI_LCDDualLink+DefaultLCDCap, StLCDBToA,
0x012, 0x90, 0x03, VCLK135_5,
0x54, 0x42, 0x4A, 0x61, 0x02, 0x14, 0x0A, 0x02, 0x00,
0x30, 0x10, 0x5A, 0x10, 0x10, 0x0A, 0xC0, 0x30, 0x10},
/* LCDCapDefault */
- {0xFF, DefaultLCDCap, 0, 0x012, 0x88, 0x06, VCLK65,
+ {0xFF, DefaultLCDCap, 0, 0x012, 0x88, 0x06, VCLK65_315,
0x6C, 0xC3, 0x35, 0x62, 0x02, 0x14, 0x0A, 0x02, 0x00,
0x30, 0x10, 0x5A, 0x10, 0x10, 0x0A, 0xC0, 0x28, 0x10}
};
static struct XGI330_LCDCapStruct XGI_LCDCapList[] = {
/* LCDCap1024x768 */
- {Panel1024x768, DefaultLCDCap, 0, 0x012, 0x88, 0x06, VCLK65,
+ {Panel_1024x768, DefaultLCDCap, 0, 0x012, 0x88, 0x06, VCLK65_315,
0x6C, 0xC3, 0x35, 0x62, 0x02, 0x14, 0x0A, 0x02, 0x00,
0x30, 0x10, 0x5A, 0x10, 0x10, 0x0A, 0xC0, 0x28, 0x10},
/* LCDCap1280x1024 */
- {Panel1280x1024, DefaultLCDCap, StLCDBToA,
- 0x012, 0x70, 0x03, VCLK108_2,
+ {Panel_1280x1024, DefaultLCDCap, StLCDBToA,
+ 0x012, 0x70, 0x03, VCLK108_2_315,
0x70, 0x44, 0xF8, 0x2F, 0x02, 0x14, 0x0A, 0x02, 0x00,
0x30, 0x10, 0x5A, 0x10, 0x10, 0x0A, 0xC0, 0x30, 0x10},
/* LCDCap1400x1050 */
- {Panel1400x1050, DefaultLCDCap, StLCDBToA,
- 0x012, 0x70, 0x03, VCLK108_2,
+ {Panel_1400x1050, DefaultLCDCap, StLCDBToA,
+ 0x012, 0x70, 0x03, VCLK108_2_315,
0x70, 0x44, 0xF8, 0x2F, 0x02, 0x14, 0x0A, 0x02, 0x00,
0x30, 0x10, 0x5A, 0x10, 0x10, 0x0A, 0xC0, 0x30, 0x10},
/* LCDCap1600x1200 */
- {Panel1600x1200, DefaultLCDCap, LCDToFull,
+ {Panel_1600x1200, DefaultLCDCap, LCDToFull,
0x012, 0xC0, 0x03, VCLK162,
0x5A, 0x23, 0x5A, 0x23, 0x02, 0x14, 0x0A, 0x02, 0x00,
0x30, 0x10, 0x5A, 0x10, 0x10, 0x0A, 0xC0, 0x30, 0x10},
/* LCDCap1024x768x75 */
- {Panel1024x768x75, DefaultLCDCap, 0, 0x012, 0x60, 0, VCLK78_75,
+ {Panel_1024x768x75, DefaultLCDCap, 0, 0x012, 0x60, 0, VCLK78_75,
0x2B, 0x61, 0x2B, 0x61, 0x02, 0x14, 0x0A, 0x02, 0x00,
0x30, 0x10, 0x5A, 0x10, 0x10, 0x0A, 0xC0, 0x28, 0x10},
/* LCDCap1280x1024x75 */
- {Panel1280x1024x75, DefaultLCDCap, StLCDBToA,
+ {Panel_1280x1024x75, DefaultLCDCap, StLCDBToA,
0x012, 0x90, 0x03, VCLK135_5,
0x54, 0x42, 0x4A, 0x61, 0x02, 0x14, 0x0A, 0x02, 0x00,
0x30, 0x10, 0x5A, 0x10, 0x10, 0x0A, 0xC0, 0x30, 0x10},
/* LCDCapDefault */
- {0xFF, DefaultLCDCap, 0, 0x012, 0x88, 0x06, VCLK65,
+ {0xFF, DefaultLCDCap, 0, 0x012, 0x88, 0x06, VCLK65_315,
0x6C, 0xC3, 0x35, 0x62, 0x02, 0x14, 0x0A, 0x02, 0x00,
0x30, 0x10, 0x5A, 0x10, 0x10, 0x0A, 0xC0, 0x28, 0x10}
};
static struct XGI_Ext2Struct XGI330_RefIndex[] = {
- {Support32Bpp + SupportAllCRT2 + SyncPN, RES320x200, VCLK25_175,
+ {Mode32Bpp + SupportAllCRT2 + SyncPN, RES320x200, VCLK25_175,
0x00, 0x10, 0x59, 320, 200},/* 00 */
- {Support32Bpp + SupportAllCRT2 + SyncPN, RES320x200, VCLK25_175,
+ {Mode32Bpp + SupportAllCRT2 + SyncPN, RES320x200, VCLK25_175,
0x00, 0x10, 0x00, 320, 400},/* 01 */
- {Support32Bpp + SupportAllCRT2 + SyncNN, RES320x240, VCLK25_175,
+ {Mode32Bpp + SupportAllCRT2 + SyncNN, RES320x240, VCLK25_175,
0x04, 0x20, 0x50, 320, 240},/* 02 */
- {Support32Bpp + SupportAllCRT2 + SyncPP, RES400x300, VCLK40,
+ {Mode32Bpp + SupportAllCRT2 + SyncPP, RES400x300, VCLK40,
0x05, 0x32, 0x51, 400, 300},/* 03 */
- {Support32Bpp + NoSupportTV + SyncNN + SupportTV1024, RES512x384,
- VCLK65, 0x06, 0x43, 0x52, 512, 384},/* 04 */
- {Support32Bpp + SupportAllCRT2 + SyncPN, RES640x400, VCLK25_175,
+ {Mode32Bpp + NoSupportTV + SyncNN + SupportTV1024, RES512x384,
+ VCLK65_315, 0x06, 0x43, 0x52, 512, 384},/* 04 */
+ {Mode32Bpp + SupportAllCRT2 + SyncPN, RES640x400, VCLK25_175,
0x00, 0x14, 0x2f, 640, 400},/* 05 */
- {Support32Bpp + SupportAllCRT2 + SyncNN, RES640x480x60, VCLK25_175,
+ {Mode32Bpp + SupportAllCRT2 + SyncNN, RES640x480x60, VCLK25_175,
0x04, 0x24, 0x2e, 640, 480},/* 06 640x480x60Hz (LCD 640x480x60z) */
- {Support32Bpp + NoSupportHiVisionTV + SyncNN, RES640x480x72, VCLK31_5,
+ {Mode32Bpp + NoSupportHiVisionTV + SyncNN, RES640x480x72, VCLK31_5,
0x04, 0x24, 0x2e, 640, 480},/* 07 640x480x72Hz (LCD 640x480x70Hz) */
- {Support32Bpp + NoSupportHiVisionTV + SyncNN, RES640x480x75, VCLK31_5,
+ {Mode32Bpp + NoSupportHiVisionTV + SyncNN, RES640x480x75, VCLK31_5,
0x47, 0x24, 0x2e, 640, 480},/* 08 640x480x75Hz (LCD 640x480x75Hz) */
- {Support32Bpp + SupportRAMDAC2 + SyncNN, RES640x480x85, VCLK36,
+ {Mode32Bpp + SupportRAMDAC2 + SyncNN, RES640x480x85, VCLK36,
0x8A, 0x24, 0x2e, 640, 480},/* 09 640x480x85Hz */
- {Support32Bpp + SupportRAMDAC2 + SyncPN, RES640x480x100, VCLK43_163,
+ {Mode32Bpp + SupportRAMDAC2 + SyncPN, RES640x480x100, VCLK43_163,
0x00, 0x24, 0x2e, 640, 480},/* 0a 640x480x100Hz */
- {Support32Bpp + SupportRAMDAC2 + SyncPN, RES640x480x120, VCLK52_406,
+ {Mode32Bpp + SupportRAMDAC2 + SyncPN, RES640x480x120, VCLK52_406,
0x00, 0x24, 0x2e, 640, 480},/* 0b 640x480x120Hz */
- {Support32Bpp + SupportRAMDAC2 + SyncPN, RES640x480x160, VCLK72_852,
+ {Mode32Bpp + SupportRAMDAC2 + SyncPN, RES640x480x160, VCLK72_852,
0x00, 0x24, 0x2e, 640, 480},/* 0c 640x480x160Hz */
- {Support32Bpp + SupportRAMDAC2 + SyncNN, RES640x480x200, VCLK86_6,
+ {Mode32Bpp + SupportRAMDAC2 + SyncNN, RES640x480x200, VCLK86_6,
0x00, 0x24, 0x2e, 640, 480},/* 0d 640x480x200Hz */
- {Support32Bpp + NoSupportLCD + SyncPP, RES800x600x56, VCLK36,
+ {Mode32Bpp + NoSupportLCD + SyncPP, RES800x600x56, VCLK36,
0x05, 0x36, 0x6a, 800, 600},/* 0e 800x600x56Hz */
- {Support32Bpp + NoSupportTV + SyncPP, RES800x600x60, VCLK40,
+ {Mode32Bpp + NoSupportTV + SyncPP, RES800x600x60, VCLK40,
0x05, 0x36, 0x6a, 800, 600},/* 0f 800x600x60Hz (LCD 800x600x60Hz) */
- {Support32Bpp + NoSupportHiVisionTV + SyncPP, RES800x600x72, VCLK50,
+ {Mode32Bpp + NoSupportHiVisionTV + SyncPP, RES800x600x72, VCLK50,
0x48, 0x36, 0x6a, 800, 600},/* 10 800x600x72Hz (LCD 800x600x70Hz) */
- {Support32Bpp + NoSupportHiVisionTV + SyncPP, RES800x600x75, VCLK49_5,
+ {Mode32Bpp + NoSupportHiVisionTV + SyncPP, RES800x600x75, VCLK49_5,
0x8B, 0x36, 0x6a, 800, 600},/* 11 800x600x75Hz (LCD 800x600x75Hz) */
- {Support32Bpp + SupportRAMDAC2 + SyncPP, RES800x600x85, VCLK56_25,
+ {Mode32Bpp + SupportRAMDAC2 + SyncPP, RES800x600x85, VCLK56_25,
0x00, 0x36, 0x6a, 800, 600},/* 12 800x600x85Hz */
- {Support32Bpp + SupportRAMDAC2 + SyncPN, RES800x600x100, VCLK68_179,
+ {Mode32Bpp + SupportRAMDAC2 + SyncPN, RES800x600x100, VCLK68_179,
0x00, 0x36, 0x6a, 800, 600},/* 13 800x600x100Hz */
- {Support32Bpp + SupportRAMDAC2 + SyncPN, RES800x600x120, VCLK83_95,
+ {Mode32Bpp + SupportRAMDAC2 + SyncPN, RES800x600x120, VCLK83_95,
0x00, 0x36, 0x6a, 800, 600},/* 14 800x600x120Hz */
- {Support32Bpp + SupportRAMDAC2 + SyncPN, RES800x600x160, VCLK116_406,
+ {Mode32Bpp + SupportRAMDAC2 + SyncPN, RES800x600x160, VCLK116_406,
0x00, 0x36, 0x6a, 800, 600},/* 15 800x600x160Hz */
- {Support32Bpp + InterlaceMode + SyncPP, RES1024x768x43, VCLK44_9,
+ {Mode32Bpp + InterlaceMode + SyncPP, RES1024x768x43, VCLK44_9,
0x00, 0x47, 0x37, 1024, 768},/* 16 1024x768x43Hz */
/* 17 1024x768x60Hz (LCD 1024x768x60Hz) */
- {Support32Bpp + NoSupportTV + SyncNN + SupportTV1024, RES1024x768x60,
- VCLK65, 0x06, 0x47, 0x37, 1024, 768},
- {Support32Bpp + NoSupportHiVisionTV + SyncNN, RES1024x768x70, VCLK75,
+ {Mode32Bpp + NoSupportTV + SyncNN + SupportTV1024, RES1024x768x60,
+ VCLK65_315, 0x06, 0x47, 0x37, 1024, 768},
+ {Mode32Bpp + NoSupportHiVisionTV + SyncNN, RES1024x768x70, VCLK75,
0x49, 0x47, 0x37, 1024, 768},/* 18 1024x768x70Hz (LCD 1024x768x70Hz) */
- {Support32Bpp + NoSupportHiVisionTV + SyncPP, RES1024x768x75, VCLK78_75,
+ {Mode32Bpp + NoSupportHiVisionTV + SyncPP, RES1024x768x75, VCLK78_75,
0x00, 0x47, 0x37, 1024, 768},/* 19 1024x768x75Hz (LCD 1024x768x75Hz) */
- {Support32Bpp + SupportRAMDAC2 + SyncPP, RES1024x768x85, VCLK94_5,
+ {Mode32Bpp + SupportRAMDAC2 + SyncPP, RES1024x768x85, VCLK94_5,
0x8C, 0x47, 0x37, 1024, 768},/* 1a 1024x768x85Hz */
- {Support32Bpp + SupportRAMDAC2 + SyncPN, RES1024x768x100, VCLK113_309,
+ {Mode32Bpp + SupportRAMDAC2 + SyncPN, RES1024x768x100, VCLK113_309,
0x00, 0x47, 0x37, 1024, 768},/* 1b 1024x768x100Hz */
- {Support32Bpp + SupportRAMDAC2 + SyncPN, RES1024x768x120, VCLK139_054,
+ {Mode32Bpp + SupportRAMDAC2 + SyncPN, RES1024x768x120, VCLK139_054,
0x00, 0x47, 0x37, 1024, 768},/* 1c 1024x768x120Hz */
- {Support32Bpp + SupportLCD + SyncPP, RES1280x960x60, VCLK108_2,
+ {Mode32Bpp + SupportLCD + SyncPP, RES1280x960x60, VCLK108_2_315,
0x08, 0x58, 0x7b, 1280, 960},/* 1d 1280x960x60Hz */
- {Support32Bpp + InterlaceMode + SyncPP, RES1280x1024x43, VCLK78_75,
+ {Mode32Bpp + InterlaceMode + SyncPP, RES1280x1024x43, VCLK78_75,
0x00, 0x58, 0x3a, 1280, 1024},/* 1e 1280x1024x43Hz */
- {Support32Bpp + NoSupportTV + SyncPP, RES1280x1024x60, VCLK108_2,
+ {Mode32Bpp + NoSupportTV + SyncPP, RES1280x1024x60, VCLK108_2_315,
0x07, 0x58, 0x3a, 1280, 1024},/*1f 1280x1024x60Hz (LCD 1280x1024x60Hz)*/
- {Support32Bpp + NoSupportTV + SyncPP, RES1280x1024x75, VCLK135_5,
+ {Mode32Bpp + NoSupportTV + SyncPP, RES1280x1024x75, VCLK135_5,
0x00, 0x58, 0x3a, 1280, 1024},/*20 1280x1024x75Hz (LCD 1280x1024x75Hz)*/
- {Support32Bpp + SyncPP, RES1280x1024x85, VCLK157_5,
+ {Mode32Bpp + SyncPP, RES1280x1024x85, VCLK157_5,
0x00, 0x58, 0x3a, 1280, 1024},/* 21 1280x1024x85Hz */
/* 22 1600x1200x60Hz */
- {Support32Bpp + SupportLCD + SyncPP + SupportCRT2in301C,
+ {Mode32Bpp + SupportLCD + SyncPP + SupportCRT2in301C,
RES1600x1200x60, VCLK162, 0x09, 0x7A, 0x3c, 1600, 1200},
- {Support32Bpp + SyncPP + SupportCRT2in301C, RES1600x1200x65, VCLK175,
+ {Mode32Bpp + SyncPP + SupportCRT2in301C, RES1600x1200x65, VCLK175,
0x00, 0x69, 0x3c, 1600, 1200},/* 23 1600x1200x65Hz */
- {Support32Bpp + SyncPP + SupportCRT2in301C, RES1600x1200x70, VCLK189,
+ {Mode32Bpp + SyncPP + SupportCRT2in301C, RES1600x1200x70, VCLK189,
0x00, 0x69, 0x3c, 1600, 1200},/* 24 1600x1200x70Hz */
- {Support32Bpp + SyncPP + SupportCRT2in301C, RES1600x1200x75, VCLK202_5,
+ {Mode32Bpp + SyncPP + SupportCRT2in301C, RES1600x1200x75, VCLK202_5,
0x00, 0x69, 0x3c, 1600, 1200},/* 25 1600x1200x75Hz */
- {Support32Bpp + SyncPP, RES1600x1200x85, VCLK229_5,
+ {Mode32Bpp + SyncPP, RES1600x1200x85, VCLK229_5,
0x00, 0x69, 0x3c, 1600, 1200},/* 26 1600x1200x85Hz */
- {Support32Bpp + SyncPP, RES1600x1200x100, VCLK269_655,
+ {Mode32Bpp + SyncPP, RES1600x1200x100, VCLK269_655,
0x00, 0x69, 0x3c, 1600, 1200},/* 27 1600x1200x100Hz */
- {Support32Bpp + SyncPP, RES1600x1200x120, VCLK323_586,
+ {Mode32Bpp + SyncPP, RES1600x1200x120, VCLK323_586,
0x00, 0x69, 0x3c, 1600, 1200},/* 28 1600x1200x120Hz */
- {Support32Bpp + SupportLCD + SyncNP, RES1920x1440x60, VCLK234,
+ {Mode32Bpp + SupportLCD + SyncNP, RES1920x1440x60, VCLK234,
0x00, 0x00, 0x68, 1920, 1440},/* 29 1920x1440x60Hz */
- {Support32Bpp + SyncPN, RES1920x1440x65, VCLK254_817,
+ {Mode32Bpp + SyncPN, RES1920x1440x65, VCLK254_817,
0x00, 0x00, 0x68, 1920, 1440},/* 2a 1920x1440x65Hz */
- {Support32Bpp + SyncPN, RES1920x1440x70, VCLK277_015,
+ {Mode32Bpp + SyncPN, RES1920x1440x70, VCLK277_015,
0x00, 0x00, 0x68, 1920, 1440},/* 2b 1920x1440x70Hz */
- {Support32Bpp + SyncPN, RES1920x1440x75, VCLK291_132,
+ {Mode32Bpp + SyncPN, RES1920x1440x75, VCLK291_132,
0x00, 0x00, 0x68, 1920, 1440},/* 2c 1920x1440x75Hz */
- {Support32Bpp + SyncPN, RES1920x1440x85, VCLK330_615,
+ {Mode32Bpp + SyncPN, RES1920x1440x85, VCLK330_615,
0x00, 0x00, 0x68, 1920, 1440},/* 2d 1920x1440x85Hz */
- {Support16Bpp + SyncPN, RES1920x1440x100, VCLK388_631,
+ {Mode16Bpp + SyncPN, RES1920x1440x100, VCLK388_631,
0x00, 0x00, 0x68, 1920, 1440},/* 2e 1920x1440x100Hz */
- {Support32Bpp + SupportLCD + SyncPN, RES2048x1536x60, VCLK266_952,
+ {Mode32Bpp + SupportLCD + SyncPN, RES2048x1536x60, VCLK266_952,
0x00, 0x00, 0x6c, 2048, 1536},/* 2f 2048x1536x60Hz */
- {Support32Bpp + SyncPN, RES2048x1536x65, VCLK291_766,
+ {Mode32Bpp + SyncPN, RES2048x1536x65, VCLK291_766,
0x00, 0x00, 0x6c, 2048, 1536},/* 30 2048x1536x65Hz */
- {Support32Bpp + SyncPN, RES2048x1536x70, VCLK315_195,
+ {Mode32Bpp + SyncPN, RES2048x1536x70, VCLK315_195,
0x00, 0x00, 0x6c, 2048, 1536},/* 31 2048x1536x70Hz */
- {Support32Bpp + SyncPN, RES2048x1536x75, VCLK340_477,
+ {Mode32Bpp + SyncPN, RES2048x1536x75, VCLK340_477,
0x00, 0x00, 0x6c, 2048, 1536},/* 32 2048x1536x75Hz */
- {Support16Bpp + SyncPN, RES2048x1536x85, VCLK375_847,
+ {Mode16Bpp + SyncPN, RES2048x1536x85, VCLK375_847,
0x00, 0x00, 0x6c, 2048, 1536},/* 33 2048x1536x85Hz */
- {Support32Bpp + SupportHiVisionTV + SupportRAMDAC2 +
- SyncPP + SupportYPbPr, RES800x480x60, VCLK39_77,
+ {Mode32Bpp + SupportHiVision + SupportRAMDAC2 +
+ SyncPP + SupportYPbPr750p, RES800x480x60, VCLK39_77,
0x08, 0x00, 0x70, 800, 480},/* 34 800x480x60Hz */
- {Support32Bpp + SupportRAMDAC2 + SyncPP, RES800x480x75, VCLK49_5,
+ {Mode32Bpp + SupportRAMDAC2 + SyncPP, RES800x480x75, VCLK49_5,
0x08, 0x00, 0x70, 800, 480},/* 35 800x480x75Hz */
- {Support32Bpp + SupportRAMDAC2 + SyncPP, RES800x480x85, VCLK56_25,
+ {Mode32Bpp + SupportRAMDAC2 + SyncPP, RES800x480x85, VCLK56_25,
0x08, 0x00, 0x70, 800, 480},/* 36 800x480x85Hz */
- {Support32Bpp + SupportHiVisionTV + SupportRAMDAC2 +
- SyncPP + SupportYPbPr, RES1024x576x60, VCLK65,
+ {Mode32Bpp + SupportHiVision + SupportRAMDAC2 +
+ SyncPP + SupportYPbPr750p, RES1024x576x60, VCLK65_315,
0x09, 0x00, 0x71, 1024, 576},/* 37 1024x576x60Hz */
- {Support32Bpp + SupportRAMDAC2 + SyncPP, RES1024x576x75, VCLK78_75,
+ {Mode32Bpp + SupportRAMDAC2 + SyncPP, RES1024x576x75, VCLK78_75,
0x09, 0x00, 0x71, 1024, 576},/* 38 1024x576x75Hz */
- {Support32Bpp + SupportRAMDAC2 + SyncPP, RES1024x576x85, VCLK94_5,
+ {Mode32Bpp + SupportRAMDAC2 + SyncPP, RES1024x576x85, VCLK94_5,
0x09, 0x00, 0x71, 1024, 576},/* 39 1024x576x85Hz */
- {Support32Bpp + SupportHiVisionTV + SupportRAMDAC2 +
- SyncPP + SupportYPbPr, RES1280x720x60, VCLK108_2,
+ {Mode32Bpp + SupportHiVision + SupportRAMDAC2 +
+ SyncPP + SupportYPbPr750p, RES1280x720x60, VCLK108_2_315,
0x0A, 0x00, 0x75, 1280, 720},/* 3a 1280x720x60Hz*/
- {Support32Bpp + SupportRAMDAC2 + SyncPP, RES1280x720x75, VCLK135_5,
+ {Mode32Bpp + SupportRAMDAC2 + SyncPP, RES1280x720x75, VCLK135_5,
0x0A, 0x00, 0x75, 1280, 720},/* 3b 1280x720x75Hz */
- {Support32Bpp + SupportRAMDAC2 + SyncPP, RES1280x720x85, VCLK157_5,
+ {Mode32Bpp + SupportRAMDAC2 + SyncPP, RES1280x720x85, VCLK157_5,
0x0A, 0x00, 0x75, 1280, 720},/* 3c 1280x720x85Hz */
- {Support32Bpp + SupportTV + SyncNN, RES720x480x60, VCLK28_322,
+ {Mode32Bpp + SupportTV + SyncNN, RES720x480x60, VCLK28_322,
0x06, 0x00, 0x31, 720, 480},/* 3d 720x480x60Hz */
- {Support32Bpp + SupportTV + SyncPP, RES720x576x56, VCLK36,
+ {Mode32Bpp + SupportTV + SyncPP, RES720x576x56, VCLK36,
0x06, 0x00, 0x32, 720, 576},/* 3e 720x576x56Hz */
- {Support32Bpp + InterlaceMode + NoSupportLCD + SyncPP, RES856x480x79I,
+ {Mode32Bpp + InterlaceMode + NoSupportLCD + SyncPP, RES856x480x79I,
VCLK35_2, 0x00, 0x00, 0x00, 856, 480},/* 3f 856x480x79I */
- {Support32Bpp + NoSupportLCD + SyncNN, RES856x480x60, VCLK35_2,
+ {Mode32Bpp + NoSupportLCD + SyncNN, RES856x480x60, VCLK35_2,
0x00, 0x00, 0x00, 856, 480},/* 40 856x480x60Hz */
- {Support32Bpp + NoSupportHiVisionTV + SyncPP, RES1280x768x60,
+ {Mode32Bpp + NoSupportHiVisionTV + SyncPP, RES1280x768x60,
VCLK79_411, 0x08, 0x48, 0x23, 1280, 768},/* 41 1280x768x60Hz */
- {Support32Bpp + NoSupportHiVisionTV + SyncPP, RES1400x1050x60,
+ {Mode32Bpp + NoSupportHiVisionTV + SyncPP, RES1400x1050x60,
VCLK122_61, 0x08, 0x69, 0x26, 1400, 1050},/* 42 1400x1050x60Hz */
- {Support32Bpp + SupportRAMDAC2 + SyncPP, RES1152x864x60, VCLK80_350,
+ {Mode32Bpp + SupportRAMDAC2 + SyncPP, RES1152x864x60, VCLK80_350,
0x37, 0x00, 0x20, 1152, 864},/* 43 1152x864x60Hz */
- {Support32Bpp + SupportRAMDAC2 + SyncPP, RES1152x864x75, VCLK107_385,
+ {Mode32Bpp + SupportRAMDAC2 + SyncPP, RES1152x864x75, VCLK107_385,
0x37, 0x00, 0x20, 1152, 864},/* 44 1152x864x75Hz */
- {Support32Bpp + SupportLCD + SupportRAMDAC2 + SyncPP, RES1280x960x75,
+ {Mode32Bpp + SupportLCD + SupportRAMDAC2 + SyncPP, RES1280x960x75,
VCLK125_999, 0x3A, 0x88, 0x7b, 1280, 960},/* 45 1280x960x75Hz */
- {Support32Bpp + SupportLCD + SupportRAMDAC2 + SyncPP, RES1280x960x85,
+ {Mode32Bpp + SupportLCD + SupportRAMDAC2 + SyncPP, RES1280x960x85,
VCLK148_5, 0x0A, 0x88, 0x7b, 1280, 960},/* 46 1280x960x85Hz */
- {Support32Bpp + SupportLCD + SupportRAMDAC2 + SyncPP, RES1280x960x120,
+ {Mode32Bpp + SupportLCD + SupportRAMDAC2 + SyncPP, RES1280x960x120,
VCLK217_325, 0x3A, 0x88, 0x7b, 1280, 960},/* 47 1280x960x120Hz */
- {Support32Bpp + SupportRAMDAC2 + SyncPN, RES1024x768x160, VCLK139_054,
+ {Mode32Bpp + SupportRAMDAC2 + SyncPN, RES1024x768x160, VCLK139_054,
0x30, 0x47, 0x37, 1024, 768},/* 48 1024x768x160Hz */
};
@@ -2729,7 +2729,7 @@ static unsigned char XGI330_ScreenOffset[] = {
0x57, 0x48
};
-static struct XGI_StResInfoStruct XGI330_StResInfo[] = {
+static struct SiS_StResInfo_S XGI330_StResInfo[] = {
{640, 400},
{640, 350},
{720, 400},
@@ -2737,7 +2737,7 @@ static struct XGI_StResInfoStruct XGI330_StResInfo[] = {
{640, 480}
};
-static struct XGI_ModeResInfoStruct XGI330_ModeResInfo[] = {
+static struct SiS_ModeResInfo_S XGI330_ModeResInfo[] = {
{ 320, 200, 8, 8},
{ 320, 240, 8, 8},
{ 320, 400, 8, 8},
diff --git a/drivers/staging/xgifb/vgatypes.h b/drivers/staging/xgifb/vgatypes.h
index 9e166bbb00c..a7208e31581 100644
--- a/drivers/staging/xgifb/vgatypes.h
+++ b/drivers/staging/xgifb/vgatypes.h
@@ -2,6 +2,9 @@
#define _VGATYPES_
#include <linux/ioctl.h>
+#include <linux/fb.h> /* for struct fb_var_screeninfo for sis.h */
+#include "../../video/sis/vgatypes.h"
+#include "../../video/sis/sis.h" /* for LCD_TYPE */
#ifndef XGI_VB_CHIP_TYPE
enum XGI_VB_CHIP_TYPE {
@@ -19,6 +22,12 @@ enum XGI_VB_CHIP_TYPE {
};
#endif
+
+#define XGI_LCD_TYPE
+/* Since the merge with video/sis, the LCD_TYPEs are used from
+   drivers/video/sis/sis.h. Nevertheless, we keep this (for the moment) for
+   future reference until the code is merged completely and we are sure
+   that none of it needs to be added to the sis.h header. */
#ifndef XGI_LCD_TYPE
enum XGI_LCD_TYPE {
LCD_INVALID = 0,
diff --git a/drivers/staging/zcache/Kconfig b/drivers/staging/zcache/Kconfig
index 7fabcb2bc80..3ed2c8f656a 100644
--- a/drivers/staging/zcache/Kconfig
+++ b/drivers/staging/zcache/Kconfig
@@ -1,13 +1,14 @@
config ZCACHE
- tristate "Dynamic compression of swap pages and clean pagecache pages"
- depends on CLEANCACHE || FRONTSWAP
- select XVMALLOC
- select LZO_COMPRESS
- select LZO_DECOMPRESS
+ bool "Dynamic compression of swap pages and clean pagecache pages"
+ # X86 dependency is because zsmalloc uses non-portable pte/tlb
+ # functions
+ depends on (CLEANCACHE || FRONTSWAP) && CRYPTO && X86
+ select ZSMALLOC
+ select CRYPTO_LZO
default n
help
Zcache doubles RAM efficiency while providing a significant
- performance boosts on many workloads. Zcache uses lzo1x
+	  performance boost on many workloads. Zcache uses
compression and an in-kernel implementation of transcendent
memory to store clean page cache pages and swap in RAM,
providing a noticeable reduction in disk I/O.
diff --git a/drivers/staging/zcache/tmem.h b/drivers/staging/zcache/tmem.h
index ed147c4b110..0d4aa82706b 100644
--- a/drivers/staging/zcache/tmem.h
+++ b/drivers/staging/zcache/tmem.h
@@ -47,7 +47,7 @@
#define ASSERT_INVERTED_SENTINEL(_x, _y) do { } while (0)
#endif
-#define ASSERT_SPINLOCK(_l) WARN_ON(!spin_is_locked(_l))
+#define ASSERT_SPINLOCK(_l) lockdep_assert_held(_l)
/*
* A pool is the highest-level data structure managed by tmem and
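The ASSERT_SPINLOCK change above is more than cosmetic: spin_is_locked() only reports that some CPU holds the lock (and on uniprocessor builds, where spinlocks compile away, it can report the lock as unheld even while it is held, making WARN_ON(!spin_is_locked()) fire spuriously), whereas lockdep_assert_held() checks that the current context holds the lock and costs nothing unless lockdep is enabled. A minimal sketch of the pattern, with illustrative demo_* names:

	static DEFINE_SPINLOCK(demo_lock);

	static void demo_locked_op(void)
	{
		/* complains under CONFIG_PROVE_LOCKING if the caller forgot the lock */
		lockdep_assert_held(&demo_lock);
		/* ... touch data protected by demo_lock ... */
	}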
diff --git a/drivers/staging/zcache/zcache-main.c b/drivers/staging/zcache/zcache-main.c
index ef7c52bb1df..70734652f72 100644
--- a/drivers/staging/zcache/zcache-main.c
+++ b/drivers/staging/zcache/zcache-main.c
@@ -6,9 +6,10 @@
*
* Zcache provides an in-kernel "host implementation" for transcendent memory
* and, thus indirectly, for cleancache and frontswap. Zcache includes two
- * page-accessible memory [1] interfaces, both utilizing lzo1x compression:
+ * page-accessible memory [1] interfaces, both utilizing the crypto compression
+ * API:
* 1) "compression buddies" ("zbud") is used for ephemeral pages
- * 2) xvmalloc is used for persistent pages.
+ * 2) zsmalloc is used for persistent pages.
- * Xvmalloc (based on the TLSF allocator) has very low fragmentation
+ * Zsmalloc (a slab-based allocator) has very low fragmentation
* so maximizes space efficiency, while zbud allows pairs (and potentially,
* in the future, more than a pair of) compressed pages to be closely linked
@@ -23,15 +24,16 @@
#include <linux/cpu.h>
#include <linux/highmem.h>
#include <linux/list.h>
-#include <linux/lzo.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/math64.h>
+#include <linux/crypto.h>
+#include <linux/string.h>
#include "tmem.h"
-#include "../zram/xvmalloc.h" /* if built in drivers/staging */
+#include "../zsmalloc/zsmalloc.h"
#if (!defined(CONFIG_CLEANCACHE) && !defined(CONFIG_FRONTSWAP))
#error "zcache is useless without CONFIG_CLEANCACHE or CONFIG_FRONTSWAP"
@@ -60,7 +62,7 @@ MODULE_LICENSE("GPL");
struct zcache_client {
struct tmem_pool *tmem_pools[MAX_POOLS_PER_CLIENT];
- struct xv_pool *xvpool;
+ struct zs_pool *zspool;
bool allocated;
atomic_t refcount;
};
@@ -81,6 +83,38 @@ static inline bool is_local_client(struct zcache_client *cli)
return cli == &zcache_host;
}
+/* crypto API for zcache */
+#define ZCACHE_COMP_NAME_SZ CRYPTO_MAX_ALG_NAME
+static char zcache_comp_name[ZCACHE_COMP_NAME_SZ];
+static struct crypto_comp * __percpu *zcache_comp_pcpu_tfms;
+
+enum comp_op {
+ ZCACHE_COMPOP_COMPRESS,
+ ZCACHE_COMPOP_DECOMPRESS
+};
+
+static inline int zcache_comp_op(enum comp_op op,
+ const u8 *src, unsigned int slen,
+ u8 *dst, unsigned int *dlen)
+{
+ struct crypto_comp *tfm;
+ int ret;
+
+ BUG_ON(!zcache_comp_pcpu_tfms);
+ tfm = *per_cpu_ptr(zcache_comp_pcpu_tfms, get_cpu());
+ BUG_ON(!tfm);
+ switch (op) {
+ case ZCACHE_COMPOP_COMPRESS:
+ ret = crypto_comp_compress(tfm, src, slen, dst, dlen);
+ break;
+ case ZCACHE_COMPOP_DECOMPRESS:
+ ret = crypto_comp_decompress(tfm, src, slen, dst, dlen);
+ break;
+	default:
+		ret = -EINVAL;
+	}
+ put_cpu();
+ return ret;
+}
+
/**********
* Compression buddies ("zbud") provides for packing two (or, possibly
* in the future, more) compressed ephemeral pages into a single "raw"
@@ -299,10 +333,12 @@ static void zbud_free_and_delist(struct zbud_hdr *zh)
struct zbud_page *zbpg =
container_of(zh, struct zbud_page, buddy[budnum]);
+ spin_lock(&zbud_budlists_spinlock);
spin_lock(&zbpg->lock);
if (list_empty(&zbpg->bud_list)) {
/* ignore zombie page... see zbud_evict_pages() */
spin_unlock(&zbpg->lock);
+ spin_unlock(&zbud_budlists_spinlock);
return;
}
size = zbud_free(zh);
@@ -310,7 +346,6 @@ static void zbud_free_and_delist(struct zbud_hdr *zh)
zh_other = &zbpg->buddy[(budnum == 0) ? 1 : 0];
if (zh_other->size == 0) { /* was unbuddied: unlist and free */
chunks = zbud_size_to_chunks(size) ;
- spin_lock(&zbud_budlists_spinlock);
BUG_ON(list_empty(&zbud_unbuddied[chunks].list));
list_del_init(&zbpg->bud_list);
zbud_unbuddied[chunks].count--;
@@ -318,7 +353,6 @@ static void zbud_free_and_delist(struct zbud_hdr *zh)
zbud_free_raw_page(zbpg);
} else { /* was buddied: move remaining buddy to unbuddied list */
chunks = zbud_size_to_chunks(zh_other->size) ;
- spin_lock(&zbud_budlists_spinlock);
list_del_init(&zbpg->bud_list);
zcache_zbud_buddied_count--;
list_add_tail(&zbpg->bud_list, &zbud_unbuddied[chunks].list);
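The three hunks above also fix the lock ordering in zbud_free_and_delist(): zbud_budlists_spinlock is now taken once, before the per-page zbpg->lock, instead of being acquired inside each branch after the page state has already been sampled. Using a single global order (list lock outer, page lock inner) avoids deadlock against other list users and closes the window in which the page could change lists between the two acquisitions. The generic shape of the pattern, as an illustrative sketch:

	static DEFINE_SPINLOCK(demo_list_lock);	/* outer: protects list membership */
	static DEFINE_SPINLOCK(demo_page_lock);	/* inner: protects page contents */

	static void demo_unlist_page(void)
	{
		spin_lock(&demo_list_lock);
		spin_lock(&demo_page_lock);
		/* ... examine page state and move it between lists atomically ... */
		spin_unlock(&demo_page_lock);
		spin_unlock(&demo_list_lock);
	}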
@@ -407,7 +441,7 @@ static int zbud_decompress(struct page *page, struct zbud_hdr *zh)
{
struct zbud_page *zbpg;
unsigned budnum = zbud_budnum(zh);
- size_t out_len = PAGE_SIZE;
+ unsigned int out_len = PAGE_SIZE;
char *to_va, *from_va;
unsigned size;
int ret = 0;
@@ -424,8 +458,9 @@ static int zbud_decompress(struct page *page, struct zbud_hdr *zh)
to_va = kmap_atomic(page, KM_USER0);
size = zh->size;
from_va = zbud_data(zh, size);
- ret = lzo1x_decompress_safe(from_va, size, to_va, &out_len);
- BUG_ON(ret != LZO_E_OK);
+ ret = zcache_comp_op(ZCACHE_COMPOP_DECOMPRESS, from_va, size,
+ to_va, &out_len);
+ BUG_ON(ret);
BUG_ON(out_len != PAGE_SIZE);
kunmap_atomic(to_va, KM_USER0);
out:
@@ -622,8 +657,8 @@ static int zbud_show_cumul_chunk_counts(char *buf)
#endif
/**********
- * This "zv" PAM implementation combines the TLSF-based xvMalloc
- * with lzo1x compression to maximize the amount of data that can
+ * This "zv" PAM implementation combines the slab-based zsmalloc
+ * with the crypto compression API to maximize the amount of data that can
* be packed into a physical page.
*
* Zv represents a PAM page with the index and object (plus a "size" value
@@ -636,6 +671,7 @@ struct zv_hdr {
uint32_t pool_id;
struct tmem_oid oid;
uint32_t index;
+ size_t size;
DECL_SENTINEL
};
@@ -657,72 +693,72 @@ static unsigned int zv_max_mean_zsize = (PAGE_SIZE / 8) * 5;
static atomic_t zv_curr_dist_counts[NCHUNKS];
static atomic_t zv_cumul_dist_counts[NCHUNKS];
-static struct zv_hdr *zv_create(struct xv_pool *xvpool, uint32_t pool_id,
+static struct zv_hdr *zv_create(struct zs_pool *pool, uint32_t pool_id,
struct tmem_oid *oid, uint32_t index,
void *cdata, unsigned clen)
{
- struct page *page;
- struct zv_hdr *zv = NULL;
- uint32_t offset;
- int alloc_size = clen + sizeof(struct zv_hdr);
- int chunks = (alloc_size + (CHUNK_SIZE - 1)) >> CHUNK_SHIFT;
- int ret;
+ struct zv_hdr *zv;
+ u32 size = clen + sizeof(struct zv_hdr);
+ int chunks = (size + (CHUNK_SIZE - 1)) >> CHUNK_SHIFT;
+ void *handle = NULL;
BUG_ON(!irqs_disabled());
BUG_ON(chunks >= NCHUNKS);
- ret = xv_malloc(xvpool, alloc_size,
- &page, &offset, ZCACHE_GFP_MASK);
- if (unlikely(ret))
+ handle = zs_malloc(pool, size);
+ if (!handle)
goto out;
atomic_inc(&zv_curr_dist_counts[chunks]);
atomic_inc(&zv_cumul_dist_counts[chunks]);
- zv = kmap_atomic(page, KM_USER0) + offset;
+ zv = zs_map_object(pool, handle);
zv->index = index;
zv->oid = *oid;
zv->pool_id = pool_id;
+ zv->size = clen;
SET_SENTINEL(zv, ZVH);
memcpy((char *)zv + sizeof(struct zv_hdr), cdata, clen);
- kunmap_atomic(zv, KM_USER0);
+ zs_unmap_object(pool, handle);
out:
- return zv;
+ return handle;
}
-static void zv_free(struct xv_pool *xvpool, struct zv_hdr *zv)
+static void zv_free(struct zs_pool *pool, void *handle)
{
unsigned long flags;
- struct page *page;
- uint32_t offset;
- uint16_t size = xv_get_object_size(zv);
- int chunks = (size + (CHUNK_SIZE - 1)) >> CHUNK_SHIFT;
+ struct zv_hdr *zv;
+ uint16_t size;
+ int chunks;
+ zv = zs_map_object(pool, handle);
ASSERT_SENTINEL(zv, ZVH);
+ size = zv->size + sizeof(struct zv_hdr);
+ INVERT_SENTINEL(zv, ZVH);
+ zs_unmap_object(pool, handle);
+
+ chunks = (size + (CHUNK_SIZE - 1)) >> CHUNK_SHIFT;
BUG_ON(chunks >= NCHUNKS);
atomic_dec(&zv_curr_dist_counts[chunks]);
- size -= sizeof(*zv);
- BUG_ON(size == 0);
- INVERT_SENTINEL(zv, ZVH);
- page = virt_to_page(zv);
- offset = (unsigned long)zv & ~PAGE_MASK;
+
local_irq_save(flags);
- xv_free(xvpool, page, offset);
+ zs_free(pool, handle);
local_irq_restore(flags);
}
-static void zv_decompress(struct page *page, struct zv_hdr *zv)
+static void zv_decompress(struct page *page, void *handle)
{
- size_t clen = PAGE_SIZE;
+ unsigned int clen = PAGE_SIZE;
char *to_va;
- unsigned size;
int ret;
+ struct zv_hdr *zv;
+ zv = zs_map_object(zcache_host.zspool, handle);
+ BUG_ON(zv->size == 0);
ASSERT_SENTINEL(zv, ZVH);
- size = xv_get_object_size(zv) - sizeof(*zv);
- BUG_ON(size == 0);
to_va = kmap_atomic(page, KM_USER0);
- ret = lzo1x_decompress_safe((char *)zv + sizeof(*zv),
- size, to_va, &clen);
+ ret = zcache_comp_op(ZCACHE_COMPOP_DECOMPRESS, (char *)zv + sizeof(*zv),
+ zv->size, to_va, &clen);
kunmap_atomic(to_va, KM_USER0);
- BUG_ON(ret != LZO_E_OK);
+ zs_unmap_object(zcache_host.zspool, handle);
+ BUG_ON(ret);
BUG_ON(clen != PAGE_SIZE);
}
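All of the zv_* conversions above follow the same zsmalloc contract: zs_malloc() returns an opaque handle rather than a pointer, and an object is only addressable between zs_map_object() and zs_unmap_object(). A minimal sketch of a store under that contract (demo_store is illustrative; the zs_* API is the one introduced by this patch under drivers/staging/zsmalloc):

	static int demo_store(struct zs_pool *pool, const void *data, size_t len)
	{
		void *handle, *dst;

		handle = zs_malloc(pool, len);	/* opaque handle, not a pointer */
		if (!handle)
			return -ENOMEM;
		dst = zs_map_object(pool, handle);
		memcpy(dst, data, len);
		zs_unmap_object(pool, handle);	/* mapping is per-cpu, keep it brief */
		/* remember 'handle'; release later with zs_free(pool, handle) */
		return 0;
	}

This is also why zv_create() now returns the handle instead of the struct zv_hdr pointer, and why zv_free() and zv_decompress() must map the handle before they can read the header fields.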
@@ -948,8 +984,8 @@ int zcache_new_client(uint16_t cli_id)
goto out;
cli->allocated = 1;
#ifdef CONFIG_FRONTSWAP
- cli->xvpool = xv_create_pool();
- if (cli->xvpool == NULL)
+ cli->zspool = zs_create_pool("zcache", ZCACHE_GFP_MASK);
+ if (cli->zspool == NULL)
goto out;
#endif
ret = 0;
@@ -1132,14 +1168,14 @@ static atomic_t zcache_curr_pers_pampd_count = ATOMIC_INIT(0);
static unsigned long zcache_curr_pers_pampd_count_max;
/* forward reference */
-static int zcache_compress(struct page *from, void **out_va, size_t *out_len);
+static int zcache_compress(struct page *from, void **out_va, unsigned *out_len);
static void *zcache_pampd_create(char *data, size_t size, bool raw, int eph,
struct tmem_pool *pool, struct tmem_oid *oid,
uint32_t index)
{
void *pampd = NULL, *cdata;
- size_t clen;
+ unsigned clen;
int ret;
unsigned long count;
struct page *page = (struct page *)(data);
@@ -1180,7 +1216,7 @@ static void *zcache_pampd_create(char *data, size_t size, bool raw, int eph,
}
/* reject if mean compression is too poor */
if ((clen > zv_max_mean_zsize) && (curr_pers_pampd_count > 0)) {
- total_zsize = xv_get_total_size_bytes(cli->xvpool);
+ total_zsize = zs_get_total_size_bytes(cli->zspool);
zv_mean_zsize = div_u64(total_zsize,
curr_pers_pampd_count);
if (zv_mean_zsize > zv_max_mean_zsize) {
@@ -1188,7 +1224,7 @@ static void *zcache_pampd_create(char *data, size_t size, bool raw, int eph,
goto out;
}
}
- pampd = (void *)zv_create(cli->xvpool, pool->pool_id,
+ pampd = (void *)zv_create(cli->zspool, pool->pool_id,
oid, index, cdata, clen);
if (pampd == NULL)
goto out;
@@ -1246,7 +1282,7 @@ static void zcache_pampd_free(void *pampd, struct tmem_pool *pool,
atomic_dec(&zcache_curr_eph_pampd_count);
BUG_ON(atomic_read(&zcache_curr_eph_pampd_count) < 0);
} else {
- zv_free(cli->xvpool, (struct zv_hdr *)pampd);
+ zv_free(cli->zspool, pampd);
atomic_dec(&zcache_curr_pers_pampd_count);
BUG_ON(atomic_read(&zcache_curr_pers_pampd_count) < 0);
}
@@ -1285,25 +1321,24 @@ static struct tmem_pamops zcache_pamops = {
* zcache compression/decompression and related per-cpu stuff
*/
-#define LZO_WORKMEM_BYTES LZO1X_1_MEM_COMPRESS
-#define LZO_DSTMEM_PAGE_ORDER 1
-static DEFINE_PER_CPU(unsigned char *, zcache_workmem);
static DEFINE_PER_CPU(unsigned char *, zcache_dstmem);
+#define ZCACHE_DSTMEM_ORDER 1
-static int zcache_compress(struct page *from, void **out_va, size_t *out_len)
+static int zcache_compress(struct page *from, void **out_va, unsigned *out_len)
{
int ret = 0;
unsigned char *dmem = __get_cpu_var(zcache_dstmem);
- unsigned char *wmem = __get_cpu_var(zcache_workmem);
char *from_va;
BUG_ON(!irqs_disabled());
- if (unlikely(dmem == NULL || wmem == NULL))
- goto out; /* no buffer, so can't compress */
+ if (unlikely(dmem == NULL))
+ goto out; /* no buffer or no compressor so can't compress */
+ *out_len = PAGE_SIZE << ZCACHE_DSTMEM_ORDER;
from_va = kmap_atomic(from, KM_USER0);
mb();
- ret = lzo1x_1_compress(from_va, PAGE_SIZE, dmem, out_len, wmem);
- BUG_ON(ret != LZO_E_OK);
+ ret = zcache_comp_op(ZCACHE_COMPOP_COMPRESS, from_va, PAGE_SIZE, dmem,
+ out_len);
+ BUG_ON(ret);
*out_va = dmem;
kunmap_atomic(from_va, KM_USER0);
ret = 1;
@@ -1311,29 +1346,48 @@ out:
return ret;
}
+static int zcache_comp_cpu_up(int cpu)
+{
+ struct crypto_comp *tfm;
+
+ tfm = crypto_alloc_comp(zcache_comp_name, 0, 0);
+ if (IS_ERR(tfm))
+ return NOTIFY_BAD;
+ *per_cpu_ptr(zcache_comp_pcpu_tfms, cpu) = tfm;
+ return NOTIFY_OK;
+}
+
+static void zcache_comp_cpu_down(int cpu)
+{
+ struct crypto_comp *tfm;
+
+ tfm = *per_cpu_ptr(zcache_comp_pcpu_tfms, cpu);
+ crypto_free_comp(tfm);
+ *per_cpu_ptr(zcache_comp_pcpu_tfms, cpu) = NULL;
+}
static int zcache_cpu_notifier(struct notifier_block *nb,
unsigned long action, void *pcpu)
{
- int cpu = (long)pcpu;
+ int ret, cpu = (long)pcpu;
struct zcache_preload *kp;
switch (action) {
case CPU_UP_PREPARE:
+ ret = zcache_comp_cpu_up(cpu);
+ if (ret != NOTIFY_OK) {
+ pr_err("zcache: can't allocate compressor transform\n");
+ return ret;
+ }
per_cpu(zcache_dstmem, cpu) = (void *)__get_free_pages(
- GFP_KERNEL | __GFP_REPEAT,
- LZO_DSTMEM_PAGE_ORDER),
- per_cpu(zcache_workmem, cpu) =
- kzalloc(LZO1X_MEM_COMPRESS,
- GFP_KERNEL | __GFP_REPEAT);
+ GFP_KERNEL | __GFP_REPEAT, ZCACHE_DSTMEM_ORDER);
break;
case CPU_DEAD:
case CPU_UP_CANCELED:
+ zcache_comp_cpu_down(cpu);
free_pages((unsigned long)per_cpu(zcache_dstmem, cpu),
- LZO_DSTMEM_PAGE_ORDER);
+ ZCACHE_DSTMEM_ORDER);
per_cpu(zcache_dstmem, cpu) = NULL;
- kfree(per_cpu(zcache_workmem, cpu));
- per_cpu(zcache_workmem, cpu) = NULL;
kp = &per_cpu(zcache_preloads, cpu);
while (kp->nr) {
kmem_cache_free(zcache_objnode_cache,
@@ -1918,6 +1972,44 @@ static int __init no_frontswap(char *s)
__setup("nofrontswap", no_frontswap);
+static int __init enable_zcache_compressor(char *s)
+{
+ strncpy(zcache_comp_name, s, ZCACHE_COMP_NAME_SZ);
+ zcache_enabled = 1;
+ return 1;
+}
+__setup("zcache=", enable_zcache_compressor);
+
+
+static int zcache_comp_init(void)
+{
+ int ret = 0;
+
+ /* check crypto algorithm */
+ if (*zcache_comp_name != '\0') {
+ ret = crypto_has_comp(zcache_comp_name, 0, 0);
+ if (!ret)
+ pr_info("zcache: %s not supported\n",
+ zcache_comp_name);
+ }
+ if (!ret)
+ strcpy(zcache_comp_name, "lzo");
+ ret = crypto_has_comp(zcache_comp_name, 0, 0);
+ if (!ret) {
+ ret = 1;
+ goto out;
+ }
+ pr_info("zcache: using %s compressor\n", zcache_comp_name);
+
+ /* alloc percpu transforms */
+ ret = 0;
+ zcache_comp_pcpu_tfms = alloc_percpu(struct crypto_comp *);
+ if (!zcache_comp_pcpu_tfms)
+ ret = 1;
+out:
+ return ret;
+}
+
static int __init zcache_init(void)
{
int ret = 0;
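Together, enable_zcache_compressor() and zcache_comp_init() above make the compressor selectable on the kernel command line: booting with, say, zcache=deflate enables zcache with the crypto "deflate" algorithm, and zcache falls back to "lzo" when the requested algorithm is not registered. The selection policy reduces to roughly this sketch (demo_pick_comp is an illustrative name):

	static const char *demo_pick_comp(const char *wanted)
	{
		if (wanted && *wanted && crypto_has_comp(wanted, 0, 0))
			return wanted;		/* user's choice is available */
		return crypto_has_comp("lzo", 0, 0) ? "lzo" : NULL;
	}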
@@ -1940,6 +2032,11 @@ static int __init zcache_init(void)
pr_err("zcache: can't register cpu notifier\n");
goto out;
}
+ ret = zcache_comp_init();
+ if (ret) {
+ pr_err("zcache: compressor initialization failed\n");
+ goto out;
+ }
for_each_online_cpu(cpu) {
void *pcpu = (void *)(long)cpu;
zcache_cpu_notifier(&zcache_cpu_notifier_block,
@@ -1975,7 +2072,7 @@ static int __init zcache_init(void)
old_ops = zcache_frontswap_register_ops();
pr_info("zcache: frontswap enabled using kernel "
- "transcendent memory and xvmalloc\n");
+ "transcendent memory and zsmalloc\n");
if (old_ops.init != NULL)
pr_warning("zcache: frontswap_ops overridden");
}
diff --git a/drivers/staging/zram/Kconfig b/drivers/staging/zram/Kconfig
index 3bec4dba3fe..9d11a4cb99b 100644
--- a/drivers/staging/zram/Kconfig
+++ b/drivers/staging/zram/Kconfig
@@ -1,11 +1,9 @@
-config XVMALLOC
- bool
- default n
-
config ZRAM
tristate "Compressed RAM block device support"
- depends on BLOCK && SYSFS
- select XVMALLOC
+ # X86 dependency is because zsmalloc uses non-portable pte/tlb
+ # functions
+ depends on BLOCK && SYSFS && X86
+ select ZSMALLOC
select LZO_COMPRESS
select LZO_DECOMPRESS
default n
diff --git a/drivers/staging/zram/Makefile b/drivers/staging/zram/Makefile
index 2a6d3213a75..7f4a3019e9c 100644
--- a/drivers/staging/zram/Makefile
+++ b/drivers/staging/zram/Makefile
@@ -1,4 +1,3 @@
zram-y := zram_drv.o zram_sysfs.o
obj-$(CONFIG_ZRAM) += zram.o
-obj-$(CONFIG_XVMALLOC)	+=	xvmalloc.o
\ No newline at end of file
diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c
index 2a2a92d389e..7f138196b3c 100644
--- a/drivers/staging/zram/zram_drv.c
+++ b/drivers/staging/zram/zram_drv.c
@@ -40,7 +40,7 @@ static int zram_major;
struct zram *zram_devices;
/* Module params (documentation at end) */
-unsigned int zram_num_devices;
+static unsigned int num_devices;
static void zram_stat_inc(u32 *v)
{
@@ -135,13 +135,9 @@ static void zram_set_disksize(struct zram *zram, size_t totalram_bytes)
static void zram_free_page(struct zram *zram, size_t index)
{
- u32 clen;
- void *obj;
+ void *handle = zram->table[index].handle;
- struct page *page = zram->table[index].page;
- u32 offset = zram->table[index].offset;
-
- if (unlikely(!page)) {
+ if (unlikely(!handle)) {
/*
* No memory is allocated for zero filled pages.
* Simply clear zero page flag.
@@ -154,27 +150,24 @@ static void zram_free_page(struct zram *zram, size_t index)
}
if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
- clen = PAGE_SIZE;
- __free_page(page);
+ __free_page(handle);
zram_clear_flag(zram, index, ZRAM_UNCOMPRESSED);
zram_stat_dec(&zram->stats.pages_expand);
goto out;
}
- obj = kmap_atomic(page, KM_USER0) + offset;
- clen = xv_get_object_size(obj) - sizeof(struct zobj_header);
- kunmap_atomic(obj, KM_USER0);
+ zs_free(zram->mem_pool, handle);
- xv_free(zram->mem_pool, page, offset);
- if (clen <= PAGE_SIZE / 2)
+ if (zram->table[index].size <= PAGE_SIZE / 2)
zram_stat_dec(&zram->stats.good_compress);
out:
- zram_stat64_sub(zram, &zram->stats.compr_size, clen);
+ zram_stat64_sub(zram, &zram->stats.compr_size,
+ zram->table[index].size);
zram_stat_dec(&zram->stats.pages_stored);
- zram->table[index].page = NULL;
- zram->table[index].offset = 0;
+ zram->table[index].handle = NULL;
+ zram->table[index].size = 0;
}
static void handle_zero_page(struct bio_vec *bvec)
@@ -196,7 +189,7 @@ static void handle_uncompressed_page(struct zram *zram, struct bio_vec *bvec,
unsigned char *user_mem, *cmem;
user_mem = kmap_atomic(page, KM_USER0);
- cmem = kmap_atomic(zram->table[index].page, KM_USER1);
+ cmem = kmap_atomic(zram->table[index].handle, KM_USER1);
memcpy(user_mem + bvec->bv_offset, cmem + offset, bvec->bv_len);
kunmap_atomic(cmem, KM_USER1);
@@ -227,7 +220,7 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
}
/* Requested page is not present in compressed area */
- if (unlikely(!zram->table[index].page)) {
+ if (unlikely(!zram->table[index].handle)) {
pr_debug("Read before write: sector=%lu, size=%u",
(ulong)(bio->bi_sector), bio->bi_size);
handle_zero_page(bvec);
@@ -254,11 +247,10 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
uncmem = user_mem;
clen = PAGE_SIZE;
- cmem = kmap_atomic(zram->table[index].page, KM_USER1) +
- zram->table[index].offset;
+ cmem = zs_map_object(zram->mem_pool, zram->table[index].handle);
ret = lzo1x_decompress_safe(cmem + sizeof(*zheader),
- xv_get_object_size(cmem) - sizeof(*zheader),
+ zram->table[index].size,
uncmem, &clen);
if (is_partial_io(bvec)) {
@@ -267,7 +259,7 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
kfree(uncmem);
}
- kunmap_atomic(cmem, KM_USER1);
+ zs_unmap_object(zram->mem_pool, zram->table[index].handle);
kunmap_atomic(user_mem, KM_USER0);
/* Should NEVER happen. Return bio error if it does. */
@@ -290,13 +282,12 @@ static int zram_read_before_write(struct zram *zram, char *mem, u32 index)
unsigned char *cmem;
if (zram_test_flag(zram, index, ZRAM_ZERO) ||
- !zram->table[index].page) {
+ !zram->table[index].handle) {
memset(mem, 0, PAGE_SIZE);
return 0;
}
- cmem = kmap_atomic(zram->table[index].page, KM_USER0) +
- zram->table[index].offset;
+ cmem = zs_map_object(zram->mem_pool, zram->table[index].handle);
/* Page is stored uncompressed since it's incompressible */
if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
@@ -306,9 +297,9 @@ static int zram_read_before_write(struct zram *zram, char *mem, u32 index)
}
ret = lzo1x_decompress_safe(cmem + sizeof(*zheader),
- xv_get_object_size(cmem) - sizeof(*zheader),
+ zram->table[index].size,
mem, &clen);
- kunmap_atomic(cmem, KM_USER0);
+ zs_unmap_object(zram->mem_pool, zram->table[index].handle);
/* Should NEVER happen. Return bio error if it does. */
if (unlikely(ret != LZO_E_OK)) {
@@ -326,6 +317,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
int ret;
u32 store_offset;
size_t clen;
+ void *handle;
struct zobj_header *zheader;
struct page *page, *page_store;
unsigned char *user_mem, *cmem, *src, *uncmem = NULL;
@@ -355,7 +347,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
* System overwrites unused sectors. Free memory associated
* with this sector now.
*/
- if (zram->table[index].page ||
+ if (zram->table[index].handle ||
zram_test_flag(zram, index, ZRAM_ZERO))
zram_free_page(zram, index);
@@ -407,26 +399,22 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
store_offset = 0;
zram_set_flag(zram, index, ZRAM_UNCOMPRESSED);
zram_stat_inc(&zram->stats.pages_expand);
- zram->table[index].page = page_store;
+ handle = page_store;
src = kmap_atomic(page, KM_USER0);
+ cmem = kmap_atomic(page_store, KM_USER1);
goto memstore;
}
- if (xv_malloc(zram->mem_pool, clen + sizeof(*zheader),
- &zram->table[index].page, &store_offset,
- GFP_NOIO | __GFP_HIGHMEM)) {
+ handle = zs_malloc(zram->mem_pool, clen + sizeof(*zheader));
+ if (!handle) {
pr_info("Error allocating memory for compressed "
"page: %u, size=%zu\n", index, clen);
ret = -ENOMEM;
goto out;
}
+ cmem = zs_map_object(zram->mem_pool, handle);
memstore:
- zram->table[index].offset = store_offset;
-
- cmem = kmap_atomic(zram->table[index].page, KM_USER1) +
- zram->table[index].offset;
-
#if 0
/* Back-reference needed for memory defragmentation */
if (!zram_test_flag(zram, index, ZRAM_UNCOMPRESSED)) {
@@ -438,9 +426,15 @@ memstore:
memcpy(cmem, src, clen);
- kunmap_atomic(cmem, KM_USER1);
- if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED)))
+ if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
+ kunmap_atomic(cmem, KM_USER1);
kunmap_atomic(src, KM_USER0);
+ } else {
+ zs_unmap_object(zram->mem_pool, handle);
+ }
+
+ zram->table[index].handle = handle;
+ zram->table[index].size = clen;
/* Update stats */
zram_stat64_add(zram, &zram->stats.compr_size, clen);
@@ -598,25 +592,20 @@ void __zram_reset_device(struct zram *zram)
/* Free all pages that are still in this zram device */
for (index = 0; index < zram->disksize >> PAGE_SHIFT; index++) {
- struct page *page;
- u16 offset;
-
- page = zram->table[index].page;
- offset = zram->table[index].offset;
-
- if (!page)
+ void *handle = zram->table[index].handle;
+ if (!handle)
continue;
if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED)))
- __free_page(page);
+ __free_page(handle);
else
- xv_free(zram->mem_pool, page, offset);
+ zs_free(zram->mem_pool, handle);
}
vfree(zram->table);
zram->table = NULL;
- xv_destroy_pool(zram->mem_pool);
+ zs_destroy_pool(zram->mem_pool);
zram->mem_pool = NULL;
/* Reset stats */
@@ -674,7 +663,7 @@ int zram_init_device(struct zram *zram)
/* zram devices sort of resembles non-rotational disks */
queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);
- zram->mem_pool = xv_create_pool();
+ zram->mem_pool = zs_create_pool("zram", GFP_NOIO | __GFP_HIGHMEM);
if (!zram->mem_pool) {
pr_err("Error creating memory pool\n");
ret = -ENOMEM;
@@ -790,13 +779,18 @@ static void destroy_device(struct zram *zram)
blk_cleanup_queue(zram->queue);
}
+unsigned int zram_get_num_devices(void)
+{
+ return num_devices;
+}
+
static int __init zram_init(void)
{
int ret, dev_id;
- if (zram_num_devices > max_num_devices) {
+ if (num_devices > max_num_devices) {
pr_warning("Invalid value for num_devices: %u\n",
- zram_num_devices);
+ num_devices);
ret = -EINVAL;
goto out;
}
@@ -808,20 +802,20 @@ static int __init zram_init(void)
goto out;
}
- if (!zram_num_devices) {
+ if (!num_devices) {
pr_info("num_devices not specified. Using default: 1\n");
- zram_num_devices = 1;
+ num_devices = 1;
}
/* Allocate the device array and initialize each one */
- pr_info("Creating %u devices ...\n", zram_num_devices);
- zram_devices = kzalloc(zram_num_devices * sizeof(struct zram), GFP_KERNEL);
+ pr_info("Creating %u devices ...\n", num_devices);
+ zram_devices = kzalloc(num_devices * sizeof(struct zram), GFP_KERNEL);
if (!zram_devices) {
ret = -ENOMEM;
goto unregister;
}
- for (dev_id = 0; dev_id < zram_num_devices; dev_id++) {
+ for (dev_id = 0; dev_id < num_devices; dev_id++) {
ret = create_device(&zram_devices[dev_id], dev_id);
if (ret)
goto free_devices;
@@ -844,7 +838,7 @@ static void __exit zram_exit(void)
int i;
struct zram *zram;
- for (i = 0; i < zram_num_devices; i++) {
+ for (i = 0; i < num_devices; i++) {
zram = &zram_devices[i];
destroy_device(zram);
@@ -858,8 +852,8 @@ static void __exit zram_exit(void)
pr_debug("Cleanup done!\n");
}
-module_param(zram_num_devices, uint, 0);
-MODULE_PARM_DESC(zram_num_devices, "Number of zram devices");
+module_param(num_devices, uint, 0);
+MODULE_PARM_DESC(num_devices, "Number of zram devices");
module_init(zram_init);
module_exit(zram_exit);
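The zram_drv.c changes above mirror the zcache conversion: each table slot now records an opaque zsmalloc handle plus the stored compressed size (see the struct table change in zram_drv.h below) in place of the old page pointer and offset, and kmap_atomic() of the compressed object becomes zs_map_object()/zs_unmap_object(). The read path reduces to roughly this sketch (simplified: the real code also accounts for the optional zobj_header and for uncompressed pages):

	static int demo_zram_read_slot(struct zram *zram, u32 index, void *dst)
	{
		size_t clen = PAGE_SIZE;
		void *cmem;
		int ret;

		cmem = zs_map_object(zram->mem_pool, zram->table[index].handle);
		ret = lzo1x_decompress_safe(cmem, zram->table[index].size,
					    dst, &clen);
		zs_unmap_object(zram->mem_pool, zram->table[index].handle);
		return ret == LZO_E_OK ? 0 : -EIO;
	}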
diff --git a/drivers/staging/zram/zram_drv.h b/drivers/staging/zram/zram_drv.h
index e5cd2469b6a..fbe8ac98704 100644
--- a/drivers/staging/zram/zram_drv.h
+++ b/drivers/staging/zram/zram_drv.h
@@ -18,7 +18,7 @@
#include <linux/spinlock.h>
#include <linux/mutex.h>
-#include "xvmalloc.h"
+#include "../zsmalloc/zsmalloc.h"
/*
* Some arbitrary value. This is just to catch
@@ -51,7 +51,7 @@ static const size_t max_zpage_size = PAGE_SIZE / 4 * 3;
/*
* NOTE: max_zpage_size must be less than or equal to:
- * XV_MAX_ALLOC_SIZE - sizeof(struct zobj_header)
+ * ZS_MAX_ALLOC_SIZE - sizeof(struct zobj_header)
- * otherwise, xv_malloc() would always return failure.
+ * otherwise, zs_malloc() would always return failure.
*/
@@ -81,8 +81,8 @@ enum zram_pageflags {
/* Allocated for each disk page */
struct table {
- struct page *page;
- u16 offset;
+ void *handle;
+ u16 size; /* object size (excluding header) */
u8 count; /* object ref count (not yet used) */
u8 flags;
} __attribute__((aligned(4)));
@@ -102,7 +102,7 @@ struct zram_stats {
};
struct zram {
- struct xv_pool *mem_pool;
+ struct zs_pool *mem_pool;
void *compress_workmem;
void *compress_buffer;
struct table *table;
@@ -124,7 +124,7 @@ struct zram {
};
extern struct zram *zram_devices;
-extern unsigned int zram_num_devices;
+unsigned int zram_get_num_devices(void);
#ifdef CONFIG_SYSFS
extern struct attribute_group zram_disk_attr_group;
#endif
diff --git a/drivers/staging/zram/zram_sysfs.c b/drivers/staging/zram/zram_sysfs.c
index d521122826f..a7f37717552 100644
--- a/drivers/staging/zram/zram_sysfs.c
+++ b/drivers/staging/zram/zram_sysfs.c
@@ -34,7 +34,7 @@ static struct zram *dev_to_zram(struct device *dev)
int i;
struct zram *zram = NULL;
- for (i = 0; i < zram_num_devices; i++) {
+ for (i = 0; i < zram_get_num_devices(); i++) {
zram = &zram_devices[i];
if (disk_to_dev(zram->disk) == dev)
break;
@@ -187,7 +187,7 @@ static ssize_t mem_used_total_show(struct device *dev,
struct zram *zram = dev_to_zram(dev);
if (zram->init_done) {
- val = xv_get_total_size_bytes(zram->mem_pool) +
+ val = zs_get_total_size_bytes(zram->mem_pool) +
((u64)(zram->stats.pages_expand) << PAGE_SHIFT);
}
diff --git a/drivers/staging/zsmalloc/Kconfig b/drivers/staging/zsmalloc/Kconfig
new file mode 100644
index 00000000000..a5ab7200626
--- /dev/null
+++ b/drivers/staging/zsmalloc/Kconfig
@@ -0,0 +1,14 @@
+config ZSMALLOC
+ tristate "Memory allocator for compressed pages"
+ # X86 dependency is because of the use of __flush_tlb_one and set_pte
+ # in zsmalloc-main.c.
+ # TODO: convert these to portable functions
+ depends on X86
+ default n
+ help
+ zsmalloc is a slab-based memory allocator designed to store
+ compressed RAM pages. zsmalloc uses virtual memory mapping
+ in order to reduce fragmentation. However, this results in a
+ non-standard allocator interface where a handle, not a pointer, is
+ returned by an alloc(). This handle must be mapped in order to
+ access the allocated space.
diff --git a/drivers/staging/zsmalloc/Makefile b/drivers/staging/zsmalloc/Makefile
new file mode 100644
index 00000000000..b134848a590
--- /dev/null
+++ b/drivers/staging/zsmalloc/Makefile
@@ -0,0 +1,3 @@
+zsmalloc-y := zsmalloc-main.o
+
+obj-$(CONFIG_ZSMALLOC) += zsmalloc.o
diff --git a/drivers/staging/zsmalloc/zsmalloc-main.c b/drivers/staging/zsmalloc/zsmalloc-main.c
new file mode 100644
index 00000000000..09caa4f2687
--- /dev/null
+++ b/drivers/staging/zsmalloc/zsmalloc-main.c
@@ -0,0 +1,745 @@
+/*
+ * zsmalloc memory allocator
+ *
+ * Copyright (C) 2011 Nitin Gupta
+ *
+ * This code is released using a dual license strategy: BSD/GPL
+ * You can choose the license that better fits your requirements.
+ *
+ * Released under the terms of 3-clause BSD License
+ * Released under the terms of GNU General Public License Version 2.0
+ */
+
+#ifdef CONFIG_ZSMALLOC_DEBUG
+#define DEBUG
+#endif
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/errno.h>
+#include <linux/highmem.h>
+#include <linux/init.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <asm/tlbflush.h>
+#include <asm/pgtable.h>
+#include <linux/cpumask.h>
+#include <linux/cpu.h>
+#include <linux/vmalloc.h>
+
+#include "zsmalloc.h"
+#include "zsmalloc_int.h"
+
+/*
+ * A zspage's class index and fullness group
+ * are encoded in its (first)page->mapping
+ */
+#define CLASS_IDX_BITS 28
+#define FULLNESS_BITS 4
+#define CLASS_IDX_MASK ((1 << CLASS_IDX_BITS) - 1)
+#define FULLNESS_MASK ((1 << FULLNESS_BITS) - 1)
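+/*
+ * For example, set_zspage_mapping() below stores class_idx 7 with
+ * fullness ZS_ALMOST_EMPTY (1) as (7 << FULLNESS_BITS) | 1 == 0x71.
+ */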
+
+/* per-cpu VM mapping areas for zspage accesses that cross page boundaries */
+static DEFINE_PER_CPU(struct mapping_area, zs_map_area);
+
+static int is_first_page(struct page *page)
+{
+ return test_bit(PG_private, &page->flags);
+}
+
+static int is_last_page(struct page *page)
+{
+ return test_bit(PG_private_2, &page->flags);
+}
+
+static void get_zspage_mapping(struct page *page, unsigned int *class_idx,
+ enum fullness_group *fullness)
+{
+ unsigned long m;
+ BUG_ON(!is_first_page(page));
+
+ m = (unsigned long)page->mapping;
+ *fullness = m & FULLNESS_MASK;
+ *class_idx = (m >> FULLNESS_BITS) & CLASS_IDX_MASK;
+}
+
+static void set_zspage_mapping(struct page *page, unsigned int class_idx,
+ enum fullness_group fullness)
+{
+ unsigned long m;
+ BUG_ON(!is_first_page(page));
+
+ m = ((class_idx & CLASS_IDX_MASK) << FULLNESS_BITS) |
+ (fullness & FULLNESS_MASK);
+ page->mapping = (struct address_space *)m;
+}
+
+static int get_size_class_index(int size)
+{
+ int idx = 0;
+
+ if (likely(size > ZS_MIN_ALLOC_SIZE))
+ idx = DIV_ROUND_UP(size - ZS_MIN_ALLOC_SIZE,
+ ZS_SIZE_CLASS_DELTA);
+
+ return idx;
+}
+
+static enum fullness_group get_fullness_group(struct page *page)
+{
+ int inuse, max_objects;
+ enum fullness_group fg;
+ BUG_ON(!is_first_page(page));
+
+ inuse = page->inuse;
+ max_objects = page->objects;
+
+ if (inuse == 0)
+ fg = ZS_EMPTY;
+ else if (inuse == max_objects)
+ fg = ZS_FULL;
+ else if (inuse <= max_objects / fullness_threshold_frac)
+ fg = ZS_ALMOST_EMPTY;
+ else
+ fg = ZS_ALMOST_FULL;
+
+ return fg;
+}
+
+static void insert_zspage(struct page *page, struct size_class *class,
+ enum fullness_group fullness)
+{
+ struct page **head;
+
+ BUG_ON(!is_first_page(page));
+
+ if (fullness >= _ZS_NR_FULLNESS_GROUPS)
+ return;
+
+ head = &class->fullness_list[fullness];
+ if (*head)
+ list_add_tail(&page->lru, &(*head)->lru);
+
+ *head = page;
+}
+
+static void remove_zspage(struct page *page, struct size_class *class,
+ enum fullness_group fullness)
+{
+ struct page **head;
+
+ BUG_ON(!is_first_page(page));
+
+ if (fullness >= _ZS_NR_FULLNESS_GROUPS)
+ return;
+
+ head = &class->fullness_list[fullness];
+ BUG_ON(!*head);
+ if (list_empty(&(*head)->lru))
+ *head = NULL;
+ else if (*head == page)
+ *head = (struct page *)list_entry((*head)->lru.next,
+ struct page, lru);
+
+ list_del_init(&page->lru);
+}
+
+static enum fullness_group fix_fullness_group(struct zs_pool *pool,
+ struct page *page)
+{
+ int class_idx;
+ struct size_class *class;
+ enum fullness_group currfg, newfg;
+
+ BUG_ON(!is_first_page(page));
+
+ get_zspage_mapping(page, &class_idx, &currfg);
+ newfg = get_fullness_group(page);
+ if (newfg == currfg)
+ goto out;
+
+ class = &pool->size_class[class_idx];
+ remove_zspage(page, class, currfg);
+ insert_zspage(page, class, newfg);
+ set_zspage_mapping(page, class_idx, newfg);
+
+out:
+ return newfg;
+}
+
+/*
+ * We have to decide on how many pages to link together
+ * to form a zspage for each size class. This is important
+ * to reduce wastage due to unusable space left at end of
+ * each zspage which is given as:
+ *	wastage = Zp % size_class
+ * where Zp = zspage size = k * PAGE_SIZE where k = 1, 2, ...
+ *
+ * For example, for size class of 3/8 * PAGE_SIZE, we should
+ * link together 3 PAGE_SIZE sized pages to form a zspage
+ * since then we can perfectly fit in 8 such objects.
+ */
+static int get_zspage_order(int class_size)
+{
+ int i, max_usedpc = 0;
+ /* zspage order which gives maximum used size per KB */
+ int max_usedpc_order = 1;
+
+ for (i = 1; i <= ZS_MAX_PAGES_PER_ZSPAGE; i++) {
+ int zspage_size;
+ int waste, usedpc;
+
+ zspage_size = i * PAGE_SIZE;
+ waste = zspage_size % class_size;
+ usedpc = (zspage_size - waste) * 100 / zspage_size;
+
+ if (usedpc > max_usedpc) {
+ max_usedpc = usedpc;
+ max_usedpc_order = i;
+ }
+ }
+
+ return max_usedpc_order;
+}
+
+/*
+ * A single 'zspage' is composed of many system pages which are
+ * linked together using fields in struct page. This function finds
+ * the first/head page, given any component page of a zspage.
+ */
+static struct page *get_first_page(struct page *page)
+{
+ if (is_first_page(page))
+ return page;
+ else
+ return page->first_page;
+}
+
+static struct page *get_next_page(struct page *page)
+{
+ struct page *next;
+
+ if (is_last_page(page))
+ next = NULL;
+ else if (is_first_page(page))
+ next = (struct page *)page->private;
+ else
+ next = list_entry(page->lru.next, struct page, lru);
+
+ return next;
+}
+
+/* Encode <page, obj_idx> as a single handle value */
+static void *obj_location_to_handle(struct page *page, unsigned long obj_idx)
+{
+ unsigned long handle;
+
+ if (!page) {
+ BUG_ON(obj_idx);
+ return NULL;
+ }
+
+ handle = page_to_pfn(page) << OBJ_INDEX_BITS;
+ handle |= (obj_idx & OBJ_INDEX_MASK);
+
+ return (void *)handle;
+}
+
+/* Decode <page, obj_idx> pair from the given object handle */
+static void obj_handle_to_location(void *handle, struct page **page,
+ unsigned long *obj_idx)
+{
+ unsigned long hval = (unsigned long)handle;
+
+ *page = pfn_to_page(hval >> OBJ_INDEX_BITS);
+ *obj_idx = hval & OBJ_INDEX_MASK;
+}
+
+static unsigned long obj_idx_to_offset(struct page *page,
+ unsigned long obj_idx, int class_size)
+{
+ unsigned long off = 0;
+
+ if (!is_first_page(page))
+ off = page->index;
+
+ return off + obj_idx * class_size;
+}
+
+static void free_zspage(struct page *first_page)
+{
+ struct page *nextp, *tmp;
+
+ BUG_ON(!is_first_page(first_page));
+ BUG_ON(first_page->inuse);
+
+ nextp = (struct page *)page_private(first_page);
+
+ clear_bit(PG_private, &first_page->flags);
+ clear_bit(PG_private_2, &first_page->flags);
+ set_page_private(first_page, 0);
+ first_page->mapping = NULL;
+ first_page->freelist = NULL;
+ reset_page_mapcount(first_page);
+ __free_page(first_page);
+
+ /* zspage with only 1 system page */
+ if (!nextp)
+ return;
+
+ list_for_each_entry_safe(nextp, tmp, &nextp->lru, lru) {
+ list_del(&nextp->lru);
+ clear_bit(PG_private_2, &nextp->flags);
+ nextp->index = 0;
+ __free_page(nextp);
+ }
+}
+
+/* Initialize a newly allocated zspage */
+static void init_zspage(struct page *first_page, struct size_class *class)
+{
+ unsigned long off = 0;
+ struct page *page = first_page;
+
+ BUG_ON(!is_first_page(first_page));
+ while (page) {
+ struct page *next_page;
+ struct link_free *link;
+ unsigned int i, objs_on_page;
+
+ /*
+ * page->index stores offset of first object starting
+ * in the page. For the first page, this is always 0,
+ * so we use first_page->index (aka ->freelist) to store
+ * head of corresponding zspage's freelist.
+ */
+ if (page != first_page)
+ page->index = off;
+
+ link = (struct link_free *)kmap_atomic(page) +
+ off / sizeof(*link);
+ objs_on_page = (PAGE_SIZE - off) / class->size;
+
+ for (i = 1; i <= objs_on_page; i++) {
+ off += class->size;
+ if (off < PAGE_SIZE) {
+ link->next = obj_location_to_handle(page, i);
+ link += class->size / sizeof(*link);
+ }
+ }
+
+ /*
+ * We now come to the last (full or partial) object on this
+ * page, which must point to the first object on the next
+ * page (if present)
+ */
+ next_page = get_next_page(page);
+ link->next = obj_location_to_handle(next_page, 0);
+ kunmap_atomic(link);
+ page = next_page;
+ off = (off + class->size) % PAGE_SIZE;
+ }
+}
+
+/*
+ * Allocate a zspage for the given size class
+ */
+static struct page *alloc_zspage(struct size_class *class, gfp_t flags)
+{
+ int i, error;
+ struct page *first_page = NULL;
+
+ /*
+ * Allocate individual pages and link them together as:
+ * 1. first page->private = first sub-page
+ * 2. all sub-pages are linked together using page->lru
+ * 3. each sub-page is linked to the first page using page->first_page
+ *
+ * For each size class, First/Head pages are linked together using
+ * page->lru. Also, we set PG_private to identify the first page
+ * (i.e. no other sub-page has this flag set) and PG_private_2 to
+ * identify the last page.
+ */
+ error = -ENOMEM;
+ for (i = 0; i < class->zspage_order; i++) {
+ struct page *page, *prev_page;
+
+ page = alloc_page(flags);
+ if (!page)
+ goto cleanup;
+
+ INIT_LIST_HEAD(&page->lru);
+ if (i == 0) { /* first page */
+ set_bit(PG_private, &page->flags);
+ set_page_private(page, 0);
+ first_page = page;
+ first_page->inuse = 0;
+ }
+ if (i == 1)
+ first_page->private = (unsigned long)page;
+ if (i >= 1)
+ page->first_page = first_page;
+ if (i >= 2)
+ list_add(&page->lru, &prev_page->lru);
+ if (i == class->zspage_order - 1) /* last page */
+ set_bit(PG_private_2, &page->flags);
+
+ prev_page = page;
+ }
+
+ init_zspage(first_page, class);
+
+ first_page->freelist = obj_location_to_handle(first_page, 0);
+ /* Maximum number of objects we can store in this zspage */
+ first_page->objects = class->zspage_order * PAGE_SIZE / class->size;
+
+ error = 0; /* Success */
+
+cleanup:
+ if (unlikely(error) && first_page) {
+ free_zspage(first_page);
+ first_page = NULL;
+ }
+
+ return first_page;
+}
+
+static struct page *find_get_zspage(struct size_class *class)
+{
+ int i;
+ struct page *page;
+
+ for (i = 0; i < _ZS_NR_FULLNESS_GROUPS; i++) {
+ page = class->fullness_list[i];
+ if (page)
+ break;
+ }
+
+ return page;
+}
+
+
+/*
+ * If this becomes a separate module, register zs_init() with
+ * module_init(), zs_exit with module_exit(), and remove zs_initialized
+*/
+static int zs_initialized;
+
+static int zs_cpu_notifier(struct notifier_block *nb, unsigned long action,
+ void *pcpu)
+{
+ int cpu = (long)pcpu;
+ struct mapping_area *area;
+
+ switch (action) {
+ case CPU_UP_PREPARE:
+ area = &per_cpu(zs_map_area, cpu);
+ if (area->vm)
+ break;
+ area->vm = alloc_vm_area(2 * PAGE_SIZE, area->vm_ptes);
+ if (!area->vm)
+ return notifier_from_errno(-ENOMEM);
+ break;
+ case CPU_DEAD:
+ case CPU_UP_CANCELED:
+ area = &per_cpu(zs_map_area, cpu);
+ if (area->vm)
+ free_vm_area(area->vm);
+ area->vm = NULL;
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block zs_cpu_nb = {
+ .notifier_call = zs_cpu_notifier
+};
+
+static void zs_exit(void)
+{
+ int cpu;
+
+ for_each_online_cpu(cpu)
+ zs_cpu_notifier(NULL, CPU_DEAD, (void *)(long)cpu);
+ unregister_cpu_notifier(&zs_cpu_nb);
+}
+
+static int zs_init(void)
+{
+ int cpu, ret;
+
+ register_cpu_notifier(&zs_cpu_nb);
+ for_each_online_cpu(cpu) {
+ ret = zs_cpu_notifier(NULL, CPU_UP_PREPARE, (void *)(long)cpu);
+ if (notifier_to_errno(ret))
+ goto fail;
+ }
+ return 0;
+fail:
+ zs_exit();
+ return notifier_to_errno(ret);
+}
+
+struct zs_pool *zs_create_pool(const char *name, gfp_t flags)
+{
+ int i, error, ovhd_size;
+ struct zs_pool *pool;
+
+ if (!name)
+ return NULL;
+
+ ovhd_size = roundup(sizeof(*pool), PAGE_SIZE);
+ pool = kzalloc(ovhd_size, GFP_KERNEL);
+ if (!pool)
+ return NULL;
+
+ for (i = 0; i < ZS_SIZE_CLASSES; i++) {
+ int size;
+ struct size_class *class;
+
+ size = ZS_MIN_ALLOC_SIZE + i * ZS_SIZE_CLASS_DELTA;
+ if (size > ZS_MAX_ALLOC_SIZE)
+ size = ZS_MAX_ALLOC_SIZE;
+
+ class = &pool->size_class[i];
+ class->size = size;
+ class->index = i;
+ spin_lock_init(&class->lock);
+ class->zspage_order = get_zspage_order(size);
+
+ }
+
+ /*
+ * If this becomes a separate module, register zs_init with
+ * module_init, and remove this block
+ */
+ if (!zs_initialized) {
+ error = zs_init();
+ if (error)
+ goto cleanup;
+ zs_initialized = 1;
+ }
+
+ pool->flags = flags;
+ pool->name = name;
+
+ error = 0; /* Success */
+
+cleanup:
+ if (error) {
+ zs_destroy_pool(pool);
+ pool = NULL;
+ }
+
+ return pool;
+}
+EXPORT_SYMBOL_GPL(zs_create_pool);
+
+void zs_destroy_pool(struct zs_pool *pool)
+{
+ int i;
+
+ for (i = 0; i < ZS_SIZE_CLASSES; i++) {
+ int fg;
+ struct size_class *class = &pool->size_class[i];
+
+ for (fg = 0; fg < _ZS_NR_FULLNESS_GROUPS; fg++) {
+ if (class->fullness_list[fg]) {
+ pr_info("Freeing non-empty class with size "
+ "%db, fullness group %d\n",
+ class->size, fg);
+ }
+ }
+ }
+ kfree(pool);
+}
+EXPORT_SYMBOL_GPL(zs_destroy_pool);
+
+/**
+ * zs_malloc - Allocate block of given size from pool.
+ * @pool: pool to allocate from
+ * @size: size of block to allocate
+ *
+ * On success, returns an opaque handle identifying the allocated block;
+ * the block must be mapped with zs_map_object() before it can be
+ * accessed. On failure, returns NULL.
+ *
+ * Allocation requests with size > ZS_MAX_ALLOC_SIZE will fail.
+ */
+void *zs_malloc(struct zs_pool *pool, size_t size)
+{
+ void *obj;
+ struct link_free *link;
+ int class_idx;
+ struct size_class *class;
+
+ struct page *first_page, *m_page;
+ unsigned long m_objidx, m_offset;
+
+ if (unlikely(!size || size > ZS_MAX_ALLOC_SIZE))
+ return NULL;
+
+ class_idx = get_size_class_index(size);
+ class = &pool->size_class[class_idx];
+ BUG_ON(class_idx != class->index);
+
+ spin_lock(&class->lock);
+ first_page = find_get_zspage(class);
+
+ if (!first_page) {
+ spin_unlock(&class->lock);
+ first_page = alloc_zspage(class, pool->flags);
+ if (unlikely(!first_page))
+ return NULL;
+
+ set_zspage_mapping(first_page, class->index, ZS_EMPTY);
+ spin_lock(&class->lock);
+ class->pages_allocated += class->zspage_order;
+ }
+
+ obj = first_page->freelist;
+ obj_handle_to_location(obj, &m_page, &m_objidx);
+ m_offset = obj_idx_to_offset(m_page, m_objidx, class->size);
+
+ link = (struct link_free *)kmap_atomic(m_page) +
+ m_offset / sizeof(*link);
+ first_page->freelist = link->next;
+ memset(link, POISON_INUSE, sizeof(*link));
+ kunmap_atomic(link);
+
+ first_page->inuse++;
+ /* Now move the zspage to another fullness group, if required */
+ fix_fullness_group(pool, first_page);
+ spin_unlock(&class->lock);
+
+ return obj;
+}
+EXPORT_SYMBOL_GPL(zs_malloc);
+
+void zs_free(struct zs_pool *pool, void *obj)
+{
+ struct link_free *link;
+ struct page *first_page, *f_page;
+ unsigned long f_objidx, f_offset;
+
+ int class_idx;
+ struct size_class *class;
+ enum fullness_group fullness;
+
+ if (unlikely(!obj))
+ return;
+
+ obj_handle_to_location(obj, &f_page, &f_objidx);
+ first_page = get_first_page(f_page);
+
+ get_zspage_mapping(first_page, &class_idx, &fullness);
+ class = &pool->size_class[class_idx];
+ f_offset = obj_idx_to_offset(f_page, f_objidx, class->size);
+
+ spin_lock(&class->lock);
+
+ /* Insert this object in containing zspage's freelist */
+ link = (struct link_free *)((unsigned char *)kmap_atomic(f_page)
+ + f_offset);
+ link->next = first_page->freelist;
+ kunmap_atomic(link);
+ first_page->freelist = obj;
+
+ first_page->inuse--;
+ fullness = fix_fullness_group(pool, first_page);
+
+ if (fullness == ZS_EMPTY)
+ class->pages_allocated -= class->zspage_order;
+
+ spin_unlock(&class->lock);
+
+ if (fullness == ZS_EMPTY)
+ free_zspage(first_page);
+}
+EXPORT_SYMBOL_GPL(zs_free);
+
+void *zs_map_object(struct zs_pool *pool, void *handle)
+{
+ struct page *page;
+ unsigned long obj_idx, off;
+
+ unsigned int class_idx;
+ enum fullness_group fg;
+ struct size_class *class;
+ struct mapping_area *area;
+
+ BUG_ON(!handle);
+
+ obj_handle_to_location(handle, &page, &obj_idx);
+ get_zspage_mapping(get_first_page(page), &class_idx, &fg);
+ class = &pool->size_class[class_idx];
+ off = obj_idx_to_offset(page, obj_idx, class->size);
+
+ area = &get_cpu_var(zs_map_area);
+ if (off + class->size <= PAGE_SIZE) {
+ /* this object is contained entirely within a page */
+ area->vm_addr = kmap_atomic(page);
+ } else {
+ /* this object spans two pages */
+ struct page *nextp;
+
+ nextp = get_next_page(page);
+ BUG_ON(!nextp);
+
+
+ set_pte(area->vm_ptes[0], mk_pte(page, PAGE_KERNEL));
+ set_pte(area->vm_ptes[1], mk_pte(nextp, PAGE_KERNEL));
+
+ /* We pre-allocated VM area so mapping can never fail */
+ area->vm_addr = area->vm->addr;
+ }
+
+ return area->vm_addr + off;
+}
+EXPORT_SYMBOL_GPL(zs_map_object);
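+
+/*
+ * Note: zs_map_object() and zs_unmap_object() must be strictly paired and
+ * the caller must not sleep in between. The mapping uses a per-cpu area
+ * taken with get_cpu_var() (or kmap_atomic() in the single-page case), so
+ * preemption stays disabled until the matching zs_unmap_object().
+ */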
+
+void zs_unmap_object(struct zs_pool *pool, void *handle)
+{
+ struct page *page;
+ unsigned long obj_idx, off;
+
+ unsigned int class_idx;
+ enum fullness_group fg;
+ struct size_class *class;
+ struct mapping_area *area;
+
+ BUG_ON(!handle);
+
+ obj_handle_to_location(handle, &page, &obj_idx);
+ get_zspage_mapping(get_first_page(page), &class_idx, &fg);
+ class = &pool->size_class[class_idx];
+ off = obj_idx_to_offset(page, obj_idx, class->size);
+
+ area = &__get_cpu_var(zs_map_area);
+ if (off + class->size <= PAGE_SIZE) {
+ kunmap_atomic(area->vm_addr);
+ } else {
+ set_pte(area->vm_ptes[0], __pte(0));
+ set_pte(area->vm_ptes[1], __pte(0));
+ __flush_tlb_one((unsigned long)area->vm_addr);
+ __flush_tlb_one((unsigned long)area->vm_addr + PAGE_SIZE);
+ }
+ put_cpu_var(zs_map_area);
+}
+EXPORT_SYMBOL_GPL(zs_unmap_object);
+
+u64 zs_get_total_size_bytes(struct zs_pool *pool)
+{
+ int i;
+ u64 npages = 0;
+
+ for (i = 0; i < ZS_SIZE_CLASSES; i++)
+ npages += pool->size_class[i].pages_allocated;
+
+ return npages << PAGE_SHIFT;
+}
+EXPORT_SYMBOL_GPL(zs_get_total_size_bytes);
diff --git a/drivers/staging/zsmalloc/zsmalloc.h b/drivers/staging/zsmalloc/zsmalloc.h
new file mode 100644
index 00000000000..949384ee749
--- /dev/null
+++ b/drivers/staging/zsmalloc/zsmalloc.h
@@ -0,0 +1,31 @@
+/*
+ * zsmalloc memory allocator
+ *
+ * Copyright (C) 2011 Nitin Gupta
+ *
+ * This code is released using a dual license strategy: BSD/GPL
+ * You can choose the license that better fits your requirements.
+ *
+ * Released under the terms of 3-clause BSD License
+ * Released under the terms of GNU General Public License Version 2.0
+ */
+
+#ifndef _ZS_MALLOC_H_
+#define _ZS_MALLOC_H_
+
+#include <linux/types.h>
+
+struct zs_pool;
+
+struct zs_pool *zs_create_pool(const char *name, gfp_t flags);
+void zs_destroy_pool(struct zs_pool *pool);
+
+void *zs_malloc(struct zs_pool *pool, size_t size);
+void zs_free(struct zs_pool *pool, void *obj);
+
+void *zs_map_object(struct zs_pool *pool, void *handle);
+void zs_unmap_object(struct zs_pool *pool, void *handle);
+
+u64 zs_get_total_size_bytes(struct zs_pool *pool);
+
+#endif
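The public interface above is deliberately small. An end-to-end usage sketch (demo_roundtrip is illustrative; note that an object is only valid while mapped):

	static int demo_roundtrip(void)
	{
		struct zs_pool *pool;
		void *handle, *p;

		pool = zs_create_pool("demo", GFP_KERNEL);
		if (!pool)
			return -ENOMEM;
		handle = zs_malloc(pool, 64);
		if (!handle) {
			zs_destroy_pool(pool);
			return -ENOMEM;
		}
		p = zs_map_object(pool, handle);
		memset(p, 0xaa, 64);
		zs_unmap_object(pool, handle);
		zs_free(pool, handle);
		zs_destroy_pool(pool);
		return 0;
	}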
diff --git a/drivers/staging/zsmalloc/zsmalloc_int.h b/drivers/staging/zsmalloc/zsmalloc_int.h
new file mode 100644
index 00000000000..92eefc663af
--- /dev/null
+++ b/drivers/staging/zsmalloc/zsmalloc_int.h
@@ -0,0 +1,155 @@
+/*
+ * zsmalloc memory allocator
+ *
+ * Copyright (C) 2011 Nitin Gupta
+ *
+ * This code is released using a dual license strategy: BSD/GPL
+ * You can choose the license that better fits your requirements.
+ *
+ * Released under the terms of 3-clause BSD License
+ * Released under the terms of GNU General Public License Version 2.0
+ */
+
+#ifndef _ZS_MALLOC_INT_H_
+#define _ZS_MALLOC_INT_H_
+
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+/*
+ * This must be a power of 2 and greater than or equal to sizeof(link_free).
+ * These two conditions ensure that any 'struct link_free' itself doesn't
+ * span more than 1 page which avoids complex case of mapping 2 pages simply
+ * to restore link_free pointer values.
+ */
+#define ZS_ALIGN 8
+
+/*
+ * A single 'zspage' is composed of up to 2^N discontiguous 0-order (single)
+ * pages. ZS_MAX_ZSPAGE_ORDER defines upper limit on N.
+ */
+#define ZS_MAX_ZSPAGE_ORDER 2
+#define ZS_MAX_PAGES_PER_ZSPAGE (_AC(1, UL) << ZS_MAX_ZSPAGE_ORDER)
+
+/*
+ * Object location (<PFN>, <obj_idx>) is encoded as
+ * a single (void *) handle value.
+ *
+ * Note that object index <obj_idx> is relative to system
+ * page <PFN> it is stored in, so for each sub-page belonging
+ * to a zspage, obj_idx starts with 0.
+ *
+ * This is made more complicated by various memory models and PAE.
+ */
+
+#ifndef MAX_PHYSMEM_BITS
+#ifdef CONFIG_HIGHMEM64G
+#define MAX_PHYSMEM_BITS 36
+#else /* !CONFIG_HIGHMEM64G */
+/*
+ * If this definition of MAX_PHYSMEM_BITS is used, OBJ_INDEX_BITS will just
+ * be PAGE_SHIFT
+ */
+#define MAX_PHYSMEM_BITS BITS_PER_LONG
+#endif
+#endif
+#define _PFN_BITS (MAX_PHYSMEM_BITS - PAGE_SHIFT)
+#define OBJ_INDEX_BITS (BITS_PER_LONG - _PFN_BITS)
+#define OBJ_INDEX_MASK ((_AC(1, UL) << OBJ_INDEX_BITS) - 1)
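+
+/*
+ * For example, on 32-bit x86 without CONFIG_HIGHMEM64G (PAGE_SHIFT == 12,
+ * MAX_PHYSMEM_BITS == 32), _PFN_BITS == 20 and OBJ_INDEX_BITS == 12: a
+ * handle carries the PFN in its upper 20 bits and the object index in
+ * its lower 12 bits.
+ */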
+
+#define MAX(a, b) ((a) >= (b) ? (a) : (b))
+/* ZS_MIN_ALLOC_SIZE must be multiple of ZS_ALIGN */
+#define ZS_MIN_ALLOC_SIZE \
+ MAX(32, (ZS_MAX_PAGES_PER_ZSPAGE << PAGE_SHIFT >> OBJ_INDEX_BITS))
+#define ZS_MAX_ALLOC_SIZE PAGE_SIZE
+
+/*
+ * On systems with 4K page size, this gives 254 size classes! There is a
+ * trade-off here:
+ *  - Large number of size classes is potentially wasteful as free pages are
+ *    spread across these classes
+ *  - Small number of size classes causes large internal fragmentation
+ *  - Probably it's better to use specific size classes (empirically
+ * determined). NOTE: all those class sizes must be set as multiple of
+ * ZS_ALIGN to make sure link_free itself never has to span 2 pages.
+ *
+ * ZS_MIN_ALLOC_SIZE and ZS_SIZE_CLASS_DELTA must be multiple of ZS_ALIGN
+ * (reason above)
+ */
+#define ZS_SIZE_CLASS_DELTA 16
+#define ZS_SIZE_CLASSES ((ZS_MAX_ALLOC_SIZE - ZS_MIN_ALLOC_SIZE) / \
+ ZS_SIZE_CLASS_DELTA + 1)
+
+/*
+ * We do not maintain any list for completely empty or full pages
+ */
+enum fullness_group {
+ ZS_ALMOST_FULL,
+ ZS_ALMOST_EMPTY,
+ _ZS_NR_FULLNESS_GROUPS,
+
+ ZS_EMPTY,
+ ZS_FULL
+};
+
+/*
+ * We assign a page to ZS_ALMOST_EMPTY fullness group when:
+ * n <= N / f, where
+ * n = number of allocated objects
+ * N = total number of objects zspage can store
+ * f = 1/fullness_threshold_frac
+ *
+ * Similarly, we assign zspage to:
+ * ZS_ALMOST_FULL when n > N / f
+ * ZS_EMPTY when n == 0
+ * ZS_FULL when n == N
+ *
+ * (see: fix_fullness_group())
+ */
+static const int fullness_threshold_frac = 4;
+
+struct mapping_area {
+ struct vm_struct *vm;
+ pte_t *vm_ptes[2];
+ char *vm_addr;
+};
+
+struct size_class {
+ /*
+ * Size of objects stored in this class. Must be multiple
+ * of ZS_ALIGN.
+ */
+ int size;
+ unsigned int index;
+
+ /* Number of PAGE_SIZE sized pages to combine to form a 'zspage' */
+ int zspage_order;
+
+ spinlock_t lock;
+
+ /* stats */
+ u64 pages_allocated;
+
+ struct page *fullness_list[_ZS_NR_FULLNESS_GROUPS];
+};
+
+/*
+ * Placed within free objects to form a singly linked list.
+ * For every zspage, first_page->freelist gives head of this list.
+ *
+ * This must be power of 2 and less than or equal to ZS_ALIGN
+ */
+struct link_free {
+ /* Handle of next free chunk (encodes <PFN, obj_idx>) */
+ void *next;
+};
+
+struct zs_pool {
+ struct size_class size_class[ZS_SIZE_CLASSES];
+
+ gfp_t flags; /* allocation flags used when growing pool */
+ const char *name;
+};
+
+#endif