Diffstat (limited to 'drivers')
65 files changed, 10117 insertions, 819 deletions
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
index be1cc514335..ef522ae5548 100644
--- a/drivers/base/bus.c
+++ b/drivers/base/bus.c
@@ -530,7 +530,8 @@ void bus_remove_device(struct device *dev)
 		sysfs_remove_link(&dev->bus->p->devices_kset->kobj,
 				  dev->bus_id);
 		device_remove_attrs(dev->bus, dev);
-		klist_del(&dev->knode_bus);
+		if (klist_node_attached(&dev->knode_bus))
+			klist_del(&dev->knode_bus);
 		pr_debug("bus: '%s': remove device %s\n",
 			 dev->bus->name, dev->bus_id);
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index c4568b82875..7b76fd3b93a 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -62,7 +62,7 @@ static bool all_sleeping;
  */
 int device_pm_add(struct device *dev)
 {
-	int error = 0;
+	int error;
 
 	pr_debug("PM: Adding info for %s:%s\n",
 		 dev->bus ? dev->bus->name : "No Bus",
@@ -70,18 +70,15 @@ int device_pm_add(struct device *dev)
 	mutex_lock(&dpm_list_mtx);
 	if ((dev->parent && dev->parent->power.sleeping) || all_sleeping) {
 		if (dev->parent->power.sleeping)
-			dev_warn(dev,
-				 "parent %s is sleeping, will not add\n",
+			dev_warn(dev, "parent %s is sleeping\n",
 				dev->parent->bus_id);
 		else
-			dev_warn(dev, "devices are sleeping, will not add\n");
+			dev_warn(dev, "all devices are sleeping\n");
 		WARN_ON(true);
-		error = -EBUSY;
-	} else {
-		error = dpm_sysfs_add(dev);
-		if (!error)
-			list_add_tail(&dev->power.entry, &dpm_active);
 	}
+	error = dpm_sysfs_add(dev);
+	if (!error)
+		list_add_tail(&dev->power.entry, &dpm_active);
 	mutex_unlock(&dpm_list_mtx);
 	return error;
 }
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index 85364804364..7bd76639544 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -108,7 +108,7 @@ static struct page *brd_insert_page(struct brd_device *brd, sector_t sector)
 #ifndef CONFIG_BLK_DEV_XIP
 	gfp_flags |= __GFP_HIGHMEM;
 #endif
-	page = alloc_page(GFP_NOIO | __GFP_HIGHMEM | __GFP_ZERO);
+	page = alloc_page(gfp_flags);
 	if (!page)
 		return NULL;
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index f0b00ec1e47..e03c67dd3e6 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -44,8 +44,8 @@
 #ifdef CONFIG_HID_DEBUG
 int hid_debug = 0;
-module_param_named(debug, hid_debug, bool, 0600);
-MODULE_PARM_DESC(debug, "Turn HID debugging mode on and off");
+module_param_named(debug, hid_debug, int, 0600);
+MODULE_PARM_DESC(debug, "HID debugging (0=off, 1=probing info, 2=continuous data dumping)");
 EXPORT_SYMBOL_GPL(hid_debug);
 #endif
@@ -97,7 +97,7 @@ static struct hid_field *hid_register_field(struct hid_report *report, unsigned
 	field->index = report->maxfield++;
 	report->field[field->index] = field;
 	field->usage = (struct hid_usage *)(field + 1);
-	field->value = (unsigned *)(field->usage + usages);
+	field->value = (s32 *)(field->usage + usages);
 	field->report = report;
 
 	return field;
@@ -830,7 +830,8 @@ static void hid_process_event(struct hid_device *hid, struct hid_field *field, s
  * reporting to the layer).
  */
 
-void hid_input_field(struct hid_device *hid, struct hid_field *field, __u8 *data, int interrupt)
+static void hid_input_field(struct hid_device *hid, struct hid_field *field,
+			__u8 *data, int interrupt)
 {
 	unsigned n;
 	unsigned count = field->report_count;
@@ -876,7 +877,6 @@ void hid_input_field(struct hid_device *hid, struct hid_field *field, __u8 *data
 exit:
 	kfree(value);
 }
-EXPORT_SYMBOL_GPL(hid_input_field);
 
 /*
  * Output the field into the report.
@@ -988,8 +988,13 @@ int hid_input_report(struct hid_device *hid, int type, u8 *data, int size, int i
 	if ((hid->claimed & HID_CLAIMED_HIDDEV) && hid->hiddev_report_event)
 		hid->hiddev_report_event(hid, report);
-	if (hid->claimed & HID_CLAIMED_HIDRAW)
-		hidraw_report_event(hid, data, size);
+	if (hid->claimed & HID_CLAIMED_HIDRAW) {
+		/* numbered reports need to be passed with the report num */
+		if (report_enum->numbered)
+			hidraw_report_event(hid, data - 1, size + 1);
+		else
+			hidraw_report_event(hid, data, size);
+	}
 
 	for (n = 0; n < report->maxfield; n++)
 		hid_input_field(hid, report->field[n], data, interrupt);
diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c
index 5c24fe46d8e..f88714b0600 100644
--- a/drivers/hid/hid-debug.c
+++ b/drivers/hid/hid-debug.c
@@ -498,7 +498,7 @@ void hid_dump_device(struct hid_device *device) {
 EXPORT_SYMBOL_GPL(hid_dump_device);
 
 void hid_dump_input(struct hid_usage *usage, __s32 value) {
-	if (!hid_debug)
+	if (hid_debug < 2)
 		return;
 
 	printk(KERN_DEBUG "hid-debug: input ");
diff --git a/drivers/hid/hid-input-quirks.c b/drivers/hid/hid-input-quirks.c
index dceadd0c141..4c2052c658f 100644
--- a/drivers/hid/hid-input-quirks.c
+++ b/drivers/hid/hid-input-quirks.c
@@ -276,6 +276,21 @@ static int quirk_btc_8193(struct hid_usage *usage, struct input_dev *input,
 	return 1;
 }
 
+static int quirk_sunplus_wdesktop(struct hid_usage *usage, struct input_dev *input,
+		unsigned long **bit, int *max)
+{
+	if ((usage->hid & HID_USAGE_PAGE) != HID_UP_CONSUMER)
+		return 0;
+
+	switch (usage->hid & HID_USAGE) {
+	case 0x2003: map_key_clear(KEY_ZOOMIN); break;
+	case 0x2103: map_key_clear(KEY_ZOOMOUT); break;
+	default:
+		return 0;
+	}
+	return 1;
+}
+
 #define VENDOR_ID_BELKIN 0x1020
 #define DEVICE_ID_BELKIN_WIRELESS_KEYBOARD 0x0006
@@ -306,6 +321,9 @@ static int quirk_btc_8193(struct hid_usage *usage, struct input_dev *input,
 #define VENDOR_ID_PETALYNX 0x18b1
 #define DEVICE_ID_PETALYNX_MAXTER_REMOTE 0x0037
 
+#define VENDOR_ID_SUNPLUS 0x04fc
+#define DEVICE_ID_SUNPLUS_WDESKTOP 0x05d8
+
 static const struct hid_input_blacklist {
 	__u16 idVendor;
 	__u16 idProduct;
@@ -332,8 +350,10 @@ static const struct hid_input_blacklist {
 	{ VENDOR_ID_MONTEREY, DEVICE_ID_GENIUS_KB29E, quirk_cherry_genius_29e },
 
 	{ VENDOR_ID_PETALYNX, DEVICE_ID_PETALYNX_MAXTER_REMOTE, quirk_petalynx_remote },
-
-	{ 0, 0, 0 }
+
+	{ VENDOR_ID_SUNPLUS, DEVICE_ID_SUNPLUS_WDESKTOP, quirk_sunplus_wdesktop },
+
+	{ 0, 0, NULL }
 };
 
 int hidinput_mapping_quirks(struct hid_usage *usage,
diff --git a/drivers/hid/usbhid/Kconfig b/drivers/hid/usbhid/Kconfig
index 7160fa65d79..18f09104765 100644
--- a/drivers/hid/usbhid/Kconfig
+++ b/drivers/hid/usbhid/Kconfig
@@ -71,6 +71,14 @@ config LOGITECH_FF
 	  Note: if you say N here, this device will still be supported, but without
 	  force feedback.
 
+config LOGIRUMBLEPAD2_FF
+	bool "Logitech Rumblepad 2 support"
+	depends on HID_FF
+	select INPUT_FF_MEMLESS if USB_HID
+	help
+	  Say Y here if you want to enable force feedback support for Logitech
+	  Rumblepad 2 devices.
+
 config PANTHERLORD_FF
 	bool "PantherLord/GreenAsia based device support"
 	depends on HID_FF
@@ -80,8 +88,8 @@ config PANTHERLORD_FF
 	  or adapter and want to enable force feedback support for it.
config THRUSTMASTER_FF - bool "ThrustMaster devices support (EXPERIMENTAL)" - depends on HID_FF && EXPERIMENTAL + bool "ThrustMaster devices support" + depends on HID_FF select INPUT_FF_MEMLESS if USB_HID help Say Y here if you have a THRUSTMASTER FireStore Dual Power 2 or diff --git a/drivers/hid/usbhid/Makefile b/drivers/hid/usbhid/Makefile index 8e6ab5b164a..00a7b709019 100644 --- a/drivers/hid/usbhid/Makefile +++ b/drivers/hid/usbhid/Makefile @@ -16,6 +16,9 @@ endif ifeq ($(CONFIG_LOGITECH_FF),y) usbhid-objs += hid-lgff.o endif +ifeq ($(CONFIG_LOGIRUMBLEPAD2_FF),y) + usbhid-objs += hid-lg2ff.o +endif ifeq ($(CONFIG_PANTHERLORD_FF),y) usbhid-objs += hid-plff.o endif diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c index d95979f0e02..e0d805f1b2b 100644 --- a/drivers/hid/usbhid/hid-core.c +++ b/drivers/hid/usbhid/hid-core.c @@ -82,6 +82,7 @@ static int hid_start_in(struct hid_device *hid) spin_lock_irqsave(&usbhid->inlock, flags); if (hid->open > 0 && !test_bit(HID_SUSPENDED, &usbhid->iofl) && + !test_bit(HID_DISCONNECTED, &usbhid->iofl) && !test_and_set_bit(HID_IN_RUNNING, &usbhid->iofl)) { rc = usb_submit_urb(usbhid->urbin, GFP_ATOMIC); if (rc != 0) @@ -155,7 +156,7 @@ static void hid_io_error(struct hid_device *hid) spin_lock_irqsave(&usbhid->inlock, flags); /* Stop when disconnected */ - if (usb_get_intfdata(usbhid->intf) == NULL) + if (test_bit(HID_DISCONNECTED, &usbhid->iofl)) goto done; /* If it has been a while since the last error, we'll assume @@ -341,7 +342,7 @@ static void hid_irq_out(struct urb *urb) if (usbhid->outhead != usbhid->outtail) { if (hid_submit_out(hid)) { clear_bit(HID_OUT_RUNNING, &usbhid->iofl); - wake_up(&hid->wait); + wake_up(&usbhid->wait); } spin_unlock_irqrestore(&usbhid->outlock, flags); return; @@ -349,7 +350,7 @@ static void hid_irq_out(struct urb *urb) clear_bit(HID_OUT_RUNNING, &usbhid->iofl); spin_unlock_irqrestore(&usbhid->outlock, flags); - wake_up(&hid->wait); + wake_up(&usbhid->wait); } /* @@ -391,7 +392,7 @@ static void hid_ctrl(struct urb *urb) if (usbhid->ctrlhead != usbhid->ctrltail) { if (hid_submit_ctrl(hid)) { clear_bit(HID_CTRL_RUNNING, &usbhid->iofl); - wake_up(&hid->wait); + wake_up(&usbhid->wait); } spin_unlock_irqrestore(&usbhid->ctrllock, flags); return; @@ -399,7 +400,7 @@ static void hid_ctrl(struct urb *urb) clear_bit(HID_CTRL_RUNNING, &usbhid->iofl); spin_unlock_irqrestore(&usbhid->ctrllock, flags); - wake_up(&hid->wait); + wake_up(&usbhid->wait); } void usbhid_submit_report(struct hid_device *hid, struct hid_report *report, unsigned char dir) @@ -478,8 +479,9 @@ int usbhid_wait_io(struct hid_device *hid) { struct usbhid_device *usbhid = hid->driver_data; - if (!wait_event_timeout(hid->wait, (!test_bit(HID_CTRL_RUNNING, &usbhid->iofl) && - !test_bit(HID_OUT_RUNNING, &usbhid->iofl)), + if (!wait_event_timeout(usbhid->wait, + (!test_bit(HID_CTRL_RUNNING, &usbhid->iofl) && + !test_bit(HID_OUT_RUNNING, &usbhid->iofl)), 10*HZ)) { dbg_hid("timeout waiting for ctrl or out queue to clear\n"); return -1; @@ -610,10 +612,11 @@ static void usbhid_set_leds(struct hid_device *hid) /* * Traverse the supplied list of reports and find the longest */ -static void hid_find_max_report(struct hid_device *hid, unsigned int type, int *max) +static void hid_find_max_report(struct hid_device *hid, unsigned int type, + unsigned int *max) { struct hid_report *report; - int size; + unsigned int size; list_for_each_entry(report, &hid->report_enum[type].report_list, list) { size = ((report->size - 1) >> 3) + 1; @@ -705,9 +708,9 @@ 
static struct hid_device *usb_hid_configure(struct usb_interface *intf) struct hid_descriptor *hdesc; struct hid_device *hid; u32 quirks = 0; - unsigned rsize = 0; + unsigned int insize = 0, rsize = 0; char *rdesc; - int n, len, insize = 0; + int n, len; struct usbhid_device *usbhid; quirks = usbhid_lookup_quirk(le16_to_cpu(dev->descriptor.idVendor), @@ -800,6 +803,22 @@ static struct hid_device *usb_hid_configure(struct usb_interface *intf) goto fail; } + hid->name[0] = 0; + + if (dev->manufacturer) + strlcpy(hid->name, dev->manufacturer, sizeof(hid->name)); + + if (dev->product) { + if (dev->manufacturer) + strlcat(hid->name, " ", sizeof(hid->name)); + strlcat(hid->name, dev->product, sizeof(hid->name)); + } + + if (!strlen(hid->name)) + snprintf(hid->name, sizeof(hid->name), "HID %04x:%04x", + le16_to_cpu(dev->descriptor.idVendor), + le16_to_cpu(dev->descriptor.idProduct)); + for (n = 0; n < interface->desc.bNumEndpoints; n++) { struct usb_endpoint_descriptor *endpoint; @@ -812,6 +831,14 @@ static struct hid_device *usb_hid_configure(struct usb_interface *intf) interval = endpoint->bInterval; + /* Some vendors give fullspeed interval on highspeed devides */ + if (quirks & HID_QUIRK_FULLSPEED_INTERVAL && + dev->speed == USB_SPEED_HIGH) { + interval = fls(endpoint->bInterval*8); + printk(KERN_INFO "%s: Fixing fullspeed to highspeed interval: %d -> %d\n", + hid->name, endpoint->bInterval, interval); + } + /* Change the polling interval of mice. */ if (hid->collection->usage == HID_GD_MOUSE && hid_mousepoll_interval > 0) interval = hid_mousepoll_interval; @@ -844,8 +871,7 @@ static struct hid_device *usb_hid_configure(struct usb_interface *intf) goto fail; } - init_waitqueue_head(&hid->wait); - + init_waitqueue_head(&usbhid->wait); INIT_WORK(&usbhid->reset_work, hid_reset); setup_timer(&usbhid->io_retry, hid_retry_timeout, (unsigned long) hid); @@ -859,22 +885,6 @@ static struct hid_device *usb_hid_configure(struct usb_interface *intf) usbhid->intf = intf; usbhid->ifnum = interface->desc.bInterfaceNumber; - hid->name[0] = 0; - - if (dev->manufacturer) - strlcpy(hid->name, dev->manufacturer, sizeof(hid->name)); - - if (dev->product) { - if (dev->manufacturer) - strlcat(hid->name, " ", sizeof(hid->name)); - strlcat(hid->name, dev->product, sizeof(hid->name)); - } - - if (!strlen(hid->name)) - snprintf(hid->name, sizeof(hid->name), "HID %04x:%04x", - le16_to_cpu(dev->descriptor.idVendor), - le16_to_cpu(dev->descriptor.idProduct)); - hid->bus = BUS_USB; hid->vendor = le16_to_cpu(dev->descriptor.idVendor); hid->product = le16_to_cpu(dev->descriptor.idProduct); @@ -932,6 +942,7 @@ static void hid_disconnect(struct usb_interface *intf) spin_lock_irq(&usbhid->inlock); /* Sync with error handler */ usb_set_intfdata(intf, NULL); + set_bit(HID_DISCONNECTED, &usbhid->iofl); spin_unlock_irq(&usbhid->inlock); usb_kill_urb(usbhid->urbin); usb_kill_urb(usbhid->urbout); diff --git a/drivers/hid/usbhid/hid-ff.c b/drivers/hid/usbhid/hid-ff.c index 4c210e16b1b..1d0dac52f16 100644 --- a/drivers/hid/usbhid/hid-ff.c +++ b/drivers/hid/usbhid/hid-ff.c @@ -59,6 +59,9 @@ static struct hid_ff_initializer inits[] = { { 0x46d, 0xc295, hid_lgff_init }, /* Logitech MOMO force wheel */ { 0x46d, 0xca03, hid_lgff_init }, /* Logitech MOMO force wheel */ #endif +#ifdef CONFIG_LOGIRUMBLEPAD2_FF + { 0x46d, 0xc218, hid_lg2ff_init }, /* Logitech Rumblepad 2 */ +#endif #ifdef CONFIG_PANTHERLORD_FF { 0x810, 0x0001, hid_plff_init }, /* "Twin USB Joystick" */ { 0xe8f, 0x0003, hid_plff_init }, /* "GreenAsia Inc. 
USB Joystick " */ diff --git a/drivers/hid/usbhid/hid-lg2ff.c b/drivers/hid/usbhid/hid-lg2ff.c new file mode 100644 index 00000000000..d469bd0061c --- /dev/null +++ b/drivers/hid/usbhid/hid-lg2ff.c @@ -0,0 +1,114 @@ +/* + * Force feedback support for Logitech Rumblepad 2 + * + * Copyright (c) 2008 Anssi Hannula <anssi.hannula@gmail.com> + */ + +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + + +#include <linux/input.h> +#include <linux/usb.h> +#include <linux/hid.h> +#include "usbhid.h" + +struct lg2ff_device { + struct hid_report *report; +}; + +static int play_effect(struct input_dev *dev, void *data, + struct ff_effect *effect) +{ + struct hid_device *hid = input_get_drvdata(dev); + struct lg2ff_device *lg2ff = data; + int weak, strong; + + strong = effect->u.rumble.strong_magnitude; + weak = effect->u.rumble.weak_magnitude; + + if (weak || strong) { + weak = weak * 0xff / 0xffff; + strong = strong * 0xff / 0xffff; + + lg2ff->report->field[0]->value[0] = 0x51; + lg2ff->report->field[0]->value[2] = weak; + lg2ff->report->field[0]->value[4] = strong; + } else { + lg2ff->report->field[0]->value[0] = 0xf3; + lg2ff->report->field[0]->value[2] = 0x00; + lg2ff->report->field[0]->value[4] = 0x00; + } + + usbhid_submit_report(hid, lg2ff->report, USB_DIR_OUT); + return 0; +} + +int hid_lg2ff_init(struct hid_device *hid) +{ + struct lg2ff_device *lg2ff; + struct hid_report *report; + struct hid_input *hidinput = list_entry(hid->inputs.next, + struct hid_input, list); + struct list_head *report_list = + &hid->report_enum[HID_OUTPUT_REPORT].report_list; + struct input_dev *dev = hidinput->input; + int error; + + if (list_empty(report_list)) { + printk(KERN_ERR "hid-lg2ff: no output report found\n"); + return -ENODEV; + } + + report = list_entry(report_list->next, struct hid_report, list); + + if (report->maxfield < 1) { + printk(KERN_ERR "hid-lg2ff: output report is empty\n"); + return -ENODEV; + } + if (report->field[0]->report_count < 7) { + printk(KERN_ERR "hid-lg2ff: not enough values in the field\n"); + return -ENODEV; + } + + lg2ff = kmalloc(sizeof(struct lg2ff_device), GFP_KERNEL); + if (!lg2ff) + return -ENOMEM; + + set_bit(FF_RUMBLE, dev->ffbit); + + error = input_ff_create_memless(dev, lg2ff, play_effect); + if (error) { + kfree(lg2ff); + return error; + } + + lg2ff->report = report; + report->field[0]->value[0] = 0xf3; + report->field[0]->value[1] = 0x00; + report->field[0]->value[2] = 0x00; + report->field[0]->value[3] = 0x00; + report->field[0]->value[4] = 0x00; + report->field[0]->value[5] = 0x00; + report->field[0]->value[6] = 0x00; + + usbhid_submit_report(hid, report, USB_DIR_OUT); + + printk(KERN_INFO "Force feedback for Logitech Rumblepad 2 by " + "Anssi Hannula <anssi.hannula@gmail.com>\n"); + + return 0; +} diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c index 
e29a057cbea..28ddc3fdd3d 100644 --- a/drivers/hid/usbhid/hid-quirks.c +++ b/drivers/hid/usbhid/hid-quirks.c @@ -32,6 +32,9 @@ #define USB_VENDOR_ID_ADS_TECH 0x06e1 #define USB_DEVICE_ID_ADS_TECH_RADIO_SI470X 0xa155 +#define USB_VENDOR_ID_AFATECH 0x15a4 +#define USB_DEVICE_ID_AFATECH_AF9016 0x9016 + #define USB_VENDOR_ID_AIPTEK 0x08ca #define USB_DEVICE_ID_AIPTEK_01 0x0001 #define USB_DEVICE_ID_AIPTEK_10 0x0010 @@ -124,6 +127,9 @@ #define USB_DEVICE_ID_DELORME_EARTHMATE 0x0100 #define USB_DEVICE_ID_DELORME_EM_LT20 0x0200 +#define USB_VENDOR_ID_DMI 0x0c0b +#define USB_DEVICE_ID_DMI_ENC 0x5fab + #define USB_VENDOR_ID_ELO 0x04E7 #define USB_DEVICE_ID_ELO_TS2700 0x0020 @@ -199,17 +205,6 @@ #define USB_DEVICE_ID_GTCO_502 0x0502 #define USB_DEVICE_ID_GTCO_503 0x0503 #define USB_DEVICE_ID_GTCO_504 0x0504 -#define USB_DEVICE_ID_GTCO_600 0x0600 -#define USB_DEVICE_ID_GTCO_601 0x0601 -#define USB_DEVICE_ID_GTCO_602 0x0602 -#define USB_DEVICE_ID_GTCO_603 0x0603 -#define USB_DEVICE_ID_GTCO_604 0x0604 -#define USB_DEVICE_ID_GTCO_605 0x0605 -#define USB_DEVICE_ID_GTCO_606 0x0606 -#define USB_DEVICE_ID_GTCO_607 0x0607 -#define USB_DEVICE_ID_GTCO_608 0x0608 -#define USB_DEVICE_ID_GTCO_609 0x0609 -#define USB_DEVICE_ID_GTCO_609 0x0609 #define USB_DEVICE_ID_GTCO_1000 0x1000 #define USB_DEVICE_ID_GTCO_1001 0x1001 #define USB_DEVICE_ID_GTCO_1002 0x1002 @@ -320,6 +315,7 @@ #define USB_DEVICE_ID_LOGITECH_CORDLESS_DESKTOP_LX500 0xc512 #define USB_DEVICE_ID_MX3000_RECEIVER 0xc513 #define USB_DEVICE_ID_DINOVO_EDGE 0xc714 +#define USB_DEVICE_ID_DINOVO_MINI 0xc71f #define USB_VENDOR_ID_MCC 0x09db #define USB_DEVICE_ID_MCC_PMD1024LS 0x0076 @@ -332,6 +328,7 @@ #define USB_VENDOR_ID_MICROSOFT 0x045e #define USB_DEVICE_ID_SIDEWINDER_GV 0x003b #define USB_DEVICE_ID_WIRELESS_OPTICAL_DESKTOP_3_0 0x009d +#define USB_DEVICE_ID_DESKTOP_RECV_1028 0x00f9 #define USB_DEVICE_ID_MS_NE4K 0x00db #define USB_DEVICE_ID_MS_LK6K 0x00f9 @@ -377,6 +374,9 @@ #define USB_VENDOR_ID_SUN 0x0430 #define USB_DEVICE_ID_RARITAN_KVM_DONGLE 0xcdab +#define USB_VENDOR_ID_SUNPLUS 0x04fc +#define USB_DEVICE_ID_SUNPLUS_WDESKTOP 0x05d8 + #define USB_VENDOR_ID_TOPMAX 0x0663 #define USB_DEVICE_ID_TOPMAX_COBRAPAD 0x0103 @@ -435,9 +435,13 @@ static const struct hid_blacklist { { USB_VENDOR_ID_TOPMAX, USB_DEVICE_ID_TOPMAX_COBRAPAD, HID_QUIRK_BADPAD }, { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_EDGE, HID_QUIRK_DUPLICATE_USAGES }, + { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_MINI, HID_QUIRK_DUPLICATE_USAGES }, + + { USB_VENDOR_ID_AFATECH, USB_DEVICE_ID_AFATECH_AF9016, HID_QUIRK_FULLSPEED_INTERVAL }, { USB_VENDOR_ID_BELKIN, USB_DEVICE_ID_FLIP_KVM, HID_QUIRK_HIDDEV }, { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL4, HID_QUIRK_HIDDEV | HID_QUIRK_IGNORE_HIDINPUT }, + { USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_IR_REMOTE, HID_QUIRK_HIDDEV | HID_QUIRK_IGNORE_HIDINPUT }, { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_SIDEWINDER_GV, HID_QUIRK_HIDINPUT }, { USB_VENDOR_ID_EZKEY, USB_DEVICE_ID_BTC_8193, HID_QUIRK_HWHEEL_WHEEL_INVERT }, @@ -518,16 +522,6 @@ static const struct hid_blacklist { { USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_502, HID_QUIRK_IGNORE }, { USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_503, HID_QUIRK_IGNORE }, { USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_504, HID_QUIRK_IGNORE }, - { USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_600, HID_QUIRK_IGNORE }, - { USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_601, HID_QUIRK_IGNORE }, - { USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_602, HID_QUIRK_IGNORE }, - { USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_603, HID_QUIRK_IGNORE }, - { 
USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_604, HID_QUIRK_IGNORE }, - { USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_605, HID_QUIRK_IGNORE }, - { USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_606, HID_QUIRK_IGNORE }, - { USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_607, HID_QUIRK_IGNORE }, - { USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_608, HID_QUIRK_IGNORE }, - { USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_609, HID_QUIRK_IGNORE }, { USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1000, HID_QUIRK_IGNORE }, { USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1001, HID_QUIRK_IGNORE }, { USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1002, HID_QUIRK_IGNORE }, @@ -601,6 +595,7 @@ static const struct hid_blacklist { { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_2PORTKVM, HID_QUIRK_NOGET }, { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_4PORTKVM, HID_QUIRK_NOGET }, { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_4PORTKVMC, HID_QUIRK_NOGET }, + { USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET }, { USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET }, { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_EXTREME_3D, HID_QUIRK_NOGET }, { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WHEEL, HID_QUIRK_NOGET }, @@ -608,7 +603,7 @@ static const struct hid_blacklist { { USB_VENDOR_ID_PETALYNX, USB_DEVICE_ID_PETALYNX_MAXTER_REMOTE, HID_QUIRK_NOGET }, { USB_VENDOR_ID_SUN, USB_DEVICE_ID_RARITAN_KVM_DONGLE, HID_QUIRK_NOGET }, { USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_TURBOX_KEYBOARD, HID_QUIRK_NOGET }, - { USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_DUAL_USB_JOYPAD, HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT }, + { USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_DUAL_USB_JOYPAD, HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT | HID_QUIRK_SKIP_OUTPUT_REPORTS }, { USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_QUAD_USB_JOYPAD, HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT }, { USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SMARTJOY_DUAL_PLUS, HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT }, @@ -719,6 +714,7 @@ static const struct hid_rdesc_blacklist { { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_MX3000_RECEIVER, HID_QUIRK_RDESC_LOGITECH }, { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER, HID_QUIRK_RDESC_LOGITECH }, { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER_2, HID_QUIRK_RDESC_LOGITECH }, + { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_DESKTOP_RECV_1028, HID_QUIRK_RDESC_MICROSOFT_RECV_1028 }, { USB_VENDOR_ID_MONTEREY, USB_DEVICE_ID_GENIUS_KB29E, HID_QUIRK_RDESC_BUTTON_CONSUMER }, @@ -728,6 +724,8 @@ static const struct hid_rdesc_blacklist { { USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_IR_REMOTE, HID_QUIRK_RDESC_SAMSUNG_REMOTE }, + { USB_VENDOR_ID_SUNPLUS, USB_DEVICE_ID_SUNPLUS_WDESKTOP, HID_QUIRK_RDESC_SUNPLUS_WDESKTOP }, + { USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_1, HID_QUIRK_RDESC_SWAPPED_MIN_MAX }, { USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_2, HID_QUIRK_RDESC_SWAPPED_MIN_MAX }, @@ -793,8 +791,8 @@ static struct hid_blacklist *usbhid_exists_dquirk(const u16 idVendor, * * Returns: 0 OK, -error on failure. 
*/ -int usbhid_modify_dquirk(const u16 idVendor, const u16 idProduct, - const u32 quirks) +static int usbhid_modify_dquirk(const u16 idVendor, const u16 idProduct, + const u32 quirks) { struct quirks_list_struct *q_new, *q; int list_edited = 0; @@ -1002,6 +1000,17 @@ static void usbhid_fixup_logitech_descriptor(unsigned char *rdesc, int rsize) } } +static void usbhid_fixup_sunplus_wdesktop(unsigned char *rdesc, int rsize) +{ + if (rsize >= 107 && rdesc[104] == 0x26 + && rdesc[105] == 0x80 + && rdesc[106] == 0x03) { + printk(KERN_INFO "Fixing up Sunplus Wireless Desktop report descriptor\n"); + rdesc[105] = rdesc[110] = 0x03; + rdesc[106] = rdesc[111] = 0x21; + } +} + /* * Samsung IrDA remote controller (reports as Cypress USB Mouse). * @@ -1089,6 +1098,28 @@ static void usbhid_fixup_button_consumer_descriptor(unsigned char *rdesc, int rs } } +/* + * Microsoft Wireless Desktop Receiver (Model 1028) has several + * 'Usage Min/Max' where it ought to have 'Physical Min/Max' + */ +static void usbhid_fixup_microsoft_descriptor(unsigned char *rdesc, int rsize) +{ + if (rsize == 571 && rdesc[284] == 0x19 + && rdesc[286] == 0x2a + && rdesc[304] == 0x19 + && rdesc[306] == 0x29 + && rdesc[352] == 0x1a + && rdesc[355] == 0x2a + && rdesc[557] == 0x19 + && rdesc[559] == 0x29) { + printk(KERN_INFO "Fixing up Microsoft Wireless Receiver Model 1028 report descriptor\n"); + rdesc[284] = rdesc[304] = rdesc[558] = 0x35; + rdesc[352] = 0x36; + rdesc[286] = rdesc[355] = 0x46; + rdesc[306] = rdesc[559] = 0x45; + } +} + static void __usbhid_fixup_report_descriptor(__u32 quirks, char *rdesc, unsigned rsize) { if ((quirks & HID_QUIRK_RDESC_CYMOTION)) @@ -1112,6 +1143,11 @@ static void __usbhid_fixup_report_descriptor(__u32 quirks, char *rdesc, unsigned if (quirks & HID_QUIRK_RDESC_SAMSUNG_REMOTE) usbhid_fixup_samsung_irda_descriptor(rdesc, rsize); + if (quirks & HID_QUIRK_RDESC_MICROSOFT_RECV_1028) + usbhid_fixup_microsoft_descriptor(rdesc, rsize); + + if (quirks & HID_QUIRK_RDESC_SUNPLUS_WDESKTOP) + usbhid_fixup_sunplus_wdesktop(rdesc, rsize); } /** @@ -1150,5 +1186,4 @@ void usbhid_fixup_report_descriptor(const u16 idVendor, const u16 idProduct, else if (paramVendor == idVendor && paramProduct == idProduct) __usbhid_fixup_report_descriptor(quirks, rdesc, rsize); } - } diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c index 5fc4019956b..95cc192bc7a 100644 --- a/drivers/hid/usbhid/hiddev.c +++ b/drivers/hid/usbhid/hiddev.c @@ -393,6 +393,153 @@ static unsigned int hiddev_poll(struct file *file, poll_table *wait) /* * "ioctl" file op */ +static noinline int hiddev_ioctl_usage(struct hiddev *hiddev, unsigned int cmd, void __user *user_arg) +{ + struct hid_device *hid = hiddev->hid; + struct hiddev_report_info rinfo; + struct hiddev_usage_ref_multi *uref_multi = NULL; + struct hiddev_usage_ref *uref; + struct hid_report *report; + struct hid_field *field; + int i; + + uref_multi = kmalloc(sizeof(struct hiddev_usage_ref_multi), GFP_KERNEL); + if (!uref_multi) + return -ENOMEM; + uref = &uref_multi->uref; + if (cmd == HIDIOCGUSAGES || cmd == HIDIOCSUSAGES) { + if (copy_from_user(uref_multi, user_arg, + sizeof(*uref_multi))) + goto fault; + } else { + if (copy_from_user(uref, user_arg, sizeof(*uref))) + goto fault; + } + + switch (cmd) { + case HIDIOCGUCODE: + rinfo.report_type = uref->report_type; + rinfo.report_id = uref->report_id; + if ((report = hiddev_lookup_report(hid, &rinfo)) == NULL) + goto inval; + + if (uref->field_index >= report->maxfield) + goto inval; + + field = 
report->field[uref->field_index]; + if (uref->usage_index >= field->maxusage) + goto inval; + + uref->usage_code = field->usage[uref->usage_index].hid; + + if (copy_to_user(user_arg, uref, sizeof(*uref))) + goto fault; + + kfree(uref_multi); + return 0; + + default: + if (cmd != HIDIOCGUSAGE && + cmd != HIDIOCGUSAGES && + uref->report_type == HID_REPORT_TYPE_INPUT) + goto inval; + + if (uref->report_id == HID_REPORT_ID_UNKNOWN) { + field = hiddev_lookup_usage(hid, uref); + if (field == NULL) + goto inval; + } else { + rinfo.report_type = uref->report_type; + rinfo.report_id = uref->report_id; + if ((report = hiddev_lookup_report(hid, &rinfo)) == NULL) + goto inval; + + if (uref->field_index >= report->maxfield) + goto inval; + + field = report->field[uref->field_index]; + + if (cmd == HIDIOCGCOLLECTIONINDEX) { + if (uref->usage_index >= field->maxusage) + goto inval; + } else if (uref->usage_index >= field->report_count) + goto inval; + + else if ((cmd == HIDIOCGUSAGES || cmd == HIDIOCSUSAGES) && + (uref_multi->num_values > HID_MAX_MULTI_USAGES || + uref->usage_index + uref_multi->num_values > field->report_count)) + goto inval; + } + + switch (cmd) { + case HIDIOCGUSAGE: + uref->value = field->value[uref->usage_index]; + if (copy_to_user(user_arg, uref, sizeof(*uref))) + goto fault; + goto goodreturn; + + case HIDIOCSUSAGE: + field->value[uref->usage_index] = uref->value; + goto goodreturn; + + case HIDIOCGCOLLECTIONINDEX: + kfree(uref_multi); + return field->usage[uref->usage_index].collection_index; + case HIDIOCGUSAGES: + for (i = 0; i < uref_multi->num_values; i++) + uref_multi->values[i] = + field->value[uref->usage_index + i]; + if (copy_to_user(user_arg, uref_multi, + sizeof(*uref_multi))) + goto fault; + goto goodreturn; + case HIDIOCSUSAGES: + for (i = 0; i < uref_multi->num_values; i++) + field->value[uref->usage_index + i] = + uref_multi->values[i]; + goto goodreturn; + } + +goodreturn: + kfree(uref_multi); + return 0; +fault: + kfree(uref_multi); + return -EFAULT; +inval: + kfree(uref_multi); + return -EINVAL; + } +} + +static noinline int hiddev_ioctl_string(struct hiddev *hiddev, unsigned int cmd, void __user *user_arg) +{ + struct hid_device *hid = hiddev->hid; + struct usb_device *dev = hid_to_usb_dev(hid); + int idx, len; + char *buf; + + if (get_user(idx, (int __user *)user_arg)) + return -EFAULT; + + if ((buf = kmalloc(HID_STRING_SIZE, GFP_KERNEL)) == NULL) + return -ENOMEM; + + if ((len = usb_string(dev, idx, buf, HID_STRING_SIZE-1)) < 0) { + kfree(buf); + return -EINVAL; + } + + if (copy_to_user(user_arg+sizeof(int), buf, len+1)) { + kfree(buf); + return -EFAULT; + } + + kfree(buf); + + return len; +} + static int hiddev_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg) { struct hiddev_list *list = file->private_data; @@ -402,8 +549,6 @@ static int hiddev_ioctl(struct inode *inode, struct file *file, unsigned int cmd struct hiddev_collection_info cinfo; struct hiddev_report_info rinfo; struct hiddev_field_info finfo; - struct hiddev_usage_ref_multi *uref_multi = NULL; - struct hiddev_usage_ref *uref; struct hiddev_devinfo dinfo; struct hid_report *report; struct hid_field *field; @@ -470,30 +615,7 @@ static int hiddev_ioctl(struct inode *inode, struct file *file, unsigned int cmd } case HIDIOCGSTRING: - { - int idx, len; - char *buf; - - if (get_user(idx, (int __user *)arg)) - return -EFAULT; - - if ((buf = kmalloc(HID_STRING_SIZE, GFP_KERNEL)) == NULL) - return -ENOMEM; - - if ((len = usb_string(dev, idx, buf, HID_STRING_SIZE-1)) < 0) 
{ - kfree(buf); - return -EINVAL; - } - - if (copy_to_user(user_arg+sizeof(int), buf, len+1)) { - kfree(buf); - return -EFAULT; - } - - kfree(buf); - - return len; - } + return hiddev_ioctl_string(hiddev, cmd, user_arg); case HIDIOCINITREPORT: usbhid_init_reports(hid); @@ -578,121 +700,13 @@ static int hiddev_ioctl(struct inode *inode, struct file *file, unsigned int cmd return 0; case HIDIOCGUCODE: - uref_multi = kmalloc(sizeof(struct hiddev_usage_ref_multi), GFP_KERNEL); - if (!uref_multi) - return -ENOMEM; - uref = &uref_multi->uref; - if (copy_from_user(uref, user_arg, sizeof(*uref))) - goto fault; - - rinfo.report_type = uref->report_type; - rinfo.report_id = uref->report_id; - if ((report = hiddev_lookup_report(hid, &rinfo)) == NULL) - goto inval; - - if (uref->field_index >= report->maxfield) - goto inval; - - field = report->field[uref->field_index]; - if (uref->usage_index >= field->maxusage) - goto inval; - - uref->usage_code = field->usage[uref->usage_index].hid; - - if (copy_to_user(user_arg, uref, sizeof(*uref))) - goto fault; - - kfree(uref_multi); - return 0; - + /* fall through */ case HIDIOCGUSAGE: case HIDIOCSUSAGE: case HIDIOCGUSAGES: case HIDIOCSUSAGES: case HIDIOCGCOLLECTIONINDEX: - uref_multi = kmalloc(sizeof(struct hiddev_usage_ref_multi), GFP_KERNEL); - if (!uref_multi) - return -ENOMEM; - uref = &uref_multi->uref; - if (cmd == HIDIOCGUSAGES || cmd == HIDIOCSUSAGES) { - if (copy_from_user(uref_multi, user_arg, - sizeof(*uref_multi))) - goto fault; - } else { - if (copy_from_user(uref, user_arg, sizeof(*uref))) - goto fault; - } - - if (cmd != HIDIOCGUSAGE && - cmd != HIDIOCGUSAGES && - uref->report_type == HID_REPORT_TYPE_INPUT) - goto inval; - - if (uref->report_id == HID_REPORT_ID_UNKNOWN) { - field = hiddev_lookup_usage(hid, uref); - if (field == NULL) - goto inval; - } else { - rinfo.report_type = uref->report_type; - rinfo.report_id = uref->report_id; - if ((report = hiddev_lookup_report(hid, &rinfo)) == NULL) - goto inval; - - if (uref->field_index >= report->maxfield) - goto inval; - - field = report->field[uref->field_index]; - - if (cmd == HIDIOCGCOLLECTIONINDEX) { - if (uref->usage_index >= field->maxusage) - goto inval; - } else if (uref->usage_index >= field->report_count) - goto inval; - - else if ((cmd == HIDIOCGUSAGES || cmd == HIDIOCSUSAGES) && - (uref_multi->num_values > HID_MAX_MULTI_USAGES || - uref->usage_index + uref_multi->num_values > field->report_count)) - goto inval; - } - - switch (cmd) { - case HIDIOCGUSAGE: - uref->value = field->value[uref->usage_index]; - if (copy_to_user(user_arg, uref, sizeof(*uref))) - goto fault; - goto goodreturn; - - case HIDIOCSUSAGE: - field->value[uref->usage_index] = uref->value; - goto goodreturn; - - case HIDIOCGCOLLECTIONINDEX: - kfree(uref_multi); - return field->usage[uref->usage_index].collection_index; - case HIDIOCGUSAGES: - for (i = 0; i < uref_multi->num_values; i++) - uref_multi->values[i] = - field->value[uref->usage_index + i]; - if (copy_to_user(user_arg, uref_multi, - sizeof(*uref_multi))) - goto fault; - goto goodreturn; - case HIDIOCSUSAGES: - for (i = 0; i < uref_multi->num_values; i++) - field->value[uref->usage_index + i] = - uref_multi->values[i]; - goto goodreturn; - } - -goodreturn: - kfree(uref_multi); - return 0; -fault: - kfree(uref_multi); - return -EFAULT; -inval: - kfree(uref_multi); - return -EINVAL; + return hiddev_ioctl_usage(hiddev, cmd, user_arg); case HIDIOCGCOLLECTIONINFO: if (copy_from_user(&cinfo, user_arg, sizeof(cinfo))) diff --git a/drivers/hid/usbhid/usbhid.h 
b/drivers/hid/usbhid/usbhid.h index 0023f96d429..62d2d7c925b 100644 --- a/drivers/hid/usbhid/usbhid.h +++ b/drivers/hid/usbhid/usbhid.h @@ -28,6 +28,7 @@ #include <linux/slab.h> #include <linux/list.h> #include <linux/timer.h> +#include <linux/wait.h> #include <linux/workqueue.h> #include <linux/input.h> @@ -77,7 +78,7 @@ struct usbhid_device { unsigned long stop_retry; /* Time to give up, in jiffies */ unsigned int retry_delay; /* Delay length in ms */ struct work_struct reset_work; /* Task context for resets */ - + wait_queue_head_t wait; /* For sleeping */ }; #define hid_to_usb_dev(hid_dev) \ diff --git a/drivers/i2c/algos/Kconfig b/drivers/i2c/algos/Kconfig index 014dfa575be..7137a17402f 100644 --- a/drivers/i2c/algos/Kconfig +++ b/drivers/i2c/algos/Kconfig @@ -1,45 +1,16 @@ # -# Character device configuration +# I2C algorithm drivers configuration # -menu "I2C Algorithms" - config I2C_ALGOBIT - tristate "I2C bit-banging interfaces" - help - This allows you to use a range of I2C adapters called bit-banging - adapters. Say Y if you own an I2C adapter belonging to this class - and then say Y to the specific driver for you adapter below. - - This support is also available as a module. If so, the module - will be called i2c-algo-bit. + tristate config I2C_ALGOPCF - tristate "I2C PCF 8584 interfaces" - help - This allows you to use a range of I2C adapters called PCF adapters. - Say Y if you own an I2C adapter belonging to this class and then say - Y to the specific driver for you adapter below. - - This support is also available as a module. If so, the module - will be called i2c-algo-pcf. + tristate config I2C_ALGOPCA - tristate "I2C PCA 9564 interfaces" - help - This allows you to use a range of I2C adapters called PCA adapters. - Say Y if you own an I2C adapter belonging to this class and then say - Y to the specific driver for you adapter below. - - This support is also available as a module. If so, the module - will be called i2c-algo-pca. + tristate config I2C_ALGO_SGI - tristate "I2C SGI interfaces" + tristate depends on SGI_IP22 || SGI_IP32 || X86_VISWS - help - Supports the SGI interfaces like the ones found on SGI Indy VINO - or SGI O2 MACE. - -endmenu - diff --git a/drivers/i2c/algos/i2c-algo-pca.c b/drivers/i2c/algos/i2c-algo-pca.c index 2a16211f12e..e954a20b97a 100644 --- a/drivers/i2c/algos/i2c-algo-pca.c +++ b/drivers/i2c/algos/i2c-algo-pca.c @@ -1,6 +1,7 @@ /* - * i2c-algo-pca.c i2c driver algorithms for PCA9564 adapters + * i2c-algo-pca.c i2c driver algorithms for PCA9564 adapters * Copyright (C) 2004 Arcom Control Systems + * Copyright (C) 2008 Pengutronix * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -21,14 +22,10 @@ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/delay.h> -#include <linux/slab.h> #include <linux/init.h> #include <linux/errno.h> #include <linux/i2c.h> #include <linux/i2c-algo-pca.h> -#include "i2c-algo-pca.h" - -#define DRIVER "i2c-algo-pca" #define DEB1(fmt, args...) do { if (i2c_debug>=1) printk(fmt, ## args); } while(0) #define DEB2(fmt, args...) 
do { if (i2c_debug>=2) printk(fmt, ## args); } while(0) @@ -36,15 +33,15 @@ static int i2c_debug; -#define pca_outw(adap, reg, val) adap->write_byte(adap, reg, val) -#define pca_inw(adap, reg) adap->read_byte(adap, reg) +#define pca_outw(adap, reg, val) adap->write_byte(adap->data, reg, val) +#define pca_inw(adap, reg) adap->read_byte(adap->data, reg) #define pca_status(adap) pca_inw(adap, I2C_PCA_STA) -#define pca_clock(adap) adap->get_clock(adap) -#define pca_own(adap) adap->get_own(adap) +#define pca_clock(adap) adap->i2c_clock #define pca_set_con(adap, val) pca_outw(adap, I2C_PCA_CON, val) #define pca_get_con(adap) pca_inw(adap, I2C_PCA_CON) -#define pca_wait(adap) adap->wait_for_interrupt(adap) +#define pca_wait(adap) adap->wait_for_completion(adap->data) +#define pca_reset(adap) adap->reset_chip(adap->data) /* * Generate a start condition on the i2c bus. @@ -99,7 +96,7 @@ static void pca_stop(struct i2c_algo_pca_data *adap) * * returns after the address has been sent */ -static void pca_address(struct i2c_algo_pca_data *adap, +static void pca_address(struct i2c_algo_pca_data *adap, struct i2c_msg *msg) { int sta = pca_get_con(adap); @@ -108,9 +105,9 @@ static void pca_address(struct i2c_algo_pca_data *adap, addr = ( (0x7f & msg->addr) << 1 ); if (msg->flags & I2C_M_RD ) addr |= 1; - DEB2("=== SLAVE ADDRESS %#04x+%c=%#04x\n", + DEB2("=== SLAVE ADDRESS %#04x+%c=%#04x\n", msg->addr, msg->flags & I2C_M_RD ? 'R' : 'W', addr); - + pca_outw(adap, I2C_PCA_DAT, addr); sta &= ~(I2C_PCA_CON_STO|I2C_PCA_CON_STA|I2C_PCA_CON_SI); @@ -124,7 +121,7 @@ static void pca_address(struct i2c_algo_pca_data *adap, * * Returns after the byte has been transmitted */ -static void pca_tx_byte(struct i2c_algo_pca_data *adap, +static void pca_tx_byte(struct i2c_algo_pca_data *adap, __u8 b) { int sta = pca_get_con(adap); @@ -142,19 +139,19 @@ static void pca_tx_byte(struct i2c_algo_pca_data *adap, * * returns immediately. */ -static void pca_rx_byte(struct i2c_algo_pca_data *adap, +static void pca_rx_byte(struct i2c_algo_pca_data *adap, __u8 *b, int ack) { *b = pca_inw(adap, I2C_PCA_DAT); DEB2("=== READ %#04x %s\n", *b, ack ? "ACK" : "NACK"); } -/* +/* * Setup ACK or NACK for next received byte and wait for it to arrive. * * Returns after next byte has arrived. */ -static void pca_rx_ack(struct i2c_algo_pca_data *adap, +static void pca_rx_ack(struct i2c_algo_pca_data *adap, int ack) { int sta = pca_get_con(adap); @@ -168,15 +165,6 @@ static void pca_rx_ack(struct i2c_algo_pca_data *adap, pca_wait(adap); } -/* - * Reset the i2c bus / SIO - */ -static void pca_reset(struct i2c_algo_pca_data *adap) -{ - /* apparently only an external reset will do it. 
not a lot can be done */ - printk(KERN_ERR DRIVER ": Haven't figured out how to do a reset yet\n"); -} - static int pca_xfer(struct i2c_adapter *i2c_adap, struct i2c_msg *msgs, int num) @@ -187,7 +175,7 @@ static int pca_xfer(struct i2c_adapter *i2c_adap, int numbytes = 0; int state; int ret; - int timeout = 100; + int timeout = i2c_adap->timeout; while ((state = pca_status(adap)) != 0xf8 && timeout--) { msleep(10); @@ -203,14 +191,14 @@ static int pca_xfer(struct i2c_adapter *i2c_adap, for (curmsg = 0; curmsg < num; curmsg++) { int addr, i; msg = &msgs[curmsg]; - + addr = (0x7f & msg->addr) ; - + if (msg->flags & I2C_M_RD ) - printk(KERN_INFO " [%02d] RD %d bytes from %#02x [%#02x, ...]\n", + printk(KERN_INFO " [%02d] RD %d bytes from %#02x [%#02x, ...]\n", curmsg, msg->len, addr, (addr<<1) | 1); else { - printk(KERN_INFO " [%02d] WR %d bytes to %#02x [%#02x%s", + printk(KERN_INFO " [%02d] WR %d bytes to %#02x [%#02x%s", curmsg, msg->len, addr, addr<<1, msg->len == 0 ? "" : ", "); for(i=0; i < msg->len; i++) @@ -237,7 +225,7 @@ static int pca_xfer(struct i2c_adapter *i2c_adap, case 0x10: /* A repeated start condition has been transmitted */ pca_address(adap, msg); break; - + case 0x18: /* SLA+W has been transmitted; ACK has been received */ case 0x28: /* Data byte in I2CDAT has been transmitted; ACK has been received */ if (numbytes < msg->len) { @@ -287,7 +275,7 @@ static int pca_xfer(struct i2c_adapter *i2c_adap, case 0x38: /* Arbitration lost during SLA+W, SLA+R or data bytes */ DEB2("Arbitration lost\n"); goto out; - + case 0x58: /* Data byte has been received; NOT ACK has been returned */ if ( numbytes == msg->len - 1 ) { pca_rx_byte(adap, &msg->buf[numbytes], 0); @@ -317,16 +305,16 @@ static int pca_xfer(struct i2c_adapter *i2c_adap, pca_reset(adap); goto out; default: - printk(KERN_ERR DRIVER ": unhandled SIO state 0x%02x\n", state); + dev_err(&i2c_adap->dev, "unhandled SIO state 0x%02x\n", state); break; } - + } ret = curmsg; out: DEB1(KERN_CRIT "}}} transfered %d/%d messages. " - "status is %#04x. control is %#04x\n", + "status is %#04x. control is %#04x\n", curmsg, num, pca_status(adap), pca_get_con(adap)); return ret; @@ -337,53 +325,65 @@ static u32 pca_func(struct i2c_adapter *adap) return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; } -static int pca_init(struct i2c_algo_pca_data *adap) +static const struct i2c_algorithm pca_algo = { + .master_xfer = pca_xfer, + .functionality = pca_func, +}; + +static int pca_init(struct i2c_adapter *adap) { static int freqs[] = {330,288,217,146,88,59,44,36}; - int own, clock; + int clock; + struct i2c_algo_pca_data *pca_data = adap->algo_data; + + if (pca_data->i2c_clock > 7) { + printk(KERN_WARNING "%s: Invalid I2C clock speed selected. 
Trying default.\n", + adap->name); + pca_data->i2c_clock = I2C_PCA_CON_59kHz; + } + + adap->algo = &pca_algo; - own = pca_own(adap); - clock = pca_clock(adap); - DEB1(KERN_INFO DRIVER ": own address is %#04x\n", own); - DEB1(KERN_INFO DRIVER ": clock freqeuncy is %dkHz\n", freqs[clock]); + pca_reset(pca_data); - pca_outw(adap, I2C_PCA_ADR, own << 1); + clock = pca_clock(pca_data); + DEB1(KERN_INFO "%s: Clock frequency is %dkHz\n", adap->name, freqs[clock]); - pca_set_con(adap, I2C_PCA_CON_ENSIO | clock); - udelay(500); /* 500 µs for oscilator to stabilise */ + pca_set_con(pca_data, I2C_PCA_CON_ENSIO | clock); + udelay(500); /* 500 us for oscilator to stabilise */ return 0; } -static const struct i2c_algorithm pca_algo = { - .master_xfer = pca_xfer, - .functionality = pca_func, -}; - -/* - * registering functions to load algorithms at runtime +/* + * registering functions to load algorithms at runtime */ int i2c_pca_add_bus(struct i2c_adapter *adap) { - struct i2c_algo_pca_data *pca_adap = adap->algo_data; int rval; - /* register new adapter to i2c module... */ - adap->algo = &pca_algo; + rval = pca_init(adap); + if (rval) + return rval; - adap->timeout = 100; /* default values, should */ - adap->retries = 3; /* be replaced by defines */ + return i2c_add_adapter(adap); +} +EXPORT_SYMBOL(i2c_pca_add_bus); - if ((rval = pca_init(pca_adap))) - return rval; +int i2c_pca_add_numbered_bus(struct i2c_adapter *adap) +{ + int rval; - rval = i2c_add_adapter(adap); + rval = pca_init(adap); + if (rval) + return rval; - return rval; + return i2c_add_numbered_adapter(adap); } -EXPORT_SYMBOL(i2c_pca_add_bus); +EXPORT_SYMBOL(i2c_pca_add_numbered_bus); -MODULE_AUTHOR("Ian Campbell <icampbell@arcom.com>"); +MODULE_AUTHOR("Ian Campbell <icampbell@arcom.com>, " + "Wolfram Sang <w.sang@pengutronix.de>"); MODULE_DESCRIPTION("I2C-Bus PCA9564 algorithm"); MODULE_LICENSE("GPL"); diff --git a/drivers/i2c/algos/i2c-algo-pca.h b/drivers/i2c/algos/i2c-algo-pca.h deleted file mode 100644 index 2fee07e0521..00000000000 --- a/drivers/i2c/algos/i2c-algo-pca.h +++ /dev/null @@ -1,26 +0,0 @@ -#ifndef I2C_PCA9564_H -#define I2C_PCA9564_H 1 - -#define I2C_PCA_STA 0x00 /* STATUS Read Only */ -#define I2C_PCA_TO 0x00 /* TIMEOUT Write Only */ -#define I2C_PCA_DAT 0x01 /* DATA Read/Write */ -#define I2C_PCA_ADR 0x02 /* OWN ADR Read/Write */ -#define I2C_PCA_CON 0x03 /* CONTROL Read/Write */ - -#define I2C_PCA_CON_AA 0x80 /* Assert Acknowledge */ -#define I2C_PCA_CON_ENSIO 0x40 /* Enable */ -#define I2C_PCA_CON_STA 0x20 /* Start */ -#define I2C_PCA_CON_STO 0x10 /* Stop */ -#define I2C_PCA_CON_SI 0x08 /* Serial Interrupt */ -#define I2C_PCA_CON_CR 0x07 /* Clock Rate (MASK) */ - -#define I2C_PCA_CON_330kHz 0x00 -#define I2C_PCA_CON_288kHz 0x01 -#define I2C_PCA_CON_217kHz 0x02 -#define I2C_PCA_CON_146kHz 0x03 -#define I2C_PCA_CON_88kHz 0x04 -#define I2C_PCA_CON_59kHz 0x05 -#define I2C_PCA_CON_44kHz 0x06 -#define I2C_PCA_CON_36kHz 0x07 - -#endif /* I2C_PCA9564_H */ diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index b04c99580d0..48438cc5d0c 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig @@ -100,9 +100,12 @@ config I2C_AU1550 config I2C_BLACKFIN_TWI tristate "Blackfin TWI I2C support" - depends on BF534 || BF536 || BF537 + depends on BLACKFIN help - This is the TWI I2C device driver for Blackfin 534/536/537/54x. + This is the TWI I2C device driver for Blackfin BF522, BF525, + BF527, BF534, BF536, BF537 and BF54x. For other Blackfin processors, + please don't use this driver. 
+ This driver can also be built as a module. If so, the module will be called i2c-bfin-twi. @@ -135,7 +138,7 @@ config I2C_ELEKTOR This supports the PCF8584 ISA bus I2C adapter. Say Y if you own such an adapter. - This support is also available as a module. If so, the module + This support is also available as a module. If so, the module will be called i2c-elektor. config I2C_GPIO @@ -190,7 +193,7 @@ config I2C_I810 select I2C_ALGOBIT help If you say yes to this option, support will be included for the Intel - 810/815 family of mainboard I2C interfaces. Specifically, the + 810/815 family of mainboard I2C interfaces. Specifically, the following versions of the chipset are supported: i810AA i810AB @@ -246,10 +249,10 @@ config I2C_PIIX4 config I2C_IBM_IIC tristate "IBM PPC 4xx on-chip I2C interface" - depends on IBM_OCP + depends on 4xx help - Say Y here if you want to use IIC peripheral found on - embedded IBM PPC 4xx based systems. + Say Y here if you want to use IIC peripheral found on + embedded IBM PPC 4xx based systems. This driver can also be built as a module. If so, the module will be called i2c-ibm_iic. @@ -269,7 +272,7 @@ config I2C_IXP2000 depends on ARCH_IXP2000 select I2C_ALGOBIT help - Say Y here if you have an Intel IXP2000 (2400, 2800, 2850) based + Say Y here if you have an Intel IXP2000 (2400, 2800, 2850) based system and are using GPIO lines for an I2C bus. This support is also available as a module. If so, the module @@ -354,7 +357,7 @@ config I2C_PARPORT on the parport driver. This is meant for embedded systems. Don't say Y here if you intend to say Y or M there. - This support is also available as a module. If so, the module + This support is also available as a module. If so, the module will be called i2c-parport. config I2C_PARPORT_LIGHT @@ -372,12 +375,12 @@ config I2C_PARPORT_LIGHT the clean but heavy parport handling is not an option. The drawback is a reduced portability and the impossibility to daisy-chain other parallel port devices. - + Don't say Y here if you said Y or M to i2c-parport. Saying M to both is possible but both modules should not be loaded at the same time. - This support is also available as a module. If so, the module + This support is also available as a module. If so, the module will be called i2c-parport-light. config I2C_PASEMI @@ -401,7 +404,7 @@ config I2C_PROSAVAGE This driver is deprecated in favor of the savagefb driver. - This support is also available as a module. If so, the module + This support is also available as a module. If so, the module will be called i2c-prosavage. config I2C_S3C2410 @@ -417,7 +420,7 @@ config I2C_SAVAGE4 depends on PCI select I2C_ALGOBIT help - If you say yes to this option, support will be included for the + If you say yes to this option, support will be included for the S3 Savage 4 I2C interface. This driver is deprecated in favor of the savagefb driver. @@ -452,7 +455,7 @@ config SCx200_I2C If you don't know what to do here, say N. - This support is also available as a module. If so, the module + This support is also available as a module. If so, the module will be called scx200_i2c. This driver is deprecated and will be dropped soon. Use i2c-gpio @@ -483,14 +486,14 @@ config SCx200_ACB If you don't know what to do here, say N. - This support is also available as a module. If so, the module + This support is also available as a module. If so, the module will be called scx200_acb. 
config I2C_SIS5595 tristate "SiS 5595" depends on PCI help - If you say yes to this option, support will be included for the + If you say yes to this option, support will be included for the SiS5595 SMBus (a subset of I2C) interface. This driver can also be built as a module. If so, the module @@ -500,7 +503,7 @@ config I2C_SIS630 tristate "SiS 630/730" depends on PCI help - If you say yes to this option, support will be included for the + If you say yes to this option, support will be included for the SiS630 and SiS730 SMBus (a subset of I2C) interface. This driver can also be built as a module. If so, the module @@ -632,9 +635,9 @@ config I2C_PCA_ISA select I2C_ALGOPCA default n help - This driver supports ISA boards using the Philips PCA 9564 - Parallel bus to I2C bus controller - + This driver supports ISA boards using the Philips PCA9564 + parallel bus to I2C bus controller. + This driver can also be built as a module. If so, the module will be called i2c-pca-isa. @@ -643,6 +646,17 @@ config I2C_PCA_ISA delays when I2C/SMBus chip drivers are loaded (e.g. at boot time). If unsure, say N. +config I2C_PCA_PLATFORM + tristate "PCA9564 as platform device" + select I2C_ALGOPCA + default n + help + This driver supports a memory mapped Philips PCA9564 + parallel bus to I2C bus controller. + + This driver can also be built as a module. If so, the module + will be called i2c-pca-platform. + config I2C_MV64XXX tristate "Marvell mv64xxx I2C Controller" depends on (MV64X60 || PLAT_ORION) && EXPERIMENTAL @@ -672,4 +686,23 @@ config I2C_PMCMSP This driver can also be built as module. If so, the module will be called i2c-pmcmsp. +config I2C_SH7760 + tristate "Renesas SH7760 I2C Controller" + depends on CPU_SUBTYPE_SH7760 + help + This driver supports the 2 I2C interfaces on the Renesas SH7760. + + This driver can also be built as a module. If so, the module + will be called i2c-sh7760. + +config I2C_SH_MOBILE + tristate "SuperH Mobile I2C Controller" + depends on SUPERH + help + If you say yes to this option, support will be included for the + built-in I2C interface on the Renesas SH-Mobile processor. + + This driver can also be built as a module. If so, the module + will be called i2c-sh_mobile. 
+ endmenu diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile index ea7068f1eb6..e8c882a5ea6 100644 --- a/drivers/i2c/busses/Makefile +++ b/drivers/i2c/busses/Makefile @@ -30,6 +30,7 @@ obj-$(CONFIG_I2C_PARPORT) += i2c-parport.o obj-$(CONFIG_I2C_PARPORT_LIGHT) += i2c-parport-light.o obj-$(CONFIG_I2C_PASEMI) += i2c-pasemi.o obj-$(CONFIG_I2C_PCA_ISA) += i2c-pca-isa.o +obj-$(CONFIG_I2C_PCA_PLATFORM) += i2c-pca-platform.o obj-$(CONFIG_I2C_PIIX4) += i2c-piix4.o obj-$(CONFIG_I2C_PMCMSP) += i2c-pmcmsp.o obj-$(CONFIG_I2C_PNX) += i2c-pnx.o @@ -37,6 +38,8 @@ obj-$(CONFIG_I2C_PROSAVAGE) += i2c-prosavage.o obj-$(CONFIG_I2C_PXA) += i2c-pxa.o obj-$(CONFIG_I2C_S3C2410) += i2c-s3c2410.o obj-$(CONFIG_I2C_SAVAGE4) += i2c-savage4.o +obj-$(CONFIG_I2C_SH7760) += i2c-sh7760.o +obj-$(CONFIG_I2C_SH_MOBILE) += i2c-sh_mobile.o obj-$(CONFIG_I2C_SIBYTE) += i2c-sibyte.o obj-$(CONFIG_I2C_SIMTEC) += i2c-simtec.o obj-$(CONFIG_I2C_SIS5595) += i2c-sis5595.o diff --git a/drivers/i2c/busses/i2c-at91.c b/drivers/i2c/busses/i2c-at91.c index c09b036913b..73d61946a53 100644 --- a/drivers/i2c/busses/i2c-at91.c +++ b/drivers/i2c/busses/i2c-at91.c @@ -298,7 +298,7 @@ static int at91_i2c_resume(struct platform_device *pdev) #endif /* work with "modprobe at91_i2c" from hotplugging or coldplugging */ -MODULE_ALIAS("at91_i2c"); +MODULE_ALIAS("platform:at91_i2c"); static struct platform_driver at91_i2c_driver = { .probe = at91_i2c_probe, diff --git a/drivers/i2c/busses/i2c-au1550.c b/drivers/i2c/busses/i2c-au1550.c index 1953b26da56..491718fe46b 100644 --- a/drivers/i2c/busses/i2c-au1550.c +++ b/drivers/i2c/busses/i2c-au1550.c @@ -472,6 +472,7 @@ i2c_au1550_exit(void) MODULE_AUTHOR("Dan Malek, Embedded Edge, LLC."); MODULE_DESCRIPTION("SMBus adapter Alchemy pb1550"); MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:au1xpsc_smbus"); module_init (i2c_au1550_init); module_exit (i2c_au1550_exit); diff --git a/drivers/i2c/busses/i2c-bfin-twi.c b/drivers/i2c/busses/i2c-bfin-twi.c index 7dbdaeb707a..48d084bdf7c 100644 --- a/drivers/i2c/busses/i2c-bfin-twi.c +++ b/drivers/i2c/busses/i2c-bfin-twi.c @@ -1,25 +1,11 @@ /* - * drivers/i2c/busses/i2c-bfin-twi.c + * Blackfin On-Chip Two Wire Interface Driver * - * Description: Driver for Blackfin Two Wire Interface + * Copyright 2005-2007 Analog Devices Inc. * - * Author: sonicz <sonic.zhang@analog.com> + * Enter bugs at http://blackfin.uclinux.org/ * - * Copyright (c) 2005-2007 Analog Devices, Inc. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * Licensed under the GPL-2 or later. 
*/ #include <linux/module.h> @@ -34,14 +20,16 @@ #include <linux/platform_device.h> #include <asm/blackfin.h> +#include <asm/portmux.h> #include <asm/irq.h> #define POLL_TIMEOUT (2 * HZ) /* SMBus mode*/ -#define TWI_I2C_MODE_STANDARD 0x01 -#define TWI_I2C_MODE_STANDARDSUB 0x02 -#define TWI_I2C_MODE_COMBINED 0x04 +#define TWI_I2C_MODE_STANDARD 1 +#define TWI_I2C_MODE_STANDARDSUB 2 +#define TWI_I2C_MODE_COMBINED 3 +#define TWI_I2C_MODE_REPEAT 4 struct bfin_twi_iface { int irq; @@ -58,39 +46,74 @@ struct bfin_twi_iface { struct timer_list timeout_timer; struct i2c_adapter adap; struct completion complete; + struct i2c_msg *pmsg; + int msg_num; + int cur_msg; + void __iomem *regs_base; }; -static struct bfin_twi_iface twi_iface; + +#define DEFINE_TWI_REG(reg, off) \ +static inline u16 read_##reg(struct bfin_twi_iface *iface) \ + { return bfin_read16(iface->regs_base + (off)); } \ +static inline void write_##reg(struct bfin_twi_iface *iface, u16 v) \ + { bfin_write16(iface->regs_base + (off), v); } + +DEFINE_TWI_REG(CLKDIV, 0x00) +DEFINE_TWI_REG(CONTROL, 0x04) +DEFINE_TWI_REG(SLAVE_CTL, 0x08) +DEFINE_TWI_REG(SLAVE_STAT, 0x0C) +DEFINE_TWI_REG(SLAVE_ADDR, 0x10) +DEFINE_TWI_REG(MASTER_CTL, 0x14) +DEFINE_TWI_REG(MASTER_STAT, 0x18) +DEFINE_TWI_REG(MASTER_ADDR, 0x1C) +DEFINE_TWI_REG(INT_STAT, 0x20) +DEFINE_TWI_REG(INT_MASK, 0x24) +DEFINE_TWI_REG(FIFO_CTL, 0x28) +DEFINE_TWI_REG(FIFO_STAT, 0x2C) +DEFINE_TWI_REG(XMT_DATA8, 0x80) +DEFINE_TWI_REG(XMT_DATA16, 0x84) +DEFINE_TWI_REG(RCV_DATA8, 0x88) +DEFINE_TWI_REG(RCV_DATA16, 0x8C) + +static const u16 pin_req[2][3] = { + {P_TWI0_SCL, P_TWI0_SDA, 0}, + {P_TWI1_SCL, P_TWI1_SDA, 0}, +}; static void bfin_twi_handle_interrupt(struct bfin_twi_iface *iface) { - unsigned short twi_int_status = bfin_read_TWI_INT_STAT(); - unsigned short mast_stat = bfin_read_TWI_MASTER_STAT(); + unsigned short twi_int_status = read_INT_STAT(iface); + unsigned short mast_stat = read_MASTER_STAT(iface); if (twi_int_status & XMTSERV) { /* Transmit next data */ if (iface->writeNum > 0) { - bfin_write_TWI_XMT_DATA8(*(iface->transPtr++)); + write_XMT_DATA8(iface, *(iface->transPtr++)); iface->writeNum--; } /* start receive immediately after complete sending in * combine mode. */ - else if (iface->cur_mode == TWI_I2C_MODE_COMBINED) { - bfin_write_TWI_MASTER_CTL(bfin_read_TWI_MASTER_CTL() - | MDIR | RSTART); - } else if (iface->manual_stop) - bfin_write_TWI_MASTER_CTL(bfin_read_TWI_MASTER_CTL() - | STOP); + else if (iface->cur_mode == TWI_I2C_MODE_COMBINED) + write_MASTER_CTL(iface, + read_MASTER_CTL(iface) | MDIR | RSTART); + else if (iface->manual_stop) + write_MASTER_CTL(iface, + read_MASTER_CTL(iface) | STOP); + else if (iface->cur_mode == TWI_I2C_MODE_REPEAT && + iface->cur_msg+1 < iface->msg_num) + write_MASTER_CTL(iface, + read_MASTER_CTL(iface) | RSTART); SSYNC(); /* Clear status */ - bfin_write_TWI_INT_STAT(XMTSERV); + write_INT_STAT(iface, XMTSERV); SSYNC(); } if (twi_int_status & RCVSERV) { if (iface->readNum > 0) { /* Receive next data */ - *(iface->transPtr) = bfin_read_TWI_RCV_DATA8(); + *(iface->transPtr) = read_RCV_DATA8(iface); if (iface->cur_mode == TWI_I2C_MODE_COMBINED) { /* Change combine mode into sub mode after * read first data. 
@@ -105,28 +128,33 @@ static void bfin_twi_handle_interrupt(struct bfin_twi_iface *iface) iface->transPtr++; iface->readNum--; } else if (iface->manual_stop) { - bfin_write_TWI_MASTER_CTL(bfin_read_TWI_MASTER_CTL() - | STOP); + write_MASTER_CTL(iface, + read_MASTER_CTL(iface) | STOP); + SSYNC(); + } else if (iface->cur_mode == TWI_I2C_MODE_REPEAT && + iface->cur_msg+1 < iface->msg_num) { + write_MASTER_CTL(iface, + read_MASTER_CTL(iface) | RSTART); SSYNC(); } /* Clear interrupt source */ - bfin_write_TWI_INT_STAT(RCVSERV); + write_INT_STAT(iface, RCVSERV); SSYNC(); } if (twi_int_status & MERR) { - bfin_write_TWI_INT_STAT(MERR); - bfin_write_TWI_INT_MASK(0); - bfin_write_TWI_MASTER_STAT(0x3e); - bfin_write_TWI_MASTER_CTL(0); + write_INT_STAT(iface, MERR); + write_INT_MASK(iface, 0); + write_MASTER_STAT(iface, 0x3e); + write_MASTER_CTL(iface, 0); SSYNC(); - iface->result = -1; + iface->result = -EIO; /* if both err and complete int stats are set, return proper * results. */ if (twi_int_status & MCOMP) { - bfin_write_TWI_INT_STAT(MCOMP); - bfin_write_TWI_INT_MASK(0); - bfin_write_TWI_MASTER_CTL(0); + write_INT_STAT(iface, MCOMP); + write_INT_MASK(iface, 0); + write_MASTER_CTL(iface, 0); SSYNC(); /* If it is a quick transfer, only address bug no data, * not an err, return 1. @@ -143,7 +171,7 @@ static void bfin_twi_handle_interrupt(struct bfin_twi_iface *iface) return; } if (twi_int_status & MCOMP) { - bfin_write_TWI_INT_STAT(MCOMP); + write_INT_STAT(iface, MCOMP); SSYNC(); if (iface->cur_mode == TWI_I2C_MODE_COMBINED) { if (iface->readNum == 0) { @@ -152,28 +180,63 @@ static void bfin_twi_handle_interrupt(struct bfin_twi_iface *iface) */ iface->readNum = 1; iface->manual_stop = 1; - bfin_write_TWI_MASTER_CTL( - bfin_read_TWI_MASTER_CTL() - | (0xff << 6)); + write_MASTER_CTL(iface, + read_MASTER_CTL(iface) | (0xff << 6)); } else { /* set the readd number in other * combine mode. */ - bfin_write_TWI_MASTER_CTL( - (bfin_read_TWI_MASTER_CTL() & + write_MASTER_CTL(iface, + (read_MASTER_CTL(iface) & (~(0xff << 6))) | - ( iface->readNum << 6)); + (iface->readNum << 6)); + } + /* remove restart bit and enable master receive */ + write_MASTER_CTL(iface, + read_MASTER_CTL(iface) & ~RSTART); + write_MASTER_CTL(iface, + read_MASTER_CTL(iface) | MEN | MDIR); + SSYNC(); + } else if (iface->cur_mode == TWI_I2C_MODE_REPEAT && + iface->cur_msg+1 < iface->msg_num) { + iface->cur_msg++; + iface->transPtr = iface->pmsg[iface->cur_msg].buf; + iface->writeNum = iface->readNum = + iface->pmsg[iface->cur_msg].len; + /* Set Transmit device address */ + write_MASTER_ADDR(iface, + iface->pmsg[iface->cur_msg].addr); + if (iface->pmsg[iface->cur_msg].flags & I2C_M_RD) + iface->read_write = I2C_SMBUS_READ; + else { + iface->read_write = I2C_SMBUS_WRITE; + /* Transmit first data */ + if (iface->writeNum > 0) { + write_XMT_DATA8(iface, + *(iface->transPtr++)); + iface->writeNum--; + SSYNC(); + } + } + + if (iface->pmsg[iface->cur_msg].len <= 255) + write_MASTER_CTL(iface, + iface->pmsg[iface->cur_msg].len << 6); + else { + write_MASTER_CTL(iface, 0xff << 6); + iface->manual_stop = 1; } /* remove restart bit and enable master receive */ - bfin_write_TWI_MASTER_CTL(bfin_read_TWI_MASTER_CTL() & - ~RSTART); - bfin_write_TWI_MASTER_CTL(bfin_read_TWI_MASTER_CTL() | - MEN | MDIR); + write_MASTER_CTL(iface, + read_MASTER_CTL(iface) & ~RSTART); + write_MASTER_CTL(iface, read_MASTER_CTL(iface) | + MEN | ((iface->read_write == I2C_SMBUS_READ) ? 
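The recurring "<< 6" writes above program the transfer byte count into MASTER_CTL; judging by the 0xff mask, the count field is 8 bits wide, so messages longer than 255 bytes start with the field saturated and manual_stop set, and the interrupt handler later raises STOP by hand. Condensed into one hypothetical helper:

/* Sketch: program the MASTER_CTL byte count for one message.
 * The field layout (bits 6..13) is inferred from the shifts above,
 * not taken from the datasheet. */
static void bfin_twi_set_count(struct bfin_twi_iface *iface, unsigned int len)
{
	if (len <= 255) {
		write_MASTER_CTL(iface, len << 6);
	} else {
		write_MASTER_CTL(iface, 0xff << 6);
		iface->manual_stop = 1;	/* driver will send STOP itself */
	}
}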
+ MDIR : 0)); SSYNC(); } else { iface->result = 1; - bfin_write_TWI_INT_MASK(0); - bfin_write_TWI_MASTER_CTL(0); + write_INT_MASK(iface, 0); + write_MASTER_CTL(iface, 0); SSYNC(); complete(&iface->complete); } @@ -221,91 +284,85 @@ static int bfin_twi_master_xfer(struct i2c_adapter *adap, { struct bfin_twi_iface *iface = adap->algo_data; struct i2c_msg *pmsg; - int i, ret; int rc = 0; - if (!(bfin_read_TWI_CONTROL() & TWI_ENA)) + if (!(read_CONTROL(iface) & TWI_ENA)) return -ENXIO; - while (bfin_read_TWI_MASTER_STAT() & BUSBUSY) { + while (read_MASTER_STAT(iface) & BUSBUSY) yield(); + + iface->pmsg = msgs; + iface->msg_num = num; + iface->cur_msg = 0; + + pmsg = &msgs[0]; + if (pmsg->flags & I2C_M_TEN) { + dev_err(&adap->dev, "10 bits addr not supported!\n"); + return -EINVAL; } - ret = 0; - for (i = 0; rc >= 0 && i < num; i++) { - pmsg = &msgs[i]; - if (pmsg->flags & I2C_M_TEN) { - dev_err(&(adap->dev), "i2c-bfin-twi: 10 bits addr " - "not supported !\n"); - rc = -EINVAL; - break; - } + iface->cur_mode = TWI_I2C_MODE_REPEAT; + iface->manual_stop = 0; + iface->transPtr = pmsg->buf; + iface->writeNum = iface->readNum = pmsg->len; + iface->result = 0; + iface->timeout_count = 10; + init_completion(&(iface->complete)); + /* Set Transmit device address */ + write_MASTER_ADDR(iface, pmsg->addr); - iface->cur_mode = TWI_I2C_MODE_STANDARD; - iface->manual_stop = 0; - iface->transPtr = pmsg->buf; - iface->writeNum = iface->readNum = pmsg->len; - iface->result = 0; - iface->timeout_count = 10; - /* Set Transmit device address */ - bfin_write_TWI_MASTER_ADDR(pmsg->addr); - - /* FIFO Initiation. Data in FIFO should be - * discarded before start a new operation. - */ - bfin_write_TWI_FIFO_CTL(0x3); - SSYNC(); - bfin_write_TWI_FIFO_CTL(0); - SSYNC(); + /* FIFO Initiation. Data in FIFO should be + * discarded before start a new operation. + */ + write_FIFO_CTL(iface, 0x3); + SSYNC(); + write_FIFO_CTL(iface, 0); + SSYNC(); - if (pmsg->flags & I2C_M_RD) - iface->read_write = I2C_SMBUS_READ; - else { - iface->read_write = I2C_SMBUS_WRITE; - /* Transmit first data */ - if (iface->writeNum > 0) { - bfin_write_TWI_XMT_DATA8(*(iface->transPtr++)); - iface->writeNum--; - SSYNC(); - } + if (pmsg->flags & I2C_M_RD) + iface->read_write = I2C_SMBUS_READ; + else { + iface->read_write = I2C_SMBUS_WRITE; + /* Transmit first data */ + if (iface->writeNum > 0) { + write_XMT_DATA8(iface, *(iface->transPtr++)); + iface->writeNum--; + SSYNC(); } + } - /* clear int stat */ - bfin_write_TWI_INT_STAT(MERR|MCOMP|XMTSERV|RCVSERV); + /* clear int stat */ + write_INT_STAT(iface, MERR | MCOMP | XMTSERV | RCVSERV); - /* Interrupt mask . Enable XMT, RCV interrupt */ - bfin_write_TWI_INT_MASK(MCOMP | MERR | - ((iface->read_write == I2C_SMBUS_READ)? - RCVSERV : XMTSERV)); - SSYNC(); + /* Interrupt mask . 
Enable XMT, RCV interrupt */ + write_INT_MASK(iface, MCOMP | MERR | RCVSERV | XMTSERV); + SSYNC(); - if (pmsg->len > 0 && pmsg->len <= 255) - bfin_write_TWI_MASTER_CTL(pmsg->len << 6); - else if (pmsg->len > 255) { - bfin_write_TWI_MASTER_CTL(0xff << 6); - iface->manual_stop = 1; - } else - break; + if (pmsg->len <= 255) + write_MASTER_CTL(iface, pmsg->len << 6); + else { + write_MASTER_CTL(iface, 0xff << 6); + iface->manual_stop = 1; + } - iface->timeout_timer.expires = jiffies + POLL_TIMEOUT; - add_timer(&iface->timeout_timer); + iface->timeout_timer.expires = jiffies + POLL_TIMEOUT; + add_timer(&iface->timeout_timer); - /* Master enable */ - bfin_write_TWI_MASTER_CTL(bfin_read_TWI_MASTER_CTL() | MEN | - ((iface->read_write == I2C_SMBUS_READ) ? MDIR : 0) | - ((CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ>100) ? FAST : 0)); - SSYNC(); + /* Master enable */ + write_MASTER_CTL(iface, read_MASTER_CTL(iface) | MEN | + ((iface->read_write == I2C_SMBUS_READ) ? MDIR : 0) | + ((CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ > 100) ? FAST : 0)); + SSYNC(); - wait_for_completion(&iface->complete); + wait_for_completion(&iface->complete); - rc = iface->result; - if (rc == 1) - ret++; - else if (rc == -1) - break; - } + rc = iface->result; - return ret; + if (rc == 1) + return num; + else + return rc; } /* @@ -319,12 +376,11 @@ int bfin_twi_smbus_xfer(struct i2c_adapter *adap, u16 addr, struct bfin_twi_iface *iface = adap->algo_data; int rc = 0; - if (!(bfin_read_TWI_CONTROL() & TWI_ENA)) + if (!(read_CONTROL(iface) & TWI_ENA)) return -ENXIO; - while (bfin_read_TWI_MASTER_STAT() & BUSBUSY) { + while (read_MASTER_STAT(iface) & BUSBUSY) yield(); - } iface->writeNum = 0; iface->readNum = 0; @@ -392,19 +448,20 @@ int bfin_twi_smbus_xfer(struct i2c_adapter *adap, u16 addr, iface->read_write = read_write; iface->command = command; iface->timeout_count = 10; + init_completion(&(iface->complete)); /* FIFO Initiation. Data in FIFO should be discarded before * start a new operation. */ - bfin_write_TWI_FIFO_CTL(0x3); + write_FIFO_CTL(iface, 0x3); SSYNC(); - bfin_write_TWI_FIFO_CTL(0); + write_FIFO_CTL(iface, 0); /* clear int stat */ - bfin_write_TWI_INT_STAT(MERR|MCOMP|XMTSERV|RCVSERV); + write_INT_STAT(iface, MERR | MCOMP | XMTSERV | RCVSERV); /* Set Transmit device address */ - bfin_write_TWI_MASTER_ADDR(addr); + write_MASTER_ADDR(iface, addr); SSYNC(); iface->timeout_timer.expires = jiffies + POLL_TIMEOUT; @@ -412,60 +469,64 @@ int bfin_twi_smbus_xfer(struct i2c_adapter *adap, u16 addr, switch (iface->cur_mode) { case TWI_I2C_MODE_STANDARDSUB: - bfin_write_TWI_XMT_DATA8(iface->command); - bfin_write_TWI_INT_MASK(MCOMP | MERR | + write_XMT_DATA8(iface, iface->command); + write_INT_MASK(iface, MCOMP | MERR | ((iface->read_write == I2C_SMBUS_READ) ? RCVSERV : XMTSERV)); SSYNC(); if (iface->writeNum + 1 <= 255) - bfin_write_TWI_MASTER_CTL((iface->writeNum + 1) << 6); + write_MASTER_CTL(iface, (iface->writeNum + 1) << 6); else { - bfin_write_TWI_MASTER_CTL(0xff << 6); + write_MASTER_CTL(iface, 0xff << 6); iface->manual_stop = 1; } /* Master enable */ - bfin_write_TWI_MASTER_CTL(bfin_read_TWI_MASTER_CTL() | MEN | + write_MASTER_CTL(iface, read_MASTER_CTL(iface) | MEN | ((CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ>100) ? 
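With the per-message loop gone from bfin_twi_master_xfer(), only the first message is programmed here and TWI_I2C_MODE_REPEAT lets the interrupt handler chain the remaining messages with a repeated START, so a single wait_for_completion() covers the whole transfer. Client drivers are unaffected; a typical write-then-read still looks like this (addresses and lengths are purely illustrative):

/* Hypothetical client helper: write one register index, read back 4 bytes.
 * The adapter runs START, msgs[0], repeated START, msgs[1], STOP. */
static int example_read_reg(struct i2c_adapter *adap, u8 reg, u8 *buf)
{
	struct i2c_msg msgs[2] = {
		{ .addr = 0x50, .flags = 0,        .len = 1, .buf = &reg },
		{ .addr = 0x50, .flags = I2C_M_RD, .len = 4, .buf = buf },
	};

	/* i2c_transfer() returns the number of messages handled (2 here). */
	return i2c_transfer(adap, msgs, 2) == 2 ? 0 : -EIO;
}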
FAST : 0)); break; case TWI_I2C_MODE_COMBINED: - bfin_write_TWI_XMT_DATA8(iface->command); - bfin_write_TWI_INT_MASK(MCOMP | MERR | RCVSERV | XMTSERV); + write_XMT_DATA8(iface, iface->command); + write_INT_MASK(iface, MCOMP | MERR | RCVSERV | XMTSERV); SSYNC(); if (iface->writeNum > 0) - bfin_write_TWI_MASTER_CTL((iface->writeNum + 1) << 6); + write_MASTER_CTL(iface, (iface->writeNum + 1) << 6); else - bfin_write_TWI_MASTER_CTL(0x1 << 6); + write_MASTER_CTL(iface, 0x1 << 6); /* Master enable */ - bfin_write_TWI_MASTER_CTL(bfin_read_TWI_MASTER_CTL() | MEN | + write_MASTER_CTL(iface, read_MASTER_CTL(iface) | MEN | ((CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ>100) ? FAST : 0)); break; default: - bfin_write_TWI_MASTER_CTL(0); + write_MASTER_CTL(iface, 0); if (size != I2C_SMBUS_QUICK) { /* Don't access xmit data register when this is a * read operation. */ if (iface->read_write != I2C_SMBUS_READ) { if (iface->writeNum > 0) { - bfin_write_TWI_XMT_DATA8(*(iface->transPtr++)); + write_XMT_DATA8(iface, + *(iface->transPtr++)); if (iface->writeNum <= 255) - bfin_write_TWI_MASTER_CTL(iface->writeNum << 6); + write_MASTER_CTL(iface, + iface->writeNum << 6); else { - bfin_write_TWI_MASTER_CTL(0xff << 6); + write_MASTER_CTL(iface, + 0xff << 6); iface->manual_stop = 1; } iface->writeNum--; } else { - bfin_write_TWI_XMT_DATA8(iface->command); - bfin_write_TWI_MASTER_CTL(1 << 6); + write_XMT_DATA8(iface, iface->command); + write_MASTER_CTL(iface, 1 << 6); } } else { if (iface->readNum > 0 && iface->readNum <= 255) - bfin_write_TWI_MASTER_CTL(iface->readNum << 6); + write_MASTER_CTL(iface, + iface->readNum << 6); else if (iface->readNum > 255) { - bfin_write_TWI_MASTER_CTL(0xff << 6); + write_MASTER_CTL(iface, 0xff << 6); iface->manual_stop = 1; } else { del_timer(&iface->timeout_timer); @@ -473,13 +534,13 @@ int bfin_twi_smbus_xfer(struct i2c_adapter *adap, u16 addr, } } } - bfin_write_TWI_INT_MASK(MCOMP | MERR | + write_INT_MASK(iface, MCOMP | MERR | ((iface->read_write == I2C_SMBUS_READ) ? RCVSERV : XMTSERV)); SSYNC(); /* Master enable */ - bfin_write_TWI_MASTER_CTL(bfin_read_TWI_MASTER_CTL() | MEN | + write_MASTER_CTL(iface, read_MASTER_CTL(iface) | MEN | ((iface->read_write == I2C_SMBUS_READ) ? MDIR : 0) | ((CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ > 100) ? 
FAST : 0)); break; @@ -514,10 +575,10 @@ static struct i2c_algorithm bfin_twi_algorithm = { static int i2c_bfin_twi_suspend(struct platform_device *dev, pm_message_t state) { -/* struct bfin_twi_iface *iface = platform_get_drvdata(dev);*/ + struct bfin_twi_iface *iface = platform_get_drvdata(dev); /* Disable TWI */ - bfin_write_TWI_CONTROL(bfin_read_TWI_CONTROL() & ~TWI_ENA); + write_CONTROL(iface, read_CONTROL(iface) & ~TWI_ENA); SSYNC(); return 0; @@ -525,24 +586,52 @@ static int i2c_bfin_twi_suspend(struct platform_device *dev, pm_message_t state) static int i2c_bfin_twi_resume(struct platform_device *dev) { -/* struct bfin_twi_iface *iface = platform_get_drvdata(dev);*/ + struct bfin_twi_iface *iface = platform_get_drvdata(dev); /* Enable TWI */ - bfin_write_TWI_CONTROL(bfin_read_TWI_CONTROL() | TWI_ENA); + write_CONTROL(iface, read_CONTROL(iface) | TWI_ENA); SSYNC(); return 0; } -static int i2c_bfin_twi_probe(struct platform_device *dev) +static int i2c_bfin_twi_probe(struct platform_device *pdev) { - struct bfin_twi_iface *iface = &twi_iface; + struct bfin_twi_iface *iface; struct i2c_adapter *p_adap; + struct resource *res; int rc; + iface = kzalloc(sizeof(struct bfin_twi_iface), GFP_KERNEL); + if (!iface) { + dev_err(&pdev->dev, "Cannot allocate memory\n"); + rc = -ENOMEM; + goto out_error_nomem; + } + spin_lock_init(&(iface->lock)); - init_completion(&(iface->complete)); - iface->irq = IRQ_TWI; + + /* Find and map our resources */ + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (res == NULL) { + dev_err(&pdev->dev, "Cannot get IORESOURCE_MEM\n"); + rc = -ENOENT; + goto out_error_get_res; + } + + iface->regs_base = ioremap(res->start, res->end - res->start + 1); + if (iface->regs_base == NULL) { + dev_err(&pdev->dev, "Cannot map IO\n"); + rc = -ENXIO; + goto out_error_ioremap; + } + + iface->irq = platform_get_irq(pdev, 0); + if (iface->irq < 0) { + dev_err(&pdev->dev, "No IRQ specified\n"); + rc = -ENOENT; + goto out_error_no_irq; + } init_timer(&(iface->timeout_timer)); iface->timeout_timer.function = bfin_twi_timeout; @@ -550,39 +639,63 @@ static int i2c_bfin_twi_probe(struct platform_device *dev) p_adap = &iface->adap; p_adap->id = I2C_HW_BLACKFIN; - p_adap->nr = dev->id; - strlcpy(p_adap->name, dev->name, sizeof(p_adap->name)); + p_adap->nr = pdev->id; + strlcpy(p_adap->name, pdev->name, sizeof(p_adap->name)); p_adap->algo = &bfin_twi_algorithm; p_adap->algo_data = iface; p_adap->class = I2C_CLASS_ALL; - p_adap->dev.parent = &dev->dev; + p_adap->dev.parent = &pdev->dev; + + rc = peripheral_request_list(pin_req[pdev->id], "i2c-bfin-twi"); + if (rc) { + dev_err(&pdev->dev, "Can't setup pin mux!\n"); + goto out_error_pin_mux; + } rc = request_irq(iface->irq, bfin_twi_interrupt_entry, - IRQF_DISABLED, dev->name, iface); + IRQF_DISABLED, pdev->name, iface); if (rc) { - dev_err(&(p_adap->dev), "i2c-bfin-twi: can't get IRQ %d !\n", - iface->irq); - return -ENODEV; + dev_err(&pdev->dev, "Can't get IRQ %d !\n", iface->irq); + rc = -ENODEV; + goto out_error_req_irq; } /* Set TWI internal clock as 10MHz */ - bfin_write_TWI_CONTROL(((get_sclk() / 1024 / 1024 + 5) / 10) & 0x7F); + write_CONTROL(iface, ((get_sclk() / 1024 / 1024 + 5) / 10) & 0x7F); /* Set Twi interface clock as specified */ - bfin_write_TWI_CLKDIV((( 5*1024 / CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ ) - << 8) | (( 5*1024 / CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ ) + write_CLKDIV(iface, ((5*1024 / CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ) + << 8) | ((5*1024 / CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ) & 0xFF)); /* Enable TWI */ - 
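The two writes just above set the TWI internal time reference to roughly 10 MHz and then derive CLKDIV from CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ, packing the same count into both bytes. A worked example, assuming the two bytes are the SCL high and low phase counts in 10 MHz ticks (the helper name is made up; it just mirrors the expression above):

/* Illustrative only. With CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ = 100:
 *   5 * 1024 / 100 = 51 ticks per phase
 *   SCL period = 2 * 51 ticks at 10 MHz = 10.2 us  ->  ~98 kHz actual SCL
 */
static inline u16 bfin_twi_clkdiv(unsigned int khz)
{
	u16 phase = 5 * 1024 / khz;

	return (phase << 8) | (phase & 0xff);	/* HI byte | LO byte */
}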
bfin_write_TWI_CONTROL(bfin_read_TWI_CONTROL() | TWI_ENA); + write_CONTROL(iface, read_CONTROL(iface) | TWI_ENA); SSYNC(); rc = i2c_add_numbered_adapter(p_adap); - if (rc < 0) - free_irq(iface->irq, iface); - else - platform_set_drvdata(dev, iface); + if (rc < 0) { + dev_err(&pdev->dev, "Can't add i2c adapter!\n"); + goto out_error_add_adapter; + } + + platform_set_drvdata(pdev, iface); + dev_info(&pdev->dev, "Blackfin BF5xx on-chip I2C TWI Contoller, " + "regs_base@%p\n", iface->regs_base); + + return 0; + +out_error_add_adapter: + free_irq(iface->irq, iface); +out_error_req_irq: +out_error_no_irq: + peripheral_free_list(pin_req[pdev->id]); +out_error_pin_mux: + iounmap(iface->regs_base); +out_error_ioremap: +out_error_get_res: + kfree(iface); +out_error_nomem: return rc; } @@ -594,6 +707,9 @@ static int i2c_bfin_twi_remove(struct platform_device *pdev) i2c_del_adapter(&(iface->adap)); free_irq(iface->irq, iface); + peripheral_free_list(pin_req[pdev->id]); + iounmap(iface->regs_base); + kfree(iface); return 0; } @@ -611,8 +727,6 @@ static struct platform_driver i2c_bfin_twi_driver = { static int __init i2c_bfin_twi_init(void) { - pr_info("I2C: Blackfin I2C TWI driver\n"); - return platform_driver_register(&i2c_bfin_twi_driver); } @@ -621,9 +735,10 @@ static void __exit i2c_bfin_twi_exit(void) platform_driver_unregister(&i2c_bfin_twi_driver); } -MODULE_AUTHOR("Sonic Zhang <sonic.zhang@analog.com>"); -MODULE_DESCRIPTION("I2C-Bus adapter routines for Blackfin TWI"); -MODULE_LICENSE("GPL"); - module_init(i2c_bfin_twi_init); module_exit(i2c_bfin_twi_exit); + +MODULE_AUTHOR("Bryan Wu, Sonic Zhang"); +MODULE_DESCRIPTION("Blackfin BF5xx on-chip I2C TWI Contoller Driver"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:i2c-bfin-twi"); diff --git a/drivers/i2c/busses/i2c-davinci.c b/drivers/i2c/busses/i2c-davinci.c index fde26345a37..7ecbfc429b1 100644 --- a/drivers/i2c/busses/i2c-davinci.c +++ b/drivers/i2c/busses/i2c-davinci.c @@ -328,7 +328,7 @@ i2c_davinci_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num) int i; int ret; - dev_dbg(dev->dev, "%s: msgs: %d\n", __FUNCTION__, num); + dev_dbg(dev->dev, "%s: msgs: %d\n", __func__, num); ret = i2c_davinci_wait_bus_not_busy(dev, 1); if (ret < 0) { @@ -342,7 +342,7 @@ i2c_davinci_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num) return ret; } - dev_dbg(dev->dev, "%s:%d ret: %d\n", __FUNCTION__, __LINE__, ret); + dev_dbg(dev->dev, "%s:%d ret: %d\n", __func__, __LINE__, ret); return num; } @@ -364,7 +364,7 @@ static irqreturn_t i2c_davinci_isr(int this_irq, void *dev_id) u16 w; while ((stat = davinci_i2c_read_reg(dev, DAVINCI_I2C_IVR_REG))) { - dev_dbg(dev->dev, "%s: stat=0x%x\n", __FUNCTION__, stat); + dev_dbg(dev->dev, "%s: stat=0x%x\n", __func__, stat); if (count++ == 100) { dev_warn(dev->dev, "Too much work in one IRQ\n"); break; @@ -553,6 +553,9 @@ static int davinci_i2c_remove(struct platform_device *pdev) return 0; } +/* work with hotplug and coldplug */ +MODULE_ALIAS("platform:i2c_davinci"); + static struct platform_driver davinci_i2c_driver = { .probe = davinci_i2c_probe, .remove = davinci_i2c_remove, diff --git a/drivers/i2c/busses/i2c-gpio.c b/drivers/i2c/busses/i2c-gpio.c index 3ca19fc234f..7c1b762aa68 100644 --- a/drivers/i2c/busses/i2c-gpio.c +++ b/drivers/i2c/busses/i2c-gpio.c @@ -220,3 +220,4 @@ module_exit(i2c_gpio_exit); MODULE_AUTHOR("Haavard Skinnemoen <hskinnemoen@atmel.com>"); MODULE_DESCRIPTION("Platform-independent bitbanging I2C driver"); MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:i2c-gpio"); diff 
--git a/drivers/i2c/busses/i2c-ibm_iic.c b/drivers/i2c/busses/i2c-ibm_iic.c index 22bb247d0e6..85dbf34382e 100644 --- a/drivers/i2c/busses/i2c-ibm_iic.c +++ b/drivers/i2c/busses/i2c-ibm_iic.c @@ -6,6 +6,9 @@ * Copyright (c) 2003, 2004 Zultys Technologies. * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net> * + * Copyright (c) 2008 PIKA Technologies + * Sean MacLennan <smaclennan@pikatech.com> + * * Based on original work by * Ian DaSilva <idasilva@mvista.com> * Armin Kuster <akuster@mvista.com> @@ -39,12 +42,17 @@ #include <asm/io.h> #include <linux/i2c.h> #include <linux/i2c-id.h> + +#ifdef CONFIG_IBM_OCP #include <asm/ocp.h> #include <asm/ibm4xx.h> +#else +#include <linux/of_platform.h> +#endif #include "i2c-ibm_iic.h" -#define DRIVER_VERSION "2.1" +#define DRIVER_VERSION "2.2" MODULE_DESCRIPTION("IBM IIC driver v" DRIVER_VERSION); MODULE_LICENSE("GPL"); @@ -650,13 +658,14 @@ static inline u8 iic_clckdiv(unsigned int opb) opb /= 1000000; if (opb < 20 || opb > 150){ - printk(KERN_CRIT "ibm-iic: invalid OPB clock frequency %u MHz\n", + printk(KERN_WARNING "ibm-iic: invalid OPB clock frequency %u MHz\n", opb); opb = opb < 20 ? 20 : 150; } return (u8)((opb + 9) / 10 - 1); } +#ifdef CONFIG_IBM_OCP /* * Register single IIC interface */ @@ -672,7 +681,7 @@ static int __devinit iic_probe(struct ocp_device *ocp){ ocp->def->index); if (!(dev = kzalloc(sizeof(*dev), GFP_KERNEL))) { - printk(KERN_CRIT "ibm-iic%d: failed to allocate device data\n", + printk(KERN_ERR "ibm-iic%d: failed to allocate device data\n", ocp->def->index); return -ENOMEM; } @@ -687,7 +696,7 @@ static int __devinit iic_probe(struct ocp_device *ocp){ } if (!(dev->vaddr = ioremap(ocp->def->paddr, sizeof(struct iic_regs)))){ - printk(KERN_CRIT "ibm-iic%d: failed to ioremap device registers\n", + printk(KERN_ERR "ibm-iic%d: failed to ioremap device registers\n", dev->idx); ret = -ENXIO; goto fail2; @@ -745,7 +754,7 @@ static int __devinit iic_probe(struct ocp_device *ocp){ adap->nr = dev->idx >= 0 ? dev->idx : 0; if ((ret = i2c_add_numbered_adapter(adap)) < 0) { - printk(KERN_CRIT "ibm-iic%d: failed to register i2c adapter\n", + printk(KERN_ERR "ibm-iic%d: failed to register i2c adapter\n", dev->idx); goto fail; } @@ -778,7 +787,7 @@ static void __devexit iic_remove(struct ocp_device *ocp) struct ibm_iic_private* dev = (struct ibm_iic_private*)ocp_get_drvdata(ocp); BUG_ON(dev == NULL); if (i2c_del_adapter(&dev->adap)){ - printk(KERN_CRIT "ibm-iic%d: failed to delete i2c adapter :(\n", + printk(KERN_ERR "ibm-iic%d: failed to delete i2c adapter :(\n", dev->idx); /* That's *very* bad, just shutdown IRQ ... */ if (dev->irq >= 0){ @@ -828,5 +837,181 @@ static void __exit iic_exit(void) ocp_unregister_driver(&ibm_iic_driver); } +#else /* !CONFIG_IBM_OCP */ + +static int __devinit iic_request_irq(struct of_device *ofdev, + struct ibm_iic_private *dev) +{ + struct device_node *np = ofdev->node; + int irq; + + if (iic_force_poll) + return NO_IRQ; + + irq = irq_of_parse_and_map(np, 0); + if (irq == NO_IRQ) { + dev_err(&ofdev->dev, "irq_of_parse_and_map failed\n"); + return NO_IRQ; + } + + /* Disable interrupts until we finish initialization, assumes + * level-sensitive IRQ setup... 
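iic_clckdiv() now only warns (KERN_WARNING instead of KERN_CRIT) and clamps an out-of-range OPB frequency, then rounds the MHz value up to the next multiple of 10 to choose the divider. A worked example, assuming a 66 MHz OPB bus:

/* Illustrative arithmetic for iic_clckdiv(), not part of the driver:
 *   opb = 66                        (after /1000000 and the 20..150 clamp)
 *   (66 + 9) / 10 - 1 = 7 - 1 = 6   ->  dev->clckdiv = 6
 */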
+ */ + iic_interrupt_mode(dev, 0); + if (request_irq(irq, iic_handler, 0, "IBM IIC", dev)) { + dev_err(&ofdev->dev, "request_irq %d failed\n", irq); + /* Fallback to the polling mode */ + return NO_IRQ; + } + + return irq; +} + +/* + * Register single IIC interface + */ +static int __devinit iic_probe(struct of_device *ofdev, + const struct of_device_id *match) +{ + struct device_node *np = ofdev->node; + struct ibm_iic_private *dev; + struct i2c_adapter *adap; + const u32 *indexp, *freq; + int ret; + + dev = kzalloc(sizeof(*dev), GFP_KERNEL); + if (!dev) { + dev_err(&ofdev->dev, "failed to allocate device data\n"); + return -ENOMEM; + } + + dev_set_drvdata(&ofdev->dev, dev); + + indexp = of_get_property(np, "index", NULL); + if (!indexp) { + dev_err(&ofdev->dev, "no index specified\n"); + ret = -EINVAL; + goto error_cleanup; + } + dev->idx = *indexp; + + dev->vaddr = of_iomap(np, 0); + if (dev->vaddr == NULL) { + dev_err(&ofdev->dev, "failed to iomap device\n"); + ret = -ENXIO; + goto error_cleanup; + } + + init_waitqueue_head(&dev->wq); + + dev->irq = iic_request_irq(ofdev, dev); + if (dev->irq == NO_IRQ) + dev_warn(&ofdev->dev, "using polling mode\n"); + + /* Board specific settings */ + if (iic_force_fast || of_get_property(np, "fast-mode", NULL)) + dev->fast_mode = 1; + + freq = of_get_property(np, "clock-frequency", NULL); + if (freq == NULL) { + freq = of_get_property(np->parent, "clock-frequency", NULL); + if (freq == NULL) { + dev_err(&ofdev->dev, "Unable to get bus frequency\n"); + ret = -EINVAL; + goto error_cleanup; + } + } + + dev->clckdiv = iic_clckdiv(*freq); + dev_dbg(&ofdev->dev, "clckdiv = %d\n", dev->clckdiv); + + /* Initialize IIC interface */ + iic_dev_init(dev); + + /* Register it with i2c layer */ + adap = &dev->adap; + adap->dev.parent = &ofdev->dev; + strlcpy(adap->name, "IBM IIC", sizeof(adap->name)); + i2c_set_adapdata(adap, dev); + adap->id = I2C_HW_OCP; + adap->class = I2C_CLASS_HWMON; + adap->algo = &iic_algo; + adap->timeout = 1; + adap->nr = dev->idx; + + ret = i2c_add_numbered_adapter(adap); + if (ret < 0) { + dev_err(&ofdev->dev, "failed to register i2c adapter\n"); + goto error_cleanup; + } + + dev_info(&ofdev->dev, "using %s mode\n", + dev->fast_mode ? 
"fast (400 kHz)" : "standard (100 kHz)"); + + return 0; + +error_cleanup: + if (dev->irq != NO_IRQ) { + iic_interrupt_mode(dev, 0); + free_irq(dev->irq, dev); + } + + if (dev->vaddr) + iounmap(dev->vaddr); + + dev_set_drvdata(&ofdev->dev, NULL); + kfree(dev); + return ret; +} + +/* + * Cleanup initialized IIC interface + */ +static int __devexit iic_remove(struct of_device *ofdev) +{ + struct ibm_iic_private *dev = dev_get_drvdata(&ofdev->dev); + + dev_set_drvdata(&ofdev->dev, NULL); + + i2c_del_adapter(&dev->adap); + + if (dev->irq != NO_IRQ) { + iic_interrupt_mode(dev, 0); + free_irq(dev->irq, dev); + } + + iounmap(dev->vaddr); + kfree(dev); + + return 0; +} + +static const struct of_device_id ibm_iic_match[] = { + { .compatible = "ibm,iic-405ex", }, + { .compatible = "ibm,iic-405gp", }, + { .compatible = "ibm,iic-440gp", }, + { .compatible = "ibm,iic-440gpx", }, + { .compatible = "ibm,iic-440grx", }, + {} +}; + +static struct of_platform_driver ibm_iic_driver = { + .name = "ibm-iic", + .match_table = ibm_iic_match, + .probe = iic_probe, + .remove = __devexit_p(iic_remove), +}; + +static int __init iic_init(void) +{ + return of_register_platform_driver(&ibm_iic_driver); +} + +static void __exit iic_exit(void) +{ + of_unregister_platform_driver(&ibm_iic_driver); +} +#endif /* CONFIG_IBM_OCP */ + module_init(iic_init); module_exit(iic_exit); diff --git a/drivers/i2c/busses/i2c-iop3xx.c b/drivers/i2c/busses/i2c-iop3xx.c index ab41400c883..39884e79759 100644 --- a/drivers/i2c/busses/i2c-iop3xx.c +++ b/drivers/i2c/busses/i2c-iop3xx.c @@ -550,3 +550,4 @@ module_exit (i2c_iop3xx_exit); MODULE_AUTHOR("D-TACQ Solutions Ltd <www.d-tacq.com>"); MODULE_DESCRIPTION("IOP3xx iic algorithm and driver"); MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:IOP3xx-I2C"); diff --git a/drivers/i2c/busses/i2c-ixp2000.c b/drivers/i2c/busses/i2c-ixp2000.c index 6352121a282..5af9e6521e6 100644 --- a/drivers/i2c/busses/i2c-ixp2000.c +++ b/drivers/i2c/busses/i2c-ixp2000.c @@ -164,4 +164,5 @@ module_exit(ixp2000_i2c_exit); MODULE_AUTHOR ("Deepak Saxena <dsaxena@plexity.net>"); MODULE_DESCRIPTION("IXP2000 GPIO-based I2C bus driver"); MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:IXP2000-I2C"); diff --git a/drivers/i2c/busses/i2c-mpc.c b/drivers/i2c/busses/i2c-mpc.c index bbe787b243b..18beb0ad7bf 100644 --- a/drivers/i2c/busses/i2c-mpc.c +++ b/drivers/i2c/busses/i2c-mpc.c @@ -392,6 +392,9 @@ static int fsl_i2c_remove(struct platform_device *pdev) return 0; }; +/* work with hotplug and coldplug */ +MODULE_ALIAS("platform:fsl-i2c"); + /* Structure for a device driver */ static struct platform_driver fsl_i2c_driver = { .probe = fsl_i2c_probe, diff --git a/drivers/i2c/busses/i2c-ocores.c b/drivers/i2c/busses/i2c-ocores.c index e417c2c3ca2..f145692cbb7 100644 --- a/drivers/i2c/busses/i2c-ocores.c +++ b/drivers/i2c/busses/i2c-ocores.c @@ -312,6 +312,9 @@ static int __devexit ocores_i2c_remove(struct platform_device* pdev) return 0; } +/* work with hotplug and coldplug */ +MODULE_ALIAS("platform:ocores-i2c"); + static struct platform_driver ocores_i2c_driver = { .probe = ocores_i2c_probe, .remove = __devexit_p(ocores_i2c_remove), diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c index 7ba31770d77..e7eb7bf9dde 100644 --- a/drivers/i2c/busses/i2c-omap.c +++ b/drivers/i2c/busses/i2c-omap.c @@ -693,3 +693,4 @@ module_exit(omap_i2c_exit_driver); MODULE_AUTHOR("MontaVista Software, Inc. 
(and others)"); MODULE_DESCRIPTION("TI OMAP I2C bus adapter"); MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:i2c_omap"); diff --git a/drivers/i2c/busses/i2c-pca-isa.c b/drivers/i2c/busses/i2c-pca-isa.c index 496ee875eb4..a119784bae1 100644 --- a/drivers/i2c/busses/i2c-pca-isa.c +++ b/drivers/i2c/busses/i2c-pca-isa.c @@ -1,6 +1,7 @@ /* * i2c-pca-isa.c driver for PCA9564 on ISA boards * Copyright (C) 2004 Arcom Control Systems + * Copyright (C) 2008 Pengutronix * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -22,11 +23,9 @@ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/delay.h> -#include <linux/slab.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/wait.h> - #include <linux/isa.h> #include <linux/i2c.h> #include <linux/i2c-algo-pca.h> @@ -34,13 +33,9 @@ #include <asm/io.h> #include <asm/irq.h> -#include "../algos/i2c-algo-pca.h" - +#define DRIVER "i2c-pca-isa" #define IO_SIZE 4 -#undef DEBUG_IO -//#define DEBUG_IO - static unsigned long base = 0x330; static int irq = 10; @@ -48,22 +43,9 @@ static int irq = 10; * in the actual clock rate */ static int clock = I2C_PCA_CON_59kHz; -static int own = 0x55; - static wait_queue_head_t pca_wait; -static int pca_isa_getown(struct i2c_algo_pca_data *adap) -{ - return (own); -} - -static int pca_isa_getclock(struct i2c_algo_pca_data *adap) -{ - return (clock); -} - -static void -pca_isa_writebyte(struct i2c_algo_pca_data *adap, int reg, int val) +static void pca_isa_writebyte(void *pd, int reg, int val) { #ifdef DEBUG_IO static char *names[] = { "T/O", "DAT", "ADR", "CON" }; @@ -72,44 +54,49 @@ pca_isa_writebyte(struct i2c_algo_pca_data *adap, int reg, int val) outb(val, base+reg); } -static int -pca_isa_readbyte(struct i2c_algo_pca_data *adap, int reg) +static int pca_isa_readbyte(void *pd, int reg) { int res = inb(base+reg); #ifdef DEBUG_IO { - static char *names[] = { "STA", "DAT", "ADR", "CON" }; + static char *names[] = { "STA", "DAT", "ADR", "CON" }; printk("*** read %s => %#04x\n", names[reg], res); } #endif return res; } -static int pca_isa_waitforinterrupt(struct i2c_algo_pca_data *adap) +static int pca_isa_waitforcompletion(void *pd) { int ret = 0; if (irq > -1) { ret = wait_event_interruptible(pca_wait, - pca_isa_readbyte(adap, I2C_PCA_CON) & I2C_PCA_CON_SI); + pca_isa_readbyte(pd, I2C_PCA_CON) & I2C_PCA_CON_SI); } else { - while ((pca_isa_readbyte(adap, I2C_PCA_CON) & I2C_PCA_CON_SI) == 0) + while ((pca_isa_readbyte(pd, I2C_PCA_CON) & I2C_PCA_CON_SI) == 0) udelay(100); } return ret; } +static void pca_isa_resetchip(void *pd) +{ + /* apparently only an external reset will do it. 
not a lot can be done */ + printk(KERN_WARNING DRIVER ": Haven't figured out how to do a reset yet\n"); +} + static irqreturn_t pca_handler(int this_irq, void *dev_id) { wake_up_interruptible(&pca_wait); return IRQ_HANDLED; } static struct i2c_algo_pca_data pca_isa_data = { - .get_own = pca_isa_getown, - .get_clock = pca_isa_getclock, + /* .data intentionally left NULL, not needed with ISA */ .write_byte = pca_isa_writebyte, .read_byte = pca_isa_readbyte, - .wait_for_interrupt = pca_isa_waitforinterrupt, + .wait_for_completion = pca_isa_waitforcompletion, + .reset_chip = pca_isa_resetchip, }; static struct i2c_adapter pca_isa_ops = { @@ -117,6 +104,7 @@ static struct i2c_adapter pca_isa_ops = { .id = I2C_HW_A_ISA, .algo_data = &pca_isa_data, .name = "PCA9564 ISA Adapter", + .timeout = 100, }; static int __devinit pca_isa_probe(struct device *dev, unsigned int id) @@ -144,6 +132,7 @@ static int __devinit pca_isa_probe(struct device *dev, unsigned int id) } } + pca_isa_data.i2c_clock = clock; if (i2c_pca_add_bus(&pca_isa_ops) < 0) { dev_err(dev, "Failed to add i2c bus\n"); goto out_irq; @@ -178,7 +167,7 @@ static struct isa_driver pca_isa_driver = { .remove = __devexit_p(pca_isa_remove), .driver = { .owner = THIS_MODULE, - .name = "i2c-pca-isa", + .name = DRIVER, } }; @@ -204,7 +193,5 @@ MODULE_PARM_DESC(irq, "IRQ"); module_param(clock, int, 0); MODULE_PARM_DESC(clock, "Clock rate as described in table 1 of PCA9564 datasheet"); -module_param(own, int, 0); /* the driver can't do slave mode, so there's no real point in this */ - module_init(pca_isa_init); module_exit(pca_isa_exit); diff --git a/drivers/i2c/busses/i2c-pca-platform.c b/drivers/i2c/busses/i2c-pca-platform.c new file mode 100644 index 00000000000..9d75f51e8f0 --- /dev/null +++ b/drivers/i2c/busses/i2c-pca-platform.c @@ -0,0 +1,298 @@ +/* + * i2c_pca_platform.c + * + * Platform driver for the PCA9564 I2C controller. + * + * Copyright (C) 2008 Pengutronix + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
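Both the reworked ISA driver and the new platform driver below fill the same set of i2c-algo-pca hooks: raw register read/write through an opaque pd cookie, a wait_for_completion() callback (formerly wait_for_interrupt), a reset_chip() callback, and an i2c_clock field that replaces the old get_clock()/get_own() accessors. Reconstructed from the usage visible in this patch only; field order and exact types are a guess, the authoritative definition lives in <linux/i2c-algo-pca.h>:

/*
 * struct i2c_algo_pca_data {
 *	void *data;				// bus-private cookie, passed as pd
 *	void (*write_byte)(void *pd, int reg, int val);
 *	int  (*read_byte)(void *pd, int reg);
 *	int  (*wait_for_completion)(void *pd);
 *	void (*reset_chip)(void *pd);
 *	unsigned int i2c_clock;			// replaces get_clock()
 * };
 */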
+ + */ +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/slab.h> +#include <linux/delay.h> +#include <linux/errno.h> +#include <linux/i2c.h> +#include <linux/interrupt.h> +#include <linux/platform_device.h> +#include <linux/i2c-algo-pca.h> +#include <linux/i2c-pca-platform.h> +#include <linux/gpio.h> + +#include <asm/irq.h> +#include <asm/io.h> + +#define res_len(r) ((r)->end - (r)->start + 1) + +struct i2c_pca_pf_data { + void __iomem *reg_base; + int irq; /* if 0, use polling */ + int gpio; + wait_queue_head_t wait; + struct i2c_adapter adap; + struct i2c_algo_pca_data algo_data; + unsigned long io_base; + unsigned long io_size; +}; + +/* Read/Write functions for different register alignments */ + +static int i2c_pca_pf_readbyte8(void *pd, int reg) +{ + struct i2c_pca_pf_data *i2c = pd; + return ioread8(i2c->reg_base + reg); +} + +static int i2c_pca_pf_readbyte16(void *pd, int reg) +{ + struct i2c_pca_pf_data *i2c = pd; + return ioread8(i2c->reg_base + reg * 2); +} + +static int i2c_pca_pf_readbyte32(void *pd, int reg) +{ + struct i2c_pca_pf_data *i2c = pd; + return ioread8(i2c->reg_base + reg * 4); +} + +static void i2c_pca_pf_writebyte8(void *pd, int reg, int val) +{ + struct i2c_pca_pf_data *i2c = pd; + iowrite8(val, i2c->reg_base + reg); +} + +static void i2c_pca_pf_writebyte16(void *pd, int reg, int val) +{ + struct i2c_pca_pf_data *i2c = pd; + iowrite8(val, i2c->reg_base + reg * 2); +} + +static void i2c_pca_pf_writebyte32(void *pd, int reg, int val) +{ + struct i2c_pca_pf_data *i2c = pd; + iowrite8(val, i2c->reg_base + reg * 4); +} + + +static int i2c_pca_pf_waitforcompletion(void *pd) +{ + struct i2c_pca_pf_data *i2c = pd; + int ret = 0; + + if (i2c->irq) { + ret = wait_event_interruptible(i2c->wait, + i2c->algo_data.read_byte(i2c, I2C_PCA_CON) + & I2C_PCA_CON_SI); + } else { + /* + * Do polling... + * XXX: Could get stuck in extreme cases! + * Maybe add timeout, but using irqs is preferred anyhow. + */ + while ((i2c->algo_data.read_byte(i2c, I2C_PCA_CON) + & I2C_PCA_CON_SI) == 0) + udelay(100); + } + + return ret; +} + +static void i2c_pca_pf_dummyreset(void *pd) +{ + struct i2c_pca_pf_data *i2c = pd; + printk(KERN_WARNING "%s: No reset-pin found. Chip may get stuck!\n", + i2c->adap.name); +} + +static void i2c_pca_pf_resetchip(void *pd) +{ + struct i2c_pca_pf_data *i2c = pd; + + gpio_set_value(i2c->gpio, 0); + ndelay(100); + gpio_set_value(i2c->gpio, 1); +} + +static irqreturn_t i2c_pca_pf_handler(int this_irq, void *dev_id) +{ + struct i2c_pca_pf_data *i2c = dev_id; + + if ((i2c->algo_data.read_byte(i2c, I2C_PCA_CON) & I2C_PCA_CON_SI) == 0) + return IRQ_NONE; + + wake_up_interruptible(&i2c->wait); + + return IRQ_HANDLED; +} + + +static int __devinit i2c_pca_pf_probe(struct platform_device *pdev) +{ + struct i2c_pca_pf_data *i2c; + struct resource *res; + struct i2c_pca9564_pf_platform_data *platform_data = + pdev->dev.platform_data; + int ret = 0; + int irq; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + irq = platform_get_irq(pdev, 0); + /* If irq is 0, we do polling. 
*/ + + if (res == NULL) { + ret = -ENODEV; + goto e_print; + } + + if (!request_mem_region(res->start, res_len(res), res->name)) { + ret = -ENOMEM; + goto e_print; + } + + i2c = kzalloc(sizeof(struct i2c_pca_pf_data), GFP_KERNEL); + if (!i2c) { + ret = -ENOMEM; + goto e_alloc; + } + + init_waitqueue_head(&i2c->wait); + + i2c->reg_base = ioremap(res->start, res_len(res)); + if (!i2c->reg_base) { + ret = -EIO; + goto e_remap; + } + i2c->io_base = res->start; + i2c->io_size = res_len(res); + i2c->irq = irq; + + i2c->adap.nr = pdev->id >= 0 ? pdev->id : 0; + i2c->adap.owner = THIS_MODULE; + snprintf(i2c->adap.name, sizeof(i2c->adap.name), "PCA9564 at 0x%08lx", + (unsigned long) res->start); + i2c->adap.algo_data = &i2c->algo_data; + i2c->adap.dev.parent = &pdev->dev; + i2c->adap.timeout = platform_data->timeout; + + i2c->algo_data.i2c_clock = platform_data->i2c_clock_speed; + i2c->algo_data.data = i2c; + + switch (res->flags & IORESOURCE_MEM_TYPE_MASK) { + case IORESOURCE_MEM_32BIT: + i2c->algo_data.write_byte = i2c_pca_pf_writebyte32; + i2c->algo_data.read_byte = i2c_pca_pf_readbyte32; + break; + case IORESOURCE_MEM_16BIT: + i2c->algo_data.write_byte = i2c_pca_pf_writebyte16; + i2c->algo_data.read_byte = i2c_pca_pf_readbyte16; + break; + case IORESOURCE_MEM_8BIT: + default: + i2c->algo_data.write_byte = i2c_pca_pf_writebyte8; + i2c->algo_data.read_byte = i2c_pca_pf_readbyte8; + break; + } + + i2c->algo_data.wait_for_completion = i2c_pca_pf_waitforcompletion; + + i2c->gpio = platform_data->gpio; + i2c->algo_data.reset_chip = i2c_pca_pf_dummyreset; + + /* Use gpio_is_valid() when in mainline */ + if (i2c->gpio > -1) { + ret = gpio_request(i2c->gpio, i2c->adap.name); + if (ret == 0) { + gpio_direction_output(i2c->gpio, 1); + i2c->algo_data.reset_chip = i2c_pca_pf_resetchip; + } else { + printk(KERN_WARNING "%s: Registering gpio failed!\n", + i2c->adap.name); + i2c->gpio = ret; + } + } + + if (irq) { + ret = request_irq(irq, i2c_pca_pf_handler, + IRQF_TRIGGER_FALLING, i2c->adap.name, i2c); + if (ret) + goto e_reqirq; + } + + if (i2c_pca_add_numbered_bus(&i2c->adap) < 0) { + ret = -ENODEV; + goto e_adapt; + } + + platform_set_drvdata(pdev, i2c); + + printk(KERN_INFO "%s registered.\n", i2c->adap.name); + + return 0; + +e_adapt: + if (irq) + free_irq(irq, i2c); +e_reqirq: + if (i2c->gpio > -1) + gpio_free(i2c->gpio); + + iounmap(i2c->reg_base); +e_remap: + kfree(i2c); +e_alloc: + release_mem_region(res->start, res_len(res)); +e_print: + printk(KERN_ERR "Registering PCA9564 FAILED! 
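The IORESOURCE_MEM_TYPE_MASK switch above is how a board selects the register spacing: the PCA9564's four registers can sit on 8-, 16- or 32-bit lanes, and the matching readbyte/writebyte pair scales the register index accordingly. A hypothetical board file wiring the controller on 16-bit lanes (base address, IRQ number and timeout value are made up):

static struct resource board_pca_res[] = {
	{
		/* 4 registers, 2 bytes apart -> 8 bytes of I/O space */
		.start	= 0x10000000,
		.end	= 0x10000007,
		.flags	= IORESOURCE_MEM | IORESOURCE_MEM_16BIT,
	}, {
		.start	= 42,
		.end	= 42,
		.flags	= IORESOURCE_IRQ,	/* PCA9564 interrupt line */
	},
};

static struct i2c_pca9564_pf_platform_data board_pca_data = {
	.gpio			= -1,			/* no reset line wired up */
	.i2c_clock_speed	= I2C_PCA_CON_59kHz,
	.timeout		= 100,
};

static struct platform_device board_pca_device = {
	.name		= "i2c-pca-platform",
	.id		= 0,
	.resource	= board_pca_res,
	.num_resources	= ARRAY_SIZE(board_pca_res),
	.dev		= {
		.platform_data	= &board_pca_data,
	},
};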
(%d)\n", ret); + return ret; +} + +static int __devexit i2c_pca_pf_remove(struct platform_device *pdev) +{ + struct i2c_pca_pf_data *i2c = platform_get_drvdata(pdev); + platform_set_drvdata(pdev, NULL); + + i2c_del_adapter(&i2c->adap); + + if (i2c->irq) + free_irq(i2c->irq, i2c); + + if (i2c->gpio > -1) + gpio_free(i2c->gpio); + + iounmap(i2c->reg_base); + release_mem_region(i2c->io_base, i2c->io_size); + kfree(i2c); + + return 0; +} + +static struct platform_driver i2c_pca_pf_driver = { + .probe = i2c_pca_pf_probe, + .remove = __devexit_p(i2c_pca_pf_remove), + .driver = { + .name = "i2c-pca-platform", + .owner = THIS_MODULE, + }, +}; + +static int __init i2c_pca_pf_init(void) +{ + return platform_driver_register(&i2c_pca_pf_driver); +} + +static void __exit i2c_pca_pf_exit(void) +{ + platform_driver_unregister(&i2c_pca_pf_driver); +} + +MODULE_AUTHOR("Wolfram Sang <w.sang@pengutronix.de>"); +MODULE_DESCRIPTION("I2C-PCA9564 platform driver"); +MODULE_LICENSE("GPL"); + +module_init(i2c_pca_pf_init); +module_exit(i2c_pca_pf_exit); + diff --git a/drivers/i2c/busses/i2c-pmcmsp.c b/drivers/i2c/busses/i2c-pmcmsp.c index b03af5653c6..63b3e2c11cf 100644 --- a/drivers/i2c/busses/i2c-pmcmsp.c +++ b/drivers/i2c/busses/i2c-pmcmsp.c @@ -467,7 +467,7 @@ static enum pmcmsptwi_xfer_result pmcmsptwi_xfer_cmd( (cmd->read_len == 0 || cmd->write_len == 0))) { dev_err(&pmcmsptwi_adapter.dev, "%s: Cannot transfer less than 1 byte\n", - __FUNCTION__); + __func__); return -EINVAL; } @@ -475,7 +475,7 @@ static enum pmcmsptwi_xfer_result pmcmsptwi_xfer_cmd( cmd->write_len > MSP_MAX_BYTES_PER_RW) { dev_err(&pmcmsptwi_adapter.dev, "%s: Cannot transfer more than %d bytes\n", - __FUNCTION__, MSP_MAX_BYTES_PER_RW); + __func__, MSP_MAX_BYTES_PER_RW); return -EINVAL; } @@ -627,6 +627,9 @@ static struct i2c_adapter pmcmsptwi_adapter = { .name = DRV_NAME, }; +/* work with hotplug and coldplug */ +MODULE_ALIAS("platform:" DRV_NAME); + static struct platform_driver pmcmsptwi_driver = { .probe = pmcmsptwi_probe, .remove = __devexit_p(pmcmsptwi_remove), diff --git a/drivers/i2c/busses/i2c-pnx.c b/drivers/i2c/busses/i2c-pnx.c index f8d0dff0de7..1ca21084ffc 100644 --- a/drivers/i2c/busses/i2c-pnx.c +++ b/drivers/i2c/busses/i2c-pnx.c @@ -76,7 +76,7 @@ static int i2c_pnx_start(unsigned char slave_addr, struct i2c_adapter *adap) { struct i2c_pnx_algo_data *alg_data = adap->algo_data; - dev_dbg(&adap->dev, "%s(): addr 0x%x mode %d\n", __FUNCTION__, + dev_dbg(&adap->dev, "%s(): addr 0x%x mode %d\n", __func__, slave_addr, alg_data->mif.mode); /* Check for 7 bit slave addresses only */ @@ -110,14 +110,14 @@ static int i2c_pnx_start(unsigned char slave_addr, struct i2c_adapter *adap) iowrite32(ioread32(I2C_REG_STS(alg_data)) | mstatus_tdi | mstatus_afi, I2C_REG_STS(alg_data)); - dev_dbg(&adap->dev, "%s(): sending %#x\n", __FUNCTION__, + dev_dbg(&adap->dev, "%s(): sending %#x\n", __func__, (slave_addr << 1) | start_bit | alg_data->mif.mode); /* Write the slave address, START bit and R/W bit */ iowrite32((slave_addr << 1) | start_bit | alg_data->mif.mode, I2C_REG_TX(alg_data)); - dev_dbg(&adap->dev, "%s(): exit\n", __FUNCTION__); + dev_dbg(&adap->dev, "%s(): exit\n", __func__); return 0; } @@ -135,7 +135,7 @@ static void i2c_pnx_stop(struct i2c_adapter *adap) long timeout = 1000; dev_dbg(&adap->dev, "%s(): entering: stat = %04x.\n", - __FUNCTION__, ioread32(I2C_REG_STS(alg_data))); + __func__, ioread32(I2C_REG_STS(alg_data))); /* Write a STOP bit to TX FIFO */ iowrite32(0xff | stop_bit, I2C_REG_TX(alg_data)); @@ -149,7 +149,7 @@ static 
void i2c_pnx_stop(struct i2c_adapter *adap) } dev_dbg(&adap->dev, "%s(): exiting: stat = %04x.\n", - __FUNCTION__, ioread32(I2C_REG_STS(alg_data))); + __func__, ioread32(I2C_REG_STS(alg_data))); } /** @@ -164,7 +164,7 @@ static int i2c_pnx_master_xmit(struct i2c_adapter *adap) u32 val; dev_dbg(&adap->dev, "%s(): entering: stat = %04x.\n", - __FUNCTION__, ioread32(I2C_REG_STS(alg_data))); + __func__, ioread32(I2C_REG_STS(alg_data))); if (alg_data->mif.len > 0) { /* We still have something to talk about... */ @@ -179,7 +179,7 @@ static int i2c_pnx_master_xmit(struct i2c_adapter *adap) alg_data->mif.len--; iowrite32(val, I2C_REG_TX(alg_data)); - dev_dbg(&adap->dev, "%s(): xmit %#x [%d]\n", __FUNCTION__, + dev_dbg(&adap->dev, "%s(): xmit %#x [%d]\n", __func__, val, alg_data->mif.len + 1); if (alg_data->mif.len == 0) { @@ -197,7 +197,7 @@ static int i2c_pnx_master_xmit(struct i2c_adapter *adap) del_timer_sync(&alg_data->mif.timer); dev_dbg(&adap->dev, "%s(): Waking up xfer routine.\n", - __FUNCTION__); + __func__); complete(&alg_data->mif.complete); } @@ -213,13 +213,13 @@ static int i2c_pnx_master_xmit(struct i2c_adapter *adap) /* Stop timer. */ del_timer_sync(&alg_data->mif.timer); dev_dbg(&adap->dev, "%s(): Waking up xfer routine after " - "zero-xfer.\n", __FUNCTION__); + "zero-xfer.\n", __func__); complete(&alg_data->mif.complete); } dev_dbg(&adap->dev, "%s(): exiting: stat = %04x.\n", - __FUNCTION__, ioread32(I2C_REG_STS(alg_data))); + __func__, ioread32(I2C_REG_STS(alg_data))); return 0; } @@ -237,14 +237,14 @@ static int i2c_pnx_master_rcv(struct i2c_adapter *adap) u32 ctl = 0; dev_dbg(&adap->dev, "%s(): entering: stat = %04x.\n", - __FUNCTION__, ioread32(I2C_REG_STS(alg_data))); + __func__, ioread32(I2C_REG_STS(alg_data))); /* Check, whether there is already data, * or we didn't 'ask' for it yet. */ if (ioread32(I2C_REG_STS(alg_data)) & mstatus_rfe) { dev_dbg(&adap->dev, "%s(): Write dummy data to fill " - "Rx-fifo...\n", __FUNCTION__); + "Rx-fifo...\n", __func__); if (alg_data->mif.len == 1) { /* Last byte, do not acknowledge next rcv. */ @@ -276,7 +276,7 @@ static int i2c_pnx_master_rcv(struct i2c_adapter *adap) if (alg_data->mif.len > 0) { val = ioread32(I2C_REG_RX(alg_data)); *alg_data->mif.buf++ = (u8) (val & 0xff); - dev_dbg(&adap->dev, "%s(): rcv 0x%x [%d]\n", __FUNCTION__, val, + dev_dbg(&adap->dev, "%s(): rcv 0x%x [%d]\n", __func__, val, alg_data->mif.len); alg_data->mif.len--; @@ -300,7 +300,7 @@ static int i2c_pnx_master_rcv(struct i2c_adapter *adap) } dev_dbg(&adap->dev, "%s(): exiting: stat = %04x.\n", - __FUNCTION__, ioread32(I2C_REG_STS(alg_data))); + __func__, ioread32(I2C_REG_STS(alg_data))); return 0; } @@ -312,7 +312,7 @@ static irqreturn_t i2c_pnx_interrupt(int irq, void *dev_id) struct i2c_pnx_algo_data *alg_data = adap->algo_data; dev_dbg(&adap->dev, "%s(): mstat = %x mctrl = %x, mode = %d\n", - __FUNCTION__, + __func__, ioread32(I2C_REG_STS(alg_data)), ioread32(I2C_REG_CTL(alg_data)), alg_data->mif.mode); @@ -336,7 +336,7 @@ static irqreturn_t i2c_pnx_interrupt(int irq, void *dev_id) /* Slave did not acknowledge, generate a STOP */ dev_dbg(&adap->dev, "%s(): " "Slave did not acknowledge, generating a STOP.\n", - __FUNCTION__); + __func__); i2c_pnx_stop(adap); /* Disable master interrupts. 
*/ @@ -375,7 +375,7 @@ static irqreturn_t i2c_pnx_interrupt(int irq, void *dev_id) iowrite32(stat | mstatus_tdi | mstatus_afi, I2C_REG_STS(alg_data)); dev_dbg(&adap->dev, "%s(): exiting, stat = %x ctrl = %x.\n", - __FUNCTION__, ioread32(I2C_REG_STS(alg_data)), + __func__, ioread32(I2C_REG_STS(alg_data)), ioread32(I2C_REG_CTL(alg_data))); return IRQ_HANDLED; @@ -447,7 +447,7 @@ i2c_pnx_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) u32 stat = ioread32(I2C_REG_STS(alg_data)); dev_dbg(&adap->dev, "%s(): entering: %d messages, stat = %04x.\n", - __FUNCTION__, num, ioread32(I2C_REG_STS(alg_data))); + __func__, num, ioread32(I2C_REG_STS(alg_data))); bus_reset_if_active(adap); @@ -473,7 +473,7 @@ i2c_pnx_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) alg_data->mif.ret = 0; alg_data->last = (i == num - 1); - dev_dbg(&adap->dev, "%s(): mode %d, %d bytes\n", __FUNCTION__, + dev_dbg(&adap->dev, "%s(): mode %d, %d bytes\n", __func__, alg_data->mif.mode, alg_data->mif.len); @@ -498,7 +498,7 @@ i2c_pnx_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) if (!(rc = alg_data->mif.ret)) completed++; dev_dbg(&adap->dev, "%s(): Complete, return code = %d.\n", - __FUNCTION__, rc); + __func__, rc); /* Clear TDI and AFI bits in case they are set. */ if ((stat = ioread32(I2C_REG_STS(alg_data))) & mstatus_tdi) { @@ -522,7 +522,7 @@ i2c_pnx_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) alg_data->mif.len = 0; dev_dbg(&adap->dev, "%s(): exiting, stat = %x\n", - __FUNCTION__, ioread32(I2C_REG_STS(alg_data))); + __func__, ioread32(I2C_REG_STS(alg_data))); if (completed != num) return ((rc < 0) ? rc : -EREMOTEIO); @@ -563,7 +563,7 @@ static int __devinit i2c_pnx_probe(struct platform_device *pdev) if (!i2c_pnx || !i2c_pnx->adapter) { dev_err(&pdev->dev, "%s: no platform data supplied\n", - __FUNCTION__); + __func__); ret = -EINVAL; goto out; } @@ -697,6 +697,7 @@ static void __exit i2c_adap_pnx_exit(void) MODULE_AUTHOR("Vitaly Wool, Dennis Kovalev <source@mvista.com>"); MODULE_DESCRIPTION("I2C driver for Philips IP3204-based I2C busses"); MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:pnx-i2c"); /* We need to make sure I2C is initialized before USB */ subsys_initcall(i2c_adap_pnx_init); diff --git a/drivers/i2c/busses/i2c-powermac.c b/drivers/i2c/busses/i2c-powermac.c index 7813127649a..22f6d5c00d8 100644 --- a/drivers/i2c/busses/i2c-powermac.c +++ b/drivers/i2c/busses/i2c-powermac.c @@ -263,6 +263,9 @@ static int __devexit i2c_powermac_probe(struct platform_device *dev) } +/* work with hotplug and coldplug */ +MODULE_ALIAS("platform:i2c-powermac"); + static struct platform_driver i2c_powermac_driver = { .probe = i2c_powermac_probe, .remove = __devexit_p(i2c_powermac_remove), diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c index 6fd2d6a84ef..eb69fbadc9c 100644 --- a/drivers/i2c/busses/i2c-pxa.c +++ b/drivers/i2c/busses/i2c-pxa.c @@ -155,7 +155,7 @@ static void i2c_pxa_show_state(struct pxa_i2c *i2c, int lno, const char *fname) readl(_ISR(i2c)), readl(_ICR(i2c)), readl(_IBMR(i2c))); } -#define show_state(i2c) i2c_pxa_show_state(i2c, __LINE__, __FUNCTION__) +#define show_state(i2c) i2c_pxa_show_state(i2c, __LINE__, __func__) #else #define i2c_debug 0 @@ -1132,6 +1132,7 @@ static void __exit i2c_adap_pxa_exit(void) } MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:pxa2xx-i2c"); module_init(i2c_adap_pxa_init); module_exit(i2c_adap_pxa_exit); diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c index 
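The bulk __FUNCTION__ -> __func__ substitutions here and in i2c-davinci, i2c-pmcmsp, i2c-pxa and i2c-s3c2410 swap the old GCC-specific spelling for the identifier C99 actually defines; the generated strings are identical. For example (hypothetical function, shown only to illustrate the output):

/* Both forms evaluate to the enclosing function's name, but only
 * __func__ is standard C99; __FUNCTION__ is the legacy GCC spelling. */
static void example_dbg(struct device *dev)
{
	dev_dbg(dev, "%s(): entering\n", __func__);	/* "example_dbg(): entering" */
}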
c44ada5f429..1305ef190fc 100644 --- a/drivers/i2c/busses/i2c-s3c2410.c +++ b/drivers/i2c/busses/i2c-s3c2410.c @@ -276,12 +276,12 @@ static int i2s_s3c_irq_nextbyte(struct s3c24xx_i2c *i2c, unsigned long iicstat) switch (i2c->state) { case STATE_IDLE: - dev_err(i2c->dev, "%s: called in STATE_IDLE\n", __FUNCTION__); + dev_err(i2c->dev, "%s: called in STATE_IDLE\n", __func__); goto out; break; case STATE_STOP: - dev_err(i2c->dev, "%s: called in STATE_STOP\n", __FUNCTION__); + dev_err(i2c->dev, "%s: called in STATE_STOP\n", __func__); s3c24xx_i2c_disable_irq(i2c); goto out_ack; @@ -948,3 +948,4 @@ module_exit(i2c_adap_s3c_exit); MODULE_DESCRIPTION("S3C24XX I2C Bus driver"); MODULE_AUTHOR("Ben Dooks, <ben@simtec.co.uk>"); MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:s3c2410-i2c"); diff --git a/drivers/i2c/busses/i2c-sh7760.c b/drivers/i2c/busses/i2c-sh7760.c new file mode 100644 index 00000000000..5e0e254976d --- /dev/null +++ b/drivers/i2c/busses/i2c-sh7760.c @@ -0,0 +1,577 @@ +/* + * I2C bus driver for the SH7760 I2C Interfaces. + * + * (c) 2005-2008 MSC Vertriebsges.m.b.H, Manuel Lauss <mlau@msc-ge.com> + * + * licensed under the terms outlined in the file COPYING. + * + */ + +#include <linux/completion.h> +#include <linux/delay.h> +#include <linux/err.h> +#include <linux/i2c.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/ioport.h> +#include <linux/platform_device.h> +#include <linux/slab.h> + +#include <asm/clock.h> +#include <asm/i2c-sh7760.h> +#include <asm/io.h> + +/* register offsets */ +#define I2CSCR 0x0 /* slave ctrl */ +#define I2CMCR 0x4 /* master ctrl */ +#define I2CSSR 0x8 /* slave status */ +#define I2CMSR 0xC /* master status */ +#define I2CSIER 0x10 /* slave irq enable */ +#define I2CMIER 0x14 /* master irq enable */ +#define I2CCCR 0x18 /* clock dividers */ +#define I2CSAR 0x1c /* slave address */ +#define I2CMAR 0x20 /* master address */ +#define I2CRXTX 0x24 /* data port */ +#define I2CFCR 0x28 /* fifo control */ +#define I2CFSR 0x2C /* fifo status */ +#define I2CFIER 0x30 /* fifo irq enable */ +#define I2CRFDR 0x34 /* rx fifo count */ +#define I2CTFDR 0x38 /* tx fifo count */ + +#define REGSIZE 0x3C + +#define MCR_MDBS 0x80 /* non-fifo mode switch */ +#define MCR_FSCL 0x40 /* override SCL pin */ +#define MCR_FSDA 0x20 /* override SDA pin */ +#define MCR_OBPC 0x10 /* override pins */ +#define MCR_MIE 0x08 /* master if enable */ +#define MCR_TSBE 0x04 +#define MCR_FSB 0x02 /* force stop bit */ +#define MCR_ESG 0x01 /* en startbit gen. 
*/ + +#define MSR_MNR 0x40 /* nack received */ +#define MSR_MAL 0x20 /* arbitration lost */ +#define MSR_MST 0x10 /* sent a stop */ +#define MSR_MDE 0x08 +#define MSR_MDT 0x04 +#define MSR_MDR 0x02 +#define MSR_MAT 0x01 /* slave addr xfer done */ + +#define MIE_MNRE 0x40 /* nack irq en */ +#define MIE_MALE 0x20 /* arblos irq en */ +#define MIE_MSTE 0x10 /* stop irq en */ +#define MIE_MDEE 0x08 +#define MIE_MDTE 0x04 +#define MIE_MDRE 0x02 +#define MIE_MATE 0x01 /* address sent irq en */ + +#define FCR_RFRST 0x02 /* reset rx fifo */ +#define FCR_TFRST 0x01 /* reset tx fifo */ + +#define FSR_TEND 0x04 /* last byte sent */ +#define FSR_RDF 0x02 /* rx fifo trigger */ +#define FSR_TDFE 0x01 /* tx fifo empty */ + +#define FIER_TEIE 0x04 /* tx fifo empty irq en */ +#define FIER_RXIE 0x02 /* rx fifo trig irq en */ +#define FIER_TXIE 0x01 /* tx fifo trig irq en */ + +#define FIFO_SIZE 16 + +struct cami2c { + void __iomem *iobase; + struct i2c_adapter adap; + + /* message processing */ + struct i2c_msg *msg; +#define IDF_SEND 1 +#define IDF_RECV 2 +#define IDF_STOP 4 + int flags; + +#define IDS_DONE 1 +#define IDS_ARBLOST 2 +#define IDS_NACK 4 + int status; + struct completion xfer_done; + + int irq; + struct resource *ioarea; +}; + +static inline void OUT32(struct cami2c *cam, int reg, unsigned long val) +{ + ctrl_outl(val, (unsigned long)cam->iobase + reg); +} + +static inline unsigned long IN32(struct cami2c *cam, int reg) +{ + return ctrl_inl((unsigned long)cam->iobase + reg); +} + +static irqreturn_t sh7760_i2c_irq(int irq, void *ptr) +{ + struct cami2c *id = ptr; + struct i2c_msg *msg = id->msg; + char *data = msg->buf; + unsigned long msr, fsr, fier, len; + + msr = IN32(id, I2CMSR); + fsr = IN32(id, I2CFSR); + + /* arbitration lost */ + if (msr & MSR_MAL) { + OUT32(id, I2CMCR, 0); + OUT32(id, I2CSCR, 0); + OUT32(id, I2CSAR, 0); + id->status |= IDS_DONE | IDS_ARBLOST; + goto out; + } + + if (msr & MSR_MNR) { + /* NACK handling is very screwed up. After receiving a + * NAK IRQ one has to wait a bit before writing to any + * registers, or the ctl will lock up. After that delay + * do a normal i2c stop. Then wait at least 1 ms before + * attempting another transfer or ctl will stop working + */ + udelay(100); /* wait or risk ctl hang */ + OUT32(id, I2CFCR, FCR_RFRST | FCR_TFRST); + OUT32(id, I2CMCR, MCR_MIE | MCR_FSB); + OUT32(id, I2CFIER, 0); + OUT32(id, I2CMIER, MIE_MSTE); + OUT32(id, I2CSCR, 0); + OUT32(id, I2CSAR, 0); + id->status |= IDS_NACK; + msr &= ~MSR_MAT; + fsr = 0; + /* In some cases the MST bit is also set. */ + } + + /* i2c-stop was sent */ + if (msr & MSR_MST) { + id->status |= IDS_DONE; + goto out; + } + + /* i2c slave addr was sent; set to "normal" operation */ + if (msr & MSR_MAT) + OUT32(id, I2CMCR, MCR_MIE); + + fier = IN32(id, I2CFIER); + + if (fsr & FSR_RDF) { + len = IN32(id, I2CRFDR); + if (msg->len <= len) { + if (id->flags & IDF_STOP) { + OUT32(id, I2CMCR, MCR_MIE | MCR_FSB); + OUT32(id, I2CFIER, 0); + /* manual says: wait >= 0.5 SCL times */ + udelay(5); + /* next int should be MST */ + } else { + id->status |= IDS_DONE; + /* keep the RDF bit: ctrl holds SCL low + * until the setup for the next i2c_msg + * clears this bit. + */ + fsr &= ~FSR_RDF; + } + } + while (msg->len && len) { + *data++ = IN32(id, I2CRXTX); + msg->len--; + len--; + } + + if (msg->len) { + len = (msg->len >= FIFO_SIZE) ? 
FIFO_SIZE - 1 + : msg->len - 1; + + OUT32(id, I2CFCR, FCR_TFRST | ((len & 0xf) << 4)); + } + + } else if (id->flags & IDF_SEND) { + if ((fsr & FSR_TEND) && (msg->len < 1)) { + if (id->flags & IDF_STOP) { + OUT32(id, I2CMCR, MCR_MIE | MCR_FSB); + } else { + id->status |= IDS_DONE; + /* keep the TEND bit: ctl holds SCL low + * until the setup for the next i2c_msg + * clears this bit. + */ + fsr &= ~FSR_TEND; + } + } + if (fsr & FSR_TDFE) { + while (msg->len && (IN32(id, I2CTFDR) < FIFO_SIZE)) { + OUT32(id, I2CRXTX, *data++); + msg->len--; + } + + if (msg->len < 1) { + fier &= ~FIER_TXIE; + OUT32(id, I2CFIER, fier); + } else { + len = (msg->len >= FIFO_SIZE) ? 2 : 0; + OUT32(id, I2CFCR, + FCR_RFRST | ((len & 3) << 2)); + } + } + } +out: + if (id->status & IDS_DONE) { + OUT32(id, I2CMIER, 0); + OUT32(id, I2CFIER, 0); + id->msg = NULL; + complete(&id->xfer_done); + } + /* clear status flags and ctrl resumes work */ + OUT32(id, I2CMSR, ~msr); + OUT32(id, I2CFSR, ~fsr); + OUT32(id, I2CSSR, 0); + + return IRQ_HANDLED; +} + + +/* prepare and start a master receive operation */ +static void sh7760_i2c_mrecv(struct cami2c *id) +{ + int len; + + id->flags |= IDF_RECV; + + /* set the slave addr reg; otherwise rcv wont work! */ + OUT32(id, I2CSAR, 0xfe); + OUT32(id, I2CMAR, (id->msg->addr << 1) | 1); + + /* adjust rx fifo trigger */ + if (id->msg->len >= FIFO_SIZE) + len = FIFO_SIZE - 1; /* trigger at fifo full */ + else + len = id->msg->len - 1; /* trigger before all received */ + + OUT32(id, I2CFCR, FCR_RFRST | FCR_TFRST); + OUT32(id, I2CFCR, FCR_TFRST | ((len & 0xF) << 4)); + + OUT32(id, I2CMSR, 0); + OUT32(id, I2CMCR, MCR_MIE | MCR_ESG); + OUT32(id, I2CMIER, MIE_MNRE | MIE_MALE | MIE_MSTE | MIE_MATE); + OUT32(id, I2CFIER, FIER_RXIE); +} + +/* prepare and start a master send operation */ +static void sh7760_i2c_msend(struct cami2c *id) +{ + int len; + + id->flags |= IDF_SEND; + + /* set the slave addr reg; otherwise xmit wont work! */ + OUT32(id, I2CSAR, 0xfe); + OUT32(id, I2CMAR, (id->msg->addr << 1) | 0); + + /* adjust tx fifo trigger */ + if (id->msg->len >= FIFO_SIZE) + len = 2; /* trig: 2 bytes left in TX fifo */ + else + len = 0; /* trig: 8 bytes left in TX fifo */ + + OUT32(id, I2CFCR, FCR_RFRST | FCR_TFRST); + OUT32(id, I2CFCR, FCR_RFRST | ((len & 3) << 2)); + + while (id->msg->len && IN32(id, I2CTFDR) < FIFO_SIZE) { + OUT32(id, I2CRXTX, *(id->msg->buf)); + (id->msg->len)--; + (id->msg->buf)++; + } + + OUT32(id, I2CMSR, 0); + OUT32(id, I2CMCR, MCR_MIE | MCR_ESG); + OUT32(id, I2CFSR, 0); + OUT32(id, I2CMIER, MIE_MNRE | MIE_MALE | MIE_MSTE | MIE_MATE); + OUT32(id, I2CFIER, FIER_TEIE | (id->msg->len ? FIER_TXIE : 0)); +} + +static inline int sh7760_i2c_busy_check(struct cami2c *id) +{ + return (IN32(id, I2CMCR) & MCR_FSDA); +} + +static int sh7760_i2c_master_xfer(struct i2c_adapter *adap, + struct i2c_msg *msgs, + int num) +{ + struct cami2c *id = adap->algo_data; + int i, retr; + + if (sh7760_i2c_busy_check(id)) { + dev_err(&adap->dev, "sh7760-i2c%d: bus busy!\n", adap->nr); + return -EBUSY; + } + + i = 0; + while (i < num) { + retr = adap->retries; +retry: + id->flags = ((i == (num-1)) ? 
IDF_STOP : 0); + id->status = 0; + id->msg = msgs; + init_completion(&id->xfer_done); + + if (msgs->flags & I2C_M_RD) + sh7760_i2c_mrecv(id); + else + sh7760_i2c_msend(id); + + wait_for_completion(&id->xfer_done); + + if (id->status == 0) { + num = -EIO; + break; + } + + if (id->status & IDS_NACK) { + /* wait a bit or i2c module stops working */ + mdelay(1); + num = -EREMOTEIO; + break; + } + + if (id->status & IDS_ARBLOST) { + if (retr--) { + mdelay(2); + goto retry; + } + num = -EREMOTEIO; + break; + } + + msgs++; + i++; + } + + id->msg = NULL; + id->flags = 0; + id->status = 0; + + OUT32(id, I2CMCR, 0); + OUT32(id, I2CMSR, 0); + OUT32(id, I2CMIER, 0); + OUT32(id, I2CFIER, 0); + + /* reset slave module registers too: master mode enables slave + * module for receive ops (ack, data). Without this reset, + * eternal bus activity might be reported after NACK / ARBLOST. + */ + OUT32(id, I2CSCR, 0); + OUT32(id, I2CSAR, 0); + OUT32(id, I2CSSR, 0); + + return num; +} + +static u32 sh7760_i2c_func(struct i2c_adapter *adap) +{ + return I2C_FUNC_I2C | (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK); +} + +static const struct i2c_algorithm sh7760_i2c_algo = { + .master_xfer = sh7760_i2c_master_xfer, + .functionality = sh7760_i2c_func, +}; + +/* calculate CCR register setting for a desired scl clock. SCL clock is + * derived from I2C module clock (iclk) which in turn is derived from + * peripheral module clock (mclk, usually around 33MHz): + * iclk = mclk/(CDF + 1). iclk must be < 20MHz. + * scl = iclk/(SCGD*8 + 20). + */ +static int __devinit calc_CCR(unsigned long scl_hz) +{ + struct clk *mclk; + unsigned long mck, m1, dff, odff, iclk; + signed char cdf, cdfm; + int scgd, scgdm, scgds; + + mclk = clk_get(NULL, "module_clk"); + if (IS_ERR(mclk)) { + return PTR_ERR(mclk); + } else { + mck = mclk->rate; + clk_put(mclk); + } + + odff = scl_hz; + scgdm = cdfm = m1 = 0; + for (cdf = 3; cdf >= 0; cdf--) { + iclk = mck / (1 + cdf); + if (iclk >= 20000000) + continue; + scgds = ((iclk / scl_hz) - 20) >> 3; + for (scgd = scgds; (scgd < 63) && scgd <= scgds + 1; scgd++) { + m1 = iclk / (20 + (scgd << 3)); + dff = abs(scl_hz - m1); + if (dff < odff) { + odff = dff; + cdfm = cdf; + scgdm = scgd; + } + } + } + /* fail if more than 25% off of requested SCL */ + if (odff > (scl_hz >> 2)) + return -EINVAL; + + /* create a CCR register value */ + return ((scgdm << 2) | cdfm); +} + +static int __devinit sh7760_i2c_probe(struct platform_device *pdev) +{ + struct sh7760_i2c_platdata *pd; + struct resource *res; + struct cami2c *id; + int ret; + + pd = pdev->dev.platform_data; + if (!pd) { + dev_err(&pdev->dev, "no platform_data!\n"); + ret = -ENODEV; + goto out0; + } + + id = kzalloc(sizeof(struct cami2c), GFP_KERNEL); + if (!id) { + dev_err(&pdev->dev, "no mem for private data\n"); + ret = -ENOMEM; + goto out0; + } + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + dev_err(&pdev->dev, "no mmio resources\n"); + ret = -ENODEV; + goto out1; + } + + id->ioarea = request_mem_region(res->start, REGSIZE, pdev->name); + if (!id->ioarea) { + dev_err(&pdev->dev, "mmio already reserved\n"); + ret = -EBUSY; + goto out1; + } + + id->iobase = ioremap(res->start, REGSIZE); + if (!id->iobase) { + dev_err(&pdev->dev, "cannot ioremap\n"); + ret = -ENODEV; + goto out2; + } + + id->irq = platform_get_irq(pdev, 0); + + id->adap.nr = pdev->id; + id->adap.algo = &sh7760_i2c_algo; + id->adap.class = I2C_CLASS_ALL; + id->adap.retries = 3; + id->adap.algo_data = id; + id->adap.dev.parent = &pdev->dev; + 
snprintf(id->adap.name, sizeof(id->adap.name), + "SH7760 I2C at %08lx", (unsigned long)res->start); + + OUT32(id, I2CMCR, 0); + OUT32(id, I2CMSR, 0); + OUT32(id, I2CMIER, 0); + OUT32(id, I2CMAR, 0); + OUT32(id, I2CSIER, 0); + OUT32(id, I2CSAR, 0); + OUT32(id, I2CSCR, 0); + OUT32(id, I2CSSR, 0); + OUT32(id, I2CFIER, 0); + OUT32(id, I2CFCR, FCR_RFRST | FCR_TFRST); + OUT32(id, I2CFSR, 0); + + ret = calc_CCR(pd->speed_khz * 1000); + if (ret < 0) { + dev_err(&pdev->dev, "invalid SCL clock: %dkHz\n", + pd->speed_khz); + goto out3; + } + OUT32(id, I2CCCR, ret); + + if (request_irq(id->irq, sh7760_i2c_irq, IRQF_DISABLED, + SH7760_I2C_DEVNAME, id)) { + dev_err(&pdev->dev, "cannot get irq %d\n", id->irq); + ret = -EBUSY; + goto out3; + } + + ret = i2c_add_numbered_adapter(&id->adap); + if (ret < 0) { + dev_err(&pdev->dev, "reg adap failed: %d\n", ret); + goto out4; + } + + platform_set_drvdata(pdev, id); + + dev_info(&pdev->dev, "%d kHz mmio %08x irq %d\n", + pd->speed_khz, res->start, id->irq); + + return 0; + +out4: + free_irq(id->irq, id); +out3: + iounmap(id->iobase); +out2: + release_resource(id->ioarea); + kfree(id->ioarea); +out1: + kfree(id); +out0: + return ret; +} + +static int __devexit sh7760_i2c_remove(struct platform_device *pdev) +{ + struct cami2c *id = platform_get_drvdata(pdev); + + i2c_del_adapter(&id->adap); + free_irq(id->irq, id); + iounmap(id->iobase); + release_resource(id->ioarea); + kfree(id->ioarea); + kfree(id); + platform_set_drvdata(pdev, NULL); + + return 0; +} + +static struct platform_driver sh7760_i2c_drv = { + .driver = { + .name = SH7760_I2C_DEVNAME, + .owner = THIS_MODULE, + }, + .probe = sh7760_i2c_probe, + .remove = __devexit_p(sh7760_i2c_remove), +}; + +static int __init sh7760_i2c_init(void) +{ + return platform_driver_register(&sh7760_i2c_drv); +} + +static void __exit sh7760_i2c_exit(void) +{ + platform_driver_unregister(&sh7760_i2c_drv); +} + +module_init(sh7760_i2c_init); +module_exit(sh7760_i2c_exit); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("SH7760 I2C bus driver"); +MODULE_AUTHOR("Manuel Lauss <mano@roarinelk.homelinux.net>"); diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c new file mode 100644 index 00000000000..840e634fa31 --- /dev/null +++ b/drivers/i2c/busses/i2c-sh_mobile.c @@ -0,0 +1,500 @@ +/* + * SuperH Mobile I2C Controller + * + * Copyright (C) 2008 Magnus Damm + * + * Portions of the code based on out-of-tree driver i2c-sh7343.c + * Copyright (c) 2006 Carlos Munoz <carlos@kenati.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/delay.h> +#include <linux/platform_device.h> +#include <linux/interrupt.h> +#include <linux/i2c.h> +#include <linux/err.h> +#include <linux/clk.h> +#include <linux/io.h> + +enum sh_mobile_i2c_op { + OP_START = 0, + OP_TX_ONLY, + OP_TX_STOP, + OP_TX_TO_RX, + OP_RX_ONLY, + OP_RX_STOP, +}; + +struct sh_mobile_i2c_data { + struct device *dev; + void __iomem *reg; + struct i2c_adapter adap; + + struct clk *clk; + u_int8_t iccl; + u_int8_t icch; + + spinlock_t lock; + wait_queue_head_t wait; + struct i2c_msg *msg; + int pos; + int sr; +}; + +#define NORMAL_SPEED 100000 /* FAST_SPEED 400000 */ + +/* Register offsets */ +#define ICDR(pd) (pd->reg + 0x00) +#define ICCR(pd) (pd->reg + 0x04) +#define ICSR(pd) (pd->reg + 0x08) +#define ICIC(pd) (pd->reg + 0x0c) +#define ICCL(pd) (pd->reg + 0x10) +#define ICCH(pd) (pd->reg + 0x14) + +/* Register bits */ +#define ICCR_ICE 0x80 +#define ICCR_RACK 0x40 +#define ICCR_TRS 0x10 +#define ICCR_BBSY 0x04 +#define ICCR_SCP 0x01 + +#define ICSR_SCLM 0x80 +#define ICSR_SDAM 0x40 +#define SW_DONE 0x20 +#define ICSR_BUSY 0x10 +#define ICSR_AL 0x08 +#define ICSR_TACK 0x04 +#define ICSR_WAIT 0x02 +#define ICSR_DTE 0x01 + +#define ICIC_ALE 0x08 +#define ICIC_TACKE 0x04 +#define ICIC_WAITE 0x02 +#define ICIC_DTEE 0x01 + +static void activate_ch(struct sh_mobile_i2c_data *pd) +{ + /* Make sure the clock is enabled */ + clk_enable(pd->clk); + + /* Enable channel and configure rx ack */ + iowrite8(ioread8(ICCR(pd)) | ICCR_ICE, ICCR(pd)); + + /* Mask all interrupts */ + iowrite8(0, ICIC(pd)); + + /* Set the clock */ + iowrite8(pd->iccl, ICCL(pd)); + iowrite8(pd->icch, ICCH(pd)); +} + +static void deactivate_ch(struct sh_mobile_i2c_data *pd) +{ + /* Clear/disable interrupts */ + iowrite8(0, ICSR(pd)); + iowrite8(0, ICIC(pd)); + + /* Disable channel */ + iowrite8(ioread8(ICCR(pd)) & ~ICCR_ICE, ICCR(pd)); + + /* Disable clock */ + clk_disable(pd->clk); +} + +static unsigned char i2c_op(struct sh_mobile_i2c_data *pd, + enum sh_mobile_i2c_op op, unsigned char data) +{ + unsigned char ret = 0; + unsigned long flags; + + dev_dbg(pd->dev, "op %d, data in 0x%02x\n", op, data); + + spin_lock_irqsave(&pd->lock, flags); + + switch (op) { + case OP_START: + iowrite8(0x94, ICCR(pd)); + break; + case OP_TX_ONLY: + iowrite8(data, ICDR(pd)); + break; + case OP_TX_STOP: + iowrite8(data, ICDR(pd)); + iowrite8(0x90, ICCR(pd)); + iowrite8(ICIC_ALE | ICIC_TACKE, ICIC(pd)); + break; + case OP_TX_TO_RX: + iowrite8(data, ICDR(pd)); + iowrite8(0x81, ICCR(pd)); + break; + case OP_RX_ONLY: + ret = ioread8(ICDR(pd)); + break; + case OP_RX_STOP: + ret = ioread8(ICDR(pd)); + iowrite8(0xc0, ICCR(pd)); + break; + } + + spin_unlock_irqrestore(&pd->lock, flags); + + dev_dbg(pd->dev, "op %d, data out 0x%02x\n", op, ret); + return ret; +} + +static irqreturn_t sh_mobile_i2c_isr(int irq, void *dev_id) +{ + struct platform_device *dev = dev_id; + struct sh_mobile_i2c_data *pd = platform_get_drvdata(dev); + struct i2c_msg *msg = pd->msg; + unsigned char data, sr; + int wakeup = 0; + + sr = ioread8(ICSR(pd)); + pd->sr |= sr; + + dev_dbg(pd->dev, "i2c_isr 0x%02x 0x%02x %s %d %d!\n", sr, pd->sr, + (msg->flags & I2C_M_RD) ? 
"read" : "write", + pd->pos, msg->len); + + if (sr & (ICSR_AL | ICSR_TACK)) { + iowrite8(0, ICIC(pd)); /* disable interrupts */ + wakeup = 1; + goto do_wakeup; + } + + if (pd->pos == msg->len) { + i2c_op(pd, OP_RX_ONLY, 0); + wakeup = 1; + goto do_wakeup; + } + + if (pd->pos == -1) { + data = (msg->addr & 0x7f) << 1; + data |= (msg->flags & I2C_M_RD) ? 1 : 0; + } else + data = msg->buf[pd->pos]; + + if ((pd->pos == -1) || !(msg->flags & I2C_M_RD)) { + if (msg->flags & I2C_M_RD) + i2c_op(pd, OP_TX_TO_RX, data); + else if (pd->pos == (msg->len - 1)) { + i2c_op(pd, OP_TX_STOP, data); + wakeup = 1; + } else + i2c_op(pd, OP_TX_ONLY, data); + } else { + if (pd->pos == (msg->len - 1)) + data = i2c_op(pd, OP_RX_STOP, 0); + else + data = i2c_op(pd, OP_RX_ONLY, 0); + + msg->buf[pd->pos] = data; + } + pd->pos++; + + do_wakeup: + if (wakeup) { + pd->sr |= SW_DONE; + wake_up(&pd->wait); + } + + return IRQ_HANDLED; +} + +static int start_ch(struct sh_mobile_i2c_data *pd, struct i2c_msg *usr_msg) +{ + /* Initialize channel registers */ + iowrite8(ioread8(ICCR(pd)) & ~ICCR_ICE, ICCR(pd)); + + /* Enable channel and configure rx ack */ + iowrite8(ioread8(ICCR(pd)) | ICCR_ICE, ICCR(pd)); + + /* Set the clock */ + iowrite8(pd->iccl, ICCL(pd)); + iowrite8(pd->icch, ICCH(pd)); + + pd->msg = usr_msg; + pd->pos = -1; + pd->sr = 0; + + /* Enable all interrupts except wait */ + iowrite8(ioread8(ICIC(pd)) | ICIC_ALE | ICIC_TACKE | ICIC_DTEE, + ICIC(pd)); + return 0; +} + +static int sh_mobile_i2c_xfer(struct i2c_adapter *adapter, + struct i2c_msg *msgs, + int num) +{ + struct sh_mobile_i2c_data *pd = i2c_get_adapdata(adapter); + struct i2c_msg *msg; + int err = 0; + u_int8_t val; + int i, k, retry_count; + + activate_ch(pd); + + /* Process all messages */ + for (i = 0; i < num; i++) { + msg = &msgs[i]; + + err = start_ch(pd, msg); + if (err) + break; + + i2c_op(pd, OP_START, 0); + + /* The interrupt handler takes care of the rest... */ + k = wait_event_timeout(pd->wait, + pd->sr & (ICSR_TACK | SW_DONE), + 5 * HZ); + if (!k) + dev_err(pd->dev, "Transfer request timed out\n"); + + retry_count = 10; +again: + val = ioread8(ICSR(pd)); + + dev_dbg(pd->dev, "val 0x%02x pd->sr 0x%02x\n", val, pd->sr); + + if ((val | pd->sr) & (ICSR_TACK | ICSR_AL)) { + err = -EIO; + break; + } + + /* the interrupt handler may wake us up before the + * transfer is finished, so poll the hardware + * until we're done. + */ + + if (!(!(val & ICSR_BUSY) && (val & ICSR_SCLM) && + (val & ICSR_SDAM))) { + msleep(1); + if (retry_count--) + goto again; + + err = -EIO; + dev_err(pd->dev, "Polling timed out\n"); + break; + } + } + + deactivate_ch(pd); + + if (!err) + err = num; + return err; +} + +static u32 sh_mobile_i2c_func(struct i2c_adapter *adapter) +{ + return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; +} + +static struct i2c_algorithm sh_mobile_i2c_algorithm = { + .functionality = sh_mobile_i2c_func, + .master_xfer = sh_mobile_i2c_xfer, +}; + +static void sh_mobile_i2c_setup_channel(struct platform_device *dev) +{ + struct sh_mobile_i2c_data *pd = platform_get_drvdata(dev); + unsigned long peripheral_clk = clk_get_rate(pd->clk); + u_int32_t num; + u_int32_t denom; + u_int32_t tmp; + + spin_lock_init(&pd->lock); + init_waitqueue_head(&pd->wait); + + /* Calculate the value for iccl. From the data sheet: + * iccl = (p clock / transfer rate) * (L / (L + H)) + * where L and H are the SCL low/high ratio (5/4 in this case). + * We also round off the result. 
+ */ + num = peripheral_clk * 5; + denom = NORMAL_SPEED * 9; + tmp = num * 10 / denom; + if (tmp % 10 >= 5) + pd->iccl = (u_int8_t)((num/denom) + 1); + else + pd->iccl = (u_int8_t)(num/denom); + + /* Calculate the value for icch. From the data sheet: + icch = (p clock / transfer rate) * (H / (L + H)) */ + num = peripheral_clk * 4; + tmp = num * 10 / denom; + if (tmp % 10 >= 5) + pd->icch = (u_int8_t)((num/denom) + 1); + else + pd->icch = (u_int8_t)(num/denom); +} + +static int sh_mobile_i2c_hook_irqs(struct platform_device *dev, int hook) +{ + struct resource *res; + int ret = -ENXIO; + int q, m; + int k = 0; + int n = 0; + + while ((res = platform_get_resource(dev, IORESOURCE_IRQ, k))) { + for (n = res->start; hook && n <= res->end; n++) { + if (request_irq(n, sh_mobile_i2c_isr, IRQF_DISABLED, + dev->dev.bus_id, dev)) + goto rollback; + } + k++; + } + + if (hook) + return k > 0 ? 0 : -ENOENT; + + k--; + ret = 0; + + rollback: + for (q = k; k >= 0; k--) { + for (m = n; m >= res->start; m--) + free_irq(m, dev); + + res = platform_get_resource(dev, IORESOURCE_IRQ, k - 1); + m = res->end; + } + + return ret; +} + +static int sh_mobile_i2c_probe(struct platform_device *dev) +{ + struct sh_mobile_i2c_data *pd; + struct i2c_adapter *adap; + struct resource *res; + int size; + int ret; + + pd = kzalloc(sizeof(struct sh_mobile_i2c_data), GFP_KERNEL); + if (pd == NULL) { + dev_err(&dev->dev, "cannot allocate private data\n"); + return -ENOMEM; + } + + pd->clk = clk_get(&dev->dev, "peripheral_clk"); + if (IS_ERR(pd->clk)) { + dev_err(&dev->dev, "cannot get peripheral clock\n"); + ret = PTR_ERR(pd->clk); + goto err; + } + + ret = sh_mobile_i2c_hook_irqs(dev, 1); + if (ret) { + dev_err(&dev->dev, "cannot request IRQ\n"); + goto err_clk; + } + + pd->dev = &dev->dev; + platform_set_drvdata(dev, pd); + + res = platform_get_resource(dev, IORESOURCE_MEM, 0); + if (res == NULL) { + dev_err(&dev->dev, "cannot find IO resource\n"); + ret = -ENOENT; + goto err_irq; + } + + size = (res->end - res->start) + 1; + + pd->reg = ioremap(res->start, size); + if (pd->reg == NULL) { + dev_err(&dev->dev, "cannot map IO\n"); + ret = -ENXIO; + goto err_irq; + } + + /* setup the private data */ + adap = &pd->adap; + i2c_set_adapdata(adap, pd); + + adap->owner = THIS_MODULE; + adap->algo = &sh_mobile_i2c_algorithm; + adap->dev.parent = &dev->dev; + adap->retries = 5; + adap->nr = dev->id; + + strlcpy(adap->name, dev->name, sizeof(adap->name)); + + sh_mobile_i2c_setup_channel(dev); + + ret = i2c_add_numbered_adapter(adap); + if (ret < 0) { + dev_err(&dev->dev, "cannot add numbered adapter\n"); + goto err_all; + } + + return 0; + + err_all: + iounmap(pd->reg); + err_irq: + sh_mobile_i2c_hook_irqs(dev, 0); + err_clk: + clk_put(pd->clk); + err: + kfree(pd); + return ret; +} + +static int sh_mobile_i2c_remove(struct platform_device *dev) +{ + struct sh_mobile_i2c_data *pd = platform_get_drvdata(dev); + + i2c_del_adapter(&pd->adap); + iounmap(pd->reg); + sh_mobile_i2c_hook_irqs(dev, 0); + clk_put(pd->clk); + kfree(pd); + return 0; +} + +static struct platform_driver sh_mobile_i2c_driver = { + .driver = { + .name = "i2c-sh_mobile", + .owner = THIS_MODULE, + }, + .probe = sh_mobile_i2c_probe, + .remove = sh_mobile_i2c_remove, +}; + +static int __init sh_mobile_i2c_adap_init(void) +{ + return platform_driver_register(&sh_mobile_i2c_driver); +} + +static void __exit sh_mobile_i2c_adap_exit(void) +{ + platform_driver_unregister(&sh_mobile_i2c_driver); +} + +module_init(sh_mobile_i2c_adap_init); +module_exit(sh_mobile_i2c_adap_exit); + 
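An aside on the arithmetic in sh_mobile_i2c_setup_channel() just above: the 5/9 and 4/9 scale factors follow from the 5:4 SCL low/high ratio named in the comment, and the tmp/modulo step is round-to-nearest done in integer math. The standalone sketch below is editorial, not part of the patch; it repeats the same calculation so it can be sanity-checked, and the 33.333 MHz peripheral clock is an assumed example value rather than something the driver specifies.

#include <stdint.h>
#include <stdio.h>

/* Same rounding as the driver: truncate num/denom, then round up when the
 * first discarded decimal digit is 5 or more. */
static uint8_t icc_scaled(uint32_t pclk, uint32_t factor, uint32_t bus_hz)
{
	uint32_t num = pclk * factor;	/* factor is 5 for ICCL, 4 for ICCH */
	uint32_t denom = bus_hz * 9;	/* 9 = 5 + 4, one full SCL period */
	uint32_t tmp = num * 10 / denom;

	return (uint8_t)(tmp % 10 >= 5 ? num / denom + 1 : num / denom);
}

int main(void)
{
	uint32_t pclk = 33333333;	/* assumed peripheral clock, Hz */

	/* prints iccl=185 icch=148: 333 peripheral clocks per 100 kHz SCL cycle */
	printf("iccl=%u icch=%u\n",
	       icc_scaled(pclk, 5, 100000), icc_scaled(pclk, 4, 100000));
	return 0;
}

Both results have to fit the u8 iccl/icch fields, which also shows how the register pair bounds the ratio of peripheral clock to bus speed.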
+MODULE_DESCRIPTION("SuperH Mobile I2C Bus Controller driver"); +MODULE_AUTHOR("Magnus Damm"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/i2c/busses/i2c-simtec.c b/drivers/i2c/busses/i2c-simtec.c index 10af8d31e12..042fda295f3 100644 --- a/drivers/i2c/busses/i2c-simtec.c +++ b/drivers/i2c/busses/i2c-simtec.c @@ -159,6 +159,9 @@ static int simtec_i2c_remove(struct platform_device *dev) /* device driver */ +/* work with hotplug and coldplug */ +MODULE_ALIAS("platform:simtec-i2c"); + static struct platform_driver simtec_i2c_driver = { .driver = { .name = "simtec-i2c", diff --git a/drivers/i2c/busses/i2c-versatile.c b/drivers/i2c/busses/i2c-versatile.c index 081d9578ce1..4678babd3ce 100644 --- a/drivers/i2c/busses/i2c-versatile.c +++ b/drivers/i2c/busses/i2c-versatile.c @@ -151,3 +151,4 @@ module_exit(i2c_versatile_exit); MODULE_DESCRIPTION("ARM Versatile I2C bus driver"); MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:versatile-i2c"); diff --git a/drivers/i2c/busses/scx200_acb.c b/drivers/i2c/busses/scx200_acb.c index f5e7a70da83..61abe0f3325 100644 --- a/drivers/i2c/busses/scx200_acb.c +++ b/drivers/i2c/busses/scx200_acb.c @@ -527,7 +527,7 @@ static int __init scx200_create_isa(const char *text, unsigned long base, if (iface == NULL) return -ENOMEM; - if (request_region(base, 8, iface->adapter.name) == 0) { + if (!request_region(base, 8, iface->adapter.name)) { printk(KERN_ERR NAME ": can't allocate io 0x%lx-0x%lx\n", base, base + 8 - 1); rc = -EBUSY; diff --git a/drivers/i2c/chips/isp1301_omap.c b/drivers/i2c/chips/isp1301_omap.c index 2a3160153f5..b1b45dddb17 100644 --- a/drivers/i2c/chips/isp1301_omap.c +++ b/drivers/i2c/chips/isp1301_omap.c @@ -658,7 +658,7 @@ pulldown: OTG_CTRL_REG |= OTG_PULLUP; } - check_state(isp, __FUNCTION__); + check_state(isp, __func__); dump_regs(isp, "otg->isp1301"); } @@ -782,7 +782,7 @@ static irqreturn_t omap_otg_irq(int irq, void *_isp) if (otg_ctrl & OTG_DRIVER_SEL) { switch (isp->otg.state) { case OTG_STATE_A_IDLE: - b_idle(isp, __FUNCTION__); + b_idle(isp, __func__); break; default: break; @@ -826,7 +826,7 @@ static irqreturn_t omap_otg_irq(int irq, void *_isp) isp->otg.host->otg_port); } - check_state(isp, __FUNCTION__); + check_state(isp, __func__); return ret; } @@ -837,7 +837,7 @@ static int otg_init(struct isp1301 *isp) if (!otg_dev) return -ENODEV; - dump_regs(isp, __FUNCTION__); + dump_regs(isp, __func__); /* some of these values are board-specific... 
*/ OTG_SYSCON_2_REG |= OTG_EN /* for B-device: */ @@ -853,9 +853,9 @@ static int otg_init(struct isp1301 *isp) update_otg1(isp, isp1301_get_u8(isp, ISP1301_INTERRUPT_SOURCE)); update_otg2(isp, isp1301_get_u8(isp, ISP1301_OTG_STATUS)); - check_state(isp, __FUNCTION__); + check_state(isp, __func__); pr_debug("otg: %s, %s %06x\n", - state_name(isp), __FUNCTION__, OTG_CTRL_REG); + state_name(isp), __func__, OTG_CTRL_REG); OTG_IRQ_EN_REG = DRIVER_SWITCH | OPRT_CHG | B_SRP_TMROUT | B_HNP_FAIL @@ -1041,11 +1041,11 @@ static void isp_update_otg(struct isp1301 *isp, u8 stat) OTG1_DP_PULLDOWN); isp1301_clear_bits(isp, ISP1301_OTG_CONTROL_1, OTG1_DP_PULLUP); - dump_regs(isp, __FUNCTION__); + dump_regs(isp, __func__); #endif /* FALLTHROUGH */ case OTG_STATE_B_SRP_INIT: - b_idle(isp, __FUNCTION__); + b_idle(isp, __func__); OTG_CTRL_REG &= OTG_CTRL_REG & OTG_XCEIV_OUTPUTS; /* FALLTHROUGH */ case OTG_STATE_B_IDLE: @@ -1077,7 +1077,7 @@ static void isp_update_otg(struct isp1301 *isp, u8 stat) */ update_otg1(isp, isp_stat); update_otg2(isp, isp_bstat); - check_state(isp, __FUNCTION__); + check_state(isp, __func__); #endif dump_regs(isp, "isp1301->otg"); @@ -1310,7 +1310,7 @@ isp1301_set_host(struct otg_transceiver *otg, struct usb_bus *host) */ isp1301_set_bits(isp, ISP1301_OTG_CONTROL_1, OTG1_VBUS_DRV); - dump_regs(isp, __FUNCTION__); + dump_regs(isp, __func__); return 0; @@ -1365,7 +1365,7 @@ isp1301_set_peripheral(struct otg_transceiver *otg, struct usb_gadget *gadget) isp1301_set_bits(isp, ISP1301_INTERRUPT_FALLING, INTR_VBUS_VLD); dev_info(&isp->client.dev, "B-Peripheral sessions ok\n"); - dump_regs(isp, __FUNCTION__); + dump_regs(isp, __func__); /* If this has a Mini-AB connector, this mode is highly * nonstandard ... but can be handy for testing, so long @@ -1416,7 +1416,7 @@ isp1301_start_srp(struct otg_transceiver *dev) pr_debug("otg: SRP, %s ... 
%06x\n", state_name(isp), OTG_CTRL_REG); #ifdef CONFIG_USB_OTG - check_state(isp, __FUNCTION__); + check_state(isp, __func__); #endif return 0; } @@ -1463,7 +1463,7 @@ isp1301_start_hnp(struct otg_transceiver *dev) } pr_debug("otg: HNP %s, %06x ...\n", state_name(isp), OTG_CTRL_REG); - check_state(isp, __FUNCTION__); + check_state(isp, __func__); return 0; #else /* srp-only */ @@ -1601,7 +1601,7 @@ fail2: update_otg2(isp, isp1301_get_u8(isp, ISP1301_OTG_STATUS)); #endif - dump_regs(isp, __FUNCTION__); + dump_regs(isp, __func__); #ifdef VERBOSE mod_timer(&isp->timer, jiffies + TIMER_JIFFIES); diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c index e186df65711..6c7fa8d53c0 100644 --- a/drivers/i2c/i2c-core.c +++ b/drivers/i2c/i2c-core.c @@ -1506,7 +1506,7 @@ static s32 i2c_smbus_xfer_emulated(struct i2c_adapter * adapter, u16 addr, read_write = I2C_SMBUS_READ; if (data->block[0] > I2C_SMBUS_BLOCK_MAX) { dev_err(&adapter->dev, "%s called with invalid " - "block proc call size (%d)\n", __FUNCTION__, + "block proc call size (%d)\n", __func__, data->block[0]); return -1; } diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c index 393e679d9fa..d34c14c81c2 100644 --- a/drivers/i2c/i2c-dev.c +++ b/drivers/i2c/i2c-dev.c @@ -200,16 +200,176 @@ static int i2cdev_check_addr(struct i2c_adapter *adapter, unsigned int addr) return device_for_each_child(&adapter->dev, &addr, i2cdev_check); } -static int i2cdev_ioctl(struct inode *inode, struct file *file, - unsigned int cmd, unsigned long arg) +static noinline int i2cdev_ioctl_rdrw(struct i2c_client *client, + unsigned long arg) { - struct i2c_client *client = (struct i2c_client *)file->private_data; struct i2c_rdwr_ioctl_data rdwr_arg; - struct i2c_smbus_ioctl_data data_arg; - union i2c_smbus_data temp; struct i2c_msg *rdwr_pa; u8 __user **data_ptrs; - int i,datasize,res; + int i, res; + + if (copy_from_user(&rdwr_arg, + (struct i2c_rdwr_ioctl_data __user *)arg, + sizeof(rdwr_arg))) + return -EFAULT; + + /* Put an arbitrary limit on the number of messages that can + * be sent at once */ + if (rdwr_arg.nmsgs > I2C_RDRW_IOCTL_MAX_MSGS) + return -EINVAL; + + rdwr_pa = (struct i2c_msg *) + kmalloc(rdwr_arg.nmsgs * sizeof(struct i2c_msg), + GFP_KERNEL); + if (!rdwr_pa) + return -ENOMEM; + + if (copy_from_user(rdwr_pa, rdwr_arg.msgs, + rdwr_arg.nmsgs * sizeof(struct i2c_msg))) { + kfree(rdwr_pa); + return -EFAULT; + } + + data_ptrs = kmalloc(rdwr_arg.nmsgs * sizeof(u8 __user *), GFP_KERNEL); + if (data_ptrs == NULL) { + kfree(rdwr_pa); + return -ENOMEM; + } + + res = 0; + for (i = 0; i < rdwr_arg.nmsgs; i++) { + /* Limit the size of the message to a sane amount; + * and don't let length change either. 
*/ + if ((rdwr_pa[i].len > 8192) || + (rdwr_pa[i].flags & I2C_M_RECV_LEN)) { + res = -EINVAL; + break; + } + data_ptrs[i] = (u8 __user *)rdwr_pa[i].buf; + rdwr_pa[i].buf = kmalloc(rdwr_pa[i].len, GFP_KERNEL); + if (rdwr_pa[i].buf == NULL) { + res = -ENOMEM; + break; + } + if (copy_from_user(rdwr_pa[i].buf, data_ptrs[i], + rdwr_pa[i].len)) { + ++i; /* Needs to be kfreed too */ + res = -EFAULT; + break; + } + } + if (res < 0) { + int j; + for (j = 0; j < i; ++j) + kfree(rdwr_pa[j].buf); + kfree(data_ptrs); + kfree(rdwr_pa); + return res; + } + + res = i2c_transfer(client->adapter, rdwr_pa, rdwr_arg.nmsgs); + while (i-- > 0) { + if (res >= 0 && (rdwr_pa[i].flags & I2C_M_RD)) { + if (copy_to_user(data_ptrs[i], rdwr_pa[i].buf, + rdwr_pa[i].len)) + res = -EFAULT; + } + kfree(rdwr_pa[i].buf); + } + kfree(data_ptrs); + kfree(rdwr_pa); + return res; +} + +static noinline int i2cdev_ioctl_smbus(struct i2c_client *client, + unsigned long arg) +{ + struct i2c_smbus_ioctl_data data_arg; + union i2c_smbus_data temp; + int datasize, res; + + if (copy_from_user(&data_arg, + (struct i2c_smbus_ioctl_data __user *) arg, + sizeof(struct i2c_smbus_ioctl_data))) + return -EFAULT; + if ((data_arg.size != I2C_SMBUS_BYTE) && + (data_arg.size != I2C_SMBUS_QUICK) && + (data_arg.size != I2C_SMBUS_BYTE_DATA) && + (data_arg.size != I2C_SMBUS_WORD_DATA) && + (data_arg.size != I2C_SMBUS_PROC_CALL) && + (data_arg.size != I2C_SMBUS_BLOCK_DATA) && + (data_arg.size != I2C_SMBUS_I2C_BLOCK_BROKEN) && + (data_arg.size != I2C_SMBUS_I2C_BLOCK_DATA) && + (data_arg.size != I2C_SMBUS_BLOCK_PROC_CALL)) { + dev_dbg(&client->adapter->dev, + "size out of range (%x) in ioctl I2C_SMBUS.\n", + data_arg.size); + return -EINVAL; + } + /* Note that I2C_SMBUS_READ and I2C_SMBUS_WRITE are 0 and 1, + so the check is valid if size==I2C_SMBUS_QUICK too. */ + if ((data_arg.read_write != I2C_SMBUS_READ) && + (data_arg.read_write != I2C_SMBUS_WRITE)) { + dev_dbg(&client->adapter->dev, + "read_write out of range (%x) in ioctl I2C_SMBUS.\n", + data_arg.read_write); + return -EINVAL; + } + + /* Note that command values are always valid! */ + + if ((data_arg.size == I2C_SMBUS_QUICK) || + ((data_arg.size == I2C_SMBUS_BYTE) && + (data_arg.read_write == I2C_SMBUS_WRITE))) + /* These are special: we do not use data */ + return i2c_smbus_xfer(client->adapter, client->addr, + client->flags, data_arg.read_write, + data_arg.command, data_arg.size, NULL); + + if (data_arg.data == NULL) { + dev_dbg(&client->adapter->dev, + "data is NULL pointer in ioctl I2C_SMBUS.\n"); + return -EINVAL; + } + + if ((data_arg.size == I2C_SMBUS_BYTE_DATA) || + (data_arg.size == I2C_SMBUS_BYTE)) + datasize = sizeof(data_arg.data->byte); + else if ((data_arg.size == I2C_SMBUS_WORD_DATA) || + (data_arg.size == I2C_SMBUS_PROC_CALL)) + datasize = sizeof(data_arg.data->word); + else /* size == smbus block, i2c block, or block proc. call */ + datasize = sizeof(data_arg.data->block); + + if ((data_arg.size == I2C_SMBUS_PROC_CALL) || + (data_arg.size == I2C_SMBUS_BLOCK_PROC_CALL) || + (data_arg.size == I2C_SMBUS_I2C_BLOCK_DATA) || + (data_arg.read_write == I2C_SMBUS_WRITE)) { + if (copy_from_user(&temp, data_arg.data, datasize)) + return -EFAULT; + } + if (data_arg.size == I2C_SMBUS_I2C_BLOCK_BROKEN) { + /* Convert old I2C block commands to the new + convention. This preserves binary compatibility. 
*/ + data_arg.size = I2C_SMBUS_I2C_BLOCK_DATA; + if (data_arg.read_write == I2C_SMBUS_READ) + temp.block[0] = I2C_SMBUS_BLOCK_MAX; + } + res = i2c_smbus_xfer(client->adapter, client->addr, client->flags, + data_arg.read_write, data_arg.command, data_arg.size, &temp); + if (!res && ((data_arg.size == I2C_SMBUS_PROC_CALL) || + (data_arg.size == I2C_SMBUS_BLOCK_PROC_CALL) || + (data_arg.read_write == I2C_SMBUS_READ))) { + if (copy_to_user(data_arg.data, &temp, datasize)) + return -EFAULT; + } + return res; +} + +static int i2cdev_ioctl(struct inode *inode, struct file *file, + unsigned int cmd, unsigned long arg) +{ + struct i2c_client *client = (struct i2c_client *)file->private_data; unsigned long funcs; dev_dbg(&client->adapter->dev, "ioctl, cmd=0x%02x, arg=0x%02lx\n", @@ -253,164 +413,11 @@ static int i2cdev_ioctl(struct inode *inode, struct file *file, return put_user(funcs, (unsigned long __user *)arg); case I2C_RDWR: - if (copy_from_user(&rdwr_arg, - (struct i2c_rdwr_ioctl_data __user *)arg, - sizeof(rdwr_arg))) - return -EFAULT; - - /* Put an arbitrary limit on the number of messages that can - * be sent at once */ - if (rdwr_arg.nmsgs > I2C_RDRW_IOCTL_MAX_MSGS) - return -EINVAL; - - rdwr_pa = (struct i2c_msg *) - kmalloc(rdwr_arg.nmsgs * sizeof(struct i2c_msg), - GFP_KERNEL); - - if (rdwr_pa == NULL) return -ENOMEM; - - if (copy_from_user(rdwr_pa, rdwr_arg.msgs, - rdwr_arg.nmsgs * sizeof(struct i2c_msg))) { - kfree(rdwr_pa); - return -EFAULT; - } - - data_ptrs = kmalloc(rdwr_arg.nmsgs * sizeof(u8 __user *), GFP_KERNEL); - if (data_ptrs == NULL) { - kfree(rdwr_pa); - return -ENOMEM; - } - - res = 0; - for( i=0; i<rdwr_arg.nmsgs; i++ ) { - /* Limit the size of the message to a sane amount; - * and don't let length change either. */ - if ((rdwr_pa[i].len > 8192) || - (rdwr_pa[i].flags & I2C_M_RECV_LEN)) { - res = -EINVAL; - break; - } - data_ptrs[i] = (u8 __user *)rdwr_pa[i].buf; - rdwr_pa[i].buf = kmalloc(rdwr_pa[i].len, GFP_KERNEL); - if(rdwr_pa[i].buf == NULL) { - res = -ENOMEM; - break; - } - if(copy_from_user(rdwr_pa[i].buf, - data_ptrs[i], - rdwr_pa[i].len)) { - ++i; /* Needs to be kfreed too */ - res = -EFAULT; - break; - } - } - if (res < 0) { - int j; - for (j = 0; j < i; ++j) - kfree(rdwr_pa[j].buf); - kfree(data_ptrs); - kfree(rdwr_pa); - return res; - } - - res = i2c_transfer(client->adapter, - rdwr_pa, - rdwr_arg.nmsgs); - while(i-- > 0) { - if( res>=0 && (rdwr_pa[i].flags & I2C_M_RD)) { - if(copy_to_user( - data_ptrs[i], - rdwr_pa[i].buf, - rdwr_pa[i].len)) { - res = -EFAULT; - } - } - kfree(rdwr_pa[i].buf); - } - kfree(data_ptrs); - kfree(rdwr_pa); - return res; + return i2cdev_ioctl_rdrw(client, arg); case I2C_SMBUS: - if (copy_from_user(&data_arg, - (struct i2c_smbus_ioctl_data __user *) arg, - sizeof(struct i2c_smbus_ioctl_data))) - return -EFAULT; - if ((data_arg.size != I2C_SMBUS_BYTE) && - (data_arg.size != I2C_SMBUS_QUICK) && - (data_arg.size != I2C_SMBUS_BYTE_DATA) && - (data_arg.size != I2C_SMBUS_WORD_DATA) && - (data_arg.size != I2C_SMBUS_PROC_CALL) && - (data_arg.size != I2C_SMBUS_BLOCK_DATA) && - (data_arg.size != I2C_SMBUS_I2C_BLOCK_BROKEN) && - (data_arg.size != I2C_SMBUS_I2C_BLOCK_DATA) && - (data_arg.size != I2C_SMBUS_BLOCK_PROC_CALL)) { - dev_dbg(&client->adapter->dev, - "size out of range (%x) in ioctl I2C_SMBUS.\n", - data_arg.size); - return -EINVAL; - } - /* Note that I2C_SMBUS_READ and I2C_SMBUS_WRITE are 0 and 1, - so the check is valid if size==I2C_SMBUS_QUICK too. 
*/ - if ((data_arg.read_write != I2C_SMBUS_READ) && - (data_arg.read_write != I2C_SMBUS_WRITE)) { - dev_dbg(&client->adapter->dev, - "read_write out of range (%x) in ioctl I2C_SMBUS.\n", - data_arg.read_write); - return -EINVAL; - } - - /* Note that command values are always valid! */ - - if ((data_arg.size == I2C_SMBUS_QUICK) || - ((data_arg.size == I2C_SMBUS_BYTE) && - (data_arg.read_write == I2C_SMBUS_WRITE))) - /* These are special: we do not use data */ - return i2c_smbus_xfer(client->adapter, client->addr, - client->flags, - data_arg.read_write, - data_arg.command, - data_arg.size, NULL); - - if (data_arg.data == NULL) { - dev_dbg(&client->adapter->dev, - "data is NULL pointer in ioctl I2C_SMBUS.\n"); - return -EINVAL; - } + return i2cdev_ioctl_smbus(client, arg); - if ((data_arg.size == I2C_SMBUS_BYTE_DATA) || - (data_arg.size == I2C_SMBUS_BYTE)) - datasize = sizeof(data_arg.data->byte); - else if ((data_arg.size == I2C_SMBUS_WORD_DATA) || - (data_arg.size == I2C_SMBUS_PROC_CALL)) - datasize = sizeof(data_arg.data->word); - else /* size == smbus block, i2c block, or block proc. call */ - datasize = sizeof(data_arg.data->block); - - if ((data_arg.size == I2C_SMBUS_PROC_CALL) || - (data_arg.size == I2C_SMBUS_BLOCK_PROC_CALL) || - (data_arg.size == I2C_SMBUS_I2C_BLOCK_DATA) || - (data_arg.read_write == I2C_SMBUS_WRITE)) { - if (copy_from_user(&temp, data_arg.data, datasize)) - return -EFAULT; - } - if (data_arg.size == I2C_SMBUS_I2C_BLOCK_BROKEN) { - /* Convert old I2C block commands to the new - convention. This preserves binary compatibility. */ - data_arg.size = I2C_SMBUS_I2C_BLOCK_DATA; - if (data_arg.read_write == I2C_SMBUS_READ) - temp.block[0] = I2C_SMBUS_BLOCK_MAX; - } - res = i2c_smbus_xfer(client->adapter,client->addr,client->flags, - data_arg.read_write, - data_arg.command,data_arg.size,&temp); - if (! res && ((data_arg.size == I2C_SMBUS_PROC_CALL) || - (data_arg.size == I2C_SMBUS_BLOCK_PROC_CALL) || - (data_arg.read_write == I2C_SMBUS_READ))) { - if (copy_to_user(data_arg.data, &temp, datasize)) - return -EFAULT; - } - return res; case I2C_RETRIES: client->adapter->retries = arg; break; diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig index a3a6199639f..eb97c4113d7 100644 --- a/drivers/leds/Kconfig +++ b/drivers/leds/Kconfig @@ -1,6 +1,5 @@ menuconfig NEW_LEDS bool "LED Support" - depends on HAS_IOMEM help Say Y to enable Linux LED support. This allows control of supported LEDs from both userspace and optionally, by kernel events (triggers). diff --git a/drivers/mfd/htc-pasic3.c b/drivers/mfd/htc-pasic3.c index af66f4f2830..4edc120a635 100644 --- a/drivers/mfd/htc-pasic3.c +++ b/drivers/mfd/htc-pasic3.c @@ -19,8 +19,6 @@ #include <linux/interrupt.h> #include <linux/mfd/htc-pasic3.h> -#include <asm/arch/pxa-regs.h> - struct pasic3_data { void __iomem *mapping; unsigned int bus_shift; @@ -30,7 +28,6 @@ struct pasic3_data { #define REG_ADDR 5 #define REG_DATA 6 -#define NUM_REGS 7 #define READ_MODE 0x80 diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig index bb94ce78a6d..297a48f8544 100644 --- a/drivers/misc/Kconfig +++ b/drivers/misc/Kconfig @@ -360,4 +360,16 @@ config ENCLOSURE_SERVICES driver (SCSI/ATA) which supports enclosures or a SCSI enclosure device (SES) to use these services. 
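Stepping back to the i2c-dev changes above: the new i2cdev_ioctl_rdrw() helper keeps the existing userspace contract for I2C_RDWR, it only moves the message copying and buffer bookkeeping out of the big ioctl switch. A minimal, hypothetical caller of that path could look like the sketch below; the device node, the 0x50 slave address and the 0x10 register are made-up example values, and the headers are the usual userspace ones rather than anything introduced by this patch.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/i2c.h>
#include <linux/i2c-dev.h>

int main(void)
{
	uint8_t reg = 0x10, val = 0;
	struct i2c_msg msgs[2] = {
		{ .addr = 0x50, .flags = 0,        .len = 1, .buf = &reg },
		{ .addr = 0x50, .flags = I2C_M_RD, .len = 1, .buf = &val },
	};
	struct i2c_rdwr_ioctl_data rdwr = { .msgs = msgs, .nmsgs = 2 };
	int fd = open("/dev/i2c-0", O_RDWR);

	/* one write-then-read transaction with a repeated start in between */
	if (fd < 0 || ioctl(fd, I2C_RDWR, &rdwr) < 0) {
		perror("I2C_RDWR");
		return 1;
	}
	printf("reg 0x%02x = 0x%02x\n", reg, val);
	close(fd);
	return 0;
}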
+config SGI_XP + tristate "Support communication between SGI SSIs" + depends on IA64_GENERIC || IA64_SGI_SN2 + select IA64_UNCACHED_ALLOCATOR if IA64_GENERIC || IA64_SGI_SN2 + select GENERIC_ALLOCATOR if IA64_GENERIC || IA64_SGI_SN2 + ---help--- + An SGI machine can be divided into multiple Single System + Images which act independently of each other and have + hardware based memory protection from the others. Enabling + this feature will allow for direct communication between SSIs + based on a network adapter and DMA messaging. + endif # MISC_DEVICES diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile index 4581b253311..5914da43485 100644 --- a/drivers/misc/Makefile +++ b/drivers/misc/Makefile @@ -24,3 +24,4 @@ obj-$(CONFIG_EEPROM_93CX6) += eeprom_93cx6.o obj-$(CONFIG_INTEL_MENLOW) += intel_menlow.o obj-$(CONFIG_ENCLOSURE_SERVICES) += enclosure.o obj-$(CONFIG_KGDB_TESTS) += kgdbts.o +obj-$(CONFIG_SGI_XP) += sgi-xp/ diff --git a/drivers/misc/sgi-xp/Makefile b/drivers/misc/sgi-xp/Makefile new file mode 100644 index 00000000000..b6e40a7958c --- /dev/null +++ b/drivers/misc/sgi-xp/Makefile @@ -0,0 +1,11 @@ +# +# Makefile for SGI's XP devices. +# + +obj-$(CONFIG_SGI_XP) += xp.o +xp-y := xp_main.o xp_nofault.o + +obj-$(CONFIG_SGI_XP) += xpc.o +xpc-y := xpc_main.o xpc_channel.o xpc_partition.o + +obj-$(CONFIG_SGI_XP) += xpnet.o diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h new file mode 100644 index 00000000000..5515234be86 --- /dev/null +++ b/drivers/misc/sgi-xp/xp.h @@ -0,0 +1,463 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2004-2008 Silicon Graphics, Inc. All rights reserved. + */ + +/* + * External Cross Partition (XP) structures and defines. + */ + +#ifndef _DRIVERS_MISC_SGIXP_XP_H +#define _DRIVERS_MISC_SGIXP_XP_H + +#include <linux/cache.h> +#include <linux/hardirq.h> +#include <linux/mutex.h> +#include <asm/sn/types.h> +#include <asm/sn/bte.h> + +#ifdef USE_DBUG_ON +#define DBUG_ON(condition) BUG_ON(condition) +#else +#define DBUG_ON(condition) +#endif + +/* + * Define the maximum number of logically defined partitions the system + * can support. It is constrained by the maximum number of hardware + * partitionable regions. The term 'region' in this context refers to the + * minimum number of nodes that can comprise an access protection grouping. + * The access protection is in regards to memory, IPI and IOI. + * + * The maximum number of hardware partitionable regions is equal to the + * maximum number of nodes in the entire system divided by the minimum number + * of nodes that comprise an access protection grouping. + */ +#define XP_MAX_PARTITIONS 64 + +/* + * Define the number of u64s required to represent all the C-brick nasids + * as a bitmap. The cross-partition kernel modules deal only with + * C-brick nasids, thus the need for bitmaps which don't account for + * odd-numbered (non C-brick) nasids. + */ +#define XP_MAX_PHYSNODE_ID (MAX_NUMALINK_NODES / 2) +#define XP_NASID_MASK_BYTES ((XP_MAX_PHYSNODE_ID + 7) / 8) +#define XP_NASID_MASK_WORDS ((XP_MAX_PHYSNODE_ID + 63) / 64) + +/* + * Wrapper for bte_copy() that should it return a failure status will retry + * the bte_copy() once in the hope that the failure was due to a temporary + * aberration (i.e., the link going down temporarily). + * + * src - physical address of the source of the transfer. 
+ * vdst - virtual address of the destination of the transfer. + * len - number of bytes to transfer from source to destination. + * mode - see bte_copy() for definition. + * notification - see bte_copy() for definition. + * + * Note: xp_bte_copy() should never be called while holding a spinlock. + */ +static inline bte_result_t +xp_bte_copy(u64 src, u64 vdst, u64 len, u64 mode, void *notification) +{ + bte_result_t ret; + u64 pdst = ia64_tpa(vdst); + + /* + * Ensure that the physically mapped memory is contiguous. + * + * We do this by ensuring that the memory is from region 7 only. + * If the need should arise to use memory from one of the other + * regions, then modify the BUG_ON() statement to ensure that the + * memory from that region is always physically contiguous. + */ + BUG_ON(REGION_NUMBER(vdst) != RGN_KERNEL); + + ret = bte_copy(src, pdst, len, mode, notification); + if ((ret != BTE_SUCCESS) && BTE_ERROR_RETRY(ret)) { + if (!in_interrupt()) + cond_resched(); + + ret = bte_copy(src, pdst, len, mode, notification); + } + + return ret; +} + +/* + * XPC establishes channel connections between the local partition and any + * other partition that is currently up. Over these channels, kernel-level + * `users' can communicate with their counterparts on the other partitions. + * + * The maxinum number of channels is limited to eight. For performance reasons, + * the internal cross partition structures require sixteen bytes per channel, + * and eight allows all of this interface-shared info to fit in one cache line. + * + * XPC_NCHANNELS reflects the total number of channels currently defined. + * If the need for additional channels arises, one can simply increase + * XPC_NCHANNELS accordingly. If the day should come where that number + * exceeds the MAXIMUM number of channels allowed (eight), then one will need + * to make changes to the XPC code to allow for this. + */ +#define XPC_MEM_CHANNEL 0 /* memory channel number */ +#define XPC_NET_CHANNEL 1 /* network channel number */ + +#define XPC_NCHANNELS 2 /* #of defined channels */ +#define XPC_MAX_NCHANNELS 8 /* max #of channels allowed */ + +#if XPC_NCHANNELS > XPC_MAX_NCHANNELS +#error XPC_NCHANNELS exceeds MAXIMUM allowed. +#endif + +/* + * The format of an XPC message is as follows: + * + * +-------+--------------------------------+ + * | flags |////////////////////////////////| + * +-------+--------------------------------+ + * | message # | + * +----------------------------------------+ + * | payload (user-defined message) | + * | | + * : + * | | + * +----------------------------------------+ + * + * The size of the payload is defined by the user via xpc_connect(). A user- + * defined message resides in the payload area. + * + * The user should have no dealings with the message header, but only the + * message's payload. When a message entry is allocated (via xpc_allocate()) + * a pointer to the payload area is returned and not the actual beginning of + * the XPC message. The user then constructs a message in the payload area + * and passes that pointer as an argument on xpc_send() or xpc_send_notify(). + * + * The size of a message entry (within a message queue) must be a cacheline + * sized multiple in order to facilitate the BTE transfer of messages from one + * message queue to another. A macro, XPC_MSG_SIZE(), is provided for the user + * that wants to fit as many msg entries as possible in a given memory size + * (e.g. a memory page). 
+ */ +struct xpc_msg { + u8 flags; /* FOR XPC INTERNAL USE ONLY */ + u8 reserved[7]; /* FOR XPC INTERNAL USE ONLY */ + s64 number; /* FOR XPC INTERNAL USE ONLY */ + + u64 payload; /* user defined portion of message */ +}; + +#define XPC_MSG_PAYLOAD_OFFSET (u64) (&((struct xpc_msg *)0)->payload) +#define XPC_MSG_SIZE(_payload_size) \ + L1_CACHE_ALIGN(XPC_MSG_PAYLOAD_OFFSET + (_payload_size)) + +/* + * Define the return values and values passed to user's callout functions. + * (It is important to add new value codes at the end just preceding + * xpcUnknownReason, which must have the highest numerical value.) + */ +enum xpc_retval { + xpcSuccess = 0, + + xpcNotConnected, /* 1: channel is not connected */ + xpcConnected, /* 2: channel connected (opened) */ + xpcRETIRED1, /* 3: (formerly xpcDisconnected) */ + + xpcMsgReceived, /* 4: message received */ + xpcMsgDelivered, /* 5: message delivered and acknowledged */ + + xpcRETIRED2, /* 6: (formerly xpcTransferFailed) */ + + xpcNoWait, /* 7: operation would require wait */ + xpcRetry, /* 8: retry operation */ + xpcTimeout, /* 9: timeout in xpc_allocate_msg_wait() */ + xpcInterrupted, /* 10: interrupted wait */ + + xpcUnequalMsgSizes, /* 11: message size disparity between sides */ + xpcInvalidAddress, /* 12: invalid address */ + + xpcNoMemory, /* 13: no memory available for XPC structures */ + xpcLackOfResources, /* 14: insufficient resources for operation */ + xpcUnregistered, /* 15: channel is not registered */ + xpcAlreadyRegistered, /* 16: channel is already registered */ + + xpcPartitionDown, /* 17: remote partition is down */ + xpcNotLoaded, /* 18: XPC module is not loaded */ + xpcUnloading, /* 19: this side is unloading XPC module */ + + xpcBadMagic, /* 20: XPC MAGIC string not found */ + + xpcReactivating, /* 21: remote partition was reactivated */ + + xpcUnregistering, /* 22: this side is unregistering channel */ + xpcOtherUnregistering, /* 23: other side is unregistering channel */ + + xpcCloneKThread, /* 24: cloning kernel thread */ + xpcCloneKThreadFailed, /* 25: cloning kernel thread failed */ + + xpcNoHeartbeat, /* 26: remote partition has no heartbeat */ + + xpcPioReadError, /* 27: PIO read error */ + xpcPhysAddrRegFailed, /* 28: registration of phys addr range failed */ + + xpcBteDirectoryError, /* 29: maps to BTEFAIL_DIR */ + xpcBtePoisonError, /* 30: maps to BTEFAIL_POISON */ + xpcBteWriteError, /* 31: maps to BTEFAIL_WERR */ + xpcBteAccessError, /* 32: maps to BTEFAIL_ACCESS */ + xpcBtePWriteError, /* 33: maps to BTEFAIL_PWERR */ + xpcBtePReadError, /* 34: maps to BTEFAIL_PRERR */ + xpcBteTimeOutError, /* 35: maps to BTEFAIL_TOUT */ + xpcBteXtalkError, /* 36: maps to BTEFAIL_XTERR */ + xpcBteNotAvailable, /* 37: maps to BTEFAIL_NOTAVAIL */ + xpcBteUnmappedError, /* 38: unmapped BTEFAIL_ error */ + + xpcBadVersion, /* 39: bad version number */ + xpcVarsNotSet, /* 40: the XPC variables are not set up */ + xpcNoRsvdPageAddr, /* 41: unable to get rsvd page's phys addr */ + xpcInvalidPartid, /* 42: invalid partition ID */ + xpcLocalPartid, /* 43: local partition ID */ + + xpcOtherGoingDown, /* 44: other side going down, reason unknown */ + xpcSystemGoingDown, /* 45: system is going down, reason unknown */ + xpcSystemHalt, /* 46: system is being halted */ + xpcSystemReboot, /* 47: system is being rebooted */ + xpcSystemPoweroff, /* 48: system is being powered off */ + + xpcDisconnecting, /* 49: channel disconnecting (closing) */ + + xpcOpenCloseError, /* 50: channel open/close protocol error */ + + xpcDisconnected, /* 51: channel 
disconnected (closed) */ + + xpcBteSh2Start, /* 52: BTE CRB timeout */ + + /* 53: 0x1 BTE Error Response Short */ + xpcBteSh2RspShort = xpcBteSh2Start + BTEFAIL_SH2_RESP_SHORT, + + /* 54: 0x2 BTE Error Response Long */ + xpcBteSh2RspLong = xpcBteSh2Start + BTEFAIL_SH2_RESP_LONG, + + /* 56: 0x4 BTE Error Response DSB */ + xpcBteSh2RspDSB = xpcBteSh2Start + BTEFAIL_SH2_RESP_DSP, + + /* 60: 0x8 BTE Error Response Access */ + xpcBteSh2RspAccess = xpcBteSh2Start + BTEFAIL_SH2_RESP_ACCESS, + + /* 68: 0x10 BTE Error CRB timeout */ + xpcBteSh2CRBTO = xpcBteSh2Start + BTEFAIL_SH2_CRB_TO, + + /* 84: 0x20 BTE Error NACK limit */ + xpcBteSh2NACKLimit = xpcBteSh2Start + BTEFAIL_SH2_NACK_LIMIT, + + /* 115: BTE end */ + xpcBteSh2End = xpcBteSh2Start + BTEFAIL_SH2_ALL, + + xpcUnknownReason /* 116: unknown reason - must be last in enum */ +}; + +/* + * Define the callout function types used by XPC to update the user on + * connection activity and state changes (via the user function registered by + * xpc_connect()) and to notify them of messages received and delivered (via + * the user function registered by xpc_send_notify()). + * + * The two function types are xpc_channel_func and xpc_notify_func and + * both share the following arguments, with the exception of "data", which + * only xpc_channel_func has. + * + * Arguments: + * + * reason - reason code. (See following table.) + * partid - partition ID associated with condition. + * ch_number - channel # associated with condition. + * data - pointer to optional data. (See following table.) + * key - pointer to optional user-defined value provided as the "key" + * argument to xpc_connect() or xpc_send_notify(). + * + * In the following table the "Optional Data" column applies to callouts made + * to functions registered by xpc_connect(). A "NA" in that column indicates + * that this reason code can be passed to functions registered by + * xpc_send_notify() (i.e. they don't have data arguments). + * + * Also, the first three reason codes in the following table indicate + * success, whereas the others indicate failure. When a failure reason code + * is received, one can assume that the channel is not connected. 
+ * + * + * Reason Code | Cause | Optional Data + * =====================+================================+===================== + * xpcConnected | connection has been established| max #of entries + * | to the specified partition on | allowed in message + * | the specified channel | queue + * ---------------------+--------------------------------+--------------------- + * xpcMsgReceived | an XPC message arrived from | address of payload + * | the specified partition on the | + * | specified channel | [the user must call + * | | xpc_received() when + * | | finished with the + * | | payload] + * ---------------------+--------------------------------+--------------------- + * xpcMsgDelivered | notification that the message | NA + * | was delivered to the intended | + * | recipient and that they have | + * | acknowledged its receipt by | + * | calling xpc_received() | + * =====================+================================+===================== + * xpcUnequalMsgSizes | can't connect to the specified | NULL + * | partition on the specified | + * | channel because of mismatched | + * | message sizes | + * ---------------------+--------------------------------+--------------------- + * xpcNoMemory | insufficient memory avaiable | NULL + * | to allocate message queue | + * ---------------------+--------------------------------+--------------------- + * xpcLackOfResources | lack of resources to create | NULL + * | the necessary kthreads to | + * | support the channel | + * ---------------------+--------------------------------+--------------------- + * xpcUnregistering | this side's user has | NULL or NA + * | unregistered by calling | + * | xpc_disconnect() | + * ---------------------+--------------------------------+--------------------- + * xpcOtherUnregistering| the other side's user has | NULL or NA + * | unregistered by calling | + * | xpc_disconnect() | + * ---------------------+--------------------------------+--------------------- + * xpcNoHeartbeat | the other side's XPC is no | NULL or NA + * | longer heartbeating | + * | | + * ---------------------+--------------------------------+--------------------- + * xpcUnloading | this side's XPC module is | NULL or NA + * | being unloaded | + * | | + * ---------------------+--------------------------------+--------------------- + * xpcOtherUnloading | the other side's XPC module is | NULL or NA + * | is being unloaded | + * | | + * ---------------------+--------------------------------+--------------------- + * xpcPioReadError | xp_nofault_PIOR() returned an | NULL or NA + * | error while sending an IPI | + * | | + * ---------------------+--------------------------------+--------------------- + * xpcInvalidAddress | the address either received or | NULL or NA + * | sent by the specified partition| + * | is invalid | + * ---------------------+--------------------------------+--------------------- + * xpcBteNotAvailable | attempt to pull data from the | NULL or NA + * xpcBtePoisonError | specified partition over the | + * xpcBteWriteError | specified channel via a | + * xpcBteAccessError | bte_copy() failed | + * xpcBteTimeOutError | | + * xpcBteXtalkError | | + * xpcBteDirectoryError | | + * xpcBteGenericError | | + * xpcBteUnmappedError | | + * ---------------------+--------------------------------+--------------------- + * xpcUnknownReason | the specified channel to the | NULL or NA + * | specified partition was | + * | unavailable for unknown reasons| + * =====================+================================+===================== + */ + 
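The table above describes the callouts an XPC user receives. As a hedged illustration of how a kernel-level client might wire this up (an editorial sketch, not part of the patch: the channel number, payload size and kthread limits are invented example values, and only the memory and network channels are actually defined by this header):

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/module.h>
#include "xp.h"

#define EXAMPLE_CHANNEL		XPC_NET_CHANNEL	/* example only */
#define EXAMPLE_PAYLOAD_SIZE	64		/* XPC_MSG_SIZE() pads this plus
						 * the 16-byte header to a
						 * cacheline multiple */

static void example_channel_func(enum xpc_retval reason, partid_t partid,
				 int ch_number, void *data, void *key)
{
	switch (reason) {
	case xpcConnected:
		/* 'data' carries the max #of message entries granted */
		break;
	case xpcMsgReceived:
		/* 'data' points at the payload; hand it back when done */
		xpc_received(partid, ch_number, data);
		break;
	default:
		/* xpcMsgDelivered acks a sent message; any failure code
		 * here means the channel is not connected (see table) */
		break;
	}
}

static int __init example_client_init(void)
{
	/* args: channel, callout, key, payload_size, nentries,
	 * assigned_limit, idle_limit */
	if (xpc_connect(EXAMPLE_CHANNEL, example_channel_func, NULL,
			EXAMPLE_PAYLOAD_SIZE, 8, 4, 2) != xpcSuccess)
		return -ENODEV;
	return 0;
}

static void __exit example_client_exit(void)
{
	xpc_disconnect(EXAMPLE_CHANNEL);
}

module_init(example_client_init);
module_exit(example_client_exit);
MODULE_LICENSE("GPL");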
+typedef void (*xpc_channel_func) (enum xpc_retval reason, partid_t partid, + int ch_number, void *data, void *key); + +typedef void (*xpc_notify_func) (enum xpc_retval reason, partid_t partid, + int ch_number, void *key); + +/* + * The following is a registration entry. There is a global array of these, + * one per channel. It is used to record the connection registration made + * by the users of XPC. As long as a registration entry exists, for any + * partition that comes up, XPC will attempt to establish a connection on + * that channel. Notification that a connection has been made will occur via + * the xpc_channel_func function. + * + * The 'func' field points to the function to call when aynchronous + * notification is required for such events as: a connection established/lost, + * or an incoming message received, or an error condition encountered. A + * non-NULL 'func' field indicates that there is an active registration for + * the channel. + */ +struct xpc_registration { + struct mutex mutex; + xpc_channel_func func; /* function to call */ + void *key; /* pointer to user's key */ + u16 nentries; /* #of msg entries in local msg queue */ + u16 msg_size; /* message queue's message size */ + u32 assigned_limit; /* limit on #of assigned kthreads */ + u32 idle_limit; /* limit on #of idle kthreads */ +} ____cacheline_aligned; + +#define XPC_CHANNEL_REGISTERED(_c) (xpc_registrations[_c].func != NULL) + +/* the following are valid xpc_allocate() flags */ +#define XPC_WAIT 0 /* wait flag */ +#define XPC_NOWAIT 1 /* no wait flag */ + +struct xpc_interface { + void (*connect) (int); + void (*disconnect) (int); + enum xpc_retval (*allocate) (partid_t, int, u32, void **); + enum xpc_retval (*send) (partid_t, int, void *); + enum xpc_retval (*send_notify) (partid_t, int, void *, + xpc_notify_func, void *); + void (*received) (partid_t, int, void *); + enum xpc_retval (*partid_to_nasids) (partid_t, void *); +}; + +extern struct xpc_interface xpc_interface; + +extern void xpc_set_interface(void (*)(int), + void (*)(int), + enum xpc_retval (*)(partid_t, int, u32, void **), + enum xpc_retval (*)(partid_t, int, void *), + enum xpc_retval (*)(partid_t, int, void *, + xpc_notify_func, void *), + void (*)(partid_t, int, void *), + enum xpc_retval (*)(partid_t, void *)); +extern void xpc_clear_interface(void); + +extern enum xpc_retval xpc_connect(int, xpc_channel_func, void *, u16, + u16, u32, u32); +extern void xpc_disconnect(int); + +static inline enum xpc_retval +xpc_allocate(partid_t partid, int ch_number, u32 flags, void **payload) +{ + return xpc_interface.allocate(partid, ch_number, flags, payload); +} + +static inline enum xpc_retval +xpc_send(partid_t partid, int ch_number, void *payload) +{ + return xpc_interface.send(partid, ch_number, payload); +} + +static inline enum xpc_retval +xpc_send_notify(partid_t partid, int ch_number, void *payload, + xpc_notify_func func, void *key) +{ + return xpc_interface.send_notify(partid, ch_number, payload, func, key); +} + +static inline void +xpc_received(partid_t partid, int ch_number, void *payload) +{ + return xpc_interface.received(partid, ch_number, payload); +} + +static inline enum xpc_retval +xpc_partid_to_nasids(partid_t partid, void *nasids) +{ + return xpc_interface.partid_to_nasids(partid, nasids); +} + +extern u64 xp_nofault_PIOR_target; +extern int xp_nofault_PIOR(void *); +extern int xp_error_PIOR(void); + +#endif /* _DRIVERS_MISC_SGIXP_XP_H */ diff --git a/drivers/misc/sgi-xp/xp_main.c b/drivers/misc/sgi-xp/xp_main.c new file mode 100644 
index 00000000000..1fbf99bae96 --- /dev/null +++ b/drivers/misc/sgi-xp/xp_main.c @@ -0,0 +1,279 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (c) 2004-2008 Silicon Graphics, Inc. All Rights Reserved. + */ + +/* + * Cross Partition (XP) base. + * + * XP provides a base from which its users can interact + * with XPC, yet not be dependent on XPC. + * + */ + +#include <linux/kernel.h> +#include <linux/interrupt.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <asm/sn/intr.h> +#include <asm/sn/sn_sal.h> +#include "xp.h" + +/* + * The export of xp_nofault_PIOR needs to happen here since it is defined + * in drivers/misc/sgi-xp/xp_nofault.S. The target of the nofault read is + * defined here. + */ +EXPORT_SYMBOL_GPL(xp_nofault_PIOR); + +u64 xp_nofault_PIOR_target; +EXPORT_SYMBOL_GPL(xp_nofault_PIOR_target); + +/* + * xpc_registrations[] keeps track of xpc_connect()'s done by the kernel-level + * users of XPC. + */ +struct xpc_registration xpc_registrations[XPC_NCHANNELS]; +EXPORT_SYMBOL_GPL(xpc_registrations); + +/* + * Initialize the XPC interface to indicate that XPC isn't loaded. + */ +static enum xpc_retval +xpc_notloaded(void) +{ + return xpcNotLoaded; +} + +struct xpc_interface xpc_interface = { + (void (*)(int))xpc_notloaded, + (void (*)(int))xpc_notloaded, + (enum xpc_retval(*)(partid_t, int, u32, void **))xpc_notloaded, + (enum xpc_retval(*)(partid_t, int, void *))xpc_notloaded, + (enum xpc_retval(*)(partid_t, int, void *, xpc_notify_func, void *)) + xpc_notloaded, + (void (*)(partid_t, int, void *))xpc_notloaded, + (enum xpc_retval(*)(partid_t, void *))xpc_notloaded +}; +EXPORT_SYMBOL_GPL(xpc_interface); + +/* + * XPC calls this when it (the XPC module) has been loaded. + */ +void +xpc_set_interface(void (*connect) (int), + void (*disconnect) (int), + enum xpc_retval (*allocate) (partid_t, int, u32, void **), + enum xpc_retval (*send) (partid_t, int, void *), + enum xpc_retval (*send_notify) (partid_t, int, void *, + xpc_notify_func, void *), + void (*received) (partid_t, int, void *), + enum xpc_retval (*partid_to_nasids) (partid_t, void *)) +{ + xpc_interface.connect = connect; + xpc_interface.disconnect = disconnect; + xpc_interface.allocate = allocate; + xpc_interface.send = send; + xpc_interface.send_notify = send_notify; + xpc_interface.received = received; + xpc_interface.partid_to_nasids = partid_to_nasids; +} +EXPORT_SYMBOL_GPL(xpc_set_interface); + +/* + * XPC calls this when it (the XPC module) is being unloaded. + */ +void +xpc_clear_interface(void) +{ + xpc_interface.connect = (void (*)(int))xpc_notloaded; + xpc_interface.disconnect = (void (*)(int))xpc_notloaded; + xpc_interface.allocate = (enum xpc_retval(*)(partid_t, int, u32, + void **))xpc_notloaded; + xpc_interface.send = (enum xpc_retval(*)(partid_t, int, void *)) + xpc_notloaded; + xpc_interface.send_notify = (enum xpc_retval(*)(partid_t, int, void *, + xpc_notify_func, + void *))xpc_notloaded; + xpc_interface.received = (void (*)(partid_t, int, void *)) + xpc_notloaded; + xpc_interface.partid_to_nasids = (enum xpc_retval(*)(partid_t, void *)) + xpc_notloaded; +} +EXPORT_SYMBOL_GPL(xpc_clear_interface); + +/* + * Register for automatic establishment of a channel connection whenever + * a partition comes up. + * + * Arguments: + * + * ch_number - channel # to register for connection. 
+ * func - function to call for asynchronous notification of channel + * state changes (i.e., connection, disconnection, error) and + * the arrival of incoming messages. + * key - pointer to optional user-defined value that gets passed back + * to the user on any callouts made to func. + * payload_size - size in bytes of the XPC message's payload area which + * contains a user-defined message. The user should make + * this large enough to hold their largest message. + * nentries - max #of XPC message entries a message queue can contain. + * The actual number, which is determined when a connection + * is established and may be less then requested, will be + * passed to the user via the xpcConnected callout. + * assigned_limit - max number of kthreads allowed to be processing + * messages (per connection) at any given instant. + * idle_limit - max number of kthreads allowed to be idle at any given + * instant. + */ +enum xpc_retval +xpc_connect(int ch_number, xpc_channel_func func, void *key, u16 payload_size, + u16 nentries, u32 assigned_limit, u32 idle_limit) +{ + struct xpc_registration *registration; + + DBUG_ON(ch_number < 0 || ch_number >= XPC_NCHANNELS); + DBUG_ON(payload_size == 0 || nentries == 0); + DBUG_ON(func == NULL); + DBUG_ON(assigned_limit == 0 || idle_limit > assigned_limit); + + registration = &xpc_registrations[ch_number]; + + if (mutex_lock_interruptible(®istration->mutex) != 0) + return xpcInterrupted; + + /* if XPC_CHANNEL_REGISTERED(ch_number) */ + if (registration->func != NULL) { + mutex_unlock(®istration->mutex); + return xpcAlreadyRegistered; + } + + /* register the channel for connection */ + registration->msg_size = XPC_MSG_SIZE(payload_size); + registration->nentries = nentries; + registration->assigned_limit = assigned_limit; + registration->idle_limit = idle_limit; + registration->key = key; + registration->func = func; + + mutex_unlock(®istration->mutex); + + xpc_interface.connect(ch_number); + + return xpcSuccess; +} +EXPORT_SYMBOL_GPL(xpc_connect); + +/* + * Remove the registration for automatic connection of the specified channel + * when a partition comes up. + * + * Before returning this xpc_disconnect() will wait for all connections on the + * specified channel have been closed/torndown. So the caller can be assured + * that they will not be receiving any more callouts from XPC to their + * function registered via xpc_connect(). + * + * Arguments: + * + * ch_number - channel # to unregister. + */ +void +xpc_disconnect(int ch_number) +{ + struct xpc_registration *registration; + + DBUG_ON(ch_number < 0 || ch_number >= XPC_NCHANNELS); + + registration = &xpc_registrations[ch_number]; + + /* + * We've decided not to make this a down_interruptible(), since we + * figured XPC's users will just turn around and call xpc_disconnect() + * again anyways, so we might as well wait, if need be. 
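As an illustration of the registration API above, here is a minimal, hypothetical kernel-level client sketch (not part of this patch): the channel number, payload struct, limits, and callback body are invented for the example, and it assumes the xpcConnected/xpcMsgReceived callout reasons declared in the earlier part of xp.h.

#include <linux/kernel.h>
#include <linux/module.h>
#include "xp.h"

#define MY_CH		0	/* hypothetical channel #, must be < XPC_NCHANNELS */
#define MY_NENTRIES	128	/* max #of message entries requested */

struct my_payload {		/* user-defined payload carried in each message */
	u64 opcode;
	u64 data;
};

static void
my_channel_func(enum xpc_retval reason, partid_t partid, int ch_number,
		void *data, void *key)
{
	switch (reason) {
	case xpcConnected:
		/* 'data' carries the #of message entries actually granted */
		printk(KERN_INFO "xp example: connected to partid=%d\n", partid);
		break;
	case xpcMsgReceived:
		/* 'data' points at the delivered payload; hand it back to
		 * XPC once we are done with it */
		xpc_received(partid, ch_number, data);
		break;
	default:
		printk(KERN_INFO "xp example: event %d on channel %d\n",
		       reason, ch_number);
		break;
	}
}

static int __init my_client_init(void)
{
	enum xpc_retval ret;

	ret = xpc_connect(MY_CH, my_channel_func, NULL,
			  sizeof(struct my_payload), MY_NENTRIES,
			  4 /* assigned_limit */, 2 /* idle_limit */);
	return (ret == xpcSuccess) ? 0 : -EBUSY;
}
module_init(my_client_init);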
+ */ + mutex_lock(®istration->mutex); + + /* if !XPC_CHANNEL_REGISTERED(ch_number) */ + if (registration->func == NULL) { + mutex_unlock(®istration->mutex); + return; + } + + /* remove the connection registration for the specified channel */ + registration->func = NULL; + registration->key = NULL; + registration->nentries = 0; + registration->msg_size = 0; + registration->assigned_limit = 0; + registration->idle_limit = 0; + + xpc_interface.disconnect(ch_number); + + mutex_unlock(®istration->mutex); + + return; +} +EXPORT_SYMBOL_GPL(xpc_disconnect); + +int __init +xp_init(void) +{ + int ret, ch_number; + u64 func_addr = *(u64 *)xp_nofault_PIOR; + u64 err_func_addr = *(u64 *)xp_error_PIOR; + + if (!ia64_platform_is("sn2")) + return -ENODEV; + + /* + * Register a nofault code region which performs a cross-partition + * PIO read. If the PIO read times out, the MCA handler will consume + * the error and return to a kernel-provided instruction to indicate + * an error. This PIO read exists because it is guaranteed to timeout + * if the destination is down (AMO operations do not timeout on at + * least some CPUs on Shubs <= v1.2, which unfortunately we have to + * work around). + */ + ret = sn_register_nofault_code(func_addr, err_func_addr, err_func_addr, + 1, 1); + if (ret != 0) { + printk(KERN_ERR "XP: can't register nofault code, error=%d\n", + ret); + } + /* + * Setup the nofault PIO read target. (There is no special reason why + * SH_IPI_ACCESS was selected.) + */ + if (is_shub2()) + xp_nofault_PIOR_target = SH2_IPI_ACCESS0; + else + xp_nofault_PIOR_target = SH1_IPI_ACCESS; + + /* initialize the connection registration mutex */ + for (ch_number = 0; ch_number < XPC_NCHANNELS; ch_number++) + mutex_init(&xpc_registrations[ch_number].mutex); + + return 0; +} + +module_init(xp_init); + +void __exit +xp_exit(void) +{ + u64 func_addr = *(u64 *)xp_nofault_PIOR; + u64 err_func_addr = *(u64 *)xp_error_PIOR; + + /* unregister the PIO read nofault code region */ + (void)sn_register_nofault_code(func_addr, err_func_addr, + err_func_addr, 1, 0); +} + +module_exit(xp_exit); + +MODULE_AUTHOR("Silicon Graphics, Inc."); +MODULE_DESCRIPTION("Cross Partition (XP) base"); +MODULE_LICENSE("GPL"); diff --git a/drivers/misc/sgi-xp/xp_nofault.S b/drivers/misc/sgi-xp/xp_nofault.S new file mode 100644 index 00000000000..e38d4331942 --- /dev/null +++ b/drivers/misc/sgi-xp/xp_nofault.S @@ -0,0 +1,35 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (c) 2004-2008 Silicon Graphics, Inc. All Rights Reserved. + */ + +/* + * The xp_nofault_PIOR function takes a pointer to a remote PIO register + * and attempts to load and consume a value from it. This function + * will be registered as a nofault code block. In the event that the + * PIO read fails, the MCA handler will force the error to look + * corrected and vector to the xp_error_PIOR which will return an error. + * + * The definition of "consumption" and the time it takes for an MCA + * to surface is processor implementation specific. This code + * is sufficient on Itanium through the Montvale processor family. + * It may need to be adjusted for future processor implementations. 
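For reference, a hedged sketch of the calling convention the rest of this patch uses for the nofault read: a nonzero return from xp_nofault_PIOR() means the MCA handler vectored to xp_error_PIOR(), i.e. the remote side could not be read. GLOBAL_MMR_ADDR()/NASID_GET() are existing SN2 helpers; 'amo' and the wrapper name are assumptions for the example.

static enum xpc_retval
probe_remote_amo(AMO_t *amo)
{
	int ret;

	/* direct the nofault read at the node that owns 'amo' */
	ret = xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo->variable),
						     xp_nofault_PIOR_target));

	/* 0 == read completed; 1 == MCA handler returned via xp_error_PIOR() */
	return (ret == 0) ? xpcSuccess : xpcPioReadError;
}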
+ * + * extern int xp_nofault_PIOR(void *remote_register); + */ + + .global xp_nofault_PIOR +xp_nofault_PIOR: + mov r8=r0 // Stage a success return value + ld8.acq r9=[r32];; // PIO Read the specified register + adds r9=1,r9;; // Add to force consumption + srlz.i;; // Allow time for MCA to surface + br.ret.sptk.many b0;; // Return success + + .global xp_error_PIOR +xp_error_PIOR: + mov r8=1 // Return value of 1 + br.ret.sptk.many b0;; // Return failure diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h new file mode 100644 index 00000000000..9eb6d4a3269 --- /dev/null +++ b/drivers/misc/sgi-xp/xpc.h @@ -0,0 +1,1187 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (c) 2004-2008 Silicon Graphics, Inc. All Rights Reserved. + */ + +/* + * Cross Partition Communication (XPC) structures and macros. + */ + +#ifndef _DRIVERS_MISC_SGIXP_XPC_H +#define _DRIVERS_MISC_SGIXP_XPC_H + +#include <linux/interrupt.h> +#include <linux/sysctl.h> +#include <linux/device.h> +#include <linux/mutex.h> +#include <linux/completion.h> +#include <asm/pgtable.h> +#include <asm/processor.h> +#include <asm/sn/bte.h> +#include <asm/sn/clksupport.h> +#include <asm/sn/addrs.h> +#include <asm/sn/mspec.h> +#include <asm/sn/shub_mmr.h> +#include "xp.h" + +/* + * XPC Version numbers consist of a major and minor number. XPC can always + * talk to versions with same major #, and never talk to versions with a + * different major #. + */ +#define _XPC_VERSION(_maj, _min) (((_maj) << 4) | ((_min) & 0xf)) +#define XPC_VERSION_MAJOR(_v) ((_v) >> 4) +#define XPC_VERSION_MINOR(_v) ((_v) & 0xf) + +/* + * The next macros define word or bit representations for given + * C-brick nasid in either the SAL provided bit array representing + * nasids in the partition/machine or the AMO_t array used for + * inter-partition initiation communications. + * + * For SN2 machines, C-Bricks are alway even numbered NASIDs. As + * such, some space will be saved by insisting that nasid information + * passed from SAL always be packed for C-Bricks and the + * cross-partition interrupts use the same packing scheme. + */ +#define XPC_NASID_W_INDEX(_n) (((_n) / 64) / 2) +#define XPC_NASID_B_INDEX(_n) (((_n) / 2) & (64 - 1)) +#define XPC_NASID_IN_ARRAY(_n, _p) ((_p)[XPC_NASID_W_INDEX(_n)] & \ + (1UL << XPC_NASID_B_INDEX(_n))) +#define XPC_NASID_FROM_W_B(_w, _b) (((_w) * 64 + (_b)) * 2) + +#define XPC_HB_DEFAULT_INTERVAL 5 /* incr HB every x secs */ +#define XPC_HB_CHECK_DEFAULT_INTERVAL 20 /* check HB every x secs */ + +/* define the process name of HB checker and the CPU it is pinned to */ +#define XPC_HB_CHECK_THREAD_NAME "xpc_hb" +#define XPC_HB_CHECK_CPU 0 + +/* define the process name of the discovery thread */ +#define XPC_DISCOVERY_THREAD_NAME "xpc_discovery" + +/* + * the reserved page + * + * SAL reserves one page of memory per partition for XPC. Though a full page + * in length (16384 bytes), its starting address is not page aligned, but it + * is cacheline aligned. The reserved page consists of the following: + * + * reserved page header + * + * The first cacheline of the reserved page contains the header + * (struct xpc_rsvd_page). Before SAL initialization has completed, + * SAL has set up the following fields of the reserved page header: + * SAL_signature, SAL_version, partid, and nasids_size. The other + * fields are set up by XPC. 
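To make the packing concrete, here is a small stand-alone illustration (ordinary user-space C, not part of the patch) of the nasid index macros defined above; the macros are duplicated so the example compiles on its own, and nasid 130 is an arbitrary even-numbered C-brick nasid chosen for the example.

#include <assert.h>
#include <stdio.h>

#define XPC_NASID_W_INDEX(_n)	(((_n) / 64) / 2)
#define XPC_NASID_B_INDEX(_n)	(((_n) / 2) & (64 - 1))
#define XPC_NASID_FROM_W_B(_w, _b) (((_w) * 64 + (_b)) * 2)

int main(void)
{
	int nasid = 130;	/* C-bricks are always even-numbered nasids */
	int w = XPC_NASID_W_INDEX(nasid);	/* -> 1: second 64-bit word */
	int b = XPC_NASID_B_INDEX(nasid);	/* -> 1: bit 1 of that word */

	printf("nasid %d -> word %d, bit %d\n", nasid, w, b);
	assert(XPC_NASID_FROM_W_B(w, b) == nasid);	/* round-trips */
	return 0;
}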
(xpc_rsvd_page points to the local + * partition's reserved page.) + * + * part_nasids mask + * mach_nasids mask + * + * SAL also sets up two bitmaps (or masks), one that reflects the actual + * nasids in this partition (part_nasids), and the other that reflects + * the actual nasids in the entire machine (mach_nasids). We're only + * interested in the even numbered nasids (which contain the processors + * and/or memory), so we only need half as many bits to represent the + * nasids. The part_nasids mask is located starting at the first cacheline + * following the reserved page header. The mach_nasids mask follows right + * after the part_nasids mask. The size in bytes of each mask is reflected + * by the reserved page header field 'nasids_size'. (Local partition's + * mask pointers are xpc_part_nasids and xpc_mach_nasids.) + * + * vars + * vars part + * + * Immediately following the mach_nasids mask are the XPC variables + * required by other partitions. First are those that are generic to all + * partitions (vars), followed on the next available cacheline by those + * which are partition specific (vars part). These are setup by XPC. + * (Local partition's vars pointers are xpc_vars and xpc_vars_part.) + * + * Note: Until vars_pa is set, the partition XPC code has not been initialized. + */ +struct xpc_rsvd_page { + u64 SAL_signature; /* SAL: unique signature */ + u64 SAL_version; /* SAL: version */ + u8 partid; /* SAL: partition ID */ + u8 version; + u8 pad1[6]; /* align to next u64 in cacheline */ + u64 vars_pa; /* physical address of struct xpc_vars */ + struct timespec stamp; /* time when reserved page was setup by XPC */ + u64 pad2[9]; /* align to last u64 in cacheline */ + u64 nasids_size; /* SAL: size of each nasid mask in bytes */ +}; + +#define XPC_RP_VERSION _XPC_VERSION(1, 1) /* version 1.1 of the reserved page */ + +#define XPC_SUPPORTS_RP_STAMP(_version) \ + (_version >= _XPC_VERSION(1, 1)) + +/* + * compare stamps - the return value is: + * + * < 0, if stamp1 < stamp2 + * = 0, if stamp1 == stamp2 + * > 0, if stamp1 > stamp2 + */ +static inline int +xpc_compare_stamps(struct timespec *stamp1, struct timespec *stamp2) +{ + int ret; + + ret = stamp1->tv_sec - stamp2->tv_sec; + if (ret == 0) + ret = stamp1->tv_nsec - stamp2->tv_nsec; + + return ret; +} + +/* + * Define the structures by which XPC variables can be exported to other + * partitions. (There are two: struct xpc_vars and struct xpc_vars_part) + */ + +/* + * The following structure describes the partition generic variables + * needed by other partitions in order to properly initialize. + * + * struct xpc_vars version number also applies to struct xpc_vars_part. + * Changes to either structure and/or related functionality should be + * reflected by incrementing either the major or minor version numbers + * of struct xpc_vars. 
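A sketch of the layout walk this describes; the patch defines XPC_RP_PART_NASIDS()/XPC_RP_MACH_NASIDS()/XPC_RP_VARS() macros further down that perform exactly these steps. 'nasid_mask_words' stands in for the mask size in 64-bit words (nasids_size / 8); the function name is hypothetical.

static struct xpc_vars *
walk_reserved_page(struct xpc_rsvd_page *rp, int nasid_mask_words)
{
	u64 *part_nasids, *mach_nasids;

	/* the header occupies the first cacheline-aligned chunk */
	part_nasids = (u64 *)((u8 *)rp +
			      L1_CACHE_ALIGN(sizeof(struct xpc_rsvd_page)));

	/* the mach_nasids mask follows immediately after part_nasids */
	mach_nasids = part_nasids + nasid_mask_words;

	/* the cross-partition variables follow the two masks */
	return (struct xpc_vars *)(mach_nasids + nasid_mask_words);
}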
+ */ +struct xpc_vars { + u8 version; + u64 heartbeat; + u64 heartbeating_to_mask; + u64 heartbeat_offline; /* if 0, heartbeat should be changing */ + int act_nasid; + int act_phys_cpuid; + u64 vars_part_pa; + u64 amos_page_pa; /* paddr of page of AMOs from MSPEC driver */ + AMO_t *amos_page; /* vaddr of page of AMOs from MSPEC driver */ +}; + +#define XPC_V_VERSION _XPC_VERSION(3, 1) /* version 3.1 of the cross vars */ + +#define XPC_SUPPORTS_DISENGAGE_REQUEST(_version) \ + (_version >= _XPC_VERSION(3, 1)) + +static inline int +xpc_hb_allowed(partid_t partid, struct xpc_vars *vars) +{ + return ((vars->heartbeating_to_mask & (1UL << partid)) != 0); +} + +static inline void +xpc_allow_hb(partid_t partid, struct xpc_vars *vars) +{ + u64 old_mask, new_mask; + + do { + old_mask = vars->heartbeating_to_mask; + new_mask = (old_mask | (1UL << partid)); + } while (cmpxchg(&vars->heartbeating_to_mask, old_mask, new_mask) != + old_mask); +} + +static inline void +xpc_disallow_hb(partid_t partid, struct xpc_vars *vars) +{ + u64 old_mask, new_mask; + + do { + old_mask = vars->heartbeating_to_mask; + new_mask = (old_mask & ~(1UL << partid)); + } while (cmpxchg(&vars->heartbeating_to_mask, old_mask, new_mask) != + old_mask); +} + +/* + * The AMOs page consists of a number of AMO variables which are divided into + * four groups, The first two groups are used to identify an IRQ's sender. + * These two groups consist of 64 and 128 AMO variables respectively. The last + * two groups, consisting of just one AMO variable each, are used to identify + * the remote partitions that are currently engaged (from the viewpoint of + * the XPC running on the remote partition). + */ +#define XPC_NOTIFY_IRQ_AMOS 0 +#define XPC_ACTIVATE_IRQ_AMOS (XPC_NOTIFY_IRQ_AMOS + XP_MAX_PARTITIONS) +#define XPC_ENGAGED_PARTITIONS_AMO (XPC_ACTIVATE_IRQ_AMOS + XP_NASID_MASK_WORDS) +#define XPC_DISENGAGE_REQUEST_AMO (XPC_ENGAGED_PARTITIONS_AMO + 1) + +/* + * The following structure describes the per partition specific variables. + * + * An array of these structures, one per partition, will be defined. As a + * partition becomes active XPC will copy the array entry corresponding to + * itself from that partition. It is desirable that the size of this + * structure evenly divide into a cacheline, such that none of the entries + * in this array crosses a cacheline boundary. As it is now, each entry + * occupies half a cacheline. + */ +struct xpc_vars_part { + u64 magic; + + u64 openclose_args_pa; /* physical address of open and close args */ + u64 GPs_pa; /* physical address of Get/Put values */ + + u64 IPI_amo_pa; /* physical address of IPI AMO_t structure */ + int IPI_nasid; /* nasid of where to send IPIs */ + int IPI_phys_cpuid; /* physical CPU ID of where to send IPIs */ + + u8 nchannels; /* #of defined channels supported */ + + u8 reserved[23]; /* pad to a full 64 bytes */ +}; + +/* + * The vars_part MAGIC numbers play a part in the first contact protocol. + * + * MAGIC1 indicates that the per partition specific variables for a remote + * partition have been initialized by this partition. + * + * MAGIC2 indicates that this partition has pulled the remote partititions + * per partition variables that pertain to this partition. 
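The handshake these magic values implement can be restated compactly as below; this is only a compressed sketch of the logic that xpc_pull_remote_vars_part(), added later in this patch, actually performs ('local' is our xpc_vars_part[] entry for the remote partition, 'pulled' is the copy of their entry that pertains to us).

static enum xpc_retval
first_contact(struct xpc_vars_part *local, struct xpc_vars_part *pulled)
{
	if (pulled->magic != XPC_VP_MAGIC1 && pulled->magic != XPC_VP_MAGIC2)
		/* the remote side has not written MAGIC1 yet */
		return (pulled->magic == 0) ? xpcRetry : xpcBadMagic;

	if (local->magic == XPC_VP_MAGIC1) {
		/* first sight of their entry: adopt their physical addresses,
		 * then advertise that we have pulled them */
		local->magic = XPC_VP_MAGIC2;
	}

	/* usable only once *both* sides have advanced to MAGIC2 */
	return (pulled->magic == XPC_VP_MAGIC1) ? xpcRetry : xpcSuccess;
}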
+ */ +#define XPC_VP_MAGIC1 0x0053524156435058L /* 'XPCVARS\0'L (little endian) */ +#define XPC_VP_MAGIC2 0x0073726176435058L /* 'XPCvars\0'L (little endian) */ + +/* the reserved page sizes and offsets */ + +#define XPC_RP_HEADER_SIZE L1_CACHE_ALIGN(sizeof(struct xpc_rsvd_page)) +#define XPC_RP_VARS_SIZE L1_CACHE_ALIGN(sizeof(struct xpc_vars)) + +#define XPC_RP_PART_NASIDS(_rp) ((u64 *)((u8 *)(_rp) + XPC_RP_HEADER_SIZE)) +#define XPC_RP_MACH_NASIDS(_rp) (XPC_RP_PART_NASIDS(_rp) + xp_nasid_mask_words) +#define XPC_RP_VARS(_rp) ((struct xpc_vars *)(XPC_RP_MACH_NASIDS(_rp) + \ + xp_nasid_mask_words)) +#define XPC_RP_VARS_PART(_rp) ((struct xpc_vars_part *) \ + ((u8 *)XPC_RP_VARS(_rp) + XPC_RP_VARS_SIZE)) + +/* + * Functions registered by add_timer() or called by kernel_thread() only + * allow for a single 64-bit argument. The following macros can be used to + * pack and unpack two (32-bit, 16-bit or 8-bit) arguments into or out from + * the passed argument. + */ +#define XPC_PACK_ARGS(_arg1, _arg2) \ + ((((u64) _arg1) & 0xffffffff) | \ + ((((u64) _arg2) & 0xffffffff) << 32)) + +#define XPC_UNPACK_ARG1(_args) (((u64) _args) & 0xffffffff) +#define XPC_UNPACK_ARG2(_args) ((((u64) _args) >> 32) & 0xffffffff) + +/* + * Define a Get/Put value pair (pointers) used with a message queue. + */ +struct xpc_gp { + s64 get; /* Get value */ + s64 put; /* Put value */ +}; + +#define XPC_GP_SIZE \ + L1_CACHE_ALIGN(sizeof(struct xpc_gp) * XPC_NCHANNELS) + +/* + * Define a structure that contains arguments associated with opening and + * closing a channel. + */ +struct xpc_openclose_args { + u16 reason; /* reason why channel is closing */ + u16 msg_size; /* sizeof each message entry */ + u16 remote_nentries; /* #of message entries in remote msg queue */ + u16 local_nentries; /* #of message entries in local msg queue */ + u64 local_msgqueue_pa; /* physical address of local message queue */ +}; + +#define XPC_OPENCLOSE_ARGS_SIZE \ + L1_CACHE_ALIGN(sizeof(struct xpc_openclose_args) * XPC_NCHANNELS) + +/* struct xpc_msg flags */ + +#define XPC_M_DONE 0x01 /* msg has been received/consumed */ +#define XPC_M_READY 0x02 /* msg is ready to be sent */ +#define XPC_M_INTERRUPT 0x04 /* send interrupt when msg consumed */ + +#define XPC_MSG_ADDRESS(_payload) \ + ((struct xpc_msg *)((u8 *)(_payload) - XPC_MSG_PAYLOAD_OFFSET)) + +/* + * Defines notify entry. + * + * This is used to notify a message's sender that their message was received + * and consumed by the intended recipient. + */ +struct xpc_notify { + u8 type; /* type of notification */ + + /* the following two fields are only used if type == XPC_N_CALL */ + xpc_notify_func func; /* user's notify function */ + void *key; /* pointer to user's key */ +}; + +/* struct xpc_notify type of notification */ + +#define XPC_N_CALL 0x01 /* notify function provided by user */ + +/* + * Define the structure that manages all the stuff required by a channel. In + * particular, they are used to manage the messages sent across the channel. + * + * This structure is private to a partition, and is NOT shared across the + * partition boundary. + * + * There is an array of these structures for each remote partition. It is + * allocated at the time a partition becomes active. The array contains one + * of these structures for each potential channel connection to that partition. + * + * Each of these structures manages two message queues (circular buffers). + * They are allocated at the time a channel connection is made. 
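A hypothetical illustration (not part of the patch) of how the pack/unpack macros above squeeze a partid and a channel number through the single argument accepted by kernel_thread() and add_timer(); both function names below are made up.

static int
my_kthread(void *args)
{
	partid_t partid = XPC_UNPACK_ARG1(args);
	int ch_number = XPC_UNPACK_ARG2(args);

	/* ... operate on partition 'partid', channel 'ch_number' ... */
	return 0;
}

static void
start_worker(partid_t partid, int ch_number)
{
	/* both values ride inside one u64 */
	kernel_thread(my_kthread, (void *)XPC_PACK_ARGS(partid, ch_number), 0);
}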
One of + * these message queues (local_msgqueue) holds the locally created messages + * that are destined for the remote partition. The other of these message + * queues (remote_msgqueue) is a locally cached copy of the remote partition's + * own local_msgqueue. + * + * The following is a description of the Get/Put pointers used to manage these + * two message queues. Consider the local_msgqueue to be on one partition + * and the remote_msgqueue to be its cached copy on another partition. A + * description of what each of the lettered areas contains is included. + * + * + * local_msgqueue remote_msgqueue + * + * |/////////| |/////////| + * w_remote_GP.get --> +---------+ |/////////| + * | F | |/////////| + * remote_GP.get --> +---------+ +---------+ <-- local_GP->get + * | | | | + * | | | E | + * | | | | + * | | +---------+ <-- w_local_GP.get + * | B | |/////////| + * | | |////D////| + * | | |/////////| + * | | +---------+ <-- w_remote_GP.put + * | | |////C////| + * local_GP->put --> +---------+ +---------+ <-- remote_GP.put + * | | |/////////| + * | A | |/////////| + * | | |/////////| + * w_local_GP.put --> +---------+ |/////////| + * |/////////| |/////////| + * + * + * ( remote_GP.[get|put] are cached copies of the remote + * partition's local_GP->[get|put], and thus their values can + * lag behind their counterparts on the remote partition. ) + * + * + * A - Messages that have been allocated, but have not yet been sent to the + * remote partition. + * + * B - Messages that have been sent, but have not yet been acknowledged by the + * remote partition as having been received. + * + * C - Area that needs to be prepared for the copying of sent messages, by + * the clearing of the message flags of any previously received messages. + * + * D - Area into which sent messages are to be copied from the remote + * partition's local_msgqueue and then delivered to their intended + * recipients. [ To allow for a multi-message copy, another pointer + * (next_msg_to_pull) has been added to keep track of the next message + * number needing to be copied (pulled). It chases after w_remote_GP.put. + * Any messages lying between w_local_GP.get and next_msg_to_pull have + * been copied and are ready to be delivered. ] + * + * E - Messages that have been copied and delivered, but have not yet been + * acknowledged by the recipient as having been received. + * + * F - Messages that have been acknowledged, but XPC has not yet notified the + * sender that the message was received by its intended recipient. + * This is also an area that needs to be prepared for the allocating of + * new messages, by the clearing of the message flags of the acknowledged + * messages. 
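A sketch of the circular-buffer arithmetic the diagram implies: the Get/Put values grow monotonically and are reduced modulo the number of entries to locate a slot, and the send side has room while put has not run a full queue length ahead of the remote get. This mirrors helpers added later in xpc_channel.c and is not new mechanism; the two function names are hypothetical.

static struct xpc_msg *
slot_for(struct xpc_channel *ch, s64 gp_value)
{
	return (struct xpc_msg *)((u64)ch->local_msgqueue +
				  (gp_value % ch->local_nentries) *
				  ch->msg_size);
}

/* #of messages the sender may still allocate before its ring is full */
static s64
send_slots_free(struct xpc_channel *ch)
{
	return ch->local_nentries -
	       (ch->w_local_GP.put - ch->w_remote_GP.get);
}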
+ */ +struct xpc_channel { + partid_t partid; /* ID of remote partition connected */ + spinlock_t lock; /* lock for updating this structure */ + u32 flags; /* general flags */ + + enum xpc_retval reason; /* reason why channel is disconnect'g */ + int reason_line; /* line# disconnect initiated from */ + + u16 number; /* channel # */ + + u16 msg_size; /* sizeof each msg entry */ + u16 local_nentries; /* #of msg entries in local msg queue */ + u16 remote_nentries; /* #of msg entries in remote msg queue */ + + void *local_msgqueue_base; /* base address of kmalloc'd space */ + struct xpc_msg *local_msgqueue; /* local message queue */ + void *remote_msgqueue_base; /* base address of kmalloc'd space */ + struct xpc_msg *remote_msgqueue; /* cached copy of remote partition's */ + /* local message queue */ + u64 remote_msgqueue_pa; /* phys addr of remote partition's */ + /* local message queue */ + + atomic_t references; /* #of external references to queues */ + + atomic_t n_on_msg_allocate_wq; /* #on msg allocation wait queue */ + wait_queue_head_t msg_allocate_wq; /* msg allocation wait queue */ + + u8 delayed_IPI_flags; /* IPI flags received, but delayed */ + /* action until channel disconnected */ + + /* queue of msg senders who want to be notified when msg received */ + + atomic_t n_to_notify; /* #of msg senders to notify */ + struct xpc_notify *notify_queue; /* notify queue for messages sent */ + + xpc_channel_func func; /* user's channel function */ + void *key; /* pointer to user's key */ + + struct mutex msg_to_pull_mutex; /* next msg to pull serialization */ + struct completion wdisconnect_wait; /* wait for channel disconnect */ + + struct xpc_openclose_args *local_openclose_args; /* args passed on */ + /* opening or closing of channel */ + + /* various flavors of local and remote Get/Put values */ + + struct xpc_gp *local_GP; /* local Get/Put values */ + struct xpc_gp remote_GP; /* remote Get/Put values */ + struct xpc_gp w_local_GP; /* working local Get/Put values */ + struct xpc_gp w_remote_GP; /* working remote Get/Put values */ + s64 next_msg_to_pull; /* Put value of next msg to pull */ + + /* kthread management related fields */ + + atomic_t kthreads_assigned; /* #of kthreads assigned to channel */ + u32 kthreads_assigned_limit; /* limit on #of kthreads assigned */ + atomic_t kthreads_idle; /* #of kthreads idle waiting for work */ + u32 kthreads_idle_limit; /* limit on #of kthreads idle */ + atomic_t kthreads_active; /* #of kthreads actively working */ + + wait_queue_head_t idle_wq; /* idle kthread wait queue */ + +} ____cacheline_aligned; + +/* struct xpc_channel flags */ + +#define XPC_C_WASCONNECTED 0x00000001 /* channel was connected */ + +#define XPC_C_ROPENREPLY 0x00000002 /* remote open channel reply */ +#define XPC_C_OPENREPLY 0x00000004 /* local open channel reply */ +#define XPC_C_ROPENREQUEST 0x00000008 /* remote open channel request */ +#define XPC_C_OPENREQUEST 0x00000010 /* local open channel request */ + +#define XPC_C_SETUP 0x00000020 /* channel's msgqueues are alloc'd */ +#define XPC_C_CONNECTEDCALLOUT 0x00000040 /* connected callout initiated */ +#define XPC_C_CONNECTEDCALLOUT_MADE \ + 0x00000080 /* connected callout completed */ +#define XPC_C_CONNECTED 0x00000100 /* local channel is connected */ +#define XPC_C_CONNECTING 0x00000200 /* channel is being connected */ + +#define XPC_C_RCLOSEREPLY 0x00000400 /* remote close channel reply */ +#define XPC_C_CLOSEREPLY 0x00000800 /* local close channel reply */ +#define XPC_C_RCLOSEREQUEST 0x00001000 /* remote close 
channel request */ +#define XPC_C_CLOSEREQUEST 0x00002000 /* local close channel request */ + +#define XPC_C_DISCONNECTED 0x00004000 /* channel is disconnected */ +#define XPC_C_DISCONNECTING 0x00008000 /* channel is being disconnected */ +#define XPC_C_DISCONNECTINGCALLOUT \ + 0x00010000 /* disconnecting callout initiated */ +#define XPC_C_DISCONNECTINGCALLOUT_MADE \ + 0x00020000 /* disconnecting callout completed */ +#define XPC_C_WDISCONNECT 0x00040000 /* waiting for channel disconnect */ + +/* + * Manages channels on a partition basis. There is one of these structures + * for each partition (a partition will never utilize the structure that + * represents itself). + */ +struct xpc_partition { + + /* XPC HB infrastructure */ + + u8 remote_rp_version; /* version# of partition's rsvd pg */ + struct timespec remote_rp_stamp; /* time when rsvd pg was initialized */ + u64 remote_rp_pa; /* phys addr of partition's rsvd pg */ + u64 remote_vars_pa; /* phys addr of partition's vars */ + u64 remote_vars_part_pa; /* phys addr of partition's vars part */ + u64 last_heartbeat; /* HB at last read */ + u64 remote_amos_page_pa; /* phys addr of partition's amos page */ + int remote_act_nasid; /* active part's act/deact nasid */ + int remote_act_phys_cpuid; /* active part's act/deact phys cpuid */ + u32 act_IRQ_rcvd; /* IRQs since activation */ + spinlock_t act_lock; /* protect updating of act_state */ + u8 act_state; /* from XPC HB viewpoint */ + u8 remote_vars_version; /* version# of partition's vars */ + enum xpc_retval reason; /* reason partition is deactivating */ + int reason_line; /* line# deactivation initiated from */ + int reactivate_nasid; /* nasid in partition to reactivate */ + + unsigned long disengage_request_timeout; /* timeout in jiffies */ + struct timer_list disengage_request_timer; + + /* XPC infrastructure referencing and teardown control */ + + u8 setup_state; /* infrastructure setup state */ + wait_queue_head_t teardown_wq; /* kthread waiting to teardown infra */ + atomic_t references; /* #of references to infrastructure */ + + /* + * NONE OF THE PRECEDING FIELDS OF THIS STRUCTURE WILL BE CLEARED WHEN + * XPC SETS UP THE NECESSARY INFRASTRUCTURE TO SUPPORT CROSS PARTITION + * COMMUNICATION. ALL OF THE FOLLOWING FIELDS WILL BE CLEARED. (THE + * 'nchannels' FIELD MUST BE THE FIRST OF THE FIELDS TO BE CLEARED.) 
+ */ + + u8 nchannels; /* #of defined channels supported */ + atomic_t nchannels_active; /* #of channels that are not DISCONNECTED */ + atomic_t nchannels_engaged; /* #of channels engaged with remote part */ + struct xpc_channel *channels; /* array of channel structures */ + + void *local_GPs_base; /* base address of kmalloc'd space */ + struct xpc_gp *local_GPs; /* local Get/Put values */ + void *remote_GPs_base; /* base address of kmalloc'd space */ + struct xpc_gp *remote_GPs; /* copy of remote partition's local */ + /* Get/Put values */ + u64 remote_GPs_pa; /* phys address of remote partition's local */ + /* Get/Put values */ + + /* fields used to pass args when opening or closing a channel */ + + void *local_openclose_args_base; /* base address of kmalloc'd space */ + struct xpc_openclose_args *local_openclose_args; /* local's args */ + void *remote_openclose_args_base; /* base address of kmalloc'd space */ + struct xpc_openclose_args *remote_openclose_args; /* copy of remote's */ + /* args */ + u64 remote_openclose_args_pa; /* phys addr of remote's args */ + + /* IPI sending, receiving and handling related fields */ + + int remote_IPI_nasid; /* nasid of where to send IPIs */ + int remote_IPI_phys_cpuid; /* phys CPU ID of where to send IPIs */ + AMO_t *remote_IPI_amo_va; /* address of remote IPI AMO_t structure */ + + AMO_t *local_IPI_amo_va; /* address of IPI AMO_t structure */ + u64 local_IPI_amo; /* IPI amo flags yet to be handled */ + char IPI_owner[8]; /* IPI owner's name */ + struct timer_list dropped_IPI_timer; /* dropped IPI timer */ + + spinlock_t IPI_lock; /* IPI handler lock */ + + /* channel manager related fields */ + + atomic_t channel_mgr_requests; /* #of requests to activate chan mgr */ + wait_queue_head_t channel_mgr_wq; /* channel mgr's wait queue */ + +} ____cacheline_aligned; + +/* struct xpc_partition act_state values (for XPC HB) */ + +#define XPC_P_INACTIVE 0x00 /* partition is not active */ +#define XPC_P_ACTIVATION_REQ 0x01 /* created thread to activate */ +#define XPC_P_ACTIVATING 0x02 /* activation thread started */ +#define XPC_P_ACTIVE 0x03 /* xpc_partition_up() was called */ +#define XPC_P_DEACTIVATING 0x04 /* partition deactivation initiated */ + +#define XPC_DEACTIVATE_PARTITION(_p, _reason) \ + xpc_deactivate_partition(__LINE__, (_p), (_reason)) + +/* struct xpc_partition setup_state values */ + +#define XPC_P_UNSET 0x00 /* infrastructure was never setup */ +#define XPC_P_SETUP 0x01 /* infrastructure is setup */ +#define XPC_P_WTEARDOWN 0x02 /* waiting to teardown infrastructure */ +#define XPC_P_TORNDOWN 0x03 /* infrastructure is torndown */ + +/* + * struct xpc_partition IPI_timer #of seconds to wait before checking for + * dropped IPIs. These occur whenever an IPI amo write doesn't complete until + * after the IPI was received. 
+ */ +#define XPC_P_DROPPED_IPI_WAIT (0.25 * HZ) + +/* number of seconds to wait for other partitions to disengage */ +#define XPC_DISENGAGE_REQUEST_DEFAULT_TIMELIMIT 90 + +/* interval in seconds to print 'waiting disengagement' messages */ +#define XPC_DISENGAGE_PRINTMSG_INTERVAL 10 + +#define XPC_PARTID(_p) ((partid_t) ((_p) - &xpc_partitions[0])) + +/* found in xp_main.c */ +extern struct xpc_registration xpc_registrations[]; + +/* found in xpc_main.c */ +extern struct device *xpc_part; +extern struct device *xpc_chan; +extern int xpc_disengage_request_timelimit; +extern int xpc_disengage_request_timedout; +extern irqreturn_t xpc_notify_IRQ_handler(int, void *); +extern void xpc_dropped_IPI_check(struct xpc_partition *); +extern void xpc_activate_partition(struct xpc_partition *); +extern void xpc_activate_kthreads(struct xpc_channel *, int); +extern void xpc_create_kthreads(struct xpc_channel *, int, int); +extern void xpc_disconnect_wait(int); + +/* found in xpc_partition.c */ +extern int xpc_exiting; +extern struct xpc_vars *xpc_vars; +extern struct xpc_rsvd_page *xpc_rsvd_page; +extern struct xpc_vars_part *xpc_vars_part; +extern struct xpc_partition xpc_partitions[XP_MAX_PARTITIONS + 1]; +extern char *xpc_remote_copy_buffer; +extern void *xpc_remote_copy_buffer_base; +extern void *xpc_kmalloc_cacheline_aligned(size_t, gfp_t, void **); +extern struct xpc_rsvd_page *xpc_rsvd_page_init(void); +extern void xpc_allow_IPI_ops(void); +extern void xpc_restrict_IPI_ops(void); +extern int xpc_identify_act_IRQ_sender(void); +extern int xpc_partition_disengaged(struct xpc_partition *); +extern enum xpc_retval xpc_mark_partition_active(struct xpc_partition *); +extern void xpc_mark_partition_inactive(struct xpc_partition *); +extern void xpc_discovery(void); +extern void xpc_check_remote_hb(void); +extern void xpc_deactivate_partition(const int, struct xpc_partition *, + enum xpc_retval); +extern enum xpc_retval xpc_initiate_partid_to_nasids(partid_t, void *); + +/* found in xpc_channel.c */ +extern void xpc_initiate_connect(int); +extern void xpc_initiate_disconnect(int); +extern enum xpc_retval xpc_initiate_allocate(partid_t, int, u32, void **); +extern enum xpc_retval xpc_initiate_send(partid_t, int, void *); +extern enum xpc_retval xpc_initiate_send_notify(partid_t, int, void *, + xpc_notify_func, void *); +extern void xpc_initiate_received(partid_t, int, void *); +extern enum xpc_retval xpc_setup_infrastructure(struct xpc_partition *); +extern enum xpc_retval xpc_pull_remote_vars_part(struct xpc_partition *); +extern void xpc_process_channel_activity(struct xpc_partition *); +extern void xpc_connected_callout(struct xpc_channel *); +extern void xpc_deliver_msg(struct xpc_channel *); +extern void xpc_disconnect_channel(const int, struct xpc_channel *, + enum xpc_retval, unsigned long *); +extern void xpc_disconnect_callout(struct xpc_channel *, enum xpc_retval); +extern void xpc_partition_going_down(struct xpc_partition *, enum xpc_retval); +extern void xpc_teardown_infrastructure(struct xpc_partition *); + +static inline void +xpc_wakeup_channel_mgr(struct xpc_partition *part) +{ + if (atomic_inc_return(&part->channel_mgr_requests) == 1) + wake_up(&part->channel_mgr_wq); +} + +/* + * These next two inlines are used to keep us from tearing down a channel's + * msg queues while a thread may be referencing them. 
+ */ +static inline void +xpc_msgqueue_ref(struct xpc_channel *ch) +{ + atomic_inc(&ch->references); +} + +static inline void +xpc_msgqueue_deref(struct xpc_channel *ch) +{ + s32 refs = atomic_dec_return(&ch->references); + + DBUG_ON(refs < 0); + if (refs == 0) + xpc_wakeup_channel_mgr(&xpc_partitions[ch->partid]); +} + +#define XPC_DISCONNECT_CHANNEL(_ch, _reason, _irqflgs) \ + xpc_disconnect_channel(__LINE__, _ch, _reason, _irqflgs) + +/* + * These two inlines are used to keep us from tearing down a partition's + * setup infrastructure while a thread may be referencing it. + */ +static inline void +xpc_part_deref(struct xpc_partition *part) +{ + s32 refs = atomic_dec_return(&part->references); + + DBUG_ON(refs < 0); + if (refs == 0 && part->setup_state == XPC_P_WTEARDOWN) + wake_up(&part->teardown_wq); +} + +static inline int +xpc_part_ref(struct xpc_partition *part) +{ + int setup; + + atomic_inc(&part->references); + setup = (part->setup_state == XPC_P_SETUP); + if (!setup) + xpc_part_deref(part); + + return setup; +} + +/* + * The following macro is to be used for the setting of the reason and + * reason_line fields in both the struct xpc_channel and struct xpc_partition + * structures. + */ +#define XPC_SET_REASON(_p, _reason, _line) \ + { \ + (_p)->reason = _reason; \ + (_p)->reason_line = _line; \ + } + +/* + * This next set of inlines are used to keep track of when a partition is + * potentially engaged in accessing memory belonging to another partition. + */ + +static inline void +xpc_mark_partition_engaged(struct xpc_partition *part) +{ + unsigned long irq_flags; + AMO_t *amo = (AMO_t *)__va(part->remote_amos_page_pa + + (XPC_ENGAGED_PARTITIONS_AMO * + sizeof(AMO_t))); + + local_irq_save(irq_flags); + + /* set bit corresponding to our partid in remote partition's AMO */ + FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_OR, + (1UL << sn_partition_id)); + /* + * We must always use the nofault function regardless of whether we + * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we + * didn't, we'd never know that the other partition is down and would + * keep sending IPIs and AMOs to it until the heartbeat times out. + */ + (void)xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo-> + variable), + xp_nofault_PIOR_target)); + + local_irq_restore(irq_flags); +} + +static inline void +xpc_mark_partition_disengaged(struct xpc_partition *part) +{ + unsigned long irq_flags; + AMO_t *amo = (AMO_t *)__va(part->remote_amos_page_pa + + (XPC_ENGAGED_PARTITIONS_AMO * + sizeof(AMO_t))); + + local_irq_save(irq_flags); + + /* clear bit corresponding to our partid in remote partition's AMO */ + FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_AND, + ~(1UL << sn_partition_id)); + /* + * We must always use the nofault function regardless of whether we + * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we + * didn't, we'd never know that the other partition is down and would + * keep sending IPIs and AMOs to it until the heartbeat times out. 
+ */ + (void)xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo-> + variable), + xp_nofault_PIOR_target)); + + local_irq_restore(irq_flags); +} + +static inline void +xpc_request_partition_disengage(struct xpc_partition *part) +{ + unsigned long irq_flags; + AMO_t *amo = (AMO_t *)__va(part->remote_amos_page_pa + + (XPC_DISENGAGE_REQUEST_AMO * sizeof(AMO_t))); + + local_irq_save(irq_flags); + + /* set bit corresponding to our partid in remote partition's AMO */ + FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_OR, + (1UL << sn_partition_id)); + /* + * We must always use the nofault function regardless of whether we + * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we + * didn't, we'd never know that the other partition is down and would + * keep sending IPIs and AMOs to it until the heartbeat times out. + */ + (void)xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo-> + variable), + xp_nofault_PIOR_target)); + + local_irq_restore(irq_flags); +} + +static inline void +xpc_cancel_partition_disengage_request(struct xpc_partition *part) +{ + unsigned long irq_flags; + AMO_t *amo = (AMO_t *)__va(part->remote_amos_page_pa + + (XPC_DISENGAGE_REQUEST_AMO * sizeof(AMO_t))); + + local_irq_save(irq_flags); + + /* clear bit corresponding to our partid in remote partition's AMO */ + FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_AND, + ~(1UL << sn_partition_id)); + /* + * We must always use the nofault function regardless of whether we + * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we + * didn't, we'd never know that the other partition is down and would + * keep sending IPIs and AMOs to it until the heartbeat times out. + */ + (void)xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo-> + variable), + xp_nofault_PIOR_target)); + + local_irq_restore(irq_flags); +} + +static inline u64 +xpc_partition_engaged(u64 partid_mask) +{ + AMO_t *amo = xpc_vars->amos_page + XPC_ENGAGED_PARTITIONS_AMO; + + /* return our partition's AMO variable ANDed with partid_mask */ + return (FETCHOP_LOAD_OP(TO_AMO((u64)&amo->variable), FETCHOP_LOAD) & + partid_mask); +} + +static inline u64 +xpc_partition_disengage_requested(u64 partid_mask) +{ + AMO_t *amo = xpc_vars->amos_page + XPC_DISENGAGE_REQUEST_AMO; + + /* return our partition's AMO variable ANDed with partid_mask */ + return (FETCHOP_LOAD_OP(TO_AMO((u64)&amo->variable), FETCHOP_LOAD) & + partid_mask); +} + +static inline void +xpc_clear_partition_engaged(u64 partid_mask) +{ + AMO_t *amo = xpc_vars->amos_page + XPC_ENGAGED_PARTITIONS_AMO; + + /* clear bit(s) based on partid_mask in our partition's AMO */ + FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_AND, + ~partid_mask); +} + +static inline void +xpc_clear_partition_disengage_request(u64 partid_mask) +{ + AMO_t *amo = xpc_vars->amos_page + XPC_DISENGAGE_REQUEST_AMO; + + /* clear bit(s) based on partid_mask in our partition's AMO */ + FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_AND, + ~partid_mask); +} + +/* + * The following set of macros and inlines are used for the sending and + * receiving of IPIs (also known as IRQs). There are two flavors of IPIs, + * one that is associated with partition activity (SGI_XPC_ACTIVATE) and + * the other that is associated with channel activity (SGI_XPC_NOTIFY). 
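Usage sketch for the engaged/disengage-request accessors above, mirroring how the heartbeat and teardown paths consult them; the two wrapper names are hypothetical.

static int
remote_still_engaged(partid_t partid)
{
	/* bit 'partid' stays set in our AMO while that partition, from its
	 * own point of view, is still engaged with us */
	return xpc_partition_engaged(1UL << partid) != 0;
}

static int
any_partition_engaged(void)
{
	return xpc_partition_engaged(-1UL) != 0;
}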
+ */ + +static inline u64 +xpc_IPI_receive(AMO_t *amo) +{ + return FETCHOP_LOAD_OP(TO_AMO((u64)&amo->variable), FETCHOP_CLEAR); +} + +static inline enum xpc_retval +xpc_IPI_send(AMO_t *amo, u64 flag, int nasid, int phys_cpuid, int vector) +{ + int ret = 0; + unsigned long irq_flags; + + local_irq_save(irq_flags); + + FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_OR, flag); + sn_send_IPI_phys(nasid, phys_cpuid, vector, 0); + + /* + * We must always use the nofault function regardless of whether we + * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we + * didn't, we'd never know that the other partition is down and would + * keep sending IPIs and AMOs to it until the heartbeat times out. + */ + ret = xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo->variable), + xp_nofault_PIOR_target)); + + local_irq_restore(irq_flags); + + return ((ret == 0) ? xpcSuccess : xpcPioReadError); +} + +/* + * IPIs associated with SGI_XPC_ACTIVATE IRQ. + */ + +/* + * Flag the appropriate AMO variable and send an IPI to the specified node. + */ +static inline void +xpc_activate_IRQ_send(u64 amos_page_pa, int from_nasid, int to_nasid, + int to_phys_cpuid) +{ + int w_index = XPC_NASID_W_INDEX(from_nasid); + int b_index = XPC_NASID_B_INDEX(from_nasid); + AMO_t *amos = (AMO_t *)__va(amos_page_pa + + (XPC_ACTIVATE_IRQ_AMOS * sizeof(AMO_t))); + + (void)xpc_IPI_send(&amos[w_index], (1UL << b_index), to_nasid, + to_phys_cpuid, SGI_XPC_ACTIVATE); +} + +static inline void +xpc_IPI_send_activate(struct xpc_vars *vars) +{ + xpc_activate_IRQ_send(vars->amos_page_pa, cnodeid_to_nasid(0), + vars->act_nasid, vars->act_phys_cpuid); +} + +static inline void +xpc_IPI_send_activated(struct xpc_partition *part) +{ + xpc_activate_IRQ_send(part->remote_amos_page_pa, cnodeid_to_nasid(0), + part->remote_act_nasid, + part->remote_act_phys_cpuid); +} + +static inline void +xpc_IPI_send_reactivate(struct xpc_partition *part) +{ + xpc_activate_IRQ_send(xpc_vars->amos_page_pa, part->reactivate_nasid, + xpc_vars->act_nasid, xpc_vars->act_phys_cpuid); +} + +static inline void +xpc_IPI_send_disengage(struct xpc_partition *part) +{ + xpc_activate_IRQ_send(part->remote_amos_page_pa, cnodeid_to_nasid(0), + part->remote_act_nasid, + part->remote_act_phys_cpuid); +} + +/* + * IPIs associated with SGI_XPC_NOTIFY IRQ. + */ + +/* + * Send an IPI to the remote partition that is associated with the + * specified channel. + */ +#define XPC_NOTIFY_IRQ_SEND(_ch, _ipi_f, _irq_f) \ + xpc_notify_IRQ_send(_ch, _ipi_f, #_ipi_f, _irq_f) + +static inline void +xpc_notify_IRQ_send(struct xpc_channel *ch, u8 ipi_flag, char *ipi_flag_string, + unsigned long *irq_flags) +{ + struct xpc_partition *part = &xpc_partitions[ch->partid]; + enum xpc_retval ret; + + if (likely(part->act_state != XPC_P_DEACTIVATING)) { + ret = xpc_IPI_send(part->remote_IPI_amo_va, + (u64)ipi_flag << (ch->number * 8), + part->remote_IPI_nasid, + part->remote_IPI_phys_cpuid, SGI_XPC_NOTIFY); + dev_dbg(xpc_chan, "%s sent to partid=%d, channel=%d, ret=%d\n", + ipi_flag_string, ch->partid, ch->number, ret); + if (unlikely(ret != xpcSuccess)) { + if (irq_flags != NULL) + spin_unlock_irqrestore(&ch->lock, *irq_flags); + XPC_DEACTIVATE_PARTITION(part, ret); + if (irq_flags != NULL) + spin_lock_irqsave(&ch->lock, *irq_flags); + } + } +} + +/* + * Make it look like the remote partition, which is associated with the + * specified channel, sent us an IPI. This faked IPI will be handled + * by xpc_dropped_IPI_check(). 
+ */ +#define XPC_NOTIFY_IRQ_SEND_LOCAL(_ch, _ipi_f) \ + xpc_notify_IRQ_send_local(_ch, _ipi_f, #_ipi_f) + +static inline void +xpc_notify_IRQ_send_local(struct xpc_channel *ch, u8 ipi_flag, + char *ipi_flag_string) +{ + struct xpc_partition *part = &xpc_partitions[ch->partid]; + + FETCHOP_STORE_OP(TO_AMO((u64)&part->local_IPI_amo_va->variable), + FETCHOP_OR, ((u64)ipi_flag << (ch->number * 8))); + dev_dbg(xpc_chan, "%s sent local from partid=%d, channel=%d\n", + ipi_flag_string, ch->partid, ch->number); +} + +/* + * The sending and receiving of IPIs includes the setting of an AMO variable + * to indicate the reason the IPI was sent. The 64-bit variable is divided + * up into eight bytes, ordered from right to left. Byte zero pertains to + * channel 0, byte one to channel 1, and so on. Each byte is described by + * the following IPI flags. + */ + +#define XPC_IPI_CLOSEREQUEST 0x01 +#define XPC_IPI_CLOSEREPLY 0x02 +#define XPC_IPI_OPENREQUEST 0x04 +#define XPC_IPI_OPENREPLY 0x08 +#define XPC_IPI_MSGREQUEST 0x10 + +/* given an AMO variable and a channel#, get its associated IPI flags */ +#define XPC_GET_IPI_FLAGS(_amo, _c) ((u8) (((_amo) >> ((_c) * 8)) & 0xff)) +#define XPC_SET_IPI_FLAGS(_amo, _c, _f) (_amo) |= ((u64) (_f) << ((_c) * 8)) + +#define XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(_amo) ((_amo) & 0x0f0f0f0f0f0f0f0fUL) +#define XPC_ANY_MSG_IPI_FLAGS_SET(_amo) ((_amo) & 0x1010101010101010UL) + +static inline void +xpc_IPI_send_closerequest(struct xpc_channel *ch, unsigned long *irq_flags) +{ + struct xpc_openclose_args *args = ch->local_openclose_args; + + args->reason = ch->reason; + + XPC_NOTIFY_IRQ_SEND(ch, XPC_IPI_CLOSEREQUEST, irq_flags); +} + +static inline void +xpc_IPI_send_closereply(struct xpc_channel *ch, unsigned long *irq_flags) +{ + XPC_NOTIFY_IRQ_SEND(ch, XPC_IPI_CLOSEREPLY, irq_flags); +} + +static inline void +xpc_IPI_send_openrequest(struct xpc_channel *ch, unsigned long *irq_flags) +{ + struct xpc_openclose_args *args = ch->local_openclose_args; + + args->msg_size = ch->msg_size; + args->local_nentries = ch->local_nentries; + + XPC_NOTIFY_IRQ_SEND(ch, XPC_IPI_OPENREQUEST, irq_flags); +} + +static inline void +xpc_IPI_send_openreply(struct xpc_channel *ch, unsigned long *irq_flags) +{ + struct xpc_openclose_args *args = ch->local_openclose_args; + + args->remote_nentries = ch->remote_nentries; + args->local_nentries = ch->local_nentries; + args->local_msgqueue_pa = __pa(ch->local_msgqueue); + + XPC_NOTIFY_IRQ_SEND(ch, XPC_IPI_OPENREPLY, irq_flags); +} + +static inline void +xpc_IPI_send_msgrequest(struct xpc_channel *ch) +{ + XPC_NOTIFY_IRQ_SEND(ch, XPC_IPI_MSGREQUEST, NULL); +} + +static inline void +xpc_IPI_send_local_msgrequest(struct xpc_channel *ch) +{ + XPC_NOTIFY_IRQ_SEND_LOCAL(ch, XPC_IPI_MSGREQUEST); +} + +/* + * Memory for XPC's AMO variables is allocated by the MSPEC driver. These + * pages are located in the lowest granule. The lowest granule uses 4k pages + * for cached references and an alternate TLB handler to never provide a + * cacheable mapping for the entire region. This will prevent speculative + * reading of cached copies of our lines from being issued which will cause + * a PI FSB Protocol error to be generated by the SHUB. For XPC, we need 64 + * AMO variables (based on XP_MAX_PARTITIONS) for message notification and an + * additional 128 AMO variables (based on XP_NASID_MASK_WORDS) for partition + * activation and 2 AMO variables for partition deactivation. 
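A stand-alone illustration (user-space C, not part of the patch) of the byte-per-channel IPI flag encoding described above; the macros are duplicated so the example compiles on its own.

#include <assert.h>
#include <stdint.h>
typedef uint64_t u64;
typedef uint8_t u8;

#define XPC_IPI_OPENREQUEST	0x04
#define XPC_IPI_MSGREQUEST	0x10
#define XPC_GET_IPI_FLAGS(_amo, _c)	((u8)(((_amo) >> ((_c) * 8)) & 0xff))
#define XPC_SET_IPI_FLAGS(_amo, _c, _f)	(_amo) |= ((u64)(_f) << ((_c) * 8))
#define XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(_amo) ((_amo) & 0x0f0f0f0f0f0f0f0fUL)
#define XPC_ANY_MSG_IPI_FLAGS_SET(_amo)       ((_amo) & 0x1010101010101010UL)

int main(void)
{
	u64 amo = 0;

	XPC_SET_IPI_FLAGS(amo, 2, XPC_IPI_OPENREQUEST);	/* byte 2 = channel 2 */
	XPC_SET_IPI_FLAGS(amo, 5, XPC_IPI_MSGREQUEST);	/* byte 5 = channel 5 */

	assert(amo == 0x0000100000040000UL);
	assert(XPC_GET_IPI_FLAGS(amo, 2) == XPC_IPI_OPENREQUEST);
	assert(XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(amo));	/* channel 2's flag */
	assert(XPC_ANY_MSG_IPI_FLAGS_SET(amo));		/* channel 5's flag */
	return 0;
}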
+ */ +static inline AMO_t * +xpc_IPI_init(int index) +{ + AMO_t *amo = xpc_vars->amos_page + index; + + (void)xpc_IPI_receive(amo); /* clear AMO variable */ + return amo; +} + +static inline enum xpc_retval +xpc_map_bte_errors(bte_result_t error) +{ + if (error == BTE_SUCCESS) + return xpcSuccess; + + if (is_shub2()) { + if (BTE_VALID_SH2_ERROR(error)) + return xpcBteSh2Start + error; + return xpcBteUnmappedError; + } + switch (error) { + case BTE_SUCCESS: + return xpcSuccess; + case BTEFAIL_DIR: + return xpcBteDirectoryError; + case BTEFAIL_POISON: + return xpcBtePoisonError; + case BTEFAIL_WERR: + return xpcBteWriteError; + case BTEFAIL_ACCESS: + return xpcBteAccessError; + case BTEFAIL_PWERR: + return xpcBtePWriteError; + case BTEFAIL_PRERR: + return xpcBtePReadError; + case BTEFAIL_TOUT: + return xpcBteTimeOutError; + case BTEFAIL_XTERR: + return xpcBteXtalkError; + case BTEFAIL_NOTAVAIL: + return xpcBteNotAvailable; + default: + return xpcBteUnmappedError; + } +} + +/* + * Check to see if there is any channel activity to/from the specified + * partition. + */ +static inline void +xpc_check_for_channel_activity(struct xpc_partition *part) +{ + u64 IPI_amo; + unsigned long irq_flags; + + IPI_amo = xpc_IPI_receive(part->local_IPI_amo_va); + if (IPI_amo == 0) + return; + + spin_lock_irqsave(&part->IPI_lock, irq_flags); + part->local_IPI_amo |= IPI_amo; + spin_unlock_irqrestore(&part->IPI_lock, irq_flags); + + dev_dbg(xpc_chan, "received IPI from partid=%d, IPI_amo=0x%lx\n", + XPC_PARTID(part), IPI_amo); + + xpc_wakeup_channel_mgr(part); +} + +#endif /* _DRIVERS_MISC_SGIXP_XPC_H */ diff --git a/drivers/misc/sgi-xp/xpc_channel.c b/drivers/misc/sgi-xp/xpc_channel.c new file mode 100644 index 00000000000..bfcb9ea968e --- /dev/null +++ b/drivers/misc/sgi-xp/xpc_channel.c @@ -0,0 +1,2243 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (c) 2004-2008 Silicon Graphics, Inc. All Rights Reserved. + */ + +/* + * Cross Partition Communication (XPC) channel support. + * + * This is the part of XPC that manages the channels and + * sends/receives messages across them to/from other partitions. + * + */ + +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/sched.h> +#include <linux/cache.h> +#include <linux/interrupt.h> +#include <linux/mutex.h> +#include <linux/completion.h> +#include <asm/sn/bte.h> +#include <asm/sn/sn_sal.h> +#include "xpc.h" + +/* + * Guarantee that the kzalloc'd memory is cacheline aligned. + */ +static void * +xpc_kzalloc_cacheline_aligned(size_t size, gfp_t flags, void **base) +{ + /* see if kzalloc will give us cachline aligned memory by default */ + *base = kzalloc(size, flags); + if (*base == NULL) + return NULL; + + if ((u64)*base == L1_CACHE_ALIGN((u64)*base)) + return *base; + + kfree(*base); + + /* nope, we'll have to do it ourselves */ + *base = kzalloc(size + L1_CACHE_BYTES, flags); + if (*base == NULL) + return NULL; + + return (void *)L1_CACHE_ALIGN((u64)*base); +} + +/* + * Set up the initial values for the XPartition Communication channels. 
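A usage note on xpc_kzalloc_cacheline_aligned() above: callers keep both the aligned pointer they actually use and the 'base' cookie, and it is the base that must eventually be kfree()'d, exactly as xpc_setup_infrastructure() below does with its *_base fields. Hypothetical sketch:

static void
example_aligned_alloc(void)
{
	void *base;
	struct xpc_gp *gps;

	gps = xpc_kzalloc_cacheline_aligned(XPC_GP_SIZE, GFP_KERNEL, &base);
	if (gps == NULL)
		return;

	/* ... use the cacheline-aligned 'gps' array ... */

	kfree(base);	/* free the original allocation, never 'gps' */
}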
+ */ +static void +xpc_initialize_channels(struct xpc_partition *part, partid_t partid) +{ + int ch_number; + struct xpc_channel *ch; + + for (ch_number = 0; ch_number < part->nchannels; ch_number++) { + ch = &part->channels[ch_number]; + + ch->partid = partid; + ch->number = ch_number; + ch->flags = XPC_C_DISCONNECTED; + + ch->local_GP = &part->local_GPs[ch_number]; + ch->local_openclose_args = + &part->local_openclose_args[ch_number]; + + atomic_set(&ch->kthreads_assigned, 0); + atomic_set(&ch->kthreads_idle, 0); + atomic_set(&ch->kthreads_active, 0); + + atomic_set(&ch->references, 0); + atomic_set(&ch->n_to_notify, 0); + + spin_lock_init(&ch->lock); + mutex_init(&ch->msg_to_pull_mutex); + init_completion(&ch->wdisconnect_wait); + + atomic_set(&ch->n_on_msg_allocate_wq, 0); + init_waitqueue_head(&ch->msg_allocate_wq); + init_waitqueue_head(&ch->idle_wq); + } +} + +/* + * Setup the infrastructure necessary to support XPartition Communication + * between the specified remote partition and the local one. + */ +enum xpc_retval +xpc_setup_infrastructure(struct xpc_partition *part) +{ + int ret, cpuid; + struct timer_list *timer; + partid_t partid = XPC_PARTID(part); + + /* + * Zero out MOST of the entry for this partition. Only the fields + * starting with `nchannels' will be zeroed. The preceding fields must + * remain `viable' across partition ups and downs, since they may be + * referenced during this memset() operation. + */ + memset(&part->nchannels, 0, sizeof(struct xpc_partition) - + offsetof(struct xpc_partition, nchannels)); + + /* + * Allocate all of the channel structures as a contiguous chunk of + * memory. + */ + part->channels = kzalloc(sizeof(struct xpc_channel) * XPC_NCHANNELS, + GFP_KERNEL); + if (part->channels == NULL) { + dev_err(xpc_chan, "can't get memory for channels\n"); + return xpcNoMemory; + } + + part->nchannels = XPC_NCHANNELS; + + /* allocate all the required GET/PUT values */ + + part->local_GPs = xpc_kzalloc_cacheline_aligned(XPC_GP_SIZE, + GFP_KERNEL, + &part->local_GPs_base); + if (part->local_GPs == NULL) { + kfree(part->channels); + part->channels = NULL; + dev_err(xpc_chan, "can't get memory for local get/put " + "values\n"); + return xpcNoMemory; + } + + part->remote_GPs = xpc_kzalloc_cacheline_aligned(XPC_GP_SIZE, + GFP_KERNEL, + &part-> + remote_GPs_base); + if (part->remote_GPs == NULL) { + dev_err(xpc_chan, "can't get memory for remote get/put " + "values\n"); + kfree(part->local_GPs_base); + part->local_GPs = NULL; + kfree(part->channels); + part->channels = NULL; + return xpcNoMemory; + } + + /* allocate all the required open and close args */ + + part->local_openclose_args = + xpc_kzalloc_cacheline_aligned(XPC_OPENCLOSE_ARGS_SIZE, GFP_KERNEL, + &part->local_openclose_args_base); + if (part->local_openclose_args == NULL) { + dev_err(xpc_chan, "can't get memory for local connect args\n"); + kfree(part->remote_GPs_base); + part->remote_GPs = NULL; + kfree(part->local_GPs_base); + part->local_GPs = NULL; + kfree(part->channels); + part->channels = NULL; + return xpcNoMemory; + } + + part->remote_openclose_args = + xpc_kzalloc_cacheline_aligned(XPC_OPENCLOSE_ARGS_SIZE, GFP_KERNEL, + &part->remote_openclose_args_base); + if (part->remote_openclose_args == NULL) { + dev_err(xpc_chan, "can't get memory for remote connect args\n"); + kfree(part->local_openclose_args_base); + part->local_openclose_args = NULL; + kfree(part->remote_GPs_base); + part->remote_GPs = NULL; + kfree(part->local_GPs_base); + part->local_GPs = NULL; + kfree(part->channels); + 
part->channels = NULL; + return xpcNoMemory; + } + + xpc_initialize_channels(part, partid); + + atomic_set(&part->nchannels_active, 0); + atomic_set(&part->nchannels_engaged, 0); + + /* local_IPI_amo were set to 0 by an earlier memset() */ + + /* Initialize this partitions AMO_t structure */ + part->local_IPI_amo_va = xpc_IPI_init(partid); + + spin_lock_init(&part->IPI_lock); + + atomic_set(&part->channel_mgr_requests, 1); + init_waitqueue_head(&part->channel_mgr_wq); + + sprintf(part->IPI_owner, "xpc%02d", partid); + ret = request_irq(SGI_XPC_NOTIFY, xpc_notify_IRQ_handler, IRQF_SHARED, + part->IPI_owner, (void *)(u64)partid); + if (ret != 0) { + dev_err(xpc_chan, "can't register NOTIFY IRQ handler, " + "errno=%d\n", -ret); + kfree(part->remote_openclose_args_base); + part->remote_openclose_args = NULL; + kfree(part->local_openclose_args_base); + part->local_openclose_args = NULL; + kfree(part->remote_GPs_base); + part->remote_GPs = NULL; + kfree(part->local_GPs_base); + part->local_GPs = NULL; + kfree(part->channels); + part->channels = NULL; + return xpcLackOfResources; + } + + /* Setup a timer to check for dropped IPIs */ + timer = &part->dropped_IPI_timer; + init_timer(timer); + timer->function = (void (*)(unsigned long))xpc_dropped_IPI_check; + timer->data = (unsigned long)part; + timer->expires = jiffies + XPC_P_DROPPED_IPI_WAIT; + add_timer(timer); + + /* + * With the setting of the partition setup_state to XPC_P_SETUP, we're + * declaring that this partition is ready to go. + */ + part->setup_state = XPC_P_SETUP; + + /* + * Setup the per partition specific variables required by the + * remote partition to establish channel connections with us. + * + * The setting of the magic # indicates that these per partition + * specific variables are ready to be used. + */ + xpc_vars_part[partid].GPs_pa = __pa(part->local_GPs); + xpc_vars_part[partid].openclose_args_pa = + __pa(part->local_openclose_args); + xpc_vars_part[partid].IPI_amo_pa = __pa(part->local_IPI_amo_va); + cpuid = raw_smp_processor_id(); /* any CPU in this partition will do */ + xpc_vars_part[partid].IPI_nasid = cpuid_to_nasid(cpuid); + xpc_vars_part[partid].IPI_phys_cpuid = cpu_physical_id(cpuid); + xpc_vars_part[partid].nchannels = part->nchannels; + xpc_vars_part[partid].magic = XPC_VP_MAGIC1; + + return xpcSuccess; +} + +/* + * Create a wrapper that hides the underlying mechanism for pulling a cacheline + * (or multiple cachelines) from a remote partition. + * + * src must be a cacheline aligned physical address on the remote partition. + * dst must be a cacheline aligned virtual address on this partition. + * cnt must be an cacheline sized + */ +static enum xpc_retval +xpc_pull_remote_cachelines(struct xpc_partition *part, void *dst, + const void *src, size_t cnt) +{ + bte_result_t bte_ret; + + DBUG_ON((u64)src != L1_CACHE_ALIGN((u64)src)); + DBUG_ON((u64)dst != L1_CACHE_ALIGN((u64)dst)); + DBUG_ON(cnt != L1_CACHE_ALIGN(cnt)); + + if (part->act_state == XPC_P_DEACTIVATING) + return part->reason; + + bte_ret = xp_bte_copy((u64)src, (u64)dst, (u64)cnt, + (BTE_NORMAL | BTE_WACQUIRE), NULL); + if (bte_ret == BTE_SUCCESS) + return xpcSuccess; + + dev_dbg(xpc_chan, "xp_bte_copy() from partition %d failed, ret=%d\n", + XPC_PARTID(part), bte_ret); + + return xpc_map_bte_errors(bte_ret); +} + +/* + * Pull the remote per partition specific variables from the specified + * partition. 
+ */ +enum xpc_retval +xpc_pull_remote_vars_part(struct xpc_partition *part) +{ + u8 buffer[L1_CACHE_BYTES * 2]; + struct xpc_vars_part *pulled_entry_cacheline = + (struct xpc_vars_part *)L1_CACHE_ALIGN((u64)buffer); + struct xpc_vars_part *pulled_entry; + u64 remote_entry_cacheline_pa, remote_entry_pa; + partid_t partid = XPC_PARTID(part); + enum xpc_retval ret; + + /* pull the cacheline that contains the variables we're interested in */ + + DBUG_ON(part->remote_vars_part_pa != + L1_CACHE_ALIGN(part->remote_vars_part_pa)); + DBUG_ON(sizeof(struct xpc_vars_part) != L1_CACHE_BYTES / 2); + + remote_entry_pa = part->remote_vars_part_pa + + sn_partition_id * sizeof(struct xpc_vars_part); + + remote_entry_cacheline_pa = (remote_entry_pa & ~(L1_CACHE_BYTES - 1)); + + pulled_entry = (struct xpc_vars_part *)((u64)pulled_entry_cacheline + + (remote_entry_pa & + (L1_CACHE_BYTES - 1))); + + ret = xpc_pull_remote_cachelines(part, pulled_entry_cacheline, + (void *)remote_entry_cacheline_pa, + L1_CACHE_BYTES); + if (ret != xpcSuccess) { + dev_dbg(xpc_chan, "failed to pull XPC vars_part from " + "partition %d, ret=%d\n", partid, ret); + return ret; + } + + /* see if they've been set up yet */ + + if (pulled_entry->magic != XPC_VP_MAGIC1 && + pulled_entry->magic != XPC_VP_MAGIC2) { + + if (pulled_entry->magic != 0) { + dev_dbg(xpc_chan, "partition %d's XPC vars_part for " + "partition %d has bad magic value (=0x%lx)\n", + partid, sn_partition_id, pulled_entry->magic); + return xpcBadMagic; + } + + /* they've not been initialized yet */ + return xpcRetry; + } + + if (xpc_vars_part[partid].magic == XPC_VP_MAGIC1) { + + /* validate the variables */ + + if (pulled_entry->GPs_pa == 0 || + pulled_entry->openclose_args_pa == 0 || + pulled_entry->IPI_amo_pa == 0) { + + dev_err(xpc_chan, "partition %d's XPC vars_part for " + "partition %d are not valid\n", partid, + sn_partition_id); + return xpcInvalidAddress; + } + + /* the variables we imported look to be valid */ + + part->remote_GPs_pa = pulled_entry->GPs_pa; + part->remote_openclose_args_pa = + pulled_entry->openclose_args_pa; + part->remote_IPI_amo_va = + (AMO_t *)__va(pulled_entry->IPI_amo_pa); + part->remote_IPI_nasid = pulled_entry->IPI_nasid; + part->remote_IPI_phys_cpuid = pulled_entry->IPI_phys_cpuid; + + if (part->nchannels > pulled_entry->nchannels) + part->nchannels = pulled_entry->nchannels; + + /* let the other side know that we've pulled their variables */ + + xpc_vars_part[partid].magic = XPC_VP_MAGIC2; + } + + if (pulled_entry->magic == XPC_VP_MAGIC1) + return xpcRetry; + + return xpcSuccess; +} + +/* + * Get the IPI flags and pull the openclose args and/or remote GPs as needed. + */ +static u64 +xpc_get_IPI_flags(struct xpc_partition *part) +{ + unsigned long irq_flags; + u64 IPI_amo; + enum xpc_retval ret; + + /* + * See if there are any IPI flags to be handled. 
+ */ + + spin_lock_irqsave(&part->IPI_lock, irq_flags); + IPI_amo = part->local_IPI_amo; + if (IPI_amo != 0) + part->local_IPI_amo = 0; + + spin_unlock_irqrestore(&part->IPI_lock, irq_flags); + + if (XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(IPI_amo)) { + ret = xpc_pull_remote_cachelines(part, + part->remote_openclose_args, + (void *)part-> + remote_openclose_args_pa, + XPC_OPENCLOSE_ARGS_SIZE); + if (ret != xpcSuccess) { + XPC_DEACTIVATE_PARTITION(part, ret); + + dev_dbg(xpc_chan, "failed to pull openclose args from " + "partition %d, ret=%d\n", XPC_PARTID(part), + ret); + + /* don't bother processing IPIs anymore */ + IPI_amo = 0; + } + } + + if (XPC_ANY_MSG_IPI_FLAGS_SET(IPI_amo)) { + ret = xpc_pull_remote_cachelines(part, part->remote_GPs, + (void *)part->remote_GPs_pa, + XPC_GP_SIZE); + if (ret != xpcSuccess) { + XPC_DEACTIVATE_PARTITION(part, ret); + + dev_dbg(xpc_chan, "failed to pull GPs from partition " + "%d, ret=%d\n", XPC_PARTID(part), ret); + + /* don't bother processing IPIs anymore */ + IPI_amo = 0; + } + } + + return IPI_amo; +} + +/* + * Allocate the local message queue and the notify queue. + */ +static enum xpc_retval +xpc_allocate_local_msgqueue(struct xpc_channel *ch) +{ + unsigned long irq_flags; + int nentries; + size_t nbytes; + + for (nentries = ch->local_nentries; nentries > 0; nentries--) { + + nbytes = nentries * ch->msg_size; + ch->local_msgqueue = xpc_kzalloc_cacheline_aligned(nbytes, + GFP_KERNEL, + &ch->local_msgqueue_base); + if (ch->local_msgqueue == NULL) + continue; + + nbytes = nentries * sizeof(struct xpc_notify); + ch->notify_queue = kzalloc(nbytes, GFP_KERNEL); + if (ch->notify_queue == NULL) { + kfree(ch->local_msgqueue_base); + ch->local_msgqueue = NULL; + continue; + } + + spin_lock_irqsave(&ch->lock, irq_flags); + if (nentries < ch->local_nentries) { + dev_dbg(xpc_chan, "nentries=%d local_nentries=%d, " + "partid=%d, channel=%d\n", nentries, + ch->local_nentries, ch->partid, ch->number); + + ch->local_nentries = nentries; + } + spin_unlock_irqrestore(&ch->lock, irq_flags); + return xpcSuccess; + } + + dev_dbg(xpc_chan, "can't get memory for local message queue and notify " + "queue, partid=%d, channel=%d\n", ch->partid, ch->number); + return xpcNoMemory; +} + +/* + * Allocate the cached remote message queue. + */ +static enum xpc_retval +xpc_allocate_remote_msgqueue(struct xpc_channel *ch) +{ + unsigned long irq_flags; + int nentries; + size_t nbytes; + + DBUG_ON(ch->remote_nentries <= 0); + + for (nentries = ch->remote_nentries; nentries > 0; nentries--) { + + nbytes = nentries * ch->msg_size; + ch->remote_msgqueue = xpc_kzalloc_cacheline_aligned(nbytes, + GFP_KERNEL, + &ch->remote_msgqueue_base); + if (ch->remote_msgqueue == NULL) + continue; + + spin_lock_irqsave(&ch->lock, irq_flags); + if (nentries < ch->remote_nentries) { + dev_dbg(xpc_chan, "nentries=%d remote_nentries=%d, " + "partid=%d, channel=%d\n", nentries, + ch->remote_nentries, ch->partid, ch->number); + + ch->remote_nentries = nentries; + } + spin_unlock_irqrestore(&ch->lock, irq_flags); + return xpcSuccess; + } + + dev_dbg(xpc_chan, "can't get memory for cached remote message queue, " + "partid=%d, channel=%d\n", ch->partid, ch->number); + return xpcNoMemory; +} + +/* + * Allocate message queues and other stuff associated with a channel. + * + * Note: Assumes all of the channel sizes are filled in. 
+ */ +static enum xpc_retval +xpc_allocate_msgqueues(struct xpc_channel *ch) +{ + unsigned long irq_flags; + enum xpc_retval ret; + + DBUG_ON(ch->flags & XPC_C_SETUP); + + ret = xpc_allocate_local_msgqueue(ch); + if (ret != xpcSuccess) + return ret; + + ret = xpc_allocate_remote_msgqueue(ch); + if (ret != xpcSuccess) { + kfree(ch->local_msgqueue_base); + ch->local_msgqueue = NULL; + kfree(ch->notify_queue); + ch->notify_queue = NULL; + return ret; + } + + spin_lock_irqsave(&ch->lock, irq_flags); + ch->flags |= XPC_C_SETUP; + spin_unlock_irqrestore(&ch->lock, irq_flags); + + return xpcSuccess; +} + +/* + * Process a connect message from a remote partition. + * + * Note: xpc_process_connect() is expecting to be called with the + * spin_lock_irqsave held and will leave it locked upon return. + */ +static void +xpc_process_connect(struct xpc_channel *ch, unsigned long *irq_flags) +{ + enum xpc_retval ret; + + DBUG_ON(!spin_is_locked(&ch->lock)); + + if (!(ch->flags & XPC_C_OPENREQUEST) || + !(ch->flags & XPC_C_ROPENREQUEST)) { + /* nothing more to do for now */ + return; + } + DBUG_ON(!(ch->flags & XPC_C_CONNECTING)); + + if (!(ch->flags & XPC_C_SETUP)) { + spin_unlock_irqrestore(&ch->lock, *irq_flags); + ret = xpc_allocate_msgqueues(ch); + spin_lock_irqsave(&ch->lock, *irq_flags); + + if (ret != xpcSuccess) + XPC_DISCONNECT_CHANNEL(ch, ret, irq_flags); + + if (ch->flags & (XPC_C_CONNECTED | XPC_C_DISCONNECTING)) + return; + + DBUG_ON(!(ch->flags & XPC_C_SETUP)); + DBUG_ON(ch->local_msgqueue == NULL); + DBUG_ON(ch->remote_msgqueue == NULL); + } + + if (!(ch->flags & XPC_C_OPENREPLY)) { + ch->flags |= XPC_C_OPENREPLY; + xpc_IPI_send_openreply(ch, irq_flags); + } + + if (!(ch->flags & XPC_C_ROPENREPLY)) + return; + + DBUG_ON(ch->remote_msgqueue_pa == 0); + + ch->flags = (XPC_C_CONNECTED | XPC_C_SETUP); /* clear all else */ + + dev_info(xpc_chan, "channel %d to partition %d connected\n", + ch->number, ch->partid); + + spin_unlock_irqrestore(&ch->lock, *irq_flags); + xpc_create_kthreads(ch, 1, 0); + spin_lock_irqsave(&ch->lock, *irq_flags); +} + +/* + * Notify those who wanted to be notified upon delivery of their message. + */ +static void +xpc_notify_senders(struct xpc_channel *ch, enum xpc_retval reason, s64 put) +{ + struct xpc_notify *notify; + u8 notify_type; + s64 get = ch->w_remote_GP.get - 1; + + while (++get < put && atomic_read(&ch->n_to_notify) > 0) { + + notify = &ch->notify_queue[get % ch->local_nentries]; + + /* + * See if the notify entry indicates it was associated with + * a message who's sender wants to be notified. It is possible + * that it is, but someone else is doing or has done the + * notification. + */ + notify_type = notify->type; + if (notify_type == 0 || + cmpxchg(¬ify->type, notify_type, 0) != notify_type) { + continue; + } + + DBUG_ON(notify_type != XPC_N_CALL); + + atomic_dec(&ch->n_to_notify); + + if (notify->func != NULL) { + dev_dbg(xpc_chan, "notify->func() called, notify=0x%p, " + "msg_number=%ld, partid=%d, channel=%d\n", + (void *)notify, get, ch->partid, ch->number); + + notify->func(reason, ch->partid, ch->number, + notify->key); + + dev_dbg(xpc_chan, "notify->func() returned, " + "notify=0x%p, msg_number=%ld, partid=%d, " + "channel=%d\n", (void *)notify, get, + ch->partid, ch->number); + } + } +} + +/* + * Free up message queues and other stuff that were allocated for the specified + * channel. + * + * Note: ch->reason and ch->reason_line are left set for debugging purposes, + * they're cleared when XPC_C_DISCONNECTED is cleared. 
+ */ +static void +xpc_free_msgqueues(struct xpc_channel *ch) +{ + DBUG_ON(!spin_is_locked(&ch->lock)); + DBUG_ON(atomic_read(&ch->n_to_notify) != 0); + + ch->remote_msgqueue_pa = 0; + ch->func = NULL; + ch->key = NULL; + ch->msg_size = 0; + ch->local_nentries = 0; + ch->remote_nentries = 0; + ch->kthreads_assigned_limit = 0; + ch->kthreads_idle_limit = 0; + + ch->local_GP->get = 0; + ch->local_GP->put = 0; + ch->remote_GP.get = 0; + ch->remote_GP.put = 0; + ch->w_local_GP.get = 0; + ch->w_local_GP.put = 0; + ch->w_remote_GP.get = 0; + ch->w_remote_GP.put = 0; + ch->next_msg_to_pull = 0; + + if (ch->flags & XPC_C_SETUP) { + ch->flags &= ~XPC_C_SETUP; + + dev_dbg(xpc_chan, "ch->flags=0x%x, partid=%d, channel=%d\n", + ch->flags, ch->partid, ch->number); + + kfree(ch->local_msgqueue_base); + ch->local_msgqueue = NULL; + kfree(ch->remote_msgqueue_base); + ch->remote_msgqueue = NULL; + kfree(ch->notify_queue); + ch->notify_queue = NULL; + } +} + +/* + * spin_lock_irqsave() is expected to be held on entry. + */ +static void +xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags) +{ + struct xpc_partition *part = &xpc_partitions[ch->partid]; + u32 channel_was_connected = (ch->flags & XPC_C_WASCONNECTED); + + DBUG_ON(!spin_is_locked(&ch->lock)); + + if (!(ch->flags & XPC_C_DISCONNECTING)) + return; + + DBUG_ON(!(ch->flags & XPC_C_CLOSEREQUEST)); + + /* make sure all activity has settled down first */ + + if (atomic_read(&ch->kthreads_assigned) > 0 || + atomic_read(&ch->references) > 0) { + return; + } + DBUG_ON((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) && + !(ch->flags & XPC_C_DISCONNECTINGCALLOUT_MADE)); + + if (part->act_state == XPC_P_DEACTIVATING) { + /* can't proceed until the other side disengages from us */ + if (xpc_partition_engaged(1UL << ch->partid)) + return; + + } else { + + /* as long as the other side is up do the full protocol */ + + if (!(ch->flags & XPC_C_RCLOSEREQUEST)) + return; + + if (!(ch->flags & XPC_C_CLOSEREPLY)) { + ch->flags |= XPC_C_CLOSEREPLY; + xpc_IPI_send_closereply(ch, irq_flags); + } + + if (!(ch->flags & XPC_C_RCLOSEREPLY)) + return; + } + + /* wake those waiting for notify completion */ + if (atomic_read(&ch->n_to_notify) > 0) { + /* >>> we do callout while holding ch->lock */ + xpc_notify_senders(ch, ch->reason, ch->w_local_GP.put); + } + + /* both sides are disconnected now */ + + if (ch->flags & XPC_C_DISCONNECTINGCALLOUT_MADE) { + spin_unlock_irqrestore(&ch->lock, *irq_flags); + xpc_disconnect_callout(ch, xpcDisconnected); + spin_lock_irqsave(&ch->lock, *irq_flags); + } + + /* it's now safe to free the channel's message queues */ + xpc_free_msgqueues(ch); + + /* mark disconnected, clear all other flags except XPC_C_WDISCONNECT */ + ch->flags = (XPC_C_DISCONNECTED | (ch->flags & XPC_C_WDISCONNECT)); + + atomic_dec(&part->nchannels_active); + + if (channel_was_connected) { + dev_info(xpc_chan, "channel %d to partition %d disconnected, " + "reason=%d\n", ch->number, ch->partid, ch->reason); + } + + if (ch->flags & XPC_C_WDISCONNECT) { + /* we won't lose the CPU since we're holding ch->lock */ + complete(&ch->wdisconnect_wait); + } else if (ch->delayed_IPI_flags) { + if (part->act_state != XPC_P_DEACTIVATING) { + /* time to take action on any delayed IPI flags */ + spin_lock(&part->IPI_lock); + XPC_SET_IPI_FLAGS(part->local_IPI_amo, ch->number, + ch->delayed_IPI_flags); + spin_unlock(&part->IPI_lock); + } + ch->delayed_IPI_flags = 0; + } +} + +/* + * Process a change in the channel's remote connection state. 
+ */ +static void +xpc_process_openclose_IPI(struct xpc_partition *part, int ch_number, + u8 IPI_flags) +{ + unsigned long irq_flags; + struct xpc_openclose_args *args = + &part->remote_openclose_args[ch_number]; + struct xpc_channel *ch = &part->channels[ch_number]; + enum xpc_retval reason; + + spin_lock_irqsave(&ch->lock, irq_flags); + +again: + + if ((ch->flags & XPC_C_DISCONNECTED) && + (ch->flags & XPC_C_WDISCONNECT)) { + /* + * Delay processing IPI flags until thread waiting disconnect + * has had a chance to see that the channel is disconnected. + */ + ch->delayed_IPI_flags |= IPI_flags; + spin_unlock_irqrestore(&ch->lock, irq_flags); + return; + } + + if (IPI_flags & XPC_IPI_CLOSEREQUEST) { + + dev_dbg(xpc_chan, "XPC_IPI_CLOSEREQUEST (reason=%d) received " + "from partid=%d, channel=%d\n", args->reason, + ch->partid, ch->number); + + /* + * If RCLOSEREQUEST is set, we're probably waiting for + * RCLOSEREPLY. We should find it and a ROPENREQUEST packed + * with this RCLOSEREQUEST in the IPI_flags. + */ + + if (ch->flags & XPC_C_RCLOSEREQUEST) { + DBUG_ON(!(ch->flags & XPC_C_DISCONNECTING)); + DBUG_ON(!(ch->flags & XPC_C_CLOSEREQUEST)); + DBUG_ON(!(ch->flags & XPC_C_CLOSEREPLY)); + DBUG_ON(ch->flags & XPC_C_RCLOSEREPLY); + + DBUG_ON(!(IPI_flags & XPC_IPI_CLOSEREPLY)); + IPI_flags &= ~XPC_IPI_CLOSEREPLY; + ch->flags |= XPC_C_RCLOSEREPLY; + + /* both sides have finished disconnecting */ + xpc_process_disconnect(ch, &irq_flags); + DBUG_ON(!(ch->flags & XPC_C_DISCONNECTED)); + goto again; + } + + if (ch->flags & XPC_C_DISCONNECTED) { + if (!(IPI_flags & XPC_IPI_OPENREQUEST)) { + if ((XPC_GET_IPI_FLAGS(part->local_IPI_amo, + ch_number) & + XPC_IPI_OPENREQUEST)) { + + DBUG_ON(ch->delayed_IPI_flags != 0); + spin_lock(&part->IPI_lock); + XPC_SET_IPI_FLAGS(part->local_IPI_amo, + ch_number, + XPC_IPI_CLOSEREQUEST); + spin_unlock(&part->IPI_lock); + } + spin_unlock_irqrestore(&ch->lock, irq_flags); + return; + } + + XPC_SET_REASON(ch, 0, 0); + ch->flags &= ~XPC_C_DISCONNECTED; + + atomic_inc(&part->nchannels_active); + ch->flags |= (XPC_C_CONNECTING | XPC_C_ROPENREQUEST); + } + + IPI_flags &= ~(XPC_IPI_OPENREQUEST | XPC_IPI_OPENREPLY); + + /* + * The meaningful CLOSEREQUEST connection state fields are: + * reason = reason connection is to be closed + */ + + ch->flags |= XPC_C_RCLOSEREQUEST; + + if (!(ch->flags & XPC_C_DISCONNECTING)) { + reason = args->reason; + if (reason <= xpcSuccess || reason > xpcUnknownReason) + reason = xpcUnknownReason; + else if (reason == xpcUnregistering) + reason = xpcOtherUnregistering; + + XPC_DISCONNECT_CHANNEL(ch, reason, &irq_flags); + + DBUG_ON(IPI_flags & XPC_IPI_CLOSEREPLY); + spin_unlock_irqrestore(&ch->lock, irq_flags); + return; + } + + xpc_process_disconnect(ch, &irq_flags); + } + + if (IPI_flags & XPC_IPI_CLOSEREPLY) { + + dev_dbg(xpc_chan, "XPC_IPI_CLOSEREPLY received from partid=%d," + " channel=%d\n", ch->partid, ch->number); + + if (ch->flags & XPC_C_DISCONNECTED) { + DBUG_ON(part->act_state != XPC_P_DEACTIVATING); + spin_unlock_irqrestore(&ch->lock, irq_flags); + return; + } + + DBUG_ON(!(ch->flags & XPC_C_CLOSEREQUEST)); + + if (!(ch->flags & XPC_C_RCLOSEREQUEST)) { + if ((XPC_GET_IPI_FLAGS(part->local_IPI_amo, ch_number) + & XPC_IPI_CLOSEREQUEST)) { + + DBUG_ON(ch->delayed_IPI_flags != 0); + spin_lock(&part->IPI_lock); + XPC_SET_IPI_FLAGS(part->local_IPI_amo, + ch_number, + XPC_IPI_CLOSEREPLY); + spin_unlock(&part->IPI_lock); + } + spin_unlock_irqrestore(&ch->lock, irq_flags); + return; + } + + ch->flags |= XPC_C_RCLOSEREPLY; + + if (ch->flags & 
XPC_C_CLOSEREPLY) { + /* both sides have finished disconnecting */ + xpc_process_disconnect(ch, &irq_flags); + } + } + + if (IPI_flags & XPC_IPI_OPENREQUEST) { + + dev_dbg(xpc_chan, "XPC_IPI_OPENREQUEST (msg_size=%d, " + "local_nentries=%d) received from partid=%d, " + "channel=%d\n", args->msg_size, args->local_nentries, + ch->partid, ch->number); + + if (part->act_state == XPC_P_DEACTIVATING || + (ch->flags & XPC_C_ROPENREQUEST)) { + spin_unlock_irqrestore(&ch->lock, irq_flags); + return; + } + + if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_WDISCONNECT)) { + ch->delayed_IPI_flags |= XPC_IPI_OPENREQUEST; + spin_unlock_irqrestore(&ch->lock, irq_flags); + return; + } + DBUG_ON(!(ch->flags & (XPC_C_DISCONNECTED | + XPC_C_OPENREQUEST))); + DBUG_ON(ch->flags & (XPC_C_ROPENREQUEST | XPC_C_ROPENREPLY | + XPC_C_OPENREPLY | XPC_C_CONNECTED)); + + /* + * The meaningful OPENREQUEST connection state fields are: + * msg_size = size of channel's messages in bytes + * local_nentries = remote partition's local_nentries + */ + if (args->msg_size == 0 || args->local_nentries == 0) { + /* assume OPENREQUEST was delayed by mistake */ + spin_unlock_irqrestore(&ch->lock, irq_flags); + return; + } + + ch->flags |= (XPC_C_ROPENREQUEST | XPC_C_CONNECTING); + ch->remote_nentries = args->local_nentries; + + if (ch->flags & XPC_C_OPENREQUEST) { + if (args->msg_size != ch->msg_size) { + XPC_DISCONNECT_CHANNEL(ch, xpcUnequalMsgSizes, + &irq_flags); + spin_unlock_irqrestore(&ch->lock, irq_flags); + return; + } + } else { + ch->msg_size = args->msg_size; + + XPC_SET_REASON(ch, 0, 0); + ch->flags &= ~XPC_C_DISCONNECTED; + + atomic_inc(&part->nchannels_active); + } + + xpc_process_connect(ch, &irq_flags); + } + + if (IPI_flags & XPC_IPI_OPENREPLY) { + + dev_dbg(xpc_chan, "XPC_IPI_OPENREPLY (local_msgqueue_pa=0x%lx, " + "local_nentries=%d, remote_nentries=%d) received from " + "partid=%d, channel=%d\n", args->local_msgqueue_pa, + args->local_nentries, args->remote_nentries, + ch->partid, ch->number); + + if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_DISCONNECTED)) { + spin_unlock_irqrestore(&ch->lock, irq_flags); + return; + } + if (!(ch->flags & XPC_C_OPENREQUEST)) { + XPC_DISCONNECT_CHANNEL(ch, xpcOpenCloseError, + &irq_flags); + spin_unlock_irqrestore(&ch->lock, irq_flags); + return; + } + + DBUG_ON(!(ch->flags & XPC_C_ROPENREQUEST)); + DBUG_ON(ch->flags & XPC_C_CONNECTED); + + /* + * The meaningful OPENREPLY connection state fields are: + * local_msgqueue_pa = physical address of remote + * partition's local_msgqueue + * local_nentries = remote partition's local_nentries + * remote_nentries = remote partition's remote_nentries + */ + DBUG_ON(args->local_msgqueue_pa == 0); + DBUG_ON(args->local_nentries == 0); + DBUG_ON(args->remote_nentries == 0); + + ch->flags |= XPC_C_ROPENREPLY; + ch->remote_msgqueue_pa = args->local_msgqueue_pa; + + if (args->local_nentries < ch->remote_nentries) { + dev_dbg(xpc_chan, "XPC_IPI_OPENREPLY: new " + "remote_nentries=%d, old remote_nentries=%d, " + "partid=%d, channel=%d\n", + args->local_nentries, ch->remote_nentries, + ch->partid, ch->number); + + ch->remote_nentries = args->local_nentries; + } + if (args->remote_nentries < ch->local_nentries) { + dev_dbg(xpc_chan, "XPC_IPI_OPENREPLY: new " + "local_nentries=%d, old local_nentries=%d, " + "partid=%d, channel=%d\n", + args->remote_nentries, ch->local_nentries, + ch->partid, ch->number); + + ch->local_nentries = args->remote_nentries; + } + + xpc_process_connect(ch, &irq_flags); + } + + spin_unlock_irqrestore(&ch->lock, irq_flags); +} + 
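/*
 * Editor's note -- illustrative sketch, not part of the commit above.
 *
 * xpc_get_IPI_flags() and xpc_process_openclose_IPI() consume per-channel
 * flag bytes extracted from the 64-bit local_IPI_amo word with the
 * XPC_GET_IPI_FLAGS()/XPC_SET_IPI_FLAGS() helpers, which are defined in
 * xpc.h and therefore not visible in this hunk.  The snippet below shows
 * one plausible packing -- eight flag bits per channel, so eight channels
 * fit in one AMO word.  The sk_* names, flag values and masks are
 * assumptions made for illustration only; the real definitions live in the
 * header.
 */
#include <stdint.h>

#define SK_IPI_CLOSEREQUEST	0x01	/* hypothetical per-channel flag bits */
#define SK_IPI_CLOSEREPLY	0x02
#define SK_IPI_OPENREQUEST	0x04
#define SK_IPI_OPENREPLY	0x08
#define SK_IPI_MSGREQUEST	0x10

/* extract the flag byte for one channel from the 64-bit AMO word */
static inline uint8_t sk_get_ipi_flags(uint64_t amo, int ch_number)
{
	return (uint8_t)((amo >> (ch_number * 8)) & 0xff);
}

/* merge new flag bits for one channel back into the AMO word */
static inline void sk_set_ipi_flags(uint64_t *amo, int ch_number, uint8_t flags)
{
	*amo |= (uint64_t)flags << (ch_number * 8);
}

/* true if any channel has an open/close related flag pending */
static inline int sk_any_openclose_set(uint64_t amo)
{
	return (amo & 0x0f0f0f0f0f0f0f0fULL) != 0;
}

/* true if any channel has a message-request flag pending */
static inline int sk_any_msg_set(uint64_t amo)
{
	return (amo & 0x1010101010101010ULL) != 0;
}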
+/* + * Attempt to establish a channel connection to a remote partition. + */ +static enum xpc_retval +xpc_connect_channel(struct xpc_channel *ch) +{ + unsigned long irq_flags; + struct xpc_registration *registration = &xpc_registrations[ch->number]; + + if (mutex_trylock(®istration->mutex) == 0) + return xpcRetry; + + if (!XPC_CHANNEL_REGISTERED(ch->number)) { + mutex_unlock(®istration->mutex); + return xpcUnregistered; + } + + spin_lock_irqsave(&ch->lock, irq_flags); + + DBUG_ON(ch->flags & XPC_C_CONNECTED); + DBUG_ON(ch->flags & XPC_C_OPENREQUEST); + + if (ch->flags & XPC_C_DISCONNECTING) { + spin_unlock_irqrestore(&ch->lock, irq_flags); + mutex_unlock(®istration->mutex); + return ch->reason; + } + + /* add info from the channel connect registration to the channel */ + + ch->kthreads_assigned_limit = registration->assigned_limit; + ch->kthreads_idle_limit = registration->idle_limit; + DBUG_ON(atomic_read(&ch->kthreads_assigned) != 0); + DBUG_ON(atomic_read(&ch->kthreads_idle) != 0); + DBUG_ON(atomic_read(&ch->kthreads_active) != 0); + + ch->func = registration->func; + DBUG_ON(registration->func == NULL); + ch->key = registration->key; + + ch->local_nentries = registration->nentries; + + if (ch->flags & XPC_C_ROPENREQUEST) { + if (registration->msg_size != ch->msg_size) { + /* the local and remote sides aren't the same */ + + /* + * Because XPC_DISCONNECT_CHANNEL() can block we're + * forced to up the registration sema before we unlock + * the channel lock. But that's okay here because we're + * done with the part that required the registration + * sema. XPC_DISCONNECT_CHANNEL() requires that the + * channel lock be locked and will unlock and relock + * the channel lock as needed. + */ + mutex_unlock(®istration->mutex); + XPC_DISCONNECT_CHANNEL(ch, xpcUnequalMsgSizes, + &irq_flags); + spin_unlock_irqrestore(&ch->lock, irq_flags); + return xpcUnequalMsgSizes; + } + } else { + ch->msg_size = registration->msg_size; + + XPC_SET_REASON(ch, 0, 0); + ch->flags &= ~XPC_C_DISCONNECTED; + + atomic_inc(&xpc_partitions[ch->partid].nchannels_active); + } + + mutex_unlock(®istration->mutex); + + /* initiate the connection */ + + ch->flags |= (XPC_C_OPENREQUEST | XPC_C_CONNECTING); + xpc_IPI_send_openrequest(ch, &irq_flags); + + xpc_process_connect(ch, &irq_flags); + + spin_unlock_irqrestore(&ch->lock, irq_flags); + + return xpcSuccess; +} + +/* + * Clear some of the msg flags in the local message queue. + */ +static inline void +xpc_clear_local_msgqueue_flags(struct xpc_channel *ch) +{ + struct xpc_msg *msg; + s64 get; + + get = ch->w_remote_GP.get; + do { + msg = (struct xpc_msg *)((u64)ch->local_msgqueue + + (get % ch->local_nentries) * + ch->msg_size); + msg->flags = 0; + } while (++get < ch->remote_GP.get); +} + +/* + * Clear some of the msg flags in the remote message queue. 
+ */ +static inline void +xpc_clear_remote_msgqueue_flags(struct xpc_channel *ch) +{ + struct xpc_msg *msg; + s64 put; + + put = ch->w_remote_GP.put; + do { + msg = (struct xpc_msg *)((u64)ch->remote_msgqueue + + (put % ch->remote_nentries) * + ch->msg_size); + msg->flags = 0; + } while (++put < ch->remote_GP.put); +} + +static void +xpc_process_msg_IPI(struct xpc_partition *part, int ch_number) +{ + struct xpc_channel *ch = &part->channels[ch_number]; + int nmsgs_sent; + + ch->remote_GP = part->remote_GPs[ch_number]; + + /* See what, if anything, has changed for each connected channel */ + + xpc_msgqueue_ref(ch); + + if (ch->w_remote_GP.get == ch->remote_GP.get && + ch->w_remote_GP.put == ch->remote_GP.put) { + /* nothing changed since GPs were last pulled */ + xpc_msgqueue_deref(ch); + return; + } + + if (!(ch->flags & XPC_C_CONNECTED)) { + xpc_msgqueue_deref(ch); + return; + } + + /* + * First check to see if messages recently sent by us have been + * received by the other side. (The remote GET value will have + * changed since we last looked at it.) + */ + + if (ch->w_remote_GP.get != ch->remote_GP.get) { + + /* + * We need to notify any senders that want to be notified + * that their sent messages have been received by their + * intended recipients. We need to do this before updating + * w_remote_GP.get so that we don't allocate the same message + * queue entries prematurely (see xpc_allocate_msg()). + */ + if (atomic_read(&ch->n_to_notify) > 0) { + /* + * Notify senders that messages sent have been + * received and delivered by the other side. + */ + xpc_notify_senders(ch, xpcMsgDelivered, + ch->remote_GP.get); + } + + /* + * Clear msg->flags in previously sent messages, so that + * they're ready for xpc_allocate_msg(). + */ + xpc_clear_local_msgqueue_flags(ch); + + ch->w_remote_GP.get = ch->remote_GP.get; + + dev_dbg(xpc_chan, "w_remote_GP.get changed to %ld, partid=%d, " + "channel=%d\n", ch->w_remote_GP.get, ch->partid, + ch->number); + + /* + * If anyone was waiting for message queue entries to become + * available, wake them up. + */ + if (atomic_read(&ch->n_on_msg_allocate_wq) > 0) + wake_up(&ch->msg_allocate_wq); + } + + /* + * Now check for newly sent messages by the other side. (The remote + * PUT value will have changed since we last looked at it.) + */ + + if (ch->w_remote_GP.put != ch->remote_GP.put) { + /* + * Clear msg->flags in previously received messages, so that + * they're ready for xpc_get_deliverable_msg(). + */ + xpc_clear_remote_msgqueue_flags(ch); + + ch->w_remote_GP.put = ch->remote_GP.put; + + dev_dbg(xpc_chan, "w_remote_GP.put changed to %ld, partid=%d, " + "channel=%d\n", ch->w_remote_GP.put, ch->partid, + ch->number); + + nmsgs_sent = ch->w_remote_GP.put - ch->w_local_GP.get; + if (nmsgs_sent > 0) { + dev_dbg(xpc_chan, "msgs waiting to be copied and " + "delivered=%d, partid=%d, channel=%d\n", + nmsgs_sent, ch->partid, ch->number); + + if (ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) + xpc_activate_kthreads(ch, nmsgs_sent); + } + } + + xpc_msgqueue_deref(ch); +} + +void +xpc_process_channel_activity(struct xpc_partition *part) +{ + unsigned long irq_flags; + u64 IPI_amo, IPI_flags; + struct xpc_channel *ch; + int ch_number; + u32 ch_flags; + + IPI_amo = xpc_get_IPI_flags(part); + + /* + * Initiate channel connections for registered channels. + * + * For each connected channel that has pending messages activate idle + * kthreads and/or create new kthreads as needed. 
+ */ + + for (ch_number = 0; ch_number < part->nchannels; ch_number++) { + ch = &part->channels[ch_number]; + + /* + * Process any open or close related IPI flags, and then deal + * with connecting or disconnecting the channel as required. + */ + + IPI_flags = XPC_GET_IPI_FLAGS(IPI_amo, ch_number); + + if (XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(IPI_flags)) + xpc_process_openclose_IPI(part, ch_number, IPI_flags); + + ch_flags = ch->flags; /* need an atomic snapshot of flags */ + + if (ch_flags & XPC_C_DISCONNECTING) { + spin_lock_irqsave(&ch->lock, irq_flags); + xpc_process_disconnect(ch, &irq_flags); + spin_unlock_irqrestore(&ch->lock, irq_flags); + continue; + } + + if (part->act_state == XPC_P_DEACTIVATING) + continue; + + if (!(ch_flags & XPC_C_CONNECTED)) { + if (!(ch_flags & XPC_C_OPENREQUEST)) { + DBUG_ON(ch_flags & XPC_C_SETUP); + (void)xpc_connect_channel(ch); + } else { + spin_lock_irqsave(&ch->lock, irq_flags); + xpc_process_connect(ch, &irq_flags); + spin_unlock_irqrestore(&ch->lock, irq_flags); + } + continue; + } + + /* + * Process any message related IPI flags, this may involve the + * activation of kthreads to deliver any pending messages sent + * from the other partition. + */ + + if (XPC_ANY_MSG_IPI_FLAGS_SET(IPI_flags)) + xpc_process_msg_IPI(part, ch_number); + } +} + +/* + * XPC's heartbeat code calls this function to inform XPC that a partition is + * going down. XPC responds by tearing down the XPartition Communication + * infrastructure used for the just downed partition. + * + * XPC's heartbeat code will never call this function and xpc_partition_up() + * at the same time. Nor will it ever make multiple calls to either function + * at the same time. + */ +void +xpc_partition_going_down(struct xpc_partition *part, enum xpc_retval reason) +{ + unsigned long irq_flags; + int ch_number; + struct xpc_channel *ch; + + dev_dbg(xpc_chan, "deactivating partition %d, reason=%d\n", + XPC_PARTID(part), reason); + + if (!xpc_part_ref(part)) { + /* infrastructure for this partition isn't currently set up */ + return; + } + + /* disconnect channels associated with the partition going down */ + + for (ch_number = 0; ch_number < part->nchannels; ch_number++) { + ch = &part->channels[ch_number]; + + xpc_msgqueue_ref(ch); + spin_lock_irqsave(&ch->lock, irq_flags); + + XPC_DISCONNECT_CHANNEL(ch, reason, &irq_flags); + + spin_unlock_irqrestore(&ch->lock, irq_flags); + xpc_msgqueue_deref(ch); + } + + xpc_wakeup_channel_mgr(part); + + xpc_part_deref(part); +} + +/* + * Teardown the infrastructure necessary to support XPartition Communication + * between the specified remote partition and the local one. + */ +void +xpc_teardown_infrastructure(struct xpc_partition *part) +{ + partid_t partid = XPC_PARTID(part); + + /* + * We start off by making this partition inaccessible to local + * processes by marking it as no longer setup. Then we make it + * inaccessible to remote processes by clearing the XPC per partition + * specific variable's magic # (which indicates that these variables + * are no longer valid) and by ignoring all XPC notify IPIs sent to + * this partition. + */ + + DBUG_ON(atomic_read(&part->nchannels_engaged) != 0); + DBUG_ON(atomic_read(&part->nchannels_active) != 0); + DBUG_ON(part->setup_state != XPC_P_SETUP); + part->setup_state = XPC_P_WTEARDOWN; + + xpc_vars_part[partid].magic = 0; + + free_irq(SGI_XPC_NOTIFY, (void *)(u64)partid); + + /* + * Before proceeding with the teardown we have to wait until all + * existing references cease. 
+ */ + wait_event(part->teardown_wq, (atomic_read(&part->references) == 0)); + + /* now we can begin tearing down the infrastructure */ + + part->setup_state = XPC_P_TORNDOWN; + + /* in case we've still got outstanding timers registered... */ + del_timer_sync(&part->dropped_IPI_timer); + + kfree(part->remote_openclose_args_base); + part->remote_openclose_args = NULL; + kfree(part->local_openclose_args_base); + part->local_openclose_args = NULL; + kfree(part->remote_GPs_base); + part->remote_GPs = NULL; + kfree(part->local_GPs_base); + part->local_GPs = NULL; + kfree(part->channels); + part->channels = NULL; + part->local_IPI_amo_va = NULL; +} + +/* + * Called by XP at the time of channel connection registration to cause + * XPC to establish connections to all currently active partitions. + */ +void +xpc_initiate_connect(int ch_number) +{ + partid_t partid; + struct xpc_partition *part; + struct xpc_channel *ch; + + DBUG_ON(ch_number < 0 || ch_number >= XPC_NCHANNELS); + + for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) { + part = &xpc_partitions[partid]; + + if (xpc_part_ref(part)) { + ch = &part->channels[ch_number]; + + /* + * Initiate the establishment of a connection on the + * newly registered channel to the remote partition. + */ + xpc_wakeup_channel_mgr(part); + xpc_part_deref(part); + } + } +} + +void +xpc_connected_callout(struct xpc_channel *ch) +{ + /* let the registerer know that a connection has been established */ + + if (ch->func != NULL) { + dev_dbg(xpc_chan, "ch->func() called, reason=xpcConnected, " + "partid=%d, channel=%d\n", ch->partid, ch->number); + + ch->func(xpcConnected, ch->partid, ch->number, + (void *)(u64)ch->local_nentries, ch->key); + + dev_dbg(xpc_chan, "ch->func() returned, reason=xpcConnected, " + "partid=%d, channel=%d\n", ch->partid, ch->number); + } +} + +/* + * Called by XP at the time of channel connection unregistration to cause + * XPC to teardown all current connections for the specified channel. + * + * Before returning xpc_initiate_disconnect() will wait until all connections + * on the specified channel have been closed/torndown. So the caller can be + * assured that they will not be receiving any more callouts from XPC to the + * function they registered via xpc_connect(). + * + * Arguments: + * + * ch_number - channel # to unregister. + */ +void +xpc_initiate_disconnect(int ch_number) +{ + unsigned long irq_flags; + partid_t partid; + struct xpc_partition *part; + struct xpc_channel *ch; + + DBUG_ON(ch_number < 0 || ch_number >= XPC_NCHANNELS); + + /* initiate the channel disconnect for every active partition */ + for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) { + part = &xpc_partitions[partid]; + + if (xpc_part_ref(part)) { + ch = &part->channels[ch_number]; + xpc_msgqueue_ref(ch); + + spin_lock_irqsave(&ch->lock, irq_flags); + + if (!(ch->flags & XPC_C_DISCONNECTED)) { + ch->flags |= XPC_C_WDISCONNECT; + + XPC_DISCONNECT_CHANNEL(ch, xpcUnregistering, + &irq_flags); + } + + spin_unlock_irqrestore(&ch->lock, irq_flags); + + xpc_msgqueue_deref(ch); + xpc_part_deref(part); + } + } + + xpc_disconnect_wait(ch_number); +} + +/* + * To disconnect a channel, and reflect it back to all who may be waiting. + * + * An OPEN is not allowed until XPC_C_DISCONNECTING is cleared by + * xpc_process_disconnect(), and if set, XPC_C_WDISCONNECT is cleared by + * xpc_disconnect_wait(). + * + * THE CHANNEL IS TO BE LOCKED BY THE CALLER AND WILL REMAIN LOCKED UPON RETURN. 
+ */ +void +xpc_disconnect_channel(const int line, struct xpc_channel *ch, + enum xpc_retval reason, unsigned long *irq_flags) +{ + u32 channel_was_connected = (ch->flags & XPC_C_CONNECTED); + + DBUG_ON(!spin_is_locked(&ch->lock)); + + if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_DISCONNECTED)) + return; + + DBUG_ON(!(ch->flags & (XPC_C_CONNECTING | XPC_C_CONNECTED))); + + dev_dbg(xpc_chan, "reason=%d, line=%d, partid=%d, channel=%d\n", + reason, line, ch->partid, ch->number); + + XPC_SET_REASON(ch, reason, line); + + ch->flags |= (XPC_C_CLOSEREQUEST | XPC_C_DISCONNECTING); + /* some of these may not have been set */ + ch->flags &= ~(XPC_C_OPENREQUEST | XPC_C_OPENREPLY | + XPC_C_ROPENREQUEST | XPC_C_ROPENREPLY | + XPC_C_CONNECTING | XPC_C_CONNECTED); + + xpc_IPI_send_closerequest(ch, irq_flags); + + if (channel_was_connected) + ch->flags |= XPC_C_WASCONNECTED; + + spin_unlock_irqrestore(&ch->lock, *irq_flags); + + /* wake all idle kthreads so they can exit */ + if (atomic_read(&ch->kthreads_idle) > 0) { + wake_up_all(&ch->idle_wq); + + } else if ((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) && + !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) { + /* start a kthread that will do the xpcDisconnecting callout */ + xpc_create_kthreads(ch, 1, 1); + } + + /* wake those waiting to allocate an entry from the local msg queue */ + if (atomic_read(&ch->n_on_msg_allocate_wq) > 0) + wake_up(&ch->msg_allocate_wq); + + spin_lock_irqsave(&ch->lock, *irq_flags); +} + +void +xpc_disconnect_callout(struct xpc_channel *ch, enum xpc_retval reason) +{ + /* + * Let the channel's registerer know that the channel is being + * disconnected. We don't want to do this if the registerer was never + * informed of a connection being made. + */ + + if (ch->func != NULL) { + dev_dbg(xpc_chan, "ch->func() called, reason=%d, partid=%d, " + "channel=%d\n", reason, ch->partid, ch->number); + + ch->func(reason, ch->partid, ch->number, NULL, ch->key); + + dev_dbg(xpc_chan, "ch->func() returned, reason=%d, partid=%d, " + "channel=%d\n", reason, ch->partid, ch->number); + } +} + +/* + * Wait for a message entry to become available for the specified channel, + * but don't wait any longer than 1 jiffy. + */ +static enum xpc_retval +xpc_allocate_msg_wait(struct xpc_channel *ch) +{ + enum xpc_retval ret; + + if (ch->flags & XPC_C_DISCONNECTING) { + DBUG_ON(ch->reason == xpcInterrupted); + return ch->reason; + } + + atomic_inc(&ch->n_on_msg_allocate_wq); + ret = interruptible_sleep_on_timeout(&ch->msg_allocate_wq, 1); + atomic_dec(&ch->n_on_msg_allocate_wq); + + if (ch->flags & XPC_C_DISCONNECTING) { + ret = ch->reason; + DBUG_ON(ch->reason == xpcInterrupted); + } else if (ret == 0) { + ret = xpcTimeout; + } else { + ret = xpcInterrupted; + } + + return ret; +} + +/* + * Allocate an entry for a message from the message queue associated with the + * specified channel. + */ +static enum xpc_retval +xpc_allocate_msg(struct xpc_channel *ch, u32 flags, + struct xpc_msg **address_of_msg) +{ + struct xpc_msg *msg; + enum xpc_retval ret; + s64 put; + + /* this reference will be dropped in xpc_send_msg() */ + xpc_msgqueue_ref(ch); + + if (ch->flags & XPC_C_DISCONNECTING) { + xpc_msgqueue_deref(ch); + return ch->reason; + } + if (!(ch->flags & XPC_C_CONNECTED)) { + xpc_msgqueue_deref(ch); + return xpcNotConnected; + } + + /* + * Get the next available message entry from the local message queue. + * If none are available, we'll make sure that we grab the latest + * GP values. 
+ */ + ret = xpcTimeout; + + while (1) { + + put = ch->w_local_GP.put; + rmb(); /* guarantee that .put loads before .get */ + if (put - ch->w_remote_GP.get < ch->local_nentries) { + + /* There are available message entries. We need to try + * to secure one for ourselves. We'll do this by trying + * to increment w_local_GP.put as long as someone else + * doesn't beat us to it. If they do, we'll have to + * try again. + */ + if (cmpxchg(&ch->w_local_GP.put, put, put + 1) == put) { + /* we got the entry referenced by put */ + break; + } + continue; /* try again */ + } + + /* + * There aren't any available msg entries at this time. + * + * In waiting for a message entry to become available, + * we set a timeout in case the other side is not + * sending completion IPIs. This lets us fake an IPI + * that will cause the IPI handler to fetch the latest + * GP values as if an IPI was sent by the other side. + */ + if (ret == xpcTimeout) + xpc_IPI_send_local_msgrequest(ch); + + if (flags & XPC_NOWAIT) { + xpc_msgqueue_deref(ch); + return xpcNoWait; + } + + ret = xpc_allocate_msg_wait(ch); + if (ret != xpcInterrupted && ret != xpcTimeout) { + xpc_msgqueue_deref(ch); + return ret; + } + } + + /* get the message's address and initialize it */ + msg = (struct xpc_msg *)((u64)ch->local_msgqueue + + (put % ch->local_nentries) * ch->msg_size); + + DBUG_ON(msg->flags != 0); + msg->number = put; + + dev_dbg(xpc_chan, "w_local_GP.put changed to %ld; msg=0x%p, " + "msg_number=%ld, partid=%d, channel=%d\n", put + 1, + (void *)msg, msg->number, ch->partid, ch->number); + + *address_of_msg = msg; + + return xpcSuccess; +} + +/* + * Allocate an entry for a message from the message queue associated with the + * specified channel. NOTE that this routine can sleep waiting for a message + * entry to become available. To not sleep, pass in the XPC_NOWAIT flag. + * + * Arguments: + * + * partid - ID of partition to which the channel is connected. + * ch_number - channel #. + * flags - see xpc.h for valid flags. + * payload - address of the allocated payload area pointer (filled in on + * return) in which the user-defined message is constructed. + */ +enum xpc_retval +xpc_initiate_allocate(partid_t partid, int ch_number, u32 flags, void **payload) +{ + struct xpc_partition *part = &xpc_partitions[partid]; + enum xpc_retval ret = xpcUnknownReason; + struct xpc_msg *msg = NULL; + + DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS); + DBUG_ON(ch_number < 0 || ch_number >= part->nchannels); + + *payload = NULL; + + if (xpc_part_ref(part)) { + ret = xpc_allocate_msg(&part->channels[ch_number], flags, &msg); + xpc_part_deref(part); + + if (msg != NULL) + *payload = &msg->payload; + } + + return ret; +} + +/* + * Now we actually send the messages that are ready to be sent by advancing + * the local message queue's Put value and then send an IPI to the recipient + * partition. 
+ */ +static void +xpc_send_msgs(struct xpc_channel *ch, s64 initial_put) +{ + struct xpc_msg *msg; + s64 put = initial_put + 1; + int send_IPI = 0; + + while (1) { + + while (1) { + if (put == ch->w_local_GP.put) + break; + + msg = (struct xpc_msg *)((u64)ch->local_msgqueue + + (put % ch->local_nentries) * + ch->msg_size); + + if (!(msg->flags & XPC_M_READY)) + break; + + put++; + } + + if (put == initial_put) { + /* nothing's changed */ + break; + } + + if (cmpxchg_rel(&ch->local_GP->put, initial_put, put) != + initial_put) { + /* someone else beat us to it */ + DBUG_ON(ch->local_GP->put < initial_put); + break; + } + + /* we just set the new value of local_GP->put */ + + dev_dbg(xpc_chan, "local_GP->put changed to %ld, partid=%d, " + "channel=%d\n", put, ch->partid, ch->number); + + send_IPI = 1; + + /* + * We need to ensure that the message referenced by + * local_GP->put is not XPC_M_READY or that local_GP->put + * equals w_local_GP.put, so we'll go have a look. + */ + initial_put = put; + } + + if (send_IPI) + xpc_IPI_send_msgrequest(ch); +} + +/* + * Common code that does the actual sending of the message by advancing the + * local message queue's Put value and sends an IPI to the partition the + * message is being sent to. + */ +static enum xpc_retval +xpc_send_msg(struct xpc_channel *ch, struct xpc_msg *msg, u8 notify_type, + xpc_notify_func func, void *key) +{ + enum xpc_retval ret = xpcSuccess; + struct xpc_notify *notify = notify; + s64 put, msg_number = msg->number; + + DBUG_ON(notify_type == XPC_N_CALL && func == NULL); + DBUG_ON((((u64)msg - (u64)ch->local_msgqueue) / ch->msg_size) != + msg_number % ch->local_nentries); + DBUG_ON(msg->flags & XPC_M_READY); + + if (ch->flags & XPC_C_DISCONNECTING) { + /* drop the reference grabbed in xpc_allocate_msg() */ + xpc_msgqueue_deref(ch); + return ch->reason; + } + + if (notify_type != 0) { + /* + * Tell the remote side to send an ACK interrupt when the + * message has been delivered. + */ + msg->flags |= XPC_M_INTERRUPT; + + atomic_inc(&ch->n_to_notify); + + notify = &ch->notify_queue[msg_number % ch->local_nentries]; + notify->func = func; + notify->key = key; + notify->type = notify_type; + + /* >>> is a mb() needed here? */ + + if (ch->flags & XPC_C_DISCONNECTING) { + /* + * An error occurred between our last error check and + * this one. We will try to clear the type field from + * the notify entry. If we succeed then + * xpc_disconnect_channel() didn't already process + * the notify entry. + */ + if (cmpxchg(¬ify->type, notify_type, 0) == + notify_type) { + atomic_dec(&ch->n_to_notify); + ret = ch->reason; + } + + /* drop the reference grabbed in xpc_allocate_msg() */ + xpc_msgqueue_deref(ch); + return ret; + } + } + + msg->flags |= XPC_M_READY; + + /* + * The preceding store of msg->flags must occur before the following + * load of ch->local_GP->put. + */ + mb(); + + /* see if the message is next in line to be sent, if so send it */ + + put = ch->local_GP->put; + if (put == msg_number) + xpc_send_msgs(ch, put); + + /* drop the reference grabbed in xpc_allocate_msg() */ + xpc_msgqueue_deref(ch); + return ret; +} + +/* + * Send a message previously allocated using xpc_initiate_allocate() on the + * specified channel connected to the specified partition. + * + * This routine will not wait for the message to be received, nor will + * notification be given when it does happen. Once this routine has returned + * the message entry allocated via xpc_initiate_allocate() is no longer + * accessable to the caller. 
+ * + * This routine, although called by users, does not call xpc_part_ref() to + * ensure that the partition infrastructure is in place. It relies on the + * fact that we called xpc_msgqueue_ref() in xpc_allocate_msg(). + * + * Arguments: + * + * partid - ID of partition to which the channel is connected. + * ch_number - channel # to send message on. + * payload - pointer to the payload area allocated via + * xpc_initiate_allocate(). + */ +enum xpc_retval +xpc_initiate_send(partid_t partid, int ch_number, void *payload) +{ + struct xpc_partition *part = &xpc_partitions[partid]; + struct xpc_msg *msg = XPC_MSG_ADDRESS(payload); + enum xpc_retval ret; + + dev_dbg(xpc_chan, "msg=0x%p, partid=%d, channel=%d\n", (void *)msg, + partid, ch_number); + + DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS); + DBUG_ON(ch_number < 0 || ch_number >= part->nchannels); + DBUG_ON(msg == NULL); + + ret = xpc_send_msg(&part->channels[ch_number], msg, 0, NULL, NULL); + + return ret; +} + +/* + * Send a message previously allocated using xpc_initiate_allocate on the + * specified channel connected to the specified partition. + * + * This routine will not wait for the message to be sent. Once this routine + * has returned the message entry allocated via xpc_initiate_allocate() is no + * longer accessable to the caller. + * + * Once the remote end of the channel has received the message, the function + * passed as an argument to xpc_initiate_send_notify() will be called. This + * allows the sender to free up or re-use any buffers referenced by the + * message, but does NOT mean the message has been processed at the remote + * end by a receiver. + * + * If this routine returns an error, the caller's function will NOT be called. + * + * This routine, although called by users, does not call xpc_part_ref() to + * ensure that the partition infrastructure is in place. It relies on the + * fact that we called xpc_msgqueue_ref() in xpc_allocate_msg(). + * + * Arguments: + * + * partid - ID of partition to which the channel is connected. + * ch_number - channel # to send message on. + * payload - pointer to the payload area allocated via + * xpc_initiate_allocate(). + * func - function to call with asynchronous notification of message + * receipt. THIS FUNCTION MUST BE NON-BLOCKING. + * key - user-defined key to be passed to the function when it's called. 
+ */ +enum xpc_retval +xpc_initiate_send_notify(partid_t partid, int ch_number, void *payload, + xpc_notify_func func, void *key) +{ + struct xpc_partition *part = &xpc_partitions[partid]; + struct xpc_msg *msg = XPC_MSG_ADDRESS(payload); + enum xpc_retval ret; + + dev_dbg(xpc_chan, "msg=0x%p, partid=%d, channel=%d\n", (void *)msg, + partid, ch_number); + + DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS); + DBUG_ON(ch_number < 0 || ch_number >= part->nchannels); + DBUG_ON(msg == NULL); + DBUG_ON(func == NULL); + + ret = xpc_send_msg(&part->channels[ch_number], msg, XPC_N_CALL, + func, key); + return ret; +} + +static struct xpc_msg * +xpc_pull_remote_msg(struct xpc_channel *ch, s64 get) +{ + struct xpc_partition *part = &xpc_partitions[ch->partid]; + struct xpc_msg *remote_msg, *msg; + u32 msg_index, nmsgs; + u64 msg_offset; + enum xpc_retval ret; + + if (mutex_lock_interruptible(&ch->msg_to_pull_mutex) != 0) { + /* we were interrupted by a signal */ + return NULL; + } + + while (get >= ch->next_msg_to_pull) { + + /* pull as many messages as are ready and able to be pulled */ + + msg_index = ch->next_msg_to_pull % ch->remote_nentries; + + DBUG_ON(ch->next_msg_to_pull >= ch->w_remote_GP.put); + nmsgs = ch->w_remote_GP.put - ch->next_msg_to_pull; + if (msg_index + nmsgs > ch->remote_nentries) { + /* ignore the ones that wrap the msg queue for now */ + nmsgs = ch->remote_nentries - msg_index; + } + + msg_offset = msg_index * ch->msg_size; + msg = (struct xpc_msg *)((u64)ch->remote_msgqueue + msg_offset); + remote_msg = (struct xpc_msg *)(ch->remote_msgqueue_pa + + msg_offset); + + ret = xpc_pull_remote_cachelines(part, msg, remote_msg, + nmsgs * ch->msg_size); + if (ret != xpcSuccess) { + + dev_dbg(xpc_chan, "failed to pull %d msgs starting with" + " msg %ld from partition %d, channel=%d, " + "ret=%d\n", nmsgs, ch->next_msg_to_pull, + ch->partid, ch->number, ret); + + XPC_DEACTIVATE_PARTITION(part, ret); + + mutex_unlock(&ch->msg_to_pull_mutex); + return NULL; + } + + ch->next_msg_to_pull += nmsgs; + } + + mutex_unlock(&ch->msg_to_pull_mutex); + + /* return the message we were looking for */ + msg_offset = (get % ch->remote_nentries) * ch->msg_size; + msg = (struct xpc_msg *)((u64)ch->remote_msgqueue + msg_offset); + + return msg; +} + +/* + * Get a message to be delivered. + */ +static struct xpc_msg * +xpc_get_deliverable_msg(struct xpc_channel *ch) +{ + struct xpc_msg *msg = NULL; + s64 get; + + do { + if (ch->flags & XPC_C_DISCONNECTING) + break; + + get = ch->w_local_GP.get; + rmb(); /* guarantee that .get loads before .put */ + if (get == ch->w_remote_GP.put) + break; + + /* There are messages waiting to be pulled and delivered. + * We need to try to secure one for ourselves. We'll do this + * by trying to increment w_local_GP.get and hope that no one + * else beats us to it. If they do, we'll we'll simply have + * to try again for the next one. + */ + + if (cmpxchg(&ch->w_local_GP.get, get, get + 1) == get) { + /* we got the entry referenced by get */ + + dev_dbg(xpc_chan, "w_local_GP.get changed to %ld, " + "partid=%d, channel=%d\n", get + 1, + ch->partid, ch->number); + + /* pull the message from the remote partition */ + + msg = xpc_pull_remote_msg(ch, get); + + DBUG_ON(msg != NULL && msg->number != get); + DBUG_ON(msg != NULL && (msg->flags & XPC_M_DONE)); + DBUG_ON(msg != NULL && !(msg->flags & XPC_M_READY)); + + break; + } + + } while (1); + + return msg; +} + +/* + * Deliver a message to its intended recipient. 
+ */ +void +xpc_deliver_msg(struct xpc_channel *ch) +{ + struct xpc_msg *msg; + + msg = xpc_get_deliverable_msg(ch); + if (msg != NULL) { + + /* + * This ref is taken to protect the payload itself from being + * freed before the user is finished with it, which the user + * indicates by calling xpc_initiate_received(). + */ + xpc_msgqueue_ref(ch); + + atomic_inc(&ch->kthreads_active); + + if (ch->func != NULL) { + dev_dbg(xpc_chan, "ch->func() called, msg=0x%p, " + "msg_number=%ld, partid=%d, channel=%d\n", + (void *)msg, msg->number, ch->partid, + ch->number); + + /* deliver the message to its intended recipient */ + ch->func(xpcMsgReceived, ch->partid, ch->number, + &msg->payload, ch->key); + + dev_dbg(xpc_chan, "ch->func() returned, msg=0x%p, " + "msg_number=%ld, partid=%d, channel=%d\n", + (void *)msg, msg->number, ch->partid, + ch->number); + } + + atomic_dec(&ch->kthreads_active); + } +} + +/* + * Now we actually acknowledge the messages that have been delivered and ack'd + * by advancing the cached remote message queue's Get value and if requested + * send an IPI to the message sender's partition. + */ +static void +xpc_acknowledge_msgs(struct xpc_channel *ch, s64 initial_get, u8 msg_flags) +{ + struct xpc_msg *msg; + s64 get = initial_get + 1; + int send_IPI = 0; + + while (1) { + + while (1) { + if (get == ch->w_local_GP.get) + break; + + msg = (struct xpc_msg *)((u64)ch->remote_msgqueue + + (get % ch->remote_nentries) * + ch->msg_size); + + if (!(msg->flags & XPC_M_DONE)) + break; + + msg_flags |= msg->flags; + get++; + } + + if (get == initial_get) { + /* nothing's changed */ + break; + } + + if (cmpxchg_rel(&ch->local_GP->get, initial_get, get) != + initial_get) { + /* someone else beat us to it */ + DBUG_ON(ch->local_GP->get <= initial_get); + break; + } + + /* we just set the new value of local_GP->get */ + + dev_dbg(xpc_chan, "local_GP->get changed to %ld, partid=%d, " + "channel=%d\n", get, ch->partid, ch->number); + + send_IPI = (msg_flags & XPC_M_INTERRUPT); + + /* + * We need to ensure that the message referenced by + * local_GP->get is not XPC_M_DONE or that local_GP->get + * equals w_local_GP.get, so we'll go have a look. + */ + initial_get = get; + } + + if (send_IPI) + xpc_IPI_send_msgrequest(ch); +} + +/* + * Acknowledge receipt of a delivered message. + * + * If a message has XPC_M_INTERRUPT set, send an interrupt to the partition + * that sent the message. + * + * This function, although called by users, does not call xpc_part_ref() to + * ensure that the partition infrastructure is in place. It relies on the + * fact that we called xpc_msgqueue_ref() in xpc_deliver_msg(). + * + * Arguments: + * + * partid - ID of partition to which the channel is connected. + * ch_number - channel # message received on. + * payload - pointer to the payload area allocated via + * xpc_initiate_allocate(). 
+ */ +void +xpc_initiate_received(partid_t partid, int ch_number, void *payload) +{ + struct xpc_partition *part = &xpc_partitions[partid]; + struct xpc_channel *ch; + struct xpc_msg *msg = XPC_MSG_ADDRESS(payload); + s64 get, msg_number = msg->number; + + DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS); + DBUG_ON(ch_number < 0 || ch_number >= part->nchannels); + + ch = &part->channels[ch_number]; + + dev_dbg(xpc_chan, "msg=0x%p, msg_number=%ld, partid=%d, channel=%d\n", + (void *)msg, msg_number, ch->partid, ch->number); + + DBUG_ON((((u64)msg - (u64)ch->remote_msgqueue) / ch->msg_size) != + msg_number % ch->remote_nentries); + DBUG_ON(msg->flags & XPC_M_DONE); + + msg->flags |= XPC_M_DONE; + + /* + * The preceding store of msg->flags must occur before the following + * load of ch->local_GP->get. + */ + mb(); + + /* + * See if this message is next in line to be acknowledged as having + * been delivered. + */ + get = ch->local_GP->get; + if (get == msg_number) + xpc_acknowledge_msgs(ch, get, msg->flags); + + /* the call to xpc_msgqueue_ref() was done by xpc_deliver_msg() */ + xpc_msgqueue_deref(ch); +} diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c new file mode 100644 index 00000000000..f673ba90eb0 --- /dev/null +++ b/drivers/misc/sgi-xp/xpc_main.c @@ -0,0 +1,1323 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (c) 2004-2008 Silicon Graphics, Inc. All Rights Reserved. + */ + +/* + * Cross Partition Communication (XPC) support - standard version. + * + * XPC provides a message passing capability that crosses partition + * boundaries. This module is made up of two parts: + * + * partition This part detects the presence/absence of other + * partitions. It provides a heartbeat and monitors + * the heartbeats of other partitions. + * + * channel This part manages the channels and sends/receives + * messages across them to/from other partitions. + * + * There are a couple of additional functions residing in XP, which + * provide an interface to XPC for its users. + * + * + * Caveats: + * + * . We currently have no way to determine which nasid an IPI came + * from. Thus, xpc_IPI_send() does a remote AMO write followed by + * an IPI. The AMO indicates where data is to be pulled from, so + * after the IPI arrives, the remote partition checks the AMO word. + * The IPI can actually arrive before the AMO however, so other code + * must periodically check for this case. Also, remote AMO operations + * do not reliably time out. Thus we do a remote PIO read solely to + * know whether the remote partition is down and whether we should + * stop sending IPIs to it. This remote PIO read operation is set up + * in a special nofault region so SAL knows to ignore (and cleanup) + * any errors due to the remote AMO write, PIO read, and/or PIO + * write operations. + * + * If/when new hardware solves this IPI problem, we should abandon + * the current approach. 
+ * + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/cache.h> +#include <linux/interrupt.h> +#include <linux/delay.h> +#include <linux/reboot.h> +#include <linux/completion.h> +#include <linux/kdebug.h> +#include <linux/kthread.h> +#include <linux/uaccess.h> +#include <asm/sn/intr.h> +#include <asm/sn/sn_sal.h> +#include "xpc.h" + +/* define two XPC debug device structures to be used with dev_dbg() et al */ + +struct device_driver xpc_dbg_name = { + .name = "xpc" +}; + +struct device xpc_part_dbg_subname = { + .bus_id = {0}, /* set to "part" at xpc_init() time */ + .driver = &xpc_dbg_name +}; + +struct device xpc_chan_dbg_subname = { + .bus_id = {0}, /* set to "chan" at xpc_init() time */ + .driver = &xpc_dbg_name +}; + +struct device *xpc_part = &xpc_part_dbg_subname; +struct device *xpc_chan = &xpc_chan_dbg_subname; + +static int xpc_kdebug_ignore; + +/* systune related variables for /proc/sys directories */ + +static int xpc_hb_interval = XPC_HB_DEFAULT_INTERVAL; +static int xpc_hb_min_interval = 1; +static int xpc_hb_max_interval = 10; + +static int xpc_hb_check_interval = XPC_HB_CHECK_DEFAULT_INTERVAL; +static int xpc_hb_check_min_interval = 10; +static int xpc_hb_check_max_interval = 120; + +int xpc_disengage_request_timelimit = XPC_DISENGAGE_REQUEST_DEFAULT_TIMELIMIT; +static int xpc_disengage_request_min_timelimit; /* = 0 */ +static int xpc_disengage_request_max_timelimit = 120; + +static ctl_table xpc_sys_xpc_hb_dir[] = { + { + .ctl_name = CTL_UNNUMBERED, + .procname = "hb_interval", + .data = &xpc_hb_interval, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = &proc_dointvec_minmax, + .strategy = &sysctl_intvec, + .extra1 = &xpc_hb_min_interval, + .extra2 = &xpc_hb_max_interval}, + { + .ctl_name = CTL_UNNUMBERED, + .procname = "hb_check_interval", + .data = &xpc_hb_check_interval, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = &proc_dointvec_minmax, + .strategy = &sysctl_intvec, + .extra1 = &xpc_hb_check_min_interval, + .extra2 = &xpc_hb_check_max_interval}, + {} +}; +static ctl_table xpc_sys_xpc_dir[] = { + { + .ctl_name = CTL_UNNUMBERED, + .procname = "hb", + .mode = 0555, + .child = xpc_sys_xpc_hb_dir}, + { + .ctl_name = CTL_UNNUMBERED, + .procname = "disengage_request_timelimit", + .data = &xpc_disengage_request_timelimit, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = &proc_dointvec_minmax, + .strategy = &sysctl_intvec, + .extra1 = &xpc_disengage_request_min_timelimit, + .extra2 = &xpc_disengage_request_max_timelimit}, + {} +}; +static ctl_table xpc_sys_dir[] = { + { + .ctl_name = CTL_UNNUMBERED, + .procname = "xpc", + .mode = 0555, + .child = xpc_sys_xpc_dir}, + {} +}; +static struct ctl_table_header *xpc_sysctl; + +/* non-zero if any remote partition disengage request was timed out */ +int xpc_disengage_request_timedout; + +/* #of IRQs received */ +static atomic_t xpc_act_IRQ_rcvd; + +/* IRQ handler notifies this wait queue on receipt of an IRQ */ +static DECLARE_WAIT_QUEUE_HEAD(xpc_act_IRQ_wq); + +static unsigned long xpc_hb_check_timeout; + +/* notification that the xpc_hb_checker thread has exited */ +static DECLARE_COMPLETION(xpc_hb_checker_exited); + +/* notification that the xpc_discovery thread has exited */ +static DECLARE_COMPLETION(xpc_discovery_exited); + +static struct timer_list xpc_hb_timer; + +static void xpc_kthread_waitmsgs(struct xpc_partition *, struct xpc_channel *); + +static int xpc_system_reboot(struct notifier_block *, unsigned long, void *); +static struct 
notifier_block xpc_reboot_notifier = { + .notifier_call = xpc_system_reboot, +}; + +static int xpc_system_die(struct notifier_block *, unsigned long, void *); +static struct notifier_block xpc_die_notifier = { + .notifier_call = xpc_system_die, +}; + +/* + * Timer function to enforce the timelimit on the partition disengage request. + */ +static void +xpc_timeout_partition_disengage_request(unsigned long data) +{ + struct xpc_partition *part = (struct xpc_partition *)data; + + DBUG_ON(time_before(jiffies, part->disengage_request_timeout)); + + (void)xpc_partition_disengaged(part); + + DBUG_ON(part->disengage_request_timeout != 0); + DBUG_ON(xpc_partition_engaged(1UL << XPC_PARTID(part)) != 0); +} + +/* + * Notify the heartbeat check thread that an IRQ has been received. + */ +static irqreturn_t +xpc_act_IRQ_handler(int irq, void *dev_id) +{ + atomic_inc(&xpc_act_IRQ_rcvd); + wake_up_interruptible(&xpc_act_IRQ_wq); + return IRQ_HANDLED; +} + +/* + * Timer to produce the heartbeat. The timer structures function is + * already set when this is initially called. A tunable is used to + * specify when the next timeout should occur. + */ +static void +xpc_hb_beater(unsigned long dummy) +{ + xpc_vars->heartbeat++; + + if (time_after_eq(jiffies, xpc_hb_check_timeout)) + wake_up_interruptible(&xpc_act_IRQ_wq); + + xpc_hb_timer.expires = jiffies + (xpc_hb_interval * HZ); + add_timer(&xpc_hb_timer); +} + +/* + * This thread is responsible for nearly all of the partition + * activation/deactivation. + */ +static int +xpc_hb_checker(void *ignore) +{ + int last_IRQ_count = 0; + int new_IRQ_count; + int force_IRQ = 0; + + /* this thread was marked active by xpc_hb_init() */ + + set_cpus_allowed(current, cpumask_of_cpu(XPC_HB_CHECK_CPU)); + + /* set our heartbeating to other partitions into motion */ + xpc_hb_check_timeout = jiffies + (xpc_hb_check_interval * HZ); + xpc_hb_beater(0); + + while (!xpc_exiting) { + + dev_dbg(xpc_part, "woke up with %d ticks rem; %d IRQs have " + "been received\n", + (int)(xpc_hb_check_timeout - jiffies), + atomic_read(&xpc_act_IRQ_rcvd) - last_IRQ_count); + + /* checking of remote heartbeats is skewed by IRQ handling */ + if (time_after_eq(jiffies, xpc_hb_check_timeout)) { + dev_dbg(xpc_part, "checking remote heartbeats\n"); + xpc_check_remote_hb(); + + /* + * We need to periodically recheck to ensure no + * IPI/AMO pairs have been missed. That check + * must always reset xpc_hb_check_timeout. + */ + force_IRQ = 1; + } + + /* check for outstanding IRQs */ + new_IRQ_count = atomic_read(&xpc_act_IRQ_rcvd); + if (last_IRQ_count < new_IRQ_count || force_IRQ != 0) { + force_IRQ = 0; + + dev_dbg(xpc_part, "found an IRQ to process; will be " + "resetting xpc_hb_check_timeout\n"); + + last_IRQ_count += xpc_identify_act_IRQ_sender(); + if (last_IRQ_count < new_IRQ_count) { + /* retry once to help avoid missing AMO */ + (void)xpc_identify_act_IRQ_sender(); + } + last_IRQ_count = new_IRQ_count; + + xpc_hb_check_timeout = jiffies + + (xpc_hb_check_interval * HZ); + } + + /* wait for IRQ or timeout */ + (void)wait_event_interruptible(xpc_act_IRQ_wq, + (last_IRQ_count < + atomic_read(&xpc_act_IRQ_rcvd) + || time_after_eq(jiffies, + xpc_hb_check_timeout) || + xpc_exiting)); + } + + dev_dbg(xpc_part, "heartbeat checker is exiting\n"); + + /* mark this thread as having exited */ + complete(&xpc_hb_checker_exited); + return 0; +} + +/* + * This thread will attempt to discover other partitions to activate + * based on info provided by SAL. 
This new thread is short lived and + * will exit once discovery is complete. + */ +static int +xpc_initiate_discovery(void *ignore) +{ + xpc_discovery(); + + dev_dbg(xpc_part, "discovery thread is exiting\n"); + + /* mark this thread as having exited */ + complete(&xpc_discovery_exited); + return 0; +} + +/* + * Establish first contact with the remote partititon. This involves pulling + * the XPC per partition variables from the remote partition and waiting for + * the remote partition to pull ours. + */ +static enum xpc_retval +xpc_make_first_contact(struct xpc_partition *part) +{ + enum xpc_retval ret; + + while ((ret = xpc_pull_remote_vars_part(part)) != xpcSuccess) { + if (ret != xpcRetry) { + XPC_DEACTIVATE_PARTITION(part, ret); + return ret; + } + + dev_dbg(xpc_chan, "waiting to make first contact with " + "partition %d\n", XPC_PARTID(part)); + + /* wait a 1/4 of a second or so */ + (void)msleep_interruptible(250); + + if (part->act_state == XPC_P_DEACTIVATING) + return part->reason; + } + + return xpc_mark_partition_active(part); +} + +/* + * The first kthread assigned to a newly activated partition is the one + * created by XPC HB with which it calls xpc_partition_up(). XPC hangs on to + * that kthread until the partition is brought down, at which time that kthread + * returns back to XPC HB. (The return of that kthread will signify to XPC HB + * that XPC has dismantled all communication infrastructure for the associated + * partition.) This kthread becomes the channel manager for that partition. + * + * Each active partition has a channel manager, who, besides connecting and + * disconnecting channels, will ensure that each of the partition's connected + * channels has the required number of assigned kthreads to get the work done. + */ +static void +xpc_channel_mgr(struct xpc_partition *part) +{ + while (part->act_state != XPC_P_DEACTIVATING || + atomic_read(&part->nchannels_active) > 0 || + !xpc_partition_disengaged(part)) { + + xpc_process_channel_activity(part); + + /* + * Wait until we've been requested to activate kthreads or + * all of the channel's message queues have been torn down or + * a signal is pending. + * + * The channel_mgr_requests is set to 1 after being awakened, + * This is done to prevent the channel mgr from making one pass + * through the loop for each request, since he will + * be servicing all the requests in one pass. The reason it's + * set to 1 instead of 0 is so that other kthreads will know + * that the channel mgr is running and won't bother trying to + * wake him up. + */ + atomic_dec(&part->channel_mgr_requests); + (void)wait_event_interruptible(part->channel_mgr_wq, + (atomic_read(&part->channel_mgr_requests) > 0 || + part->local_IPI_amo != 0 || + (part->act_state == XPC_P_DEACTIVATING && + atomic_read(&part->nchannels_active) == 0 && + xpc_partition_disengaged(part)))); + atomic_set(&part->channel_mgr_requests, 1); + } +} + +/* + * When XPC HB determines that a partition has come up, it will create a new + * kthread and that kthread will call this function to attempt to set up the + * basic infrastructure used for Cross Partition Communication with the newly + * upped partition. + * + * The kthread that was created by XPC HB and which setup the XPC + * infrastructure will remain assigned to the partition until the partition + * goes down. At which time the kthread will teardown the XPC infrastructure + * and then exit. 
+ * + * XPC HB will put the remote partition's XPC per partition specific variables + * physical address into xpc_partitions[partid].remote_vars_part_pa prior to + * calling xpc_partition_up(). + */ +static void +xpc_partition_up(struct xpc_partition *part) +{ + DBUG_ON(part->channels != NULL); + + dev_dbg(xpc_chan, "activating partition %d\n", XPC_PARTID(part)); + + if (xpc_setup_infrastructure(part) != xpcSuccess) + return; + + /* + * The kthread that XPC HB called us with will become the + * channel manager for this partition. It will not return + * back to XPC HB until the partition's XPC infrastructure + * has been dismantled. + */ + + (void)xpc_part_ref(part); /* this will always succeed */ + + if (xpc_make_first_contact(part) == xpcSuccess) + xpc_channel_mgr(part); + + xpc_part_deref(part); + + xpc_teardown_infrastructure(part); +} + +static int +xpc_activating(void *__partid) +{ + partid_t partid = (u64)__partid; + struct xpc_partition *part = &xpc_partitions[partid]; + unsigned long irq_flags; + + DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS); + + spin_lock_irqsave(&part->act_lock, irq_flags); + + if (part->act_state == XPC_P_DEACTIVATING) { + part->act_state = XPC_P_INACTIVE; + spin_unlock_irqrestore(&part->act_lock, irq_flags); + part->remote_rp_pa = 0; + return 0; + } + + /* indicate the thread is activating */ + DBUG_ON(part->act_state != XPC_P_ACTIVATION_REQ); + part->act_state = XPC_P_ACTIVATING; + + XPC_SET_REASON(part, 0, 0); + spin_unlock_irqrestore(&part->act_lock, irq_flags); + + dev_dbg(xpc_part, "bringing partition %d up\n", partid); + + /* + * Register the remote partition's AMOs with SAL so it can handle + * and cleanup errors within that address range should the remote + * partition go down. We don't unregister this range because it is + * difficult to tell when outstanding writes to the remote partition + * are finished and thus when it is safe to unregister. This should + * not result in wasted space in the SAL xp_addr_region table because + * we should get the same page for remote_amos_page_pa after module + * reloads and system reboots. + */ + if (sn_register_xp_addr_region(part->remote_amos_page_pa, + PAGE_SIZE, 1) < 0) { + dev_warn(xpc_part, "xpc_partition_up(%d) failed to register " + "xp_addr region\n", partid); + + spin_lock_irqsave(&part->act_lock, irq_flags); + part->act_state = XPC_P_INACTIVE; + XPC_SET_REASON(part, xpcPhysAddrRegFailed, __LINE__); + spin_unlock_irqrestore(&part->act_lock, irq_flags); + part->remote_rp_pa = 0; + return 0; + } + + xpc_allow_hb(partid, xpc_vars); + xpc_IPI_send_activated(part); + + /* + * xpc_partition_up() holds this thread and marks this partition as + * XPC_P_ACTIVE by calling xpc_hb_mark_active(). 
+ */ + (void)xpc_partition_up(part); + + xpc_disallow_hb(partid, xpc_vars); + xpc_mark_partition_inactive(part); + + if (part->reason == xpcReactivating) { + /* interrupting ourselves results in activating partition */ + xpc_IPI_send_reactivate(part); + } + + return 0; +} + +void +xpc_activate_partition(struct xpc_partition *part) +{ + partid_t partid = XPC_PARTID(part); + unsigned long irq_flags; + struct task_struct *kthread; + + spin_lock_irqsave(&part->act_lock, irq_flags); + + DBUG_ON(part->act_state != XPC_P_INACTIVE); + + part->act_state = XPC_P_ACTIVATION_REQ; + XPC_SET_REASON(part, xpcCloneKThread, __LINE__); + + spin_unlock_irqrestore(&part->act_lock, irq_flags); + + kthread = kthread_run(xpc_activating, (void *)((u64)partid), "xpc%02d", + partid); + if (IS_ERR(kthread)) { + spin_lock_irqsave(&part->act_lock, irq_flags); + part->act_state = XPC_P_INACTIVE; + XPC_SET_REASON(part, xpcCloneKThreadFailed, __LINE__); + spin_unlock_irqrestore(&part->act_lock, irq_flags); + } +} + +/* + * Handle the receipt of a SGI_XPC_NOTIFY IRQ by seeing whether the specified + * partition actually sent it. Since SGI_XPC_NOTIFY IRQs may be shared by more + * than one partition, we use an AMO_t structure per partition to indicate + * whether a partition has sent an IPI or not. If it has, then wake up the + * associated kthread to handle it. + * + * All SGI_XPC_NOTIFY IRQs received by XPC are the result of IPIs sent by XPC + * running on other partitions. + * + * Noteworthy Arguments: + * + * irq - Interrupt ReQuest number. NOT USED. + * + * dev_id - partid of IPI's potential sender. + */ +irqreturn_t +xpc_notify_IRQ_handler(int irq, void *dev_id) +{ + partid_t partid = (partid_t) (u64)dev_id; + struct xpc_partition *part = &xpc_partitions[partid]; + + DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS); + + if (xpc_part_ref(part)) { + xpc_check_for_channel_activity(part); + + xpc_part_deref(part); + } + return IRQ_HANDLED; +} + +/* + * Check to see if xpc_notify_IRQ_handler() dropped any IPIs on the floor + * because the write to their associated IPI amo completed after the IRQ/IPI + * was received. + */ +void +xpc_dropped_IPI_check(struct xpc_partition *part) +{ + if (xpc_part_ref(part)) { + xpc_check_for_channel_activity(part); + + part->dropped_IPI_timer.expires = jiffies + + XPC_P_DROPPED_IPI_WAIT; + add_timer(&part->dropped_IPI_timer); + xpc_part_deref(part); + } +} + +void +xpc_activate_kthreads(struct xpc_channel *ch, int needed) +{ + int idle = atomic_read(&ch->kthreads_idle); + int assigned = atomic_read(&ch->kthreads_assigned); + int wakeup; + + DBUG_ON(needed <= 0); + + if (idle > 0) { + wakeup = (needed > idle) ? idle : needed; + needed -= wakeup; + + dev_dbg(xpc_chan, "wakeup %d idle kthreads, partid=%d, " + "channel=%d\n", wakeup, ch->partid, ch->number); + + /* only wakeup the requested number of kthreads */ + wake_up_nr(&ch->idle_wq, wakeup); + } + + if (needed <= 0) + return; + + if (needed + assigned > ch->kthreads_assigned_limit) { + needed = ch->kthreads_assigned_limit - assigned; + if (needed <= 0) + return; + } + + dev_dbg(xpc_chan, "create %d new kthreads, partid=%d, channel=%d\n", + needed, ch->partid, ch->number); + + xpc_create_kthreads(ch, needed, 0); +} + +/* + * This function is where XPC's kthreads wait for messages to deliver. 
+ */ +static void +xpc_kthread_waitmsgs(struct xpc_partition *part, struct xpc_channel *ch) +{ + do { + /* deliver messages to their intended recipients */ + + while (ch->w_local_GP.get < ch->w_remote_GP.put && + !(ch->flags & XPC_C_DISCONNECTING)) { + xpc_deliver_msg(ch); + } + + if (atomic_inc_return(&ch->kthreads_idle) > + ch->kthreads_idle_limit) { + /* too many idle kthreads on this channel */ + atomic_dec(&ch->kthreads_idle); + break; + } + + dev_dbg(xpc_chan, "idle kthread calling " + "wait_event_interruptible_exclusive()\n"); + + (void)wait_event_interruptible_exclusive(ch->idle_wq, + (ch->w_local_GP.get < ch->w_remote_GP.put || + (ch->flags & XPC_C_DISCONNECTING))); + + atomic_dec(&ch->kthreads_idle); + + } while (!(ch->flags & XPC_C_DISCONNECTING)); +} + +static int +xpc_kthread_start(void *args) +{ + partid_t partid = XPC_UNPACK_ARG1(args); + u16 ch_number = XPC_UNPACK_ARG2(args); + struct xpc_partition *part = &xpc_partitions[partid]; + struct xpc_channel *ch; + int n_needed; + unsigned long irq_flags; + + dev_dbg(xpc_chan, "kthread starting, partid=%d, channel=%d\n", + partid, ch_number); + + ch = &part->channels[ch_number]; + + if (!(ch->flags & XPC_C_DISCONNECTING)) { + + /* let registerer know that connection has been established */ + + spin_lock_irqsave(&ch->lock, irq_flags); + if (!(ch->flags & XPC_C_CONNECTEDCALLOUT)) { + ch->flags |= XPC_C_CONNECTEDCALLOUT; + spin_unlock_irqrestore(&ch->lock, irq_flags); + + xpc_connected_callout(ch); + + spin_lock_irqsave(&ch->lock, irq_flags); + ch->flags |= XPC_C_CONNECTEDCALLOUT_MADE; + spin_unlock_irqrestore(&ch->lock, irq_flags); + + /* + * It is possible that while the callout was being + * made that the remote partition sent some messages. + * If that is the case, we may need to activate + * additional kthreads to help deliver them. We only + * need one less than total #of messages to deliver. + */ + n_needed = ch->w_remote_GP.put - ch->w_local_GP.get - 1; + if (n_needed > 0 && !(ch->flags & XPC_C_DISCONNECTING)) + xpc_activate_kthreads(ch, n_needed); + + } else { + spin_unlock_irqrestore(&ch->lock, irq_flags); + } + + xpc_kthread_waitmsgs(part, ch); + } + + /* let registerer know that connection is disconnecting */ + + spin_lock_irqsave(&ch->lock, irq_flags); + if ((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) && + !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) { + ch->flags |= XPC_C_DISCONNECTINGCALLOUT; + spin_unlock_irqrestore(&ch->lock, irq_flags); + + xpc_disconnect_callout(ch, xpcDisconnecting); + + spin_lock_irqsave(&ch->lock, irq_flags); + ch->flags |= XPC_C_DISCONNECTINGCALLOUT_MADE; + } + spin_unlock_irqrestore(&ch->lock, irq_flags); + + if (atomic_dec_return(&ch->kthreads_assigned) == 0) { + if (atomic_dec_return(&part->nchannels_engaged) == 0) { + xpc_mark_partition_disengaged(part); + xpc_IPI_send_disengage(part); + } + } + + xpc_msgqueue_deref(ch); + + dev_dbg(xpc_chan, "kthread exiting, partid=%d, channel=%d\n", + partid, ch_number); + + xpc_part_deref(part); + return 0; +} + +/* + * For each partition that XPC has established communications with, there is + * a minimum of one kernel thread assigned to perform any operation that + * may potentially sleep or block (basically the callouts to the asynchronous + * functions registered via xpc_connect()). + * + * Additional kthreads are created and destroyed by XPC as the workload + * demands. + * + * A kthread is assigned to one of the active channels that exists for a given + * partition. 
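+ *
+ * In rough outline (a sketch of the bookkeeping, not the exact code):
+ *
+ *	needed = #of messages waiting to be delivered on the channel;
+ *	wake up to min(needed, kthreads_idle) idle kthreads;
+ *	if that is not enough, kthread_run() new ones, without letting
+ *	kthreads_assigned exceed kthreads_assigned_limit;
+ *	an idle kthread stops waiting (and exits) once kthreads_idle
+ *	would exceed kthreads_idle_limit.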
+ */ +void +xpc_create_kthreads(struct xpc_channel *ch, int needed, + int ignore_disconnecting) +{ + unsigned long irq_flags; + u64 args = XPC_PACK_ARGS(ch->partid, ch->number); + struct xpc_partition *part = &xpc_partitions[ch->partid]; + struct task_struct *kthread; + + while (needed-- > 0) { + + /* + * The following is done on behalf of the newly created + * kthread. That kthread is responsible for doing the + * counterpart to the following before it exits. + */ + if (ignore_disconnecting) { + if (!atomic_inc_not_zero(&ch->kthreads_assigned)) { + /* kthreads assigned had gone to zero */ + BUG_ON(!(ch->flags & + XPC_C_DISCONNECTINGCALLOUT_MADE)); + break; + } + + } else if (ch->flags & XPC_C_DISCONNECTING) { + break; + + } else if (atomic_inc_return(&ch->kthreads_assigned) == 1) { + if (atomic_inc_return(&part->nchannels_engaged) == 1) + xpc_mark_partition_engaged(part); + } + (void)xpc_part_ref(part); + xpc_msgqueue_ref(ch); + + kthread = kthread_run(xpc_kthread_start, (void *)args, + "xpc%02dc%d", ch->partid, ch->number); + if (IS_ERR(kthread)) { + /* the fork failed */ + + /* + * NOTE: if (ignore_disconnecting && + * !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) is true, + * then we'll deadlock if all other kthreads assigned + * to this channel are blocked in the channel's + * registerer, because the only thing that will unblock + * them is the xpcDisconnecting callout that this + * failed kthread_run() would have made. + */ + + if (atomic_dec_return(&ch->kthreads_assigned) == 0 && + atomic_dec_return(&part->nchannels_engaged) == 0) { + xpc_mark_partition_disengaged(part); + xpc_IPI_send_disengage(part); + } + xpc_msgqueue_deref(ch); + xpc_part_deref(part); + + if (atomic_read(&ch->kthreads_assigned) < + ch->kthreads_idle_limit) { + /* + * Flag this as an error only if we have an + * insufficient #of kthreads for the channel + * to function. 
+ */ + spin_lock_irqsave(&ch->lock, irq_flags); + XPC_DISCONNECT_CHANNEL(ch, xpcLackOfResources, + &irq_flags); + spin_unlock_irqrestore(&ch->lock, irq_flags); + } + break; + } + } +} + +void +xpc_disconnect_wait(int ch_number) +{ + unsigned long irq_flags; + partid_t partid; + struct xpc_partition *part; + struct xpc_channel *ch; + int wakeup_channel_mgr; + + /* now wait for all callouts to the caller's function to cease */ + for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) { + part = &xpc_partitions[partid]; + + if (!xpc_part_ref(part)) + continue; + + ch = &part->channels[ch_number]; + + if (!(ch->flags & XPC_C_WDISCONNECT)) { + xpc_part_deref(part); + continue; + } + + wait_for_completion(&ch->wdisconnect_wait); + + spin_lock_irqsave(&ch->lock, irq_flags); + DBUG_ON(!(ch->flags & XPC_C_DISCONNECTED)); + wakeup_channel_mgr = 0; + + if (ch->delayed_IPI_flags) { + if (part->act_state != XPC_P_DEACTIVATING) { + spin_lock(&part->IPI_lock); + XPC_SET_IPI_FLAGS(part->local_IPI_amo, + ch->number, + ch->delayed_IPI_flags); + spin_unlock(&part->IPI_lock); + wakeup_channel_mgr = 1; + } + ch->delayed_IPI_flags = 0; + } + + ch->flags &= ~XPC_C_WDISCONNECT; + spin_unlock_irqrestore(&ch->lock, irq_flags); + + if (wakeup_channel_mgr) + xpc_wakeup_channel_mgr(part); + + xpc_part_deref(part); + } +} + +static void +xpc_do_exit(enum xpc_retval reason) +{ + partid_t partid; + int active_part_count, printed_waiting_msg = 0; + struct xpc_partition *part; + unsigned long printmsg_time, disengage_request_timeout = 0; + + /* a 'rmmod XPC' and a 'reboot' cannot both end up here together */ + DBUG_ON(xpc_exiting == 1); + + /* + * Let the heartbeat checker thread and the discovery thread + * (if one is running) know that they should exit. Also wake up + * the heartbeat checker thread in case it's sleeping. 
+ */ + xpc_exiting = 1; + wake_up_interruptible(&xpc_act_IRQ_wq); + + /* ignore all incoming interrupts */ + free_irq(SGI_XPC_ACTIVATE, NULL); + + /* wait for the discovery thread to exit */ + wait_for_completion(&xpc_discovery_exited); + + /* wait for the heartbeat checker thread to exit */ + wait_for_completion(&xpc_hb_checker_exited); + + /* sleep for a 1/3 of a second or so */ + (void)msleep_interruptible(300); + + /* wait for all partitions to become inactive */ + + printmsg_time = jiffies + (XPC_DISENGAGE_PRINTMSG_INTERVAL * HZ); + xpc_disengage_request_timedout = 0; + + do { + active_part_count = 0; + + for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) { + part = &xpc_partitions[partid]; + + if (xpc_partition_disengaged(part) && + part->act_state == XPC_P_INACTIVE) { + continue; + } + + active_part_count++; + + XPC_DEACTIVATE_PARTITION(part, reason); + + if (part->disengage_request_timeout > + disengage_request_timeout) { + disengage_request_timeout = + part->disengage_request_timeout; + } + } + + if (xpc_partition_engaged(-1UL)) { + if (time_after(jiffies, printmsg_time)) { + dev_info(xpc_part, "waiting for remote " + "partitions to disengage, timeout in " + "%ld seconds\n", + (disengage_request_timeout - jiffies) + / HZ); + printmsg_time = jiffies + + (XPC_DISENGAGE_PRINTMSG_INTERVAL * HZ); + printed_waiting_msg = 1; + } + + } else if (active_part_count > 0) { + if (printed_waiting_msg) { + dev_info(xpc_part, "waiting for local partition" + " to disengage\n"); + printed_waiting_msg = 0; + } + + } else { + if (!xpc_disengage_request_timedout) { + dev_info(xpc_part, "all partitions have " + "disengaged\n"); + } + break; + } + + /* sleep for a 1/3 of a second or so */ + (void)msleep_interruptible(300); + + } while (1); + + DBUG_ON(xpc_partition_engaged(-1UL)); + + /* indicate to others that our reserved page is uninitialized */ + xpc_rsvd_page->vars_pa = 0; + + /* now it's time to eliminate our heartbeat */ + del_timer_sync(&xpc_hb_timer); + DBUG_ON(xpc_vars->heartbeating_to_mask != 0); + + if (reason == xpcUnloading) { + /* take ourselves off of the reboot_notifier_list */ + (void)unregister_reboot_notifier(&xpc_reboot_notifier); + + /* take ourselves off of the die_notifier list */ + (void)unregister_die_notifier(&xpc_die_notifier); + } + + /* close down protections for IPI operations */ + xpc_restrict_IPI_ops(); + + /* clear the interface to XPC's functions */ + xpc_clear_interface(); + + if (xpc_sysctl) + unregister_sysctl_table(xpc_sysctl); + + kfree(xpc_remote_copy_buffer_base); +} + +/* + * This function is called when the system is being rebooted. + */ +static int +xpc_system_reboot(struct notifier_block *nb, unsigned long event, void *unused) +{ + enum xpc_retval reason; + + switch (event) { + case SYS_RESTART: + reason = xpcSystemReboot; + break; + case SYS_HALT: + reason = xpcSystemHalt; + break; + case SYS_POWER_OFF: + reason = xpcSystemPoweroff; + break; + default: + reason = xpcSystemGoingDown; + } + + xpc_do_exit(reason); + return NOTIFY_DONE; +} + +/* + * Notify other partitions to disengage from all references to our memory. 
+ */ +static void +xpc_die_disengage(void) +{ + struct xpc_partition *part; + partid_t partid; + unsigned long engaged; + long time, printmsg_time, disengage_request_timeout; + + /* keep xpc_hb_checker thread from doing anything (just in case) */ + xpc_exiting = 1; + + xpc_vars->heartbeating_to_mask = 0; /* indicate we're deactivated */ + + for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) { + part = &xpc_partitions[partid]; + + if (!XPC_SUPPORTS_DISENGAGE_REQUEST(part-> + remote_vars_version)) { + + /* just in case it was left set by an earlier XPC */ + xpc_clear_partition_engaged(1UL << partid); + continue; + } + + if (xpc_partition_engaged(1UL << partid) || + part->act_state != XPC_P_INACTIVE) { + xpc_request_partition_disengage(part); + xpc_mark_partition_disengaged(part); + xpc_IPI_send_disengage(part); + } + } + + time = rtc_time(); + printmsg_time = time + + (XPC_DISENGAGE_PRINTMSG_INTERVAL * sn_rtc_cycles_per_second); + disengage_request_timeout = time + + (xpc_disengage_request_timelimit * sn_rtc_cycles_per_second); + + /* wait for all other partitions to disengage from us */ + + while (1) { + engaged = xpc_partition_engaged(-1UL); + if (!engaged) { + dev_info(xpc_part, "all partitions have disengaged\n"); + break; + } + + time = rtc_time(); + if (time >= disengage_request_timeout) { + for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) { + if (engaged & (1UL << partid)) { + dev_info(xpc_part, "disengage from " + "remote partition %d timed " + "out\n", partid); + } + } + break; + } + + if (time >= printmsg_time) { + dev_info(xpc_part, "waiting for remote partitions to " + "disengage, timeout in %ld seconds\n", + (disengage_request_timeout - time) / + sn_rtc_cycles_per_second); + printmsg_time = time + + (XPC_DISENGAGE_PRINTMSG_INTERVAL * + sn_rtc_cycles_per_second); + } + } +} + +/* + * This function is called when the system is being restarted or halted due + * to some sort of system failure. If this is the case we need to notify the + * other partitions to disengage from all references to our memory. + * This function can also be called when our heartbeater could be offlined + * for a time. In this case we need to notify other partitions to not worry + * about the lack of a heartbeat. + */ +static int +xpc_system_die(struct notifier_block *nb, unsigned long event, void *unused) +{ + switch (event) { + case DIE_MACHINE_RESTART: + case DIE_MACHINE_HALT: + xpc_die_disengage(); + break; + + case DIE_KDEBUG_ENTER: + /* Should lack of heartbeat be ignored by other partitions? */ + if (!xpc_kdebug_ignore) + break; + + /* fall through */ + case DIE_MCA_MONARCH_ENTER: + case DIE_INIT_MONARCH_ENTER: + xpc_vars->heartbeat++; + xpc_vars->heartbeat_offline = 1; + break; + + case DIE_KDEBUG_LEAVE: + /* Is lack of heartbeat being ignored by other partitions? 
*/ + if (!xpc_kdebug_ignore) + break; + + /* fall through */ + case DIE_MCA_MONARCH_LEAVE: + case DIE_INIT_MONARCH_LEAVE: + xpc_vars->heartbeat++; + xpc_vars->heartbeat_offline = 0; + break; + } + + return NOTIFY_DONE; +} + +int __init +xpc_init(void) +{ + int ret; + partid_t partid; + struct xpc_partition *part; + struct task_struct *kthread; + size_t buf_size; + + if (!ia64_platform_is("sn2")) + return -ENODEV; + + buf_size = max(XPC_RP_VARS_SIZE, + XPC_RP_HEADER_SIZE + XP_NASID_MASK_BYTES); + xpc_remote_copy_buffer = xpc_kmalloc_cacheline_aligned(buf_size, + GFP_KERNEL, + &xpc_remote_copy_buffer_base); + if (xpc_remote_copy_buffer == NULL) + return -ENOMEM; + + snprintf(xpc_part->bus_id, BUS_ID_SIZE, "part"); + snprintf(xpc_chan->bus_id, BUS_ID_SIZE, "chan"); + + xpc_sysctl = register_sysctl_table(xpc_sys_dir); + + /* + * The first few fields of each entry of xpc_partitions[] need to + * be initialized now so that calls to xpc_connect() and + * xpc_disconnect() can be made prior to the activation of any remote + * partition. NOTE THAT NONE OF THE OTHER FIELDS BELONGING TO THESE + * ENTRIES ARE MEANINGFUL UNTIL AFTER AN ENTRY'S CORRESPONDING + * PARTITION HAS BEEN ACTIVATED. + */ + for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) { + part = &xpc_partitions[partid]; + + DBUG_ON((u64)part != L1_CACHE_ALIGN((u64)part)); + + part->act_IRQ_rcvd = 0; + spin_lock_init(&part->act_lock); + part->act_state = XPC_P_INACTIVE; + XPC_SET_REASON(part, 0, 0); + + init_timer(&part->disengage_request_timer); + part->disengage_request_timer.function = + xpc_timeout_partition_disengage_request; + part->disengage_request_timer.data = (unsigned long)part; + + part->setup_state = XPC_P_UNSET; + init_waitqueue_head(&part->teardown_wq); + atomic_set(&part->references, 0); + } + + /* + * Open up protections for IPI operations (and AMO operations on + * Shub 1.1 systems). + */ + xpc_allow_IPI_ops(); + + /* + * Interrupts being processed will increment this atomic variable and + * awaken the heartbeat thread which will process the interrupts. + */ + atomic_set(&xpc_act_IRQ_rcvd, 0); + + /* + * This is safe to do before the xpc_hb_checker thread has started + * because the handler releases a wait queue. If an interrupt is + * received before the thread is waiting, it will not go to sleep, + * but rather immediately process the interrupt. + */ + ret = request_irq(SGI_XPC_ACTIVATE, xpc_act_IRQ_handler, 0, + "xpc hb", NULL); + if (ret != 0) { + dev_err(xpc_part, "can't register ACTIVATE IRQ handler, " + "errno=%d\n", -ret); + + xpc_restrict_IPI_ops(); + + if (xpc_sysctl) + unregister_sysctl_table(xpc_sysctl); + + kfree(xpc_remote_copy_buffer_base); + return -EBUSY; + } + + /* + * Fill the partition reserved page with the information needed by + * other partitions to discover we are alive and establish initial + * communications. 
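+ *
+ * (A sketch of what xpc_rsvd_page_init(), defined in xpc_partition.c,
+ * does: locate our reserved page via SAL, size the nasid masks, set up
+ * xpc_vars and the page of AMOs, and only then publish rp->vars_pa,
+ * the field remote partitions check to see that we are initialized.)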
+ */ + xpc_rsvd_page = xpc_rsvd_page_init(); + if (xpc_rsvd_page == NULL) { + dev_err(xpc_part, "could not setup our reserved page\n"); + + free_irq(SGI_XPC_ACTIVATE, NULL); + xpc_restrict_IPI_ops(); + + if (xpc_sysctl) + unregister_sysctl_table(xpc_sysctl); + + kfree(xpc_remote_copy_buffer_base); + return -EBUSY; + } + + /* add ourselves to the reboot_notifier_list */ + ret = register_reboot_notifier(&xpc_reboot_notifier); + if (ret != 0) + dev_warn(xpc_part, "can't register reboot notifier\n"); + + /* add ourselves to the die_notifier list */ + ret = register_die_notifier(&xpc_die_notifier); + if (ret != 0) + dev_warn(xpc_part, "can't register die notifier\n"); + + init_timer(&xpc_hb_timer); + xpc_hb_timer.function = xpc_hb_beater; + + /* + * The real work-horse behind xpc. This processes incoming + * interrupts and monitors remote heartbeats. + */ + kthread = kthread_run(xpc_hb_checker, NULL, XPC_HB_CHECK_THREAD_NAME); + if (IS_ERR(kthread)) { + dev_err(xpc_part, "failed while forking hb check thread\n"); + + /* indicate to others that our reserved page is uninitialized */ + xpc_rsvd_page->vars_pa = 0; + + /* take ourselves off of the reboot_notifier_list */ + (void)unregister_reboot_notifier(&xpc_reboot_notifier); + + /* take ourselves off of the die_notifier list */ + (void)unregister_die_notifier(&xpc_die_notifier); + + del_timer_sync(&xpc_hb_timer); + free_irq(SGI_XPC_ACTIVATE, NULL); + xpc_restrict_IPI_ops(); + + if (xpc_sysctl) + unregister_sysctl_table(xpc_sysctl); + + kfree(xpc_remote_copy_buffer_base); + return -EBUSY; + } + + /* + * Startup a thread that will attempt to discover other partitions to + * activate based on info provided by SAL. This new thread is short + * lived and will exit once discovery is complete. + */ + kthread = kthread_run(xpc_initiate_discovery, NULL, + XPC_DISCOVERY_THREAD_NAME); + if (IS_ERR(kthread)) { + dev_err(xpc_part, "failed while forking discovery thread\n"); + + /* mark this new thread as a non-starter */ + complete(&xpc_discovery_exited); + + xpc_do_exit(xpcUnloading); + return -EBUSY; + } + + /* set the interface to point at XPC's functions */ + xpc_set_interface(xpc_initiate_connect, xpc_initiate_disconnect, + xpc_initiate_allocate, xpc_initiate_send, + xpc_initiate_send_notify, xpc_initiate_received, + xpc_initiate_partid_to_nasids); + + return 0; +} + +module_init(xpc_init); + +void __exit +xpc_exit(void) +{ + xpc_do_exit(xpcUnloading); +} + +module_exit(xpc_exit); + +MODULE_AUTHOR("Silicon Graphics, Inc."); +MODULE_DESCRIPTION("Cross Partition Communication (XPC) support"); +MODULE_LICENSE("GPL"); + +module_param(xpc_hb_interval, int, 0); +MODULE_PARM_DESC(xpc_hb_interval, "Number of seconds between " + "heartbeat increments."); + +module_param(xpc_hb_check_interval, int, 0); +MODULE_PARM_DESC(xpc_hb_check_interval, "Number of seconds between " + "heartbeat checks."); + +module_param(xpc_disengage_request_timelimit, int, 0); +MODULE_PARM_DESC(xpc_disengage_request_timelimit, "Number of seconds to wait " + "for disengage request to complete."); + +module_param(xpc_kdebug_ignore, int, 0); +MODULE_PARM_DESC(xpc_kdebug_ignore, "Should lack of heartbeat be ignored by " + "other partitions when dropping into kdebug."); diff --git a/drivers/misc/sgi-xp/xpc_partition.c b/drivers/misc/sgi-xp/xpc_partition.c new file mode 100644 index 00000000000..27e200ec582 --- /dev/null +++ b/drivers/misc/sgi-xp/xpc_partition.c @@ -0,0 +1,1174 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. 
See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (c) 2004-2008 Silicon Graphics, Inc. All Rights Reserved. + */ + +/* + * Cross Partition Communication (XPC) partition support. + * + * This is the part of XPC that detects the presence/absence of + * other partitions. It provides a heartbeat and monitors the + * heartbeats of other partitions. + * + */ + +#include <linux/kernel.h> +#include <linux/sysctl.h> +#include <linux/cache.h> +#include <linux/mmzone.h> +#include <linux/nodemask.h> +#include <asm/uncached.h> +#include <asm/sn/bte.h> +#include <asm/sn/intr.h> +#include <asm/sn/sn_sal.h> +#include <asm/sn/nodepda.h> +#include <asm/sn/addrs.h> +#include "xpc.h" + +/* XPC is exiting flag */ +int xpc_exiting; + +/* SH_IPI_ACCESS shub register value on startup */ +static u64 xpc_sh1_IPI_access; +static u64 xpc_sh2_IPI_access0; +static u64 xpc_sh2_IPI_access1; +static u64 xpc_sh2_IPI_access2; +static u64 xpc_sh2_IPI_access3; + +/* original protection values for each node */ +u64 xpc_prot_vec[MAX_NUMNODES]; + +/* this partition's reserved page pointers */ +struct xpc_rsvd_page *xpc_rsvd_page; +static u64 *xpc_part_nasids; +static u64 *xpc_mach_nasids; +struct xpc_vars *xpc_vars; +struct xpc_vars_part *xpc_vars_part; + +static int xp_nasid_mask_bytes; /* actual size in bytes of nasid mask */ +static int xp_nasid_mask_words; /* actual size in words of nasid mask */ + +/* + * For performance reasons, each entry of xpc_partitions[] is cacheline + * aligned. And xpc_partitions[] is padded with an additional entry at the + * end so that the last legitimate entry doesn't share its cacheline with + * another variable. + */ +struct xpc_partition xpc_partitions[XP_MAX_PARTITIONS + 1]; + +/* + * Generic buffer used to store a local copy of portions of a remote + * partition's reserved page (either its header and part_nasids mask, + * or its vars). + */ +char *xpc_remote_copy_buffer; +void *xpc_remote_copy_buffer_base; + +/* + * Guarantee that the kmalloc'd memory is cacheline aligned. + */ +void * +xpc_kmalloc_cacheline_aligned(size_t size, gfp_t flags, void **base) +{ + /* see if kmalloc will give us cachline aligned memory by default */ + *base = kmalloc(size, flags); + if (*base == NULL) + return NULL; + + if ((u64)*base == L1_CACHE_ALIGN((u64)*base)) + return *base; + + kfree(*base); + + /* nope, we'll have to do it ourselves */ + *base = kmalloc(size + L1_CACHE_BYTES, flags); + if (*base == NULL) + return NULL; + + return (void *)L1_CACHE_ALIGN((u64)*base); +} + +/* + * Given a nasid, get the physical address of the partition's reserved page + * for that nasid. This function returns 0 on any error. 
+ */ +static u64 +xpc_get_rsvd_page_pa(int nasid) +{ + bte_result_t bte_res; + s64 status; + u64 cookie = 0; + u64 rp_pa = nasid; /* seed with nasid */ + u64 len = 0; + u64 buf = buf; + u64 buf_len = 0; + void *buf_base = NULL; + + while (1) { + + status = sn_partition_reserved_page_pa(buf, &cookie, &rp_pa, + &len); + + dev_dbg(xpc_part, "SAL returned with status=%li, cookie=" + "0x%016lx, address=0x%016lx, len=0x%016lx\n", + status, cookie, rp_pa, len); + + if (status != SALRET_MORE_PASSES) + break; + + if (L1_CACHE_ALIGN(len) > buf_len) { + kfree(buf_base); + buf_len = L1_CACHE_ALIGN(len); + buf = (u64)xpc_kmalloc_cacheline_aligned(buf_len, + GFP_KERNEL, + &buf_base); + if (buf_base == NULL) { + dev_err(xpc_part, "unable to kmalloc " + "len=0x%016lx\n", buf_len); + status = SALRET_ERROR; + break; + } + } + + bte_res = xp_bte_copy(rp_pa, buf, buf_len, + (BTE_NOTIFY | BTE_WACQUIRE), NULL); + if (bte_res != BTE_SUCCESS) { + dev_dbg(xpc_part, "xp_bte_copy failed %i\n", bte_res); + status = SALRET_ERROR; + break; + } + } + + kfree(buf_base); + + if (status != SALRET_OK) + rp_pa = 0; + + dev_dbg(xpc_part, "reserved page at phys address 0x%016lx\n", rp_pa); + return rp_pa; +} + +/* + * Fill the partition reserved page with the information needed by + * other partitions to discover we are alive and establish initial + * communications. + */ +struct xpc_rsvd_page * +xpc_rsvd_page_init(void) +{ + struct xpc_rsvd_page *rp; + AMO_t *amos_page; + u64 rp_pa, nasid_array = 0; + int i, ret; + + /* get the local reserved page's address */ + + preempt_disable(); + rp_pa = xpc_get_rsvd_page_pa(cpuid_to_nasid(smp_processor_id())); + preempt_enable(); + if (rp_pa == 0) { + dev_err(xpc_part, "SAL failed to locate the reserved page\n"); + return NULL; + } + rp = (struct xpc_rsvd_page *)__va(rp_pa); + + if (rp->partid != sn_partition_id) { + dev_err(xpc_part, "the reserved page's partid of %d should be " + "%d\n", rp->partid, sn_partition_id); + return NULL; + } + + rp->version = XPC_RP_VERSION; + + /* establish the actual sizes of the nasid masks */ + if (rp->SAL_version == 1) { + /* SAL_version 1 didn't set the nasids_size field */ + rp->nasids_size = 128; + } + xp_nasid_mask_bytes = rp->nasids_size; + xp_nasid_mask_words = xp_nasid_mask_bytes / 8; + + /* setup the pointers to the various items in the reserved page */ + xpc_part_nasids = XPC_RP_PART_NASIDS(rp); + xpc_mach_nasids = XPC_RP_MACH_NASIDS(rp); + xpc_vars = XPC_RP_VARS(rp); + xpc_vars_part = XPC_RP_VARS_PART(rp); + + /* + * Before clearing xpc_vars, see if a page of AMOs had been previously + * allocated. If not we'll need to allocate one and set permissions + * so that cross-partition AMOs are allowed. + * + * The allocated AMO page needs MCA reporting to remain disabled after + * XPC has unloaded. To make this work, we keep a copy of the pointer + * to this page (i.e., amos_page) in the struct xpc_vars structure, + * which is pointed to by the reserved page, and re-use that saved copy + * on subsequent loads of XPC. This AMO page is never freed, and its + * memory protections are never restricted. + */ + amos_page = xpc_vars->amos_page; + if (amos_page == NULL) { + amos_page = (AMO_t *)TO_AMO(uncached_alloc_page(0)); + if (amos_page == NULL) { + dev_err(xpc_part, "can't allocate page of AMOs\n"); + return NULL; + } + + /* + * Open up AMO-R/W to cpu. This is done for Shub 1.1 systems + * when xpc_allow_IPI_ops() is called via xpc_hb_init(). 
+ */ + if (!enable_shub_wars_1_1()) { + ret = sn_change_memprotect(ia64_tpa((u64)amos_page), + PAGE_SIZE, + SN_MEMPROT_ACCESS_CLASS_1, + &nasid_array); + if (ret != 0) { + dev_err(xpc_part, "can't change memory " + "protections\n"); + uncached_free_page(__IA64_UNCACHED_OFFSET | + TO_PHYS((u64)amos_page)); + return NULL; + } + } + } else if (!IS_AMO_ADDRESS((u64)amos_page)) { + /* + * EFI's XPBOOT can also set amos_page in the reserved page, + * but it happens to leave it as an uncached physical address + * and we need it to be an uncached virtual, so we'll have to + * convert it. + */ + if (!IS_AMO_PHYS_ADDRESS((u64)amos_page)) { + dev_err(xpc_part, "previously used amos_page address " + "is bad = 0x%p\n", (void *)amos_page); + return NULL; + } + amos_page = (AMO_t *)TO_AMO((u64)amos_page); + } + + /* clear xpc_vars */ + memset(xpc_vars, 0, sizeof(struct xpc_vars)); + + xpc_vars->version = XPC_V_VERSION; + xpc_vars->act_nasid = cpuid_to_nasid(0); + xpc_vars->act_phys_cpuid = cpu_physical_id(0); + xpc_vars->vars_part_pa = __pa(xpc_vars_part); + xpc_vars->amos_page_pa = ia64_tpa((u64)amos_page); + xpc_vars->amos_page = amos_page; /* save for next load of XPC */ + + /* clear xpc_vars_part */ + memset((u64 *)xpc_vars_part, 0, sizeof(struct xpc_vars_part) * + XP_MAX_PARTITIONS); + + /* initialize the activate IRQ related AMO variables */ + for (i = 0; i < xp_nasid_mask_words; i++) + (void)xpc_IPI_init(XPC_ACTIVATE_IRQ_AMOS + i); + + /* initialize the engaged remote partitions related AMO variables */ + (void)xpc_IPI_init(XPC_ENGAGED_PARTITIONS_AMO); + (void)xpc_IPI_init(XPC_DISENGAGE_REQUEST_AMO); + + /* timestamp of when reserved page was setup by XPC */ + rp->stamp = CURRENT_TIME; + + /* + * This signifies to the remote partition that our reserved + * page is initialized. + */ + rp->vars_pa = __pa(xpc_vars); + + return rp; +} + +/* + * Change protections to allow IPI operations (and AMO operations on + * Shub 1.1 systems). + */ +void +xpc_allow_IPI_ops(void) +{ + int node; + int nasid; + + /* >>> Change SH_IPI_ACCESS code to use SAL call once it is available */ + + if (is_shub2()) { + xpc_sh2_IPI_access0 = + (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH2_IPI_ACCESS0)); + xpc_sh2_IPI_access1 = + (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH2_IPI_ACCESS1)); + xpc_sh2_IPI_access2 = + (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH2_IPI_ACCESS2)); + xpc_sh2_IPI_access3 = + (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH2_IPI_ACCESS3)); + + for_each_online_node(node) { + nasid = cnodeid_to_nasid(node); + HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS0), + -1UL); + HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS1), + -1UL); + HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS2), + -1UL); + HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS3), + -1UL); + } + + } else { + xpc_sh1_IPI_access = + (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH1_IPI_ACCESS)); + + for_each_online_node(node) { + nasid = cnodeid_to_nasid(node); + HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH1_IPI_ACCESS), + -1UL); + + /* + * Since the BIST collides with memory operations on + * SHUB 1.1 sn_change_memprotect() cannot be used. + */ + if (enable_shub_wars_1_1()) { + /* open up everything */ + xpc_prot_vec[node] = (u64)HUB_L((u64 *) + GLOBAL_MMR_ADDR + (nasid, + SH1_MD_DQLP_MMR_DIR_PRIVEC0)); + HUB_S((u64 *) + GLOBAL_MMR_ADDR(nasid, + SH1_MD_DQLP_MMR_DIR_PRIVEC0), + -1UL); + HUB_S((u64 *) + GLOBAL_MMR_ADDR(nasid, + SH1_MD_DQRP_MMR_DIR_PRIVEC0), + -1UL); + } + } + } +} + +/* + * Restrict protections to disallow IPI operations (and AMO operations on + * Shub 1.1 systems). 
+ */ +void +xpc_restrict_IPI_ops(void) +{ + int node; + int nasid; + + /* >>> Change SH_IPI_ACCESS code to use SAL call once it is available */ + + if (is_shub2()) { + + for_each_online_node(node) { + nasid = cnodeid_to_nasid(node); + HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS0), + xpc_sh2_IPI_access0); + HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS1), + xpc_sh2_IPI_access1); + HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS2), + xpc_sh2_IPI_access2); + HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS3), + xpc_sh2_IPI_access3); + } + + } else { + + for_each_online_node(node) { + nasid = cnodeid_to_nasid(node); + HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH1_IPI_ACCESS), + xpc_sh1_IPI_access); + + if (enable_shub_wars_1_1()) { + HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, + SH1_MD_DQLP_MMR_DIR_PRIVEC0), + xpc_prot_vec[node]); + HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, + SH1_MD_DQRP_MMR_DIR_PRIVEC0), + xpc_prot_vec[node]); + } + } + } +} + +/* + * At periodic intervals, scan through all active partitions and ensure + * their heartbeat is still active. If not, the partition is deactivated. + */ +void +xpc_check_remote_hb(void) +{ + struct xpc_vars *remote_vars; + struct xpc_partition *part; + partid_t partid; + bte_result_t bres; + + remote_vars = (struct xpc_vars *)xpc_remote_copy_buffer; + + for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) { + + if (xpc_exiting) + break; + + if (partid == sn_partition_id) + continue; + + part = &xpc_partitions[partid]; + + if (part->act_state == XPC_P_INACTIVE || + part->act_state == XPC_P_DEACTIVATING) { + continue; + } + + /* pull the remote_hb cache line */ + bres = xp_bte_copy(part->remote_vars_pa, + (u64)remote_vars, + XPC_RP_VARS_SIZE, + (BTE_NOTIFY | BTE_WACQUIRE), NULL); + if (bres != BTE_SUCCESS) { + XPC_DEACTIVATE_PARTITION(part, + xpc_map_bte_errors(bres)); + continue; + } + + dev_dbg(xpc_part, "partid = %d, heartbeat = %ld, last_heartbeat" + " = %ld, heartbeat_offline = %ld, HB_mask = 0x%lx\n", + partid, remote_vars->heartbeat, part->last_heartbeat, + remote_vars->heartbeat_offline, + remote_vars->heartbeating_to_mask); + + if (((remote_vars->heartbeat == part->last_heartbeat) && + (remote_vars->heartbeat_offline == 0)) || + !xpc_hb_allowed(sn_partition_id, remote_vars)) { + + XPC_DEACTIVATE_PARTITION(part, xpcNoHeartbeat); + continue; + } + + part->last_heartbeat = remote_vars->heartbeat; + } +} + +/* + * Get a copy of a portion of the remote partition's rsvd page. + * + * remote_rp points to a buffer that is cacheline aligned for BTE copies and + * is large enough to contain a copy of their reserved page header and + * part_nasids mask. 
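+ *
+ * (Callers in this file pass either xpc_remote_copy_buffer, which
+ * xpc_init() sizes to at least XPC_RP_HEADER_SIZE + XP_NASID_MASK_BYTES,
+ * or a buffer obtained from xpc_kmalloc_cacheline_aligned(), as
+ * xpc_discovery() does.)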
+ */ +static enum xpc_retval +xpc_get_remote_rp(int nasid, u64 *discovered_nasids, + struct xpc_rsvd_page *remote_rp, u64 *remote_rp_pa) +{ + int bres, i; + + /* get the reserved page's physical address */ + + *remote_rp_pa = xpc_get_rsvd_page_pa(nasid); + if (*remote_rp_pa == 0) + return xpcNoRsvdPageAddr; + + /* pull over the reserved page header and part_nasids mask */ + bres = xp_bte_copy(*remote_rp_pa, (u64)remote_rp, + XPC_RP_HEADER_SIZE + xp_nasid_mask_bytes, + (BTE_NOTIFY | BTE_WACQUIRE), NULL); + if (bres != BTE_SUCCESS) + return xpc_map_bte_errors(bres); + + if (discovered_nasids != NULL) { + u64 *remote_part_nasids = XPC_RP_PART_NASIDS(remote_rp); + + for (i = 0; i < xp_nasid_mask_words; i++) + discovered_nasids[i] |= remote_part_nasids[i]; + } + + /* check that the partid is for another partition */ + + if (remote_rp->partid < 1 || + remote_rp->partid > (XP_MAX_PARTITIONS - 1)) { + return xpcInvalidPartid; + } + + if (remote_rp->partid == sn_partition_id) + return xpcLocalPartid; + + if (XPC_VERSION_MAJOR(remote_rp->version) != + XPC_VERSION_MAJOR(XPC_RP_VERSION)) { + return xpcBadVersion; + } + + return xpcSuccess; +} + +/* + * Get a copy of the remote partition's XPC variables from the reserved page. + * + * remote_vars points to a buffer that is cacheline aligned for BTE copies and + * assumed to be of size XPC_RP_VARS_SIZE. + */ +static enum xpc_retval +xpc_get_remote_vars(u64 remote_vars_pa, struct xpc_vars *remote_vars) +{ + int bres; + + if (remote_vars_pa == 0) + return xpcVarsNotSet; + + /* pull over the cross partition variables */ + bres = xp_bte_copy(remote_vars_pa, (u64)remote_vars, XPC_RP_VARS_SIZE, + (BTE_NOTIFY | BTE_WACQUIRE), NULL); + if (bres != BTE_SUCCESS) + return xpc_map_bte_errors(bres); + + if (XPC_VERSION_MAJOR(remote_vars->version) != + XPC_VERSION_MAJOR(XPC_V_VERSION)) { + return xpcBadVersion; + } + + return xpcSuccess; +} + +/* + * Update the remote partition's info. 
+ */ +static void +xpc_update_partition_info(struct xpc_partition *part, u8 remote_rp_version, + struct timespec *remote_rp_stamp, u64 remote_rp_pa, + u64 remote_vars_pa, struct xpc_vars *remote_vars) +{ + part->remote_rp_version = remote_rp_version; + dev_dbg(xpc_part, " remote_rp_version = 0x%016x\n", + part->remote_rp_version); + + part->remote_rp_stamp = *remote_rp_stamp; + dev_dbg(xpc_part, " remote_rp_stamp (tv_sec = 0x%lx tv_nsec = 0x%lx\n", + part->remote_rp_stamp.tv_sec, part->remote_rp_stamp.tv_nsec); + + part->remote_rp_pa = remote_rp_pa; + dev_dbg(xpc_part, " remote_rp_pa = 0x%016lx\n", part->remote_rp_pa); + + part->remote_vars_pa = remote_vars_pa; + dev_dbg(xpc_part, " remote_vars_pa = 0x%016lx\n", + part->remote_vars_pa); + + part->last_heartbeat = remote_vars->heartbeat; + dev_dbg(xpc_part, " last_heartbeat = 0x%016lx\n", + part->last_heartbeat); + + part->remote_vars_part_pa = remote_vars->vars_part_pa; + dev_dbg(xpc_part, " remote_vars_part_pa = 0x%016lx\n", + part->remote_vars_part_pa); + + part->remote_act_nasid = remote_vars->act_nasid; + dev_dbg(xpc_part, " remote_act_nasid = 0x%x\n", + part->remote_act_nasid); + + part->remote_act_phys_cpuid = remote_vars->act_phys_cpuid; + dev_dbg(xpc_part, " remote_act_phys_cpuid = 0x%x\n", + part->remote_act_phys_cpuid); + + part->remote_amos_page_pa = remote_vars->amos_page_pa; + dev_dbg(xpc_part, " remote_amos_page_pa = 0x%lx\n", + part->remote_amos_page_pa); + + part->remote_vars_version = remote_vars->version; + dev_dbg(xpc_part, " remote_vars_version = 0x%x\n", + part->remote_vars_version); +} + +/* + * Prior code has determined the nasid which generated an IPI. Inspect + * that nasid to determine if its partition needs to be activated or + * deactivated. + * + * A partition is consider "awaiting activation" if our partition + * flags indicate it is not active and it has a heartbeat. A + * partition is considered "awaiting deactivation" if our partition + * flags indicate it is active but it has no heartbeat or it is not + * sending its heartbeat to us. + * + * To determine the heartbeat, the remote nasid must have a properly + * initialized reserved page. 
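+ *
+ * The decision, in sketch form (not the exact code below):
+ *
+ *	pull the remote reserved page and XPC variables;
+ *	if (partition is disengaged && act_state == XPC_P_INACTIVE)
+ *		xpc_activate_partition();
+ *	else if (the remote reserved page stamp or amos_page_pa changed)
+ *		the other side rebooted: deactivate with xpcReactivating;
+ *	else if (the other side has requested that we disengage)
+ *		deactivate with xpcOtherGoingDown;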
+ */ +static void +xpc_identify_act_IRQ_req(int nasid) +{ + struct xpc_rsvd_page *remote_rp; + struct xpc_vars *remote_vars; + u64 remote_rp_pa; + u64 remote_vars_pa; + int remote_rp_version; + int reactivate = 0; + int stamp_diff; + struct timespec remote_rp_stamp = { 0, 0 }; + partid_t partid; + struct xpc_partition *part; + enum xpc_retval ret; + + /* pull over the reserved page structure */ + + remote_rp = (struct xpc_rsvd_page *)xpc_remote_copy_buffer; + + ret = xpc_get_remote_rp(nasid, NULL, remote_rp, &remote_rp_pa); + if (ret != xpcSuccess) { + dev_warn(xpc_part, "unable to get reserved page from nasid %d, " + "which sent interrupt, reason=%d\n", nasid, ret); + return; + } + + remote_vars_pa = remote_rp->vars_pa; + remote_rp_version = remote_rp->version; + if (XPC_SUPPORTS_RP_STAMP(remote_rp_version)) + remote_rp_stamp = remote_rp->stamp; + + partid = remote_rp->partid; + part = &xpc_partitions[partid]; + + /* pull over the cross partition variables */ + + remote_vars = (struct xpc_vars *)xpc_remote_copy_buffer; + + ret = xpc_get_remote_vars(remote_vars_pa, remote_vars); + if (ret != xpcSuccess) { + + dev_warn(xpc_part, "unable to get XPC variables from nasid %d, " + "which sent interrupt, reason=%d\n", nasid, ret); + + XPC_DEACTIVATE_PARTITION(part, ret); + return; + } + + part->act_IRQ_rcvd++; + + dev_dbg(xpc_part, "partid for nasid %d is %d; IRQs = %d; HB = " + "%ld:0x%lx\n", (int)nasid, (int)partid, part->act_IRQ_rcvd, + remote_vars->heartbeat, remote_vars->heartbeating_to_mask); + + if (xpc_partition_disengaged(part) && + part->act_state == XPC_P_INACTIVE) { + + xpc_update_partition_info(part, remote_rp_version, + &remote_rp_stamp, remote_rp_pa, + remote_vars_pa, remote_vars); + + if (XPC_SUPPORTS_DISENGAGE_REQUEST(part->remote_vars_version)) { + if (xpc_partition_disengage_requested(1UL << partid)) { + /* + * Other side is waiting on us to disengage, + * even though we already have. + */ + return; + } + } else { + /* other side doesn't support disengage requests */ + xpc_clear_partition_disengage_request(1UL << partid); + } + + xpc_activate_partition(part); + return; + } + + DBUG_ON(part->remote_rp_version == 0); + DBUG_ON(part->remote_vars_version == 0); + + if (!XPC_SUPPORTS_RP_STAMP(part->remote_rp_version)) { + DBUG_ON(XPC_SUPPORTS_DISENGAGE_REQUEST(part-> + remote_vars_version)); + + if (!XPC_SUPPORTS_RP_STAMP(remote_rp_version)) { + DBUG_ON(XPC_SUPPORTS_DISENGAGE_REQUEST(remote_vars-> + version)); + /* see if the other side rebooted */ + if (part->remote_amos_page_pa == + remote_vars->amos_page_pa && + xpc_hb_allowed(sn_partition_id, remote_vars)) { + /* doesn't look that way, so ignore the IPI */ + return; + } + } + + /* + * Other side rebooted and previous XPC didn't support the + * disengage request, so we don't need to do anything special. + */ + + xpc_update_partition_info(part, remote_rp_version, + &remote_rp_stamp, remote_rp_pa, + remote_vars_pa, remote_vars); + part->reactivate_nasid = nasid; + XPC_DEACTIVATE_PARTITION(part, xpcReactivating); + return; + } + + DBUG_ON(!XPC_SUPPORTS_DISENGAGE_REQUEST(part->remote_vars_version)); + + if (!XPC_SUPPORTS_RP_STAMP(remote_rp_version)) { + DBUG_ON(!XPC_SUPPORTS_DISENGAGE_REQUEST(remote_vars->version)); + + /* + * Other side rebooted and previous XPC did support the + * disengage request, but the new one doesn't. 
+ */ + + xpc_clear_partition_engaged(1UL << partid); + xpc_clear_partition_disengage_request(1UL << partid); + + xpc_update_partition_info(part, remote_rp_version, + &remote_rp_stamp, remote_rp_pa, + remote_vars_pa, remote_vars); + reactivate = 1; + + } else { + DBUG_ON(!XPC_SUPPORTS_DISENGAGE_REQUEST(remote_vars->version)); + + stamp_diff = xpc_compare_stamps(&part->remote_rp_stamp, + &remote_rp_stamp); + if (stamp_diff != 0) { + DBUG_ON(stamp_diff >= 0); + + /* + * Other side rebooted and the previous XPC did support + * the disengage request, as does the new one. + */ + + DBUG_ON(xpc_partition_engaged(1UL << partid)); + DBUG_ON(xpc_partition_disengage_requested(1UL << + partid)); + + xpc_update_partition_info(part, remote_rp_version, + &remote_rp_stamp, + remote_rp_pa, remote_vars_pa, + remote_vars); + reactivate = 1; + } + } + + if (part->disengage_request_timeout > 0 && + !xpc_partition_disengaged(part)) { + /* still waiting on other side to disengage from us */ + return; + } + + if (reactivate) { + part->reactivate_nasid = nasid; + XPC_DEACTIVATE_PARTITION(part, xpcReactivating); + + } else if (XPC_SUPPORTS_DISENGAGE_REQUEST(part->remote_vars_version) && + xpc_partition_disengage_requested(1UL << partid)) { + XPC_DEACTIVATE_PARTITION(part, xpcOtherGoingDown); + } +} + +/* + * Loop through the activation AMO variables and process any bits + * which are set. Each bit indicates a nasid sending a partition + * activation or deactivation request. + * + * Return #of IRQs detected. + */ +int +xpc_identify_act_IRQ_sender(void) +{ + int word, bit; + u64 nasid_mask; + u64 nasid; /* remote nasid */ + int n_IRQs_detected = 0; + AMO_t *act_amos; + + act_amos = xpc_vars->amos_page + XPC_ACTIVATE_IRQ_AMOS; + + /* scan through act AMO variable looking for non-zero entries */ + for (word = 0; word < xp_nasid_mask_words; word++) { + + if (xpc_exiting) + break; + + nasid_mask = xpc_IPI_receive(&act_amos[word]); + if (nasid_mask == 0) { + /* no IRQs from nasids in this variable */ + continue; + } + + dev_dbg(xpc_part, "AMO[%d] gave back 0x%lx\n", word, + nasid_mask); + + /* + * If this nasid has been added to the machine since + * our partition was reset, this will retain the + * remote nasid in our reserved pages machine mask. + * This is used in the event of module reload. + */ + xpc_mach_nasids[word] |= nasid_mask; + + /* locate the nasid(s) which sent interrupts */ + + for (bit = 0; bit < (8 * sizeof(u64)); bit++) { + if (nasid_mask & (1UL << bit)) { + n_IRQs_detected++; + nasid = XPC_NASID_FROM_W_B(word, bit); + dev_dbg(xpc_part, "interrupt from nasid %ld\n", + nasid); + xpc_identify_act_IRQ_req(nasid); + } + } + } + return n_IRQs_detected; +} + +/* + * See if the other side has responded to a partition disengage request + * from us. + */ +int +xpc_partition_disengaged(struct xpc_partition *part) +{ + partid_t partid = XPC_PARTID(part); + int disengaged; + + disengaged = (xpc_partition_engaged(1UL << partid) == 0); + if (part->disengage_request_timeout) { + if (!disengaged) { + if (time_before(jiffies, + part->disengage_request_timeout)) { + /* timelimit hasn't been reached yet */ + return 0; + } + + /* + * Other side hasn't responded to our disengage + * request in a timely fashion, so assume it's dead. 
+ */ + + dev_info(xpc_part, "disengage from remote partition %d " + "timed out\n", partid); + xpc_disengage_request_timedout = 1; + xpc_clear_partition_engaged(1UL << partid); + disengaged = 1; + } + part->disengage_request_timeout = 0; + + /* cancel the timer function, provided it's not us */ + if (!in_interrupt()) { + del_singleshot_timer_sync(&part-> + disengage_request_timer); + } + + DBUG_ON(part->act_state != XPC_P_DEACTIVATING && + part->act_state != XPC_P_INACTIVE); + if (part->act_state != XPC_P_INACTIVE) + xpc_wakeup_channel_mgr(part); + + if (XPC_SUPPORTS_DISENGAGE_REQUEST(part->remote_vars_version)) + xpc_cancel_partition_disengage_request(part); + } + return disengaged; +} + +/* + * Mark specified partition as active. + */ +enum xpc_retval +xpc_mark_partition_active(struct xpc_partition *part) +{ + unsigned long irq_flags; + enum xpc_retval ret; + + dev_dbg(xpc_part, "setting partition %d to ACTIVE\n", XPC_PARTID(part)); + + spin_lock_irqsave(&part->act_lock, irq_flags); + if (part->act_state == XPC_P_ACTIVATING) { + part->act_state = XPC_P_ACTIVE; + ret = xpcSuccess; + } else { + DBUG_ON(part->reason == xpcSuccess); + ret = part->reason; + } + spin_unlock_irqrestore(&part->act_lock, irq_flags); + + return ret; +} + +/* + * Notify XPC that the partition is down. + */ +void +xpc_deactivate_partition(const int line, struct xpc_partition *part, + enum xpc_retval reason) +{ + unsigned long irq_flags; + + spin_lock_irqsave(&part->act_lock, irq_flags); + + if (part->act_state == XPC_P_INACTIVE) { + XPC_SET_REASON(part, reason, line); + spin_unlock_irqrestore(&part->act_lock, irq_flags); + if (reason == xpcReactivating) { + /* we interrupt ourselves to reactivate partition */ + xpc_IPI_send_reactivate(part); + } + return; + } + if (part->act_state == XPC_P_DEACTIVATING) { + if ((part->reason == xpcUnloading && reason != xpcUnloading) || + reason == xpcReactivating) { + XPC_SET_REASON(part, reason, line); + } + spin_unlock_irqrestore(&part->act_lock, irq_flags); + return; + } + + part->act_state = XPC_P_DEACTIVATING; + XPC_SET_REASON(part, reason, line); + + spin_unlock_irqrestore(&part->act_lock, irq_flags); + + if (XPC_SUPPORTS_DISENGAGE_REQUEST(part->remote_vars_version)) { + xpc_request_partition_disengage(part); + xpc_IPI_send_disengage(part); + + /* set a timelimit on the disengage request */ + part->disengage_request_timeout = jiffies + + (xpc_disengage_request_timelimit * HZ); + part->disengage_request_timer.expires = + part->disengage_request_timeout; + add_timer(&part->disengage_request_timer); + } + + dev_dbg(xpc_part, "bringing partition %d down, reason = %d\n", + XPC_PARTID(part), reason); + + xpc_partition_going_down(part, reason); +} + +/* + * Mark specified partition as inactive. + */ +void +xpc_mark_partition_inactive(struct xpc_partition *part) +{ + unsigned long irq_flags; + + dev_dbg(xpc_part, "setting partition %d to INACTIVE\n", + XPC_PARTID(part)); + + spin_lock_irqsave(&part->act_lock, irq_flags); + part->act_state = XPC_P_INACTIVE; + spin_unlock_irqrestore(&part->act_lock, irq_flags); + part->remote_rp_pa = 0; +} + +/* + * SAL has provided a partition and machine mask. The partition mask + * contains a bit for each even nasid in our partition. The machine + * mask contains a bit for each even nasid in the entire machine. + * + * Using those two bit arrays, we can determine which nasids are + * known in the machine. Each should also have a reserved page + * initialized if they are available for partitioning. 
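+ *
+ * The scan, in sketch form (not the exact code below):
+ *
+ *	for each region:
+ *		for each even nasid in the region:
+ *			skip it if it is in xpc_part_nasids (it is ours),
+ *			not in xpc_mach_nasids, or already discovered;
+ *			pull its reserved page and XPC variables;
+ *			register its AMO page with SAL;
+ *			xpc_IPI_send_activate() to ask it to activate.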
+ */
+void
+xpc_discovery(void)
+{
+	void *remote_rp_base;
+	struct xpc_rsvd_page *remote_rp;
+	struct xpc_vars *remote_vars;
+	u64 remote_rp_pa;
+	u64 remote_vars_pa;
+	int region;
+	int region_size;
+	int max_regions;
+	int nasid;
+	struct xpc_rsvd_page *rp;
+	partid_t partid;
+	struct xpc_partition *part;
+	u64 *discovered_nasids;
+	enum xpc_retval ret;
+
+	remote_rp = xpc_kmalloc_cacheline_aligned(XPC_RP_HEADER_SIZE +
+						  xp_nasid_mask_bytes,
+						  GFP_KERNEL, &remote_rp_base);
+	if (remote_rp == NULL)
+		return;
+
+	remote_vars = (struct xpc_vars *)remote_rp;
+
+	discovered_nasids = kzalloc(sizeof(u64) * xp_nasid_mask_words,
+				    GFP_KERNEL);
+	if (discovered_nasids == NULL) {
+		kfree(remote_rp_base);
+		return;
+	}
+
+	rp = (struct xpc_rsvd_page *)xpc_rsvd_page;
+
+	/*
+	 * The term 'region' in this context refers to the minimum number of
+	 * nodes that can comprise an access protection grouping. The access
+	 * protection is with regard to memory, IOI and IPI.
+	 */
+	max_regions = 64;
+	region_size = sn_region_size;
+
+	switch (region_size) {
+	case 128:
+		max_regions *= 2;
+	case 64:
+		max_regions *= 2;
+	case 32:
+		max_regions *= 2;
+		region_size = 16;
+		DBUG_ON(!is_shub2());
+	}
+
+	for (region = 0; region < max_regions; region++) {
+
+		if (xpc_exiting)
+			break;
+
+		dev_dbg(xpc_part, "searching region %d\n", region);
+
+		for (nasid = (region * region_size * 2);
+		     nasid < ((region + 1) * region_size * 2); nasid += 2) {
+
+			if (xpc_exiting)
+				break;
+
+			dev_dbg(xpc_part, "checking nasid %d\n", nasid);
+
+			if (XPC_NASID_IN_ARRAY(nasid, xpc_part_nasids)) {
+				dev_dbg(xpc_part, "PROM indicates Nasid %d is "
+					"part of the local partition; skipping "
+					"region\n", nasid);
+				break;
+			}
+
+			if (!(XPC_NASID_IN_ARRAY(nasid, xpc_mach_nasids))) {
+				dev_dbg(xpc_part, "PROM indicates Nasid %d was "
+					"not on Numa-Link network at reset\n",
+					nasid);
+				continue;
+			}
+
+			if (XPC_NASID_IN_ARRAY(nasid, discovered_nasids)) {
+				dev_dbg(xpc_part, "Nasid %d is part of a "
+					"partition which was previously "
+					"discovered\n", nasid);
+				continue;
+			}
+
+			/* pull over the reserved page structure */
+
+			ret = xpc_get_remote_rp(nasid, discovered_nasids,
+						remote_rp, &remote_rp_pa);
+			if (ret != xpcSuccess) {
+				dev_dbg(xpc_part, "unable to get reserved page "
+					"from nasid %d, reason=%d\n", nasid,
+					ret);
+
+				if (ret == xpcLocalPartid)
+					break;
+
+				continue;
+			}
+
+			remote_vars_pa = remote_rp->vars_pa;
+
+			partid = remote_rp->partid;
+			part = &xpc_partitions[partid];
+
+			/* pull over the cross partition variables */
+
+			ret = xpc_get_remote_vars(remote_vars_pa, remote_vars);
+			if (ret != xpcSuccess) {
+				dev_dbg(xpc_part, "unable to get XPC variables "
+					"from nasid %d, reason=%d\n", nasid,
+					ret);
+
+				XPC_DEACTIVATE_PARTITION(part, ret);
+				continue;
+			}
+
+			if (part->act_state != XPC_P_INACTIVE) {
+				dev_dbg(xpc_part, "partition %d on nasid %d is "
+					"already activating\n", partid, nasid);
+				break;
+			}
+
+			/*
+			 * Register the remote partition's AMOs with SAL so it
+			 * can handle and cleanup errors within that address
+			 * range should the remote partition go down. We don't
+			 * unregister this range because it is difficult to
+			 * tell when outstanding writes to the remote partition
+			 * are finished and thus when it is safe to
+			 * unregister. This should not result in wasted space
+			 * in the SAL xp_addr_region table because we should
+			 * get the same page for remote_act_amos_pa after
+			 * module reloads and system reboots. 
+ */ + if (sn_register_xp_addr_region + (remote_vars->amos_page_pa, PAGE_SIZE, 1) < 0) { + dev_dbg(xpc_part, + "partition %d failed to " + "register xp_addr region 0x%016lx\n", + partid, remote_vars->amos_page_pa); + + XPC_SET_REASON(part, xpcPhysAddrRegFailed, + __LINE__); + break; + } + + /* + * The remote nasid is valid and available. + * Send an interrupt to that nasid to notify + * it that we are ready to begin activation. + */ + dev_dbg(xpc_part, "sending an interrupt to AMO 0x%lx, " + "nasid %d, phys_cpuid 0x%x\n", + remote_vars->amos_page_pa, + remote_vars->act_nasid, + remote_vars->act_phys_cpuid); + + if (XPC_SUPPORTS_DISENGAGE_REQUEST(remote_vars-> + version)) { + part->remote_amos_page_pa = + remote_vars->amos_page_pa; + xpc_mark_partition_disengaged(part); + xpc_cancel_partition_disengage_request(part); + } + xpc_IPI_send_activate(remote_vars); + } + } + + kfree(discovered_nasids); + kfree(remote_rp_base); +} + +/* + * Given a partid, get the nasids owned by that partition from the + * remote partition's reserved page. + */ +enum xpc_retval +xpc_initiate_partid_to_nasids(partid_t partid, void *nasid_mask) +{ + struct xpc_partition *part; + u64 part_nasid_pa; + int bte_res; + + part = &xpc_partitions[partid]; + if (part->remote_rp_pa == 0) + return xpcPartitionDown; + + memset(nasid_mask, 0, XP_NASID_MASK_BYTES); + + part_nasid_pa = (u64)XPC_RP_PART_NASIDS(part->remote_rp_pa); + + bte_res = xp_bte_copy(part_nasid_pa, (u64)nasid_mask, + xp_nasid_mask_bytes, (BTE_NOTIFY | BTE_WACQUIRE), + NULL); + + return xpc_map_bte_errors(bte_res); +} diff --git a/drivers/misc/sgi-xp/xpnet.c b/drivers/misc/sgi-xp/xpnet.c new file mode 100644 index 00000000000..a9543c65814 --- /dev/null +++ b/drivers/misc/sgi-xp/xpnet.c @@ -0,0 +1,677 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1999-2008 Silicon Graphics, Inc. All rights reserved. + */ + +/* + * Cross Partition Network Interface (XPNET) support + * + * XPNET provides a virtual network layered on top of the Cross + * Partition communication layer. + * + * XPNET provides direct point-to-point and broadcast-like support + * for an ethernet-like device. The ethernet broadcast medium is + * replaced with a point-to-point message structure which passes + * pointers to a DMA-capable block that a remote partition should + * retrieve and pass to the upper level networking layer. + * + */ + +#include <linux/module.h> +#include <linux/types.h> +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/ioport.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/delay.h> +#include <linux/ethtool.h> +#include <linux/mii.h> +#include <linux/smp.h> +#include <linux/string.h> +#include <asm/sn/bte.h> +#include <asm/sn/io.h> +#include <asm/sn/sn_sal.h> +#include <asm/atomic.h> +#include "xp.h" + +/* + * The message payload transferred by XPC. + * + * buf_pa is the physical address where the DMA should pull from. + * + * NOTE: for performance reasons, buf_pa should _ALWAYS_ begin on a + * cacheline boundary. To accomplish this, we record the number of + * bytes from the beginning of the first cacheline to the first useful + * byte of the skb (leadin_ignore) and the number of bytes from the + * last useful byte of the skb to the end of the last cacheline + * (tailout_ignore). 
+ *
+ * size is the number of bytes to transfer which includes the skb->len
+ * (useful bytes of the sender's skb) plus the leadin and tailout
+ */
+struct xpnet_message {
+	u16 version;		/* Version for this message */
+	u16 embedded_bytes;	/* #of bytes embedded in XPC message */
+	u32 magic;		/* Special number indicating this is xpnet */
+	u64 buf_pa;		/* phys address of buffer to retrieve */
+	u32 size;		/* #of bytes in buffer */
+	u8 leadin_ignore;	/* #of bytes to ignore at the beginning */
+	u8 tailout_ignore;	/* #of bytes to ignore at the end */
+	unsigned char data;	/* body of small packets */
+};
+
+/*
+ * Determine the size of our message, the cacheline aligned size,
+ * and then the number of messages we will request from XPC.
+ *
+ * XPC expects each message to exist in an individual cacheline.
+ */
+#define XPNET_MSG_SIZE		(L1_CACHE_BYTES - XPC_MSG_PAYLOAD_OFFSET)
+#define XPNET_MSG_DATA_MAX	\
+		(XPNET_MSG_SIZE - (u64)(&((struct xpnet_message *)0)->data))
+#define XPNET_MSG_ALIGNED_SIZE	(L1_CACHE_ALIGN(XPNET_MSG_SIZE))
+#define XPNET_MSG_NENTRIES	(PAGE_SIZE / XPNET_MSG_ALIGNED_SIZE)
+
+#define XPNET_MAX_KTHREADS	(XPNET_MSG_NENTRIES + 1)
+#define XPNET_MAX_IDLE_KTHREADS	(XPNET_MSG_NENTRIES + 1)
+
+/*
+ * Version number of XPNET implementation. XPNET can always talk to versions
+ * with same major #, and never talk to versions with a different major #.
+ */
+#define _XPNET_VERSION(_major, _minor)	(((_major) << 4) | (_minor))
+#define XPNET_VERSION_MAJOR(_v)		((_v) >> 4)
+#define XPNET_VERSION_MINOR(_v)		((_v) & 0xf)
+
+#define	XPNET_VERSION _XPNET_VERSION(1, 0)	/* version 1.0 */
+#define	XPNET_VERSION_EMBED _XPNET_VERSION(1, 1)	/* version 1.1 */
+#define XPNET_MAGIC	0x88786984	/* "XNET" */
+
+#define XPNET_VALID_MSG(_m) \
+   ((XPNET_VERSION_MAJOR(_m->version) == XPNET_VERSION_MAJOR(XPNET_VERSION)) \
+    && (msg->magic == XPNET_MAGIC))
+
+#define XPNET_DEVICE_NAME		"xp0"
+
+/*
+ * When messages are queued with xpc_send_notify, a kmalloc'd buffer
+ * of the following type is passed as a notification cookie. When the
+ * notification function is called, we use the cookie to decide
+ * whether all outstanding message sends have completed. The skb can
+ * then be released.
+ */
+struct xpnet_pending_msg {
+	struct list_head free_list;
+	struct sk_buff *skb;
+	atomic_t use_count;
+};
+
+/* driver specific structure pointed to by the device structure */
+struct xpnet_dev_private {
+	struct net_device_stats stats;
+};
+
+struct net_device *xpnet_device;
+
+/*
+ * When we are notified of other partitions activating, we add them to
+ * our bitmask of partitions to which we broadcast.
+ */
+static u64 xpnet_broadcast_partitions;
+/* protect above */
+static DEFINE_SPINLOCK(xpnet_broadcast_lock);
+
+/*
+ * Since the Block Transfer Engine (BTE) is being used for the transfer
+ * and it relies upon cache-line size transfers, we need to reserve at
+ * least one cache-line for head and tail alignment. The BTE is
+ * limited to 8MB transfers.
+ *
+ * Testing has shown that changing MTU to greater than 64KB has no effect
+ * on TCP as the two sides negotiate a Max Segment Size that is limited
+ * to 64K. Other protocols may use packets greater than this, but for
+ * now, the default is 64KB.
+ */
+#define XPNET_MAX_MTU (0x800000UL - L1_CACHE_BYTES)
+/* 32KB has been determined to be the ideal */
+#define XPNET_DEF_MTU (0x8000UL)
+
+/*
+ * The partition id is encapsulated in the MAC address. The following
+ * define locates the octet the partid is in. 
+ */
+#define XPNET_PARTID_OCTET	1
+#define XPNET_LICENSE_OCTET	2
+
+/*
+ * Define the XPNET debug device structure that is to be used with dev_dbg(),
+ * dev_err(), dev_warn(), and dev_info().
+ */
+struct device_driver xpnet_dbg_name = {
+	.name = "xpnet"
+};
+
+struct device xpnet_dbg_subname = {
+	.bus_id = {0},		/* set to "" */
+	.driver = &xpnet_dbg_name
+};
+
+struct device *xpnet = &xpnet_dbg_subname;
+
+/*
+ * Packet was received by XPC and forwarded to us.
+ */
+static void
+xpnet_receive(partid_t partid, int channel, struct xpnet_message *msg)
+{
+	struct sk_buff *skb;
+	bte_result_t bret;
+	struct xpnet_dev_private *priv =
+	    (struct xpnet_dev_private *)xpnet_device->priv;
+
+	if (!XPNET_VALID_MSG(msg)) {
+		/*
+		 * Packet with a different XPC version.  Ignore.
+		 */
+		xpc_received(partid, channel, (void *)msg);
+
+		priv->stats.rx_errors++;
+
+		return;
+	}
+	dev_dbg(xpnet, "received 0x%lx, %d, %d, %d\n", msg->buf_pa, msg->size,
+		msg->leadin_ignore, msg->tailout_ignore);
+
+	/* reserve an extra cache line */
+	skb = dev_alloc_skb(msg->size + L1_CACHE_BYTES);
+	if (!skb) {
+		dev_err(xpnet, "failed on dev_alloc_skb(%d)\n",
+			msg->size + L1_CACHE_BYTES);
+
+		xpc_received(partid, channel, (void *)msg);
+
+		priv->stats.rx_errors++;
+
+		return;
+	}
+
+	/*
+	 * The allocated skb has some reserved space.
+	 * In order to use bte_copy, we need to get the
+	 * skb->data pointer moved forward.
+	 */
+	skb_reserve(skb, (L1_CACHE_BYTES - ((u64)skb->data &
+					    (L1_CACHE_BYTES - 1)) +
+			  msg->leadin_ignore));
+
+	/*
+	 * Update the tail pointer to indicate data actually
+	 * transferred.
+	 */
+	skb_put(skb, (msg->size - msg->leadin_ignore - msg->tailout_ignore));
+
+	/*
+	 * Move the data over from the other side.
+	 */
+	if ((XPNET_VERSION_MINOR(msg->version) == 1) &&
+	    (msg->embedded_bytes != 0)) {
+		dev_dbg(xpnet, "copying embedded message. memcpy(0x%p, 0x%p, "
+			"%lu)\n", skb->data, &msg->data,
+			(size_t)msg->embedded_bytes);
+
+		skb_copy_to_linear_data(skb, &msg->data,
+					(size_t)msg->embedded_bytes);
+	} else {
+		dev_dbg(xpnet, "transferring buffer to the skb->data area;\n\t"
+			"bte_copy(0x%p, 0x%p, %hu)\n", (void *)msg->buf_pa,
+			(void *)__pa((u64)skb->data & ~(L1_CACHE_BYTES - 1)),
+			msg->size);
+
+		bret = bte_copy(msg->buf_pa,
+				__pa((u64)skb->data & ~(L1_CACHE_BYTES - 1)),
+				msg->size, (BTE_NOTIFY | BTE_WACQUIRE), NULL);
+
+		if (bret != BTE_SUCCESS) {
+			/*
+			 * >>> Need better way of cleaning skb.  Currently skb
+			 * >>> appears in_use and we can't just call
+			 * >>> dev_kfree_skb. 
+ */ + dev_err(xpnet, "bte_copy(0x%p, 0x%p, 0x%hx) returned " + "error=0x%x\n", (void *)msg->buf_pa, + (void *)__pa((u64)skb->data & + ~(L1_CACHE_BYTES - 1)), + msg->size, bret); + + xpc_received(partid, channel, (void *)msg); + + priv->stats.rx_errors++; + + return; + } + } + + dev_dbg(xpnet, "<skb->head=0x%p skb->data=0x%p skb->tail=0x%p " + "skb->end=0x%p skb->len=%d\n", (void *)skb->head, + (void *)skb->data, skb_tail_pointer(skb), skb_end_pointer(skb), + skb->len); + + skb->protocol = eth_type_trans(skb, xpnet_device); + skb->ip_summed = CHECKSUM_UNNECESSARY; + + dev_dbg(xpnet, "passing skb to network layer\n" + KERN_DEBUG "\tskb->head=0x%p skb->data=0x%p skb->tail=0x%p " + "skb->end=0x%p skb->len=%d\n", + (void *)skb->head, (void *)skb->data, skb_tail_pointer(skb), + skb_end_pointer(skb), skb->len); + + xpnet_device->last_rx = jiffies; + priv->stats.rx_packets++; + priv->stats.rx_bytes += skb->len + ETH_HLEN; + + netif_rx_ni(skb); + xpc_received(partid, channel, (void *)msg); +} + +/* + * This is the handler which XPC calls during any sort of change in + * state or message reception on a connection. + */ +static void +xpnet_connection_activity(enum xpc_retval reason, partid_t partid, int channel, + void *data, void *key) +{ + long bp; + + DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS); + DBUG_ON(channel != XPC_NET_CHANNEL); + + switch (reason) { + case xpcMsgReceived: /* message received */ + DBUG_ON(data == NULL); + + xpnet_receive(partid, channel, (struct xpnet_message *)data); + break; + + case xpcConnected: /* connection completed to a partition */ + spin_lock_bh(&xpnet_broadcast_lock); + xpnet_broadcast_partitions |= 1UL << (partid - 1); + bp = xpnet_broadcast_partitions; + spin_unlock_bh(&xpnet_broadcast_lock); + + netif_carrier_on(xpnet_device); + + dev_dbg(xpnet, "%s connection created to partition %d; " + "xpnet_broadcast_partitions=0x%lx\n", + xpnet_device->name, partid, bp); + break; + + default: + spin_lock_bh(&xpnet_broadcast_lock); + xpnet_broadcast_partitions &= ~(1UL << (partid - 1)); + bp = xpnet_broadcast_partitions; + spin_unlock_bh(&xpnet_broadcast_lock); + + if (bp == 0) + netif_carrier_off(xpnet_device); + + dev_dbg(xpnet, "%s disconnected from partition %d; " + "xpnet_broadcast_partitions=0x%lx\n", + xpnet_device->name, partid, bp); + break; + + } +} + +static int +xpnet_dev_open(struct net_device *dev) +{ + enum xpc_retval ret; + + dev_dbg(xpnet, "calling xpc_connect(%d, 0x%p, NULL, %ld, %ld, %ld, " + "%ld)\n", XPC_NET_CHANNEL, xpnet_connection_activity, + XPNET_MSG_SIZE, XPNET_MSG_NENTRIES, XPNET_MAX_KTHREADS, + XPNET_MAX_IDLE_KTHREADS); + + ret = xpc_connect(XPC_NET_CHANNEL, xpnet_connection_activity, NULL, + XPNET_MSG_SIZE, XPNET_MSG_NENTRIES, + XPNET_MAX_KTHREADS, XPNET_MAX_IDLE_KTHREADS); + if (ret != xpcSuccess) { + dev_err(xpnet, "ifconfig up of %s failed on XPC connect, " + "ret=%d\n", dev->name, ret); + + return -ENOMEM; + } + + dev_dbg(xpnet, "ifconfig up of %s; XPC connected\n", dev->name); + + return 0; +} + +static int +xpnet_dev_stop(struct net_device *dev) +{ + xpc_disconnect(XPC_NET_CHANNEL); + + dev_dbg(xpnet, "ifconfig down of %s; XPC disconnected\n", dev->name); + + return 0; +} + +static int +xpnet_dev_change_mtu(struct net_device *dev, int new_mtu) +{ + /* 68 comes from min TCP+IP+MAC header */ + if ((new_mtu < 68) || (new_mtu > XPNET_MAX_MTU)) { + dev_err(xpnet, "ifconfig %s mtu %d failed; value must be " + "between 68 and %ld\n", dev->name, new_mtu, + XPNET_MAX_MTU); + return -EINVAL; + } + + dev->mtu = new_mtu; + dev_dbg(xpnet, 
"ifconfig %s mtu set to %d\n", dev->name, new_mtu); + return 0; +} + +/* + * Required for the net_device structure. + */ +static int +xpnet_dev_set_config(struct net_device *dev, struct ifmap *new_map) +{ + return 0; +} + +/* + * Return statistics to the caller. + */ +static struct net_device_stats * +xpnet_dev_get_stats(struct net_device *dev) +{ + struct xpnet_dev_private *priv; + + priv = (struct xpnet_dev_private *)dev->priv; + + return &priv->stats; +} + +/* + * Notification that the other end has received the message and + * DMA'd the skb information. At this point, they are done with + * our side. When all recipients are done processing, we + * release the skb and then release our pending message structure. + */ +static void +xpnet_send_completed(enum xpc_retval reason, partid_t partid, int channel, + void *__qm) +{ + struct xpnet_pending_msg *queued_msg = (struct xpnet_pending_msg *)__qm; + + DBUG_ON(queued_msg == NULL); + + dev_dbg(xpnet, "message to %d notified with reason %d\n", + partid, reason); + + if (atomic_dec_return(&queued_msg->use_count) == 0) { + dev_dbg(xpnet, "all acks for skb->head=-x%p\n", + (void *)queued_msg->skb->head); + + dev_kfree_skb_any(queued_msg->skb); + kfree(queued_msg); + } +} + +/* + * Network layer has formatted a packet (skb) and is ready to place it + * "on the wire". Prepare and send an xpnet_message to all partitions + * which have connected with us and are targets of this packet. + * + * MAC-NOTE: For the XPNET driver, the MAC address contains the + * destination partition_id. If the destination partition id word + * is 0xff, this packet is to broadcast to all partitions. + */ +static int +xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct xpnet_pending_msg *queued_msg; + enum xpc_retval ret; + struct xpnet_message *msg; + u64 start_addr, end_addr; + long dp; + u8 second_mac_octet; + partid_t dest_partid; + struct xpnet_dev_private *priv; + u16 embedded_bytes; + + priv = (struct xpnet_dev_private *)dev->priv; + + dev_dbg(xpnet, ">skb->head=0x%p skb->data=0x%p skb->tail=0x%p " + "skb->end=0x%p skb->len=%d\n", (void *)skb->head, + (void *)skb->data, skb_tail_pointer(skb), skb_end_pointer(skb), + skb->len); + + /* + * The xpnet_pending_msg tracks how many outstanding + * xpc_send_notifies are relying on this skb. When none + * remain, release the skb. + */ + queued_msg = kmalloc(sizeof(struct xpnet_pending_msg), GFP_ATOMIC); + if (queued_msg == NULL) { + dev_warn(xpnet, "failed to kmalloc %ld bytes; dropping " + "packet\n", sizeof(struct xpnet_pending_msg)); + + priv->stats.tx_errors++; + + return -ENOMEM; + } + + /* get the beginning of the first cacheline and end of last */ + start_addr = ((u64)skb->data & ~(L1_CACHE_BYTES - 1)); + end_addr = L1_CACHE_ALIGN((u64)skb_tail_pointer(skb)); + + /* calculate how many bytes to embed in the XPC message */ + embedded_bytes = 0; + if (unlikely(skb->len <= XPNET_MSG_DATA_MAX)) { + /* skb->data does fit so embed */ + embedded_bytes = skb->len; + } + + /* + * Since the send occurs asynchronously, we set the count to one + * and begin sending. Any sends that happen to complete before + * we are done sending will not free the skb. We will be left + * with that task during exit. This also handles the case of + * a packet destined for a partition which is no longer up. 
+ */ + atomic_set(&queued_msg->use_count, 1); + queued_msg->skb = skb; + + second_mac_octet = skb->data[XPNET_PARTID_OCTET]; + if (second_mac_octet == 0xff) { + /* we are being asked to broadcast to all partitions */ + dp = xpnet_broadcast_partitions; + } else if (second_mac_octet != 0) { + dp = xpnet_broadcast_partitions & + (1UL << (second_mac_octet - 1)); + } else { + /* 0 is an invalid partid. Ignore */ + dp = 0; + } + dev_dbg(xpnet, "destination Partitions mask (dp) = 0x%lx\n", dp); + + /* + * If we wanted to allow promiscuous mode to work like an + * unswitched network, this would be a good point to OR in a + * mask of partitions which should be receiving all packets. + */ + + /* + * Main send loop. + */ + for (dest_partid = 1; dp && dest_partid < XP_MAX_PARTITIONS; + dest_partid++) { + + if (!(dp & (1UL << (dest_partid - 1)))) { + /* not destined for this partition */ + continue; + } + + /* remove this partition from the destinations mask */ + dp &= ~(1UL << (dest_partid - 1)); + + /* found a partition to send to */ + + ret = xpc_allocate(dest_partid, XPC_NET_CHANNEL, + XPC_NOWAIT, (void **)&msg); + if (unlikely(ret != xpcSuccess)) + continue; + + msg->embedded_bytes = embedded_bytes; + if (unlikely(embedded_bytes != 0)) { + msg->version = XPNET_VERSION_EMBED; + dev_dbg(xpnet, "calling memcpy(0x%p, 0x%p, 0x%lx)\n", + &msg->data, skb->data, (size_t)embedded_bytes); + skb_copy_from_linear_data(skb, &msg->data, + (size_t)embedded_bytes); + } else { + msg->version = XPNET_VERSION; + } + msg->magic = XPNET_MAGIC; + msg->size = end_addr - start_addr; + msg->leadin_ignore = (u64)skb->data - start_addr; + msg->tailout_ignore = end_addr - (u64)skb_tail_pointer(skb); + msg->buf_pa = __pa(start_addr); + + dev_dbg(xpnet, "sending XPC message to %d:%d\n" + KERN_DEBUG "msg->buf_pa=0x%lx, msg->size=%u, " + "msg->leadin_ignore=%u, msg->tailout_ignore=%u\n", + dest_partid, XPC_NET_CHANNEL, msg->buf_pa, msg->size, + msg->leadin_ignore, msg->tailout_ignore); + + atomic_inc(&queued_msg->use_count); + + ret = xpc_send_notify(dest_partid, XPC_NET_CHANNEL, msg, + xpnet_send_completed, queued_msg); + if (unlikely(ret != xpcSuccess)) { + atomic_dec(&queued_msg->use_count); + continue; + } + } + + if (atomic_dec_return(&queued_msg->use_count) == 0) { + dev_dbg(xpnet, "no partitions to receive packet destined for " + "%d\n", dest_partid); + + dev_kfree_skb(skb); + kfree(queued_msg); + } + + priv->stats.tx_packets++; + priv->stats.tx_bytes += skb->len; + + return 0; +} + +/* + * Deal with transmit timeouts coming from the network layer. + */ +static void +xpnet_dev_tx_timeout(struct net_device *dev) +{ + struct xpnet_dev_private *priv; + + priv = (struct xpnet_dev_private *)dev->priv; + + priv->stats.tx_errors++; + return; +} + +static int __init +xpnet_init(void) +{ + int i; + u32 license_num; + int result = -ENOMEM; + + if (!ia64_platform_is("sn2")) + return -ENODEV; + + dev_info(xpnet, "registering network device %s\n", XPNET_DEVICE_NAME); + + /* + * use ether_setup() to init the majority of our device + * structure and then override the necessary pieces. 
+ */ + xpnet_device = alloc_netdev(sizeof(struct xpnet_dev_private), + XPNET_DEVICE_NAME, ether_setup); + if (xpnet_device == NULL) + return -ENOMEM; + + netif_carrier_off(xpnet_device); + + xpnet_device->mtu = XPNET_DEF_MTU; + xpnet_device->change_mtu = xpnet_dev_change_mtu; + xpnet_device->open = xpnet_dev_open; + xpnet_device->get_stats = xpnet_dev_get_stats; + xpnet_device->stop = xpnet_dev_stop; + xpnet_device->hard_start_xmit = xpnet_dev_hard_start_xmit; + xpnet_device->tx_timeout = xpnet_dev_tx_timeout; + xpnet_device->set_config = xpnet_dev_set_config; + + /* + * Multicast assumes the LSB of the first octet is set for multicast + * MAC addresses. We chose the first octet of the MAC to be unlikely + * to collide with any vendor's officially issued MAC. + */ + xpnet_device->dev_addr[0] = 0xfe; + xpnet_device->dev_addr[XPNET_PARTID_OCTET] = sn_partition_id; + license_num = sn_partition_serial_number_val(); + for (i = 3; i >= 0; i--) { + xpnet_device->dev_addr[XPNET_LICENSE_OCTET + i] = + license_num & 0xff; + license_num = license_num >> 8; + } + + /* + * ether_setup() sets this to a multicast device. We are + * really not supporting multicast at this time. + */ + xpnet_device->flags &= ~IFF_MULTICAST; + + /* + * No need to checksum as it is a DMA transfer. The BTE will + * report an error if the data is not retrievable and the + * packet will be dropped. + */ + xpnet_device->features = NETIF_F_NO_CSUM; + + result = register_netdev(xpnet_device); + if (result != 0) + free_netdev(xpnet_device); + + return result; +} + +module_init(xpnet_init); + +static void __exit +xpnet_exit(void) +{ + dev_info(xpnet, "unregistering network device %s\n", + xpnet_device[0].name); + + unregister_netdev(xpnet_device); + + free_netdev(xpnet_device); +} + +module_exit(xpnet_exit); + +MODULE_AUTHOR("Silicon Graphics, Inc."); +MODULE_DESCRIPTION("Cross Partition Network adapter (XPNET)"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/hamradio/dmascc.c b/drivers/net/hamradio/dmascc.c index e04bf992644..0b94833e23f 100644 --- a/drivers/net/hamradio/dmascc.c +++ b/drivers/net/hamradio/dmascc.c @@ -1083,15 +1083,12 @@ static void start_timer(struct scc_priv *priv, int t, int r15) if (t == 0) { tm_isr(priv); } else if (t > 0) { - save_flags(flags); - cli(); outb(t & 0xFF, priv->tmr_cnt); outb((t >> 8) & 0xFF, priv->tmr_cnt); if (priv->type != TYPE_TWIN) { write_scc(priv, R15, r15 | CTSIE); priv->rr0 |= CTS; } - restore_flags(flags); } } diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/iwlwifi/Kconfig index f844b738d34..c4e631d14bf 100644 --- a/drivers/net/wireless/iwlwifi/Kconfig +++ b/drivers/net/wireless/iwlwifi/Kconfig @@ -49,7 +49,9 @@ config IWL4965_HT config IWL4965_LEDS bool "Enable LEDS features in iwl4965 driver" - depends on IWL4965 && MAC80211_LEDS && LEDS_CLASS + depends on IWL4965 + select MAC80211_LEDS + select LEDS_CLASS select IWLWIFI_LEDS ---help--- This option enables LEDS for the iwlwifi drivers @@ -134,7 +136,9 @@ config IWL3945_SPECTRUM_MEASUREMENT config IWL3945_LEDS bool "Enable LEDS features in iwl3945 driver" - depends on IWL3945 && MAC80211_LEDS && LEDS_CLASS + depends on IWL3945 + select MAC80211_LEDS + select LEDS_CLASS ---help--- This option enables LEDS for the iwl3945 driver. 
diff --git a/drivers/net/wireless/iwlwifi/Makefile b/drivers/net/wireless/iwlwifi/Makefile index 4f3e88b12e3..ec6187b75c3 100644 --- a/drivers/net/wireless/iwlwifi/Makefile +++ b/drivers/net/wireless/iwlwifi/Makefile @@ -1,4 +1,4 @@ -obj-$(CONFIG_IWLCORE) := iwlcore.o +obj-$(CONFIG_IWLCORE) += iwlcore.o iwlcore-objs := iwl-core.o iwl-eeprom.o iwl-hcmd.o iwlcore-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-debugfs.o iwlcore-$(CONFIG_IWLWIFI_LEDS) += iwl-led.o diff --git a/drivers/net/wireless/rt2x00/Kconfig b/drivers/net/wireless/rt2x00/Kconfig index a1e3938cba9..ab1029e7988 100644 --- a/drivers/net/wireless/rt2x00/Kconfig +++ b/drivers/net/wireless/rt2x00/Kconfig @@ -60,7 +60,8 @@ config RT2400PCI_RFKILL config RT2400PCI_LEDS bool "RT2400 leds support" - depends on RT2400PCI && LEDS_CLASS + depends on RT2400PCI + select LEDS_CLASS select RT2X00_LIB_LEDS ---help--- This adds support for led triggers provided my mac80211. @@ -86,7 +87,8 @@ config RT2500PCI_RFKILL config RT2500PCI_LEDS bool "RT2500 leds support" - depends on RT2500PCI && LEDS_CLASS + depends on RT2500PCI + select LEDS_CLASS select RT2X00_LIB_LEDS ---help--- This adds support for led triggers provided my mac80211. @@ -114,7 +116,8 @@ config RT61PCI_RFKILL config RT61PCI_LEDS bool "RT61 leds support" - depends on RT61PCI && LEDS_CLASS + depends on RT61PCI + select LEDS_CLASS select RT2X00_LIB_LEDS ---help--- This adds support for led triggers provided my mac80211. @@ -130,7 +133,8 @@ config RT2500USB config RT2500USB_LEDS bool "RT2500 leds support" - depends on RT2500USB && LEDS_CLASS + depends on RT2500USB + select LEDS_CLASS select RT2X00_LIB_LEDS ---help--- This adds support for led triggers provided my mac80211. @@ -148,7 +152,8 @@ config RT73USB config RT73USB_LEDS bool "RT73 leds support" - depends on RT73USB && LEDS_CLASS + depends on RT73USB + select LEDS_CLASS select RT2X00_LIB_LEDS ---help--- This adds support for led triggers provided my mac80211. diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c index f9b7bdd2782..8ddb918f5f5 100644 --- a/drivers/pci/setup-bus.c +++ b/drivers/pci/setup-bus.c @@ -416,13 +416,13 @@ static void pci_bus_size_cardbus(struct pci_bus *bus) * Reserve some resources for CardBus. We reserve * a fixed amount of bus space for CardBus bridges. */ - b_res[0].start = pci_cardbus_io_size; - b_res[0].end = b_res[0].start + pci_cardbus_io_size - 1; - b_res[0].flags |= IORESOURCE_IO; + b_res[0].start = 0; + b_res[0].end = pci_cardbus_io_size - 1; + b_res[0].flags |= IORESOURCE_IO | IORESOURCE_SIZEALIGN; - b_res[1].start = pci_cardbus_io_size; - b_res[1].end = b_res[1].start + pci_cardbus_io_size - 1; - b_res[1].flags |= IORESOURCE_IO; + b_res[1].start = 0; + b_res[1].end = pci_cardbus_io_size - 1; + b_res[1].flags |= IORESOURCE_IO | IORESOURCE_SIZEALIGN; /* * Check whether prefetchable memory is supported @@ -441,17 +441,17 @@ static void pci_bus_size_cardbus(struct pci_bus *bus) * twice the size. 
*/ if (ctrl & PCI_CB_BRIDGE_CTL_PREFETCH_MEM0) { - b_res[2].start = pci_cardbus_mem_size; - b_res[2].end = b_res[2].start + pci_cardbus_mem_size - 1; - b_res[2].flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH; + b_res[2].start = 0; + b_res[2].end = pci_cardbus_mem_size - 1; + b_res[2].flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH | IORESOURCE_SIZEALIGN; - b_res[3].start = pci_cardbus_mem_size; - b_res[3].end = b_res[3].start + pci_cardbus_mem_size - 1; - b_res[3].flags |= IORESOURCE_MEM; + b_res[3].start = 0; + b_res[3].end = pci_cardbus_mem_size - 1; + b_res[3].flags |= IORESOURCE_MEM | IORESOURCE_SIZEALIGN; } else { - b_res[3].start = pci_cardbus_mem_size * 2; - b_res[3].end = b_res[3].start + pci_cardbus_mem_size * 2 - 1; - b_res[3].flags |= IORESOURCE_MEM; + b_res[3].start = 0; + b_res[3].end = pci_cardbus_mem_size * 2 - 1; + b_res[3].flags |= IORESOURCE_MEM | IORESOURCE_SIZEALIGN; } } diff --git a/drivers/pcmcia/Kconfig b/drivers/pcmcia/Kconfig index ed8c0690480..8d8852651fd 100644 --- a/drivers/pcmcia/Kconfig +++ b/drivers/pcmcia/Kconfig @@ -200,7 +200,6 @@ config PCMCIA_AU1X00 config PCMCIA_SA1100 tristate "SA1100 support" depends on ARM && ARCH_SA1100 && PCMCIA - depends on ARCH_LUBBOCK || MACH_MAINSTONE || PXA_SHARPSL || MACH_ARMCORE help Say Y here to include support for SA11x0-based PCMCIA or CF sockets, found on HP iPAQs, Yopy, and other StrongARM(R)/ @@ -221,6 +220,7 @@ config PCMCIA_SA1111 config PCMCIA_PXA2XX tristate "PXA2xx support" depends on ARM && ARCH_PXA && PCMCIA + depends on ARCH_LUBBOCK || MACH_MAINSTONE || PXA_SHARPSL || MACH_ARMCORE help Say Y here to include support for the PXA2xx PCMCIA controller diff --git a/drivers/pnp/pnpacpi/rsparser.c b/drivers/pnp/pnpacpi/rsparser.c index 2dcd1960aca..98cbc9f18ee 100644 --- a/drivers/pnp/pnpacpi/rsparser.c +++ b/drivers/pnp/pnpacpi/rsparser.c @@ -84,10 +84,12 @@ static void pnpacpi_parse_allocated_irqresource(struct pnp_resource_table *res, while (!(res->irq_resource[i].flags & IORESOURCE_UNSET) && i < PNP_MAX_IRQ) i++; - if (i >= PNP_MAX_IRQ && !warned) { - printk(KERN_WARNING "pnpacpi: exceeded the max number of IRQ " - "resources: %d \n", PNP_MAX_IRQ); - warned = 1; + if (i >= PNP_MAX_IRQ) { + if (!warned) { + printk(KERN_WARNING "pnpacpi: exceeded the max number" + " of IRQ resources: %d\n", PNP_MAX_IRQ); + warned = 1; + } return; } /* |