Diffstat (limited to 'drivers/block/drbd/drbd_main.c')
 drivers/block/drbd/drbd_main.c | 2009 ++++++++++++++++++++++++-----------------
 1 file changed, 1076 insertions(+), 933 deletions(-)
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 929468e1512..331e5cc1227 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -52,16 +52,12 @@
#include <linux/drbd_limits.h>
#include "drbd_int.h"
+#include "drbd_protocol.h"
#include "drbd_req.h" /* only for _req_mod in tl_release and tl_clear */
#include "drbd_vli.h"
static DEFINE_MUTEX(drbd_main_mutex);
-int drbdd_init(struct drbd_thread *);
-int drbd_worker(struct drbd_thread *);
-int drbd_asender(struct drbd_thread *);
-
-int drbd_init(void);
static int drbd_open(struct block_device *bdev, fmode_t mode);
static void drbd_release(struct gendisk *gd, fmode_t mode);
static int w_md_sync(struct drbd_work *w, int unused);
@@ -118,8 +114,8 @@ module_param_string(usermode_helper, usermode_helper, sizeof(usermode_helper), 0
/* in 2.6.x, our device mapping and config info contains our virtual gendisks
* as member "struct gendisk *vdisk;"
*/
-struct idr minors;
-struct list_head drbd_tconns; /* list of struct drbd_tconn */
+struct idr drbd_devices;
+struct list_head drbd_resources;
struct kmem_cache *drbd_request_cache;
struct kmem_cache *drbd_ee_cache; /* peer requests */
@@ -166,15 +162,15 @@ struct bio *bio_alloc_drbd(gfp_t gfp_mask)
/* When checking with sparse, and this is an inline function, sparse will
give tons of false positives. When this is a real function, sparse works.
*/
-int _get_ldev_if_state(struct drbd_conf *mdev, enum drbd_disk_state mins)
+int _get_ldev_if_state(struct drbd_device *device, enum drbd_disk_state mins)
{
int io_allowed;
- atomic_inc(&mdev->local_cnt);
- io_allowed = (mdev->state.disk >= mins);
+ atomic_inc(&device->local_cnt);
+ io_allowed = (device->state.disk >= mins);
if (!io_allowed) {
- if (atomic_dec_and_test(&mdev->local_cnt))
- wake_up(&mdev->misc_wait);
+ if (atomic_dec_and_test(&device->local_cnt))
+ wake_up(&device->misc_wait);
}
return io_allowed;
}
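
The refactored function keeps the original ordering: take the reference first, check the disk state second, so a detach that waits for local_cnt to drain cannot slip past the check. A minimal caller sketch of the get/put pairing this relies on (the same pattern recurs throughout this file, e.g. in _drbd_send_uuids()):

	if (get_ldev_if_state(device, D_NEGOTIATING)) {
		/* device->ldev is pinned here until the matching put */
		put_ldev(device);
	}
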
@@ -183,7 +179,7 @@ int _get_ldev_if_state(struct drbd_conf *mdev, enum drbd_disk_state mins)
/**
* tl_release() - mark as BARRIER_ACKED all requests in the corresponding transfer log epoch
- * @tconn: DRBD connection.
+ * @connection: DRBD connection.
* @barrier_nr: Expected identifier of the DRBD write barrier packet.
* @set_size: Expected number of requests before that barrier.
*
@@ -191,7 +187,7 @@ int _get_ldev_if_state(struct drbd_conf *mdev, enum drbd_disk_state mins)
* epoch of not yet barrier-acked requests, this function will cause a
* termination of the connection.
*/
-void tl_release(struct drbd_tconn *tconn, unsigned int barrier_nr,
+void tl_release(struct drbd_connection *connection, unsigned int barrier_nr,
unsigned int set_size)
{
struct drbd_request *r;
@@ -199,11 +195,11 @@ void tl_release(struct drbd_tconn *tconn, unsigned int barrier_nr,
int expect_epoch = 0;
int expect_size = 0;
- spin_lock_irq(&tconn->req_lock);
+ spin_lock_irq(&connection->resource->req_lock);
/* find oldest not yet barrier-acked write request,
* count writes in its epoch. */
- list_for_each_entry(r, &tconn->transfer_log, tl_requests) {
+ list_for_each_entry(r, &connection->transfer_log, tl_requests) {
const unsigned s = r->rq_state;
if (!req) {
if (!(s & RQ_WRITE))
@@ -228,18 +224,18 @@ void tl_release(struct drbd_tconn *tconn, unsigned int barrier_nr,
/* first some paranoia code */
if (req == NULL) {
- conn_err(tconn, "BAD! BarrierAck #%u received, but no epoch in tl!?\n",
+ drbd_err(connection, "BAD! BarrierAck #%u received, but no epoch in tl!?\n",
barrier_nr);
goto bail;
}
if (expect_epoch != barrier_nr) {
- conn_err(tconn, "BAD! BarrierAck #%u received, expected #%u!\n",
+ drbd_err(connection, "BAD! BarrierAck #%u received, expected #%u!\n",
barrier_nr, expect_epoch);
goto bail;
}
if (expect_size != set_size) {
- conn_err(tconn, "BAD! BarrierAck #%u received with n_writes=%u, expected n_writes=%u!\n",
+ drbd_err(connection, "BAD! BarrierAck #%u received with n_writes=%u, expected n_writes=%u!\n",
barrier_nr, set_size, expect_size);
goto bail;
}
@@ -248,90 +244,91 @@ void tl_release(struct drbd_tconn *tconn, unsigned int barrier_nr,
/* this extra list walk restart is paranoia,
* to catch requests being barrier-acked "unexpectedly".
* It usually should find the same req again, or some READ preceding it. */
- list_for_each_entry(req, &tconn->transfer_log, tl_requests)
+ list_for_each_entry(req, &connection->transfer_log, tl_requests)
if (req->epoch == expect_epoch)
break;
- list_for_each_entry_safe_from(req, r, &tconn->transfer_log, tl_requests) {
+ list_for_each_entry_safe_from(req, r, &connection->transfer_log, tl_requests) {
if (req->epoch != expect_epoch)
break;
_req_mod(req, BARRIER_ACKED);
}
- spin_unlock_irq(&tconn->req_lock);
+ spin_unlock_irq(&connection->resource->req_lock);
return;
bail:
- spin_unlock_irq(&tconn->req_lock);
- conn_request_state(tconn, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
+ spin_unlock_irq(&connection->resource->req_lock);
+ conn_request_state(connection, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
}
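
tl_release() is the sender-side half of the write-barrier handshake: the peer counts the writes it completed for one epoch and reports the epoch number and count back, and both must match what is still queued in the transfer log. A sketch of the peer side, assuming the receiver uses the drbd_send_b_ack() helper defined further down in this file (epoch_size is a placeholder for the peer's write count):

	/* peer, after completing all writes of one epoch: */
	drbd_send_b_ack(connection, barrier_nr, epoch_size);
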
/**
* _tl_restart() - Walks the transfer log, and applies an action to all requests
- * @mdev: DRBD device.
+ * @device: DRBD device.
* @what: The action/event to perform with all request objects
*
* @what might be one of CONNECTION_LOST_WHILE_PENDING, RESEND, FAIL_FROZEN_DISK_IO,
* RESTART_FROZEN_DISK_IO.
*/
/* must hold resource->req_lock */
-void _tl_restart(struct drbd_tconn *tconn, enum drbd_req_event what)
+void _tl_restart(struct drbd_connection *connection, enum drbd_req_event what)
{
struct drbd_request *req, *r;
- list_for_each_entry_safe(req, r, &tconn->transfer_log, tl_requests)
+ list_for_each_entry_safe(req, r, &connection->transfer_log, tl_requests)
_req_mod(req, what);
}
-void tl_restart(struct drbd_tconn *tconn, enum drbd_req_event what)
+void tl_restart(struct drbd_connection *connection, enum drbd_req_event what)
{
- spin_lock_irq(&tconn->req_lock);
- _tl_restart(tconn, what);
- spin_unlock_irq(&tconn->req_lock);
+ spin_lock_irq(&connection->resource->req_lock);
+ _tl_restart(connection, what);
+ spin_unlock_irq(&connection->resource->req_lock);
}
/**
* tl_clear() - Clears all requests and &struct drbd_tl_epoch objects out of the TL
- * @mdev: DRBD device.
+ * @device: DRBD device.
*
* This is called after the connection to the peer was lost. The storage covered
by the requests on the transfer log gets marked as out of sync. Called from the
* receiver thread and the worker thread.
*/
-void tl_clear(struct drbd_tconn *tconn)
+void tl_clear(struct drbd_connection *connection)
{
- tl_restart(tconn, CONNECTION_LOST_WHILE_PENDING);
+ tl_restart(connection, CONNECTION_LOST_WHILE_PENDING);
}
/**
- * tl_abort_disk_io() - Abort disk I/O for all requests for a certain mdev in the TL
- * @mdev: DRBD device.
+ * tl_abort_disk_io() - Abort disk I/O for all requests for a certain device in the TL
+ * @device: DRBD device.
*/
-void tl_abort_disk_io(struct drbd_conf *mdev)
+void tl_abort_disk_io(struct drbd_device *device)
{
- struct drbd_tconn *tconn = mdev->tconn;
+ struct drbd_connection *connection = first_peer_device(device)->connection;
struct drbd_request *req, *r;
- spin_lock_irq(&tconn->req_lock);
- list_for_each_entry_safe(req, r, &tconn->transfer_log, tl_requests) {
+ spin_lock_irq(&connection->resource->req_lock);
+ list_for_each_entry_safe(req, r, &connection->transfer_log, tl_requests) {
if (!(req->rq_state & RQ_LOCAL_PENDING))
continue;
- if (req->w.mdev != mdev)
+ if (req->device != device)
continue;
_req_mod(req, ABORT_DISK_IO);
}
- spin_unlock_irq(&tconn->req_lock);
+ spin_unlock_irq(&connection->resource->req_lock);
}
static int drbd_thread_setup(void *arg)
{
struct drbd_thread *thi = (struct drbd_thread *) arg;
- struct drbd_tconn *tconn = thi->tconn;
+ struct drbd_resource *resource = thi->resource;
unsigned long flags;
int retval;
snprintf(current->comm, sizeof(current->comm), "drbd_%c_%s",
- thi->name[0], thi->tconn->name);
+ thi->name[0],
+ resource->name);
restart:
retval = thi->function(thi);
@@ -349,7 +346,7 @@ restart:
*/
if (thi->t_state == RESTARTING) {
- conn_info(tconn, "Restarting %s thread\n", thi->name);
+ drbd_info(resource, "Restarting %s thread\n", thi->name);
thi->t_state = RUNNING;
spin_unlock_irqrestore(&thi->t_lock, flags);
goto restart;
@@ -361,29 +358,32 @@ restart:
complete_all(&thi->stop);
spin_unlock_irqrestore(&thi->t_lock, flags);
- conn_info(tconn, "Terminating %s\n", current->comm);
+ drbd_info(resource, "Terminating %s\n", current->comm);
/* Release mod reference taken when thread was started */
- kref_put(&tconn->kref, &conn_destroy);
+ if (thi->connection)
+ kref_put(&thi->connection->kref, drbd_destroy_connection);
+ kref_put(&resource->kref, drbd_destroy_resource);
module_put(THIS_MODULE);
return retval;
}
-static void drbd_thread_init(struct drbd_tconn *tconn, struct drbd_thread *thi,
- int (*func) (struct drbd_thread *), char *name)
+static void drbd_thread_init(struct drbd_resource *resource, struct drbd_thread *thi,
+ int (*func) (struct drbd_thread *), const char *name)
{
spin_lock_init(&thi->t_lock);
thi->task = NULL;
thi->t_state = NONE;
thi->function = func;
- thi->tconn = tconn;
- strncpy(thi->name, name, ARRAY_SIZE(thi->name));
+ thi->resource = resource;
+ thi->connection = NULL;
+ thi->name = name;
}
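
Threads are now owned by the resource, and thi->connection starts out NULL here; judging from the kref handling in drbd_thread_setup() and drbd_thread_start(), connection-scoped threads (receiver, asender) are expected to have the back pointer filled in after init. A sketch under that assumption, reusing the drbdd_init() receiver function whose forward declaration was removed above:

	drbd_thread_init(resource, &connection->receiver, drbdd_init, "receiver");
	connection->receiver.connection = connection;	/* start/exit take an extra connection kref */
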
int drbd_thread_start(struct drbd_thread *thi)
{
- struct drbd_tconn *tconn = thi->tconn;
+ struct drbd_resource *resource = thi->resource;
struct task_struct *nt;
unsigned long flags;
@@ -393,17 +393,19 @@ int drbd_thread_start(struct drbd_thread *thi)
switch (thi->t_state) {
case NONE:
- conn_info(tconn, "Starting %s thread (from %s [%d])\n",
+ drbd_info(resource, "Starting %s thread (from %s [%d])\n",
thi->name, current->comm, current->pid);
/* Get ref on module for thread - this is released when thread exits */
if (!try_module_get(THIS_MODULE)) {
- conn_err(tconn, "Failed to get module reference in drbd_thread_start\n");
+ drbd_err(resource, "Failed to get module reference in drbd_thread_start\n");
spin_unlock_irqrestore(&thi->t_lock, flags);
return false;
}
- kref_get(&thi->tconn->kref);
+ kref_get(&resource->kref);
+ if (thi->connection)
+ kref_get(&thi->connection->kref);
init_completion(&thi->stop);
thi->reset_cpu_mask = 1;
@@ -412,12 +414,14 @@ int drbd_thread_start(struct drbd_thread *thi)
flush_signals(current); /* otherw. may get -ERESTARTNOINTR */
nt = kthread_create(drbd_thread_setup, (void *) thi,
- "drbd_%c_%s", thi->name[0], thi->tconn->name);
+ "drbd_%c_%s", thi->name[0], thi->resource->name);
if (IS_ERR(nt)) {
- conn_err(tconn, "Couldn't start thread\n");
+ drbd_err(resource, "Couldn't start thread\n");
- kref_put(&tconn->kref, &conn_destroy);
+ if (thi->connection)
+ kref_put(&thi->connection->kref, drbd_destroy_connection);
+ kref_put(&resource->kref, drbd_destroy_resource);
module_put(THIS_MODULE);
return false;
}
@@ -429,7 +433,7 @@ int drbd_thread_start(struct drbd_thread *thi)
break;
case EXITING:
thi->t_state = RESTARTING;
- conn_info(tconn, "Restarting %s thread (from %s [%d])\n",
+ drbd_info(resource, "Restarting %s thread (from %s [%d])\n",
thi->name, current->comm, current->pid);
/* fall through */
case RUNNING:
@@ -478,65 +482,60 @@ void _drbd_thread_stop(struct drbd_thread *thi, int restart, int wait)
wait_for_completion(&thi->stop);
}
-static struct drbd_thread *drbd_task_to_thread(struct drbd_tconn *tconn, struct task_struct *task)
-{
- struct drbd_thread *thi =
- task == tconn->receiver.task ? &tconn->receiver :
- task == tconn->asender.task ? &tconn->asender :
- task == tconn->worker.task ? &tconn->worker : NULL;
-
- return thi;
-}
-
-char *drbd_task_to_thread_name(struct drbd_tconn *tconn, struct task_struct *task)
+int conn_lowest_minor(struct drbd_connection *connection)
{
- struct drbd_thread *thi = drbd_task_to_thread(tconn, task);
- return thi ? thi->name : task->comm;
-}
-
-int conn_lowest_minor(struct drbd_tconn *tconn)
-{
- struct drbd_conf *mdev;
- int vnr = 0, m;
+ struct drbd_peer_device *peer_device;
+ int vnr = 0, minor = -1;
rcu_read_lock();
- mdev = idr_get_next(&tconn->volumes, &vnr);
- m = mdev ? mdev_to_minor(mdev) : -1;
+ peer_device = idr_get_next(&connection->peer_devices, &vnr);
+ if (peer_device)
+ minor = device_to_minor(peer_device->device);
rcu_read_unlock();
- return m;
+ return minor;
}
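
Only the first volume is needed here, so one idr_get_next() under RCU suffices; walking every volume of a connection would use the stock IDR iterator over the same peer_devices map. A sketch:

	int vnr;
	struct drbd_peer_device *peer_device;

	rcu_read_lock();
	idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
		/* peer_device->device is one volume of this connection */
	}
	rcu_read_unlock();
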
#ifdef CONFIG_SMP
/**
* drbd_calc_cpu_mask() - Generate CPU masks, spread over all CPUs
- * @mdev: DRBD device.
*
- * Forces all threads of a device onto the same CPU. This is beneficial for
+ * Forces all threads of a resource onto the same CPU. This is beneficial for
DRBD's performance. May be overridden by the user's configuration.
*/
-void drbd_calc_cpu_mask(struct drbd_tconn *tconn)
+static void drbd_calc_cpu_mask(cpumask_var_t *cpu_mask)
{
- int ord, cpu;
+ unsigned int *resources_per_cpu, min_index = ~0;
- /* user override. */
- if (cpumask_weight(tconn->cpu_mask))
- return;
+ resources_per_cpu = kzalloc(nr_cpu_ids * sizeof(*resources_per_cpu), GFP_KERNEL);
+ if (resources_per_cpu) {
+ struct drbd_resource *resource;
+ unsigned int cpu, min = ~0;
- ord = conn_lowest_minor(tconn) % cpumask_weight(cpu_online_mask);
- for_each_online_cpu(cpu) {
- if (ord-- == 0) {
- cpumask_set_cpu(cpu, tconn->cpu_mask);
- return;
+ rcu_read_lock();
+ for_each_resource_rcu(resource, &drbd_resources) {
+ for_each_cpu(cpu, resource->cpu_mask)
+ resources_per_cpu[cpu]++;
}
+ rcu_read_unlock();
+ for_each_online_cpu(cpu) {
+ if (resources_per_cpu[cpu] < min) {
+ min = resources_per_cpu[cpu];
+ min_index = cpu;
+ }
+ }
+ kfree(resources_per_cpu);
}
- /* should not be reached */
- cpumask_setall(tconn->cpu_mask);
+ if (min_index == ~0) {
+ cpumask_setall(*cpu_mask);
+ return;
+ }
+ cpumask_set_cpu(min_index, *cpu_mask);
}
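
The rewrite drops the old "minor modulo CPU count" placement: it now counts how many resources are already pinned to each CPU and picks the least loaded online one. For example, with CPUs 0-3 online and three existing resources pinned to CPU 0, CPU 0 and CPU 1, the counts are [2, 1, 0, 0] and the next resource lands on CPU 2. If the counting array cannot be allocated (or no online CPU is found), the mask falls back to all CPUs.
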
/**
* drbd_thread_current_set_cpu() - modifies the cpu mask of the _current_ thread
- * @mdev: DRBD device.
+ * @device: DRBD device.
* @thi: drbd_thread object
*
* call in the "main loop" of _all_ threads, no need for any mutex, current won't die
@@ -544,13 +543,16 @@ void drbd_calc_cpu_mask(struct drbd_tconn *tconn)
*/
void drbd_thread_current_set_cpu(struct drbd_thread *thi)
{
+ struct drbd_resource *resource = thi->resource;
struct task_struct *p = current;
if (!thi->reset_cpu_mask)
return;
thi->reset_cpu_mask = 0;
- set_cpus_allowed_ptr(p, thi->tconn->cpu_mask);
+ set_cpus_allowed_ptr(p, resource->cpu_mask);
}
+#else
+#define drbd_calc_cpu_mask(A) ({})
#endif
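
On !CONFIG_SMP builds the ({}) stub, a GCC statement expression that expands to nothing, lets callers compile unchanged without any #ifdef of their own. A hypothetical call-site sketch (the real caller lives outside this file; the user-override check is an assumption modeled on the old code above):

	if (!cpumask_weight(resource->cpu_mask))	/* no user-configured mask */
		drbd_calc_cpu_mask(&resource->cpu_mask);
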
/**
@@ -560,9 +562,9 @@ void drbd_thread_current_set_cpu(struct drbd_thread *thi)
* word aligned on 64-bit architectures. (The bitmap send and receive code
* relies on this.)
*/
-unsigned int drbd_header_size(struct drbd_tconn *tconn)
+unsigned int drbd_header_size(struct drbd_connection *connection)
{
- if (tconn->agreed_pro_version >= 100) {
+ if (connection->agreed_pro_version >= 100) {
BUILD_BUG_ON(!IS_ALIGNED(sizeof(struct p_header100), 8));
return sizeof(struct p_header100);
} else {
@@ -600,44 +602,44 @@ static unsigned int prepare_header100(struct p_header100 *h, enum drbd_packet cm
return sizeof(struct p_header100);
}
-static unsigned int prepare_header(struct drbd_tconn *tconn, int vnr,
+static unsigned int prepare_header(struct drbd_connection *connection, int vnr,
void *buffer, enum drbd_packet cmd, int size)
{
- if (tconn->agreed_pro_version >= 100)
+ if (connection->agreed_pro_version >= 100)
return prepare_header100(buffer, cmd, size, vnr);
- else if (tconn->agreed_pro_version >= 95 &&
+ else if (connection->agreed_pro_version >= 95 &&
size > DRBD_MAX_SIZE_H80_PACKET)
return prepare_header95(buffer, cmd, size);
else
return prepare_header80(buffer, cmd, size);
}
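
Which header goes on the wire is a pure function of the agreed protocol version and the payload size: protocol 100 and newer always uses p_header100 (the only variant that carries the volume number), protocols 95-99 switch to p_header95 only when the payload would overflow the old length field (size > DRBD_MAX_SIZE_H80_PACKET), and everything else keeps the legacy p_header80. A large P_BITMAP packet on protocol 96 therefore takes the 95 header, while a P_PING on the same link still fits the 80 header.
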
-static void *__conn_prepare_command(struct drbd_tconn *tconn,
+static void *__conn_prepare_command(struct drbd_connection *connection,
struct drbd_socket *sock)
{
if (!sock->socket)
return NULL;
- return sock->sbuf + drbd_header_size(tconn);
+ return sock->sbuf + drbd_header_size(connection);
}
-void *conn_prepare_command(struct drbd_tconn *tconn, struct drbd_socket *sock)
+void *conn_prepare_command(struct drbd_connection *connection, struct drbd_socket *sock)
{
void *p;
mutex_lock(&sock->mutex);
- p = __conn_prepare_command(tconn, sock);
+ p = __conn_prepare_command(connection, sock);
if (!p)
mutex_unlock(&sock->mutex);
return p;
}
-void *drbd_prepare_command(struct drbd_conf *mdev, struct drbd_socket *sock)
+void *drbd_prepare_command(struct drbd_peer_device *peer_device, struct drbd_socket *sock)
{
- return conn_prepare_command(mdev->tconn, sock);
+ return conn_prepare_command(peer_device->connection, sock);
}
-static int __send_command(struct drbd_tconn *tconn, int vnr,
+static int __send_command(struct drbd_connection *connection, int vnr,
struct drbd_socket *sock, enum drbd_packet cmd,
unsigned int header_size, void *data,
unsigned int size)
@@ -654,82 +656,82 @@ static int __send_command(struct drbd_tconn *tconn, int vnr,
*/
msg_flags = data ? MSG_MORE : 0;
- header_size += prepare_header(tconn, vnr, sock->sbuf, cmd,
+ header_size += prepare_header(connection, vnr, sock->sbuf, cmd,
header_size + size);
- err = drbd_send_all(tconn, sock->socket, sock->sbuf, header_size,
+ err = drbd_send_all(connection, sock->socket, sock->sbuf, header_size,
msg_flags);
if (data && !err)
- err = drbd_send_all(tconn, sock->socket, data, size, 0);
+ err = drbd_send_all(connection, sock->socket, data, size, 0);
return err;
}
-static int __conn_send_command(struct drbd_tconn *tconn, struct drbd_socket *sock,
+static int __conn_send_command(struct drbd_connection *connection, struct drbd_socket *sock,
enum drbd_packet cmd, unsigned int header_size,
void *data, unsigned int size)
{
- return __send_command(tconn, 0, sock, cmd, header_size, data, size);
+ return __send_command(connection, 0, sock, cmd, header_size, data, size);
}
-int conn_send_command(struct drbd_tconn *tconn, struct drbd_socket *sock,
+int conn_send_command(struct drbd_connection *connection, struct drbd_socket *sock,
enum drbd_packet cmd, unsigned int header_size,
void *data, unsigned int size)
{
int err;
- err = __conn_send_command(tconn, sock, cmd, header_size, data, size);
+ err = __conn_send_command(connection, sock, cmd, header_size, data, size);
mutex_unlock(&sock->mutex);
return err;
}
-int drbd_send_command(struct drbd_conf *mdev, struct drbd_socket *sock,
+int drbd_send_command(struct drbd_peer_device *peer_device, struct drbd_socket *sock,
enum drbd_packet cmd, unsigned int header_size,
void *data, unsigned int size)
{
int err;
- err = __send_command(mdev->tconn, mdev->vnr, sock, cmd, header_size,
- data, size);
+ err = __send_command(peer_device->connection, peer_device->device->vnr,
+ sock, cmd, header_size, data, size);
mutex_unlock(&sock->mutex);
return err;
}
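
These helpers hand sock->mutex across calls: *prepare_command() returns with the mutex held (and releases it itself when there is no socket), while the matching *send_command() drops it after transmission. Every sender in this file follows the same sketch:

	p = drbd_prepare_command(peer_device, sock);
	if (!p)
		return -EIO;	/* prepare already dropped sock->mutex */
	/* fill in *p ... */
	return drbd_send_command(peer_device, sock, cmd, sizeof(*p), NULL, 0);
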
-int drbd_send_ping(struct drbd_tconn *tconn)
+int drbd_send_ping(struct drbd_connection *connection)
{
struct drbd_socket *sock;
- sock = &tconn->meta;
- if (!conn_prepare_command(tconn, sock))
+ sock = &connection->meta;
+ if (!conn_prepare_command(connection, sock))
return -EIO;
- return conn_send_command(tconn, sock, P_PING, 0, NULL, 0);
+ return conn_send_command(connection, sock, P_PING, 0, NULL, 0);
}
-int drbd_send_ping_ack(struct drbd_tconn *tconn)
+int drbd_send_ping_ack(struct drbd_connection *connection)
{
struct drbd_socket *sock;
- sock = &tconn->meta;
- if (!conn_prepare_command(tconn, sock))
+ sock = &connection->meta;
+ if (!conn_prepare_command(connection, sock))
return -EIO;
- return conn_send_command(tconn, sock, P_PING_ACK, 0, NULL, 0);
+ return conn_send_command(connection, sock, P_PING_ACK, 0, NULL, 0);
}
-int drbd_send_sync_param(struct drbd_conf *mdev)
+int drbd_send_sync_param(struct drbd_peer_device *peer_device)
{
struct drbd_socket *sock;
struct p_rs_param_95 *p;
int size;
- const int apv = mdev->tconn->agreed_pro_version;
+ const int apv = peer_device->connection->agreed_pro_version;
enum drbd_packet cmd;
struct net_conf *nc;
struct disk_conf *dc;
- sock = &mdev->tconn->data;
- p = drbd_prepare_command(mdev, sock);
+ sock = &peer_device->connection->data;
+ p = drbd_prepare_command(peer_device, sock);
if (!p)
return -EIO;
rcu_read_lock();
- nc = rcu_dereference(mdev->tconn->net_conf);
+ nc = rcu_dereference(peer_device->connection->net_conf);
size = apv <= 87 ? sizeof(struct p_rs_param)
: apv == 88 ? sizeof(struct p_rs_param)
@@ -742,14 +744,14 @@ int drbd_send_sync_param(struct drbd_conf *mdev)
/* initialize verify_alg and csums_alg */
memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);
- if (get_ldev(mdev)) {
- dc = rcu_dereference(mdev->ldev->disk_conf);
+ if (get_ldev(peer_device->device)) {
+ dc = rcu_dereference(peer_device->device->ldev->disk_conf);
p->resync_rate = cpu_to_be32(dc->resync_rate);
p->c_plan_ahead = cpu_to_be32(dc->c_plan_ahead);
p->c_delay_target = cpu_to_be32(dc->c_delay_target);
p->c_fill_target = cpu_to_be32(dc->c_fill_target);
p->c_max_rate = cpu_to_be32(dc->c_max_rate);
- put_ldev(mdev);
+ put_ldev(peer_device->device);
} else {
p->resync_rate = cpu_to_be32(DRBD_RESYNC_RATE_DEF);
p->c_plan_ahead = cpu_to_be32(DRBD_C_PLAN_AHEAD_DEF);
@@ -764,33 +766,33 @@ int drbd_send_sync_param(struct drbd_conf *mdev)
strcpy(p->csums_alg, nc->csums_alg);
rcu_read_unlock();
- return drbd_send_command(mdev, sock, cmd, size, NULL, 0);
+ return drbd_send_command(peer_device, sock, cmd, size, NULL, 0);
}
-int __drbd_send_protocol(struct drbd_tconn *tconn, enum drbd_packet cmd)
+int __drbd_send_protocol(struct drbd_connection *connection, enum drbd_packet cmd)
{
struct drbd_socket *sock;
struct p_protocol *p;
struct net_conf *nc;
int size, cf;
- sock = &tconn->data;
- p = __conn_prepare_command(tconn, sock);
+ sock = &connection->data;
+ p = __conn_prepare_command(connection, sock);
if (!p)
return -EIO;
rcu_read_lock();
- nc = rcu_dereference(tconn->net_conf);
+ nc = rcu_dereference(connection->net_conf);
- if (nc->tentative && tconn->agreed_pro_version < 92) {
+ if (nc->tentative && connection->agreed_pro_version < 92) {
rcu_read_unlock();
mutex_unlock(&sock->mutex);
- conn_err(tconn, "--dry-run is not supported by peer");
+ drbd_err(connection, "--dry-run is not supported by peer");
return -EOPNOTSUPP;
}
size = sizeof(*p);
- if (tconn->agreed_pro_version >= 87)
+ if (connection->agreed_pro_version >= 87)
size += strlen(nc->integrity_alg) + 1;
p->protocol = cpu_to_be32(nc->wire_protocol);
@@ -805,128 +807,131 @@ int __drbd_send_protocol(struct drbd_tconn *tconn, enum drbd_packet cmd)
cf |= CF_DRY_RUN;
p->conn_flags = cpu_to_be32(cf);
- if (tconn->agreed_pro_version >= 87)
+ if (connection->agreed_pro_version >= 87)
strcpy(p->integrity_alg, nc->integrity_alg);
rcu_read_unlock();
- return __conn_send_command(tconn, sock, cmd, size, NULL, 0);
+ return __conn_send_command(connection, sock, cmd, size, NULL, 0);
}
-int drbd_send_protocol(struct drbd_tconn *tconn)
+int drbd_send_protocol(struct drbd_connection *connection)
{
int err;
- mutex_lock(&tconn->data.mutex);
- err = __drbd_send_protocol(tconn, P_PROTOCOL);
- mutex_unlock(&tconn->data.mutex);
+ mutex_lock(&connection->data.mutex);
+ err = __drbd_send_protocol(connection, P_PROTOCOL);
+ mutex_unlock(&connection->data.mutex);
return err;
}
-int _drbd_send_uuids(struct drbd_conf *mdev, u64 uuid_flags)
+static int _drbd_send_uuids(struct drbd_peer_device *peer_device, u64 uuid_flags)
{
+ struct drbd_device *device = peer_device->device;
struct drbd_socket *sock;
struct p_uuids *p;
int i;
- if (!get_ldev_if_state(mdev, D_NEGOTIATING))
+ if (!get_ldev_if_state(device, D_NEGOTIATING))
return 0;
- sock = &mdev->tconn->data;
- p = drbd_prepare_command(mdev, sock);
+ sock = &peer_device->connection->data;
+ p = drbd_prepare_command(peer_device, sock);
if (!p) {
- put_ldev(mdev);
+ put_ldev(device);
return -EIO;
}
- spin_lock_irq(&mdev->ldev->md.uuid_lock);
+ spin_lock_irq(&device->ldev->md.uuid_lock);
for (i = UI_CURRENT; i < UI_SIZE; i++)
- p->uuid[i] = cpu_to_be64(mdev->ldev->md.uuid[i]);
- spin_unlock_irq(&mdev->ldev->md.uuid_lock);
+ p->uuid[i] = cpu_to_be64(device->ldev->md.uuid[i]);
+ spin_unlock_irq(&device->ldev->md.uuid_lock);
- mdev->comm_bm_set = drbd_bm_total_weight(mdev);
- p->uuid[UI_SIZE] = cpu_to_be64(mdev->comm_bm_set);
+ device->comm_bm_set = drbd_bm_total_weight(device);
+ p->uuid[UI_SIZE] = cpu_to_be64(device->comm_bm_set);
rcu_read_lock();
- uuid_flags |= rcu_dereference(mdev->tconn->net_conf)->discard_my_data ? 1 : 0;
+ uuid_flags |= rcu_dereference(peer_device->connection->net_conf)->discard_my_data ? 1 : 0;
rcu_read_unlock();
- uuid_flags |= test_bit(CRASHED_PRIMARY, &mdev->flags) ? 2 : 0;
- uuid_flags |= mdev->new_state_tmp.disk == D_INCONSISTENT ? 4 : 0;
+ uuid_flags |= test_bit(CRASHED_PRIMARY, &device->flags) ? 2 : 0;
+ uuid_flags |= device->new_state_tmp.disk == D_INCONSISTENT ? 4 : 0;
p->uuid[UI_FLAGS] = cpu_to_be64(uuid_flags);
- put_ldev(mdev);
- return drbd_send_command(mdev, sock, P_UUIDS, sizeof(*p), NULL, 0);
+ put_ldev(device);
+ return drbd_send_command(peer_device, sock, P_UUIDS, sizeof(*p), NULL, 0);
}
-int drbd_send_uuids(struct drbd_conf *mdev)
+int drbd_send_uuids(struct drbd_peer_device *peer_device)
{
- return _drbd_send_uuids(mdev, 0);
+ return _drbd_send_uuids(peer_device, 0);
}
-int drbd_send_uuids_skip_initial_sync(struct drbd_conf *mdev)
+int drbd_send_uuids_skip_initial_sync(struct drbd_peer_device *peer_device)
{
- return _drbd_send_uuids(mdev, 8);
+ return _drbd_send_uuids(peer_device, 8);
}
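
The extra p->uuid[UI_FLAGS] slot packs several booleans; collected from the assembly in _drbd_send_uuids() above, the bits are (a summary of this code, not a wire-format spec):

	/* uuid_flags as sent in p->uuid[UI_FLAGS]:
	 *   1  our discard_my_data net option is set
	 *   2  we were a crashed primary (CRASHED_PRIMARY)
	 *   4  disk will be D_INCONSISTENT (new_state_tmp.disk)
	 *   8  skip the initial sync (drbd_send_uuids_skip_initial_sync())
	 */
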
-void drbd_print_uuids(struct drbd_conf *mdev, const char *text)
+void drbd_print_uuids(struct drbd_device *device, const char *text)
{
- if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
- u64 *uuid = mdev->ldev->md.uuid;
- dev_info(DEV, "%s %016llX:%016llX:%016llX:%016llX\n",
+ if (get_ldev_if_state(device, D_NEGOTIATING)) {
+ u64 *uuid = device->ldev->md.uuid;
+ drbd_info(device, "%s %016llX:%016llX:%016llX:%016llX\n",
text,
(unsigned long long)uuid[UI_CURRENT],
(unsigned long long)uuid[UI_BITMAP],
(unsigned long long)uuid[UI_HISTORY_START],
(unsigned long long)uuid[UI_HISTORY_END]);
- put_ldev(mdev);
+ put_ldev(device);
} else {
- dev_info(DEV, "%s effective data uuid: %016llX\n",
+ drbd_info(device, "%s effective data uuid: %016llX\n",
text,
- (unsigned long long)mdev->ed_uuid);
+ (unsigned long long)device->ed_uuid);
}
}
-void drbd_gen_and_send_sync_uuid(struct drbd_conf *mdev)
+void drbd_gen_and_send_sync_uuid(struct drbd_peer_device *peer_device)
{
+ struct drbd_device *device = peer_device->device;
struct drbd_socket *sock;
struct p_rs_uuid *p;
u64 uuid;
- D_ASSERT(mdev->state.disk == D_UP_TO_DATE);
+ D_ASSERT(device, device->state.disk == D_UP_TO_DATE);
- uuid = mdev->ldev->md.uuid[UI_BITMAP];
+ uuid = device->ldev->md.uuid[UI_BITMAP];
if (uuid && uuid != UUID_JUST_CREATED)
uuid = uuid + UUID_NEW_BM_OFFSET;
else
get_random_bytes(&uuid, sizeof(u64));
- drbd_uuid_set(mdev, UI_BITMAP, uuid);
- drbd_print_uuids(mdev, "updated sync UUID");
- drbd_md_sync(mdev);
+ drbd_uuid_set(device, UI_BITMAP, uuid);
+ drbd_print_uuids(device, "updated sync UUID");
+ drbd_md_sync(device);
- sock = &mdev->tconn->data;
- p = drbd_prepare_command(mdev, sock);
+ sock = &peer_device->connection->data;
+ p = drbd_prepare_command(peer_device, sock);
if (p) {
p->uuid = cpu_to_be64(uuid);
- drbd_send_command(mdev, sock, P_SYNC_UUID, sizeof(*p), NULL, 0);
+ drbd_send_command(peer_device, sock, P_SYNC_UUID, sizeof(*p), NULL, 0);
}
}
-int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags flags)
+int drbd_send_sizes(struct drbd_peer_device *peer_device, int trigger_reply, enum dds_flags flags)
{
+ struct drbd_device *device = peer_device->device;
struct drbd_socket *sock;
struct p_sizes *p;
sector_t d_size, u_size;
int q_order_type;
unsigned int max_bio_size;
- if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
- D_ASSERT(mdev->ldev->backing_bdev);
- d_size = drbd_get_max_capacity(mdev->ldev);
+ if (get_ldev_if_state(device, D_NEGOTIATING)) {
+ D_ASSERT(device, device->ldev->backing_bdev);
+ d_size = drbd_get_max_capacity(device->ldev);
rcu_read_lock();
- u_size = rcu_dereference(mdev->ldev->disk_conf)->disk_size;
+ u_size = rcu_dereference(device->ldev->disk_conf)->disk_size;
rcu_read_unlock();
- q_order_type = drbd_queue_order_type(mdev);
- max_bio_size = queue_max_hw_sectors(mdev->ldev->backing_bdev->bd_disk->queue) << 9;
+ q_order_type = drbd_queue_order_type(device);
+ max_bio_size = queue_max_hw_sectors(device->ldev->backing_bdev->bd_disk->queue) << 9;
max_bio_size = min(max_bio_size, DRBD_MAX_BIO_SIZE);
- put_ldev(mdev);
+ put_ldev(device);
} else {
d_size = 0;
u_size = 0;
@@ -934,45 +939,45 @@ int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags fl
max_bio_size = DRBD_MAX_BIO_SIZE; /* ... multiple BIOs per peer_request */
}
- sock = &mdev->tconn->data;
- p = drbd_prepare_command(mdev, sock);
+ sock = &peer_device->connection->data;
+ p = drbd_prepare_command(peer_device, sock);
if (!p)
return -EIO;
- if (mdev->tconn->agreed_pro_version <= 94)
+ if (peer_device->connection->agreed_pro_version <= 94)
max_bio_size = min(max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
- else if (mdev->tconn->agreed_pro_version < 100)
+ else if (peer_device->connection->agreed_pro_version < 100)
max_bio_size = min(max_bio_size, DRBD_MAX_BIO_SIZE_P95);
p->d_size = cpu_to_be64(d_size);
p->u_size = cpu_to_be64(u_size);
- p->c_size = cpu_to_be64(trigger_reply ? 0 : drbd_get_capacity(mdev->this_bdev));
+ p->c_size = cpu_to_be64(trigger_reply ? 0 : drbd_get_capacity(device->this_bdev));
p->max_bio_size = cpu_to_be32(max_bio_size);
p->queue_order_type = cpu_to_be16(q_order_type);
p->dds_flags = cpu_to_be16(flags);
- return drbd_send_command(mdev, sock, P_SIZES, sizeof(*p), NULL, 0);
+ return drbd_send_command(peer_device, sock, P_SIZES, sizeof(*p), NULL, 0);
}
/**
* drbd_send_current_state() - Sends the drbd state to the peer
- * @mdev: DRBD device.
+ * @peer_device: DRBD peer device.
*/
-int drbd_send_current_state(struct drbd_conf *mdev)
+int drbd_send_current_state(struct drbd_peer_device *peer_device)
{
struct drbd_socket *sock;
struct p_state *p;
- sock = &mdev->tconn->data;
- p = drbd_prepare_command(mdev, sock);
+ sock = &peer_device->connection->data;
+ p = drbd_prepare_command(peer_device, sock);
if (!p)
return -EIO;
- p->state = cpu_to_be32(mdev->state.i); /* Within the send mutex */
- return drbd_send_command(mdev, sock, P_STATE, sizeof(*p), NULL, 0);
+ p->state = cpu_to_be32(peer_device->device->state.i); /* Within the send mutex */
+ return drbd_send_command(peer_device, sock, P_STATE, sizeof(*p), NULL, 0);
}
/**
* drbd_send_state() - After a state change, sends the new state to the peer
- * @mdev: DRBD device.
+ * @peer_device: DRBD peer device.
* @state: the state to send, not necessarily the current state.
*
* Each state change queues an "after_state_ch" work, which will eventually
@@ -980,73 +985,73 @@ int drbd_send_current_state(struct drbd_conf *mdev)
* between queuing and processing of the after_state_ch work, we still
* want to send each intermediary state in the order it occurred.
*/
-int drbd_send_state(struct drbd_conf *mdev, union drbd_state state)
+int drbd_send_state(struct drbd_peer_device *peer_device, union drbd_state state)
{
struct drbd_socket *sock;
struct p_state *p;
- sock = &mdev->tconn->data;
- p = drbd_prepare_command(mdev, sock);
+ sock = &peer_device->connection->data;
+ p = drbd_prepare_command(peer_device, sock);
if (!p)
return -EIO;
p->state = cpu_to_be32(state.i); /* Within the send mutex */
- return drbd_send_command(mdev, sock, P_STATE, sizeof(*p), NULL, 0);
+ return drbd_send_command(peer_device, sock, P_STATE, sizeof(*p), NULL, 0);
}
-int drbd_send_state_req(struct drbd_conf *mdev, union drbd_state mask, union drbd_state val)
+int drbd_send_state_req(struct drbd_peer_device *peer_device, union drbd_state mask, union drbd_state val)
{
struct drbd_socket *sock;
struct p_req_state *p;
- sock = &mdev->tconn->data;
- p = drbd_prepare_command(mdev, sock);
+ sock = &peer_device->connection->data;
+ p = drbd_prepare_command(peer_device, sock);
if (!p)
return -EIO;
p->mask = cpu_to_be32(mask.i);
p->val = cpu_to_be32(val.i);
- return drbd_send_command(mdev, sock, P_STATE_CHG_REQ, sizeof(*p), NULL, 0);
+ return drbd_send_command(peer_device, sock, P_STATE_CHG_REQ, sizeof(*p), NULL, 0);
}
-int conn_send_state_req(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state val)
+int conn_send_state_req(struct drbd_connection *connection, union drbd_state mask, union drbd_state val)
{
enum drbd_packet cmd;
struct drbd_socket *sock;
struct p_req_state *p;
- cmd = tconn->agreed_pro_version < 100 ? P_STATE_CHG_REQ : P_CONN_ST_CHG_REQ;
- sock = &tconn->data;
- p = conn_prepare_command(tconn, sock);
+ cmd = connection->agreed_pro_version < 100 ? P_STATE_CHG_REQ : P_CONN_ST_CHG_REQ;
+ sock = &connection->data;
+ p = conn_prepare_command(connection, sock);
if (!p)
return -EIO;
p->mask = cpu_to_be32(mask.i);
p->val = cpu_to_be32(val.i);
- return conn_send_command(tconn, sock, cmd, sizeof(*p), NULL, 0);
+ return conn_send_command(connection, sock, cmd, sizeof(*p), NULL, 0);
}
-void drbd_send_sr_reply(struct drbd_conf *mdev, enum drbd_state_rv retcode)
+void drbd_send_sr_reply(struct drbd_peer_device *peer_device, enum drbd_state_rv retcode)
{
struct drbd_socket *sock;
struct p_req_state_reply *p;
- sock = &mdev->tconn->meta;
- p = drbd_prepare_command(mdev, sock);
+ sock = &peer_device->connection->meta;
+ p = drbd_prepare_command(peer_device, sock);
if (p) {
p->retcode = cpu_to_be32(retcode);
- drbd_send_command(mdev, sock, P_STATE_CHG_REPLY, sizeof(*p), NULL, 0);
+ drbd_send_command(peer_device, sock, P_STATE_CHG_REPLY, sizeof(*p), NULL, 0);
}
}
-void conn_send_sr_reply(struct drbd_tconn *tconn, enum drbd_state_rv retcode)
+void conn_send_sr_reply(struct drbd_connection *connection, enum drbd_state_rv retcode)
{
struct drbd_socket *sock;
struct p_req_state_reply *p;
- enum drbd_packet cmd = tconn->agreed_pro_version < 100 ? P_STATE_CHG_REPLY : P_CONN_ST_CHG_REPLY;
+ enum drbd_packet cmd = connection->agreed_pro_version < 100 ? P_STATE_CHG_REPLY : P_CONN_ST_CHG_REPLY;
- sock = &tconn->meta;
- p = conn_prepare_command(tconn, sock);
+ sock = &connection->meta;
+ p = conn_prepare_command(connection, sock);
if (p) {
p->retcode = cpu_to_be32(retcode);
- conn_send_command(tconn, sock, cmd, sizeof(*p), NULL, 0);
+ conn_send_command(connection, sock, cmd, sizeof(*p), NULL, 0);
}
}
@@ -1067,7 +1072,7 @@ static void dcbp_set_pad_bits(struct p_compressed_bm *p, int n)
p->encoding = (p->encoding & (~0x7 << 4)) | (n << 4);
}
-int fill_bitmap_rle_bits(struct drbd_conf *mdev,
+static int fill_bitmap_rle_bits(struct drbd_device *device,
struct p_compressed_bm *p,
unsigned int size,
struct bm_xfer_ctx *c)
@@ -1082,9 +1087,9 @@ int fill_bitmap_rle_bits(struct drbd_conf *mdev,
/* may we use this feature? */
rcu_read_lock();
- use_rle = rcu_dereference(mdev->tconn->net_conf)->use_rle;
+ use_rle = rcu_dereference(first_peer_device(device)->connection->net_conf)->use_rle;
rcu_read_unlock();
- if (!use_rle || mdev->tconn->agreed_pro_version < 90)
+ if (!use_rle || first_peer_device(device)->connection->agreed_pro_version < 90)
return 0;
if (c->bit_offset >= c->bm_bits)
@@ -1104,8 +1109,8 @@ int fill_bitmap_rle_bits(struct drbd_conf *mdev,
/* see how much plain bits we can stuff into one packet
* using RLE and VLI. */
do {
- tmp = (toggle == 0) ? _drbd_bm_find_next_zero(mdev, c->bit_offset)
- : _drbd_bm_find_next(mdev, c->bit_offset);
+ tmp = (toggle == 0) ? _drbd_bm_find_next_zero(device, c->bit_offset)
+ : _drbd_bm_find_next(device, c->bit_offset);
if (tmp == -1UL)
tmp = c->bm_bits;
rl = tmp - c->bit_offset;
@@ -1125,7 +1130,7 @@ int fill_bitmap_rle_bits(struct drbd_conf *mdev,
/* paranoia: catch zero runlength.
* can only happen if bitmap is modified while we scan it. */
if (rl == 0) {
- dev_err(DEV, "unexpected zero runlength while encoding bitmap "
+ drbd_err(device, "unexpected zero runlength while encoding bitmap "
"t:%u bo:%lu\n", toggle, c->bit_offset);
return -1;
}
@@ -1134,7 +1139,7 @@ int fill_bitmap_rle_bits(struct drbd_conf *mdev,
if (bits == -ENOBUFS) /* buffer full */
break;
if (bits <= 0) {
- dev_err(DEV, "error while encoding bitmap: %d\n", bits);
+ drbd_err(device, "error while encoding bitmap: %d\n", bits);
return 0;
}
@@ -1171,21 +1176,21 @@ int fill_bitmap_rle_bits(struct drbd_conf *mdev,
* code upon failure.
*/
static int
-send_bitmap_rle_or_plain(struct drbd_conf *mdev, struct bm_xfer_ctx *c)
+send_bitmap_rle_or_plain(struct drbd_device *device, struct bm_xfer_ctx *c)
{
- struct drbd_socket *sock = &mdev->tconn->data;
- unsigned int header_size = drbd_header_size(mdev->tconn);
+ struct drbd_socket *sock = &first_peer_device(device)->connection->data;
+ unsigned int header_size = drbd_header_size(first_peer_device(device)->connection);
struct p_compressed_bm *p = sock->sbuf + header_size;
int len, err;
- len = fill_bitmap_rle_bits(mdev, p,
+ len = fill_bitmap_rle_bits(device, p,
DRBD_SOCKET_BUFFER_SIZE - header_size - sizeof(*p), c);
if (len < 0)
return -EIO;
if (len) {
dcbp_set_code(p, RLE_VLI_Bits);
- err = __send_command(mdev->tconn, mdev->vnr, sock,
+ err = __send_command(first_peer_device(device)->connection, device->vnr, sock,
P_COMPRESSED_BITMAP, sizeof(*p) + len,
NULL, 0);
c->packets[0]++;
@@ -1205,8 +1210,8 @@ send_bitmap_rle_or_plain(struct drbd_conf *mdev, struct bm_xfer_ctx *c)
c->bm_words - c->word_offset);
len = num_words * sizeof(*p);
if (len)
- drbd_bm_get_lel(mdev, c->word_offset, num_words, p);
- err = __send_command(mdev->tconn, mdev->vnr, sock, P_BITMAP, len, NULL, 0);
+ drbd_bm_get_lel(device, c->word_offset, num_words, p);
+ err = __send_command(first_peer_device(device)->connection, device->vnr, sock, P_BITMAP, len, NULL, 0);
c->word_offset += num_words;
c->bit_offset = c->word_offset * BITS_PER_LONG;
@@ -1218,7 +1223,7 @@ send_bitmap_rle_or_plain(struct drbd_conf *mdev, struct bm_xfer_ctx *c)
}
if (!err) {
if (len == 0) {
- INFO_bm_xfer_stats(mdev, "send", c);
+ INFO_bm_xfer_stats(device, "send", c);
return 0;
} else
return 1;
@@ -1227,128 +1232,128 @@ send_bitmap_rle_or_plain(struct drbd_conf *mdev, struct bm_xfer_ctx *c)
}
/* See the comment at receive_bitmap() */
-static int _drbd_send_bitmap(struct drbd_conf *mdev)
+static int _drbd_send_bitmap(struct drbd_device *device)
{
struct bm_xfer_ctx c;
int err;
- if (!expect(mdev->bitmap))
+ if (!expect(device->bitmap))
return false;
- if (get_ldev(mdev)) {
- if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC)) {
- dev_info(DEV, "Writing the whole bitmap, MDF_FullSync was set.\n");
- drbd_bm_set_all(mdev);
- if (drbd_bm_write(mdev)) {
+ if (get_ldev(device)) {
+ if (drbd_md_test_flag(device->ldev, MDF_FULL_SYNC)) {
+ drbd_info(device, "Writing the whole bitmap, MDF_FullSync was set.\n");
+ drbd_bm_set_all(device);
+ if (drbd_bm_write(device)) {
/* write_bm did fail! Leave full sync flag set in Meta P_DATA
* but otherwise process as per normal - need to tell other
* side that a full resync is required! */
- dev_err(DEV, "Failed to write bitmap to disk!\n");
+ drbd_err(device, "Failed to write bitmap to disk!\n");
} else {
- drbd_md_clear_flag(mdev, MDF_FULL_SYNC);
- drbd_md_sync(mdev);
+ drbd_md_clear_flag(device, MDF_FULL_SYNC);
+ drbd_md_sync(device);
}
}
- put_ldev(mdev);
+ put_ldev(device);
}
c = (struct bm_xfer_ctx) {
- .bm_bits = drbd_bm_bits(mdev),
- .bm_words = drbd_bm_words(mdev),
+ .bm_bits = drbd_bm_bits(device),
+ .bm_words = drbd_bm_words(device),
};
do {
- err = send_bitmap_rle_or_plain(mdev, &c);
+ err = send_bitmap_rle_or_plain(device, &c);
} while (err > 0);
return err == 0;
}
-int drbd_send_bitmap(struct drbd_conf *mdev)
+int drbd_send_bitmap(struct drbd_device *device)
{
- struct drbd_socket *sock = &mdev->tconn->data;
+ struct drbd_socket *sock = &first_peer_device(device)->connection->data;
int err = -1;
mutex_lock(&sock->mutex);
if (sock->socket)
- err = !_drbd_send_bitmap(mdev);
+ err = !_drbd_send_bitmap(device);
mutex_unlock(&sock->mutex);
return err;
}
-void drbd_send_b_ack(struct drbd_tconn *tconn, u32 barrier_nr, u32 set_size)
+void drbd_send_b_ack(struct drbd_connection *connection, u32 barrier_nr, u32 set_size)
{
struct drbd_socket *sock;
struct p_barrier_ack *p;
- if (tconn->cstate < C_WF_REPORT_PARAMS)
+ if (connection->cstate < C_WF_REPORT_PARAMS)
return;
- sock = &tconn->meta;
- p = conn_prepare_command(tconn, sock);
+ sock = &connection->meta;
+ p = conn_prepare_command(connection, sock);
if (!p)
return;
p->barrier = barrier_nr;
p->set_size = cpu_to_be32(set_size);
- conn_send_command(tconn, sock, P_BARRIER_ACK, sizeof(*p), NULL, 0);
+ conn_send_command(connection, sock, P_BARRIER_ACK, sizeof(*p), NULL, 0);
}
/**
* _drbd_send_ack() - Sends an ack packet
- * @mdev: DRBD device.
+ * @device: DRBD device.
* @cmd: Packet command code.
* @sector: sector, needs to be in big endian byte order
* @blksize: size in byte, needs to be in big endian byte order
* @block_id: Id, big endian byte order
*/
-static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packet cmd,
+static int _drbd_send_ack(struct drbd_peer_device *peer_device, enum drbd_packet cmd,
u64 sector, u32 blksize, u64 block_id)
{
struct drbd_socket *sock;
struct p_block_ack *p;
- if (mdev->state.conn < C_CONNECTED)
+ if (peer_device->device->state.conn < C_CONNECTED)
return -EIO;
- sock = &mdev->tconn->meta;
- p = drbd_prepare_command(mdev, sock);
+ sock = &peer_device->connection->meta;
+ p = drbd_prepare_command(peer_device, sock);
if (!p)
return -EIO;
p->sector = sector;
p->block_id = block_id;
p->blksize = blksize;
- p->seq_num = cpu_to_be32(atomic_inc_return(&mdev->packet_seq));
- return drbd_send_command(mdev, sock, cmd, sizeof(*p), NULL, 0);
+ p->seq_num = cpu_to_be32(atomic_inc_return(&peer_device->device->packet_seq));
+ return drbd_send_command(peer_device, sock, cmd, sizeof(*p), NULL, 0);
}
/* dp->sector and dp->block_id already/still in network byte order,
* data_size is payload size according to dp->head,
* and may need to be corrected for digest size. */
-void drbd_send_ack_dp(struct drbd_conf *mdev, enum drbd_packet cmd,
+void drbd_send_ack_dp(struct drbd_peer_device *peer_device, enum drbd_packet cmd,
struct p_data *dp, int data_size)
{
- if (mdev->tconn->peer_integrity_tfm)
- data_size -= crypto_hash_digestsize(mdev->tconn->peer_integrity_tfm);
- _drbd_send_ack(mdev, cmd, dp->sector, cpu_to_be32(data_size),
+ if (peer_device->connection->peer_integrity_tfm)
+ data_size -= crypto_hash_digestsize(peer_device->connection->peer_integrity_tfm);
+ _drbd_send_ack(peer_device, cmd, dp->sector, cpu_to_be32(data_size),
dp->block_id);
}
-void drbd_send_ack_rp(struct drbd_conf *mdev, enum drbd_packet cmd,
+void drbd_send_ack_rp(struct drbd_peer_device *peer_device, enum drbd_packet cmd,
struct p_block_req *rp)
{
- _drbd_send_ack(mdev, cmd, rp->sector, rp->blksize, rp->block_id);
+ _drbd_send_ack(peer_device, cmd, rp->sector, rp->blksize, rp->block_id);
}
/**
* drbd_send_ack() - Sends an ack packet
- * @mdev: DRBD device
+ * @device: DRBD device
* @cmd: packet command code
* @peer_req: peer request
*/
-int drbd_send_ack(struct drbd_conf *mdev, enum drbd_packet cmd,
+int drbd_send_ack(struct drbd_peer_device *peer_device, enum drbd_packet cmd,
struct drbd_peer_request *peer_req)
{
- return _drbd_send_ack(mdev, cmd,
+ return _drbd_send_ack(peer_device, cmd,
cpu_to_be64(peer_req->i.sector),
cpu_to_be32(peer_req->i.size),
peer_req->block_id);
@@ -1356,32 +1361,32 @@ int drbd_send_ack(struct drbd_conf *mdev, enum drbd_packet cmd,
/* This function misuses the block_id field to signal if the blocks
* are in sync or not. */
-int drbd_send_ack_ex(struct drbd_conf *mdev, enum drbd_packet cmd,
+int drbd_send_ack_ex(struct drbd_peer_device *peer_device, enum drbd_packet cmd,
sector_t sector, int blksize, u64 block_id)
{
- return _drbd_send_ack(mdev, cmd,
+ return _drbd_send_ack(peer_device, cmd,
cpu_to_be64(sector),
cpu_to_be32(blksize),
cpu_to_be64(block_id));
}
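
The ack helpers differ only in who does the endian conversion: drbd_send_ack_rp() forwards the request's sector, blksize and block_id untouched because they are still in network byte order, drbd_send_ack_dp() converts only the digest-corrected data_size, and drbd_send_ack()/drbd_send_ack_ex() convert native values themselves with cpu_to_be64()/cpu_to_be32(), exactly as the comment above _drbd_send_ack() demands.
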
-int drbd_send_drequest(struct drbd_conf *mdev, int cmd,
+int drbd_send_drequest(struct drbd_peer_device *peer_device, int cmd,
sector_t sector, int size, u64 block_id)
{
struct drbd_socket *sock;
struct p_block_req *p;
- sock = &mdev->tconn->data;
- p = drbd_prepare_command(mdev, sock);
+ sock = &peer_device->connection->data;
+ p = drbd_prepare_command(peer_device, sock);
if (!p)
return -EIO;
p->sector = cpu_to_be64(sector);
p->block_id = block_id;
p->blksize = cpu_to_be32(size);
- return drbd_send_command(mdev, sock, cmd, sizeof(*p), NULL, 0);
+ return drbd_send_command(peer_device, sock, cmd, sizeof(*p), NULL, 0);
}
-int drbd_send_drequest_csum(struct drbd_conf *mdev, sector_t sector, int size,
+int drbd_send_drequest_csum(struct drbd_peer_device *peer_device, sector_t sector, int size,
void *digest, int digest_size, enum drbd_packet cmd)
{
struct drbd_socket *sock;
@@ -1389,64 +1394,63 @@ int drbd_send_drequest_csum(struct drbd_conf *mdev, sector_t sector, int size,
/* FIXME: Put the digest into the preallocated socket buffer. */
- sock = &mdev->tconn->data;
- p = drbd_prepare_command(mdev, sock);
+ sock = &peer_device->connection->data;
+ p = drbd_prepare_command(peer_device, sock);
if (!p)
return -EIO;
p->sector = cpu_to_be64(sector);
p->block_id = ID_SYNCER /* unused */;
p->blksize = cpu_to_be32(size);
- return drbd_send_command(mdev, sock, cmd, sizeof(*p),
- digest, digest_size);
+ return drbd_send_command(peer_device, sock, cmd, sizeof(*p), digest, digest_size);
}
-int drbd_send_ov_request(struct drbd_conf *mdev, sector_t sector, int size)
+int drbd_send_ov_request(struct drbd_peer_device *peer_device, sector_t sector, int size)
{
struct drbd_socket *sock;
struct p_block_req *p;
- sock = &mdev->tconn->data;
- p = drbd_prepare_command(mdev, sock);
+ sock = &peer_device->connection->data;
+ p = drbd_prepare_command(peer_device, sock);
if (!p)
return -EIO;
p->sector = cpu_to_be64(sector);
p->block_id = ID_SYNCER /* unused */;
p->blksize = cpu_to_be32(size);
- return drbd_send_command(mdev, sock, P_OV_REQUEST, sizeof(*p), NULL, 0);
+ return drbd_send_command(peer_device, sock, P_OV_REQUEST, sizeof(*p), NULL, 0);
}
/* called on sndtimeo
* returns false if we should retry,
* true if we think connection is dead
*/
-static int we_should_drop_the_connection(struct drbd_tconn *tconn, struct socket *sock)
+static int we_should_drop_the_connection(struct drbd_connection *connection, struct socket *sock)
{
int drop_it;
- /* long elapsed = (long)(jiffies - mdev->last_received); */
+ /* long elapsed = (long)(jiffies - device->last_received); */
- drop_it = tconn->meta.socket == sock
- || !tconn->asender.task
- || get_t_state(&tconn->asender) != RUNNING
- || tconn->cstate < C_WF_REPORT_PARAMS;
+ drop_it = connection->meta.socket == sock
+ || !connection->asender.task
+ || get_t_state(&connection->asender) != RUNNING
+ || connection->cstate < C_WF_REPORT_PARAMS;
if (drop_it)
return true;
- drop_it = !--tconn->ko_count;
+ drop_it = !--connection->ko_count;
if (!drop_it) {
- conn_err(tconn, "[%s/%d] sock_sendmsg time expired, ko = %u\n",
- current->comm, current->pid, tconn->ko_count);
- request_ping(tconn);
+ drbd_err(connection, "[%s/%d] sock_sendmsg time expired, ko = %u\n",
+ current->comm, current->pid, connection->ko_count);
+ request_ping(connection);
}
- return drop_it; /* && (mdev->state == R_PRIMARY) */;
+ return drop_it; /* && (device->state == R_PRIMARY) */;
}
-static void drbd_update_congested(struct drbd_tconn *tconn)
+static void drbd_update_congested(struct drbd_connection *connection)
{
- struct sock *sk = tconn->data.socket->sk;
+ struct sock *sk = connection->data.socket->sk;
if (sk->sk_wmem_queued > sk->sk_sndbuf * 4 / 5)
- set_bit(NET_CONGESTED, &tconn->flags);
+ set_bit(NET_CONGESTED, &connection->flags);
}
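
The congestion heuristic flags the connection once more than 4/5 of the socket send buffer is queued: with sk_sndbuf at 1 MiB, for instance, NET_CONGESTED is set as soon as roughly 800 KiB are in flight. The bit is cleared again at the end of _drbd_send_page() and drbd_send().
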
/* The idea of sendpage seems to be to put some kind of reference
@@ -1470,26 +1474,26 @@ static void drbd_update_congested(struct drbd_tconn *tconn)
* As a workaround, we disable sendpage on pages
* with page_count == 0 or PageSlab.
*/
-static int _drbd_no_send_page(struct drbd_conf *mdev, struct page *page,
+static int _drbd_no_send_page(struct drbd_peer_device *peer_device, struct page *page,
int offset, size_t size, unsigned msg_flags)
{
struct socket *socket;
void *addr;
int err;
- socket = mdev->tconn->data.socket;
+ socket = peer_device->connection->data.socket;
addr = kmap(page) + offset;
- err = drbd_send_all(mdev->tconn, socket, addr, size, msg_flags);
+ err = drbd_send_all(peer_device->connection, socket, addr, size, msg_flags);
kunmap(page);
if (!err)
- mdev->send_cnt += size >> 9;
+ peer_device->device->send_cnt += size >> 9;
return err;
}
-static int _drbd_send_page(struct drbd_conf *mdev, struct page *page,
+static int _drbd_send_page(struct drbd_peer_device *peer_device, struct page *page,
int offset, size_t size, unsigned msg_flags)
{
- struct socket *socket = mdev->tconn->data.socket;
+ struct socket *socket = peer_device->connection->data.socket;
mm_segment_t oldfs = get_fs();
int len = size;
int err = -EIO;
@@ -1501,10 +1505,10 @@ static int _drbd_send_page(struct drbd_conf *mdev, struct page *page,
* __page_cache_release a page that would actually still be referenced
* by someone, leading to some obscure delayed Oops somewhere else. */
if (disable_sendpage || (page_count(page) < 1) || PageSlab(page))
- return _drbd_no_send_page(mdev, page, offset, size, msg_flags);
+ return _drbd_no_send_page(peer_device, page, offset, size, msg_flags);
msg_flags |= MSG_NOSIGNAL;
- drbd_update_congested(mdev->tconn);
+ drbd_update_congested(peer_device->connection);
set_fs(KERNEL_DS);
do {
int sent;
@@ -1512,11 +1516,11 @@ static int _drbd_send_page(struct drbd_conf *mdev, struct page *page,
sent = socket->ops->sendpage(socket, page, offset, len, msg_flags);
if (sent <= 0) {
if (sent == -EAGAIN) {
- if (we_should_drop_the_connection(mdev->tconn, socket))
+ if (we_should_drop_the_connection(peer_device->connection, socket))
break;
continue;
}
- dev_warn(DEV, "%s: size=%d len=%d sent=%d\n",
+ drbd_warn(peer_device->device, "%s: size=%d len=%d sent=%d\n",
__func__, (int)size, len, sent);
if (sent < 0)
err = sent;
@@ -1524,18 +1528,18 @@ static int _drbd_send_page(struct drbd_conf *mdev, struct page *page,
}
len -= sent;
offset += sent;
- } while (len > 0 /* THINK && mdev->cstate >= C_CONNECTED*/);
+ } while (len > 0 /* THINK && device->cstate >= C_CONNECTED*/);
set_fs(oldfs);
- clear_bit(NET_CONGESTED, &mdev->tconn->flags);
+ clear_bit(NET_CONGESTED, &peer_device->connection->flags);
if (len == 0) {
err = 0;
- mdev->send_cnt += size >> 9;
+ peer_device->device->send_cnt += size >> 9;
}
return err;
}
-static int _drbd_send_bio(struct drbd_conf *mdev, struct bio *bio)
+static int _drbd_send_bio(struct drbd_peer_device *peer_device, struct bio *bio)
{
struct bio_vec bvec;
struct bvec_iter iter;
@@ -1544,7 +1548,7 @@ static int _drbd_send_bio(struct drbd_conf *mdev, struct bio *bio)
bio_for_each_segment(bvec, bio, iter) {
int err;
- err = _drbd_no_send_page(mdev, bvec.bv_page,
+ err = _drbd_no_send_page(peer_device, bvec.bv_page,
bvec.bv_offset, bvec.bv_len,
bio_iter_last(bvec, iter)
? 0 : MSG_MORE);
@@ -1554,7 +1558,7 @@ static int _drbd_send_bio(struct drbd_conf *mdev, struct bio *bio)
return 0;
}
-static int _drbd_send_zc_bio(struct drbd_conf *mdev, struct bio *bio)
+static int _drbd_send_zc_bio(struct drbd_peer_device *peer_device, struct bio *bio)
{
struct bio_vec bvec;
struct bvec_iter iter;
@@ -1563,7 +1567,7 @@ static int _drbd_send_zc_bio(struct drbd_conf *mdev, struct bio *bio)
bio_for_each_segment(bvec, bio, iter) {
int err;
- err = _drbd_send_page(mdev, bvec.bv_page,
+ err = _drbd_send_page(peer_device, bvec.bv_page,
bvec.bv_offset, bvec.bv_len,
bio_iter_last(bvec, iter) ? 0 : MSG_MORE);
if (err)
@@ -1572,7 +1576,7 @@ static int _drbd_send_zc_bio(struct drbd_conf *mdev, struct bio *bio)
return 0;
}
-static int _drbd_send_zc_ee(struct drbd_conf *mdev,
+static int _drbd_send_zc_ee(struct drbd_peer_device *peer_device,
struct drbd_peer_request *peer_req)
{
struct page *page = peer_req->pages;
@@ -1583,7 +1587,7 @@ static int _drbd_send_zc_ee(struct drbd_conf *mdev,
page_chain_for_each(page) {
unsigned l = min_t(unsigned, len, PAGE_SIZE);
- err = _drbd_send_page(mdev, page, 0, l,
+ err = _drbd_send_page(peer_device, page, 0, l,
page_chain_next(page) ? MSG_MORE : 0);
if (err)
return err;
@@ -1592,9 +1596,9 @@ static int _drbd_send_zc_ee(struct drbd_conf *mdev,
return 0;
}
-static u32 bio_flags_to_wire(struct drbd_conf *mdev, unsigned long bi_rw)
+static u32 bio_flags_to_wire(struct drbd_connection *connection, unsigned long bi_rw)
{
- if (mdev->tconn->agreed_pro_version >= 95)
+ if (connection->agreed_pro_version >= 95)
return (bi_rw & REQ_SYNC ? DP_RW_SYNC : 0) |
(bi_rw & REQ_FUA ? DP_FUA : 0) |
(bi_rw & REQ_FLUSH ? DP_FLUSH : 0) |
@@ -1606,28 +1610,30 @@ static u32 bio_flags_to_wire(struct drbd_conf *mdev, unsigned long bi_rw)
/* Used to send write requests
* R_PRIMARY -> Peer (P_DATA)
*/
-int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
+int drbd_send_dblock(struct drbd_peer_device *peer_device, struct drbd_request *req)
{
+ struct drbd_device *device = peer_device->device;
struct drbd_socket *sock;
struct p_data *p;
unsigned int dp_flags = 0;
int dgs;
int err;
- sock = &mdev->tconn->data;
- p = drbd_prepare_command(mdev, sock);
- dgs = mdev->tconn->integrity_tfm ? crypto_hash_digestsize(mdev->tconn->integrity_tfm) : 0;
+ sock = &peer_device->connection->data;
+ p = drbd_prepare_command(peer_device, sock);
+ dgs = peer_device->connection->integrity_tfm ?
+ crypto_hash_digestsize(peer_device->connection->integrity_tfm) : 0;
if (!p)
return -EIO;
p->sector = cpu_to_be64(req->i.sector);
p->block_id = (unsigned long)req;
- p->seq_num = cpu_to_be32(atomic_inc_return(&mdev->packet_seq));
- dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
- if (mdev->state.conn >= C_SYNC_SOURCE &&
- mdev->state.conn <= C_PAUSED_SYNC_T)
+ p->seq_num = cpu_to_be32(atomic_inc_return(&device->packet_seq));
+ dp_flags = bio_flags_to_wire(peer_device->connection, req->master_bio->bi_rw);
+ if (device->state.conn >= C_SYNC_SOURCE &&
+ device->state.conn <= C_PAUSED_SYNC_T)
dp_flags |= DP_MAY_SET_IN_SYNC;
- if (mdev->tconn->agreed_pro_version >= 100) {
+ if (peer_device->connection->agreed_pro_version >= 100) {
if (req->rq_state & RQ_EXP_RECEIVE_ACK)
dp_flags |= DP_SEND_RECEIVE_ACK;
if (req->rq_state & RQ_EXP_WRITE_ACK)
@@ -1635,8 +1641,8 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
}
p->dp_flags = cpu_to_be32(dp_flags);
if (dgs)
- drbd_csum_bio(mdev, mdev->tconn->integrity_tfm, req->master_bio, p + 1);
- err = __send_command(mdev->tconn, mdev->vnr, sock, P_DATA, sizeof(*p) + dgs, NULL, req->i.size);
+ drbd_csum_bio(peer_device->connection->integrity_tfm, req->master_bio, p + 1);
+ err = __send_command(peer_device->connection, device->vnr, sock, P_DATA, sizeof(*p) + dgs, NULL, req->i.size);
if (!err) {
/* For protocol A, we have to memcpy the payload into
* socket buffers, as we may complete right away
@@ -1650,18 +1656,18 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
* receiving side, we sure have detected corruption elsewhere.
*/
if (!(req->rq_state & (RQ_EXP_RECEIVE_ACK | RQ_EXP_WRITE_ACK)) || dgs)
- err = _drbd_send_bio(mdev, req->master_bio);
+ err = _drbd_send_bio(peer_device, req->master_bio);
else
- err = _drbd_send_zc_bio(mdev, req->master_bio);
+ err = _drbd_send_zc_bio(peer_device, req->master_bio);
/* double check digest, sometimes buffers have been modified in flight. */
if (dgs > 0 && dgs <= 64) {
/* 64 byte, 512 bit, is the largest digest size
* currently supported in kernel crypto. */
unsigned char digest[64];
- drbd_csum_bio(mdev, mdev->tconn->integrity_tfm, req->master_bio, digest);
+ drbd_csum_bio(peer_device->connection->integrity_tfm, req->master_bio, digest);
if (memcmp(p + 1, digest, dgs)) {
- dev_warn(DEV,
+ drbd_warn(device,
"Digest mismatch, buffer modified by upper layers during write: %llus +%u\n",
(unsigned long long)req->i.sector, req->i.size);
}
@@ -1678,18 +1684,20 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
* Peer -> (diskless) R_PRIMARY (P_DATA_REPLY)
* C_SYNC_SOURCE -> C_SYNC_TARGET (P_RS_DATA_REPLY)
*/
-int drbd_send_block(struct drbd_conf *mdev, enum drbd_packet cmd,
+int drbd_send_block(struct drbd_peer_device *peer_device, enum drbd_packet cmd,
struct drbd_peer_request *peer_req)
{
+ struct drbd_device *device = peer_device->device;
struct drbd_socket *sock;
struct p_data *p;
int err;
int dgs;
- sock = &mdev->tconn->data;
- p = drbd_prepare_command(mdev, sock);
+ sock = &peer_device->connection->data;
+ p = drbd_prepare_command(peer_device, sock);
- dgs = mdev->tconn->integrity_tfm ? crypto_hash_digestsize(mdev->tconn->integrity_tfm) : 0;
+ dgs = peer_device->connection->integrity_tfm ?
+ crypto_hash_digestsize(peer_device->connection->integrity_tfm) : 0;
if (!p)
return -EIO;
@@ -1698,27 +1706,27 @@ int drbd_send_block(struct drbd_conf *mdev, enum drbd_packet cmd,
p->seq_num = 0; /* unused */
p->dp_flags = 0;
if (dgs)
- drbd_csum_ee(mdev, mdev->tconn->integrity_tfm, peer_req, p + 1);
- err = __send_command(mdev->tconn, mdev->vnr, sock, cmd, sizeof(*p) + dgs, NULL, peer_req->i.size);
+ drbd_csum_ee(peer_device->connection->integrity_tfm, peer_req, p + 1);
+ err = __send_command(peer_device->connection, device->vnr, sock, cmd, sizeof(*p) + dgs, NULL, peer_req->i.size);
if (!err)
- err = _drbd_send_zc_ee(mdev, peer_req);
+ err = _drbd_send_zc_ee(peer_device, peer_req);
mutex_unlock(&sock->mutex); /* locked by drbd_prepare_command() */
return err;
}
-int drbd_send_out_of_sync(struct drbd_conf *mdev, struct drbd_request *req)
+int drbd_send_out_of_sync(struct drbd_peer_device *peer_device, struct drbd_request *req)
{
struct drbd_socket *sock;
struct p_block_desc *p;
- sock = &mdev->tconn->data;
- p = drbd_prepare_command(mdev, sock);
+ sock = &peer_device->connection->data;
+ p = drbd_prepare_command(peer_device, sock);
if (!p)
return -EIO;
p->sector = cpu_to_be64(req->i.sector);
p->blksize = cpu_to_be32(req->i.size);
- return drbd_send_command(mdev, sock, P_OUT_OF_SYNC, sizeof(*p), NULL, 0);
+ return drbd_send_command(peer_device, sock, P_OUT_OF_SYNC, sizeof(*p), NULL, 0);
}
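
The two back-pointers the old code leaned on (mdev->tconn and the volume number) become explicit edges in an object graph in this series: a resource owns connections and devices, and a peer_device ties one device to one connection. A minimal, illustrative skeleton of that graph, with everything except the linkage elided (the real definitions live in drbd_int.h):

#include <stddef.h>

/* Skeleton only: field names follow the patch, all other members elided. */
struct list_head { struct list_head *next, *prev; };

struct drbd_resource {
        const char *name;
        struct list_head connections;   /* all connections of the resource */
};

struct drbd_connection {                /* replaces struct drbd_tconn */
        struct drbd_resource *resource;
        struct list_head connections;   /* linked into resource->connections */
};

struct drbd_device {                    /* replaces struct drbd_conf (mdev) */
        struct drbd_resource *resource;
        struct list_head peer_devices;  /* all peer_devices of the device */
        unsigned int vnr, minor;
};

struct drbd_peer_device {               /* one (device, connection) pair */
        struct drbd_device *device;
        struct drbd_connection *connection;
        struct list_head peer_devices;  /* linked into device->peer_devices */
};

int main(void)
{
        struct drbd_resource r = { "r0", { &r.connections, &r.connections } };
        struct drbd_connection c = { .resource = &r };
        struct drbd_device d = { .resource = &r, .vnr = 0, .minor = 1 };
        struct drbd_peer_device pd = { .device = &d, .connection = &c };
        (void)pd;
        return 0;
}
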
/*
@@ -1737,7 +1745,7 @@ int drbd_send_out_of_sync(struct drbd_conf *mdev, struct drbd_request *req)
/*
* you must have down()ed the appropriate [m]sock_mutex elsewhere!
*/
-int drbd_send(struct drbd_tconn *tconn, struct socket *sock,
+int drbd_send(struct drbd_connection *connection, struct socket *sock,
void *buf, size_t size, unsigned msg_flags)
{
struct kvec iov;
@@ -1758,11 +1766,11 @@ int drbd_send(struct drbd_tconn *tconn, struct socket *sock,
msg.msg_controllen = 0;
msg.msg_flags = msg_flags | MSG_NOSIGNAL;
- if (sock == tconn->data.socket) {
+ if (sock == connection->data.socket) {
rcu_read_lock();
- tconn->ko_count = rcu_dereference(tconn->net_conf)->ko_count;
+ connection->ko_count = rcu_dereference(connection->net_conf)->ko_count;
rcu_read_unlock();
- drbd_update_congested(tconn);
+ drbd_update_congested(connection);
}
do {
/* STRANGE
@@ -1776,7 +1784,7 @@ int drbd_send(struct drbd_tconn *tconn, struct socket *sock,
*/
rv = kernel_sendmsg(sock, &msg, &iov, 1, size);
if (rv == -EAGAIN) {
- if (we_should_drop_the_connection(tconn, sock))
+ if (we_should_drop_the_connection(connection, sock))
break;
else
continue;
@@ -1792,17 +1800,17 @@ int drbd_send(struct drbd_tconn *tconn, struct socket *sock,
iov.iov_len -= rv;
} while (sent < size);
- if (sock == tconn->data.socket)
- clear_bit(NET_CONGESTED, &tconn->flags);
+ if (sock == connection->data.socket)
+ clear_bit(NET_CONGESTED, &connection->flags);
if (rv <= 0) {
if (rv != -EAGAIN) {
- conn_err(tconn, "%s_sendmsg returned %d\n",
- sock == tconn->meta.socket ? "msock" : "sock",
+ drbd_err(connection, "%s_sendmsg returned %d\n",
+ sock == connection->meta.socket ? "msock" : "sock",
rv);
- conn_request_state(tconn, NS(conn, C_BROKEN_PIPE), CS_HARD);
+ conn_request_state(connection, NS(conn, C_BROKEN_PIPE), CS_HARD);
} else
- conn_request_state(tconn, NS(conn, C_TIMEOUT), CS_HARD);
+ conn_request_state(connection, NS(conn, C_TIMEOUT), CS_HARD);
}
return sent;
@@ -1813,12 +1821,12 @@ int drbd_send(struct drbd_tconn *tconn, struct socket *sock,
*
* Returns 0 upon success and a negative error value otherwise.
*/
-int drbd_send_all(struct drbd_tconn *tconn, struct socket *sock, void *buffer,
+int drbd_send_all(struct drbd_connection *connection, struct socket *sock, void *buffer,
size_t size, unsigned msg_flags)
{
int err;
- err = drbd_send(tconn, sock, buffer, size, msg_flags);
+ err = drbd_send(connection, sock, buffer, size, msg_flags);
if (err < 0)
return err;
if (err != size)
@@ -1828,16 +1836,16 @@ int drbd_send_all(struct drbd_tconn *tconn, struct socket *sock, void *buffer,
static int drbd_open(struct block_device *bdev, fmode_t mode)
{
- struct drbd_conf *mdev = bdev->bd_disk->private_data;
+ struct drbd_device *device = bdev->bd_disk->private_data;
unsigned long flags;
int rv = 0;
mutex_lock(&drbd_main_mutex);
- spin_lock_irqsave(&mdev->tconn->req_lock, flags);
- /* to have a stable mdev->state.role
+ spin_lock_irqsave(&device->resource->req_lock, flags);
+ /* to have a stable device->state.role
* and no race with updating open_cnt */
- if (mdev->state.role != R_PRIMARY) {
+ if (device->state.role != R_PRIMARY) {
if (mode & FMODE_WRITE)
rv = -EROFS;
else if (!allow_oos)
@@ -1845,8 +1853,8 @@ static int drbd_open(struct block_device *bdev, fmode_t mode)
}
if (!rv)
- mdev->open_cnt++;
- spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
+ device->open_cnt++;
+ spin_unlock_irqrestore(&device->resource->req_lock, flags);
mutex_unlock(&drbd_main_mutex);
return rv;
@@ -1854,17 +1862,17 @@ static int drbd_open(struct block_device *bdev, fmode_t mode)
static void drbd_release(struct gendisk *gd, fmode_t mode)
{
- struct drbd_conf *mdev = gd->private_data;
+ struct drbd_device *device = gd->private_data;
mutex_lock(&drbd_main_mutex);
- mdev->open_cnt--;
+ device->open_cnt--;
mutex_unlock(&drbd_main_mutex);
}
-static void drbd_set_defaults(struct drbd_conf *mdev)
+static void drbd_set_defaults(struct drbd_device *device)
{
/* Beware! The actual layout differs
* between big endian and little endian */
- mdev->state = (union drbd_dev_state) {
+ device->state = (union drbd_dev_state) {
{ .role = R_SECONDARY,
.peer = R_UNKNOWN,
.conn = C_STANDALONE,
@@ -1873,130 +1881,123 @@ static void drbd_set_defaults(struct drbd_conf *mdev)
} };
}
-void drbd_init_set_defaults(struct drbd_conf *mdev)
+void drbd_init_set_defaults(struct drbd_device *device)
{
/* the memset(,0,) did most of this.
* note: only assignments, no allocation in here */
- drbd_set_defaults(mdev);
-
- atomic_set(&mdev->ap_bio_cnt, 0);
- atomic_set(&mdev->ap_pending_cnt, 0);
- atomic_set(&mdev->rs_pending_cnt, 0);
- atomic_set(&mdev->unacked_cnt, 0);
- atomic_set(&mdev->local_cnt, 0);
- atomic_set(&mdev->pp_in_use_by_net, 0);
- atomic_set(&mdev->rs_sect_in, 0);
- atomic_set(&mdev->rs_sect_ev, 0);
- atomic_set(&mdev->ap_in_flight, 0);
- atomic_set(&mdev->md_io_in_use, 0);
-
- mutex_init(&mdev->own_state_mutex);
- mdev->state_mutex = &mdev->own_state_mutex;
-
- spin_lock_init(&mdev->al_lock);
- spin_lock_init(&mdev->peer_seq_lock);
-
- INIT_LIST_HEAD(&mdev->active_ee);
- INIT_LIST_HEAD(&mdev->sync_ee);
- INIT_LIST_HEAD(&mdev->done_ee);
- INIT_LIST_HEAD(&mdev->read_ee);
- INIT_LIST_HEAD(&mdev->net_ee);
- INIT_LIST_HEAD(&mdev->resync_reads);
- INIT_LIST_HEAD(&mdev->resync_work.list);
- INIT_LIST_HEAD(&mdev->unplug_work.list);
- INIT_LIST_HEAD(&mdev->go_diskless.list);
- INIT_LIST_HEAD(&mdev->md_sync_work.list);
- INIT_LIST_HEAD(&mdev->start_resync_work.list);
- INIT_LIST_HEAD(&mdev->bm_io_work.w.list);
-
- mdev->resync_work.cb = w_resync_timer;
- mdev->unplug_work.cb = w_send_write_hint;
- mdev->go_diskless.cb = w_go_diskless;
- mdev->md_sync_work.cb = w_md_sync;
- mdev->bm_io_work.w.cb = w_bitmap_io;
- mdev->start_resync_work.cb = w_start_resync;
-
- mdev->resync_work.mdev = mdev;
- mdev->unplug_work.mdev = mdev;
- mdev->go_diskless.mdev = mdev;
- mdev->md_sync_work.mdev = mdev;
- mdev->bm_io_work.w.mdev = mdev;
- mdev->start_resync_work.mdev = mdev;
-
- init_timer(&mdev->resync_timer);
- init_timer(&mdev->md_sync_timer);
- init_timer(&mdev->start_resync_timer);
- init_timer(&mdev->request_timer);
- mdev->resync_timer.function = resync_timer_fn;
- mdev->resync_timer.data = (unsigned long) mdev;
- mdev->md_sync_timer.function = md_sync_timer_fn;
- mdev->md_sync_timer.data = (unsigned long) mdev;
- mdev->start_resync_timer.function = start_resync_timer_fn;
- mdev->start_resync_timer.data = (unsigned long) mdev;
- mdev->request_timer.function = request_timer_fn;
- mdev->request_timer.data = (unsigned long) mdev;
-
- init_waitqueue_head(&mdev->misc_wait);
- init_waitqueue_head(&mdev->state_wait);
- init_waitqueue_head(&mdev->ee_wait);
- init_waitqueue_head(&mdev->al_wait);
- init_waitqueue_head(&mdev->seq_wait);
-
- mdev->resync_wenr = LC_FREE;
- mdev->peer_max_bio_size = DRBD_MAX_BIO_SIZE_SAFE;
- mdev->local_max_bio_size = DRBD_MAX_BIO_SIZE_SAFE;
-}
-
-void drbd_mdev_cleanup(struct drbd_conf *mdev)
+ drbd_set_defaults(device);
+
+ atomic_set(&device->ap_bio_cnt, 0);
+ atomic_set(&device->ap_pending_cnt, 0);
+ atomic_set(&device->rs_pending_cnt, 0);
+ atomic_set(&device->unacked_cnt, 0);
+ atomic_set(&device->local_cnt, 0);
+ atomic_set(&device->pp_in_use_by_net, 0);
+ atomic_set(&device->rs_sect_in, 0);
+ atomic_set(&device->rs_sect_ev, 0);
+ atomic_set(&device->ap_in_flight, 0);
+ atomic_set(&device->md_io_in_use, 0);
+
+ mutex_init(&device->own_state_mutex);
+ device->state_mutex = &device->own_state_mutex;
+
+ spin_lock_init(&device->al_lock);
+ spin_lock_init(&device->peer_seq_lock);
+
+ INIT_LIST_HEAD(&device->active_ee);
+ INIT_LIST_HEAD(&device->sync_ee);
+ INIT_LIST_HEAD(&device->done_ee);
+ INIT_LIST_HEAD(&device->read_ee);
+ INIT_LIST_HEAD(&device->net_ee);
+ INIT_LIST_HEAD(&device->resync_reads);
+ INIT_LIST_HEAD(&device->resync_work.list);
+ INIT_LIST_HEAD(&device->unplug_work.list);
+ INIT_LIST_HEAD(&device->go_diskless.list);
+ INIT_LIST_HEAD(&device->md_sync_work.list);
+ INIT_LIST_HEAD(&device->start_resync_work.list);
+ INIT_LIST_HEAD(&device->bm_io_work.w.list);
+
+ device->resync_work.cb = w_resync_timer;
+ device->unplug_work.cb = w_send_write_hint;
+ device->go_diskless.cb = w_go_diskless;
+ device->md_sync_work.cb = w_md_sync;
+ device->bm_io_work.w.cb = w_bitmap_io;
+ device->start_resync_work.cb = w_start_resync;
+
+ init_timer(&device->resync_timer);
+ init_timer(&device->md_sync_timer);
+ init_timer(&device->start_resync_timer);
+ init_timer(&device->request_timer);
+ device->resync_timer.function = resync_timer_fn;
+ device->resync_timer.data = (unsigned long) device;
+ device->md_sync_timer.function = md_sync_timer_fn;
+ device->md_sync_timer.data = (unsigned long) device;
+ device->start_resync_timer.function = start_resync_timer_fn;
+ device->start_resync_timer.data = (unsigned long) device;
+ device->request_timer.function = request_timer_fn;
+ device->request_timer.data = (unsigned long) device;
+
+ init_waitqueue_head(&device->misc_wait);
+ init_waitqueue_head(&device->state_wait);
+ init_waitqueue_head(&device->ee_wait);
+ init_waitqueue_head(&device->al_wait);
+ init_waitqueue_head(&device->seq_wait);
+
+ device->resync_wenr = LC_FREE;
+ device->peer_max_bio_size = DRBD_MAX_BIO_SIZE_SAFE;
+ device->local_max_bio_size = DRBD_MAX_BIO_SIZE_SAFE;
+}
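
All four timers above are wired up with the classic init_timer()/.function/.data triple: the callback receives an opaque unsigned long, here recast from mdev to device. A user-space sketch of that idiom, assuming nothing beyond standard C:

#include <stdio.h>

/* Toy model of the .function/.data timer idiom: an opaque unsigned long
 * is stored alongside the callback, which casts it back to its context. */
struct timer {
        void (*function)(unsigned long);
        unsigned long data;
};

struct device { const char *name; };

static void resync_timer_fn(unsigned long data)
{
        struct device *device = (struct device *)data;
        printf("resync timer fired for %s\n", device->name);
}

int main(void)
{
        struct device dev = { "drbd0" };
        struct timer t = {
                .function = resync_timer_fn,
                .data = (unsigned long)&dev,
        };
        t.function(t.data);     /* stand-in for timer expiry */
        return 0;
}
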
+
+void drbd_device_cleanup(struct drbd_device *device)
{
int i;
- if (mdev->tconn->receiver.t_state != NONE)
- dev_err(DEV, "ASSERT FAILED: receiver t_state == %d expected 0.\n",
- mdev->tconn->receiver.t_state);
-
- mdev->al_writ_cnt =
- mdev->bm_writ_cnt =
- mdev->read_cnt =
- mdev->recv_cnt =
- mdev->send_cnt =
- mdev->writ_cnt =
- mdev->p_size =
- mdev->rs_start =
- mdev->rs_total =
- mdev->rs_failed = 0;
- mdev->rs_last_events = 0;
- mdev->rs_last_sect_ev = 0;
+ if (first_peer_device(device)->connection->receiver.t_state != NONE)
+ drbd_err(device, "ASSERT FAILED: receiver t_state == %d expected 0.\n",
+ first_peer_device(device)->connection->receiver.t_state);
+
+ device->al_writ_cnt =
+ device->bm_writ_cnt =
+ device->read_cnt =
+ device->recv_cnt =
+ device->send_cnt =
+ device->writ_cnt =
+ device->p_size =
+ device->rs_start =
+ device->rs_total =
+ device->rs_failed = 0;
+ device->rs_last_events = 0;
+ device->rs_last_sect_ev = 0;
for (i = 0; i < DRBD_SYNC_MARKS; i++) {
- mdev->rs_mark_left[i] = 0;
- mdev->rs_mark_time[i] = 0;
+ device->rs_mark_left[i] = 0;
+ device->rs_mark_time[i] = 0;
}
- D_ASSERT(mdev->tconn->net_conf == NULL);
+ D_ASSERT(device, first_peer_device(device)->connection->net_conf == NULL);
- drbd_set_my_capacity(mdev, 0);
- if (mdev->bitmap) {
+ drbd_set_my_capacity(device, 0);
+ if (device->bitmap) {
/* maybe never allocated. */
- drbd_bm_resize(mdev, 0, 1);
- drbd_bm_cleanup(mdev);
+ drbd_bm_resize(device, 0, 1);
+ drbd_bm_cleanup(device);
}
- drbd_free_bc(mdev->ldev);
- mdev->ldev = NULL;
+ drbd_free_bc(device->ldev);
+ device->ldev = NULL;
- clear_bit(AL_SUSPENDED, &mdev->flags);
+ clear_bit(AL_SUSPENDED, &device->flags);
- D_ASSERT(list_empty(&mdev->active_ee));
- D_ASSERT(list_empty(&mdev->sync_ee));
- D_ASSERT(list_empty(&mdev->done_ee));
- D_ASSERT(list_empty(&mdev->read_ee));
- D_ASSERT(list_empty(&mdev->net_ee));
- D_ASSERT(list_empty(&mdev->resync_reads));
- D_ASSERT(list_empty(&mdev->tconn->sender_work.q));
- D_ASSERT(list_empty(&mdev->resync_work.list));
- D_ASSERT(list_empty(&mdev->unplug_work.list));
- D_ASSERT(list_empty(&mdev->go_diskless.list));
+ D_ASSERT(device, list_empty(&device->active_ee));
+ D_ASSERT(device, list_empty(&device->sync_ee));
+ D_ASSERT(device, list_empty(&device->done_ee));
+ D_ASSERT(device, list_empty(&device->read_ee));
+ D_ASSERT(device, list_empty(&device->net_ee));
+ D_ASSERT(device, list_empty(&device->resync_reads));
+ D_ASSERT(device, list_empty(&first_peer_device(device)->connection->sender_work.q));
+ D_ASSERT(device, list_empty(&device->resync_work.list));
+ D_ASSERT(device, list_empty(&device->unplug_work.list));
+ D_ASSERT(device, list_empty(&device->go_diskless.list));
- drbd_set_defaults(mdev);
+ drbd_set_defaults(device);
}
@@ -2011,7 +2012,7 @@ static void drbd_destroy_mempools(void)
drbd_pp_vacant--;
}
- /* D_ASSERT(atomic_read(&drbd_pp_vacant)==0); */
+ /* D_ASSERT(device, atomic_read(&drbd_pp_vacant)==0); */
if (drbd_md_io_bio_set)
bioset_free(drbd_md_io_bio_set);
@@ -2131,69 +2132,73 @@ static struct notifier_block drbd_notifier = {
.notifier_call = drbd_notify_sys,
};
-static void drbd_release_all_peer_reqs(struct drbd_conf *mdev)
+static void drbd_release_all_peer_reqs(struct drbd_device *device)
{
int rr;
- rr = drbd_free_peer_reqs(mdev, &mdev->active_ee);
+ rr = drbd_free_peer_reqs(device, &device->active_ee);
if (rr)
- dev_err(DEV, "%d EEs in active list found!\n", rr);
+ drbd_err(device, "%d EEs in active list found!\n", rr);
- rr = drbd_free_peer_reqs(mdev, &mdev->sync_ee);
+ rr = drbd_free_peer_reqs(device, &device->sync_ee);
if (rr)
- dev_err(DEV, "%d EEs in sync list found!\n", rr);
+ drbd_err(device, "%d EEs in sync list found!\n", rr);
- rr = drbd_free_peer_reqs(mdev, &mdev->read_ee);
+ rr = drbd_free_peer_reqs(device, &device->read_ee);
if (rr)
- dev_err(DEV, "%d EEs in read list found!\n", rr);
+ drbd_err(device, "%d EEs in read list found!\n", rr);
- rr = drbd_free_peer_reqs(mdev, &mdev->done_ee);
+ rr = drbd_free_peer_reqs(device, &device->done_ee);
if (rr)
- dev_err(DEV, "%d EEs in done list found!\n", rr);
+ drbd_err(device, "%d EEs in done list found!\n", rr);
- rr = drbd_free_peer_reqs(mdev, &mdev->net_ee);
+ rr = drbd_free_peer_reqs(device, &device->net_ee);
if (rr)
- dev_err(DEV, "%d EEs in net list found!\n", rr);
+ drbd_err(device, "%d EEs in net list found!\n", rr);
}
/* caution. no locking. */
-void drbd_minor_destroy(struct kref *kref)
+void drbd_destroy_device(struct kref *kref)
{
- struct drbd_conf *mdev = container_of(kref, struct drbd_conf, kref);
- struct drbd_tconn *tconn = mdev->tconn;
+ struct drbd_device *device = container_of(kref, struct drbd_device, kref);
+ struct drbd_resource *resource = device->resource;
+ struct drbd_connection *connection;
- del_timer_sync(&mdev->request_timer);
+ del_timer_sync(&device->request_timer);
/* paranoia asserts */
- D_ASSERT(mdev->open_cnt == 0);
+ D_ASSERT(device, device->open_cnt == 0);
/* end paranoia asserts */
/* cleanup stuff that may have been allocated during
* device (re-)configuration or state changes */
- if (mdev->this_bdev)
- bdput(mdev->this_bdev);
+ if (device->this_bdev)
+ bdput(device->this_bdev);
- drbd_free_bc(mdev->ldev);
- mdev->ldev = NULL;
+ drbd_free_bc(device->ldev);
+ device->ldev = NULL;
- drbd_release_all_peer_reqs(mdev);
+ drbd_release_all_peer_reqs(device);
- lc_destroy(mdev->act_log);
- lc_destroy(mdev->resync);
+ lc_destroy(device->act_log);
+ lc_destroy(device->resync);
- kfree(mdev->p_uuid);
- /* mdev->p_uuid = NULL; */
+ kfree(device->p_uuid);
+ /* device->p_uuid = NULL; */
- if (mdev->bitmap) /* should no longer be there. */
- drbd_bm_cleanup(mdev);
- __free_page(mdev->md_io_page);
- put_disk(mdev->vdisk);
- blk_cleanup_queue(mdev->rq_queue);
- kfree(mdev->rs_plan_s);
- kfree(mdev);
+ if (device->bitmap) /* should no longer be there. */
+ drbd_bm_cleanup(device);
+ __free_page(device->md_io_page);
+ put_disk(device->vdisk);
+ blk_cleanup_queue(device->rq_queue);
+ kfree(device->rs_plan_s);
+ kfree(first_peer_device(device));
+ kfree(device);
- kref_put(&tconn->kref, &conn_destroy);
+ for_each_connection(connection, resource)
+ kref_put(&connection->kref, drbd_destroy_connection);
+ kref_put(&resource->kref, drbd_destroy_resource);
}
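
drbd_destroy_device() is now a kref release callback: the final kref_put() on the device runs it, and it in turn drops the references the device held on each connection and on the resource. A toy, non-atomic model of that lifecycle (the kernel's kref uses atomic_t, and container_of() just as above):

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

struct kref { int refcount; };          /* kernel version is atomic */

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

static void kref_init(struct kref *k) { k->refcount = 1; }
static void kref_get(struct kref *k)  { k->refcount++; }

static void kref_put(struct kref *k, void (*release)(struct kref *))
{
        if (--k->refcount == 0)
                release(k);     /* last holder destroys the object */
}

struct object { int id; struct kref kref; };

static void object_release(struct kref *kref)
{
        struct object *obj = container_of(kref, struct object, kref);
        printf("destroying object %d\n", obj->id);
        free(obj);
}

int main(void)
{
        struct object *obj = malloc(sizeof(*obj));
        obj->id = 0;
        kref_init(&obj->kref);
        kref_get(&obj->kref);                   /* a second holder */
        kref_put(&obj->kref, object_release);   /* one reference left */
        kref_put(&obj->kref, object_release);   /* destroys here */
        return 0;
}
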
/* One global retry thread, if we need to push back some bio and have it
@@ -2218,19 +2223,19 @@ static void do_retry(struct work_struct *ws)
spin_unlock_irq(&retry->lock);
list_for_each_entry_safe(req, tmp, &writes, tl_requests) {
- struct drbd_conf *mdev = req->w.mdev;
+ struct drbd_device *device = req->device;
struct bio *bio = req->master_bio;
unsigned long start_time = req->start_time;
bool expected;
- expected =
+ expected =
expect(atomic_read(&req->completion_ref) == 0) &&
expect(req->rq_state & RQ_POSTPONED) &&
expect((req->rq_state & RQ_LOCAL_PENDING) == 0 ||
(req->rq_state & RQ_LOCAL_ABORTED) != 0);
if (!expected)
- dev_err(DEV, "req=%p completion_ref=%d rq_state=%x\n",
+ drbd_err(device, "req=%p completion_ref=%d rq_state=%x\n",
req, atomic_read(&req->completion_ref),
req->rq_state);
@@ -2254,8 +2259,8 @@ static void do_retry(struct work_struct *ws)
/* We are not just doing generic_make_request(),
* as we want to keep the start_time information. */
- inc_ap_bio(mdev);
- __drbd_make_request(mdev, bio, start_time);
+ inc_ap_bio(device);
+ __drbd_make_request(device, bio, start_time);
}
}
@@ -2269,17 +2274,38 @@ void drbd_restart_request(struct drbd_request *req)
/* Drop the extra reference that would otherwise
* have been dropped by complete_master_bio.
* do_retry() needs to grab a new one. */
- dec_ap_bio(req->w.mdev);
+ dec_ap_bio(req->device);
queue_work(retry.wq, &retry.worker);
}
+void drbd_destroy_resource(struct kref *kref)
+{
+ struct drbd_resource *resource =
+ container_of(kref, struct drbd_resource, kref);
+
+ idr_destroy(&resource->devices);
+ free_cpumask_var(resource->cpu_mask);
+ kfree(resource->name);
+ kfree(resource);
+}
+
+void drbd_free_resource(struct drbd_resource *resource)
+{
+ struct drbd_connection *connection, *tmp;
+
+ for_each_connection_safe(connection, tmp, resource) {
+ list_del(&connection->connections);
+ kref_put(&connection->kref, drbd_destroy_connection);
+ }
+ kref_put(&resource->kref, drbd_destroy_resource);
+}
static void drbd_cleanup(void)
{
unsigned int i;
- struct drbd_conf *mdev;
- struct drbd_tconn *tconn, *tmp;
+ struct drbd_device *device;
+ struct drbd_resource *resource, *tmp;
unregister_reboot_notifier(&drbd_notifier);
@@ -2299,26 +2325,19 @@ static void drbd_cleanup(void)
drbd_genl_unregister();
- idr_for_each_entry(&minors, mdev, i) {
- idr_remove(&minors, mdev_to_minor(mdev));
- idr_remove(&mdev->tconn->volumes, mdev->vnr);
- destroy_workqueue(mdev->submit.wq);
- del_gendisk(mdev->vdisk);
- /* synchronize_rcu(); No other threads running at this point */
- kref_put(&mdev->kref, &drbd_minor_destroy);
- }
+ idr_for_each_entry(&drbd_devices, device, i)
+ drbd_delete_device(device);
/* not _rcu since there is no other updater anymore; genl already unregistered */
- list_for_each_entry_safe(tconn, tmp, &drbd_tconns, all_tconn) {
- list_del(&tconn->all_tconn); /* not _rcu no proc, not other threads */
- /* synchronize_rcu(); */
- kref_put(&tconn->kref, &conn_destroy);
+ for_each_resource_safe(resource, tmp, &drbd_resources) {
+ list_del(&resource->resources);
+ drbd_free_resource(resource);
}
drbd_destroy_mempools();
unregister_blkdev(DRBD_MAJOR, "drbd");
- idr_destroy(&minors);
+ idr_destroy(&drbd_devices);
printk(KERN_INFO "drbd: module cleanup done.\n");
}
@@ -2332,49 +2351,50 @@ static void drbd_cleanup(void)
*/
static int drbd_congested(void *congested_data, int bdi_bits)
{
- struct drbd_conf *mdev = congested_data;
+ struct drbd_device *device = congested_data;
struct request_queue *q;
char reason = '-';
int r = 0;
- if (!may_inc_ap_bio(mdev)) {
+ if (!may_inc_ap_bio(device)) {
/* DRBD has frozen IO */
r = bdi_bits;
reason = 'd';
goto out;
}
- if (test_bit(CALLBACK_PENDING, &mdev->tconn->flags)) {
+ if (test_bit(CALLBACK_PENDING, &first_peer_device(device)->connection->flags)) {
r |= (1 << BDI_async_congested);
/* Without good local data, we would need to read from remote,
* and that would need the worker thread as well, which is
* currently blocked waiting for that usermode helper to
* finish.
*/
- if (!get_ldev_if_state(mdev, D_UP_TO_DATE))
+ if (!get_ldev_if_state(device, D_UP_TO_DATE))
r |= (1 << BDI_sync_congested);
else
- put_ldev(mdev);
+ put_ldev(device);
r &= bdi_bits;
reason = 'c';
goto out;
}
- if (get_ldev(mdev)) {
- q = bdev_get_queue(mdev->ldev->backing_bdev);
+ if (get_ldev(device)) {
+ q = bdev_get_queue(device->ldev->backing_bdev);
r = bdi_congested(&q->backing_dev_info, bdi_bits);
- put_ldev(mdev);
+ put_ldev(device);
if (r)
reason = 'b';
}
- if (bdi_bits & (1 << BDI_async_congested) && test_bit(NET_CONGESTED, &mdev->tconn->flags)) {
+ if (bdi_bits & (1 << BDI_async_congested) &&
+ test_bit(NET_CONGESTED, &first_peer_device(device)->connection->flags)) {
r |= (1 << BDI_async_congested);
reason = reason == 'b' ? 'a' : 'n';
}
out:
- mdev->congestion_reason = reason;
+ device->congestion_reason = reason;
return r;
}
@@ -2385,45 +2405,72 @@ static void drbd_init_workqueue(struct drbd_work_queue* wq)
init_waitqueue_head(&wq->q_wait);
}
-struct drbd_tconn *conn_get_by_name(const char *name)
+struct completion_work {
+ struct drbd_work w;
+ struct completion done;
+};
+
+static int w_complete(struct drbd_work *w, int cancel)
+{
+ struct completion_work *completion_work =
+ container_of(w, struct completion_work, w);
+
+ complete(&completion_work->done);
+ return 0;
+}
+
+void drbd_flush_workqueue(struct drbd_work_queue *work_queue)
+{
+ struct completion_work completion_work;
+
+ completion_work.w.cb = w_complete;
+ init_completion(&completion_work.done);
+ drbd_queue_work(work_queue, &completion_work.w);
+ wait_for_completion(&completion_work.done);
+}
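
The new drbd_flush_workqueue() drains by enqueueing a sentinel: since the queue is FIFO, the sentinel's callback firing proves everything queued before it has already run. A self-contained pthread analogue of the trick (all names here are hypothetical, not the kernel API; compile with -pthread):

#include <pthread.h>
#include <semaphore.h>
#include <stdio.h>

struct work { void (*cb)(struct work *); struct work *next; };

static struct work *head;
static struct work **tail = &head;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static sem_t pending;

static void queue_work(struct work *w)
{
        pthread_mutex_lock(&lock);
        w->next = NULL;
        *tail = w;
        tail = &w->next;
        pthread_mutex_unlock(&lock);
        sem_post(&pending);
}

static void *worker(void *arg)
{
        (void)arg;
        for (;;) {
                struct work *w;

                sem_wait(&pending);
                pthread_mutex_lock(&lock);
                w = head;
                head = w->next;
                if (!head)
                        tail = &head;
                pthread_mutex_unlock(&lock);
                w->cb(w);       /* FIFO: runs in queueing order */
        }
        return NULL;
}

struct completion_work { struct work w; sem_t done; };

static void w_complete(struct work *w)
{
        struct completion_work *cw = (struct completion_work *)w;
        sem_post(&cw->done);
}

static void flush_workqueue(void)
{
        struct completion_work cw;

        cw.w.cb = w_complete;
        sem_init(&cw.done, 0, 0);
        queue_work(&cw.w);
        sem_wait(&cw.done);     /* all earlier work has completed */
        sem_destroy(&cw.done);
}

static void hello(struct work *w) { (void)w; puts("work ran"); }

int main(void)
{
        pthread_t t;
        struct work w = { .cb = hello };

        sem_init(&pending, 0, 0);
        pthread_create(&t, NULL, worker, NULL);
        queue_work(&w);
        flush_workqueue();      /* returns only after "work ran" prints */
        puts("flushed");
        return 0;
}
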
+
+struct drbd_resource *drbd_find_resource(const char *name)
{
- struct drbd_tconn *tconn;
+ struct drbd_resource *resource;
if (!name || !name[0])
return NULL;
rcu_read_lock();
- list_for_each_entry_rcu(tconn, &drbd_tconns, all_tconn) {
- if (!strcmp(tconn->name, name)) {
- kref_get(&tconn->kref);
+ for_each_resource_rcu(resource, &drbd_resources) {
+ if (!strcmp(resource->name, name)) {
+ kref_get(&resource->kref);
goto found;
}
}
- tconn = NULL;
+ resource = NULL;
found:
rcu_read_unlock();
- return tconn;
+ return resource;
}
-struct drbd_tconn *conn_get_by_addrs(void *my_addr, int my_addr_len,
+struct drbd_connection *conn_get_by_addrs(void *my_addr, int my_addr_len,
void *peer_addr, int peer_addr_len)
{
- struct drbd_tconn *tconn;
+ struct drbd_resource *resource;
+ struct drbd_connection *connection;
rcu_read_lock();
- list_for_each_entry_rcu(tconn, &drbd_tconns, all_tconn) {
- if (tconn->my_addr_len == my_addr_len &&
- tconn->peer_addr_len == peer_addr_len &&
- !memcmp(&tconn->my_addr, my_addr, my_addr_len) &&
- !memcmp(&tconn->peer_addr, peer_addr, peer_addr_len)) {
- kref_get(&tconn->kref);
- goto found;
+ for_each_resource_rcu(resource, &drbd_resources) {
+ for_each_connection_rcu(connection, resource) {
+ if (connection->my_addr_len == my_addr_len &&
+ connection->peer_addr_len == peer_addr_len &&
+ !memcmp(&connection->my_addr, my_addr, my_addr_len) &&
+ !memcmp(&connection->peer_addr, peer_addr, peer_addr_len)) {
+ kref_get(&connection->kref);
+ goto found;
+ }
}
}
- tconn = NULL;
+ connection = NULL;
found:
rcu_read_unlock();
- return tconn;
+ return connection;
}
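
Both lookup helpers obey the same rule: take the reference while the RCU read side still protects the list, then unlock; otherwise the object could be freed between lookup and first use. A simplified user-space analogue, with a rwlock standing in for rcu_read_lock() and a plain counter where the kernel uses the atomic kref_get():

#include <pthread.h>
#include <stdio.h>
#include <string.h>

struct resource { const char *name; int refcount; struct resource *next; };

static struct resource *resources;
static pthread_rwlock_t rwlock = PTHREAD_RWLOCK_INITIALIZER;

static struct resource *find_resource(const char *name)
{
        struct resource *r;

        pthread_rwlock_rdlock(&rwlock);
        for (r = resources; r; r = r->next) {
                if (!strcmp(r->name, name)) {
                        /* pin while still protected; the kernel does
                         * this with an atomic kref_get() */
                        r->refcount++;
                        break;
                }
        }
        pthread_rwlock_unlock(&rwlock);
        return r;       /* caller owns one reference, or NULL */
}

int main(void)
{
        struct resource r0 = { "r0", 1, NULL };
        struct resource *r;

        resources = &r0;
        r = find_resource("r0");
        printf("found %s, refcount now %d\n", r->name, r->refcount);
        return 0;
}
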
static int drbd_alloc_socket(struct drbd_socket *socket)
@@ -2443,29 +2490,30 @@ static void drbd_free_socket(struct drbd_socket *socket)
free_page((unsigned long) socket->rbuf);
}
-void conn_free_crypto(struct drbd_tconn *tconn)
+void conn_free_crypto(struct drbd_connection *connection)
{
- drbd_free_sock(tconn);
+ drbd_free_sock(connection);
- crypto_free_hash(tconn->csums_tfm);
- crypto_free_hash(tconn->verify_tfm);
- crypto_free_hash(tconn->cram_hmac_tfm);
- crypto_free_hash(tconn->integrity_tfm);
- crypto_free_hash(tconn->peer_integrity_tfm);
- kfree(tconn->int_dig_in);
- kfree(tconn->int_dig_vv);
+ crypto_free_hash(connection->csums_tfm);
+ crypto_free_hash(connection->verify_tfm);
+ crypto_free_hash(connection->cram_hmac_tfm);
+ crypto_free_hash(connection->integrity_tfm);
+ crypto_free_hash(connection->peer_integrity_tfm);
+ kfree(connection->int_dig_in);
+ kfree(connection->int_dig_vv);
- tconn->csums_tfm = NULL;
- tconn->verify_tfm = NULL;
- tconn->cram_hmac_tfm = NULL;
- tconn->integrity_tfm = NULL;
- tconn->peer_integrity_tfm = NULL;
- tconn->int_dig_in = NULL;
- tconn->int_dig_vv = NULL;
+ connection->csums_tfm = NULL;
+ connection->verify_tfm = NULL;
+ connection->cram_hmac_tfm = NULL;
+ connection->integrity_tfm = NULL;
+ connection->peer_integrity_tfm = NULL;
+ connection->int_dig_in = NULL;
+ connection->int_dig_vv = NULL;
}
-int set_resource_options(struct drbd_tconn *tconn, struct res_opts *res_opts)
+int set_resource_options(struct drbd_resource *resource, struct res_opts *res_opts)
{
+ struct drbd_connection *connection;
cpumask_var_t new_cpu_mask;
int err;
@@ -2478,22 +2526,24 @@ int set_resource_options(struct drbd_tconn *tconn, struct res_opts *res_opts)
/* silently ignore cpu mask on UP kernel */
if (nr_cpu_ids > 1 && res_opts->cpu_mask[0] != 0) {
- /* FIXME: Get rid of constant 32 here */
- err = bitmap_parse(res_opts->cpu_mask, 32,
+ err = bitmap_parse(res_opts->cpu_mask, DRBD_CPU_MASK_SIZE,
cpumask_bits(new_cpu_mask), nr_cpu_ids);
if (err) {
- conn_warn(tconn, "bitmap_parse() failed with %d\n", err);
+ drbd_warn(resource, "bitmap_parse() failed with %d\n", err);
/* retcode = ERR_CPU_MASK_PARSE; */
goto fail;
}
}
- tconn->res_opts = *res_opts;
- if (!cpumask_equal(tconn->cpu_mask, new_cpu_mask)) {
- cpumask_copy(tconn->cpu_mask, new_cpu_mask);
- drbd_calc_cpu_mask(tconn);
- tconn->receiver.reset_cpu_mask = 1;
- tconn->asender.reset_cpu_mask = 1;
- tconn->worker.reset_cpu_mask = 1;
+ resource->res_opts = *res_opts;
+ if (cpumask_empty(new_cpu_mask))
+ drbd_calc_cpu_mask(&new_cpu_mask);
+ if (!cpumask_equal(resource->cpu_mask, new_cpu_mask)) {
+ cpumask_copy(resource->cpu_mask, new_cpu_mask);
+ for_each_connection_rcu(connection, resource) {
+ connection->receiver.reset_cpu_mask = 1;
+ connection->asender.reset_cpu_mask = 1;
+ connection->worker.reset_cpu_mask = 1;
+ }
}
err = 0;
@@ -2503,146 +2553,177 @@ fail:
}
+struct drbd_resource *drbd_create_resource(const char *name)
+{
+ struct drbd_resource *resource;
+
+ resource = kzalloc(sizeof(struct drbd_resource), GFP_KERNEL);
+ if (!resource)
+ goto fail;
+ resource->name = kstrdup(name, GFP_KERNEL);
+ if (!resource->name)
+ goto fail_free_resource;
+ if (!zalloc_cpumask_var(&resource->cpu_mask, GFP_KERNEL))
+ goto fail_free_name;
+ kref_init(&resource->kref);
+ idr_init(&resource->devices);
+ INIT_LIST_HEAD(&resource->connections);
+ list_add_tail_rcu(&resource->resources, &drbd_resources);
+ mutex_init(&resource->conf_update);
+ spin_lock_init(&resource->req_lock);
+ return resource;
+
+fail_free_name:
+ kfree(resource->name);
+fail_free_resource:
+ kfree(resource);
+fail:
+ return NULL;
+}
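
drbd_create_resource() uses the staged goto unwind that runs through this whole file: each failure label releases exactly what was acquired before it, in reverse order, so there is one success path and no duplicated cleanup. The shape, reduced to a standalone example:

#include <stdlib.h>
#include <string.h>

struct thing { char *name; int *buf; };

static struct thing *thing_create(const char *name)
{
        struct thing *t = calloc(1, sizeof(*t));
        if (!t)
                goto fail;
        t->name = strdup(name);
        if (!t->name)
                goto fail_free_thing;
        t->buf = calloc(16, sizeof(*t->buf));
        if (!t->buf)
                goto fail_free_name;
        return t;

fail_free_name:         /* unwinds in reverse order of acquisition */
        free(t->name);
fail_free_thing:
        free(t);
fail:
        return NULL;
}

int main(void)
{
        struct thing *t = thing_create("demo");

        if (t) {
                free(t->buf);
                free(t->name);
                free(t);
        }
        return 0;
}
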
+
/* caller must be under genl_lock() */
-struct drbd_tconn *conn_create(const char *name, struct res_opts *res_opts)
+struct drbd_connection *conn_create(const char *name, struct res_opts *res_opts)
{
- struct drbd_tconn *tconn;
+ struct drbd_resource *resource;
+ struct drbd_connection *connection;
- tconn = kzalloc(sizeof(struct drbd_tconn), GFP_KERNEL);
- if (!tconn)
+ connection = kzalloc(sizeof(struct drbd_connection), GFP_KERNEL);
+ if (!connection)
return NULL;
- tconn->name = kstrdup(name, GFP_KERNEL);
- if (!tconn->name)
+ if (drbd_alloc_socket(&connection->data))
goto fail;
-
- if (drbd_alloc_socket(&tconn->data))
- goto fail;
- if (drbd_alloc_socket(&tconn->meta))
+ if (drbd_alloc_socket(&connection->meta))
goto fail;
- if (!zalloc_cpumask_var(&tconn->cpu_mask, GFP_KERNEL))
+ connection->current_epoch = kzalloc(sizeof(struct drbd_epoch), GFP_KERNEL);
+ if (!connection->current_epoch)
goto fail;
- if (set_resource_options(tconn, res_opts))
- goto fail;
+ INIT_LIST_HEAD(&connection->transfer_log);
- tconn->current_epoch = kzalloc(sizeof(struct drbd_epoch), GFP_KERNEL);
- if (!tconn->current_epoch)
- goto fail;
+ INIT_LIST_HEAD(&connection->current_epoch->list);
+ connection->epochs = 1;
+ spin_lock_init(&connection->epoch_lock);
+ connection->write_ordering = WO_bdev_flush;
- INIT_LIST_HEAD(&tconn->transfer_log);
+ connection->send.seen_any_write_yet = false;
+ connection->send.current_epoch_nr = 0;
+ connection->send.current_epoch_writes = 0;
- INIT_LIST_HEAD(&tconn->current_epoch->list);
- tconn->epochs = 1;
- spin_lock_init(&tconn->epoch_lock);
- tconn->write_ordering = WO_bdev_flush;
+ resource = drbd_create_resource(name);
+ if (!resource)
+ goto fail;
- tconn->send.seen_any_write_yet = false;
- tconn->send.current_epoch_nr = 0;
- tconn->send.current_epoch_writes = 0;
+ connection->cstate = C_STANDALONE;
+ mutex_init(&connection->cstate_mutex);
+ init_waitqueue_head(&connection->ping_wait);
+ idr_init(&connection->peer_devices);
- tconn->cstate = C_STANDALONE;
- mutex_init(&tconn->cstate_mutex);
- spin_lock_init(&tconn->req_lock);
- mutex_init(&tconn->conf_update);
- init_waitqueue_head(&tconn->ping_wait);
- idr_init(&tconn->volumes);
+ drbd_init_workqueue(&connection->sender_work);
+ mutex_init(&connection->data.mutex);
+ mutex_init(&connection->meta.mutex);
- drbd_init_workqueue(&tconn->sender_work);
- mutex_init(&tconn->data.mutex);
- mutex_init(&tconn->meta.mutex);
+ drbd_thread_init(resource, &connection->receiver, drbd_receiver, "receiver");
+ connection->receiver.connection = connection;
+ drbd_thread_init(resource, &connection->worker, drbd_worker, "worker");
+ connection->worker.connection = connection;
+ drbd_thread_init(resource, &connection->asender, drbd_asender, "asender");
+ connection->asender.connection = connection;
- drbd_thread_init(tconn, &tconn->receiver, drbdd_init, "receiver");
- drbd_thread_init(tconn, &tconn->worker, drbd_worker, "worker");
- drbd_thread_init(tconn, &tconn->asender, drbd_asender, "asender");
+ kref_init(&connection->kref);
- kref_init(&tconn->kref);
- list_add_tail_rcu(&tconn->all_tconn, &drbd_tconns);
+ connection->resource = resource;
- return tconn;
+ if (set_resource_options(resource, res_opts))
+ goto fail_resource;
-fail:
- kfree(tconn->current_epoch);
- free_cpumask_var(tconn->cpu_mask);
- drbd_free_socket(&tconn->meta);
- drbd_free_socket(&tconn->data);
- kfree(tconn->name);
- kfree(tconn);
+ kref_get(&resource->kref);
+ list_add_tail_rcu(&connection->connections, &resource->connections);
+ return connection;
+fail_resource:
+ list_del(&resource->resources);
+ drbd_free_resource(resource);
+fail:
+ kfree(connection->current_epoch);
+ drbd_free_socket(&connection->meta);
+ drbd_free_socket(&connection->data);
+ kfree(connection);
return NULL;
}
-void conn_destroy(struct kref *kref)
+void drbd_destroy_connection(struct kref *kref)
{
- struct drbd_tconn *tconn = container_of(kref, struct drbd_tconn, kref);
+ struct drbd_connection *connection = container_of(kref, struct drbd_connection, kref);
+ struct drbd_resource *resource = connection->resource;
- if (atomic_read(&tconn->current_epoch->epoch_size) != 0)
- conn_err(tconn, "epoch_size:%d\n", atomic_read(&tconn->current_epoch->epoch_size));
- kfree(tconn->current_epoch);
+ if (atomic_read(&connection->current_epoch->epoch_size) != 0)
+ drbd_err(connection, "epoch_size:%d\n", atomic_read(&connection->current_epoch->epoch_size));
+ kfree(connection->current_epoch);
- idr_destroy(&tconn->volumes);
+ idr_destroy(&connection->peer_devices);
- free_cpumask_var(tconn->cpu_mask);
- drbd_free_socket(&tconn->meta);
- drbd_free_socket(&tconn->data);
- kfree(tconn->name);
- kfree(tconn->int_dig_in);
- kfree(tconn->int_dig_vv);
- kfree(tconn);
+ drbd_free_socket(&connection->meta);
+ drbd_free_socket(&connection->data);
+ kfree(connection->int_dig_in);
+ kfree(connection->int_dig_vv);
+ kfree(connection);
+ kref_put(&resource->kref, drbd_destroy_resource);
}
-int init_submitter(struct drbd_conf *mdev)
+static int init_submitter(struct drbd_device *device)
{
/* opencoded create_singlethread_workqueue(),
* to be able to say "drbd%d", ..., minor */
- mdev->submit.wq = alloc_workqueue("drbd%u_submit",
- WQ_UNBOUND | WQ_MEM_RECLAIM, 1, mdev->minor);
- if (!mdev->submit.wq)
+ device->submit.wq = alloc_workqueue("drbd%u_submit",
+ WQ_UNBOUND | WQ_MEM_RECLAIM, 1, device->minor);
+ if (!device->submit.wq)
return -ENOMEM;
- INIT_WORK(&mdev->submit.worker, do_submit);
- spin_lock_init(&mdev->submit.lock);
- INIT_LIST_HEAD(&mdev->submit.writes);
+ INIT_WORK(&device->submit.worker, do_submit);
+ spin_lock_init(&device->submit.lock);
+ INIT_LIST_HEAD(&device->submit.writes);
return 0;
}
-enum drbd_ret_code conn_new_minor(struct drbd_tconn *tconn, unsigned int minor, int vnr)
+enum drbd_ret_code drbd_create_device(struct drbd_resource *resource, unsigned int minor, int vnr)
{
- struct drbd_conf *mdev;
+ struct drbd_connection *connection;
+ struct drbd_device *device;
+ struct drbd_peer_device *peer_device, *tmp_peer_device;
struct gendisk *disk;
struct request_queue *q;
- int vnr_got = vnr;
- int minor_got = minor;
+ int id;
enum drbd_ret_code err = ERR_NOMEM;
- mdev = minor_to_mdev(minor);
- if (mdev)
+ device = minor_to_device(minor);
+ if (device)
return ERR_MINOR_EXISTS;
/* GFP_KERNEL, we are outside of all write-out paths */
- mdev = kzalloc(sizeof(struct drbd_conf), GFP_KERNEL);
- if (!mdev)
+ device = kzalloc(sizeof(struct drbd_device), GFP_KERNEL);
+ if (!device)
return ERR_NOMEM;
+ kref_init(&device->kref);
- kref_get(&tconn->kref);
- mdev->tconn = tconn;
+ kref_get(&resource->kref);
+ device->resource = resource;
+ device->minor = minor;
+ device->vnr = vnr;
- mdev->minor = minor;
- mdev->vnr = vnr;
-
- drbd_init_set_defaults(mdev);
+ drbd_init_set_defaults(device);
q = blk_alloc_queue(GFP_KERNEL);
if (!q)
goto out_no_q;
- mdev->rq_queue = q;
- q->queuedata = mdev;
+ device->rq_queue = q;
+ q->queuedata = device;
disk = alloc_disk(1);
if (!disk)
goto out_no_disk;
- mdev->vdisk = disk;
+ device->vdisk = disk;
set_disk_ro(disk, true);
@@ -2651,14 +2732,14 @@ enum drbd_ret_code conn_new_minor(struct drbd_tconn *tconn, unsigned int minor,
disk->first_minor = minor;
disk->fops = &drbd_ops;
sprintf(disk->disk_name, "drbd%d", minor);
- disk->private_data = mdev;
+ disk->private_data = device;
- mdev->this_bdev = bdget(MKDEV(DRBD_MAJOR, minor));
+ device->this_bdev = bdget(MKDEV(DRBD_MAJOR, minor));
/* we have no partitions. we contain only ourselves. */
- mdev->this_bdev->bd_contains = mdev->this_bdev;
+ device->this_bdev->bd_contains = device->this_bdev;
q->backing_dev_info.congested_fn = drbd_congested;
- q->backing_dev_info.congested_data = mdev;
+ q->backing_dev_info.congested_data = device;
blk_queue_make_request(q, drbd_make_request);
blk_queue_flush(q, REQ_FLUSH | REQ_FUA);
@@ -2667,70 +2748,125 @@ enum drbd_ret_code conn_new_minor(struct drbd_tconn *tconn, unsigned int minor,
blk_queue_max_hw_sectors(q, DRBD_MAX_BIO_SIZE_SAFE >> 8);
blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
blk_queue_merge_bvec(q, drbd_merge_bvec);
- q->queue_lock = &mdev->tconn->req_lock; /* needed since we use */
+ q->queue_lock = &resource->req_lock;
- mdev->md_io_page = alloc_page(GFP_KERNEL);
- if (!mdev->md_io_page)
+ device->md_io_page = alloc_page(GFP_KERNEL);
+ if (!device->md_io_page)
goto out_no_io_page;
- if (drbd_bm_init(mdev))
+ if (drbd_bm_init(device))
goto out_no_bitmap;
- mdev->read_requests = RB_ROOT;
- mdev->write_requests = RB_ROOT;
+ device->read_requests = RB_ROOT;
+ device->write_requests = RB_ROOT;
- minor_got = idr_alloc(&minors, mdev, minor, minor + 1, GFP_KERNEL);
- if (minor_got < 0) {
- if (minor_got == -ENOSPC) {
+ id = idr_alloc(&drbd_devices, device, minor, minor + 1, GFP_KERNEL);
+ if (id < 0) {
+ if (id == -ENOSPC) {
err = ERR_MINOR_EXISTS;
drbd_msg_put_info("requested minor exists already");
}
goto out_no_minor_idr;
}
+ kref_get(&device->kref);
- vnr_got = idr_alloc(&tconn->volumes, mdev, vnr, vnr + 1, GFP_KERNEL);
- if (vnr_got < 0) {
- if (vnr_got == -ENOSPC) {
- err = ERR_INVALID_REQUEST;
- drbd_msg_put_info("requested volume exists already");
+ id = idr_alloc(&resource->devices, device, vnr, vnr + 1, GFP_KERNEL);
+ if (id < 0) {
+ if (id == -ENOSPC) {
+ err = ERR_MINOR_EXISTS;
+ drbd_msg_put_info("requested minor exists already");
}
goto out_idr_remove_minor;
}
+ kref_get(&device->kref);
+
+ INIT_LIST_HEAD(&device->peer_devices);
+ for_each_connection(connection, resource) {
+ peer_device = kzalloc(sizeof(struct drbd_peer_device), GFP_KERNEL);
+ if (!peer_device)
+ goto out_idr_remove_from_resource;
+ peer_device->connection = connection;
+ peer_device->device = device;
+
+ list_add(&peer_device->peer_devices, &device->peer_devices);
+ kref_get(&device->kref);
- if (init_submitter(mdev)) {
+ id = idr_alloc(&connection->peer_devices, peer_device, vnr, vnr + 1, GFP_KERNEL);
+ if (id < 0) {
+ if (id == -ENOSPC) {
+ err = ERR_INVALID_REQUEST;
+ drbd_msg_put_info("requested volume exists already");
+ }
+ goto out_idr_remove_from_resource;
+ }
+ kref_get(&connection->kref);
+ }
+
+ if (init_submitter(device)) {
err = ERR_NOMEM;
drbd_msg_put_info("unable to create submit workqueue");
goto out_idr_remove_vol;
}
add_disk(disk);
- kref_init(&mdev->kref); /* one ref for both idrs and the the add_disk */
/* inherit the connection state */
- mdev->state.conn = tconn->cstate;
- if (mdev->state.conn == C_WF_REPORT_PARAMS)
- drbd_connected(mdev);
+ device->state.conn = first_connection(resource)->cstate;
+ if (device->state.conn == C_WF_REPORT_PARAMS) {
+ for_each_peer_device(peer_device, device)
+ drbd_connected(peer_device);
+ }
return NO_ERROR;
out_idr_remove_vol:
- idr_remove(&tconn->volumes, vnr_got);
+ idr_remove(&connection->peer_devices, vnr);
+out_idr_remove_from_resource:
+ for_each_connection(connection, resource) {
+ peer_device = idr_find(&connection->peer_devices, vnr);
+ if (peer_device) {
+ idr_remove(&connection->peer_devices, vnr);
+ kref_put(&connection->kref, drbd_destroy_connection);
+ }
+ }
+ for_each_peer_device_safe(peer_device, tmp_peer_device, device) {
+ list_del(&peer_device->peer_devices);
+ kfree(peer_device);
+ }
+ idr_remove(&resource->devices, vnr);
out_idr_remove_minor:
- idr_remove(&minors, minor_got);
+ idr_remove(&drbd_devices, minor);
synchronize_rcu();
out_no_minor_idr:
- drbd_bm_cleanup(mdev);
+ drbd_bm_cleanup(device);
out_no_bitmap:
- __free_page(mdev->md_io_page);
+ __free_page(device->md_io_page);
out_no_io_page:
put_disk(disk);
out_no_disk:
blk_cleanup_queue(q);
out_no_q:
- kfree(mdev);
- kref_put(&tconn->kref, &conn_destroy);
+ kref_put(&resource->kref, drbd_destroy_resource);
+ kfree(device);
return err;
}
+void drbd_delete_device(struct drbd_device *device)
+{
+ struct drbd_resource *resource = device->resource;
+ struct drbd_connection *connection;
+ int refs = 3;
+
+ for_each_connection(connection, resource) {
+ idr_remove(&connection->peer_devices, device->vnr);
+ refs++;
+ }
+ idr_remove(&resource->devices, device->vnr);
+ idr_remove(&drbd_devices, device_to_minor(device));
+ del_gendisk(device->vdisk);
+ synchronize_rcu();
+ kref_sub(&device->kref, refs, drbd_destroy_device);
+}
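
drbd_delete_device() counts its reference drops (a base of three plus one per connection) and releases them in a single kref_sub(), so drbd_destroy_device() runs at most once, at the very end. A toy model of kref_sub() semantics (non-atomic; the kernel version decrements atomically):

#include <stdio.h>

struct kref { int refcount; };

static void kref_sub(struct kref *k, int n, void (*release)(struct kref *))
{
        k->refcount -= n;
        if (k->refcount == 0)
                release(k);     /* all outstanding references gone */
}

static void destroy(struct kref *k)
{
        (void)k;
        puts("device destroyed");
}

int main(void)
{
        /* several references accumulated at creation, dropped at once */
        struct kref k = { .refcount = 5 };

        kref_sub(&k, 5, destroy);
        return 0;
}
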
+
int __init drbd_init(void)
{
int err;
@@ -2761,10 +2897,10 @@ int __init drbd_init(void)
init_waitqueue_head(&drbd_pp_wait);
drbd_proc = NULL; /* play safe for drbd_cleanup */
- idr_init(&minors);
+ idr_init(&drbd_devices);
rwlock_init(&global_state_lock);
- INIT_LIST_HEAD(&drbd_tconns);
+ INIT_LIST_HEAD(&drbd_resources);
err = drbd_genl_register();
if (err) {
@@ -2822,37 +2958,39 @@ void drbd_free_bc(struct drbd_backing_dev *ldev)
kfree(ldev);
}
-void drbd_free_sock(struct drbd_tconn *tconn)
+void drbd_free_sock(struct drbd_connection *connection)
{
- if (tconn->data.socket) {
- mutex_lock(&tconn->data.mutex);
- kernel_sock_shutdown(tconn->data.socket, SHUT_RDWR);
- sock_release(tconn->data.socket);
- tconn->data.socket = NULL;
- mutex_unlock(&tconn->data.mutex);
+ if (connection->data.socket) {
+ mutex_lock(&connection->data.mutex);
+ kernel_sock_shutdown(connection->data.socket, SHUT_RDWR);
+ sock_release(connection->data.socket);
+ connection->data.socket = NULL;
+ mutex_unlock(&connection->data.mutex);
}
- if (tconn->meta.socket) {
- mutex_lock(&tconn->meta.mutex);
- kernel_sock_shutdown(tconn->meta.socket, SHUT_RDWR);
- sock_release(tconn->meta.socket);
- tconn->meta.socket = NULL;
- mutex_unlock(&tconn->meta.mutex);
+ if (connection->meta.socket) {
+ mutex_lock(&connection->meta.mutex);
+ kernel_sock_shutdown(connection->meta.socket, SHUT_RDWR);
+ sock_release(connection->meta.socket);
+ connection->meta.socket = NULL;
+ mutex_unlock(&connection->meta.mutex);
}
}
/* meta data management */
-void conn_md_sync(struct drbd_tconn *tconn)
+void conn_md_sync(struct drbd_connection *connection)
{
- struct drbd_conf *mdev;
+ struct drbd_peer_device *peer_device;
int vnr;
rcu_read_lock();
- idr_for_each_entry(&tconn->volumes, mdev, vnr) {
- kref_get(&mdev->kref);
+ idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
+ struct drbd_device *device = peer_device->device;
+
+ kref_get(&device->kref);
rcu_read_unlock();
- drbd_md_sync(mdev);
- kref_put(&mdev->kref, &drbd_minor_destroy);
+ drbd_md_sync(device);
+ kref_put(&device->kref, drbd_destroy_device);
rcu_read_lock();
}
rcu_read_unlock();
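
conn_md_sync() shows the sanctioned way to sleep inside an RCU-protected idr walk: pin the current entry with kref_get(), drop the read lock, do the blocking work, then re-take the lock before advancing. The same loop shape as a runnable toy (the lock functions are deliberate no-op stand-ins):

#include <stdio.h>

struct device { int id; int refcount; };

static struct device devices[3] = { { 0, 1 }, { 1, 1 }, { 2, 1 } };

static void rcu_read_lock(void)   { /* no-op stand-in */ }
static void rcu_read_unlock(void) { /* no-op stand-in */ }

static void md_sync(struct device *d) { printf("sync device %d\n", d->id); }

int main(void)
{
        rcu_read_lock();
        for (int i = 0; i < 3; i++) {
                struct device *d = &devices[i];

                d->refcount++;          /* kref_get: pin across unlock */
                rcu_read_unlock();
                md_sync(d);             /* may block; lock not held */
                d->refcount--;          /* kref_put */
                rcu_read_lock();
        }
        rcu_read_unlock();
        return 0;
}
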
@@ -2883,7 +3021,7 @@ struct meta_data_on_disk {
-void drbd_md_write(struct drbd_conf *mdev, void *b)
+void drbd_md_write(struct drbd_device *device, void *b)
{
struct meta_data_on_disk *buffer = b;
sector_t sector;
@@ -2891,39 +3029,39 @@ void drbd_md_write(struct drbd_conf *mdev, void *b)
memset(buffer, 0, sizeof(*buffer));
- buffer->la_size_sect = cpu_to_be64(drbd_get_capacity(mdev->this_bdev));
+ buffer->la_size_sect = cpu_to_be64(drbd_get_capacity(device->this_bdev));
for (i = UI_CURRENT; i < UI_SIZE; i++)
- buffer->uuid[i] = cpu_to_be64(mdev->ldev->md.uuid[i]);
- buffer->flags = cpu_to_be32(mdev->ldev->md.flags);
+ buffer->uuid[i] = cpu_to_be64(device->ldev->md.uuid[i]);
+ buffer->flags = cpu_to_be32(device->ldev->md.flags);
buffer->magic = cpu_to_be32(DRBD_MD_MAGIC_84_UNCLEAN);
- buffer->md_size_sect = cpu_to_be32(mdev->ldev->md.md_size_sect);
- buffer->al_offset = cpu_to_be32(mdev->ldev->md.al_offset);
- buffer->al_nr_extents = cpu_to_be32(mdev->act_log->nr_elements);
+ buffer->md_size_sect = cpu_to_be32(device->ldev->md.md_size_sect);
+ buffer->al_offset = cpu_to_be32(device->ldev->md.al_offset);
+ buffer->al_nr_extents = cpu_to_be32(device->act_log->nr_elements);
buffer->bm_bytes_per_bit = cpu_to_be32(BM_BLOCK_SIZE);
- buffer->device_uuid = cpu_to_be64(mdev->ldev->md.device_uuid);
+ buffer->device_uuid = cpu_to_be64(device->ldev->md.device_uuid);
- buffer->bm_offset = cpu_to_be32(mdev->ldev->md.bm_offset);
- buffer->la_peer_max_bio_size = cpu_to_be32(mdev->peer_max_bio_size);
+ buffer->bm_offset = cpu_to_be32(device->ldev->md.bm_offset);
+ buffer->la_peer_max_bio_size = cpu_to_be32(device->peer_max_bio_size);
- buffer->al_stripes = cpu_to_be32(mdev->ldev->md.al_stripes);
- buffer->al_stripe_size_4k = cpu_to_be32(mdev->ldev->md.al_stripe_size_4k);
+ buffer->al_stripes = cpu_to_be32(device->ldev->md.al_stripes);
+ buffer->al_stripe_size_4k = cpu_to_be32(device->ldev->md.al_stripe_size_4k);
- D_ASSERT(drbd_md_ss(mdev->ldev) == mdev->ldev->md.md_offset);
- sector = mdev->ldev->md.md_offset;
+ D_ASSERT(device, drbd_md_ss(device->ldev) == device->ldev->md.md_offset);
+ sector = device->ldev->md.md_offset;
- if (drbd_md_sync_page_io(mdev, mdev->ldev, sector, WRITE)) {
+ if (drbd_md_sync_page_io(device, device->ldev, sector, WRITE)) {
/* this was a try anyway ... */
- dev_err(DEV, "meta data update failed!\n");
- drbd_chk_io_error(mdev, 1, DRBD_META_IO_ERROR);
+ drbd_err(device, "meta data update failed!\n");
+ drbd_chk_io_error(device, 1, DRBD_META_IO_ERROR);
}
}
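
Every field drbd_md_write() stores goes through cpu_to_be32()/cpu_to_be64(), so the on-disk superblock is byte-identical no matter which architecture wrote it. A portable user-space equivalent serializes through explicit shifts instead of casting structs:

#include <stdint.h>
#include <stdio.h>

/* Big-endian serialization independent of host byte order. */
static void put_be32(uint8_t *p, uint32_t v)
{
        p[0] = v >> 24;
        p[1] = v >> 16;
        p[2] = v >> 8;
        p[3] = v;
}

int main(void)
{
        uint8_t buf[4];

        put_be32(buf, 0x12345678u);     /* same four bytes on any host */
        printf("%02x %02x %02x %02x\n", buf[0], buf[1], buf[2], buf[3]);
        return 0;
}
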
/**
* drbd_md_sync() - Writes the meta data super block if the MD_DIRTY flag bit is set
- * @mdev: DRBD device.
+ * @device: DRBD device.
*/
-void drbd_md_sync(struct drbd_conf *mdev)
+void drbd_md_sync(struct drbd_device *device)
{
struct meta_data_on_disk *buffer;
@@ -2931,32 +3069,32 @@ void drbd_md_sync(struct drbd_conf *mdev)
BUILD_BUG_ON(UI_SIZE != 4);
BUILD_BUG_ON(sizeof(struct meta_data_on_disk) != 4096);
- del_timer(&mdev->md_sync_timer);
+ del_timer(&device->md_sync_timer);
/* timer may be rearmed by drbd_md_mark_dirty() now. */
- if (!test_and_clear_bit(MD_DIRTY, &mdev->flags))
+ if (!test_and_clear_bit(MD_DIRTY, &device->flags))
return;
/* We use here D_FAILED and not D_ATTACHING because we try to write
* metadata even if we detach due to a disk failure! */
- if (!get_ldev_if_state(mdev, D_FAILED))
+ if (!get_ldev_if_state(device, D_FAILED))
return;
- buffer = drbd_md_get_buffer(mdev);
+ buffer = drbd_md_get_buffer(device);
if (!buffer)
goto out;
- drbd_md_write(mdev, buffer);
+ drbd_md_write(device, buffer);
- /* Update mdev->ldev->md.la_size_sect,
+ /* Update device->ldev->md.la_size_sect,
* since we updated it on metadata. */
- mdev->ldev->md.la_size_sect = drbd_get_capacity(mdev->this_bdev);
+ device->ldev->md.la_size_sect = drbd_get_capacity(device->this_bdev);
- drbd_md_put_buffer(mdev);
+ drbd_md_put_buffer(device);
out:
- put_ldev(mdev);
+ put_ldev(device);
}
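
drbd_md_sync() and drbd_md_mark_dirty() (below) form a lazy write-back pair: marking dirty arms a short deadline timer, and the sync path writes only if it manages to clear the flag first, so redundant syncs cost nothing. A single-threaded sketch of the protocol (a plain bool stands in for the atomic test_and_clear_bit()):

#include <stdbool.h>
#include <stdio.h>

static bool md_dirty;

static void md_mark_dirty(void)
{
        md_dirty = true;        /* real code also arms md_sync_timer */
}

static void md_sync(void)
{
        /* real code del_timer()s here before testing the flag */
        if (!md_dirty)
                return;         /* nothing changed since the last write */
        md_dirty = false;
        puts("writing meta data super block");
}

int main(void)
{
        md_sync();              /* clean: no write */
        md_mark_dirty();
        md_sync();              /* dirty: writes once */
        md_sync();              /* clean again: no write */
        return 0;
}
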
-static int check_activity_log_stripe_size(struct drbd_conf *mdev,
+static int check_activity_log_stripe_size(struct drbd_device *device,
struct meta_data_on_disk *on_disk,
struct drbd_md *in_core)
{
@@ -2996,12 +3134,12 @@ static int check_activity_log_stripe_size(struct drbd_conf *mdev,
return 0;
err:
- dev_err(DEV, "invalid activity log striping: al_stripes=%u, al_stripe_size_4k=%u\n",
+ drbd_err(device, "invalid activity log striping: al_stripes=%u, al_stripe_size_4k=%u\n",
al_stripes, al_stripe_size_4k);
return -EINVAL;
}
-static int check_offsets_and_sizes(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
+static int check_offsets_and_sizes(struct drbd_device *device, struct drbd_backing_dev *bdev)
{
sector_t capacity = drbd_get_capacity(bdev->md_bdev);
struct drbd_md *in_core = &bdev->md;
@@ -3068,7 +3206,7 @@ static int check_offsets_and_sizes(struct drbd_conf *mdev, struct drbd_backing_d
return 0;
err:
- dev_err(DEV, "meta data offsets don't make sense: idx=%d "
+ drbd_err(device, "meta data offsets don't make sense: idx=%d "
"al_s=%u, al_sz4k=%u, al_offset=%d, bm_offset=%d, "
"md_size_sect=%u, la_size=%llu, md_capacity=%llu\n",
in_core->meta_dev_idx,
@@ -3083,25 +3221,25 @@ err:
/**
* drbd_md_read() - Reads in the meta data super block
- * @mdev: DRBD device.
+ * @device: DRBD device.
* @bdev: Device from which the meta data should be read in.
*
* Return NO_ERROR on success, and an enum drbd_ret_code in case
* something goes wrong.
*
* Called exactly once during drbd_adm_attach(), while still being D_DISKLESS,
- * even before @bdev is assigned to @mdev->ldev.
+ * even before @bdev is assigned to @device->ldev.
*/
-int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
+int drbd_md_read(struct drbd_device *device, struct drbd_backing_dev *bdev)
{
struct meta_data_on_disk *buffer;
u32 magic, flags;
int i, rv = NO_ERROR;
- if (mdev->state.disk != D_DISKLESS)
+ if (device->state.disk != D_DISKLESS)
return ERR_DISK_CONFIGURED;
- buffer = drbd_md_get_buffer(mdev);
+ buffer = drbd_md_get_buffer(device);
if (!buffer)
return ERR_NOMEM;
@@ -3110,10 +3248,10 @@ int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
bdev->md.meta_dev_idx = bdev->disk_conf->meta_dev_idx;
bdev->md.md_offset = drbd_md_ss(bdev);
- if (drbd_md_sync_page_io(mdev, bdev, bdev->md.md_offset, READ)) {
+ if (drbd_md_sync_page_io(device, bdev, bdev->md.md_offset, READ)) {
/* NOTE: can't do normal error processing here as this is
called BEFORE disk is attached */
- dev_err(DEV, "Error while reading metadata.\n");
+ drbd_err(device, "Error while reading metadata.\n");
rv = ERR_IO_MD_DISK;
goto err;
}
@@ -3123,7 +3261,7 @@ int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
if (magic == DRBD_MD_MAGIC_84_UNCLEAN ||
(magic == DRBD_MD_MAGIC_08 && !(flags & MDF_AL_CLEAN))) {
/* btw: that's Activity Log clean, not "all" clean. */
- dev_err(DEV, "Found unclean meta data. Did you \"drbdadm apply-al\"?\n");
+ drbd_err(device, "Found unclean meta data. Did you \"drbdadm apply-al\"?\n");
rv = ERR_MD_UNCLEAN;
goto err;
}
@@ -3131,14 +3269,14 @@ int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
rv = ERR_MD_INVALID;
if (magic != DRBD_MD_MAGIC_08) {
if (magic == DRBD_MD_MAGIC_07)
- dev_err(DEV, "Found old (0.7) meta data magic. Did you \"drbdadm create-md\"?\n");
+ drbd_err(device, "Found old (0.7) meta data magic. Did you \"drbdadm create-md\"?\n");
else
- dev_err(DEV, "Meta data magic not found. Did you \"drbdadm create-md\"?\n");
+ drbd_err(device, "Meta data magic not found. Did you \"drbdadm create-md\"?\n");
goto err;
}
if (be32_to_cpu(buffer->bm_bytes_per_bit) != BM_BLOCK_SIZE) {
- dev_err(DEV, "unexpected bm_bytes_per_bit: %u (expected %u)\n",
+ drbd_err(device, "unexpected bm_bytes_per_bit: %u (expected %u)\n",
be32_to_cpu(buffer->bm_bytes_per_bit), BM_BLOCK_SIZE);
goto err;
}
@@ -3155,182 +3293,182 @@ int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
bdev->md.al_offset = be32_to_cpu(buffer->al_offset);
bdev->md.bm_offset = be32_to_cpu(buffer->bm_offset);
- if (check_activity_log_stripe_size(mdev, buffer, &bdev->md))
+ if (check_activity_log_stripe_size(device, buffer, &bdev->md))
goto err;
- if (check_offsets_and_sizes(mdev, bdev))
+ if (check_offsets_and_sizes(device, bdev))
goto err;
if (be32_to_cpu(buffer->bm_offset) != bdev->md.bm_offset) {
- dev_err(DEV, "unexpected bm_offset: %d (expected %d)\n",
+ drbd_err(device, "unexpected bm_offset: %d (expected %d)\n",
be32_to_cpu(buffer->bm_offset), bdev->md.bm_offset);
goto err;
}
if (be32_to_cpu(buffer->md_size_sect) != bdev->md.md_size_sect) {
- dev_err(DEV, "unexpected md_size: %u (expected %u)\n",
+ drbd_err(device, "unexpected md_size: %u (expected %u)\n",
be32_to_cpu(buffer->md_size_sect), bdev->md.md_size_sect);
goto err;
}
rv = NO_ERROR;
- spin_lock_irq(&mdev->tconn->req_lock);
- if (mdev->state.conn < C_CONNECTED) {
+ spin_lock_irq(&device->resource->req_lock);
+ if (device->state.conn < C_CONNECTED) {
unsigned int peer;
peer = be32_to_cpu(buffer->la_peer_max_bio_size);
peer = max(peer, DRBD_MAX_BIO_SIZE_SAFE);
- mdev->peer_max_bio_size = peer;
+ device->peer_max_bio_size = peer;
}
- spin_unlock_irq(&mdev->tconn->req_lock);
+ spin_unlock_irq(&device->resource->req_lock);
err:
- drbd_md_put_buffer(mdev);
+ drbd_md_put_buffer(device);
return rv;
}
/**
* drbd_md_mark_dirty() - Mark meta data super block as dirty
- * @mdev: DRBD device.
+ * @device: DRBD device.
*
* Call this function if you change anything that should be written to
* the meta-data super block. This function sets MD_DIRTY, and starts a
* timer that ensures that within five seconds you have to call drbd_md_sync().
*/
#ifdef DEBUG
-void drbd_md_mark_dirty_(struct drbd_conf *mdev, unsigned int line, const char *func)
+void drbd_md_mark_dirty_(struct drbd_device *device, unsigned int line, const char *func)
{
- if (!test_and_set_bit(MD_DIRTY, &mdev->flags)) {
- mod_timer(&mdev->md_sync_timer, jiffies + HZ);
- mdev->last_md_mark_dirty.line = line;
- mdev->last_md_mark_dirty.func = func;
+ if (!test_and_set_bit(MD_DIRTY, &device->flags)) {
+ mod_timer(&device->md_sync_timer, jiffies + HZ);
+ device->last_md_mark_dirty.line = line;
+ device->last_md_mark_dirty.func = func;
}
}
#else
-void drbd_md_mark_dirty(struct drbd_conf *mdev)
+void drbd_md_mark_dirty(struct drbd_device *device)
{
- if (!test_and_set_bit(MD_DIRTY, &mdev->flags))
- mod_timer(&mdev->md_sync_timer, jiffies + 5*HZ);
+ if (!test_and_set_bit(MD_DIRTY, &device->flags))
+ mod_timer(&device->md_sync_timer, jiffies + 5*HZ);
}
#endif
-void drbd_uuid_move_history(struct drbd_conf *mdev) __must_hold(local)
+void drbd_uuid_move_history(struct drbd_device *device) __must_hold(local)
{
int i;
for (i = UI_HISTORY_START; i < UI_HISTORY_END; i++)
- mdev->ldev->md.uuid[i+1] = mdev->ldev->md.uuid[i];
+ device->ldev->md.uuid[i+1] = device->ldev->md.uuid[i];
}
-void __drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
+void __drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local)
{
if (idx == UI_CURRENT) {
- if (mdev->state.role == R_PRIMARY)
+ if (device->state.role == R_PRIMARY)
val |= 1;
else
val &= ~((u64)1);
- drbd_set_ed_uuid(mdev, val);
+ drbd_set_ed_uuid(device, val);
}
- mdev->ldev->md.uuid[idx] = val;
- drbd_md_mark_dirty(mdev);
+ device->ldev->md.uuid[idx] = val;
+ drbd_md_mark_dirty(device);
}
-void _drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
+void _drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local)
{
unsigned long flags;
- spin_lock_irqsave(&mdev->ldev->md.uuid_lock, flags);
- __drbd_uuid_set(mdev, idx, val);
- spin_unlock_irqrestore(&mdev->ldev->md.uuid_lock, flags);
+ spin_lock_irqsave(&device->ldev->md.uuid_lock, flags);
+ __drbd_uuid_set(device, idx, val);
+ spin_unlock_irqrestore(&device->ldev->md.uuid_lock, flags);
}
-void drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
+void drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local)
{
unsigned long flags;
- spin_lock_irqsave(&mdev->ldev->md.uuid_lock, flags);
- if (mdev->ldev->md.uuid[idx]) {
- drbd_uuid_move_history(mdev);
- mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[idx];
+ spin_lock_irqsave(&device->ldev->md.uuid_lock, flags);
+ if (device->ldev->md.uuid[idx]) {
+ drbd_uuid_move_history(device);
+ device->ldev->md.uuid[UI_HISTORY_START] = device->ldev->md.uuid[idx];
}
- __drbd_uuid_set(mdev, idx, val);
- spin_unlock_irqrestore(&mdev->ldev->md.uuid_lock, flags);
+ __drbd_uuid_set(device, idx, val);
+ spin_unlock_irqrestore(&device->ldev->md.uuid_lock, flags);
}
/**
* drbd_uuid_new_current() - Creates a new current UUID
- * @mdev: DRBD device.
+ * @device: DRBD device.
*
* Creates a new current UUID, and rotates the old current UUID into
* the bitmap slot. Causes an incremental resync upon next connect.
*/
-void drbd_uuid_new_current(struct drbd_conf *mdev) __must_hold(local)
+void drbd_uuid_new_current(struct drbd_device *device) __must_hold(local)
{
u64 val;
unsigned long long bm_uuid;
get_random_bytes(&val, sizeof(u64));
- spin_lock_irq(&mdev->ldev->md.uuid_lock);
- bm_uuid = mdev->ldev->md.uuid[UI_BITMAP];
+ spin_lock_irq(&device->ldev->md.uuid_lock);
+ bm_uuid = device->ldev->md.uuid[UI_BITMAP];
if (bm_uuid)
- dev_warn(DEV, "bm UUID was already set: %llX\n", bm_uuid);
+ drbd_warn(device, "bm UUID was already set: %llX\n", bm_uuid);
- mdev->ldev->md.uuid[UI_BITMAP] = mdev->ldev->md.uuid[UI_CURRENT];
- __drbd_uuid_set(mdev, UI_CURRENT, val);
- spin_unlock_irq(&mdev->ldev->md.uuid_lock);
+ device->ldev->md.uuid[UI_BITMAP] = device->ldev->md.uuid[UI_CURRENT];
+ __drbd_uuid_set(device, UI_CURRENT, val);
+ spin_unlock_irq(&device->ldev->md.uuid_lock);
- drbd_print_uuids(mdev, "new current UUID");
+ drbd_print_uuids(device, "new current UUID");
/* get it to stable storage _now_ */
- drbd_md_sync(mdev);
+ drbd_md_sync(device);
}
-void drbd_uuid_set_bm(struct drbd_conf *mdev, u64 val) __must_hold(local)
+void drbd_uuid_set_bm(struct drbd_device *device, u64 val) __must_hold(local)
{
unsigned long flags;
- if (mdev->ldev->md.uuid[UI_BITMAP] == 0 && val == 0)
+ if (device->ldev->md.uuid[UI_BITMAP] == 0 && val == 0)
return;
- spin_lock_irqsave(&mdev->ldev->md.uuid_lock, flags);
+ spin_lock_irqsave(&device->ldev->md.uuid_lock, flags);
if (val == 0) {
- drbd_uuid_move_history(mdev);
- mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[UI_BITMAP];
- mdev->ldev->md.uuid[UI_BITMAP] = 0;
+ drbd_uuid_move_history(device);
+ device->ldev->md.uuid[UI_HISTORY_START] = device->ldev->md.uuid[UI_BITMAP];
+ device->ldev->md.uuid[UI_BITMAP] = 0;
} else {
- unsigned long long bm_uuid = mdev->ldev->md.uuid[UI_BITMAP];
+ unsigned long long bm_uuid = device->ldev->md.uuid[UI_BITMAP];
if (bm_uuid)
- dev_warn(DEV, "bm UUID was already set: %llX\n", bm_uuid);
+ drbd_warn(device, "bm UUID was already set: %llX\n", bm_uuid);
- mdev->ldev->md.uuid[UI_BITMAP] = val & ~((u64)1);
+ device->ldev->md.uuid[UI_BITMAP] = val & ~((u64)1);
}
- spin_unlock_irqrestore(&mdev->ldev->md.uuid_lock, flags);
+ spin_unlock_irqrestore(&device->ldev->md.uuid_lock, flags);
- drbd_md_mark_dirty(mdev);
+ drbd_md_mark_dirty(device);
}
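
The val & ~((u64)1) masking suggests the low-order bit of a DRBD UUID is reserved as a flag; that interpretation is an assumption based on the code above, not something this diff states. A tiny sketch of the masking:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* drbd_uuid_set_bm() stores a non-zero value with the lowest
	 * bit cleared; that the bit is reserved as a flag is an
	 * assumption based on the masking above. */
	uint64_t val = 0xABCD;
	printf("stored bitmap UUID: %llX\n",
	       (unsigned long long)(val & ~(uint64_t)1));	/* ABCC */
	return 0;
}
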
/**
* drbd_bmio_set_n_write() - io_fn for drbd_queue_bitmap_io() or drbd_bitmap_io()
- * @mdev: DRBD device.
+ * @device: DRBD device.
*
* Sets all bits in the bitmap and writes the whole bitmap to stable storage.
*/
-int drbd_bmio_set_n_write(struct drbd_conf *mdev)
+int drbd_bmio_set_n_write(struct drbd_device *device)
{
int rv = -EIO;
- if (get_ldev_if_state(mdev, D_ATTACHING)) {
- drbd_md_set_flag(mdev, MDF_FULL_SYNC);
- drbd_md_sync(mdev);
- drbd_bm_set_all(mdev);
+ if (get_ldev_if_state(device, D_ATTACHING)) {
+ drbd_md_set_flag(device, MDF_FULL_SYNC);
+ drbd_md_sync(device);
+ drbd_bm_set_all(device);
- rv = drbd_bm_write(mdev);
+ rv = drbd_bm_write(device);
if (!rv) {
- drbd_md_clear_flag(mdev, MDF_FULL_SYNC);
- drbd_md_sync(mdev);
+ drbd_md_clear_flag(device, MDF_FULL_SYNC);
+ drbd_md_sync(device);
}
- put_ldev(mdev);
+ put_ldev(device);
}
return rv;
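
The ordering here matters for crash safety: MDF_FULL_SYNC is made durable before the bitmap write starts and is cleared only after that write succeeds, so a crash at any point still forces a full sync on the next attach. A userspace model of that ordering:

#include <stdbool.h>
#include <stdio.h>

static bool md_full_sync;	/* stands in for MDF_FULL_SYNC */

static int write_bitmap(void) { return 0; /* drbd_bm_write() */ }

static int bmio_set_n_write(void)
{
	md_full_sync = true;	/* drbd_md_set_flag() ... */
	/* ... drbd_md_sync(): the flag is durable before bitmap IO */
	int rv = write_bitmap();
	if (!rv)
		md_full_sync = false;	/* cleared only after success */
	return rv;
}

int main(void)
{
	int rv = bmio_set_n_write();
	printf("rv=%d full_sync=%d\n", rv, md_full_sync);
	return 0;
}
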
@@ -3338,19 +3476,19 @@ int drbd_bmio_set_n_write(struct drbd_conf *mdev)
/**
* drbd_bmio_clear_n_write() - io_fn for drbd_queue_bitmap_io() or drbd_bitmap_io()
- * @mdev: DRBD device.
+ * @device: DRBD device.
*
* Clears all bits in the bitmap and writes the whole bitmap to stable storage.
*/
-int drbd_bmio_clear_n_write(struct drbd_conf *mdev)
+int drbd_bmio_clear_n_write(struct drbd_device *device)
{
int rv = -EIO;
- drbd_resume_al(mdev);
- if (get_ldev_if_state(mdev, D_ATTACHING)) {
- drbd_bm_clear_all(mdev);
- rv = drbd_bm_write(mdev);
- put_ldev(mdev);
+ drbd_resume_al(device);
+ if (get_ldev_if_state(device, D_ATTACHING)) {
+ drbd_bm_clear_all(device);
+ rv = drbd_bm_write(device);
+ put_ldev(device);
}
return rv;
@@ -3358,50 +3496,52 @@ int drbd_bmio_clear_n_write(struct drbd_conf *mdev)
static int w_bitmap_io(struct drbd_work *w, int unused)
{
- struct bm_io_work *work = container_of(w, struct bm_io_work, w);
- struct drbd_conf *mdev = w->mdev;
+ struct drbd_device *device =
+ container_of(w, struct drbd_device, bm_io_work.w);
+ struct bm_io_work *work = &device->bm_io_work;
int rv = -EIO;
- D_ASSERT(atomic_read(&mdev->ap_bio_cnt) == 0);
+ D_ASSERT(device, atomic_read(&device->ap_bio_cnt) == 0);
- if (get_ldev(mdev)) {
- drbd_bm_lock(mdev, work->why, work->flags);
- rv = work->io_fn(mdev);
- drbd_bm_unlock(mdev);
- put_ldev(mdev);
+ if (get_ldev(device)) {
+ drbd_bm_lock(device, work->why, work->flags);
+ rv = work->io_fn(device);
+ drbd_bm_unlock(device);
+ put_ldev(device);
}
- clear_bit_unlock(BITMAP_IO, &mdev->flags);
- wake_up(&mdev->misc_wait);
+ clear_bit_unlock(BITMAP_IO, &device->flags);
+ wake_up(&device->misc_wait);
if (work->done)
- work->done(mdev, rv);
+ work->done(device, rv);
- clear_bit(BITMAP_IO_QUEUED, &mdev->flags);
+ clear_bit(BITMAP_IO_QUEUED, &device->flags);
work->why = NULL;
work->flags = 0;
return 0;
}
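
The change from a w->mdev back-pointer to container_of() recovers the owning device from the embedded work item instead of storing a redundant pointer. A self-contained sketch of the pattern:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work { int pending; };

struct device {
	const char *name;
	struct work bm_io_work;	/* embedded, like bm_io_work.w */
};

static void worker(struct work *w)
{
	/* Recover the owner without a back-pointer field. */
	struct device *dev = container_of(w, struct device, bm_io_work);
	printf("bitmap IO for %s\n", dev->name);
}

int main(void)
{
	struct device d = { .name = "drbd0" };
	worker(&d.bm_io_work);
	return 0;
}
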
-void drbd_ldev_destroy(struct drbd_conf *mdev)
+void drbd_ldev_destroy(struct drbd_device *device)
{
- lc_destroy(mdev->resync);
- mdev->resync = NULL;
- lc_destroy(mdev->act_log);
- mdev->act_log = NULL;
+ lc_destroy(device->resync);
+ device->resync = NULL;
+ lc_destroy(device->act_log);
+ device->act_log = NULL;
__no_warn(local,
- drbd_free_bc(mdev->ldev);
- mdev->ldev = NULL;);
+ drbd_free_bc(device->ldev);
+ device->ldev = NULL;);
- clear_bit(GO_DISKLESS, &mdev->flags);
+ clear_bit(GO_DISKLESS, &device->flags);
}
static int w_go_diskless(struct drbd_work *w, int unused)
{
- struct drbd_conf *mdev = w->mdev;
+ struct drbd_device *device =
+ container_of(w, struct drbd_device, go_diskless);
- D_ASSERT(mdev->state.disk == D_FAILED);
+ D_ASSERT(device, device->state.disk == D_FAILED);
/* we cannot assert local_cnt == 0 here, as get_ldev_if_state will
* inc/dec it frequently. Once we are D_DISKLESS, no one will touch
* the protected members anymore, though, so once put_ldev reaches zero
@@ -3420,27 +3560,27 @@ static int w_go_diskless(struct drbd_work *w, int unused)
* We still need to check if both bitmap and ldev are present, we may
* end up here after a failed attach, before ldev was even assigned.
*/
- if (mdev->bitmap && mdev->ldev) {
+ if (device->bitmap && device->ldev) {
/* An interrupted resync or similar is allowed to recount bits
* while we detach.
* Any modifications would not be expected anymore, though.
*/
- if (drbd_bitmap_io_from_worker(mdev, drbd_bm_write,
+ if (drbd_bitmap_io_from_worker(device, drbd_bm_write,
"detach", BM_LOCKED_TEST_ALLOWED)) {
- if (test_bit(WAS_READ_ERROR, &mdev->flags)) {
- drbd_md_set_flag(mdev, MDF_FULL_SYNC);
- drbd_md_sync(mdev);
+ if (test_bit(WAS_READ_ERROR, &device->flags)) {
+ drbd_md_set_flag(device, MDF_FULL_SYNC);
+ drbd_md_sync(device);
}
}
}
- drbd_force_state(mdev, NS(disk, D_DISKLESS));
+ drbd_force_state(device, NS(disk, D_DISKLESS));
return 0;
}
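
Read together, the checks above say: if flushing the bitmap during detach fails and a read error was seen, the on-disk bitmap can no longer be trusted, so MDF_FULL_SYNC is set before the device is forced diskless. A small model of that decision (the return-value convention, 0 for success, is assumed here):

#include <stdbool.h>
#include <stdio.h>

static bool was_read_error = true;	/* WAS_READ_ERROR was set */
static bool md_full_sync;

static int flush_bitmap(void) { return -5; /* pretend -EIO */ }

int main(void)
{
	if (flush_bitmap() != 0 && was_read_error)
		md_full_sync = true;	/* MDF_FULL_SYNC: distrust bitmap */
	printf("full_sync=%d\n", md_full_sync);
	/* the real worker then forces disk state to D_DISKLESS */
	return 0;
}
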
/**
* drbd_queue_bitmap_io() - Queues an IO operation on the whole bitmap
- * @mdev: DRBD device.
+ * @device: DRBD device.
* @io_fn: IO callback to be called when bitmap IO is possible
* @done: callback to be called after the bitmap IO was performed
* @why: Descriptive text of the reason for doing the IO
@@ -3450,76 +3590,77 @@ static int w_go_diskless(struct drbd_work *w, int unused)
* called from worker context. It MUST NOT be used while a previous such
* work is still pending!
*/
-void drbd_queue_bitmap_io(struct drbd_conf *mdev,
- int (*io_fn)(struct drbd_conf *),
- void (*done)(struct drbd_conf *, int),
+void drbd_queue_bitmap_io(struct drbd_device *device,
+ int (*io_fn)(struct drbd_device *),
+ void (*done)(struct drbd_device *, int),
char *why, enum bm_flag flags)
{
- D_ASSERT(current == mdev->tconn->worker.task);
+ D_ASSERT(device, current == first_peer_device(device)->connection->worker.task);
- D_ASSERT(!test_bit(BITMAP_IO_QUEUED, &mdev->flags));
- D_ASSERT(!test_bit(BITMAP_IO, &mdev->flags));
- D_ASSERT(list_empty(&mdev->bm_io_work.w.list));
- if (mdev->bm_io_work.why)
- dev_err(DEV, "FIXME going to queue '%s' but '%s' still pending?\n",
- why, mdev->bm_io_work.why);
+ D_ASSERT(device, !test_bit(BITMAP_IO_QUEUED, &device->flags));
+ D_ASSERT(device, !test_bit(BITMAP_IO, &device->flags));
+ D_ASSERT(device, list_empty(&device->bm_io_work.w.list));
+ if (device->bm_io_work.why)
+ drbd_err(device, "FIXME going to queue '%s' but '%s' still pending?\n",
+ why, device->bm_io_work.why);
- mdev->bm_io_work.io_fn = io_fn;
- mdev->bm_io_work.done = done;
- mdev->bm_io_work.why = why;
- mdev->bm_io_work.flags = flags;
+ device->bm_io_work.io_fn = io_fn;
+ device->bm_io_work.done = done;
+ device->bm_io_work.why = why;
+ device->bm_io_work.flags = flags;
- spin_lock_irq(&mdev->tconn->req_lock);
- set_bit(BITMAP_IO, &mdev->flags);
- if (atomic_read(&mdev->ap_bio_cnt) == 0) {
- if (!test_and_set_bit(BITMAP_IO_QUEUED, &mdev->flags))
- drbd_queue_work(&mdev->tconn->sender_work, &mdev->bm_io_work.w);
+ spin_lock_irq(&device->resource->req_lock);
+ set_bit(BITMAP_IO, &device->flags);
+ if (atomic_read(&device->ap_bio_cnt) == 0) {
+ if (!test_and_set_bit(BITMAP_IO_QUEUED, &device->flags))
+ drbd_queue_work(&first_peer_device(device)->connection->sender_work,
+ &device->bm_io_work.w);
}
- spin_unlock_irq(&mdev->tconn->req_lock);
+ spin_unlock_irq(&device->resource->req_lock);
}
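
The queueing protocol above: BITMAP_IO is set under the resource lock, but the work is handed to the sender only when no application bios are in flight, with BITMAP_IO_QUEUED preventing a double queue; otherwise the completion of the last in-flight bio does the queueing. A userspace model using C11 atomics:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int ap_bio_cnt;
static atomic_bool bitmap_io, bitmap_io_queued;

static void queue_work(void) { puts("bm_io_work queued"); }

static void queue_bitmap_io(void)
{
	atomic_store(&bitmap_io, true);		/* BITMAP_IO */
	if (atomic_load(&ap_bio_cnt) == 0 &&
	    !atomic_exchange(&bitmap_io_queued, true)) /* BITMAP_IO_QUEUED */
		queue_work();
	/* otherwise the completion of the last bio queues it */
}

int main(void) { queue_bitmap_io(); return 0; }
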
/**
* drbd_bitmap_io() - Does an IO operation on the whole bitmap
- * @mdev: DRBD device.
+ * @device: DRBD device.
* @io_fn: IO callback to be called when bitmap IO is possible
* @why: Descriptive text of the reason for doing the IO
*
* Freezes application IO while the actual IO operation runs. This
* function MAY NOT be called from worker context.
*/
-int drbd_bitmap_io(struct drbd_conf *mdev, int (*io_fn)(struct drbd_conf *),
+int drbd_bitmap_io(struct drbd_device *device, int (*io_fn)(struct drbd_device *),
char *why, enum bm_flag flags)
{
int rv;
- D_ASSERT(current != mdev->tconn->worker.task);
+ D_ASSERT(device, current != first_peer_device(device)->connection->worker.task);
if ((flags & BM_LOCKED_SET_ALLOWED) == 0)
- drbd_suspend_io(mdev);
+ drbd_suspend_io(device);
- drbd_bm_lock(mdev, why, flags);
- rv = io_fn(mdev);
- drbd_bm_unlock(mdev);
+ drbd_bm_lock(device, why, flags);
+ rv = io_fn(device);
+ drbd_bm_unlock(device);
if ((flags & BM_LOCKED_SET_ALLOWED) == 0)
- drbd_resume_io(mdev);
+ drbd_resume_io(device);
return rv;
}
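
In contrast to the queued variant, this is the synchronous path: unless BM_LOCKED_SET_ALLOWED is passed, application IO is suspended around the whole bitmap operation, and the bitmap lock is taken around io_fn either way. A sketch of that wrapping (the flag value below is illustrative, not drbd's actual encoding):

#include <stdio.h>

enum bm_flag { BM_LOCKED_SET_ALLOWED = 0x1 };	/* value illustrative */

static int set_n_write(void) { puts("  io_fn runs on locked bitmap"); return 0; }

static int bitmap_io(int (*io_fn)(void), int flags)
{
	if (!(flags & BM_LOCKED_SET_ALLOWED))
		puts("suspend application IO");
	puts("lock bitmap");
	int rv = io_fn();
	puts("unlock bitmap");
	if (!(flags & BM_LOCKED_SET_ALLOWED))
		puts("resume application IO");
	return rv;
}

int main(void) { return bitmap_io(set_n_write, 0); }
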
-void drbd_md_set_flag(struct drbd_conf *mdev, int flag) __must_hold(local)
+void drbd_md_set_flag(struct drbd_device *device, int flag) __must_hold(local)
{
- if ((mdev->ldev->md.flags & flag) != flag) {
- drbd_md_mark_dirty(mdev);
- mdev->ldev->md.flags |= flag;
+ if ((device->ldev->md.flags & flag) != flag) {
+ drbd_md_mark_dirty(device);
+ device->ldev->md.flags |= flag;
}
}
-void drbd_md_clear_flag(struct drbd_conf *mdev, int flag) __must_hold(local)
+void drbd_md_clear_flag(struct drbd_device *device, int flag) __must_hold(local)
{
- if ((mdev->ldev->md.flags & flag) != 0) {
- drbd_md_mark_dirty(mdev);
- mdev->ldev->md.flags &= ~flag;
+ if ((device->ldev->md.flags & flag) != 0) {
+ drbd_md_mark_dirty(device);
+ device->ldev->md.flags &= ~flag;
}
}
int drbd_md_test_flag(struct drbd_backing_dev *bdev, int flag)
@@ -3529,23 +3670,25 @@ int drbd_md_test_flag(struct drbd_backing_dev *bdev, int flag)
static void md_sync_timer_fn(unsigned long data)
{
- struct drbd_conf *mdev = (struct drbd_conf *) data;
+ struct drbd_device *device = (struct drbd_device *) data;
/* must not double-queue! */
- if (list_empty(&mdev->md_sync_work.list))
- drbd_queue_work_front(&mdev->tconn->sender_work, &mdev->md_sync_work);
+ if (list_empty(&device->md_sync_work.list))
+ drbd_queue_work_front(&first_peer_device(device)->connection->sender_work,
+ &device->md_sync_work);
}
static int w_md_sync(struct drbd_work *w, int unused)
{
- struct drbd_conf *mdev = w->mdev;
+ struct drbd_device *device =
+ container_of(w, struct drbd_device, md_sync_work);
- dev_warn(DEV, "md_sync_timer expired! Worker calls drbd_md_sync().\n");
+ drbd_warn(device, "md_sync_timer expired! Worker calls drbd_md_sync().\n");
#ifdef DEBUG
- dev_warn(DEV, "last md_mark_dirty: %s:%u\n",
- mdev->last_md_mark_dirty.func, mdev->last_md_mark_dirty.line);
+ drbd_warn(device, "last md_mark_dirty: %s:%u\n",
+ device->last_md_mark_dirty.func, device->last_md_mark_dirty.line);
#endif
- drbd_md_sync(mdev);
+ drbd_md_sync(device);
return 0;
}
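
The timer callback runs in interrupt context, so it only queues the sync work (at the front of the queue, and only if not already queued, which is what the list_empty() check guards); the worker later performs the actual metadata write. A model of the handoff:

#include <stdbool.h>
#include <stdio.h>

static bool queued;

static void md_sync_timer_fn(void)
{
	if (!queued) {		/* the list_empty() check above */
		queued = true;
		puts("md_sync_work queued at queue front");
	}
}

static void worker(void)
{
	if (queued) {
		queued = false;
		puts("worker: drbd_md_sync()");
	}
}

int main(void)
{
	md_sync_timer_fn();
	md_sync_timer_fn();	/* no double-queue */
	worker();
	return 0;
}
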
@@ -3621,18 +3764,18 @@ const char *cmdname(enum drbd_packet cmd)
/**
* drbd_wait_misc() - wait for a request to make progress
- * @mdev: device associated with the request
+ * @device: device associated with the request
* @i: the struct drbd_interval embedded in struct drbd_request or
* struct drbd_peer_request
*/
-int drbd_wait_misc(struct drbd_conf *mdev, struct drbd_interval *i)
+int drbd_wait_misc(struct drbd_device *device, struct drbd_interval *i)
{
struct net_conf *nc;
DEFINE_WAIT(wait);
long timeout;
rcu_read_lock();
- nc = rcu_dereference(mdev->tconn->net_conf);
+ nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
if (!nc) {
rcu_read_unlock();
return -ETIMEDOUT;
@@ -3640,14 +3783,14 @@ int drbd_wait_misc(struct drbd_conf *mdev, struct drbd_interval *i)
timeout = nc->ko_count ? nc->timeout * HZ / 10 * nc->ko_count : MAX_SCHEDULE_TIMEOUT;
rcu_read_unlock();
- /* Indicate to wake up mdev->misc_wait on progress. */
+ /* Indicate to wake up device->misc_wait on progress. */
i->waiting = true;
- prepare_to_wait(&mdev->misc_wait, &wait, TASK_INTERRUPTIBLE);
- spin_unlock_irq(&mdev->tconn->req_lock);
+ prepare_to_wait(&device->misc_wait, &wait, TASK_INTERRUPTIBLE);
+ spin_unlock_irq(&device->resource->req_lock);
timeout = schedule_timeout(timeout);
- finish_wait(&mdev->misc_wait, &wait);
- spin_lock_irq(&mdev->tconn->req_lock);
- if (!timeout || mdev->state.conn < C_CONNECTED)
+ finish_wait(&device->misc_wait, &wait);
+ spin_lock_irq(&device->resource->req_lock);
+ if (!timeout || device->state.conn < C_CONNECTED)
return -ETIMEDOUT;
if (signal_pending(current))
return -ERESTARTSYS;
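
The timeout arithmetic above: nc->timeout is configured in tenths of a second, so timeout * HZ / 10 converts it to jiffies, scaled by ko_count. A worked example (the HZ value is illustrative):

#include <stdio.h>

#define HZ 1000	/* illustrative tick rate */

int main(void)
{
	long timeout = 60;	/* 6.0 s, in units of 0.1 s */
	long ko_count = 2;
	long jiffies = timeout * HZ / 10 * ko_count;
	printf("%ld jiffies (= %ld ms)\n", jiffies, jiffies * 1000 / HZ);
	return 0;	/* prints: 12000 jiffies (= 12000 ms) */
}
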
@@ -3703,20 +3846,20 @@ _drbd_fault_str(unsigned int type) {
}
unsigned int
-_drbd_insert_fault(struct drbd_conf *mdev, unsigned int type)
+_drbd_insert_fault(struct drbd_device *device, unsigned int type)
{
static struct fault_random_state rrs = {0, 0};
unsigned int ret = (
(fault_devs == 0 ||
- ((1 << mdev_to_minor(mdev)) & fault_devs) != 0) &&
+ ((1 << device_to_minor(device)) & fault_devs) != 0) &&
(((_drbd_fault_random(&rrs) % 100) + 1) <= fault_rate));
if (ret) {
fault_count++;
if (__ratelimit(&drbd_ratelimit_state))
- dev_warn(DEV, "***Simulating %s failure\n",
+ drbd_warn(device, "***Simulating %s failure\n",
_drbd_fault_str(type));
}
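
Here fault_devs acts as a bitmask over minor numbers (the 1 << minor shift limits it to the first word of minors) and fault_rate as a percentage. A userspace model of the gate:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Model of the gate in _drbd_insert_fault(): a device is eligible
 * if fault_devs is 0 (all devices) or its minor's bit is set, and
 * a fault then triggers with probability fault_rate percent. */
static unsigned long fault_devs;	/* 0 = all devices */
static unsigned int fault_rate = 10;	/* percent */

static bool insert_fault(unsigned int minor)
{
	bool dev_ok = fault_devs == 0 ||
		      ((1UL << minor) & fault_devs) != 0;
	return dev_ok && (unsigned int)(rand() % 100) + 1 <= fault_rate;
}

int main(void)
{
	int hits = 0;
	for (int i = 0; i < 1000; i++)
		hits += insert_fault(0);
	printf("~%d%% simulated faults\n", hits / 10);
	return 0;
}
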