path: root/drivers/dma/ioat/dma_v2.h
/*
 * Copyright(c) 2004 - 2009 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */
#ifndef IOATDMA_V2_H
#define IOATDMA_V2_H

#include <linux/dmaengine.h>
#include "dma.h"
#include "hw.h"

extern int ioat_pending_level;

/*
 * workaround for IOAT ver.3.0 null descriptor issue
 * (channel returns error when size is 0)
 */
#define NULL_DESC_BUFFER_SIZE 1

#define IOAT_MAX_ORDER 16
#define ioat_get_alloc_order() \
	(min(ioat_ring_alloc_order, IOAT_MAX_ORDER))
#define ioat_get_max_alloc_order() \
	(min(ioat_ring_max_alloc_order, IOAT_MAX_ORDER))
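
/*
 * Illustrative example (not from the driver source): if
 * ioat_ring_alloc_order is 8, ioat_get_alloc_order() returns 8 and the
 * initial ring holds 1 << 8 = 256 descriptors; any order above
 * IOAT_MAX_ORDER is clamped to 16 (a 65536-entry ring).
 */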

/**
 * struct ioat2_dma_chan - ioat v2 / v3 channel attributes
 * @base: common ioat channel parameters
 * @xfercap_log: log2 of channel max transfer length (for fast division)
 * @head: allocated index
 * @issued: hardware notification point
 * @tail: cleanup index
 * @dmacount: identical to 'head' except for occasionally resetting to zero
 * @alloc_order: log2 of the number of allocated descriptors
 * @pending: lock free indicator for issued != head
 * @ring: software ring buffer implementation of hardware ring
 * @ring_lock: protects ring attributes
 */
struct ioat2_dma_chan {
	struct ioat_chan_common base;
	size_t xfercap_log;
	u16 head;
	u16 issued;
	u16 tail;
	u16 dmacount;
	u16 alloc_order;
	int pending;
	struct ioat_ring_ent **ring;
	spinlock_t ring_lock;
};
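
/*
 * Note on the ring indices: head, issued and tail are free-running u16
 * counters that are reduced modulo the ring size only when used to
 * address ->ring[].  Illustrative snapshot (not from the driver
 * source): head = 10, issued = 8, tail = 5 means slots 5-7 are
 * awaiting cleanup and slots 8-9 are built but not yet submitted to
 * hardware.
 */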

static inline struct ioat2_dma_chan *to_ioat2_chan(struct dma_chan *c)
{
	struct ioat_chan_common *chan = to_chan_common(c);

	return container_of(chan, struct ioat2_dma_chan, base);
}

static inline u16 ioat2_ring_mask(struct ioat2_dma_chan *ioat)
{
	return (1 << ioat->alloc_order) - 1;
}

/* count of descriptors in flight with the engine */
static inline u16 ioat2_ring_active(struct ioat2_dma_chan *ioat)
{
	return (ioat->head - ioat->tail) & ioat2_ring_mask(ioat);
}
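
/*
 * Illustrative example of the wraparound arithmetic (not from the
 * driver source): with alloc_order = 4 (16-entry ring, mask 0xf) and
 * the u16 counters having wrapped so that head = 0x0003 and
 * tail = 0xfffe, (0x0003 - 0xfffe) is 0x0005 as a u16 and
 * 0x0005 & 0xf = 5 descriptors are active; the u16 overflow is
 * harmless.
 */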

/* count of descriptors pending submission to hardware */
static inline u16 ioat2_ring_pending(struct ioat2_dma_chan *ioat)
{
	return (ioat->head - ioat->issued) & ioat2_ring_mask(ioat);
}

static inline u16 ioat2_ring_space(struct ioat2_dma_chan *ioat)
{
	u16 num_descs = ioat2_ring_mask(ioat) + 1;
	u16 active = ioat2_ring_active(ioat);

	BUG_ON(active > num_descs);

	return num_descs - active;
}
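
/*
 * e.g. (illustrative) a 16-entry ring with 13 active descriptors
 * reports space for 3 more.
 */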

/* assumes caller already checked space */
static inline u16 ioat2_desc_alloc(struct ioat2_dma_chan *ioat, u16 len)
{
	ioat->head += len;
	return ioat->head - len;
}
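
/*
 * Illustrative example (not from the driver source): with head = 10
 * and len = 3, slots 10, 11 and 12 are handed out, 10 (the index of
 * the first new descriptor) is returned, and head becomes 13.
 */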

static inline u16 ioat2_xferlen_to_descs(struct ioat2_dma_chan *ioat, size_t len)
{
	u16 num_descs = len >> ioat->xfercap_log;

	num_descs += !!(len & ((1 << ioat->xfercap_log) - 1));
	return num_descs;
}
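
/*
 * This is a ceiling division by the channel transfer cap.
 * Illustrative example (not from the driver source): xfercap_log = 20
 * (1 MiB cap) and len = 0x280000 (2.5 MiB) gives 0x280000 >> 20 = 2
 * full descriptors plus one for the 0x80000-byte remainder, i.e. 3.
 */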

struct ioat_ring_ent {
	struct ioat_dma_descriptor *hw;
	size_t len;
	struct dma_async_tx_descriptor txd;
	#ifdef DEBUG
	int id;
	#endif
};

/* fetch the ring entry for a free-running index (reduced mod ring size) */
static inline struct ioat_ring_ent *
ioat2_get_ring_ent(struct ioat2_dma_chan *ioat, u16 idx)
{
	return ioat->ring[idx & ioat2_ring_mask(ioat)];
}

static inline void ioat2_set_chainaddr(struct ioat2_dma_chan *ioat, u64 addr)
{
	struct ioat_chan_common *chan = &ioat->base;

	/* program the 64-bit descriptor chain address as two 32-bit writes */
	writel(addr & 0x00000000FFFFFFFF,
	       chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW);
	writel(addr >> 32,
	       chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH);
}

int __devinit ioat2_dma_probe(struct ioatdma_device *dev, int dca);
int __devinit ioat3_dma_probe(struct ioatdma_device *dev, int dca);
struct dca_provider * __devinit ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase);
struct dca_provider * __devinit ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase);
extern struct kmem_cache *ioat2_cache;
#endif /* IOATDMA_V2_H */