#ifndef _M68K_DMA_H
#define _M68K_DMA_H 1
 
//#define	DMA_DEBUG	1

#include <linux/config.h>

#ifdef CONFIG_COLDFIRE
/*
 * ColdFire DMA Model:
 *   ColdFire DMA supports two forms of DMA: single and dual address. Single
 * address mode emits a source address, and expects that the device will
 * either pick up the data (DMA READ) or source the data (DMA WRITE). This
 * requires the device to place data on the correct byte(s) of the data bus,
 * as the memory transactions are always 32 bits, so only 32-bit devices will
 * find single-address transfers useful. Dual address mode performs two
 * cycles: a source read and a destination write. ColdFire aligns the data so
 * that the device always gets the correct bytes, which makes this mode useful
 * for 8- and 16-bit devices. Dual address mode is what is supported below.
 *
 * AUG/22/2000 : added support for 32-bit Dual-Address-Mode (K) 2000
 *               Oliver Kamphenkel (O.Kamphenkel@tu-bs.de)
 *
 * AUG/25/2000 : added support for 8, 16 and 32-bit Single-Address-Mode (K)2000
 *               Oliver Kamphenkel (O.Kamphenkel@tu-bs.de)
 *
 * APR/18/2002 : added proper support for the MCF5272 DMA controller.
 *               Arthur Shipkowski (art@videon-central.com)
 */

#include <asm/coldfire.h>
#include <asm/mcfsim.h>
#include <asm/mcfdma.h>

/*
 * Set the number of DMA channels for the different ColdFire implementations.
 */
#if defined(CONFIG_M5249) || defined(CONFIG_M5307) || defined(CONFIG_M5407)
#define MAX_M68K_DMA_CHANNELS 4
#elif defined(CONFIG_M5272)
#define MAX_M68K_DMA_CHANNELS 1
#else
#define MAX_M68K_DMA_CHANNELS 2
#endif

extern unsigned int dma_base_addr[MAX_M68K_DMA_CHANNELS];
extern unsigned int dma_device_address[MAX_M68K_DMA_CHANNELS];

#if !defined(CONFIG_M5272)
#define DMA_MODE_WRITE_BIT  0x01  /* Memory/IO to IO/Memory select */
#define DMA_MODE_WORD_BIT   0x02  /* 8 or 16 bit transfers */
#define DMA_MODE_LONG_BIT   0x04  /* or 32 bit transfers */
#define DMA_MODE_SINGLE_BIT 0x08  /* single-address-mode */

/* I/O to memory, 8 bits, mode */
#define DMA_MODE_READ	            0
/* memory to I/O, 8 bits, mode */
#define DMA_MODE_WRITE	            1
/* I/O to memory, 16 bits, mode */
#define DMA_MODE_READ_WORD          2
/* memory to I/O, 16 bits, mode */
#define DMA_MODE_WRITE_WORD         3
/* I/O to memory, 32 bits, mode */
#define DMA_MODE_READ_LONG          4
/* memory to I/O, 32 bits, mode */
#define DMA_MODE_WRITE_LONG         5
/* I/O to memory, 8 bits, single-address-mode */     
#define DMA_MODE_READ_SINGLE        8
/* memory to I/O, 8 bits, single-address-mode */
#define DMA_MODE_WRITE_SINGLE       9
/* I/O to memory, 16 bits, single-address-mode */
#define DMA_MODE_READ_WORD_SINGLE  10
/* memory to I/O, 16 bits, single-address-mode */
#define DMA_MODE_WRITE_WORD_SINGLE 11
/* I/O to memory, 32 bits, single-address-mode */
#define DMA_MODE_READ_LONG_SINGLE  12
/* memory to I/O, 32 bits, single-address-mode */
#define DMA_MODE_WRITE_LONG_SINGLE 13
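
/*
 * Illustrative note (not part of the original header): these mode values are
 * simply combinations of the mode bits above, for example
 *
 *   DMA_MODE_WRITE_WORD       == (DMA_MODE_WRITE_BIT | DMA_MODE_WORD_BIT)
 *   DMA_MODE_READ_LONG_SINGLE == (DMA_MODE_LONG_BIT  | DMA_MODE_SINGLE_BIT)
 *
 * set_dma_mode() below decodes these bits into the corresponding MCFDMA_DCR_*
 * fields (direction, transfer size, single- vs dual-address operation).
 */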

#else /* CONFIG_M5272 is defined */

/* Source static-address mode */
#define DMA_MODE_SRC_SA_BIT 0x01  
/* Two bits to select between all four modes */
#define DMA_MODE_SSIZE_MASK 0x06 
/* Offset to shift bits in */
#define DMA_MODE_SSIZE_OFF  0x01  
/* Destination static-address mode */
#define DMA_MODE_DES_SA_BIT 0x10  
/* Two bits to select between all four modes */
#define DMA_MODE_DSIZE_MASK 0x60  
/* Offset to shift bits in */
#define DMA_MODE_DSIZE_OFF  0x05
/* Size modifiers */
#define DMA_MODE_SIZE_LONG  0x00
#define DMA_MODE_SIZE_BYTE  0x01
#define DMA_MODE_SIZE_WORD  0x02
#define DMA_MODE_SIZE_LINE  0x03

/* 
 * Aliases to help speed quick ports; these may be suboptimal, however. They
 * do not include the SINGLE mode modifiers since the MCF5272 does not have a
 * mode where the device is in control of its addressing.
 */

/* I/O to memory, 8 bits, mode */
/* I/O to memory, 8 bits, mode */
#define DMA_MODE_READ               ((DMA_MODE_SIZE_BYTE << DMA_MODE_DSIZE_OFF) | (DMA_MODE_SIZE_BYTE << DMA_MODE_SSIZE_OFF) | DMA_MODE_SRC_SA_BIT)
/* memory to I/O, 8 bits, mode */
#define DMA_MODE_WRITE              ((DMA_MODE_SIZE_BYTE << DMA_MODE_DSIZE_OFF) | (DMA_MODE_SIZE_BYTE << DMA_MODE_SSIZE_OFF) | DMA_MODE_DES_SA_BIT)
/* I/O to memory, 16 bits, mode */
#define DMA_MODE_READ_WORD          ((DMA_MODE_SIZE_WORD << DMA_MODE_DSIZE_OFF) | (DMA_MODE_SIZE_WORD << DMA_MODE_SSIZE_OFF) | DMA_MODE_SRC_SA_BIT)
/* memory to I/O, 16 bits, mode */
#define DMA_MODE_WRITE_WORD         ((DMA_MODE_SIZE_WORD << DMA_MODE_DSIZE_OFF) | (DMA_MODE_SIZE_WORD << DMA_MODE_SSIZE_OFF) | DMA_MODE_DES_SA_BIT)
/* I/O to memory, 32 bits, mode */
#define DMA_MODE_READ_LONG          ((DMA_MODE_SIZE_LONG << DMA_MODE_DSIZE_OFF) | (DMA_MODE_SIZE_LONG << DMA_MODE_SSIZE_OFF) | DMA_MODE_SRC_SA_BIT)
/* memory to I/O, 32 bits, mode */
#define DMA_MODE_WRITE_LONG         ((DMA_MODE_SIZE_LONG << DMA_MODE_DSIZE_OFF) | (DMA_MODE_SIZE_LONG << DMA_MODE_SSIZE_OFF) | DMA_MODE_DES_SA_BIT)
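
/*
 * Illustrative note (not part of the original header): a mode word can also
 * be composed directly from the size and static-address bits above. For
 * example, a 32-bit memory-to-memory copy with both addresses incrementing
 * (no static-address bit set) could be described as:
 *
 *   (DMA_MODE_SIZE_LONG << DMA_MODE_DSIZE_OFF) |
 *   (DMA_MODE_SIZE_LONG << DMA_MODE_SSIZE_OFF)
 *
 * This combination is a hypothetical example; set_dma_mode() below maps the
 * absence of a static-address bit to incrementing addressing on that side.
 */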

#endif /* !defined(CONFIG_M5272) */

#if !defined(CONFIG_M5272)
/* enable/disable a specific DMA channel */
static __inline__ void enable_dma(unsigned int dmanr)
{
  volatile unsigned short *dmawp;

#ifdef DMA_DEBUG
  printk("enable_dma(dmanr=%d)\n", dmanr);
#endif

  dmawp = (unsigned short *) dma_base_addr[dmanr];
  dmawp[MCFDMA_DCR] |= MCFDMA_DCR_EEXT;
}

static __inline__ void disable_dma(unsigned int dmanr)
{
  volatile unsigned short *dmawp;
  volatile unsigned char  *dmapb;

#ifdef DMA_DEBUG
  printk("disable_dma(dmanr=%d)\n", dmanr);
#endif

  dmawp = (unsigned short *) dma_base_addr[dmanr];
  dmapb = (unsigned char *) dma_base_addr[dmanr];

  /* Turn off external requests, and stop any DMA in progress */
  dmawp[MCFDMA_DCR] &= ~MCFDMA_DCR_EEXT;
  dmapb[MCFDMA_DSR] = MCFDMA_DSR_DONE;
}

/*
 * Clear the 'DMA Pointer Flip Flop'.
 * Write 0 for LSB/MSB, 1 for MSB/LSB access.
 * Use this once to initialize the FF to a known state.
 * After that, keep track of it. :-)
 * --- In order to do that, the DMA routines below should ---
 * --- only be used while interrupts are disabled! ---
 *
 * This is a NOP for ColdFire. Provide a stub for compatibility.
 */
static __inline__ void clear_dma_ff(unsigned int dmanr)
{
}

/* set mode (above) for a specific DMA channel */
static __inline__ void set_dma_mode(unsigned int dmanr, char mode)
{

  volatile unsigned char  *dmabp;
  volatile unsigned short *dmawp;

#ifdef DMA_DEBUG
  printk("set_dma_mode(dmanr=%d,mode=%d)\n", dmanr, mode);
#endif

  dmabp = (unsigned char *) dma_base_addr[dmanr];
  dmawp = (unsigned short *) dma_base_addr[dmanr];

  // Clear config errors
  dmabp[MCFDMA_DSR] = MCFDMA_DSR_DONE; 

  // Set command register
  dmawp[MCFDMA_DCR] =
    MCFDMA_DCR_INT |         // Enable completion irq
    MCFDMA_DCR_CS |          // Force one xfer per request
    MCFDMA_DCR_AA |          // Enable auto alignment
    // single-address-mode
    ((mode & DMA_MODE_SINGLE_BIT) ? MCFDMA_DCR_SAA : 0) |
    // sets s_rw (-> r/w) high if Memory to I/O
    ((mode & DMA_MODE_WRITE_BIT) ? MCFDMA_DCR_S_RW : 0) |
    // Memory to I/O or I/O to Memory
    ((mode & DMA_MODE_WRITE_BIT) ? MCFDMA_DCR_SINC : MCFDMA_DCR_DINC) |
    // 32 bit, 16 bit or 8 bit transfers
    ((mode & DMA_MODE_WORD_BIT)  ? MCFDMA_DCR_SSIZE_WORD : 
     ((mode & DMA_MODE_LONG_BIT) ? MCFDMA_DCR_SSIZE_LONG :
                                   MCFDMA_DCR_SSIZE_BYTE)) |
    ((mode & DMA_MODE_WORD_BIT)  ? MCFDMA_DCR_DSIZE_WORD :
     ((mode & DMA_MODE_LONG_BIT) ? MCFDMA_DCR_DSIZE_LONG :
                                   MCFDMA_DCR_DSIZE_BYTE));

#ifdef DEBUG_DMA
  printk("%s(%d): dmanr=%d DSR[%x]=%x DCR[%x]=%x\n", __FILE__, __LINE__,
         dmanr, (int) &dmabp[MCFDMA_DSR], dmabp[MCFDMA_DSR],
	 (int) &dmawp[MCFDMA_DCR], dmawp[MCFDMA_DCR]);
#endif
}

/* Set transfer address for specific DMA channel */
static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int a)
{
  volatile unsigned short *dmawp;
  volatile unsigned int   *dmalp;

#ifdef DMA_DEBUG
  printk("set_dma_addr(dmanr=%d,a=%x)\n", dmanr, a);
#endif

  dmawp = (unsigned short *) dma_base_addr[dmanr];
  dmalp = (unsigned int *) dma_base_addr[dmanr];

  // Determine which address registers are used for memory/device accesses
  if (dmawp[MCFDMA_DCR] & MCFDMA_DCR_SINC) {
    // Source incrementing, must be memory
    dmalp[MCFDMA_SAR] = a;
    // Set dest address, must be device
    dmalp[MCFDMA_DAR] = dma_device_address[dmanr];
  } else {
    // Destination incrementing, must be memory
    dmalp[MCFDMA_DAR] = a;
    // Set source address, must be device
    dmalp[MCFDMA_SAR] = dma_device_address[dmanr];
  }

#ifdef DEBUG_DMA
  printk("%s(%d): dmanr=%d DCR[%x]=%x SAR[%x]=%08x DAR[%x]=%08x\n",
	__FILE__, __LINE__, dmanr, (int) &dmawp[MCFDMA_DCR], dmawp[MCFDMA_DCR],
	(int) &dmalp[MCFDMA_SAR], dmalp[MCFDMA_SAR],
	(int) &dmalp[MCFDMA_DAR], dmalp[MCFDMA_DAR]);
#endif
}

/*
 * Specific to ColdFire - sets the device address.
 * Should be called after set_dma_mode() and before set_dma_addr().
 */
static __inline__ void set_dma_device_addr(unsigned int dmanr, unsigned int a)
{
#ifdef DMA_DEBUG
  printk("set_dma_device_addr(dmanr=%d,a=%x)\n", dmanr, a);
#endif

  dma_device_address[dmanr] = a;
}

/*
 * NOTE 2: "count" represents _bytes_.
 */
static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count)
{
  volatile unsigned short *dmawp;

#ifdef DMA_DEBUG
  printk("set_dma_count(dmanr=%d,count=%d)\n", dmanr, count);
#endif

  dmawp = (unsigned short *) dma_base_addr[dmanr];
  dmawp[MCFDMA_BCR] = (unsigned short)count;
}

/*
 * Get DMA residue count. After a DMA transfer, this
 * should return zero. Reading this while a DMA transfer is
 * still in progress will return unpredictable results.
 * Otherwise, it returns the number of _bytes_ left to transfer.
 */
static __inline__ int get_dma_residue(unsigned int dmanr)
{
  volatile unsigned short *dmawp;
  unsigned short count;

#ifdef DMA_DEBUG
  printk("get_dma_residue(dmanr=%d)\n", dmanr);
#endif

  dmawp = (unsigned short *) dma_base_addr[dmanr];
  count = dmawp[MCFDMA_BCR];
  return((int) count);
}
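
/*
 * Illustrative usage sketch (not part of the original header): one possible
 * driver call sequence for a 16-bit device-to-memory transfer using the
 * routines above. The channel number, device register address and buffer
 * are hypothetical placeholders.
 *
 *   if (request_dma(1, "mydevice") == 0) {        // reserve channel 1
 *           set_dma_mode(1, DMA_MODE_READ_WORD);  // I/O to memory, 16 bits
 *           set_dma_device_addr(1, 0x10000000);   // device register (example)
 *           set_dma_addr(1, (unsigned int) buf);  // incrementing memory buffer
 *           set_dma_count(1, 512);                // transfer length in bytes
 *           enable_dma(1);                        // allow external requests
 *           // ... wait for the completion interrupt; get_dma_residue(1)
 *           //     should then return 0 ...
 *           disable_dma(1);
 *           free_dma(1);
 *   }
 */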
#else /* CONFIG_M5272 is defined */

/*
 * The MCF5272 DMA controller is very different from the controller described
 * above in terms of register mapping.  For instance, with the exception of
 * the 16-bit interrupt register (IRQ#85, for reference), all of the registers
 * are 32-bit.
 *
 * The big difference, however, is the lack of device-requested DMA.  All
 * modes are dual address transfer, and there is no 'device' setup or
 * direction bit.  You can DMA between a device and memory, between memory and
 * memory, or even between two devices directly, with any combination of
 * incrementing and non-incrementing addresses you choose.  This makes the
 * 'device address' set up by set_dma_device_addr() harder to distinguish.
 *
 * Therefore, there are two options.  One is to use set_dma_addr() and
 * set_dma_device_addr(), which will act exactly as above -- it will look to
 * see if the source is set to autoincrement, and if so it will make the
 * source use the set_dma_addr() value and the destination the
 * set_dma_device_addr() value.  Otherwise the source will be set to the
 * set_dma_device_addr() value and the destination will get the set_dma_addr()
 * value.
 *
 * The other is to use the provided set_dma_src_addr() and set_dma_dest_addr()
 * functions and make it explicit.  Depending on what you're doing, one of
 * these two should work for you, but don't mix them in the same transfer
 * setup.
 */
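
/*
 * Illustrative sketch of the first option above (not part of the original
 * header): reuse set_dma_addr()/set_dma_device_addr() just as on the other
 * ColdFire parts.  The channel number and addresses are hypothetical.
 *
 *   set_dma_mode(0, DMA_MODE_WRITE);        // memory to I/O, 8 bits
 *   set_dma_device_addr(0, 0x10000004);     // static device register (example)
 *   set_dma_addr(0, (unsigned int) buf);    // incrementing memory address
 *   set_dma_count(0, 64);                   // transfer length in bytes
 *   enable_dma(0);
 */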

/* enable/disable a specific DMA channel */
static __inline__ void enable_dma(unsigned int dmanr)
{
  volatile unsigned int  *dmalp;

#ifdef DMA_DEBUG
  printk("enable_dma(dmanr=%d)\n", dmanr);
#endif

  dmalp = (unsigned int *) dma_base_addr[dmanr];
  dmalp[MCFDMA_DMR] |= MCFDMA_DMR_EN;
}

static __inline__ void disable_dma(unsigned int dmanr)
{
  volatile unsigned int   *dmalp;

#ifdef DMA_DEBUG
  printk("disable_dma(dmanr=%d)\n", dmanr);
#endif

  dmalp = (unsigned int *) dma_base_addr[dmanr];

  /* Turn off external requests, and stop any DMA in progress */
  dmalp[MCFDMA_DMR] &= ~MCFDMA_DMR_EN;
  dmalp[MCFDMA_DMR] |= MCFDMA_DMR_RESET;
}

/*
 * Clear the 'DMA Pointer Flip Flop'.
 * Write 0 for LSB/MSB, 1 for MSB/LSB access.
 * Use this once to initialize the FF to a known state.
 * After that, keep track of it. :-)
 * --- In order to do that, the DMA routines below should ---
 * --- only be used while interrupts are disabled! ---
 *
 * This is a NOP for ColdFire. Provide a stub for compatibility.
 */
static __inline__ void clear_dma_ff(unsigned int dmanr)
{
}

/* set mode (above) for a specific DMA channel */
static __inline__ void set_dma_mode(unsigned int dmanr, char mode)
{

  volatile unsigned int   *dmalp;
  volatile unsigned short *dmawp;

#ifdef DMA_DEBUG
  printk("set_dma_mode(dmanr=%d,mode=%d)\n", dmanr, mode);
#endif
  dmalp = (unsigned int *) dma_base_addr[dmanr];
  dmawp = (unsigned short *) dma_base_addr[dmanr];

  // Clear config errors
  dmalp[MCFDMA_DMR] |= MCFDMA_DMR_RESET; 

  // Set command register
  dmalp[MCFDMA_DMR] =
    MCFDMA_DMR_RQM_DUAL |         // Mandatory Request Mode setting
    MCFDMA_DMR_DSTT_SD  |         // Set up addressing types; set to supervisor-data.
    MCFDMA_DMR_SRCT_SD  |         // Set up addressing types; set to supervisor-data. 
    // source static-address-mode
    ((mode & DMA_MODE_SRC_SA_BIT) ? MCFDMA_DMR_SRCM_SA : MCFDMA_DMR_SRCM_IA) |
    // dest static-address-mode
    ((mode & DMA_MODE_DES_SA_BIT) ? MCFDMA_DMR_DSTM_SA : MCFDMA_DMR_DSTM_IA) |
    // burst, 32 bit, 16 bit or 8 bit transfers are separately configurable on the MCF5272
    (((mode & DMA_MODE_SSIZE_MASK) >> DMA_MODE_SSIZE_OFF) << MCFDMA_DMR_DSTS_OFF) |
    (((mode & DMA_MODE_SSIZE_MASK) >> DMA_MODE_SSIZE_OFF) << MCFDMA_DMR_SRCS_OFF);
    
  dmawp[MCFDMA_DIR] |= MCFDMA_DIR_ASCEN;   /* Enable completion interrupts */
  
#ifdef DEBUG_DMA
  printk("%s(%d): dmanr=%d DMR[%x]=%x DIR[%x]=%x\n", __FILE__, __LINE__,
         dmanr, (int) &dmalp[MCFDMA_DMR], dmalp[MCFDMA_DMR],
	 (int) &dmawp[MCFDMA_DIR], dmawp[MCFDMA_DIR]);
#endif
}

/* Set transfer address for specific DMA channel */
static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int a)
{
  volatile unsigned int   *dmalp;

#ifdef DMA_DEBUG
  printk("set_dma_addr(dmanr=%d,a=%x)\n", dmanr, a);
#endif

  dmalp = (unsigned int *) dma_base_addr[dmanr];

  // Determine which address registers are used for memory/device accesses
  if (dmalp[MCFDMA_DMR] & MCFDMA_DMR_SRCM) {
    // Source incrementing, must be memory
    dmalp[MCFDMA_DSAR] = a;
    // Set dest address, must be device
    dmalp[MCFDMA_DDAR] = dma_device_address[dmanr];
  } else {
    // Destination incrementing, must be memory
    dmalp[MCFDMA_DDAR] = a;
    // Set source address, must be device
    dmalp[MCFDMA_DSAR] = dma_device_address[dmanr];
  }

#ifdef DEBUG_DMA
  printk("%s(%d): dmanr=%d DMR[%x]=%x SAR[%x]=%08x DAR[%x]=%08x\n",
	__FILE__, __LINE__, dmanr, (int) &dmalp[MCFDMA_DMR], dmalp[MCFDMA_DMR],
	(int) &dmalp[MCFDMA_DSAR], dmalp[MCFDMA_DSAR],
	(int) &dmalp[MCFDMA_DDAR], dmalp[MCFDMA_DDAR]);
#endif
}

/*
 * Specific to ColdFire - sets the device address.
 * Should be called after set_dma_mode() and before set_dma_addr().
 */
static __inline__ void set_dma_device_addr(unsigned int dmanr, unsigned int a)
{
#ifdef DMA_DEBUG
  printk("set_dma_device_addr(dmanr=%d,a=%x)\n", dmanr, a);
#endif

  dma_device_address[dmanr] = a;
}

/*
 * NOTE 2: "count" represents _bytes_.
 *
 * NOTE 3: While a 32-bit register, "count" is only a maximum 24-bit value.
 */
static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count)
{
  volatile unsigned int *dmalp;
  
#ifdef DMA_DEBUG
  printk("set_dma_count(dmanr=%d,count=%d)\n", dmanr, count);
#endif

  dmalp = (unsigned int *) dma_base_addr[dmanr];
  dmalp[MCFDMA_DBCR] = count;
}

/*
 * Get DMA residue count. After a DMA transfer, this
 * should return zero. Reading this while a DMA transfer is
 * still in progress will return unpredictable results.
 * Otherwise, it returns the number of _bytes_ left to transfer.
 */
static __inline__ int get_dma_residue(unsigned int dmanr)
{
  volatile unsigned int *dmalp;
  unsigned int count;

#ifdef DMA_DEBUG
  printk("get_dma_residue(dmanr=%d)\n", dmanr);
#endif

  dmalp = (unsigned int *) dma_base_addr[dmanr];
  count = dmalp[MCFDMA_DBCR];
  return(count);
}

#endif /* !defined(CONFIG_M5272) */
#endif /* CONFIG_COLDFIRE */
 
#define MAX_DMA_CHANNELS 8

/* Ideally MAX_DMA_ADDRESS would not be defined at all; it is useless on the
   m68k/ColdFire and any occurrence should be flagged as an error.  However,
   under 2.4 it is needed by the new bootmem allocator, so define it here.  */
#define MAX_DMA_ADDRESS PAGE_OFFSET

/* These are in kernel/dma.c: */
extern int request_dma(unsigned int dmanr, const char *device_id);	/* reserve a DMA channel */
extern void free_dma(unsigned int dmanr);	/* release it again */
 
#endif /* _M68K_DMA_H */