/*
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	(c) Copyright 1998 Alan Cox <alan@lxorguk.ukuu.org.uk>
 *	(c) Copyright 2000, 2001 Red Hat Inc
 *
 *	Development of this driver was funded by Equiinet Ltd
 *			http://www.equiinet.com
 *
 *	ChangeLog:
 *
 *	Asynchronous mode dropped for 2.2. For 2.5 we will attempt the
 *	unification of all the Z85x30 asynchronous drivers for real.
 *
 *	DMA now uses get_free_page as kmalloc buffers may span a 64K
 *	boundary.
 *
 *	Modified for SMP safety and SMP locking by Alan Cox <alan@redhat.com>
 *
 *	Performance
 *
 *	Z85230:
 *	Non DMA you want a 486DX50 or better to do 64Kbits. 9600 baud
 *	X.25 is not unrealistic on all machines. DMA mode can in theory
 *	handle T1/E1 quite nicely. In practice the limit seems to be about
 *	512Kbit->1Mbit depending on motherboard.
 *
 *	Z85C30:
 *	64K will take DMA, 9600 baud X.25 should be ok.
 *
 *	Z8530:
 *	Synchronous mode without DMA is unlikely to pass about 2400 baud.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <asm/dma.h>
#include <asm/io.h>
#define RT_LOCK
#define RT_UNLOCK
#include <linux/spinlock.h>

#include <net/syncppp.h>
#include "z85230.h"


/**
 *	z8530_read_port - Architecture specific interface function
 *	@p: port to read
 *
 *	Provided port access methods. The Comtrol SV11 requires no delays
 *	between accesses and uses PC I/O. Some drivers may need a 5uS delay
 *
 *	In the longer term this should become an architecture specific
 *	section so that this can become a generic driver interface for all
 *	platforms. For now we only handle PC I/O ports with or without the
 *	dread 5uS sanity delay.
 *
 *	The caller must hold sufficient locks to avoid violating the horrible
 *	5uS delay rule.
 */

static inline int z8530_read_port(unsigned long p)
{
	u8 r=inb(Z8530_PORT_OF(p));
	if(p&Z8530_PORT_SLEEP)	/* gcc should figure this out efficiently ! */
		udelay(5);
	return r;
}

/**
 *	z8530_write_port - Architecture specific interface function
 *	@p: port to write
 *	@d: value to write
 *
 *	Write a value to a port with delays if need be. Note that the
 *	caller must hold locks to avoid read/writes from other contexts
 *	violating the 5uS rule
 *
 *	In the longer term this should become an architecture specific
 *	section so that this can become a generic driver interface for all
 *	platforms. For now we only handle PC I/O ports with or without the
 *	dread 5uS sanity delay.
 */


static inline void z8530_write_port(unsigned long p, u8 d)
{
	outb(d,Z8530_PORT_OF(p));
	if(p&Z8530_PORT_SLEEP)
		udelay(5);
}


/* Frame completion callbacks, defined later in this file */

static void z8530_rx_done(struct z8530_channel *c);
static void z8530_tx_done(struct z8530_channel *c);


/*
 *	read_zsreg - read an indexed Z8530 channel register. Register 0
 *	is read directly; any other register is selected by first writing
 *	its number to the control port. The caller must hold the chip
 *	lock that protects the 5uS access rule.
 */
static inline u8 read_zsreg(struct z8530_channel *c, u8 reg)
{
	if(reg)
		z8530_write_port(c->ctrlio, reg);
	return z8530_read_port(c->ctrlio);
}

/**
 *	read_zsdata - Read the data port of a Z8530 channel
 *	@c: The Z8530 channel to read the data port from
 *
 *	The data port provides fast access to some things. We still
 *	have all the 5uS delays to worry about.
 */

static inline u8 read_zsdata(struct z8530_channel *c)
{
	u8 r;
	r=z8530_read_port(c->dataio);
	return r;
}

/**
 *	write_zsreg - Write to a Z8530 channel register
 *	@c: The Z8530 channel
 *	@reg: Register number
 *	@val: Value to write
 *
 *	Write a value to an indexed register. The caller must hold the lock
 *	to honour the irritating delay rules. We know about register 0
 *	being fast to access.
 *
 *	Assumes c->lock is held.
 */
static inline void write_zsreg(struct z8530_channel *c, u8 reg, u8 val)
{
	if(reg)
		z8530_write_port(c->ctrlio, reg);
	z8530_write_port(c->ctrlio, val);

}

/**
 *	write_zsctrl - Write to a Z8530 control register
 *	@c: The Z8530 channel
 *	@val: Value to write
 *
 *	Write directly to the control register on the Z8530
 */

static inline void write_zsctrl(struct z8530_channel *c, u8 val)
{
	z8530_write_port(c->ctrlio, val);
}

/**
 *	write_zsdata - Write to a Z8530 control register
 *	@c: The Z8530 channel
 *	@val: Value to write
 *
 *	Write directly to the data register on the Z8530
 */


static inline void write_zsdata(struct z8530_channel *c, u8 val)
{
	z8530_write_port(c->dataio, val);
}

/*
 *	Register loading parameters for a dead port.
 *	Tables are (register, value) pairs terminated by 255.
 */

u8 z8530_dead_port[]=
{
	255
};

EXPORT_SYMBOL(z8530_dead_port);

/*
 *	Register loading parameters for currently supported circuit types
 */


/*
 *	Data clocked by telco end. This is the correct data for the UK
 *	"kilostream" service, and most other similar services.
 */

u8 z8530_hdlc_kilostream[]=
{
	4,	SYNC_ENAB|SDLC|X1CLK,
	2,	0,	/* No vector */
	1,	0,
	3,	ENT_HM|RxCRC_ENAB|Rx8,
	5,	TxCRC_ENAB|RTS|TxENAB|Tx8|DTR,
	9,	0,		/* Disable interrupts */
	6,	0xFF,
	7,	FLAG,
	10,	ABUNDER|NRZ|CRCPS,/*MARKIDLE ??*/
	11,	TCTRxCP,
	14,	DISDPLL,
	15,	DCDIE|SYNCIE|CTSIE|TxUIE|BRKIE,
	1,	EXT_INT_ENAB|TxINT_ENAB|INT_ALL_Rx,
	9,	NV|MIE|NORESET,
	255
};

EXPORT_SYMBOL(z8530_hdlc_kilostream);

/*
 *	As above but for enhanced chips.
 */

u8 z8530_hdlc_kilostream_85230[]=
{
	4,	SYNC_ENAB|SDLC|X1CLK,
	2,	0,	/* No vector */
	1,	0,
	3,	ENT_HM|RxCRC_ENAB|Rx8,
	5,	TxCRC_ENAB|RTS|TxENAB|Tx8|DTR,
	9,	0,		/* Disable interrupts */
	6,	0xFF,
	7,	FLAG,
	10,	ABUNDER|NRZ|CRCPS,	/* MARKIDLE?? */
	11,	TCTRxCP,
	14,	DISDPLL,
	15,	DCDIE|SYNCIE|CTSIE|TxUIE|BRKIE,
	1,	EXT_INT_ENAB|TxINT_ENAB|INT_ALL_Rx,
	9,	NV|MIE|NORESET,
	23,	3,		/* Extended mode AUTO TX and EOM*/

	255
};

EXPORT_SYMBOL(z8530_hdlc_kilostream_85230);

/**
 *	z8530_flush_fifo - Flush on chip RX FIFO
 *	@c: Channel to flush
 *
 *	Flush the receive FIFO. There is no specific option for this, we
 *	blindly read bytes and discard them. Reading when there is no data
 *	is harmless. The 8530 has a 4 byte FIFO, the 85230 has 8 bytes.
 *
 *	All locking is handled for the caller. On return data may still be
 *	present if it arrived during the flush.
 */

static void z8530_flush_fifo(struct z8530_channel *c)
{
	read_zsreg(c, R1);
	read_zsreg(c, R1);
	read_zsreg(c, R1);
	read_zsreg(c, R1);
	if(c->dev->type==Z85230)
	{
		read_zsreg(c, R1);
		read_zsreg(c, R1);
		read_zsreg(c, R1);
		read_zsreg(c, R1);
	}
}

/**
 *	z8530_rtsdtr - Control the outgoing DTS/RTS line
 *	@c: The Z8530 channel to control;
 *	@set: 1 to set, 0 to clear
 *
 *	Sets or clears DTR/RTS on the requested line. All locking is handled
 *	by the caller. For now we assume all boards use the actual RTS/DTR
 *	on the chip. Apparently one or two don't. We'll scream about them
 *	later.
 */

static void z8530_rtsdtr(struct z8530_channel *c, int set)
{
	if (set)
		c->regs[5] |= (RTS | DTR);
	else
		c->regs[5] &= ~(RTS | DTR);
	write_zsreg(c, R5, c->regs[5]);
}

/**
 *	z8530_rx - Handle a PIO receive event
 *	@c: Z8530 channel to process
 *
 *	Receive handler for receiving in PIO mode. This is much like the
 *	async one but not quite the same or as complex
 *
 *	Note: Its intended that this handler can easily be separated from
 *	the main code to run realtime. That'll be needed for some machines
 *	(eg to ever clock 64kbits on a sparc ;)).
 *
 *	The RT_LOCK macros don't do anything now. Keep the code covered
 *	by them as short as possible in all circumstances - clocks cost
 *	baud. The interrupt handler is assumed to be atomic w.r.t. to
 *	other code - this is true in the RT case too.
 *
 *	We only cover the sync cases for this. If you want 2Mbit async
 *	do it yourself but consider medical assistance first. This non DMA
 *	synchronous mode is portable code. The DMA mode assumes PCI like
 *	ISA DMA
 *
 *	Called with the device lock held
 */

static void z8530_rx(struct z8530_channel *c)
{
	u8 ch,stat;

	while(1)
	{
		/* FIFO empty ? */
		if(!(read_zsreg(c, R0)&1))
			break;
		ch=read_zsdata(c);
		stat=read_zsreg(c, R1);

		/*
		 *	Overrun ?
		 */
		if(c->count < c->max)
		{
			*c->dptr++=ch;
			c->count++;
		}

		if(stat&END_FR)
		{

			/*
			 *	Error ?
			 */
			if(stat&(Rx_OVR|CRC_ERR))
			{
				/* Rewind the buffer and return */
				if(c->skb)
					c->dptr=c->skb->data;
				c->count=0;
				if(stat&Rx_OVR)
				{
					printk(KERN_WARNING "%s: overrun\n", c->dev->name);
					c->rx_overrun++;
				}
				if(stat&CRC_ERR)
				{
					c->rx_crc_err++;
					/* printk("crc error\n"); */
				}
				/* Shove the frame upstream */
			}
			else
			{
				/*
				 *	Drop the lock for RX processing, or
				 *	there are deadlocks
				 */
				z8530_rx_done(c);
				write_zsctrl(c, RES_Rx_CRC);
			}
		}
	}
	/*
	 *	Clear irq
	 */
	write_zsctrl(c, ERR_RES);
	write_zsctrl(c, RES_H_IUS);
}


/**
 *	z8530_tx - Handle a PIO transmit event
 *	@c: Z8530 channel to process
 *
 *	Z8530 transmit interrupt handler for the PIO mode. The basic
 *	idea is to attempt to keep the FIFO fed. We fill as many bytes
 *	in as possible, its quite possible that we won't keep up with the
 *	data rate otherwise.
 */

static void z8530_tx(struct z8530_channel *c)
{
	while(c->txcount) {
		/* FIFO full ? */
		if(!(read_zsreg(c, R0)&4))
			return;
		c->txcount--;
		/*
		 *	Shovel out the byte
		 */
		write_zsreg(c, R8, *c->tx_ptr++);
		write_zsctrl(c, RES_H_IUS);
		/* We are about to underflow */
		if(c->txcount==0)
		{
			write_zsctrl(c, RES_EOM_L);
			write_zsreg(c, R10, c->regs[10]&~ABUNDER);
		}
	}


	/*
	 *	End of frame TX - fire another one
	 */

	write_zsctrl(c, RES_Tx_P);

	z8530_tx_done(c);
	write_zsctrl(c, RES_H_IUS);
}

/**
 *	z8530_status - Handle a PIO status exception
 *	@chan: Z8530 channel to process
 *
 *	A status event occurred in PIO synchronous mode.
There are several
 *	reasons the chip will bother us here. A transmit underrun means we
 *	failed to feed the chip fast enough and just broke a packet. A DCD
 *	change is a line up or down. We communicate that back to the protocol
 *	layer for synchronous PPP to renegotiate.
 */

static void z8530_status(struct z8530_channel *chan)
{
	u8 status, altered;

	status=read_zsreg(chan, R0);
	altered=chan->status^status;

	chan->status=status;

	if(status&TxEOM)
	{
/*		printk("%s: Tx underrun.\n", chan->dev->name); */
		chan->stats.tx_fifo_errors++;
		write_zsctrl(chan, ERR_RES);
		z8530_tx_done(chan);
	}

	if(altered&chan->dcdcheck)
	{
		if(status&chan->dcdcheck)
		{
			printk(KERN_INFO "%s: DCD raised\n", chan->dev->name);
			write_zsreg(chan, R3, chan->regs[3]|RxENABLE);
			if(chan->netdevice &&
			    ((chan->netdevice->type == ARPHRD_HDLC) ||
			    (chan->netdevice->type == ARPHRD_PPP)))
				sppp_reopen(chan->netdevice);
		}
		else
		{
			printk(KERN_INFO "%s: DCD lost\n", chan->dev->name);
			write_zsreg(chan, R3, chan->regs[3]&~RxENABLE);
			z8530_flush_fifo(chan);
		}

	}
	write_zsctrl(chan, RES_EXT_INT);
	write_zsctrl(chan, RES_H_IUS);
}

/* PIO-mode interrupt dispatch table */
struct z8530_irqhandler z8530_sync=
{
	z8530_rx,
	z8530_tx,
	z8530_status
};

EXPORT_SYMBOL(z8530_sync);

/**
 *	z8530_dma_rx - Handle a DMA RX event
 *	@chan: Channel to handle
 *
 *	Non bus mastering DMA interfaces for the Z8x30 devices. This
 *	is really pretty PC specific. The DMA mode means that most receive
 *	events are handled by the DMA hardware. We get a kick here only if
 *	a frame ended.
 */

static void z8530_dma_rx(struct z8530_channel *chan)
{
	if(chan->rxdma_on)
	{
		/* Special condition check only */
		u8 status;

		read_zsreg(chan, R7);
		read_zsreg(chan, R6);

		status=read_zsreg(chan, R1);

		if(status&END_FR)
		{
			z8530_rx_done(chan);	/* Fire up the next one */
		}
		write_zsctrl(chan, ERR_RES);
		write_zsctrl(chan, RES_H_IUS);
	}
	else
	{
		/* DMA is off right now, drain the slow way */
		z8530_rx(chan);
	}
}

/**
 *	z8530_dma_tx - Handle a DMA TX event
 *	@chan: The Z8530 channel to handle
 *
 *	We have received an interrupt while doing DMA transmissions. It
 *	shouldn't happen. Scream loudly if it does.
 */

static void z8530_dma_tx(struct z8530_channel *chan)
{
	if(!chan->dma_tx)
	{
		printk(KERN_WARNING "Hey who turned the DMA off?\n");
		z8530_tx(chan);
		return;
	}
	/* This shouldn't occur in DMA mode */
	printk(KERN_ERR "DMA tx - bogus event!\n");
	z8530_tx(chan);
}

/**
 *	z8530_dma_status - Handle a DMA status exception
 *	@chan: Z8530 channel to process
 *
 *	A status event occurred on the Z8530. We receive these for two reasons
 *	when in DMA mode. Firstly if we finished a packet transfer we get one
 *	and kick the next packet out. Secondly we may see a DCD change and
 *	have to poke the protocol layer.
551 * 552 */ 553 554static void z8530_dma_status(struct z8530_channel *chan) 555{ 556 u8 status, altered; 557 558 status=read_zsreg(chan, R0); 559 altered=chan->status^status; 560 561 chan->status=status; 562 563 564 if(chan->dma_tx) 565 { 566 if(status&TxEOM) 567 { 568 unsigned long flags; 569 570 flags=claim_dma_lock(); 571 disable_dma(chan->txdma); 572 clear_dma_ff(chan->txdma); 573 chan->txdma_on=0; 574 release_dma_lock(flags); 575 z8530_tx_done(chan); 576 } 577 } 578 579 if(altered&chan->dcdcheck) 580 { 581 if(status&chan->dcdcheck) 582 { 583 printk(KERN_INFO "%s: DCD raised\n", chan->dev->name); 584 write_zsreg(chan, R3, chan->regs[3]|RxENABLE); 585 if(chan->netdevice && 586 ((chan->netdevice->type == ARPHRD_HDLC) || 587 (chan->netdevice->type == ARPHRD_PPP))) 588 sppp_reopen(chan->netdevice); 589 } 590 else 591 { 592 printk(KERN_INFO "%s:DCD lost\n", chan->dev->name); 593 write_zsreg(chan, R3, chan->regs[3]&~RxENABLE); 594 z8530_flush_fifo(chan); 595 } 596 } 597 598 write_zsctrl(chan, RES_EXT_INT); 599 write_zsctrl(chan, RES_H_IUS); 600} 601 602struct z8530_irqhandler z8530_dma_sync= 603{ 604 z8530_dma_rx, 605 z8530_dma_tx, 606 z8530_dma_status 607}; 608 609EXPORT_SYMBOL(z8530_dma_sync); 610 611struct z8530_irqhandler z8530_txdma_sync= 612{ 613 z8530_rx, 614 z8530_dma_tx, 615 z8530_dma_status 616}; 617 618EXPORT_SYMBOL(z8530_txdma_sync); 619 620/** 621 * z8530_rx_clear - Handle RX events from a stopped chip 622 * @c: Z8530 channel to shut up 623 * 624 * Receive interrupt vectors for a Z8530 that is in 'parked' mode. 625 * For machines with PCI Z85x30 cards, or level triggered interrupts 626 * (eg the MacII) we must clear the interrupt cause or die. 
627 */ 628 629 630static void z8530_rx_clear(struct z8530_channel *c) 631{ 632 /* 633 * Data and status bytes 634 */ 635 u8 stat; 636 637 read_zsdata(c); 638 stat=read_zsreg(c, R1); 639 640 if(stat&END_FR) 641 write_zsctrl(c, RES_Rx_CRC); 642 /* 643 * Clear irq 644 */ 645 write_zsctrl(c, ERR_RES); 646 write_zsctrl(c, RES_H_IUS); 647} 648 649/** 650 * z8530_tx_clear - Handle TX events from a stopped chip 651 * @c: Z8530 channel to shut up 652 * 653 * Transmit interrupt vectors for a Z8530 that is in 'parked' mode. 654 * For machines with PCI Z85x30 cards, or level triggered interrupts 655 * (eg the MacII) we must clear the interrupt cause or die. 656 */ 657 658static void z8530_tx_clear(struct z8530_channel *c) 659{ 660 write_zsctrl(c, RES_Tx_P); 661 write_zsctrl(c, RES_H_IUS); 662} 663 664/** 665 * z8530_status_clear - Handle status events from a stopped chip 666 * @chan: Z8530 channel to shut up 667 * 668 * Status interrupt vectors for a Z8530 that is in 'parked' mode. 669 * For machines with PCI Z85x30 cards, or level triggered interrupts 670 * (eg the MacII) we must clear the interrupt cause or die. 671 */ 672 673static void z8530_status_clear(struct z8530_channel *chan) 674{ 675 u8 status=read_zsreg(chan, R0); 676 if(status&TxEOM) 677 write_zsctrl(chan, ERR_RES); 678 write_zsctrl(chan, RES_EXT_INT); 679 write_zsctrl(chan, RES_H_IUS); 680} 681 682struct z8530_irqhandler z8530_nop= 683{ 684 z8530_rx_clear, 685 z8530_tx_clear, 686 z8530_status_clear 687}; 688 689 690EXPORT_SYMBOL(z8530_nop); 691 692/** 693 * z8530_interrupt - Handle an interrupt from a Z8530 694 * @irq: Interrupt number 695 * @dev_id: The Z8530 device that is interrupting. 696 * @regs: unused 697 * 698 * A Z85[2]30 device has stuck its hand in the air for attention. 699 * We scan both the channels on the chip for events and then call 700 * the channel specific call backs for each channel that has events. 
We have to use callback functions because the two channels can be
 *	in different modes.
 *
 *	Locking is done for the handlers. Note that locking is done
 *	at the chip level (the 5uS delay issue is per chip not per
 *	channel). c->lock for both channels points to dev->lock
 */

irqreturn_t z8530_interrupt(int irq, void *dev_id)
{
	struct z8530_dev *dev=dev_id;
	u8 intr;
	static volatile int locker=0;	/* crude re-entry guard; not per-device -
					   NOTE(review): shared across all chips */
	int work=0;
	struct z8530_irqhandler *irqs;

	if(locker)
	{
		printk(KERN_ERR "IRQ re-enter\n");
		return IRQ_NONE;
	}
	locker=1;

	spin_lock(&dev->lock);

	while(++work<5000)
	{

		intr = read_zsreg(&dev->chanA, R3);
		if(!(intr & (CHARxIP|CHATxIP|CHAEXT|CHBRxIP|CHBTxIP|CHBEXT)))
			break;

		/* This holds the IRQ status. On the 8530 you must read it from chan
		   A even though it applies to the whole chip */

		/* Now walk the chip and see what it is wanting - it may be
		   an IRQ for someone else remember */

		irqs=dev->chanA.irqs;

		if(intr & (CHARxIP|CHATxIP|CHAEXT))
		{
			if(intr&CHARxIP)
				irqs->rx(&dev->chanA);
			if(intr&CHATxIP)
				irqs->tx(&dev->chanA);
			if(intr&CHAEXT)
				irqs->status(&dev->chanA);
		}

		irqs=dev->chanB.irqs;

		if(intr & (CHBRxIP|CHBTxIP|CHBEXT))
		{
			if(intr&CHBRxIP)
				irqs->rx(&dev->chanB);
			if(intr&CHBTxIP)
				irqs->tx(&dev->chanB);
			if(intr&CHBEXT)
				irqs->status(&dev->chanB);
		}
	}
	spin_unlock(&dev->lock);
	if(work==5000)
		printk(KERN_ERR "%s: interrupt jammed - abort(0x%X)!\n", dev->name, intr);
	/* Ok all done */
	locker=0;
	return IRQ_HANDLED;
}

EXPORT_SYMBOL(z8530_interrupt);

/* Baseline register image for a freshly opened channel; only WR12
   (low byte of the baud constant) is preloaded */
static char reg_init[16]=
{
	0,0,0,0,
	0,0,0,0,
	0,0,0,0,
	0x55,0,0,0
};


/**
 *	z8530_sync_open - Open a Z8530 channel for PIO
 *	@dev:	The network interface we are using
 *	@c:	The Z8530 channel to open in synchronous PIO mode
 *
 *	Switch a Z8530 into synchronous mode without DMA assist. We
 *	raise the RTS/DTR and commence network operation.
 */

int z8530_sync_open(struct net_device *dev, struct z8530_channel *c)
{
	unsigned long flags;

	spin_lock_irqsave(c->lock, flags);

	c->sync = 1;
	c->mtu = dev->mtu+64;	/* headroom for framing overhead */
	c->count = 0;
	c->skb = NULL;
	c->skb2 = NULL;
	c->irqs = &z8530_sync;

	/* This loads the double buffer up */
	z8530_rx_done(c);	/* Load the frame ring */
	z8530_rx_done(c);	/* Load the backup frame */
	z8530_rtsdtr(c,1);
	c->dma_tx = 0;
	c->regs[R1]|=TxINT_ENAB;
	write_zsreg(c, R1, c->regs[R1]);
	write_zsreg(c, R3, c->regs[R3]|RxENABLE);

	spin_unlock_irqrestore(c->lock, flags);
	return 0;
}


EXPORT_SYMBOL(z8530_sync_open);

/**
 *	z8530_sync_close - Close a PIO Z8530 channel
 *	@dev: Network device to close
 *	@c: Z8530 channel to disassociate and move to idle
 *
 *	Close down a Z8530 interface and switch its interrupt handlers
 *	to discard future events.
 */

int z8530_sync_close(struct net_device *dev, struct z8530_channel *c)
{
	u8 chk;
	unsigned long flags;

	spin_lock_irqsave(c->lock, flags);
	c->irqs = &z8530_nop;
	c->max = 0;
	c->sync = 0;

	chk=read_zsreg(c,R0);	/* read clears pending status */
	write_zsreg(c, R3, c->regs[R3]);
	z8530_rtsdtr(c,0);

	spin_unlock_irqrestore(c->lock, flags);
	return 0;
}

EXPORT_SYMBOL(z8530_sync_close);

/**
 *	z8530_sync_dma_open - Open a Z8530 for DMA I/O
 *	@dev: The network device to attach
 *	@c: The Z8530 channel to configure in sync DMA mode.
 *
 *	Set up a Z85x30 device for synchronous DMA in both directions. Two
 *	ISA DMA channels must be available for this to work. We assume ISA
 *	DMA driven I/O and PC limits on access.
 */

int z8530_sync_dma_open(struct net_device *dev, struct z8530_channel *c)
{
	unsigned long cflags, dflags;

	c->sync = 1;
	c->mtu = dev->mtu+64;
	c->count = 0;
	c->skb = NULL;
	c->skb2 = NULL;
	/*
	 *	Load the DMA interfaces up
	 */
	c->rxdma_on = 0;
	c->txdma_on = 0;

	/*
	 *	Allocate the DMA flip buffers. Limit by page size.
	 *	Everyone runs 1500 mtu or less on wan links so this
	 *	should be fine.
	 */

	if(c->mtu > PAGE_SIZE/2)
		return -EMSGSIZE;

	c->rx_buf[0]=(void *)get_zeroed_page(GFP_KERNEL|GFP_DMA);
	if(c->rx_buf[0]==NULL)
		return -ENOBUFS;
	/* Both flip buffers share one page so they cannot straddle a 64K
	   ISA DMA boundary */
	c->rx_buf[1]=c->rx_buf[0]+PAGE_SIZE/2;

	c->tx_dma_buf[0]=(void *)get_zeroed_page(GFP_KERNEL|GFP_DMA);
	if(c->tx_dma_buf[0]==NULL)
	{
		free_page((unsigned long)c->rx_buf[0]);
		c->rx_buf[0]=NULL;
		return -ENOBUFS;
	}
	c->tx_dma_buf[1]=c->tx_dma_buf[0]+PAGE_SIZE/2;

	c->tx_dma_used=0;
	c->dma_tx = 1;
	c->dma_num=0;
	c->dma_ready=1;

	/*
	 *	Enable DMA control mode
	 */

	spin_lock_irqsave(c->lock, cflags);

	/*
	 *	TX DMA via DIR/REQ
	 */

	c->regs[R14]|= DTRREQ;
	write_zsreg(c, R14, c->regs[R14]);

	c->regs[R1]&= ~TxINT_ENAB;
	write_zsreg(c, R1, c->regs[R1]);

	/*
	 *	RX DMA via W/Req
	 */

	c->regs[R1]|= WT_FN_RDYFN;
	c->regs[R1]|= WT_RDY_RT;
	c->regs[R1]|= INT_ERR_Rx;
	c->regs[R1]&= ~TxINT_ENAB;
	write_zsreg(c, R1, c->regs[R1]);
	c->regs[R1]|= WT_RDY_ENAB;
	write_zsreg(c, R1, c->regs[R1]);

	/*
	 *	DMA interrupts
	 */

	/*
	 *	Set up the DMA configuration
	 */

	dflags=claim_dma_lock();

	disable_dma(c->rxdma);
	clear_dma_ff(c->rxdma);
	set_dma_mode(c->rxdma, DMA_MODE_READ|0x10);	/* 0x10: autoinit */
	set_dma_addr(c->rxdma, virt_to_bus(c->rx_buf[0]));
	set_dma_count(c->rxdma, c->mtu);
	enable_dma(c->rxdma);

	disable_dma(c->txdma);
	clear_dma_ff(c->txdma);
	set_dma_mode(c->txdma, DMA_MODE_WRITE);
	disable_dma(c->txdma);

	release_dma_lock(dflags);

	/*
	 *	Select the DMA interrupt handlers
	 */

	c->rxdma_on = 1;
	c->txdma_on = 1;
	c->tx_dma_used = 1;

	c->irqs = &z8530_dma_sync;
	z8530_rtsdtr(c,1);
	write_zsreg(c, R3, c->regs[R3]|RxENABLE);

	spin_unlock_irqrestore(c->lock, cflags);

	return 0;
}

EXPORT_SYMBOL(z8530_sync_dma_open);

/**
 *	z8530_sync_dma_close - Close down DMA I/O
 *	@dev: Network device to detach
 *	@c: Z8530 channel to move into discard mode
 *
 *	Shut down a DMA mode synchronous interface. Halt the DMA, and
 *	free the buffers.
 */

int z8530_sync_dma_close(struct net_device *dev, struct z8530_channel *c)
{
	u8 chk;
	unsigned long flags;

	c->irqs = &z8530_nop;
	c->max = 0;
	c->sync = 0;

	/*
	 *	Disable the PC DMA channels
	 */

	flags=claim_dma_lock();
	disable_dma(c->rxdma);
	clear_dma_ff(c->rxdma);

	c->rxdma_on = 0;

	disable_dma(c->txdma);
	clear_dma_ff(c->txdma);
	release_dma_lock(flags);

	c->txdma_on = 0;
	c->tx_dma_used = 0;

	spin_lock_irqsave(c->lock, flags);

	/*
	 *	Disable DMA control mode
	 */

	c->regs[R1]&= ~WT_RDY_ENAB;
	write_zsreg(c, R1, c->regs[R1]);
	c->regs[R1]&= ~(WT_RDY_RT|WT_FN_RDYFN|INT_ERR_Rx);
	c->regs[R1]|= INT_ALL_Rx;
	write_zsreg(c, R1, c->regs[R1]);
	c->regs[R14]&= ~DTRREQ;
	write_zsreg(c, R14, c->regs[R14]);

	if(c->rx_buf[0])
	{
		free_page((unsigned long)c->rx_buf[0]);
		c->rx_buf[0]=NULL;
	}
	if(c->tx_dma_buf[0])
	{
		free_page((unsigned long)c->tx_dma_buf[0]);
		c->tx_dma_buf[0]=NULL;
	}
	chk=read_zsreg(c,R0);	/* read clears pending status */
	write_zsreg(c, R3, c->regs[R3]);
	z8530_rtsdtr(c,0);

	spin_unlock_irqrestore(c->lock, flags);

	return 0;
}

EXPORT_SYMBOL(z8530_sync_dma_close);

/**
 *	z8530_sync_txdma_open - Open a Z8530 for TX driven DMA
 *	@dev: The network device to attach
 *	@c: The Z8530 channel to configure in sync DMA mode.
 *
 *	Set up a Z85x30 device for synchronous DMA tranmission. One
 *	ISA DMA channel must be available for this to work. The receive
 *	side is run in PIO mode, but then it has the bigger FIFO.
 */

int z8530_sync_txdma_open(struct net_device *dev, struct z8530_channel *c)
{
	unsigned long cflags, dflags;

	printk("Opening sync interface for TX-DMA\n");
	c->sync = 1;
	c->mtu = dev->mtu+64;
	c->count = 0;
	c->skb = NULL;
	c->skb2 = NULL;

	/*
	 *	Allocate the DMA flip buffers. Limit by page size.
	 *	Everyone runs 1500 mtu or less on wan links so this
	 *	should be fine.
	 */

	if(c->mtu > PAGE_SIZE/2)
		return -EMSGSIZE;

	c->tx_dma_buf[0]=(void *)get_zeroed_page(GFP_KERNEL|GFP_DMA);
	if(c->tx_dma_buf[0]==NULL)
		return -ENOBUFS;

	/* Both halves live in one page so neither crosses a 64K ISA
	   DMA boundary */
	c->tx_dma_buf[1] = c->tx_dma_buf[0] + PAGE_SIZE/2;


	spin_lock_irqsave(c->lock, cflags);

	/*
	 *	Load the PIO receive ring
	 */

	z8530_rx_done(c);
	z8530_rx_done(c);

	/*
	 *	Load the DMA interfaces up
	 */

	c->rxdma_on = 0;
	c->txdma_on = 0;

	c->tx_dma_used=0;
	c->dma_num=0;
	c->dma_ready=1;
	c->dma_tx = 1;

	/*
	 *	Enable DMA control mode
	 */

	/*
	 *	TX DMA via DIR/REQ
	 */
	c->regs[R14]|= DTRREQ;
	write_zsreg(c, R14, c->regs[R14]);

	c->regs[R1]&= ~TxINT_ENAB;
	write_zsreg(c, R1, c->regs[R1]);

	/*
	 *	Set up the DMA configuration
	 */

	dflags = claim_dma_lock();

	disable_dma(c->txdma);
	clear_dma_ff(c->txdma);
	set_dma_mode(c->txdma, DMA_MODE_WRITE);
	disable_dma(c->txdma);

	release_dma_lock(dflags);

	/*
	 *	Select the DMA interrupt handlers
	 */

	c->rxdma_on = 0;
	c->txdma_on = 1;
	c->tx_dma_used = 1;

	c->irqs = &z8530_txdma_sync;
	z8530_rtsdtr(c,1);
	write_zsreg(c, R3, c->regs[R3]|RxENABLE);
	spin_unlock_irqrestore(c->lock, cflags);

	return 0;
}

EXPORT_SYMBOL(z8530_sync_txdma_open);

/**
 *	z8530_sync_txdma_close - Close down a TX driven DMA channel
 *	@dev: Network device to detach
 *	@c: Z8530 channel to move into discard mode
 *
 *	Shut down a DMA/PIO split mode synchronous interface. Halt the DMA,
 *	and free the buffers.
 */

int z8530_sync_txdma_close(struct net_device *dev, struct z8530_channel *c)
{
	unsigned long dflags, cflags;
	u8 chk;


	spin_lock_irqsave(c->lock, cflags);

	c->irqs = &z8530_nop;
	c->max = 0;
	c->sync = 0;

	/*
	 *	Disable the PC DMA channels
	 */

	dflags = claim_dma_lock();

	disable_dma(c->txdma);
	clear_dma_ff(c->txdma);
	c->txdma_on = 0;
	c->tx_dma_used = 0;

	release_dma_lock(dflags);

	/*
	 *	Disable DMA control mode
	 */

	c->regs[R1]&= ~WT_RDY_ENAB;
	write_zsreg(c, R1, c->regs[R1]);
	c->regs[R1]&= ~(WT_RDY_RT|WT_FN_RDYFN|INT_ERR_Rx);
	c->regs[R1]|= INT_ALL_Rx;
	write_zsreg(c, R1, c->regs[R1]);
	c->regs[R14]&= ~DTRREQ;
	write_zsreg(c, R14, c->regs[R14]);

	if(c->tx_dma_buf[0])
	{
		free_page((unsigned long)c->tx_dma_buf[0]);
		c->tx_dma_buf[0]=NULL;
	}
	chk=read_zsreg(c,R0);	/* read clears pending status */
	write_zsreg(c, R3, c->regs[R3]);
	z8530_rtsdtr(c,0);

	spin_unlock_irqrestore(c->lock, cflags);
	return 0;
}


EXPORT_SYMBOL(z8530_sync_txdma_close);


/*
 *	Name strings for Z8530 chips. SGI claim to have a 130, Zilog deny
 *	it exists...
 */

static char *z8530_type_name[]={
	"Z8530",
	"Z85C30",
	"Z85230"
};

/**
 *	z8530_describe - Uniformly describe a Z8530 port
 *	@dev: Z8530 device to describe
 *	@mapping: string holding mapping type (eg "I/O" or "Mem")
 *	@io: the port value in question
 *
 *	Describe a Z8530 in a standard format. We must pass the I/O as
 *	the port offset isnt predictable. The main reason for this function
 *	is to try and get a common format of report.
 */

void z8530_describe(struct z8530_dev *dev, char *mapping, unsigned long io)
{
	printk(KERN_INFO "%s: %s found at %s 0x%lX, IRQ %d.\n",
		dev->name,
		z8530_type_name[dev->type],
		mapping,
		Z8530_PORT_OF(io),
		dev->irq);
}

EXPORT_SYMBOL(z8530_describe);

/*
 *	Locked operation part of the z8530 init code
 */

static inline int do_z8530_init(struct z8530_dev *dev)
{
	/* NOP the interrupt handlers first - we might get a
	   floating IRQ transition when we reset the chip */
	dev->chanA.irqs=&z8530_nop;
	dev->chanB.irqs=&z8530_nop;
	dev->chanA.dcdcheck=DCD;
	dev->chanB.dcdcheck=DCD;

	/* Reset the chip */
	write_zsreg(&dev->chanA, R9, 0xC0);
	udelay(200);
	/* Now check its valid - WR12 must read back what we wrote */
	write_zsreg(&dev->chanA, R12, 0xAA);
	if(read_zsreg(&dev->chanA, R12)!=0xAA)
		return -ENODEV;
	write_zsreg(&dev->chanA, R12, 0x55);
	if(read_zsreg(&dev->chanA, R12)!=0x55)
		return -ENODEV;

	dev->type=Z8530;

	/*
	 *	See the application note.
	 */

	write_zsreg(&dev->chanA, R15, 0x01);

	/*
	 *	If we can set the low bit of R15 then
	 *	the chip is enhanced.
	 */

	if(read_zsreg(&dev->chanA, R15)==0x01)
	{
		/* This C30 versus 230 detect is from Klaus Kudielka's dmascc */
		/* Put a char in the fifo */
		write_zsreg(&dev->chanA, R8, 0);
		if(read_zsreg(&dev->chanA, R0)&Tx_BUF_EMP)
			dev->type = Z85230;	/* Has a FIFO */
		else
			dev->type = Z85C30;	/* Z85C30, 1 byte FIFO */
	}

	/*
	 *	The code assumes R7' and friends are
	 *	off. Use write_zsext() for these and keep
	 *	this bit clear.
	 */

	write_zsreg(&dev->chanA, R15, 0);

	/*
	 *	At this point it looks like the chip is behaving
	 */

	memcpy(dev->chanA.regs, reg_init, 16);
	memcpy(dev->chanB.regs, reg_init ,16);

	return 0;
}

/**
 *	z8530_init - Initialise a Z8530 device
 *	@dev: Z8530 device to initialise.
 *
 *	Configure up a Z8530/Z85C30 or Z85230 chip. We check the device
 *	is present, identify the type and then program it to hopefully
 *	keep quite and behave. This matters a lot, a Z8530 in the wrong
 *	state will sometimes get into stupid modes generating 10Khz
 *	interrupt streams and the like.
 *
 *	We set the interrupt handler up to discard any events, in case
 *	we get them during reset or setp.
 *
 *	Return 0 for success, or a negative value indicating the problem
 *	in errno form.
 */

int z8530_init(struct z8530_dev *dev)
{
	unsigned long flags;
	int ret;

	/* Set up the chip level lock */
	spin_lock_init(&dev->lock);
	dev->chanA.lock = &dev->lock;
	dev->chanB.lock = &dev->lock;

	spin_lock_irqsave(&dev->lock, flags);
	ret = do_z8530_init(dev);
	spin_unlock_irqrestore(&dev->lock, flags);

	return ret;
}


EXPORT_SYMBOL(z8530_init);

/**
 *	z8530_shutdown - Shutdown a Z8530 device
 *	@dev: The Z8530 chip to shutdown
 *
 *	We set the interrupt handlers to silence any interrupts.
We then
 *	reset the chip and wait 100uS to be sure the reset completed. Just
 *	in case the caller then tries to do stuff.
 *
 *	This is called without the lock held
 */

int z8530_shutdown(struct z8530_dev *dev)
{
	unsigned long flags;
	/* Reset the chip */

	spin_lock_irqsave(&dev->lock, flags);
	/* Point both channels at the no-op handler so any stray event
	   is harmlessly discarded */
	dev->chanA.irqs=&z8530_nop;
	dev->chanB.irqs=&z8530_nop;
	/* WR9 0xC0 = force hardware reset of the whole chip */
	write_zsreg(&dev->chanA, R9, 0xC0);
	/* We must lock the udelay, the chip is offlimits here */
	udelay(100);
	spin_unlock_irqrestore(&dev->lock, flags);
	return 0;
}

EXPORT_SYMBOL(z8530_shutdown);

/**
 *	z8530_channel_load - Load channel register settings
 *	@c: Z8530 channel to configure
 *	@rtable: table of (register, value) byte pairs, terminated by 255
 *
 *	Program a channel from a register table and reset the channel
 *	software state (no buffers queued, MTU 1500, receiver enabled,
 *	receive handler pointed at z8530_null_rx). The shadow copy in
 *	c->regs is kept in step with each write.
 *
 *	Returns 0. Called without the lock held.
 */

int z8530_channel_load(struct z8530_channel *c, u8 *rtable)
{
	unsigned long flags;

	spin_lock_irqsave(c->lock, flags);

	while(*rtable!=255)
	{
		int reg=*rtable++;
		/* Register numbers above 0x0F are the extended set,
		   reached by setting bit 0 of WR15 around the access */
		if(reg>0x0F)
			write_zsreg(c, R15, c->regs[15]|1);
		write_zsreg(c, reg&0x0F, *rtable);
		if(reg>0x0F)
			write_zsreg(c, R15, c->regs[15]&~1);
		c->regs[reg]=*rtable++;	/* Keep the shadow copy in step */
	}
	c->rx_function=z8530_null_rx;	/* Discard frames until a user attaches */
	c->skb=NULL;
	c->tx_skb=NULL;
	c->tx_next_skb=NULL;
	c->mtu=1500;
	c->max=0;
	c->count=0;
	c->status=read_zsreg(c, R0);
	c->sync=1;
	write_zsreg(c, R3, c->regs[R3]|RxENABLE);

	spin_unlock_irqrestore(c->lock, flags);
	return 0;
}

EXPORT_SYMBOL(z8530_channel_load);


/**
 *	z8530_tx_begin - Begin packet transmission
 *	@c: The Z8530 channel to kick
 *
 *	This is the speed sensitive side of transmission. If we are called
 *	and no buffer is being transmitted we commence the next buffer. If
 *	nothing is queued we idle the sync.
 *
 *	Note: We are handling this code path in the interrupt path, keep it
 *	fast or bad things will happen.
 *
 *	Called with the lock held.
 */

static void z8530_tx_begin(struct z8530_channel *c)
{
	unsigned long flags;
	if(c->tx_skb)
		return;		/* A transmit is already in progress */

	/* Promote the queued follow-on buffer to current */
	c->tx_skb=c->tx_next_skb;
	c->tx_next_skb=NULL;
	c->tx_ptr=c->tx_next_ptr;

	if(c->tx_skb==NULL)
	{
		/* Nothing queued: idle the transmitter */
		if(c->dma_tx)
		{
			flags=claim_dma_lock();
			disable_dma(c->txdma);
			/*
			 *	Check if we crapped out: a non-zero residue
			 *	means the last DMA transmit never completed.
			 */
			if(get_dma_residue(c->txdma))
			{
				c->stats.tx_dropped++;
				c->stats.tx_fifo_errors++;
			}
			release_dma_lock(flags);
		}
		c->txcount=0;
	}
	else
	{
		c->txcount=c->tx_skb->len;


		if(c->dma_tx)
		{
			/* DMA transmit: point the channel at the buffer
			   and let the hardware feed the chip */
			flags=claim_dma_lock();
			disable_dma(c->txdma);

			/*
			 *	These two are needed by the 8530/85C30
			 *	and must be issued when idling.
			 */

			if(c->dev->type!=Z85230)
			{
				write_zsctrl(c, RES_Tx_CRC);
				write_zsctrl(c, RES_EOM_L);
			}
			/* Clear TX underrun/abort so the frame starts clean */
			write_zsreg(c, R10, c->regs[10]&~ABUNDER);
			clear_dma_ff(c->txdma);
			set_dma_addr(c->txdma, virt_to_bus(c->tx_ptr));
			set_dma_count(c->txdma, c->txcount);
			enable_dma(c->txdma);
			release_dma_lock(flags);
			write_zsctrl(c, RES_EOM_L);
			write_zsreg(c, R5, c->regs[R5]|TxENAB);
		}
		else
		{

			/* PIO transmit: ABUNDER off */
			write_zsreg(c, R10, c->regs[10]);
			write_zsctrl(c, RES_Tx_CRC);

			/* Prime the transmit buffer; the TX interrupt
			   path pushes the remainder of the frame */
			while(c->txcount && (read_zsreg(c,R0)&Tx_BUF_EMP))
			{
				write_zsreg(c, R8, *c->tx_ptr++);
				c->txcount--;
			}

		}
	}
	/*
	 *	Since we emptied tx_next_skb into tx_skb the follow-on
	 *	slot is free again, so we can ask the stack for more.
	 */
	netif_wake_queue(c->netdevice);
}

/**
 *	z8530_tx_done - TX complete callback
 *	@c: The channel that completed a transmit.
 *
 *	This is called when we complete a packet send. We wake the queue,
 *	start the next packet going and then free the buffer of the existing
 *	packet. This code is fairly timing sensitive.
 *
 *	Called with the register lock held.
 */

static void z8530_tx_done(struct z8530_channel *c)
{
	struct sk_buff *skb;

	/* Actually this can happen - e.g. a spurious TX completion. */
	if(c->tx_skb==NULL)
		return;

	skb=c->tx_skb;
	c->tx_skb=NULL;
	/* Kick the next frame going before we account and free - this is
	   the timing sensitive part */
	z8530_tx_begin(c);
	c->stats.tx_packets++;
	c->stats.tx_bytes+=skb->len;
	dev_kfree_skb_irq(skb);
}

/**
 *	z8530_null_rx - Discard a packet
 *	@c: The channel the packet arrived on
 *	@skb: The buffer
 *
 *	We point the receive handler at this function when idle. Instead
 *	of syncppp processing the frames we get to throw them away.
 */

void z8530_null_rx(struct z8530_channel *c, struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);
}

EXPORT_SYMBOL(z8530_null_rx);

/**
 *	z8530_rx_done - Receive completion callback
 *	@c: The channel that completed a receive
 *
 *	A new packet is complete. Our goal here is to get back into receive
 *	mode as fast as possible. On the Z85230 we could change to using
 *	ESCC mode, but on the older chips we have no choice. We flip to the
 *	new buffer immediately in DMA mode so that the DMA of the next
 *	frame can occur while we are copying the previous buffer to an sk_buff
 *
 *	Called with the lock held
 */

static void z8530_rx_done(struct z8530_channel *c)
{
	struct sk_buff *skb;
	int ct;

	/*
	 *	Is our receive engine in DMA mode
	 */

	if(c->rxdma_on)
	{
		/*
		 *	Save the ready state and the buffer currently
		 *	being used as the DMA target
		 */

		int ready=c->dma_ready;
		unsigned char *rxb=c->rx_buf[c->dma_num];
		unsigned long flags;

		/*
		 *	Complete this DMA. Necessary to find the length
		 *	(mtu minus the residue left in the controller).
		 */

		flags=claim_dma_lock();

		disable_dma(c->rxdma);
		clear_dma_ff(c->rxdma);
		c->rxdma_on=0;
		ct=c->mtu-get_dma_residue(c->rxdma);
		if(ct<0)
			ct=2;	/* Shit happens.. */
		c->dma_ready=0;

		/*
		 *	Normal case: the other slot is free, start the next DMA
		 *	into it immediately.
		 */

		if(ready)
		{
			c->dma_num^=1;	/* Flip to the other bounce buffer */
			set_dma_mode(c->rxdma, DMA_MODE_READ|0x10);
			set_dma_addr(c->rxdma, virt_to_bus(c->rx_buf[c->dma_num]));
			set_dma_count(c->rxdma, c->mtu);
			c->rxdma_on = 1;
			enable_dma(c->rxdma);
			/* Stop any frames that we missed the head of
			   from passing */
			write_zsreg(c, R0, RES_Rx_CRC);
		}
		else
			/* Can't occur as we dont reenable the DMA irq until
			   after the flip is done */
			printk(KERN_WARNING "%s: DMA flip overrun!\n", c->netdevice->name);

		release_dma_lock(flags);

		/*
		 *	Shove the old buffer into an sk_buff. We can't DMA
		 *	directly into one on a PC - it might be above the 16Mb
		 *	boundary. Optimisation - we could check to see if we
		 *	can avoid the copy. Optimisation 2 - make the memcpy
		 *	a copychecksum.
		 */

		skb=dev_alloc_skb(ct);
		if(skb==NULL)
		{
			/* NOTE(review): on allocation failure rx_dropped is
			   incremented here AND again in the common skb==NULL
			   tail below - looks double counted, confirm intent */
			c->stats.rx_dropped++;
			printk(KERN_WARNING "%s: Memory squeeze.\n", c->netdevice->name);
		}
		else
		{
			skb_put(skb, ct);
			skb_copy_to_linear_data(skb, rxb, ct);
			c->stats.rx_packets++;
			c->stats.rx_bytes+=ct;
		}
		c->dma_ready=1;
	}
	else
	{
		RT_LOCK;
		skb=c->skb;

		/*
		 *	The game we play for non DMA is similar. We want to
		 *	get the controller set up for the next packet as fast
		 *	as possible. We potentially only have one byte + the
		 *	fifo length for this. Thus we want to flip to the new
		 *	buffer and then mess around copying and allocating
		 *	things. For the current case it doesn't matter but
		 *	if you build a system where the sync irq isnt blocked
		 *	by the kernel IRQ disable then you need only block the
		 *	sync IRQ for the RT_LOCK area.
		 *
		 */
		ct=c->count;

		c->skb = c->skb2;	/* Flip to the standby buffer */
		c->count = 0;
		c->max = c->mtu;
		if(c->skb)
		{
			c->dptr = c->skb->data;
			c->max = c->mtu;
		}
		else
		{
			/* No standby buffer: receive nothing until we get one */
			c->count= 0;
			c->max = 0;
		}
		RT_UNLOCK;

		/* Refill the standby buffer outside the critical section */
		c->skb2 = dev_alloc_skb(c->mtu);
		if(c->skb2==NULL)
			printk(KERN_WARNING "%s: memory squeeze.\n",
				c->netdevice->name);
		else
		{
			skb_put(c->skb2,c->mtu);
		}
		c->stats.rx_packets++;
		c->stats.rx_bytes+=ct;

	}
	/*
	 *	If we received a frame we must now process it.
	 */
	if(skb)
	{
		skb_trim(skb, ct);
		c->rx_function(c,skb);
	}
	else
	{
		c->stats.rx_dropped++;
		printk(KERN_ERR "%s: Lost a frame\n", c->netdevice->name);
	}
}

/**
 *	spans_boundary - Check a packet can be ISA DMA'd
 *	@skb: The buffer to check
 *
 *	Returns true if the buffer cross a DMA boundary on a PC. The poor
 *	thing can only DMA within a 64K block not across the edges of it.
 */

static inline int spans_boundary(struct sk_buff *skb)
{
	unsigned long a=(unsigned long)skb->data;
	a^=(a+skb->len);
	if(a&0x00010000)	/* If the 64K bit is different.. */
		return 1;
	return 0;
}

/**
 *	z8530_queue_xmit - Queue a packet
 *	@c: The channel to use
 *	@skb: The packet to kick down the channel
 *
 *	Queue a packet for transmission. Because we have rather
 *	hard to hit interrupt latencies for the Z85230 per packet
 *	even in DMA mode we do the flip to DMA buffer if needed here
 *	not in the IRQ.
 *
 *	Called from the network code. The lock is not held at this
 *	point.
 */

int z8530_queue_xmit(struct z8530_channel *c, struct sk_buff *skb)
{
	unsigned long flags;

	netif_stop_queue(c->netdevice);
	if(c->tx_next_skb)
	{
		return 1;	/* Follow-on slot still occupied: busy */
	}

	/* PC SPECIFIC - DMA limits */

	/*
	 *	If we will DMA the transmit and its gone over the ISA bus
	 *	limit, then copy to the flip buffer
	 */

	if(c->dma_tx && ((unsigned long)(virt_to_bus(skb->data+skb->len))>=16*1024*1024 || spans_boundary(skb)))
	{
		/*
		 *	Send the flip buffer, and flip the flippy bit.
		 *	We don't care which is used when just so long as
		 *	we never use the same buffer twice in a row. Since
		 *	only one buffer can be going out at a time the other
		 *	has to be safe.
		 */
		c->tx_next_ptr=c->tx_dma_buf[c->tx_dma_used];
		c->tx_dma_used^=1;	/* Flip temp buffer */
		skb_copy_from_linear_data(skb, c->tx_next_ptr, skb->len);
	}
	else
		c->tx_next_ptr=skb->data;
	/* Publish the follow-on buffer under the RT lock so the IRQ
	   side sees ptr and skb consistently */
	RT_LOCK;
	c->tx_next_skb=skb;
	RT_UNLOCK;

	spin_lock_irqsave(c->lock, flags);
	z8530_tx_begin(c);
	spin_unlock_irqrestore(c->lock, flags);

	return 0;
}

EXPORT_SYMBOL(z8530_queue_xmit);

/**
 *	z8530_get_stats - Get network statistics
 *	@c: The channel to use
 *
 *	Get the statistics block. We keep the statistics in software as
 *	the chip doesn't do it for us.
 *
 *	Locking is ignored here - we could lock for a copy but its
 *	not likely to be that big an issue
 */

struct net_device_stats *z8530_get_stats(struct z8530_channel *c)
{
	return &c->stats;
}

EXPORT_SYMBOL(z8530_get_stats);

/*
 *	Module support
 */
static char banner[] __initdata = KERN_INFO "Generic Z85C30/Z85230 interface driver v0.02\n";

/* Module load: announce ourselves. All real setup is done by the card
   drivers that call into this core. */
static int __init z85230_init_driver(void)
{
	printk(banner);
	return 0;
}
module_init(z85230_init_driver);

/* Module unload: nothing to tear down - per-device state is owned by
   the card drivers. */
static void __exit z85230_cleanup_driver(void)
{
}
module_exit(z85230_cleanup_driver);

MODULE_AUTHOR("Red Hat Inc.");
MODULE_DESCRIPTION("Z85x30 synchronous driver core");
MODULE_LICENSE("GPL");