1/* 2 * 3 * Alchemy Semi Au1000 IrDA driver 4 * 5 * Copyright 2001 MontaVista Software Inc. 6 * Author: MontaVista Software, Inc. 7 * ppopov@mvista.com or source@mvista.com 8 * 9 * ######################################################################## 10 * 11 * This program is free software; you can distribute it and/or modify it 12 * under the terms of the GNU General Public License (Version 2) as 13 * published by the Free Software Foundation. 14 * 15 * This program is distributed in the hope it will be useful, but WITHOUT 16 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 17 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 18 * for more details. 19 * 20 * You should have received a copy of the GNU General Public License along 21 * with this program; if not, write to the Free Software Foundation, Inc., 22 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. 23 * 24 * ######################################################################## 25 * 26 * 27 */ 28 29#ifndef __mips__ 30#error This driver only works with MIPS architectures! 
#endif

/* Linux 2.4-era kernel headers; this driver predates the generic DMA API. */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/rtnetlink.h>
#include <linux/interrupt.h>
#include <linux/pm.h>

#include <asm/irq.h>
#include <asm/bitops.h>
#include <asm/io.h>
#include <asm/au1000.h>
#include <asm/pb1000.h>

#include <net/irda/irda.h>
#include <net/irda/irmod.h>
#include <net/irda/wrapper.h>
#include <net/irda/irda_device.h>
#include "net/irda/au1000_ircc.h"

/* Forward declarations for the net_device callbacks wired up below. */
static int au1k_irda_net_init(struct net_device *);
static int au1k_irda_start(struct net_device *);
static int au1k_irda_stop(struct net_device *dev);
static int au1k_irda_hard_xmit(struct sk_buff *, struct net_device *);
static int au1k_irda_rx(struct net_device *);
static void au1k_irda_interrupt(int, void *, struct pt_regs *);
static void au1k_tx_timeout(struct net_device *);
static struct net_device_stats *au1k_irda_stats(struct net_device *);
static int au1k_irda_ioctl(struct net_device *, struct ifreq *, int);
static int au1k_irda_set_speed(struct net_device *dev, int speed);

static void *dma_alloc(size_t, dma_addr_t *);
static void dma_free(void *, size_t);

static int qos_mtt_bits = 0x07; /* 1 ms or more */
/* One slot per supported interface; only ir_devs[0] is ever used here. */
static struct net_device *ir_devs[NUM_IR_IFF];
static char version[] __devinitdata =
	"au1k_ircc:1.0 ppopov@mvista.com\n";

#define RUN_AT(x) (jiffies + (x))

/* Serializes speed changes (au1k_irda_set_speed) against other paths. */
static spinlock_t ir_lock = SPIN_LOCK_UNLOCKED;

/*
 * IrDA peripheral bug. You have to read the register
 * twice to get the right value.
 */
u32 read_ir_reg(u32 addr)
{
	readl(addr);
	return readl(addr);
}


/*
 * Buffer allocation/deallocation routines. The buffer descriptor returned
 * has the virtual and dma address of a buffer suitable for
 * both, receive and transmit operations.
 */
/* Pop one buffer descriptor off the free list; returns NULL when empty. */
static db_dest_t *GetFreeDB(struct au1k_private *aup)
{
	db_dest_t *pDB;
	pDB = aup->pDBfree;

	if (pDB) {
		aup->pDBfree = pDB->pnext;
	}
	return pDB;
}

/* Push a buffer descriptor back on the free list.
 * NOTE(review): this links the OLD head's pnext forward to pDB instead of
 * setting pDB->pnext to the old head, so pDB->pnext keeps whatever stale
 * value it had -- looks inverted compared to a conventional list push.
 * ReleaseDB has no callers in this file; verify before relying on it. */
static void ReleaseDB(struct au1k_private *aup, db_dest_t *pDB)
{
	db_dest_t *pDBfree = aup->pDBfree;
	if (pDBfree)
		pDBfree->pnext = pDB;
	aup->pDBfree = pDB;
}


/*
   DMA memory allocation, derived from pci_alloc_consistent.
   However, the Au1000 data cache is coherent (when programmed
   so), therefore we return KSEG0 address, not KSEG1.
*/
/* Allocate zeroed, page-aligned memory; stores the bus address through
 * *dma_handle and returns a KSEG0 (cached) virtual address, or NULL. */
static void *dma_alloc(size_t size, dma_addr_t * dma_handle)
{
	void *ret;
	int gfp = GFP_ATOMIC | GFP_DMA;

	ret = (void *) __get_free_pages(gfp, get_order(size));

	if (ret != NULL) {
		memset(ret, 0, size);
		*dma_handle = virt_to_bus(ret);
		ret = KSEG0ADDR(ret);
	}
	return ret;
}


/* Free memory obtained from dma_alloc(); size must match the allocation. */
static void dma_free(void *vaddr, size_t size)
{
	vaddr = KSEG0ADDR(vaddr);
	free_pages((unsigned long) vaddr, get_order(size));
}


/* Point the rx/tx descriptor pointer arrays at the hardware ring memory.
 * The caller places the tx ring 512 bytes after the rx ring. */
static void
setup_hw_rings(struct au1k_private *aup, u32 rx_base, u32 tx_base)
{
	int i;
	for (i=0; i<NUM_IR_DESC; i++) {
		aup->rx_ring[i] = (volatile ring_dest_t *)
			(rx_base + sizeof(ring_dest_t)*i);
	}
	for (i=0; i<NUM_IR_DESC; i++) {
		aup->tx_ring[i] = (volatile ring_dest_t *)
			(tx_base + sizeof(ring_dest_t)*i);
	}
}


/*
 * Device has already been stopped at this point.
161 */ 162static void au1k_irda_net_uninit(struct net_device *dev) 163{ 164 dev->hard_start_xmit = NULL; 165 dev->open = NULL; 166 dev->stop = NULL; 167 dev->do_ioctl = NULL; 168 dev->get_stats = NULL; 169 dev->priv = NULL; 170} 171 172 173static int au1k_irda_init(void) 174{ 175 static unsigned version_printed = 0; 176 struct net_device *dev; 177 int err; 178 179 if (version_printed++ == 0) printk(version); 180 181 rtnl_lock(); 182 dev = dev_alloc("irda%d", &err); 183 if (dev) { 184 dev->irq = AU1000_IRDA_RX_INT; /* TX has its own interrupt */ 185 dev->init = au1k_irda_net_init; 186 dev->uninit = au1k_irda_net_uninit; 187 err = register_netdevice(dev); 188 189 if (err) 190 kfree(dev); 191 else 192 ir_devs[0] = dev; 193 printk(KERN_INFO "IrDA: Registered device %s\n", dev->name); 194 } 195 rtnl_unlock(); 196 return err; 197} 198 199static int au1k_irda_init_iobuf(iobuff_t *io, int size) 200{ 201 io->head = kmalloc(size, GFP_KERNEL); 202 if (io->head != NULL) { 203 io->truesize = size; 204 io->in_frame = FALSE; 205 io->state = OUTSIDE_FRAME; 206 io->data = io->head; 207 } 208 return io->head ? 
0 : -ENOMEM; 209} 210 211static int au1k_irda_net_init(struct net_device *dev) 212{ 213 struct au1k_private *aup = NULL; 214 int i, retval = 0, err; 215 db_dest_t *pDB, *pDBfree; 216 unsigned long temp; 217 218 dev->priv = kmalloc(sizeof(struct au1k_private), GFP_KERNEL); 219 if (dev->priv == NULL) { 220 retval = -ENOMEM; 221 goto out; 222 } 223 memset(dev->priv, 0, sizeof(struct au1k_private)); 224 aup = dev->priv; 225 226 err = au1k_irda_init_iobuf(&aup->rx_buff, 14384); 227 if (err) 228 goto out; 229 230 dev->open = au1k_irda_start; 231 dev->hard_start_xmit = au1k_irda_hard_xmit; 232 dev->stop = au1k_irda_stop; 233 dev->get_stats = au1k_irda_stats; 234 dev->do_ioctl = au1k_irda_ioctl; 235 dev->tx_timeout = au1k_tx_timeout; 236 237 irda_device_setup(dev); 238 irda_init_max_qos_capabilies(&aup->qos); 239 240 /* The only value we must override it the baudrate */ 241 aup->qos.baud_rate.bits = IR_9600|IR_19200|IR_38400|IR_57600| 242 IR_115200|IR_576000 |(IR_4000000 << 8); 243 244 aup->qos.min_turn_time.bits = qos_mtt_bits; 245 irda_qos_bits_to_value(&aup->qos); 246 247 248 /* Tx ring follows rx ring + 512 bytes */ 249 /* we need a 1k aligned buffer */ 250 aup->rx_ring[0] = (ring_dest_t *) 251 dma_alloc(2*MAX_NUM_IR_DESC*(sizeof(ring_dest_t)), &temp); 252 253 /* allocate the data buffers */ 254 aup->db[0].vaddr = 255 (void *)dma_alloc(MAX_BUF_SIZE * 2*NUM_IR_DESC, &temp); 256 if (!aup->db[0].vaddr || !aup->rx_ring[0]) { 257 retval = -ENOMEM; 258 goto out; 259 } 260 261 setup_hw_rings(aup, (u32)aup->rx_ring[0], (u32)aup->rx_ring[0] + 512); 262 263 pDBfree = NULL; 264 pDB = aup->db; 265 for (i=0; i<(2*NUM_IR_DESC); i++) { 266 pDB->pnext = pDBfree; 267 pDBfree = pDB; 268 pDB->vaddr = 269 (u32 *)((unsigned)aup->db[0].vaddr + MAX_BUF_SIZE*i); 270 pDB->dma_addr = (dma_addr_t)virt_to_bus(pDB->vaddr); 271 pDB++; 272 } 273 aup->pDBfree = pDBfree; 274 275 /* attach a data buffer to each descriptor */ 276 for (i=0; i<NUM_IR_DESC; i++) { 277 pDB = GetFreeDB(aup); 278 if (!pDB) 
goto out; 279 aup->rx_ring[i]->addr_0 = (u8)(pDB->dma_addr & 0xff); 280 aup->rx_ring[i]->addr_1 = (u8)((pDB->dma_addr>>8) & 0xff); 281 aup->rx_ring[i]->addr_2 = (u8)((pDB->dma_addr>>16) & 0xff); 282 aup->rx_ring[i]->addr_3 = (u8)((pDB->dma_addr>>24) & 0xff); 283 aup->rx_db_inuse[i] = pDB; 284 } 285 for (i=0; i<NUM_IR_DESC; i++) { 286 pDB = GetFreeDB(aup); 287 if (!pDB) goto out; 288 aup->tx_ring[i]->addr_0 = (u8)(pDB->dma_addr & 0xff); 289 aup->tx_ring[i]->addr_1 = (u8)((pDB->dma_addr>>8) & 0xff); 290 aup->tx_ring[i]->addr_2 = (u8)((pDB->dma_addr>>16) & 0xff); 291 aup->tx_ring[i]->addr_3 = (u8)((pDB->dma_addr>>24) & 0xff); 292 aup->tx_ring[i]->count_0 = 0; 293 aup->tx_ring[i]->count_1 = 0; 294 aup->tx_ring[i]->flags = 0; 295 aup->tx_db_inuse[i] = pDB; 296 } 297 return 0; 298 299out: 300 if (aup->db[0].vaddr) 301 dma_free((void *)aup->db[0].vaddr, 302 MAX_BUF_SIZE * 2*NUM_IR_DESC); 303 if (aup->rx_ring[0]) 304 kfree((void *)aup->rx_ring[0]); 305 if (aup->rx_buff.head) 306 kfree(aup->rx_buff.head); 307 if (dev->priv != NULL) 308 kfree(dev->priv); 309 unregister_netdevice(dev); 310 printk(KERN_ERR "%s: au1k_init_module failed. 
Returns %d\n", 311 dev->name, retval); 312 return retval; 313} 314 315 316static int au1k_init(struct net_device *dev) 317{ 318 struct au1k_private *aup = (struct au1k_private *) dev->priv; 319 int i; 320 u32 control; 321 u32 ring_address; 322 323 /* bring the device out of reset */ 324 control = 0xe; /* coherent, clock enable, one half system clock */ 325 326#ifndef CONFIG_CPU_LITTLE_ENDIAN 327 control |= 1; 328#endif 329 aup->tx_head = 0; 330 aup->tx_tail = 0; 331 aup->rx_head = 0; 332 333 for (i=0; i<NUM_IR_DESC; i++) { 334 aup->rx_ring[i]->flags = AU_OWN; 335 } 336 337 writel(control, IR_INTERFACE_CONFIG); 338 au_sync_delay(10); 339 340 writel(read_ir_reg(IR_ENABLE) & ~0x8000, IR_ENABLE); /* disable PHY */ 341 au_sync_delay(1); 342 343 writel(MAX_BUF_SIZE, IR_MAX_PKT_LEN); 344 345 ring_address = (u32)virt_to_phys((void *)aup->rx_ring[0]); 346 writel(ring_address >> 26, IR_RING_BASE_ADDR_H); 347 writel((ring_address >> 10) & 0xffff, IR_RING_BASE_ADDR_L); 348 349 writel(RING_SIZE_64<<8 | RING_SIZE_64<<12, IR_RING_SIZE); 350 351 writel(1<<2 | IR_ONE_PIN, IR_CONFIG_2); /* 48MHz */ 352 writel(0, IR_RING_ADDR_CMPR); 353 354 au1k_irda_set_speed(dev, 9600); 355 return 0; 356} 357 358static int au1k_irda_start(struct net_device *dev) 359{ 360 int retval; 361 char hwname[32]; 362 struct au1k_private *aup = (struct au1k_private *) dev->priv; 363 364 MOD_INC_USE_COUNT; 365 366 if ((retval = au1k_init(dev))) { 367 printk(KERN_ERR "%s: error in au1k_init\n", dev->name); 368 MOD_DEC_USE_COUNT; 369 return retval; 370 } 371 372 if ((retval = request_irq(AU1000_IRDA_TX_INT, &au1k_irda_interrupt, 373 0, dev->name, dev))) { 374 printk(KERN_ERR "%s: unable to get IRQ %d\n", 375 dev->name, dev->irq); 376 MOD_DEC_USE_COUNT; 377 return retval; 378 } 379 if ((retval = request_irq(AU1000_IRDA_RX_INT, &au1k_irda_interrupt, 380 0, dev->name, dev))) { 381 free_irq(AU1000_IRDA_TX_INT, dev); 382 printk(KERN_ERR "%s: unable to get IRQ %d\n", 383 dev->name, dev->irq); 384 MOD_DEC_USE_COUNT; 
385 return retval; 386 } 387 388 /* Give self a hardware name */ 389 sprintf(hwname, "Au1000 SIR/FIR"); 390 aup->irlap = irlap_open(dev, &aup->qos, hwname); 391 netif_start_queue(dev); 392 393 writel(read_ir_reg(IR_CONFIG_2) | 1<<8, IR_CONFIG_2); /* int enable */ 394 395 aup->timer.expires = RUN_AT((3*HZ)); 396 aup->timer.data = (unsigned long)dev; 397 return 0; 398} 399 400static int au1k_irda_stop(struct net_device *dev) 401{ 402 struct au1k_private *aup = (struct au1k_private *) dev->priv; 403 404 /* disable interrupts */ 405 writel(read_ir_reg(IR_CONFIG_2) & ~(1<<8), IR_CONFIG_2); 406 writel(0, IR_CONFIG_1); 407 writel(0, IR_INTERFACE_CONFIG); /* disable clock */ 408 au_sync(); 409 410 if (aup->irlap) { 411 irlap_close(aup->irlap); 412 aup->irlap = NULL; 413 } 414 415 netif_stop_queue(dev); 416 del_timer(&aup->timer); 417 418 /* disable the interrupt */ 419 free_irq(AU1000_IRDA_TX_INT, dev); 420 free_irq(AU1000_IRDA_RX_INT, dev); 421 MOD_DEC_USE_COUNT; 422 return 0; 423} 424 425static void __exit au1k_irda_exit(void) 426{ 427 struct net_device *dev = ir_devs[0]; 428 struct au1k_private *aup = (struct au1k_private *) dev->priv; 429 430 if (!dev) { 431 printk(KERN_ERR "au1k_ircc no dev found\n"); 432 return; 433 } 434 if (aup->db[0].vaddr) { 435 dma_free((void *)aup->db[0].vaddr, 436 MAX_BUF_SIZE * 2*NUM_IR_DESC); 437 aup->db[0].vaddr = 0; 438 } 439 if (aup->rx_ring[0]) { 440 dma_free((void *)aup->rx_ring[0], 441 2*MAX_NUM_IR_DESC*(sizeof(ring_dest_t))); 442 aup->rx_ring[0] = 0; 443 } 444 rtnl_lock(); 445 unregister_netdevice(dev); 446 rtnl_unlock(); 447 ir_devs[0] = 0; 448} 449 450 451static inline void 452update_tx_stats(struct net_device *dev, u32 status, u32 pkt_len) 453{ 454 struct au1k_private *aup = (struct au1k_private *) dev->priv; 455 struct net_device_stats *ps = &aup->stats; 456 457 ps->tx_packets++; 458 ps->tx_bytes += pkt_len; 459 460 if (status & IR_TX_ERROR) { 461 ps->tx_errors++; 462 ps->tx_aborted_errors++; 463 } 464} 465 466 467static void 
au1k_tx_ack(struct net_device *dev) 468{ 469 struct au1k_private *aup = (struct au1k_private *) dev->priv; 470 volatile ring_dest_t *ptxd; 471 472 ptxd = aup->tx_ring[aup->tx_tail]; 473 while (!(ptxd->flags & AU_OWN) && (aup->tx_tail != aup->tx_head)) { 474 update_tx_stats(dev, ptxd->flags, 475 ptxd->count_1<<8 | ptxd->count_0); 476 ptxd->count_0 = 0; 477 ptxd->count_1 = 0; 478 au_sync(); 479 480 aup->tx_tail = (aup->tx_tail + 1) & (NUM_IR_DESC - 1); 481 ptxd = aup->tx_ring[aup->tx_tail]; 482 483 if (aup->tx_full) { 484 aup->tx_full = 0; 485 netif_wake_queue(dev); 486 } 487 } 488 489 if (aup->tx_tail == aup->tx_head) { 490 if (aup->newspeed) { 491 au1k_irda_set_speed(dev, aup->newspeed); 492 aup->newspeed = 0; 493 } 494 else { 495 writel(read_ir_reg(IR_CONFIG_1) & ~IR_TX_ENABLE, 496 IR_CONFIG_1); 497 au_sync(); 498 writel(read_ir_reg(IR_CONFIG_1) | IR_RX_ENABLE, 499 IR_CONFIG_1); 500 writel(0, IR_RING_PROMPT); 501 au_sync(); 502 } 503 } 504} 505 506 507/* 508 * Au1000 transmit routine. 509 */ 510static int au1k_irda_hard_xmit(struct sk_buff *skb, struct net_device *dev) 511{ 512 struct au1k_private *aup = (struct au1k_private *) dev->priv; 513 int speed = irda_get_next_speed(skb); 514 volatile ring_dest_t *ptxd; 515 u32 len; 516 517 u32 flags; 518 db_dest_t *pDB; 519 520 if (speed != aup->speed && speed != -1) { 521 aup->newspeed = speed; 522 } 523 524 if ((skb->len == 0) && (aup->newspeed)) { 525 if (aup->tx_tail == aup->tx_head) { 526 au1k_irda_set_speed(dev, speed); 527 aup->newspeed = 0; 528 } 529 dev_kfree_skb(skb); 530 return 0; 531 } 532 533 ptxd = aup->tx_ring[aup->tx_head]; 534 flags = ptxd->flags; 535 536 if (flags & AU_OWN) { 537 printk(KERN_INFO "%s: tx_full\n", dev->name); 538 netif_stop_queue(dev); 539 aup->tx_full = 1; 540 return 1; 541 } 542 else if (((aup->tx_head + 1) & (NUM_IR_DESC - 1)) == aup->tx_tail) { 543 printk(KERN_INFO "%s: tx_full\n", dev->name); 544 netif_stop_queue(dev); 545 aup->tx_full = 1; 546 return 1; 547 } 548 549 pDB = 
aup->tx_db_inuse[aup->tx_head]; 550 551 552 if (aup->speed == 4000000) { 553 /* FIR */ 554 memcpy((void *)pDB->vaddr, skb->data, skb->len); 555 ptxd->count_0 = skb->len & 0xff; 556 ptxd->count_1 = (skb->len >> 8) & 0xff; 557 } 558 else { 559 /* SIR */ 560 len = async_wrap_skb(skb, (u8 *)pDB->vaddr, MAX_BUF_SIZE); 561 ptxd->count_0 = len & 0xff; 562 ptxd->count_1 = (len >> 8) & 0xff; 563 ptxd->flags |= IR_DIS_CRC; 564 } 565 ptxd->flags |= AU_OWN; 566 au_sync(); 567 568 writel(read_ir_reg(IR_CONFIG_1) | IR_TX_ENABLE, IR_CONFIG_1); 569 writel(0, IR_RING_PROMPT); 570 au_sync(); 571 572 dev_kfree_skb(skb); 573 aup->tx_head = (aup->tx_head + 1) & (NUM_IR_DESC - 1); 574 dev->trans_start = jiffies; 575 return 0; 576} 577 578 579static inline void 580update_rx_stats(struct net_device *dev, u32 status, u32 count) 581{ 582 struct au1k_private *aup = (struct au1k_private *) dev->priv; 583 struct net_device_stats *ps = &aup->stats; 584 585 ps->rx_packets++; 586 587 if (status & IR_RX_ERROR) { 588 ps->rx_errors++; 589 if (status & (IR_PHY_ERROR|IR_FIFO_OVER)) 590 ps->rx_missed_errors++; 591 if (status & IR_MAX_LEN) 592 ps->rx_length_errors++; 593 if (status & IR_CRC_ERROR) 594 ps->rx_crc_errors++; 595 } 596 else 597 ps->rx_bytes += count; 598} 599 600/* 601 * Au1000 receive routine. 
602 */ 603static int au1k_irda_rx(struct net_device *dev) 604{ 605 struct au1k_private *aup = (struct au1k_private *) dev->priv; 606 struct sk_buff *skb; 607 volatile ring_dest_t *prxd; 608 u32 flags, count; 609 db_dest_t *pDB; 610 611 prxd = aup->rx_ring[aup->rx_head]; 612 flags = prxd->flags; 613 614 while (!(flags & AU_OWN)) { 615 pDB = aup->rx_db_inuse[aup->rx_head]; 616 count = prxd->count_1<<8 | prxd->count_0; 617 if (!(flags & IR_RX_ERROR)) { 618 /* good frame */ 619 update_rx_stats(dev, flags, count); 620 skb=alloc_skb(count+1,GFP_ATOMIC); 621 if (skb == NULL) { 622 aup->stats.rx_dropped++; 623 continue; 624 } 625 skb_reserve(skb, 1); 626 if (aup->speed == 4000000) 627 skb_put(skb, count); 628 else 629 skb_put(skb, count-2); 630 memcpy(skb->data, (void *)pDB->vaddr, count-2); 631 skb->dev = dev; 632 skb->mac.raw = skb->data; 633 skb->protocol = htons(ETH_P_IRDA); 634 netif_rx(skb); 635 prxd->count_0 = 0; 636 prxd->count_1 = 0; 637 } 638 prxd->flags |= AU_OWN; 639 aup->rx_head = (aup->rx_head + 1) & (NUM_IR_DESC - 1); 640 writel(0, IR_RING_PROMPT); 641 au_sync(); 642 643 /* next descriptor */ 644 prxd = aup->rx_ring[aup->rx_head]; 645 flags = prxd->flags; 646 dev->last_rx = jiffies; 647 648 } 649 return 0; 650} 651 652 653void au1k_irda_interrupt(int irq, void *dev_id, struct pt_regs *regs) 654{ 655 struct net_device *dev = (struct net_device *) dev_id; 656 657 if (dev == NULL) { 658 printk(KERN_ERR "%s: isr: null dev ptr\n", dev->name); 659 return; 660 } 661 662 writel(0, IR_INT_CLEAR); /* ack irda interrupts */ 663 664 au1k_irda_rx(dev); 665 au1k_tx_ack(dev); 666} 667 668 669/* 670 * The Tx ring has been full longer than the watchdog timeout 671 * value. The transmitter must be hung? 
 */
/* net_device watchdog callback: reprogram the transceiver at the current
 * speed, clear the tx_full condition and restart the queue. */
static void au1k_tx_timeout(struct net_device *dev)
{
	u32 speed;
	struct au1k_private *aup = (struct au1k_private *) dev->priv;

	printk(KERN_ERR "%s: tx timeout\n", dev->name);
	speed = aup->speed;
	aup->speed = 0;	/* forces au1k_irda_set_speed() to reprogram */
	au1k_irda_set_speed(dev, speed);
	aup->tx_full = 0;
	netif_wake_queue(dev);
}


/*
 * Set the IrDA communications speed.
 */
/* Disables the PHY, rx/tx and DMA, resets both descriptor rings, selects
 * SIR or FIR mode for the requested baud rate and re-enables the PHY.
 * Returns 0 on success, -EINVAL for an unsupported speed.  Holds ir_lock
 * with interrupts off for the whole sequence. */
static int
au1k_irda_set_speed(struct net_device *dev, int speed)
{
	unsigned long flags;
	struct au1k_private *aup = (struct au1k_private *) dev->priv;
	u32 control;
	int ret = 0, timeout = 10, i;
	volatile ring_dest_t *ptxd;

	if (speed == aup->speed)
		return ret;

	spin_lock_irqsave(&ir_lock, flags);

	/* disable PHY first */
	writel(read_ir_reg(IR_ENABLE) & ~0x8000, IR_ENABLE);

	/* disable RX/TX */
	writel(read_ir_reg(IR_CONFIG_1) & ~(IR_RX_ENABLE|IR_TX_ENABLE),
			IR_CONFIG_1);
	au_sync_delay(1);
	/* wait (bounded) for any in-flight rx/tx to drain */
	while (read_ir_reg(IR_ENABLE) & (IR_RX_STATUS | IR_TX_STATUS)) {
		mdelay(1);
		if (!timeout--) {
			printk(KERN_ERR "%s: rx/tx disable timeout\n",
					dev->name);
			break;
		}
	}

	/* disable DMA */
	writel(read_ir_reg(IR_CONFIG_1) & ~IR_DMA_ENABLE, IR_CONFIG_1);
	au_sync_delay(1);

	/*
	 * After we disable tx/rx. the index pointers
	 * go back to zero.
	 */
	aup->tx_head = aup->tx_tail = aup->rx_head = 0;
	/* tx descriptors: emptied and owned by software */
	for (i=0; i<NUM_IR_DESC; i++) {
		ptxd = aup->tx_ring[i];
		ptxd->flags = 0;
		ptxd->count_0 = 0;
		ptxd->count_1 = 0;
	}

	/* rx descriptors: emptied and handed back to the hardware */
	for (i=0; i<NUM_IR_DESC; i++) {
		ptxd = aup->rx_ring[i];
		ptxd->count_0 = 0;
		ptxd->count_1 = 0;
		ptxd->flags = AU_OWN;
	}

	/* board-level FIR/SIR transceiver select on the Pb1000 CPLD.
	 * NOTE(review): the FIR branch writes bit 13 alone, clobbering the
	 * other CPLD_AUX1 bits, while the SIR branch preserves them with a
	 * read-modify-write -- confirm this asymmetry is intended. */
	if (speed == 4000000)
		writel(1<<13, CPLD_AUX1);
	else
		writel(readl(CPLD_AUX1) & ~(1<<13), CPLD_AUX1);

	switch (speed) {
	case 9600:
		writel(11<<10 | 12<<5, IR_WRITE_PHY_CONFIG);
		writel(IR_SIR_MODE, IR_CONFIG_1);
		break;
	case 19200:
		writel(5<<10 | 12<<5, IR_WRITE_PHY_CONFIG);
		writel(IR_SIR_MODE, IR_CONFIG_1);
		break;
	case 38400:
		writel(2<<10 | 12<<5, IR_WRITE_PHY_CONFIG);
		writel(IR_SIR_MODE, IR_CONFIG_1);
		break;
	case 57600:
		writel(1<<10 | 12<<5, IR_WRITE_PHY_CONFIG);
		writel(IR_SIR_MODE, IR_CONFIG_1);
		break;
	case 115200:
		writel(12<<5, IR_WRITE_PHY_CONFIG);
		writel(IR_SIR_MODE, IR_CONFIG_1);
		break;
	case 4000000:
		writel(0xF, IR_WRITE_PHY_CONFIG);
		writel(IR_FIR|IR_DMA_ENABLE|IR_RX_ENABLE, IR_CONFIG_1);
		break;
	default:
		printk(KERN_ERR "%s unsupported speed %x\n", dev->name, speed);
		ret = -EINVAL;
		break;
	}

	aup->speed = speed;
	writel(read_ir_reg(IR_ENABLE) | 0x8000, IR_ENABLE); /* re-enable PHY */
	au_sync();

	control = read_ir_reg(IR_ENABLE);
	writel(0, IR_RING_PROMPT);
	au_sync();

	/* report what configuration the hardware accepted */
	if (control & (1<<14)) {
		printk(KERN_ERR "%s: configuration error\n", dev->name);
	}
	else {
		if (control & (1<<11))
			printk(KERN_INFO "%s Valid SIR config\n", dev->name);
		if (control & (1<<12))
			printk(KERN_INFO "%s Valid MIR config\n", dev->name);
		if (control & (1<<13))
			printk(KERN_INFO "%s Valid FIR config\n", dev->name);
		if (control & (1<<10))
			printk(KERN_INFO "%s TX enabled\n", dev->name);
		if (control & (1<<9))
			printk(KERN_INFO "%s RX enabled\n", dev->name);
	}

	spin_unlock_irqrestore(&ir_lock, flags);
	return ret;
}

/* net_device->do_ioctl: handles the IrDA-specific ioctls.
 * NOTE(review): SIOCSBANDWIDTH without CAP_NET_ADMIN falls through with
 * -EOPNOTSUPP while SIOCSMEDIABUSY uses -EPERM -- possibly inconsistent. */
static int
au1k_irda_ioctl(struct net_device *dev, struct ifreq *ifreq, int cmd)
{
	struct if_irda_req *rq = (struct if_irda_req *)ifreq;
	struct au1k_private *aup = dev->priv;
	int ret = -EOPNOTSUPP;

	switch (cmd) {
	case SIOCSBANDWIDTH:
		if (capable(CAP_NET_ADMIN)) {
			/*
			 * We are unable to set the speed if the
			 * device is not running.
			 */
			if (aup->open)
				ret = au1k_irda_set_speed(dev,
						rq->ifr_baudrate);
			else {
				printk(KERN_ERR "%s ioctl: !netif_running\n",
						dev->name);
				ret = 0;
			}
		}
		break;

	case SIOCSMEDIABUSY:
		ret = -EPERM;
		if (capable(CAP_NET_ADMIN)) {
			irda_device_set_media_busy(dev, TRUE);
			ret = 0;
		}
		break;

	case SIOCGRECEIVING:
		/* this hardware gives no "receive in progress" indication */
		rq->ifr_receiving = 0;
		break;
	default:
		break;
	}
	return ret;
}


/* net_device->get_stats: return the driver-private statistics block. */
static struct net_device_stats *au1k_irda_stats(struct net_device *dev)
{
	struct au1k_private *aup = (struct au1k_private *) dev->priv;
	return &aup->stats;
}

#ifdef MODULE
MODULE_AUTHOR("Pete Popov <ppopov@mvista.com>");
MODULE_DESCRIPTION("Au1000 IrDA Device Driver");

module_init(au1k_irda_init);
module_exit(au1k_irda_exit);
#endif /* MODULE */