1/*
2 *	$Id: pci.c,v 1.1.1.1 2007/08/03 18:52:51 Exp $
3 *
4 *	PCI Bus Services, see include/linux/pci.h for further explanation.
5 *
6 *	Copyright 1993 -- 1997 Drew Eckhardt, Frederic Potter,
7 *	David Mosberger-Tang
8 *
9 *	Copyright 1997 -- 2000 Martin Mares <mj@ucw.cz>
10 */
11
12#include <linux/kernel.h>
13#include <linux/delay.h>
14#include <linux/init.h>
15#include <linux/pci.h>
16#include <linux/pm.h>
17#include <linux/module.h>
18#include <linux/spinlock.h>
19#include <linux/string.h>
20#include <asm/dma.h>	/* isa_dma_bridge_buggy */
21#include "pci.h"
22
/* Delay (in ms) to observe after entering/leaving D3hot; PCI PM 1.1 5.6.1. */
unsigned int pci_pm_d3_delay = 10;

/* Default I/O and memory window sizes handed to a CardBus bridge. */
#define DEFAULT_CARDBUS_IO_SIZE		(256)
#define DEFAULT_CARDBUS_MEM_SIZE	(64*1024*1024)
/* pci=cbmemsize=nnM,cbiosize=nn can override this */
unsigned long pci_cardbus_io_size = DEFAULT_CARDBUS_IO_SIZE;
unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;
30
31/**
32 * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children
33 * @bus: pointer to PCI bus structure to search
34 *
35 * Given a PCI bus, returns the highest PCI bus number present in the set
36 * including the given PCI bus and its list of child PCI buses.
37 */
38unsigned char pci_bus_max_busnr(struct pci_bus* bus)
39{
40	struct list_head *tmp;
41	unsigned char max, n;
42
43	max = bus->subordinate;
44	list_for_each(tmp, &bus->children) {
45		n = pci_bus_max_busnr(pci_bus_b(tmp));
46		if(n > max)
47			max = n;
48	}
49	return max;
50}
51EXPORT_SYMBOL_GPL(pci_bus_max_busnr);
52
53
/* Bound on capability-list hops so a broken device can't loop us forever. */
#define PCI_FIND_CAP_TTL	48

/*
 * Walk the conventional capability list.  @pos is the offset of a "next
 * capability" pointer; @ttl is decremented per hop and is shared with the
 * caller so that chained searches observe one overall bound.
 * Returns the offset of the capability whose ID matches @cap, or 0.
 */
static int __pci_find_next_cap_ttl(struct pci_bus *bus, unsigned int devfn,
				   u8 pos, int cap, int *ttl)
{
	u8 id;

	while ((*ttl)--) {
		/* Follow the next pointer stored at @pos. */
		pci_bus_read_config_byte(bus, devfn, pos, &pos);
		if (pos < 0x40)		/* pointers below 0x40 are invalid */
			break;
		pos &= ~3;		/* capability structures are dword aligned */
		pci_bus_read_config_byte(bus, devfn, pos + PCI_CAP_LIST_ID,
					 &id);
		if (id == 0xff)		/* 0xff means "nothing here" */
			break;
		if (id == cap)
			return pos;
		pos += PCI_CAP_LIST_NEXT;
	}
	return 0;
}
76
77static int __pci_find_next_cap(struct pci_bus *bus, unsigned int devfn,
78			       u8 pos, int cap)
79{
80	int ttl = PCI_FIND_CAP_TTL;
81
82	return __pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl);
83}
84
/*
 * Resume a capability search: continue from the "next" pointer of the
 * capability at @pos, looking for the next occurrence of @cap.
 */
int pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap)
{
	return __pci_find_next_cap(dev->bus, dev->devfn,
				   pos + PCI_CAP_LIST_NEXT, cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_capability);
91
92static int __pci_bus_find_cap_start(struct pci_bus *bus,
93				    unsigned int devfn, u8 hdr_type)
94{
95	u16 status;
96
97	pci_bus_read_config_word(bus, devfn, PCI_STATUS, &status);
98	if (!(status & PCI_STATUS_CAP_LIST))
99		return 0;
100
101	switch (hdr_type) {
102	case PCI_HEADER_TYPE_NORMAL:
103	case PCI_HEADER_TYPE_BRIDGE:
104		return PCI_CAPABILITY_LIST;
105	case PCI_HEADER_TYPE_CARDBUS:
106		return PCI_CB_CAPABILITY_LIST;
107	default:
108		return 0;
109	}
110
111	return 0;
112}
113
114/**
115 * pci_find_capability - query for devices' capabilities
116 * @dev: PCI device to query
117 * @cap: capability code
118 *
119 * Tell if a device supports a given PCI capability.
120 * Returns the address of the requested capability structure within the
121 * device's PCI configuration space or 0 in case the device does not
122 * support it.  Possible values for @cap:
123 *
124 *  %PCI_CAP_ID_PM           Power Management
125 *  %PCI_CAP_ID_AGP          Accelerated Graphics Port
126 *  %PCI_CAP_ID_VPD          Vital Product Data
127 *  %PCI_CAP_ID_SLOTID       Slot Identification
128 *  %PCI_CAP_ID_MSI          Message Signalled Interrupts
129 *  %PCI_CAP_ID_CHSWP        CompactPCI HotSwap
130 *  %PCI_CAP_ID_PCIX         PCI-X
131 *  %PCI_CAP_ID_EXP          PCI Express
132 */
133int pci_find_capability(struct pci_dev *dev, int cap)
134{
135	int pos;
136
137	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
138	if (pos)
139		pos = __pci_find_next_cap(dev->bus, dev->devfn, pos, cap);
140
141	return pos;
142}
143
144/**
145 * pci_bus_find_capability - query for devices' capabilities
146 * @bus:   the PCI bus to query
147 * @devfn: PCI device to query
148 * @cap:   capability code
149 *
150 * Like pci_find_capability() but works for pci devices that do not have a
151 * pci_dev structure set up yet.
152 *
153 * Returns the address of the requested capability structure within the
154 * device's PCI configuration space or 0 in case the device does not
155 * support it.
156 */
157int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap)
158{
159	int pos;
160	u8 hdr_type;
161
162	pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type);
163
164	pos = __pci_bus_find_cap_start(bus, devfn, hdr_type & 0x7f);
165	if (pos)
166		pos = __pci_find_next_cap(bus, devfn, pos, cap);
167
168	return pos;
169}
170
171/**
172 * pci_find_ext_capability - Find an extended capability
173 * @dev: PCI device to query
174 * @cap: capability code
175 *
176 * Returns the address of the requested extended capability structure
177 * within the device's PCI configuration space or 0 if the device does
178 * not support it.  Possible values for @cap:
179 *
180 *  %PCI_EXT_CAP_ID_ERR		Advanced Error Reporting
181 *  %PCI_EXT_CAP_ID_VC		Virtual Channel
182 *  %PCI_EXT_CAP_ID_DSN		Device Serial Number
183 *  %PCI_EXT_CAP_ID_PWR		Power Budgeting
184 */
185int pci_find_ext_capability(struct pci_dev *dev, int cap)
186{
187	u32 header;
188	int ttl = 480; /* 3840 bytes, minimum 8 bytes per capability */
189	int pos = 0x100;
190
191	if (dev->cfg_size <= 256)
192		return 0;
193
194	if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
195		return 0;
196
197	/*
198	 * If we have no capabilities, this is indicated by cap ID,
199	 * cap version and next pointer all being 0.
200	 */
201	if (header == 0)
202		return 0;
203
204	while (ttl-- > 0) {
205		if (PCI_EXT_CAP_ID(header) == cap)
206			return pos;
207
208		pos = PCI_EXT_CAP_NEXT(header);
209		if (pos < 0x100)
210			break;
211
212		if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
213			break;
214	}
215
216	return 0;
217}
218EXPORT_SYMBOL_GPL(pci_find_ext_capability);
219
/*
 * Scan PCI_CAP_ID_HT capabilities starting at @pos for one whose
 * HyperTransport capability type matches @ht_cap.  Slave/host types use a
 * 3-bit type field, all others 5 bits.  One TTL is shared across the whole
 * scan so a malformed capability list cannot loop forever.
 */
static int __pci_find_next_ht_cap(struct pci_dev *dev, int pos, int ht_cap)
{
	int rc, ttl = PCI_FIND_CAP_TTL;
	u8 cap, mask;

	if (ht_cap == HT_CAPTYPE_SLAVE || ht_cap == HT_CAPTYPE_HOST)
		mask = HT_3BIT_CAP_MASK;
	else
		mask = HT_5BIT_CAP_MASK;

	pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn, pos,
				      PCI_CAP_ID_HT, &ttl);
	while (pos) {
		/* Byte 3 of an HT capability holds the capability type. */
		rc = pci_read_config_byte(dev, pos + 3, &cap);
		if (rc != PCIBIOS_SUCCESSFUL)
			return 0;

		if ((cap & mask) == ht_cap)
			return pos;

		/* Not a match: continue from this capability's next pointer. */
		pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn,
					      pos + PCI_CAP_LIST_NEXT,
					      PCI_CAP_ID_HT, &ttl);
	}

	return 0;
}
/**
 * pci_find_next_ht_capability - query a device's Hypertransport capabilities
 * @dev: PCI device to query
 * @pos: Position from which to continue searching
 * @ht_cap: Hypertransport capability code
 *
 * To be used in conjunction with pci_find_ht_capability() to search for
 * all capabilities matching @ht_cap. @pos should always be a value returned
 * from pci_find_ht_capability().
 *
 * NB. To be 100% safe against broken PCI devices, the caller should take
 * steps to avoid an infinite loop.
 */
int pci_find_next_ht_capability(struct pci_dev *dev, int pos, int ht_cap)
{
	/* Resume from the next pointer of the capability at @pos. */
	return __pci_find_next_ht_cap(dev, pos + PCI_CAP_LIST_NEXT, ht_cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_ht_capability);
265
266/**
267 * pci_find_ht_capability - query a device's Hypertransport capabilities
268 * @dev: PCI device to query
269 * @ht_cap: Hypertransport capability code
270 *
271 * Tell if a device supports a given Hypertransport capability.
272 * Returns an address within the device's PCI configuration space
273 * or 0 in case the device does not support the request capability.
274 * The address points to the PCI capability, of type PCI_CAP_ID_HT,
275 * which has a Hypertransport capability matching @ht_cap.
276 */
277int pci_find_ht_capability(struct pci_dev *dev, int ht_cap)
278{
279	int pos;
280
281	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
282	if (pos)
283		pos = __pci_find_next_ht_cap(dev, pos, ht_cap);
284
285	return pos;
286}
287EXPORT_SYMBOL_GPL(pci_find_ht_capability);
288
/**
 * pci_find_parent_resource - return resource region of parent bus of given region
 * @dev: PCI device structure contains resources to be searched
 * @res: child resource record for which parent is sought
 *
 *  For given resource region of given device, return the resource
 *  region of parent bus the given region is contained in or where
 *  it should be allocated from.
 */
struct resource *
pci_find_parent_resource(const struct pci_dev *dev, struct resource *res)
{
	const struct pci_bus *bus = dev->bus;
	int i;
	struct resource *best = NULL;

	for(i = 0; i < PCI_BUS_NUM_RESOURCES; i++) {
		struct resource *r = bus->resource[i];
		if (!r)
			continue;
		/* res->start == 0 presumably means "not yet allocated", in
		 * which case containment is not checked — TODO confirm. */
		if (res->start && !(res->start >= r->start && res->end <= r->end))
			continue;	/* Not contained */
		if ((res->flags ^ r->flags) & (IORESOURCE_IO | IORESOURCE_MEM))
			continue;	/* Wrong type */
		if (!((res->flags ^ r->flags) & IORESOURCE_PREFETCH))
			return r;	/* Exact match */
		if ((res->flags & IORESOURCE_PREFETCH) && !(r->flags & IORESOURCE_PREFETCH))
			best = r;	/* Approximating prefetchable by non-prefetchable */
	}
	return best;
}
320
321/**
322 * pci_restore_bars - restore a devices BAR values (e.g. after wake-up)
323 * @dev: PCI device to have its BARs restored
324 *
325 * Restore the BAR values for a given device, so as to make it
326 * accessible by its driver.
327 */
328void
329pci_restore_bars(struct pci_dev *dev)
330{
331	int i, numres;
332
333	switch (dev->hdr_type) {
334	case PCI_HEADER_TYPE_NORMAL:
335		numres = 6;
336		break;
337	case PCI_HEADER_TYPE_BRIDGE:
338		numres = 2;
339		break;
340	case PCI_HEADER_TYPE_CARDBUS:
341		numres = 1;
342		break;
343	default:
344		/* Should never get here, but just in case... */
345		return;
346	}
347
348	for (i = 0; i < numres; i ++)
349		pci_update_resource(dev, &dev->resource[i], i);
350}
351
/* Optional platform hook invoked after a native PM transition (e.g. ACPI). */
int (*platform_pci_set_power_state)(struct pci_dev *dev, pci_power_t t);
353
/**
 * pci_set_power_state - Set the power state of a PCI device
 * @dev: PCI device to be suspended
 * @state: PCI power state (D0, D1, D2, D3hot, D3cold) we're entering
 *
 * Transition a device to a new power state, using the Power Management
 * Capabilities in the device's config space.
 *
 * RETURN VALUE:
 * -EINVAL if trying to enter a lower state than we're already in.
 * 0 if we're already in the requested state.
 * -EIO if device does not support PCI PM.
 * 0 if we can successfully change the power state.
 */
int
pci_set_power_state(struct pci_dev *dev, pci_power_t state)
{
	int pm, need_restore = 0;
	u16 pmcsr, pmc;

	/* bound the state we're entering */
	if (state > PCI_D3hot)
		state = PCI_D3hot;

	/*
	 * If the device or the parent bridge can't support PCI PM, ignore
	 * the request if we're doing anything besides putting it into D0
	 * (which would only happen on boot).
	 */
	if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev))
		return 0;

	/* Validate current state:
	 * Can enter D0 from any state, but if we can only go deeper
	 * to sleep if we're already in a low power state
	 */
	if (state != PCI_D0 && dev->current_state > state) {
		printk(KERN_ERR "%s(): %s: state=%d, current state=%d\n",
			__FUNCTION__, pci_name(dev), state, dev->current_state);
		return -EINVAL;
	} else if (dev->current_state == state)
		return 0;        /* we're already there */


	/* find PCI PM capability in list */
	pm = pci_find_capability(dev, PCI_CAP_ID_PM);

	/* abort if the device doesn't support PM capabilities */
	if (!pm)
		return -EIO;

	/* Reject devices advertising an unknown PM spec revision. */
	pci_read_config_word(dev,pm + PCI_PM_PMC,&pmc);
	if ((pmc & PCI_PM_CAP_VER_MASK) > 3) {
		printk(KERN_DEBUG
		       "PCI: %s has unsupported PM cap regs version (%u)\n",
		       pci_name(dev), pmc & PCI_PM_CAP_VER_MASK);
		return -EIO;
	}

	/* check if this device supports the desired state */
	if (state == PCI_D1 && !(pmc & PCI_PM_CAP_D1))
		return -EIO;
	else if (state == PCI_D2 && !(pmc & PCI_PM_CAP_D2))
		return -EIO;

	pci_read_config_word(dev, pm + PCI_PM_CTRL, &pmcsr);

	/* If we're (effectively) in D3, force entire word to 0.
	 * This doesn't affect PME_Status, disables PME_En, and
	 * sets PowerState to 0.
	 */
	switch (dev->current_state) {
	case PCI_D0:
	case PCI_D1:
	case PCI_D2:
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= state;
		break;
	case PCI_UNKNOWN: /* Boot-up */
		if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot
		 && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET))
			need_restore = 1;
		/* Fall-through: force to D0 */
	default:
		pmcsr = 0;
		break;
	}

	/* enter specified state */
	pci_write_config_word(dev, pm + PCI_PM_CTRL, pmcsr);

	/* Mandatory power management transition delays */
	/* see PCI PM 1.1 5.6.1 table 18 */
	if (state == PCI_D3hot || dev->current_state == PCI_D3hot)
		msleep(pci_pm_d3_delay);
	else if (state == PCI_D2 || dev->current_state == PCI_D2)
		udelay(200);

	/*
	 * Give firmware a chance to be called, such as ACPI _PRx, _PSx
	 * Firmware method after native method ?
	 */
	if (platform_pci_set_power_state)
		platform_pci_set_power_state(dev, state);

	dev->current_state = state;

	/* According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT
	 * INTERFACE SPECIFICATION, REV. 1.2", a device transitioning
	 * from D3hot to D0 _may_ perform an internal reset, thereby
	 * going to "D0 Uninitialized" rather than "D0 Initialized".
	 * For example, at least some versions of the 3c905B and the
	 * 3c556B exhibit this behaviour.
	 *
	 * At least some laptop BIOSen (e.g. the Thinkpad T21) leave
	 * devices in a D3hot state at boot.  Consequently, we need to
	 * restore at least the BARs so that the device will be
	 * accessible to its driver.
	 */
	if (need_restore)
		pci_restore_bars(dev);

	return 0;
}
478
/* Optional platform hook mapping a system sleep state to a PCI power state. */
int (*platform_pci_choose_state)(struct pci_dev *dev, pm_message_t state);
480
481/**
482 * pci_choose_state - Choose the power state of a PCI device
483 * @dev: PCI device to be suspended
484 * @state: target sleep state for the whole system. This is the value
485 *	that is passed to suspend() function.
486 *
487 * Returns PCI power state suitable for given device and given system
488 * message.
489 */
490
491pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
492{
493	int ret;
494
495	if (!pci_find_capability(dev, PCI_CAP_ID_PM))
496		return PCI_D0;
497
498	if (platform_pci_choose_state) {
499		ret = platform_pci_choose_state(dev, state);
500		if (ret >= 0)
501			state.event = ret;
502	}
503
504	switch (state.event) {
505	case PM_EVENT_ON:
506		return PCI_D0;
507	case PM_EVENT_FREEZE:
508	case PM_EVENT_PRETHAW:
509		/* REVISIT both freeze and pre-thaw "should" use D0 */
510	case PM_EVENT_SUSPEND:
511		return PCI_D3hot;
512	default:
513		printk("Unrecognized suspend event %d\n", state.event);
514		BUG();
515	}
516	return PCI_D0;
517}
518
519EXPORT_SYMBOL(pci_choose_state);
520
/*
 * Snapshot the writable PCI Express capability registers so they can be
 * written back by pci_restore_pcie_state() after a power transition.
 * Returns 0 on success (including "not a PCIe device"), -ENOMEM on OOM.
 */
static int pci_save_pcie_state(struct pci_dev *dev)
{
	int pos, i = 0;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
	if (pos <= 0)
		return 0;	/* not a PCI Express device */

	/* Reuse a previously allocated save buffer when one exists. */
	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
	if (!save_state)
		save_state = kzalloc(sizeof(*save_state) + sizeof(u16) * 4, GFP_KERNEL);
	if (!save_state) {
		dev_err(&dev->dev, "Out of memory in pci_save_pcie_state\n");
		return -ENOMEM;
	}
	cap = (u16 *)&save_state->data[0];

	pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &cap[i++]);
	pci_read_config_word(dev, pos + PCI_EXP_LNKCTL, &cap[i++]);
	pci_read_config_word(dev, pos + PCI_EXP_SLTCTL, &cap[i++]);
	pci_read_config_word(dev, pos + PCI_EXP_RTCTL, &cap[i++]);
	/* NOTE(review): a reused save_state is added to the saved-cap list
	 * again here — verify pci_add_saved_cap() tolerates duplicates. */
	pci_add_saved_cap(dev, save_state);
	return 0;
}
547
548static void pci_restore_pcie_state(struct pci_dev *dev)
549{
550	int i = 0, pos;
551	struct pci_cap_saved_state *save_state;
552	u16 *cap;
553
554	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
555	pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
556	if (!save_state || pos <= 0)
557		return;
558	cap = (u16 *)&save_state->data[0];
559
560	pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, cap[i++]);
561	pci_write_config_word(dev, pos + PCI_EXP_LNKCTL, cap[i++]);
562	pci_write_config_word(dev, pos + PCI_EXP_SLTCTL, cap[i++]);
563	pci_write_config_word(dev, pos + PCI_EXP_RTCTL, cap[i++]);
564}
565
566
567static int pci_save_pcix_state(struct pci_dev *dev)
568{
569	int pos, i = 0;
570	struct pci_cap_saved_state *save_state;
571	u16 *cap;
572
573	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
574	if (pos <= 0)
575		return 0;
576
577	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
578	if (!save_state)
579		save_state = kzalloc(sizeof(*save_state) + sizeof(u16), GFP_KERNEL);
580	if (!save_state) {
581		dev_err(&dev->dev, "Out of memory in pci_save_pcie_state\n");
582		return -ENOMEM;
583	}
584	cap = (u16 *)&save_state->data[0];
585
586	pci_read_config_word(dev, pos + PCI_X_CMD, &cap[i++]);
587	pci_add_saved_cap(dev, save_state);
588	return 0;
589}
590
591static void pci_restore_pcix_state(struct pci_dev *dev)
592{
593	int i = 0, pos;
594	struct pci_cap_saved_state *save_state;
595	u16 *cap;
596
597	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
598	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
599	if (!save_state || pos <= 0)
600		return;
601	cap = (u16 *)&save_state->data[0];
602
603	pci_write_config_word(dev, pos + PCI_X_CMD, cap[i++]);
604}
605
606
607/**
608 * pci_save_state - save the PCI configuration space of a device before suspending
609 * @dev: - PCI device that we're dealing with
610 */
611int
612pci_save_state(struct pci_dev *dev)
613{
614	int i;
615	for (i = 0; i < 16; i++)
616		pci_read_config_dword(dev, i * 4,&dev->saved_config_space[i]);
617	if ((i = pci_save_pcie_state(dev)) != 0)
618		return i;
619	if ((i = pci_save_pcix_state(dev)) != 0)
620		return i;
621	return 0;
622}
623
624/**
625 * pci_restore_state - Restore the saved state of a PCI device
626 * @dev: - PCI device that we're dealing with
627 */
628int
629pci_restore_state(struct pci_dev *dev)
630{
631	int i;
632	int val;
633
634	/* PCI Express register must be restored first */
635	pci_restore_pcie_state(dev);
636
637	/*
638	 * The Base Address register should be programmed before the command
639	 * register(s)
640	 */
641	for (i = 15; i >= 0; i--) {
642		pci_read_config_dword(dev, i * 4, &val);
643		if (val != dev->saved_config_space[i]) {
644			printk(KERN_DEBUG "PM: Writing back config space on "
645				"device %s at offset %x (was %x, writing %x)\n",
646				pci_name(dev), i,
647				val, (int)dev->saved_config_space[i]);
648			pci_write_config_dword(dev,i * 4,
649				dev->saved_config_space[i]);
650		}
651	}
652	pci_restore_pcix_state(dev);
653	pci_restore_msi_state(dev);
654
655	return 0;
656}
657
/*
 * Common enable path: power the device up to D0, let the arch code enable
 * the selected BARs, then apply enable-time quirks.
 * -EIO from pci_set_power_state() just means "no PM capability" and is
 * deliberately ignored.
 */
static int do_pci_enable_device(struct pci_dev *dev, int bars)
{
	int err;

	err = pci_set_power_state(dev, PCI_D0);
	if (err < 0 && err != -EIO)
		return err;
	err = pcibios_enable_device(dev, bars);
	if (err < 0)
		return err;
	pci_fixup_device(pci_fixup_enable, dev);

	return 0;
}
672
673/**
674 * __pci_reenable_device - Resume abandoned device
675 * @dev: PCI device to be resumed
676 *
677 *  Note this function is a backend of pci_default_resume and is not supposed
678 *  to be called by normal code, write proper resume handler and use it instead.
679 */
680int
681__pci_reenable_device(struct pci_dev *dev)
682{
683	if (atomic_read(&dev->enable_cnt))
684		return do_pci_enable_device(dev, (1 << PCI_NUM_RESOURCES) - 1);
685	return 0;
686}
687
688/**
689 * pci_enable_device_bars - Initialize some of a device for use
690 * @dev: PCI device to be initialized
691 * @bars: bitmask of BAR's that must be configured
692 *
693 *  Initialize device before it's used by a driver. Ask low-level code
694 *  to enable selected I/O and memory resources. Wake up the device if it
695 *  was suspended. Beware, this function can fail.
696 */
697int
698pci_enable_device_bars(struct pci_dev *dev, int bars)
699{
700	int err;
701
702	if (atomic_add_return(1, &dev->enable_cnt) > 1)
703		return 0;		/* already enabled */
704
705	err = do_pci_enable_device(dev, bars);
706	if (err < 0)
707		atomic_dec(&dev->enable_cnt);
708	return err;
709}
710
/**
 * pci_enable_device - Initialize device before it's used by a driver.
 * @dev: PCI device to be initialized
 *
 *  Initialize device before it's used by a driver. Ask low-level code
 *  to enable I/O and memory. Wake up the device if it was suspended.
 *  Beware, this function can fail.
 *
 *  Note we don't actually enable the device many times if we call
 *  this function repeatedly (we just increment the count).
 */
int pci_enable_device(struct pci_dev *dev)
{
	/* Enable with every BAR selected. */
	return pci_enable_device_bars(dev, (1 << PCI_NUM_RESOURCES) - 1);
}
726
/*
 * Managed PCI resources.  This manages device on/off, intx/msi/msix
 * on/off and BAR regions.  pci_dev itself records msi/msix status, so
 * there's no need to track it separately.  pci_devres is initialized
 * when a device is enabled using managed PCI device enable interface.
 */
struct pci_devres {
	unsigned int enabled:1;		/* device enabled via pcim_enable_device() */
	unsigned int pinned:1;		/* don't disable on driver detach */
	unsigned int orig_intx:1;	/* INTx state before pci_intx() changed it */
	unsigned int restore_intx:1;	/* restore orig_intx at release time */
	u32 region_mask;		/* BARs requested through the managed API */
};
740
741static void pcim_release(struct device *gendev, void *res)
742{
743	struct pci_dev *dev = container_of(gendev, struct pci_dev, dev);
744	struct pci_devres *this = res;
745	int i;
746
747	if (dev->msi_enabled)
748		pci_disable_msi(dev);
749	if (dev->msix_enabled)
750		pci_disable_msix(dev);
751
752	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
753		if (this->region_mask & (1 << i))
754			pci_release_region(dev, i);
755
756	if (this->restore_intx)
757		pci_intx(dev, this->orig_intx);
758
759	if (this->enabled && !this->pinned)
760		pci_disable_device(dev);
761}
762
763static struct pci_devres * get_pci_dr(struct pci_dev *pdev)
764{
765	struct pci_devres *dr, *new_dr;
766
767	dr = devres_find(&pdev->dev, pcim_release, NULL, NULL);
768	if (dr)
769		return dr;
770
771	new_dr = devres_alloc(pcim_release, sizeof(*new_dr), GFP_KERNEL);
772	if (!new_dr)
773		return NULL;
774	return devres_get(&pdev->dev, new_dr, NULL, NULL);
775}
776
777static struct pci_devres * find_pci_dr(struct pci_dev *pdev)
778{
779	if (pci_is_managed(pdev))
780		return devres_find(&pdev->dev, pcim_release, NULL, NULL);
781	return NULL;
782}
783
784/**
785 * pcim_enable_device - Managed pci_enable_device()
786 * @pdev: PCI device to be initialized
787 *
788 * Managed pci_enable_device().
789 */
790int pcim_enable_device(struct pci_dev *pdev)
791{
792	struct pci_devres *dr;
793	int rc;
794
795	dr = get_pci_dr(pdev);
796	if (unlikely(!dr))
797		return -ENOMEM;
798	WARN_ON(!!dr->enabled);
799
800	rc = pci_enable_device(pdev);
801	if (!rc) {
802		pdev->is_managed = 1;
803		dr->enabled = 1;
804	}
805	return rc;
806}
807
808/**
809 * pcim_pin_device - Pin managed PCI device
810 * @pdev: PCI device to pin
811 *
812 * Pin managed PCI device @pdev.  Pinned device won't be disabled on
813 * driver detach.  @pdev must have been enabled with
814 * pcim_enable_device().
815 */
816void pcim_pin_device(struct pci_dev *pdev)
817{
818	struct pci_devres *dr;
819
820	dr = find_pci_dr(pdev);
821	WARN_ON(!dr || !dr->enabled);
822	if (dr)
823		dr->pinned = 1;
824}
825
/**
 * pcibios_disable_device - disable arch specific PCI resources for device dev
 * @dev: the PCI device to disable
 *
 * Disables architecture specific PCI resources for the device. This
 * is the default implementation. Architecture implementations can
 * override this.
 */
/* Weak no-op default; arch code may provide a strong definition. */
void __attribute__ ((weak)) pcibios_disable_device (struct pci_dev *dev) {}
835
836/**
837 * pci_disable_device - Disable PCI device after use
838 * @dev: PCI device to be disabled
839 *
840 * Signal to the system that the PCI device is not in use by the system
841 * anymore.  This only involves disabling PCI bus-mastering, if active.
842 *
843 * Note we don't actually disable the device until all callers of
844 * pci_device_enable() have called pci_device_disable().
845 */
846void
847pci_disable_device(struct pci_dev *dev)
848{
849	struct pci_devres *dr;
850	u16 pci_command;
851
852	dr = find_pci_dr(dev);
853	if (dr)
854		dr->enabled = 0;
855
856	if (atomic_sub_return(1, &dev->enable_cnt) != 0)
857		return;
858
859	pci_read_config_word(dev, PCI_COMMAND, &pci_command);
860	if (pci_command & PCI_COMMAND_MASTER) {
861		pci_command &= ~PCI_COMMAND_MASTER;
862		pci_write_config_word(dev, PCI_COMMAND, pci_command);
863	}
864	dev->is_busmaster = 0;
865
866	pcibios_disable_device(dev);
867}
868
/**
 * pcibios_set_pcie_reset_state - set reset state for device dev
 * @dev: the PCI-E device reset
 * @state: Reset state to enter into
 *
 *
 * Sets the PCI-E reset state for the device. This is the default
 * implementation. Architecture implementations can override this.
 */
/* Weak default: no arch support, so report the operation as unsupported. */
int __attribute__ ((weak)) pcibios_set_pcie_reset_state(struct pci_dev *dev,
							enum pcie_reset_state state)
{
	return -EINVAL;
}
883
884/**
885 * pci_set_pcie_reset_state - set reset state for device dev
886 * @dev: the PCI-E device reset
887 * @state: Reset state to enter into
888 *
889 *
890 * Sets the PCI reset state for the device.
891 */
892int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
893{
894	return pcibios_set_pcie_reset_state(dev, state);
895}
896
/**
 * pci_enable_wake - enable PCI device as wakeup event source
 * @dev: PCI device affected
 * @state: PCI state from which device will issue wakeup events
 * @enable: True to enable event generation; false to disable
 *
 * This enables the device as a wakeup event source, or disables it.
 * When such events involves platform-specific hooks, those hooks are
 * called automatically by this routine.
 *
 * Devices with legacy power management (no standard PCI PM capabilities)
 * always require such platform hooks.  Depending on the platform, devices
 * supporting the standard PCI PME# signal may require such platform hooks;
 * they always update bits in config space to allow PME# generation.
 *
 * -EIO is returned if the device can't ever be a wakeup event source.
 * -EINVAL is returned if the device can't generate wakeup events from
 * the specified PCI state.  Returns zero if the operation is successful.
 */
int pci_enable_wake(struct pci_dev *dev, pci_power_t state, int enable)
{
	int pm;
	int status;
	u16 value;

	/* Note that drivers should verify device_may_wakeup(&dev->dev)
	 * before calling this function.  Platform code should report
	 * errors when drivers try to enable wakeup on devices that
	 * can't issue wakeups, or on which wakeups were disabled by
	 * userspace updating the /sys/devices.../power/wakeup file.
	 */

	status = call_platform_enable_wakeup(&dev->dev, enable);

	/* find PCI PM capability in list */
	pm = pci_find_capability(dev, PCI_CAP_ID_PM);

	/* If device doesn't support PM Capabilities, but caller wants to
	 * disable wake events, it's a NOP.  Otherwise fail unless the
	 * platform hooks handled this legacy device already.
	 */
	if (!pm)
		return enable ? status : 0;

	/* Check device's ability to generate PME# */
	pci_read_config_word(dev,pm+PCI_PM_PMC,&value);

	/* Extract the per-state PME support bitmap from the PMC register. */
	value &= PCI_PM_CAP_PME_MASK;
	value >>= ffs(PCI_PM_CAP_PME_MASK) - 1;   /* First bit of mask */

	/* Check if it can generate PME# from requested state. */
	if (!value || !(value & (1 << state))) {
		/* if it can't, revert what the platform hook changed,
		 * always reporting the base "EINVAL, can't PME#" error
		 */
		if (enable)
			call_platform_enable_wakeup(&dev->dev, 0);
		return enable ? -EINVAL : 0;
	}

	pci_read_config_word(dev, pm + PCI_PM_CTRL, &value);

	/* Clear PME_Status by writing 1 to it and enable PME# */
	value |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE;

	/* For disable requests, clear the enable bit but still ack status. */
	if (!enable)
		value &= ~PCI_PM_CTRL_PME_ENABLE;

	pci_write_config_word(dev, pm + PCI_PM_CTRL, value);

	return 0;
}
969
970int
971pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
972{
973	u8 pin;
974
975	pin = dev->pin;
976	if (!pin)
977		return -1;
978	pin--;
979	while (dev->bus->self) {
980		pin = (pin + PCI_SLOT(dev->devfn)) % 4;
981		dev = dev->bus->self;
982	}
983	*bridge = dev;
984	return pin;
985}
986
987/**
988 *	pci_release_region - Release a PCI bar
989 *	@pdev: PCI device whose resources were previously reserved by pci_request_region
990 *	@bar: BAR to release
991 *
992 *	Releases the PCI I/O and memory resources previously reserved by a
993 *	successful call to pci_request_region.  Call this function only
994 *	after all use of the PCI regions has ceased.
995 */
996void pci_release_region(struct pci_dev *pdev, int bar)
997{
998	struct pci_devres *dr;
999
1000	if (pci_resource_len(pdev, bar) == 0)
1001		return;
1002	if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
1003		release_region(pci_resource_start(pdev, bar),
1004				pci_resource_len(pdev, bar));
1005	else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
1006		release_mem_region(pci_resource_start(pdev, bar),
1007				pci_resource_len(pdev, bar));
1008
1009	dr = find_pci_dr(pdev);
1010	if (dr)
1011		dr->region_mask &= ~(1 << bar);
1012}
1013
1014/**
1015 *	pci_request_region - Reserved PCI I/O and memory resource
1016 *	@pdev: PCI device whose resources are to be reserved
1017 *	@bar: BAR to be reserved
1018 *	@res_name: Name to be associated with resource.
1019 *
1020 *	Mark the PCI region associated with PCI device @pdev BR @bar as
1021 *	being reserved by owner @res_name.  Do not access any
1022 *	address inside the PCI regions unless this call returns
1023 *	successfully.
1024 *
1025 *	Returns 0 on success, or %EBUSY on error.  A warning
1026 *	message is also printed on failure.
1027 */
1028int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
1029{
1030	struct pci_devres *dr;
1031
1032	if (pci_resource_len(pdev, bar) == 0)
1033		return 0;
1034
1035	if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) {
1036		if (!request_region(pci_resource_start(pdev, bar),
1037			    pci_resource_len(pdev, bar), res_name))
1038			goto err_out;
1039	}
1040	else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
1041		if (!request_mem_region(pci_resource_start(pdev, bar),
1042				        pci_resource_len(pdev, bar), res_name))
1043			goto err_out;
1044	}
1045
1046	dr = find_pci_dr(pdev);
1047	if (dr)
1048		dr->region_mask |= 1 << bar;
1049
1050	return 0;
1051
1052err_out:
1053	printk (KERN_WARNING "PCI: Unable to reserve %s region #%d:%llx@%llx "
1054		"for device %s\n",
1055		pci_resource_flags(pdev, bar) & IORESOURCE_IO ? "I/O" : "mem",
1056		bar + 1, /* PCI BAR # */
1057		(unsigned long long)pci_resource_len(pdev, bar),
1058		(unsigned long long)pci_resource_start(pdev, bar),
1059		pci_name(pdev));
1060	return -EBUSY;
1061}
1062
/**
 * pci_release_selected_regions - Release selected PCI I/O and memory resources
 * @pdev: PCI device whose resources were previously reserved
 * @bars: Bitmask of BARs to be released
 *
 * Release selected PCI I/O and memory resources previously reserved.
 * Call this function only after all use of the PCI regions has ceased.
 */
void pci_release_selected_regions(struct pci_dev *pdev, int bars)
{
	int bar;

	for (bar = 0; bar < 6; bar++) {
		if (bars & (1 << bar))
			pci_release_region(pdev, bar);
	}
}
1079
1080/**
1081 * pci_request_selected_regions - Reserve selected PCI I/O and memory resources
1082 * @pdev: PCI device whose resources are to be reserved
1083 * @bars: Bitmask of BARs to be requested
1084 * @res_name: Name to be associated with resource
1085 */
1086int pci_request_selected_regions(struct pci_dev *pdev, int bars,
1087				 const char *res_name)
1088{
1089	int i;
1090
1091	for (i = 0; i < 6; i++)
1092		if (bars & (1 << i))
1093			if(pci_request_region(pdev, i, res_name))
1094				goto err_out;
1095	return 0;
1096
1097err_out:
1098	while(--i >= 0)
1099		if (bars & (1 << i))
1100			pci_release_region(pdev, i);
1101
1102	return -EBUSY;
1103}
1104
1105/**
1106 *	pci_release_regions - Release reserved PCI I/O and memory resources
1107 *	@pdev: PCI device whose resources were previously reserved by pci_request_regions
1108 *
1109 *	Releases all PCI I/O and memory resources previously reserved by a
1110 *	successful call to pci_request_regions.  Call this function only
1111 *	after all use of the PCI regions has ceased.
1112 */
1113
1114void pci_release_regions(struct pci_dev *pdev)
1115{
1116	pci_release_selected_regions(pdev, (1 << 6) - 1);
1117}
1118
1119/**
1120 *	pci_request_regions - Reserved PCI I/O and memory resources
1121 *	@pdev: PCI device whose resources are to be reserved
1122 *	@res_name: Name to be associated with resource.
1123 *
1124 *	Mark all PCI regions associated with PCI device @pdev as
1125 *	being reserved by owner @res_name.  Do not access any
1126 *	address inside the PCI regions unless this call returns
1127 *	successfully.
1128 *
1129 *	Returns 0 on success, or %EBUSY on error.  A warning
1130 *	message is also printed on failure.
1131 */
1132int pci_request_regions(struct pci_dev *pdev, const char *res_name)
1133{
1134	return pci_request_selected_regions(pdev, ((1 << 6) - 1), res_name);
1135}
1136
1137/**
1138 * pci_set_master - enables bus-mastering for device dev
1139 * @dev: the PCI device to enable
1140 *
1141 * Enables bus-mastering on the device and calls pcibios_set_master()
1142 * to do the needed arch specific settings.
1143 */
1144void
1145pci_set_master(struct pci_dev *dev)
1146{
1147	u16 cmd;
1148
1149	pci_read_config_word(dev, PCI_COMMAND, &cmd);
1150	if (! (cmd & PCI_COMMAND_MASTER)) {
1151		pr_debug("PCI: Enabling bus mastering for device %s\n", pci_name(dev));
1152		cmd |= PCI_COMMAND_MASTER;
1153		pci_write_config_word(dev, PCI_COMMAND, cmd);
1154	}
1155	dev->is_busmaster = 1;
1156	pcibios_set_master(dev);
1157}
1158
#ifdef PCI_DISABLE_MWI
/* Arch has opted out of Memory-Write-Invalidate: the stub reports
 * success so drivers calling pci_set_mwi() keep working unchanged. */
int pci_set_mwi(struct pci_dev *dev)
{
	return 0;
}

/* Matching no-op for the disable side. */
void pci_clear_mwi(struct pci_dev *dev)
{
}

#else

#ifndef PCI_CACHE_LINE_BYTES
#define PCI_CACHE_LINE_BYTES L1_CACHE_BYTES
#endif

/* This can be overridden by arch code. */
/* Don't forget this is measured in 32-bit words, not bytes */
u8 pci_cache_line_size = PCI_CACHE_LINE_BYTES / 4;
1178
1179/**
1180 * pci_set_cacheline_size - ensure the CACHE_LINE_SIZE register is programmed
1181 * @dev: the PCI device for which MWI is to be enabled
1182 *
1183 * Helper function for pci_set_mwi.
1184 * Originally copied from drivers/net/acenic.c.
1185 * Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>.
1186 *
1187 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
1188 */
1189static int
1190pci_set_cacheline_size(struct pci_dev *dev)
1191{
1192	u8 cacheline_size;
1193
1194	if (!pci_cache_line_size)
1195		return -EINVAL;		/* The system doesn't support MWI. */
1196
1197	/* Validate current setting: the PCI_CACHE_LINE_SIZE must be
1198	   equal to or multiple of the right value. */
1199	pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
1200	if (cacheline_size >= pci_cache_line_size &&
1201	    (cacheline_size % pci_cache_line_size) == 0)
1202		return 0;
1203
1204	/* Write the correct value. */
1205	pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, pci_cache_line_size);
1206	/* Read it back. */
1207	pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
1208	if (cacheline_size == pci_cache_line_size)
1209		return 0;
1210
1211	printk(KERN_DEBUG "PCI: cache line size of %d is not supported "
1212	       "by device %s\n", pci_cache_line_size << 2, pci_name(dev));
1213
1214	return -EINVAL;
1215}
1216
1217/**
1218 * pci_set_mwi - enables memory-write-invalidate PCI transaction
1219 * @dev: the PCI device for which MWI is enabled
1220 *
1221 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND,
1222 * and then calls @pcibios_set_mwi to do the needed arch specific
1223 * operations or a generic mwi-prep function.
1224 *
1225 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
1226 */
1227int
1228pci_set_mwi(struct pci_dev *dev)
1229{
1230	int rc;
1231	u16 cmd;
1232
1233	rc = pci_set_cacheline_size(dev);
1234	if (rc)
1235		return rc;
1236
1237	pci_read_config_word(dev, PCI_COMMAND, &cmd);
1238	if (! (cmd & PCI_COMMAND_INVALIDATE)) {
1239		pr_debug("PCI: Enabling Mem-Wr-Inval for device %s\n", pci_name(dev));
1240		cmd |= PCI_COMMAND_INVALIDATE;
1241		pci_write_config_word(dev, PCI_COMMAND, cmd);
1242	}
1243
1244	return 0;
1245}
1246
1247/**
1248 * pci_clear_mwi - disables Memory-Write-Invalidate for device dev
1249 * @dev: the PCI device to disable
1250 *
1251 * Disables PCI Memory-Write-Invalidate transaction on the device
1252 */
1253void
1254pci_clear_mwi(struct pci_dev *dev)
1255{
1256	u16 cmd;
1257
1258	pci_read_config_word(dev, PCI_COMMAND, &cmd);
1259	if (cmd & PCI_COMMAND_INVALIDATE) {
1260		cmd &= ~PCI_COMMAND_INVALIDATE;
1261		pci_write_config_word(dev, PCI_COMMAND, cmd);
1262	}
1263}
1264#endif /* ! PCI_DISABLE_MWI */
1265
1266/**
1267 * pci_intx - enables/disables PCI INTx for device dev
1268 * @pdev: the PCI device to operate on
1269 * @enable: boolean: whether to enable or disable PCI INTx
1270 *
1271 * Enables/disables PCI INTx for device dev
1272 */
1273void
1274pci_intx(struct pci_dev *pdev, int enable)
1275{
1276	u16 pci_command, new;
1277
1278	pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
1279
1280	if (enable) {
1281		new = pci_command & ~PCI_COMMAND_INTX_DISABLE;
1282	} else {
1283		new = pci_command | PCI_COMMAND_INTX_DISABLE;
1284	}
1285
1286	if (new != pci_command) {
1287		struct pci_devres *dr;
1288
1289		pci_write_config_word(pdev, PCI_COMMAND, new);
1290
1291		dr = find_pci_dr(pdev);
1292		if (dr && !dr->restore_intx) {
1293			dr->restore_intx = 1;
1294			dr->orig_intx = !enable;
1295		}
1296	}
1297}
1298
1299/**
1300 * pci_msi_off - disables any msi or msix capabilities
1301 * @dev: the PCI device to operate on
1302 *
1303 * If you want to use msi see pci_enable_msi and friends.
1304 * This is a lower level primitive that allows us to disable
1305 * msi operation at the device level.
1306 */
1307void pci_msi_off(struct pci_dev *dev)
1308{
1309	int pos;
1310	u16 control;
1311
1312	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
1313	if (pos) {
1314		pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
1315		control &= ~PCI_MSI_FLAGS_ENABLE;
1316		pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
1317	}
1318	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
1319	if (pos) {
1320		pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
1321		control &= ~PCI_MSIX_FLAGS_ENABLE;
1322		pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
1323	}
1324}
1325
1326#ifndef HAVE_ARCH_PCI_SET_DMA_MASK
1327/*
1328 * These can be overridden by arch-specific implementations
1329 */
1330int
1331pci_set_dma_mask(struct pci_dev *dev, u64 mask)
1332{
1333	if (!pci_dma_supported(dev, mask))
1334		return -EIO;
1335
1336	dev->dma_mask = mask;
1337
1338	return 0;
1339}
1340
1341int
1342pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask)
1343{
1344	if (!pci_dma_supported(dev, mask))
1345		return -EIO;
1346
1347	dev->dev.coherent_dma_mask = mask;
1348
1349	return 0;
1350}
1351#endif
1352
1353/**
1354 * pci_select_bars - Make BAR mask from the type of resource
1355 * @dev: the PCI device for which BAR mask is made
1356 * @flags: resource type mask to be selected
1357 *
1358 * This helper routine makes bar mask from the type of resource.
1359 */
1360int pci_select_bars(struct pci_dev *dev, unsigned long flags)
1361{
1362	int i, bars = 0;
1363	for (i = 0; i < PCI_NUM_RESOURCES; i++)
1364		if (pci_resource_flags(dev, i) & flags)
1365			bars |= (1 << i);
1366	return bars;
1367}
1368
1369static int __devinit pci_init(void)
1370{
1371	struct pci_dev *dev = NULL;
1372
1373	while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
1374		pci_fixup_device(pci_fixup_final, dev);
1375	}
1376	return 0;
1377}
1378
/*
 * Parse the "pci=" kernel command-line argument.  Options are separated
 * by commas; each one is first offered to the arch via pcibios_setup(),
 * which consumes an option by returning NULL or an empty string.
 */
static int __devinit pci_setup(char *str)
{
	while (str) {
		char *k = strchr(str, ',');
		if (k)
			*k++ = 0;	/* terminate this option; k points past the comma */
		/* Handle the option here only if pcibios_setup() left it alone
		 * (assignment in the condition is intentional). */
		if (*str && (str = pcibios_setup(str)) && *str) {
			if (!strcmp(str, "nomsi")) {
				pci_no_msi();
			} else if (!strncmp(str, "cbiosize=", 9)) {
				/* memparse advances str, but str is reset to k below */
				pci_cardbus_io_size = memparse(str + 9, &str);
			} else if (!strncmp(str, "cbmemsize=", 10)) {
				pci_cardbus_mem_size = memparse(str + 10, &str);
			} else {
				printk(KERN_ERR "PCI: Unknown option `%s'\n",
						str);
			}
		}
		str = k;	/* move on to the next comma-separated option */
	}
	return 0;
}
early_param("pci", pci_setup);

/* Apply final fixups once the device model is up. */
device_initcall(pci_init);

/* Public entry points of the PCI core, grouped roughly by topic:
 * enable/disable, capability lookup, region reservation, bus mastering,
 * MWI, INTx, DMA masks, resource helpers. */
EXPORT_SYMBOL_GPL(pci_restore_bars);
EXPORT_SYMBOL(pci_enable_device_bars);
EXPORT_SYMBOL(pci_enable_device);
EXPORT_SYMBOL(pcim_enable_device);
EXPORT_SYMBOL(pcim_pin_device);
EXPORT_SYMBOL(pci_disable_device);
EXPORT_SYMBOL(pci_find_capability);
EXPORT_SYMBOL(pci_bus_find_capability);
EXPORT_SYMBOL(pci_release_regions);
EXPORT_SYMBOL(pci_request_regions);
EXPORT_SYMBOL(pci_release_region);
EXPORT_SYMBOL(pci_request_region);
EXPORT_SYMBOL(pci_release_selected_regions);
EXPORT_SYMBOL(pci_request_selected_regions);
EXPORT_SYMBOL(pci_set_master);
EXPORT_SYMBOL(pci_set_mwi);
EXPORT_SYMBOL(pci_clear_mwi);
EXPORT_SYMBOL_GPL(pci_intx);
EXPORT_SYMBOL(pci_set_dma_mask);
EXPORT_SYMBOL(pci_set_consistent_dma_mask);
EXPORT_SYMBOL(pci_assign_resource);
EXPORT_SYMBOL(pci_find_parent_resource);
EXPORT_SYMBOL(pci_select_bars);

/* Power-management related entry points. */
EXPORT_SYMBOL(pci_set_power_state);
EXPORT_SYMBOL(pci_save_state);
EXPORT_SYMBOL(pci_restore_state);
EXPORT_SYMBOL(pci_enable_wake);
EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state);
1434