oce_rx.c revision 11878:ac93462db6d7
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Emulex.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Source file containing the Receive Path handling
 * functions
 */
#include <oce_impl.h>

static void rx_pool_free(char *arg);
static inline mblk_t *oce_rx(struct oce_dev *dev, struct oce_rq *rq,
    struct oce_nic_rx_cqe *cqe);
static inline mblk_t *oce_rx_bcopy(struct oce_dev *dev,
    struct oce_rq *rq, struct oce_nic_rx_cqe *cqe);
static int oce_rq_charge(struct oce_dev *dev, struct oce_rq *rq,
    uint32_t nbufs);
static oce_rq_bdesc_t *oce_rqb_alloc(struct oce_rq *rq);
static void oce_rqb_free(struct oce_rq *rq, oce_rq_bdesc_t *rqbd);
static void oce_rqb_dtor(oce_rq_bdesc_t *rqbd);
static int oce_rqb_ctor(oce_rq_bdesc_t *rqbd, struct oce_rq *rq,
    size_t size, int flags);
static void oce_rx_insert_tag(mblk_t *mp, uint16_t vtag);
static void oce_set_rx_oflags(mblk_t *mp, struct oce_nic_rx_cqe *cqe);
static inline void oce_rx_drop_pkt(struct oce_rq *rq,
    struct oce_nic_rx_cqe *cqe);

/*
 * function to create a DMA buffer pool for RQ
 *
 * rq - pointer to the RQ for which the buffer pool is created
 * buf_size - size of each buffer
 *
 * return DDI_SUCCESS => success, DDI_FAILURE otherwise
 */
int
oce_rqb_cache_create(struct oce_rq *rq, size_t buf_size)
{
	struct oce_dev *dev = rq->parent;
	int size;
	int cnt;
	int ret;
	int nitems;

	nitems = rq->cfg.nbufs;
	size = nitems * sizeof (oce_rq_bdesc_t);
	rq->rq_bdesc_array = kmem_zalloc(size, KM_SLEEP);

	/* Create the free buffer list */
	OCE_LIST_CREATE(&rq->rq_buf_list, DDI_INTR_PRI(dev->intr_pri));

	for (cnt = 0; cnt < nitems; cnt++) {
		ret = oce_rqb_ctor(&rq->rq_bdesc_array[cnt],
		    rq, buf_size, DDI_DMA_STREAMING);
		if (ret != DDI_SUCCESS) {
			goto rqb_fail;
		}
		OCE_LIST_INSERT_TAIL(&rq->rq_buf_list,
		    &(rq->rq_bdesc_array[cnt].link));
	}
	return (DDI_SUCCESS);

rqb_fail:
	oce_rqb_cache_destroy(rq);
	return (DDI_FAILURE);
} /* oce_rqb_cache_create */

/*
 * function to destroy the RQ DMA buffer cache
 *
 * rq - pointer to rq structure
 *
 * return none
 */
void
oce_rqb_cache_destroy(struct oce_rq *rq)
{
	oce_rq_bdesc_t *rqbd = NULL;

	while ((rqbd = (oce_rq_bdesc_t *)OCE_LIST_REM_HEAD(&rq->rq_buf_list))
	    != NULL) {
		oce_rqb_dtor(rqbd);
	}
	kmem_free(rq->rq_bdesc_array,
	    rq->cfg.nbufs * sizeof (oce_rq_bdesc_t));
	OCE_LIST_DESTROY(&rq->rq_buf_list);
} /* oce_rqb_cache_destroy */

/*
 * RQ buffer destructor function
 *
 * rqbd - pointer to rq buffer descriptor
 *
 * return none
 */
static void
oce_rqb_dtor(oce_rq_bdesc_t *rqbd)
{
	if ((rqbd == NULL) || (rqbd->rq == NULL)) {
		return;
	}
	oce_free_dma_buffer(rqbd->rq->parent, rqbd->rqb);
	if (rqbd->mp != NULL) {
		/*
		 * The data buffer was freed above; disarm the free
		 * routine and release the mblk.
		 */
		rqbd->fr_rtn.free_arg = NULL;
		freeb(rqbd->mp);
	}
} /* oce_rqb_dtor */

/*
 * RQ buffer constructor function
 *
 * rqbd - pointer to rq buffer descriptor
 * rq - pointer to RQ structure
 * size - size of the buffer
 * flags - DMA buffer allocation flags (e.g. DDI_DMA_STREAMING)
 *
 * return DDI_SUCCESS => success, DDI_FAILURE otherwise
 */
static int
oce_rqb_ctor(oce_rq_bdesc_t *rqbd, struct oce_rq *rq, size_t size, int flags)
{
	struct oce_dev *dev;
	oce_dma_buf_t *dbuf;

	dev = rq->parent;

	dbuf = oce_alloc_dma_buffer(dev, size, flags);
	if (dbuf == NULL) {
		return (DDI_FAILURE);
	}

	/* save the buffer and record its DMA address past the headroom */
	rqbd->rqb = dbuf;
	rqbd->rq = rq;
	rqbd->frag_addr.dw.addr_lo = ADDR_LO(dbuf->addr + OCE_RQE_BUF_HEADROOM);
	rqbd->frag_addr.dw.addr_hi = ADDR_HI(dbuf->addr + OCE_RQE_BUF_HEADROOM);
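	/*
	 * Bind the DMA buffer to an mblk via desballoc(); when the stack
	 * frees the mblk, rx_pool_free() returns the buffer to the pool.
	 */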
	rqbd->fr_rtn.free_func = (void (*)())rx_pool_free;
	rqbd->fr_rtn.free_arg = (caddr_t)(void *)rqbd;
	rqbd->mp = desballoc((uchar_t *)(rqbd->rqb->base),
	    rqbd->rqb->size, 0, &rqbd->fr_rtn);
	if (rqbd->mp != NULL) {
		rqbd->mp->b_rptr =
		    (uchar_t *)rqbd->rqb->base + OCE_RQE_BUF_HEADROOM;
	}

	return (DDI_SUCCESS);
} /* oce_rqb_ctor */

/*
 * RQ buffer allocator function
 *
 * rq - pointer to RQ structure
 *
 * return pointer to RQ buffer descriptor
 */
static inline oce_rq_bdesc_t *
oce_rqb_alloc(struct oce_rq *rq)
{
	oce_rq_bdesc_t *rqbd;

	rqbd = OCE_LIST_REM_HEAD(&rq->rq_buf_list);
	return (rqbd);
} /* oce_rqb_alloc */

/*
 * function to free the RQ buffer
 *
 * rq - pointer to RQ structure
 * rqbd - pointer to receive buffer descriptor
 *
 * return none
 */
static inline void
oce_rqb_free(struct oce_rq *rq, oce_rq_bdesc_t *rqbd)
{
	OCE_LIST_INSERT_TAIL(&rq->rq_buf_list, rqbd);
} /* oce_rqb_free */


/*
 * function to charge a given RQ with buffers from the pool's free list
 *
 * dev - software handle to the device
 * rq - pointer to the RQ to charge
 * nbufs - number of buffers to be charged
 *
 * return number of rqe's charged.
 */
static inline int
oce_rq_charge(struct oce_dev *dev,
    struct oce_rq *rq, uint32_t nbufs)
{
	struct oce_nic_rqe *rqe;
	oce_rq_bdesc_t *rqbd;
	struct rq_shadow_entry *shadow_rq;
	int32_t num_bufs = 0;
	int32_t total_bufs = 0;
	pd_rxulp_db_t rxdb_reg;
	uint32_t cnt;

	shadow_rq = rq->shadow_ring;
	/* check number of slots free and recharge */
	nbufs = ((rq->buf_avail + nbufs) > rq->cfg.q_len) ?
	    (rq->cfg.q_len - rq->buf_avail) : nbufs;
	for (cnt = 0; cnt < nbufs; cnt++) {
		rqbd = oce_rqb_alloc(rq);
		if (rqbd == NULL) {
			oce_log(dev, CE_NOTE, MOD_RX, "%s %x",
			    "rqb pool empty @ ticks",
			    (uint32_t)ddi_get_lbolt());
			break;
		}
		if (rqbd->mp == NULL) {
			rqbd->mp = desballoc((uchar_t *)(rqbd->rqb->base),
			    rqbd->rqb->size, 0, &rqbd->fr_rtn);
			if (rqbd->mp != NULL) {
				rqbd->mp->b_rptr =
				    (uchar_t *)rqbd->rqb->base +
				    OCE_RQE_BUF_HEADROOM;
			}

			/*
			 * desballoc failed again; return the buffer to
			 * the pool and continue. The loop is bounded by
			 * nbufs, so it remains finite.
			 */
			if (rqbd->mp == NULL) {
				oce_rqb_free(rq, rqbd);
				continue;
			}
		}

		/* fill the rqes */
		rqe = RING_GET_PRODUCER_ITEM_VA(rq->ring,
		    struct oce_nic_rqe);
		rqe->u0.s.frag_pa_lo = rqbd->frag_addr.dw.addr_lo;
		rqe->u0.s.frag_pa_hi = rqbd->frag_addr.dw.addr_hi;
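		/*
		 * Record the descriptor in the shadow ring at the
		 * producer index so the completion path can recover
		 * it by consumer index.
		 */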
		shadow_rq[rq->ring->pidx].rqbd = rqbd;
		DW_SWAP(u32ptr(rqe), sizeof (struct oce_nic_rqe));
		RING_PUT(rq->ring, 1);

		/* if we have reached the max allowed posts, post */
		if (cnt && !(cnt % OCE_MAX_RQ_POSTS)) {
			rxdb_reg.dw0 = 0;
			rxdb_reg.bits.num_posted = num_bufs;
			rxdb_reg.bits.qid = rq->rq_id & DB_RQ_ID_MASK;
			OCE_DB_WRITE32(dev, PD_RXULP_DB, rxdb_reg.dw0);
			num_bufs = 0;
		}
		num_bufs++;
		total_bufs++;
	}

	/* post pending bufs */
	if (num_bufs) {
		rxdb_reg.dw0 = 0;
		rxdb_reg.bits.num_posted = num_bufs;
		rxdb_reg.bits.qid = rq->rq_id & DB_RQ_ID_MASK;
		OCE_DB_WRITE32(dev, PD_RXULP_DB, rxdb_reg.dw0);
	}
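	/* buf_avail tracks the number of buffers currently posted to the RQ */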
	atomic_add_32(&rq->buf_avail, total_bufs);
	return (total_bufs);
} /* oce_rq_charge */

/*
 * function to release the posted buffers
 *
 * rq - pointer to the RQ to discharge
 *
 * return none
 */
void
oce_rq_discharge(struct oce_rq *rq)
{
	oce_rq_bdesc_t *rqbd;
	struct rq_shadow_entry *shadow_rq;

	shadow_rq = rq->shadow_ring;
	/* Free the posted buffers since the RQ is being destroyed */
	while ((int32_t)rq->buf_avail > 0) {
		rqbd = shadow_rq[rq->ring->cidx].rqbd;
		oce_rqb_free(rq, rqbd);
		RING_GET(rq->ring, 1);
		rq->buf_avail--;
	}
}

/*
 * function to process a single packet
 *
 * dev - software handle to the device
 * rq - pointer to the RQ on which the packet was received
 * cqe - pointer to the completion queue entry
 *
 * return mblk pointer => success, NULL => error
 */
static inline mblk_t *
oce_rx(struct oce_dev *dev, struct oce_rq *rq, struct oce_nic_rx_cqe *cqe)
{
	mblk_t *mp;
	int pkt_len;
	int32_t frag_cnt = 0;
	mblk_t *mblk_prev = NULL;
	mblk_t *mblk_head = NULL;
	int frag_size;
	struct rq_shadow_entry *shadow_rq;
	struct rq_shadow_entry *shadow_rqe;
	oce_rq_bdesc_t *rqbd;

	/* Get the relevant Queue pointers */
	shadow_rq = rq->shadow_ring;
	pkt_len = cqe->u0.s.pkt_size;
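	/*
	 * Walk the fragments reported in the CQE; each shadow ring
	 * entry at the consumer index holds the buffer the hardware
	 * filled for that fragment.
	 */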
	for (; frag_cnt < cqe->u0.s.num_fragments; frag_cnt++) {
		shadow_rqe = &shadow_rq[rq->ring->cidx];
		rqbd = shadow_rqe->rqbd;
		mp = rqbd->mp;
		if (mp == NULL)
			return (NULL);
		frag_size = (pkt_len > rq->cfg.frag_size) ?
		    rq->cfg.frag_size : pkt_len;
		mp->b_wptr = mp->b_rptr + frag_size;
		pkt_len -= frag_size;
		/* Chain the message mblks */
		if (mblk_head == NULL) {
			mblk_head = mblk_prev = mp;
		} else {
			mblk_prev->b_cont = mp;
			mblk_prev = mp;
		}
		(void) ddi_dma_sync(rqbd->rqb->dma_handle, 0, frag_size,
		    DDI_DMA_SYNC_FORKERNEL);
		RING_GET(rq->ring, 1);
	}

	if (mblk_head == NULL) {
		oce_log(dev, CE_WARN, MOD_RX, "%s", "oce_rx:no frags?");
		return (NULL);
	}
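	/* these buffers stay loaned to the stack until freeb() returns them */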
	atomic_add_32(&rq->pending, (cqe->u0.s.num_fragments & 0x7));
	mblk_head->b_next = NULL;
	return (mblk_head);
} /* oce_rx */

/* ARGSUSED */
static inline mblk_t *
oce_rx_bcopy(struct oce_dev *dev, struct oce_rq *rq, struct oce_nic_rx_cqe *cqe)
{
	mblk_t *mp;
	int pkt_len;
	int alloc_len;
	int32_t frag_cnt = 0;
	int frag_size;
	struct rq_shadow_entry *shadow_rq;
	struct rq_shadow_entry *shadow_rqe;
	oce_rq_bdesc_t *rqbd;
	boolean_t tag_present = B_FALSE;
	unsigned char *rptr;

	shadow_rq = rq->shadow_ring;
	pkt_len = cqe->u0.s.pkt_size;
	alloc_len = pkt_len;

	/* The hardware always strips the VLAN tag, so it must be re-inserted */
	if (cqe->u0.s.vlan_tag_present) {
		alloc_len += VLAN_TAGSZ;
		tag_present = B_TRUE;
	}
	mp = allocb(alloc_len, BPRI_HI);
	if (mp == NULL)
		return (NULL);
	if (tag_present) {
		/* leave room at the head to re-insert the VLAN tag later */
		mp->b_rptr += VLAN_TAGSZ;
	}
	rptr = mp->b_rptr;
	mp->b_wptr = mp->b_wptr + alloc_len;

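	/*
	 * Copy each fragment out of its DMA buffer and return the
	 * buffer to the pool immediately; no loaned-buffer accounting
	 * is needed on this path.
	 */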
	for (frag_cnt = 0; frag_cnt < cqe->u0.s.num_fragments; frag_cnt++) {
		shadow_rqe = &shadow_rq[rq->ring->cidx];
		rqbd = shadow_rqe->rqbd;
		frag_size = (pkt_len > rq->cfg.frag_size) ?
		    rq->cfg.frag_size : pkt_len;
		(void) ddi_dma_sync(rqbd->rqb->dma_handle, 0, frag_size,
		    DDI_DMA_SYNC_FORKERNEL);
		bcopy(rqbd->rqb->base + OCE_RQE_BUF_HEADROOM,
		    rptr, frag_size);
		rptr += frag_size;
		pkt_len -= frag_size;
		oce_rqb_free(rq, rqbd);
		RING_GET(rq->ring, 1);
	}
	return (mp);
}

static inline void
oce_set_rx_oflags(mblk_t *mp, struct oce_nic_rx_cqe *cqe)
{
	int csum_flags = 0;

	/* set flags */
	if (cqe->u0.s.ip_cksum_pass) {
		csum_flags |= HCK_IPV4_HDRCKSUM_OK;
	}

	if (cqe->u0.s.l4_cksum_pass) {
		csum_flags |= (HCK_FULLCKSUM | HCK_FULLCKSUM_OK);
	}

	if (csum_flags) {
		(void) mac_hcksum_set(mp, 0, 0, 0, 0, csum_flags);
	}
}

static inline void
oce_rx_insert_tag(mblk_t *mp, uint16_t vtag)
{
	struct ether_vlan_header *ehp;

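	/*
	 * Slide the destination and source MAC addresses back by
	 * VLAN_TAGSZ bytes to make room for the 802.1Q header, then
	 * fill in the TPID and TCI.
	 */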
	(void) memmove(mp->b_rptr - VLAN_TAGSZ,
	    mp->b_rptr, 2 * ETHERADDRL);
	mp->b_rptr -= VLAN_TAGSZ;
	ehp = (struct ether_vlan_header *)voidptr(mp->b_rptr);
	ehp->ether_tpid = htons(ETHERTYPE_VLAN);
	ehp->ether_tci = LE_16(vtag);
}

/*
 * function to process a receive queue's completion queue
 *
 * arg - pointer to the RQ to process
 *
 * return number of cqes processed
 */
uint16_t
oce_drain_rq_cq(void *arg)
{
	struct oce_nic_rx_cqe *cqe;
	struct oce_rq *rq;
	mblk_t *mp = NULL;
	mblk_t *mblk_head = NULL;
	mblk_t *mblk_prev = NULL;
	uint16_t num_cqe = 0;
	struct oce_cq *cq;
	struct oce_dev *dev;

	if (arg == NULL)
		return (0);

	rq = (struct oce_rq *)arg;
	dev = rq->parent;
	cq = rq->cq;
	mutex_enter(&rq->rx_lock);
	cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);

	/* dequeue till you reach an invalid cqe */
	while (RQ_CQE_VALID(cqe) && (num_cqe < rq->cfg.q_len)) {
		DW_SWAP(u32ptr(cqe), sizeof (struct oce_nic_rx_cqe));
		/* copy small packets or when too few buffers remain to recharge */
		if (cqe->u0.s.pkt_size < dev->rx_bcopy_limit ||
		    OCE_LIST_SIZE(&rq->rq_buf_list) < cqe->u0.s.num_fragments) {
			mp = oce_rx_bcopy(dev, rq, cqe);
		} else {
			mp = oce_rx(dev, rq, cqe);
		}
		if (mp != NULL) {
			if (cqe->u0.s.vlan_tag_present) {
				oce_rx_insert_tag(mp, cqe->u0.s.vlan_tag);
			}
			oce_set_rx_oflags(mp, cqe);
			if (mblk_head == NULL) {
				mblk_head = mblk_prev = mp;
			} else {
				mblk_prev->b_next = mp;
				mblk_prev = mp;
			}
		} else {
			oce_rx_drop_pkt(rq, cqe);
		}
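		/*
		 * The completed fragments are no longer posted; account
		 * for them and recharge the RQ with that many buffers.
		 */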
		atomic_add_32(&rq->buf_avail, -(cqe->u0.s.num_fragments & 0x7));
		(void) oce_rq_charge(dev, rq,
		    (cqe->u0.s.num_fragments & 0x7));
		RQ_CQE_INVALIDATE(cqe);
		RING_GET(cq->ring, 1);
		cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring,
		    struct oce_nic_rx_cqe);
		num_cqe++;
	} /* for all valid CQEs */
	mutex_exit(&rq->rx_lock);
	if (mblk_head) {
		mac_rx(dev->mac_handle, NULL, mblk_head);
	}
	oce_arm_cq(dev, cq->cq_id, num_cqe, B_TRUE);
	return (num_cqe);
} /* oce_drain_rq_cq */

/*
 * function to free an mblk data buffer back to the RQ pool
 *
 * arg - pointer to the receive buffer descriptor
 *
 * return none
 */
static void
rx_pool_free(char *arg)
{
	oce_rq_bdesc_t *rqbd;
	struct oce_rq *rq;

	/* During destroy, arg will be NULL */
	if (arg == NULL) {
		return;
	}

	/* retrieve the pointers from arg */
	rqbd = (oce_rq_bdesc_t *)(void *)arg;
	rq = rqbd->rq;

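	/*
	 * Re-arm the descriptor with a fresh mblk before returning it to
	 * the pool; if desballoc() fails here, oce_rq_charge() retries
	 * the allocation when the buffer is posted again.
	 */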
	rqbd->mp = desballoc((uchar_t *)(rqbd->rqb->base),
	    rqbd->rqb->size, 0, &rqbd->fr_rtn);
	if (rqbd->mp != NULL) {
		rqbd->mp->b_rptr = (uchar_t *)rqbd->rqb->base +
		    OCE_RQE_BUF_HEADROOM;
	}
	oce_rqb_free(rq, rqbd);
	(void) atomic_add_32(&rq->pending, -1);
} /* rx_pool_free */

/*
 * function to drain the RX completion queue when stopping RX
 *
 * rq - pointer to RQ structure
 *
 * return none
 */
void
oce_clean_rq(struct oce_rq *rq)
{
	uint16_t num_cqe = 0;
	struct oce_cq *cq;
	struct oce_dev *dev;
	struct oce_nic_rx_cqe *cqe;
	int32_t ti = 0;

	dev = rq->parent;
	cq = rq->cq;
	cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);
	/* drop completed packets, polling for up to DEFAULT_DRAIN_TIME ms */
	for (ti = 0; ti < DEFAULT_DRAIN_TIME; ti++) {
		while (RQ_CQE_VALID(cqe)) {
			DW_SWAP(u32ptr(cqe), sizeof (struct oce_nic_rx_cqe));
			oce_rx_drop_pkt(rq, cqe);
			atomic_add_32(&rq->buf_avail,
			    -(cqe->u0.s.num_fragments & 0x7));
			oce_arm_cq(dev, cq->cq_id, 1, B_TRUE);
			RQ_CQE_INVALIDATE(cqe);
			RING_GET(cq->ring, 1);
			cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring,
			    struct oce_nic_rx_cqe);
			num_cqe++;
		}
		OCE_MSDELAY(1);
	}
#if 0
	if (num_cqe) {
		oce_arm_cq(dev, cq->cq_id, num_cqe, B_FALSE);
	}
	/* Drain the Event queue now */
	oce_drain_eq(rq->cq->eq);
	return (num_cqe);
#endif
} /* oce_clean_rq */

/*
 * function to start the RX
 *
 * rq - pointer to RQ structure
 *
 * return 0 => success
 */
int
oce_start_rq(struct oce_rq *rq)
{
	int ret = 0;
	struct oce_dev *dev = rq->parent;

	(void) oce_rq_charge(dev, rq, rq->cfg.q_len);
	oce_arm_cq(dev, rq->cq->cq_id, 0, B_TRUE);
	return (ret);
} /* oce_start_rq */

/* Checks for rx buffers still pending with the stack */
int
oce_rx_pending(struct oce_dev *dev)
{
	int ti;

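	/* wait up to ~200ms for the stack to return loaned rx buffers */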
	for (ti = 0; ti < 200; ti++) {
		if (dev->rq[0]->pending > 0) {
			OCE_MSDELAY(1);
			continue;
		} else {
			dev->rq[0]->pending = 0;
			break;
		}
	}
	return (dev->rq[0]->pending);
}

static inline void
oce_rx_drop_pkt(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe)
{
	int frag_cnt;
	oce_rq_bdesc_t *rqbd;
	struct rq_shadow_entry *shadow_rq;
	shadow_rq = rq->shadow_ring;
	for (frag_cnt = 0; frag_cnt < cqe->u0.s.num_fragments; frag_cnt++) {
		rqbd = shadow_rq[rq->ring->cidx].rqbd;
		oce_rqb_free(rq, rqbd);
		RING_GET(rq->ring, 1);
	}
}
647