/* dragonfly.git — test/debug/vmpqinactive.c */
1 /*
2  * VMPQINACTIVE.C
3  *
4  * cc -I/usr/src/sys vmpqinactive.c -o ~/bin/vmpqinactive -lkvm
5  *
6  * vmpqinactive
7  *
8  * Calculate how many inactive pages are dirty
9  *
10  * Copyright (c) 2004-2020 The DragonFly Project.  All rights reserved.
11  *
12  * This code is derived from software contributed to The DragonFly Project
13  * by Matthew Dillon <dillon@backplane.com>
14  *
15  * Redistribution and use in source and binary forms, with or without
16  * modification, are permitted provided that the following conditions
17  * are met:
18  *
19  * 1. Redistributions of source code must retain the above copyright
20  *    notice, this list of conditions and the following disclaimer.
21  * 2. Redistributions in binary form must reproduce the above copyright
22  *    notice, this list of conditions and the following disclaimer in
23  *    the documentation and/or other materials provided with the
24  *    distribution.
25  * 3. Neither the name of The DragonFly Project nor the names of its
26  *    contributors may be used to endorse or promote products derived
27  *    from this software without specific, prior written permission.
28  *
29  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
30  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
31  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
32  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
33  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
34  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
35  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
36  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
37  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
38  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
39  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
40  * SUCH DAMAGE.
41  */
42
43 #define _KERNEL_STRUCTURES_
44 #include <sys/param.h>
45 #include <sys/user.h>
46 #include <sys/malloc.h>
47 #include <sys/signalvar.h>
48 #include <sys/vnode.h>
49 #include <sys/buf.h>
50 #include <sys/namecache.h>
51 #include <sys/slaballoc.h>
52
53 #include <vm/vm.h>
54 #include <vm/vm_page.h>
55 #include <vm/vm_kern.h>
56 #include <vm/vm_object.h>
57 #include <vm/swap_pager.h>
58 #include <vm/vnode_pager.h>
59
60 #include <stdio.h>
61 #include <stdlib.h>
62 #include <string.h>
63 #include <fcntl.h>
64 #include <kvm.h>
65 #include <nlist.h>
66 #include <getopt.h>
67
/*
 * Kernel symbols resolved via kvm_nlist().  Order matters: main()
 * indexes this array positionally as Nl[0] .. Nl[8].
 */
struct nlist Nl[] = {
    { "_vm_page_array" },	/* [0] base address of the vm_page array */
    { "_vm_page_array_size" },	/* [1] number of vm_page entries */
    { "_kernel_object" },	/* [2] address of the kernel VM object */
    { "_nbuf" },		/* [3] buffer cache buffer count */
    { "_nswbuf_mem" },		/* [4] swap buffer count (mem) */
    { "_nswbuf_kva" },		/* [5] swap buffer count (kva) */
    { "_nswbuf_raw" },		/* [6] swap buffer count (raw) */
    { "_kernbase" },		/* [7] kernel image start (for kern_size) */
    { "__end" },		/* [8] kernel image end (for kern_size) */
    { NULL }
};
80
int debugopt;		/* -d: progress output while scanning */
int verboseopt;		/* -v: per-page detail lines */
#if 0
struct vm_page **vm_page_buckets;
int vm_page_hash_mask;
#endif
struct vm_page *vm_page_array;		/* kernel VA of the page array (from Nl[0]) */
struct vm_object *kernel_object_ptr;	/* kernel VA of kernel_object (from Nl[2]) */
int vm_page_array_size;			/* entry count (from Nl[1]) */
long nbuf;
long nswbuf_mem;
long nswbuf_kva;
long nswbuf_raw;
long kern_size;		/* kernel image size: __end - kernbase */

void checkpage(kvm_t *kd, vm_page_t mptr, vm_page_t m, struct vm_object *obj);
static void kkread_vmpage(kvm_t *kd, u_long addr, vm_page_t m);
static void kkread(kvm_t *kd, u_long addr, void *buf, size_t nbytes);
static int kkread_err(kvm_t *kd, u_long addr, void *buf, size_t nbytes);

#if 0
static void addsltrack(vm_page_t m);
static void dumpsltrack(kvm_t *kd);
#endif
static int unique_object(void *ptr);

/*
 * Page accounting accumulated by the main scan loop.  All counts are
 * in pages (multiplied by 4096 for the MB summaries at the end).
 */
long count_free;
long count_wired;               /* total */
long count_wired_vnode;
long count_wired_anon;
long count_wired_in_pmap;
long count_wired_pgtable;
long count_wired_other;
long count_wired_kernel;
long count_wired_obj_other;

long count_anon;
long count_anon_in_pmap;
long count_vnode;
long count_device;
long count_phys;
long count_kernel;
long count_unknown;
long count_noobj_offqueue;
long count_noobj_onqueue;
126
/*
 * Scan the entire kernel vm_page_array via libkvm and report, in MB,
 * how the inactive queue splits into clean/dirty pages plus a broad
 * breakdown of wired/anonymous/vnode/device/kernel memory.
 *
 * Options: -M core, -N system (passed to kvm_open), -d debug progress,
 * -v per-page verbose output.
 */
int
main(int ac, char **av)
{
    const char *corefile = NULL;
    const char *sysfile = NULL;
    struct vm_page m;		/* local copy of the page being examined */
    struct vm_object obj;	/* local copy of m.object, when present */
    kvm_t *kd;
    int ch;
#if 0
    vm_page_t mptr;
    int hv;
#endif
    int i;
    const char *qstr;		/* page-queue name for verbose output */
    const char *ostr;		/* object-type name for verbose output */
    long pqinactive_clean;
    long pqinactive_dirty1;	/* dirty, first LRU pass (no PG_WINATCFLS) */
    long pqinactive_dirty2;	/* dirty, second LRU pass (PG_WINATCFLS set) */
    long pqinactive_refd;	/* clean but recently referenced */
    long pqinactive_ready;	/* clean and immediately reclaimable */

    pqinactive_clean = 0;
    pqinactive_dirty1 = 0;
    pqinactive_dirty2 = 0;
    pqinactive_refd = 0;
    pqinactive_ready = 0;

    while ((ch = getopt(ac, av, "M:N:dv")) != -1) {
        switch(ch) {
        case 'd':
            ++debugopt;
            break;
        case 'v':
            ++verboseopt;
            break;
        case 'M':
            corefile = optarg;
            break;
        case 'N':
            sysfile = optarg;
            break;
        default:
            fprintf(stderr, "%s [-M core] [-N system]\n", av[0]);
            exit(1);
        }
    }
    ac -= optind;
    av += optind;

    /* NULL sysfile/corefile means "the running system" to kvm_open() */
    if ((kd = kvm_open(sysfile, corefile, NULL, O_RDONLY, "kvm:")) == NULL) {
        perror("kvm_open");
        exit(1);
    }
    if (kvm_nlist(kd, Nl) != 0) {
        perror("kvm_nlist");
        exit(1);
    }

    /* Resolve the kernel globals we need (see Nl[] index comments) */
    kkread(kd, Nl[0].n_value, &vm_page_array, sizeof(vm_page_array));
    kkread(kd, Nl[1].n_value, &vm_page_array_size, sizeof(vm_page_array_size));
    kernel_object_ptr = (void *)Nl[2].n_value;
    kkread(kd, Nl[3].n_value, &nbuf, sizeof(nbuf));
    kkread(kd, Nl[4].n_value, &nswbuf_mem, sizeof(nswbuf_mem));
    kkread(kd, Nl[5].n_value, &nswbuf_kva, sizeof(nswbuf_kva));
    kkread(kd, Nl[6].n_value, &nswbuf_raw, sizeof(nswbuf_raw));
    kern_size = Nl[8].n_value - Nl[7].n_value;

    /*
     * Scan the vm_page_array validating all pages with associated objects
     */
    for (i = 0; i < vm_page_array_size; ++i) {
        if (debugopt && (i & 1023) == 0) {
            printf("page %d/%d\r", i, vm_page_array_size);
            fflush(stdout);
        }
        kkread_vmpage(kd, (u_long)&vm_page_array[i], &m);
        if (m.object) {
            kkread(kd, (u_long)m.object, &obj, sizeof(obj));
            checkpage(kd, &vm_page_array[i], &m, &obj);
        }

        /*
         * Classify by page queue.  Queue constants are ordered, so the
         * >= comparisons bucket the per-cpu queue variants together.
         * The INACTIVE case is the point of this tool: split into
         * clean vs dirty (anything busy/wired/held/needs-commit counts
         * as not-reclaimable-clean).
         */
        if (m.queue >= PQ_HOLD) {
            qstr = "HOLD";
        } else if (m.queue >= PQ_CACHE) {
            qstr = "CACHE";
        } else if (m.queue >= PQ_ACTIVE) {
            qstr = "ACTIVE";
        } else if (m.queue >= PQ_INACTIVE) {
            qstr = "INACTIVE";
            if (m.dirty || m.wire_count || m.busy_count || m.hold_count ||
                (m.flags & PG_NEED_COMMIT)) {
                    if (m.flags & PG_WINATCFLS)
                            ++pqinactive_dirty2;
                    else
                            ++pqinactive_dirty1;
            } else {
                    ++pqinactive_clean;
                    if (m.flags & PG_REFERENCED)
                            ++pqinactive_refd;
                    else
                            ++pqinactive_ready;
            }
        } else if (m.queue >= PQ_FREE) {
            qstr = "FREE";
            ++count_free;
        } else {
            qstr = "NONE";
        }

        /*
         * Wired-page breakdown.  A wired page with no object but
         * MAPPED+WRITEABLE+UNQUEUED is counted as a page-table page;
         * otherwise classification follows the owning object's type.
         */
        if (m.wire_count) {
                ++count_wired;
                if (m.object == NULL) {
                        if ((m.flags & PG_MAPPED) &&
                            (m.flags & PG_WRITEABLE) &&
                            (m.flags & PG_UNQUEUED)) {
                                ++count_wired_pgtable;
                        } else {
                                ++count_wired_other;
                        }
                } else if (m.object == kernel_object_ptr) {
                        ++count_wired_kernel;
                } else {
                        switch(obj.type) {
                        case OBJT_VNODE:
                                ++count_wired_vnode;
                                break;
                        case OBJT_DEFAULT:
                        case OBJT_SWAP:
#ifdef PMAP_ADVANCED
                                if (m.flags & PG_MAPPED)
#else
                                if (m.md.pmap_count)
#endif
                                        ++count_wired_in_pmap;
                                else
                                        ++count_wired_anon;
                                break;
                        default:
                                ++count_wired_obj_other;
                                break;
                        }
                }
        } else
        /* Not wired: count anonymous pages that are mapped in a pmap */
#ifdef PMAP_ADVANCED
        if (m.flags & PG_MAPPED) {
#else
        if (m.md.pmap_count) {
#endif
                if (m.object && m.object != kernel_object_ptr) {
                        switch(obj.type) {
                        case OBJT_DEFAULT:
                        case OBJT_SWAP:
                                ++count_anon_in_pmap;
                                break;
                        default:
                                break;
                        }
                }
        }

        if (verboseopt) {
            printf("page %p obj %p/%-8ju(%016jx) val=%02x dty=%02x hold=%d "
                   "wire=%-2d act=%-3d busy=%d w/pmapcnt=%d/%d %8s",
                &vm_page_array[i],
                m.object,
                (intmax_t)m.pindex,
                (intmax_t)m.pindex * PAGE_SIZE,
                m.valid,
                m.dirty,
                m.hold_count,
                m.wire_count,
                m.act_count,
                m.busy_count,
#ifdef PMAP_ADVANCED
                ((m.flags & PG_WRITEABLE) != 0),
                ((m.flags & PG_MAPPED) != 0),
#else
                m.md.writeable_count,
                m.md.pmap_count,
#endif
                qstr
            );
        }

        /*
         * Per-object accounting: resident_page_count is added only the
         * first time each object pointer is seen (unique_object()), so
         * these totals are per-object, not per-page.
         */
        if (m.object == kernel_object_ptr) {
                ostr = "kernel";
                if (unique_object(m.object))
                        count_kernel += obj.resident_page_count;
        } else if (m.object) {
            switch(obj.type) {
            case OBJT_DEFAULT:
                ostr = "default";
                if (unique_object(m.object))
                        count_anon += obj.resident_page_count;
                break;
            case OBJT_SWAP:
                ostr = "swap";
                if (unique_object(m.object))
                        count_anon += obj.resident_page_count;
                break;
            case OBJT_VNODE:
                ostr = "vnode";
                if (unique_object(m.object))
                        count_vnode += obj.resident_page_count;
                break;
            case OBJT_DEVICE:
                ostr = "device";
                if (unique_object(m.object))
                        count_device += obj.resident_page_count;
                break;
            case OBJT_PHYS:
                ostr = "phys";
                if (unique_object(m.object))
                        count_phys += obj.resident_page_count;
                break;
            case OBJT_DEAD:
                ostr = "dead";
                if (unique_object(m.object))
                        count_unknown += obj.resident_page_count;
                break;
            default:
                if (unique_object(m.object))
                        count_unknown += obj.resident_page_count;
                ostr = "unknown";
                break;
            }
        } else {
            ostr = "-";
            /* m.queue - m.pc normalizes a per-cpu queue back to its base */
            if (m.queue == PQ_NONE)
                    ++count_noobj_offqueue;
            else if (m.queue - m.pc != PQ_FREE)
                    ++count_noobj_onqueue;
        }

        if (verboseopt) {
            printf(" %-7s", ostr);
            if (m.busy_count & PBUSY_LOCKED)
                printf(" BUSY");
            if (m.busy_count & PBUSY_WANTED)
                printf(" WANTED");
            if (m.flags & PG_WINATCFLS)
                printf(" WINATCFLS");
            if (m.flags & PG_FICTITIOUS)
                printf(" FICTITIOUS");
            if (m.flags & PG_WRITEABLE)
                printf(" WRITEABLE");
            if (m.flags & PG_MAPPED)
                printf(" MAPPED");
            if (m.flags & PG_NEED_COMMIT)
                printf(" NEED_COMMIT");
            if (m.flags & PG_REFERENCED)
                printf(" REFERENCED");
            if (m.flags & PG_CLEANCHK)
                printf(" CLEANCHK");
            if (m.busy_count & PBUSY_SWAPINPROG)
                printf(" SWAPINPROG");
            if (m.flags & PG_NOSYNC)
                printf(" NOSYNC");
            if (m.flags & PG_UNQUEUED)
                printf(" UNQUEUED");
            if (m.flags & PG_MARKER)
                printf(" MARKER");
            if (m.flags & PG_RAM)
                printf(" RAM");
            if (m.flags & PG_SWAPPED)
                printf(" SWAPPED");
#if 0
            if (m.flags & PG_SLAB)
                printf(" SLAB");
#endif
            printf("\n");
#if 0
            if (m.flags & PG_SLAB)
                addsltrack(&m);
#endif
        }
    }
    if (debugopt || verboseopt)
        printf("\n");

    /*
     * Summary.  Counts are pages; * 4096.0 / 1048576.0 converts to MB
     * (assumes 4KB pages).
     */
    printf("%8.2fM free\n",
        count_free * 4096.0 / 1048576.0);
    printf("%8.2fM inactive-clean\n",
        pqinactive_clean * 4096.0 / 1048576.0);
    printf("%8.2fM inactive-clean-and-referenced\n",
        pqinactive_refd * 4096.0 / 1048576.0);
    printf("%8.2fM inactive-clean-and-ready\n",
        pqinactive_ready * 4096.0 / 1048576.0);
    printf("%8.2fM inactive-dirty/first-LRU\n",
        pqinactive_dirty1 * 4096.0 / 1048576.0);
    printf("%8.2fM inactive-dirty/second-LRU\n",
        pqinactive_dirty2 * 4096.0 / 1048576.0);

    printf("%8.2fM wired vnode (in buffer cache)\n",
        count_wired_vnode * 4096.0 / 1048576.0);
    printf("%8.2fM wired in-pmap (probably vnode pages also in buffer cache)\n",
        count_wired_in_pmap * 4096.0 / 1048576.0);
    printf("%8.2fM wired pgtable\n",
        count_wired_pgtable * 4096.0 / 1048576.0);
    printf("%8.2fM wired anon\n",
        count_wired_anon * 4096.0 / 1048576.0);
    printf("%8.2fM wired kernel_object\n",
        count_wired_kernel * 4096.0 / 1048576.0);

        /* Fixed kernel overheads, indented as sub-items of the above */
        printf("\t%8.2fM vm_page_array\n",
            vm_page_array_size * sizeof(struct vm_page) / 1048576.0);
        printf("\t%8.2fM buf, swbuf_mem, swbuf_kva, swbuf_raw\n",
            (nbuf + nswbuf_mem + nswbuf_kva + nswbuf_raw) *
            sizeof(struct buf) / 1048576.0);
        printf("\t%8.2fM kernel binary\n", kern_size / 1048576.0);
        printf("\t(also add in KMALLOC id kmapinfo, or loosely, vmstat -m)\n");

    printf("%8.2fM wired other (unknown object)\n",
        count_wired_obj_other * 4096.0 / 1048576.0);
    printf("%8.2fM wired other (no object, probably kernel)\n",
        count_wired_other * 4096.0 / 1048576.0);

    printf("%8.2fM WIRED TOTAL\n",
        count_wired * 4096.0 / 1048576.0);

    printf("\n");
    printf("%8.2fM anonymous (total, includes in-pmap)\n",
        count_anon * 4096.0 / 1048576.0);
    printf("%8.2fM anonymous memory in-pmap\n",
        count_anon_in_pmap * 4096.0 / 1048576.0);
    printf("%8.2fM vnode (includes wired)\n",
        count_vnode * 4096.0 / 1048576.0);
    printf("%8.2fM device\n", count_device * 4096.0 / 1048576.0);
    printf("%8.2fM phys\n", count_phys * 4096.0 / 1048576.0);
    printf("%8.2fM kernel (includes wired)\n",
        count_kernel * 4096.0 / 1048576.0);
    printf("%8.2fM unknown\n", count_unknown * 4096.0 / 1048576.0);
    printf("%8.2fM no_object, off queue (includes wired w/o object)\n",
        count_noobj_offqueue * 4096.0 / 1048576.0);
    printf("%8.2fM no_object, on non-free queue (includes wired w/o object)\n",
        count_noobj_onqueue * 4096.0 / 1048576.0);

#if 0
    /*
     * Scan the vm_page_buckets array validating all pages found
     */
    for (i = 0; i <= vm_page_hash_mask; ++i) {
        if (debugopt) {
            printf("index %d\r", i);
            fflush(stdout);
        }
        kkread(kd, (u_long)&vm_page_buckets[i], &mptr, sizeof(mptr));
        while (mptr) {
            kkread(kd, (u_long)mptr, &m, sizeof(m));
            if (m.object) {
                kkread(kd, (u_long)m.object, &obj, sizeof(obj));
                hv = ((uintptr_t)m.object + m.pindex) ^ obj.hash_rand;
                hv &= vm_page_hash_mask;
                if (i != hv)
                    printf("vm_page_buckets[%d] ((struct vm_page *)%p)"
                        " should be in bucket %d\n", i, mptr, hv);
                checkpage(kd, mptr, &m, &obj);
            } else {
                printf("vm_page_buckets[%d] ((struct vm_page *)%p)"
                        " has no object\n", i, mptr);
            }
            mptr = m.hnext;
        }
    }
#endif
    if (debugopt)
        printf("\n");
#if 0
    dumpsltrack(kd);
#endif
    return(0);
}
497
/*
 * A page with an object.
 *
 * Historically verified that the page appears in the vm_page_buckets
 * hash chain it hashes to.  The entire check (and the vm_page_buckets
 * globals it needs) is compiled out with #if 0, so this is currently
 * a no-op hook called from the main scan loop.
 */
void
checkpage(kvm_t *kd, vm_page_t mptr, vm_page_t m, struct vm_object *obj)
{
#if 0
    struct vm_page scan;
    vm_page_t scanptr;
    int hv;

    hv = ((uintptr_t)m->object + m->pindex) ^ obj->hash_rand;
    hv &= vm_page_hash_mask;
    kkread(kd, (u_long)&vm_page_buckets[hv], &scanptr, sizeof(scanptr));
    while (scanptr) {
        if (scanptr == mptr)
            break;
        kkread(kd, (u_long)scanptr, &scan, sizeof(scan));
        scanptr = scan.hnext;
    }
    if (scanptr) {
        if (debugopt > 1)
            printf("good checkpage %p bucket %d\n", mptr, hv);
    } else {
        printf("vm_page_buckets[%d] ((struct vm_page *)%p)"
                " page not found in bucket list\n", hv, mptr);
    }
#endif
}
527
/*
 * Accelerate the reading of VM pages
 */
531 #define VPCACHE_SIZE    65536
532
533 static void
534 kkread_vmpage(kvm_t *kd, u_long addr, vm_page_t m)
535 {
536     static struct vm_page vpcache[VPCACHE_SIZE];
537     static u_long vpbeg;
538     static u_long vpend;
539
540     if (addr < vpbeg || addr >= vpend) {
541         vpbeg = addr;
542         vpend = addr + VPCACHE_SIZE * sizeof(*m);
543         if (vpend > (u_long)(uintptr_t)vm_page_array +
544                     vm_page_array_size * sizeof(*m)) {
545             vpend = (u_long)(uintptr_t)vm_page_array +
546                     vm_page_array_size * sizeof(*m);
547         }
548         kkread(kd, vpbeg, vpcache, vpend - vpbeg);
549     }
550     *m = vpcache[(addr - vpbeg) / sizeof(*m)];
551 }
552
553 static void
554 kkread(kvm_t *kd, u_long addr, void *buf, size_t nbytes)
555 {
556     if (kvm_read(kd, addr, buf, nbytes) != nbytes) {
557         perror("kvm_read");
558         exit(1);
559     }
560 }
561
562 static int
563 kkread_err(kvm_t *kd, u_long addr, void *buf, size_t nbytes)
564 {
565     if (kvm_read(kd, addr, buf, nbytes) != nbytes) {
566         return 1;
567     }
568     return 0;
569 }
570
/*
 * Hash table of unique 128KB slab-zone base addresses, filled by
 * addsltrack() (currently compiled out) and reported by dumpsltrack().
 */
struct SLTrack {
        struct SLTrack *next;	/* hash-chain link */
        u_long addr;		/* 128KB-aligned zone base address */
};

#define SLHSIZE 1024
#define SLHMASK (SLHSIZE - 1)

struct SLTrack *SLHash[SLHSIZE];
580
#if 0
/*
 * Record the 128KB-aligned address derived from page m in SLHash,
 * one entry per unique address.  Only wired, mapped pages with a
 * backing object are tracked.  Compiled out along with the PG_SLAB
 * verbose reporting in main().
 */
static
void
addsltrack(vm_page_t m)
{
        struct SLTrack *slt;
        u_long addr = (m->pindex * PAGE_SIZE) & ~131071L;  /* 128KB align */
        int i;

        if (m->wire_count == 0 || (m->flags & PG_MAPPED) == 0 ||
            m->object == NULL)
                return;

        i = (addr / 131072) & SLHMASK;
        for (slt = SLHash[i]; slt; slt = slt->next) {
                if (slt->addr == addr)
                        break;
        }
        if (slt == NULL) {
                /* NOTE(review): malloc() result is not checked here */
                slt = malloc(sizeof(*slt));
                slt->addr = addr;
                slt->next = SLHash[i];
                SLHash[i] = slt;
        }
}
#endif
607
608 static
609 void
610 dumpsltrack(kvm_t *kd)
611 {
612         struct SLTrack *slt;
613         int i;
614         long total_zones = 0;
615         long full_zones = 0;
616
617         for (i = 0; i < SLHSIZE; ++i) {
618                 for (slt = SLHash[i]; slt; slt = slt->next) {
619                         SLZone z;
620
621                         if (kkread_err(kd, slt->addr, &z, sizeof(z))) {
622                                 printf("SLZone 0x%016lx not mapped\n",
623                                         slt->addr);
624                                 continue;
625                         }
626                         printf("SLZone 0x%016lx { mag=%08x cpu=%-2d NFree=%-3d "
627                                "chunksz=%-5d }\n",
628                                slt->addr,
629                                z.z_Magic,
630                                z.z_Cpu,
631                                z.z_NFree,
632                                z.z_ChunkSize
633                         );
634                         ++total_zones;
635                         if (z.z_NFree == 0)
636                                 ++full_zones;
637                 }
638         }
639         printf("FullZones/TotalZones: %ld/%ld\n", full_zones, total_zones);
640 }
641
#define HASH_SIZE       (1024*1024)
#define HASH_MASK       (HASH_SIZE - 1)

/*
 * Hash table used to count each VM object's resident pages only once
 * while scanning per-page.
 */
struct dup_entry {
        struct dup_entry *next;	/* hash-chain link */
        void    *ptr;		/* object pointer already seen */
};

struct dup_entry *dup_hash[HASH_SIZE];

/*
 * Return 1 the first time ptr is seen, 0 on every subsequent call with
 * the same pointer.  Entries are never freed (tool runs once and exits).
 *
 * Fix: the original did not check the malloc() return value and would
 * dereference NULL on allocation failure (CERT MEM32-C); exit with a
 * diagnostic instead, matching kkread()'s fatal-error style.
 */
static int
unique_object(void *ptr)
{
        struct dup_entry *hen;
        int hv;

        hv = (intptr_t)ptr ^ ((intptr_t)ptr >> 20);
        hv &= HASH_MASK;
        for (hen = dup_hash[hv]; hen; hen = hen->next) {
                if (hen->ptr == ptr)
                        return 0;
        }
        hen = malloc(sizeof(*hen));
        if (hen == NULL) {
                perror("malloc");
                exit(1);
        }
        hen->next = dup_hash[hv];
        hen->ptr = ptr;
        dup_hash[hv] = hen;

        return 1;
}