HAMMER 40A/Many: Inode/link-count sequencer.
[dragonfly.git] / sys / vfs / hammer / hammer_ioctl.c
1 /*
2  * Copyright (c) 2008 The DragonFly Project.  All rights reserved.
3  * 
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@backplane.com>
6  * 
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in
15  *    the documentation and/or other materials provided with the
16  *    distribution.
17  * 3. Neither the name of The DragonFly Project nor the names of its
18  *    contributors may be used to endorse or promote products derived
19  *    from this software without specific, prior written permission.
20  * 
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
25  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  * 
34  * $DragonFly: src/sys/vfs/hammer/hammer_ioctl.c,v 1.10 2008/04/27 00:45:37 dillon Exp $
35  */
36
37 #include "hammer.h"
38
39 static int hammer_ioc_prune(hammer_transaction_t trans, hammer_inode_t ip,
40                                 struct hammer_ioc_prune *prune);
41 static int hammer_ioc_gethistory(hammer_transaction_t trans, hammer_inode_t ip,
42                                 struct hammer_ioc_history *hist);
43
44 int
45 hammer_ioctl(hammer_inode_t ip, u_long com, caddr_t data, int fflag,
46              struct ucred *cred)
47 {
48         struct hammer_transaction trans;
49         int error;
50
51         error = suser_cred(cred, PRISON_ROOT);
52
53         hammer_start_transaction(&trans, ip->hmp);
54
55         switch(com) {
56         case HAMMERIOC_PRUNE:
57                 if (error == 0) {
58                         error = hammer_ioc_prune(&trans, ip,
59                                         (struct hammer_ioc_prune *)data);
60                 }
61                 break;
62         case HAMMERIOC_GETHISTORY:
63                 error = hammer_ioc_gethistory(&trans, ip,
64                                         (struct hammer_ioc_history *)data);
65                 break;
66         case HAMMERIOC_REBLOCK:
67                 error = hammer_ioc_reblock(&trans, ip,
68                                         (struct hammer_ioc_reblock *)data);
69                 break;
70         default:
71                 error = EOPNOTSUPP;
72                 break;
73         }
74         hammer_done_transaction(&trans);
75         return (error);
76 }
77
78 /*
79  * Iterate through the specified range of object ids and remove any
80  * deleted records that fall entirely within a prune modulo.
81  *
82  * A reverse iteration is used to prevent overlapping records from being
83  * created during the iteration due to alignments.  This also allows us
84  * to adjust alignments without blowing up the B-Tree.
85  */
86 static int check_prune(struct hammer_ioc_prune *prune, hammer_btree_elm_t elm,
87                         int *realign_cre, int *realign_del);
88 static int realign_prune(struct hammer_ioc_prune *prune, hammer_cursor_t cursor,
89                         int realign_cre, int realign_del);
90
/*
 * Implement HAMMERIOC_PRUNE: reverse-iterate the requested object-id
 * range and delete or realign historical records per the prune element
 * list (see the comment block above).  Returns 0 on success or errno.
 */
static int
hammer_ioc_prune(hammer_transaction_t trans, hammer_inode_t ip,
		 struct hammer_ioc_prune *prune)
{
	struct hammer_cursor cursor;
	hammer_btree_elm_t elm;
	int error;
	int isdir;		/* deleted record was a directory entry */
	int realign_cre;	/* prune elm index covering create_tid, or -1 */
	int realign_del;	/* prune elm index covering delete_tid, or -1 */

	/*
	 * Validate the user-supplied request before touching the B-Tree.
	 * PRUNE_ALL is mutually exclusive with an explicit element list.
	 */
	if (prune->nelms < 0 || prune->nelms > HAMMER_MAX_PRUNE_ELMS)
		return(EINVAL);
	if (prune->beg_obj_id >= prune->end_obj_id)
		return(EINVAL);
	if ((prune->flags & HAMMER_IOC_PRUNE_ALL) && prune->nelms)
		return(EINVAL);

retry:
	error = hammer_init_cursor(trans, &cursor, NULL);
	if (error) {
		hammer_done_cursor(&cursor);
		return(error);
	}
	/*
	 * Scan [beg_obj_id ... (cur_obj_id, cur_key)] in reverse.
	 * cur_obj_id/cur_key are updated below as records are visited,
	 * so an EDEADLK retry resumes where the previous pass left off.
	 * NOTE(review): the caller appears responsible for priming
	 * cur_obj_id/cur_key to the end of the range before the first
	 * call -- confirm against the userland hammer(8) utility.
	 */
	cursor.key_beg.obj_id = prune->beg_obj_id;
	cursor.key_beg.key = HAMMER_MIN_KEY;
	cursor.key_beg.create_tid = 1;
	cursor.key_beg.delete_tid = 0;
	cursor.key_beg.rec_type = HAMMER_MIN_RECTYPE;
	cursor.key_beg.obj_type = 0;

	cursor.key_end.obj_id = prune->cur_obj_id;
	cursor.key_end.key = prune->cur_key;
	cursor.key_end.create_tid = HAMMER_MAX_TID - 1;
	cursor.key_end.delete_tid = 0;
	cursor.key_end.rec_type = HAMMER_MAX_RECTYPE;
	cursor.key_end.obj_type = 0;

	cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE;
	cursor.flags |= HAMMER_CURSOR_BACKEND;

	/*
	 * Reverse iteration starting at the last record in range.
	 * ENOENT terminates the scan normally.
	 */
	error = hammer_btree_last(&cursor);
	while (error == 0) {
		elm = &cursor.node->ondisk->elms[cursor.index];
		prune->cur_obj_id = elm->base.obj_id;
		prune->cur_key = elm->base.key;

		/* track the oldest create_tid seen, reported to the caller */
		if (prune->stat_oldest_tid > elm->leaf.base.create_tid)
			prune->stat_oldest_tid = elm->leaf.base.create_tid;

		if (check_prune(prune, elm, &realign_cre, &realign_del) == 0) {
			if (hammer_debug_general & 0x0200) {
				kprintf("check %016llx %016llx: DELETE\n",
					elm->base.obj_id, elm->base.key);
			}

			/*
			 * NOTE: This can return EDEADLK
			 *
			 * Acquiring the sync lock guarantees that the
			 * operation will not cross a synchronization
			 * boundary (see the flusher).
			 */
			isdir = (elm->base.rec_type == HAMMER_RECTYPE_DIRENTRY);

			hammer_lock_ex(&trans->hmp->sync_lock);
			error = hammer_delete_at_cursor(&cursor,
							&prune->stat_bytes);
			hammer_unlock(&trans->hmp->sync_lock);
			if (error)
				break;

			if (isdir)
				++prune->stat_dirrecords;
			else
				++prune->stat_rawrecords;
		} else if (realign_cre >= 0 || realign_del >= 0) {
			/*
			 * Record is retained but one of its TIDs falls
			 * inside a prune range: snap it to the modulo
			 * boundary.  ATEDISK makes the next iteration
			 * step past this element.
			 */
			hammer_lock_ex(&trans->hmp->sync_lock);
			error = realign_prune(prune, &cursor,
					      realign_cre, realign_del);
			hammer_unlock(&trans->hmp->sync_lock);
			if (error == 0) {
				cursor.flags |= HAMMER_CURSOR_ATEDISK;
				if (hammer_debug_general & 0x0200) {
					kprintf("check %016llx %016llx: "
						"REALIGN\n",
						elm->base.obj_id,
						elm->base.key);
				}
			}
		} else {
			/* record untouched; just skip it */
			cursor.flags |= HAMMER_CURSOR_ATEDISK;
			if (hammer_debug_general & 0x0100) {
				kprintf("check %016llx %016llx: SKIP\n",
					elm->base.obj_id, elm->base.key);
			}
		}

		/*
		 * Bad hack for now, don't blow out the kernel's buffer
		 * cache.
		 */
		if (trans->hmp->locked_dirty_count > hammer_limit_dirtybufs)
			hammer_flusher_sync(trans->hmp);
		/* allow the (potentially long) operation to be interrupted */
		error = hammer_signal_check(trans->hmp);
		if (error == 0)
			error = hammer_btree_iterate_reverse(&cursor);
	}
	if (error == ENOENT)
		error = 0;
	hammer_done_cursor(&cursor);
	/* deadlocks are resolved by restarting from the saved position */
	if (error == EDEADLK)
		goto retry;
	return(error);
}
206
207 /*
208  * Check pruning list.  The list must be sorted in descending order.
209  */
210 static int
211 check_prune(struct hammer_ioc_prune *prune, hammer_btree_elm_t elm,
212             int *realign_cre, int *realign_del)
213 {
214         struct hammer_ioc_prune_elm *scan;
215         int i;
216
217         *realign_cre = -1;
218         *realign_del = -1;
219
220         /*
221          * If pruning everything remove all records with a non-zero
222          * delete_tid.
223          */
224         if (prune->flags & HAMMER_IOC_PRUNE_ALL) {
225                 if (elm->base.delete_tid != 0)
226                         return(0);
227                 return(-1);
228         }
229
230         for (i = 0; i < prune->nelms; ++i) {
231                 scan = &prune->elms[i];
232
233                 /*
234                  * Locate the scan index covering the create and delete TIDs.
235                  */
236                 if (*realign_cre < 0 &&
237                     elm->base.create_tid >= scan->beg_tid &&
238                     elm->base.create_tid < scan->end_tid) {
239                         *realign_cre = i;
240                 }
241                 if (*realign_del < 0 && elm->base.delete_tid &&
242                     elm->base.delete_tid > scan->beg_tid &&
243                     elm->base.delete_tid <= scan->end_tid) {
244                         *realign_del = i;
245                 }
246
247                 /*
248                  * Now check for loop termination.
249                  */
250                 if (elm->base.create_tid >= scan->end_tid ||
251                     elm->base.delete_tid > scan->end_tid) {
252                         break;
253                 }
254
255                 /*
256                  * Now determine if we can delete the record.
257                  */
258                 if (elm->base.delete_tid &&
259                     elm->base.create_tid >= scan->beg_tid &&
260                     elm->base.delete_tid <= scan->end_tid &&
261                     elm->base.create_tid / scan->mod_tid ==
262                     elm->base.delete_tid / scan->mod_tid) {
263                         return(0);
264                 }
265         }
266         return(-1);
267 }
268
269 /*
270  * Align the record to cover any gaps created through the deletion of
271  * records within the pruning space.  If we were to just delete the records
272  * there would be gaps which in turn would cause a snapshot that is NOT on
273  * a pruning boundary to appear corrupt to the user.  Forcing alignment
274  * of the create_tid and delete_tid for retained records 'reconnects'
275  * the previously contiguous space, making it contiguous again after the
276  * deletions.
277  *
278  * The use of a reverse iteration allows us to safely align the records and
279  * related elements without creating temporary overlaps.  XXX we should
 * add ordering dependencies for record buffers to guarantee consistency
281  * during recovery.
282  */
static int
realign_prune(struct hammer_ioc_prune *prune,
	      hammer_cursor_t cursor, int realign_cre, int realign_del)
{
	hammer_btree_elm_t elm;
	hammer_tid_t delta;	/* offset of the TID within its modulo slot */
	hammer_tid_t mod;	/* pruning modulo of the matched prune elm */
	hammer_tid_t tid;	/* realigned (rounded-up) create_tid */
	int error;

	hammer_cursor_downgrade(cursor);

	elm = &cursor->node->ondisk->elms[cursor->index];
	++prune->stat_realignments;

	/*
	 * Align the create_tid.  By doing a reverse iteration we guarantee
	 * that all records after our current record have already been
	 * aligned, allowing us to safely correct the right-hand-boundary
	 * (because no record to our right, if otherwise exactly matching,
	 * will have a create_tid to the left of our aligned create_tid).
	 *
	 * Ordering is important here XXX but disk write ordering for
	 * inter-cluster corrections is not currently guaranteed.
	 */
	error = 0;
	if (realign_cre >= 0) {
		mod = prune->elms[realign_cre].mod_tid;
		delta = elm->leaf.base.create_tid % mod;
		if (delta) {
			/* round create_tid up to the next modulo boundary */
			tid = elm->leaf.base.create_tid - delta + mod;

			/* can EDEADLK */
			error = hammer_btree_correct_rhb(cursor, tid + 1);
			if (error == 0) {
				error = hammer_btree_extract(cursor,
						     HAMMER_CURSOR_GET_RECORD);
			}
			if (error == 0) {
				/* can EDEADLK */
				error = hammer_cursor_upgrade(cursor);
			}
			if (error == 0) {
				/*
				 * Update both the on-media record and the
				 * B-Tree element inside modify/modify_done
				 * brackets so the buffers are marked dirty.
				 */
				hammer_modify_buffer(cursor->trans,
						     cursor->record_buffer,
						     NULL, 0);
				cursor->record->base.base.create_tid = tid;
				hammer_modify_buffer_done(cursor->record_buffer);
				hammer_modify_node(cursor->trans, cursor->node,
						   &elm->leaf.base.create_tid,
						   sizeof(elm->leaf.base.create_tid));
				elm->leaf.base.create_tid = tid;
				hammer_modify_node_done(cursor->node);
			}
		}
	}

	/*
	 * Align the delete_tid.  This only occurs if the record is
	 * historical and was deleted at some point.  Realigning the
	 * delete_tid does not move the record within the B-Tree but may
	 * cause it to temporarily overlap a record that has not yet been
	 * pruned.
	 */
	if (error == 0 && realign_del >= 0) {
		mod = prune->elms[realign_del].mod_tid;
		delta = elm->leaf.base.delete_tid % mod;
		if (delta) {
			error = hammer_btree_extract(cursor,
						     HAMMER_CURSOR_GET_RECORD);
			if (error == 0) {
				/* round delete_tid up to the next boundary */
				hammer_modify_node(cursor->trans, cursor->node,
						   &elm->leaf.base.delete_tid,
						   sizeof(elm->leaf.base.delete_tid));
				elm->leaf.base.delete_tid =
						elm->leaf.base.delete_tid -
						delta + mod;
				hammer_modify_node_done(cursor->node);
				hammer_modify_buffer(cursor->trans, cursor->record_buffer, &cursor->record->base.base.delete_tid, sizeof(hammer_tid_t));
				cursor->record->base.base.delete_tid =
						elm->leaf.base.delete_tid;
				hammer_modify_buffer_done(cursor->record_buffer);
			}
		}
	}
	return (error);
}
369
370 /*
371  * Iterate through an object's inode or an object's records and record
372  * modification TIDs.
373  */
374 static void add_history(hammer_inode_t ip, struct hammer_ioc_history *hist,
375                         hammer_btree_elm_t elm);
376
377 static
378 int
379 hammer_ioc_gethistory(hammer_transaction_t trans, hammer_inode_t ip,
380                       struct hammer_ioc_history *hist)
381 {
382         struct hammer_cursor cursor;
383         hammer_btree_elm_t elm;
384         int error;
385
386         /*
387          * Validate the structure and initialize for return.
388          */
389         if (hist->beg_tid > hist->end_tid)
390                 return(EINVAL);
391         if (hist->flags & HAMMER_IOC_HISTORY_ATKEY) {
392                 if (hist->key > hist->nxt_key)
393                         return(EINVAL);
394         }
395
396         hist->obj_id = ip->obj_id;
397         hist->count = 0;
398         hist->nxt_tid = hist->end_tid;
399         hist->flags &= ~HAMMER_IOC_HISTORY_NEXT_TID;
400         hist->flags &= ~HAMMER_IOC_HISTORY_NEXT_KEY;
401         hist->flags &= ~HAMMER_IOC_HISTORY_EOF;
402         hist->flags &= ~HAMMER_IOC_HISTORY_UNSYNCED;
403         if ((ip->flags & HAMMER_INODE_MODMASK) & ~HAMMER_INODE_ITIMES)
404                 hist->flags |= HAMMER_IOC_HISTORY_UNSYNCED;
405
406         /*
407          * Setup the cursor.  We can't handle undeletable records
408          * (create_tid of 0) at the moment.  A create_tid of 0 has
409          * a special meaning and cannot be specified in the cursor.
410          */
411         error = hammer_init_cursor(trans, &cursor, &ip->cache[0]);
412         if (error) {
413                 hammer_done_cursor(&cursor);
414                 return(error);
415         }
416
417         cursor.key_beg.obj_id = hist->obj_id;
418         cursor.key_beg.create_tid = hist->beg_tid;
419         cursor.key_beg.delete_tid = 0;
420         cursor.key_beg.obj_type = 0;
421         if (cursor.key_beg.create_tid == HAMMER_MIN_TID)
422                 cursor.key_beg.create_tid = 1;
423
424         cursor.key_end.obj_id = hist->obj_id;
425         cursor.key_end.create_tid = hist->end_tid;
426         cursor.key_end.delete_tid = 0;
427         cursor.key_end.obj_type = 0;
428
429         cursor.flags |= HAMMER_CURSOR_END_EXCLUSIVE;
430
431         if (hist->flags & HAMMER_IOC_HISTORY_ATKEY) {
432                 /*
433                  * key-range within the file.  For a regular file the
434                  * on-disk key represents BASE+LEN, not BASE, so the
435                  * first possible record containing the offset 'key'
436                  * has an on-disk key of (key + 1).
437                  */
438                 cursor.key_beg.key = hist->key;
439                 cursor.key_end.key = HAMMER_MAX_KEY;
440
441                 switch(ip->ino_rec.base.base.obj_type) {
442                 case HAMMER_OBJTYPE_REGFILE:
443                         ++cursor.key_beg.key;
444                         cursor.key_beg.rec_type = HAMMER_RECTYPE_DATA;
445                         break;
446                 case HAMMER_OBJTYPE_DIRECTORY:
447                         cursor.key_beg.rec_type = HAMMER_RECTYPE_DIRENTRY;
448                         break;
449                 case HAMMER_OBJTYPE_DBFILE:
450                         cursor.key_beg.rec_type = HAMMER_RECTYPE_DB;
451                         break;
452                 default:
453                         error = EINVAL;
454                         break;
455                 }
456                 cursor.key_end.rec_type = cursor.key_beg.rec_type;
457         } else {
458                 /*
459                  * The inode itself.
460                  */
461                 cursor.key_beg.key = 0;
462                 cursor.key_end.key = 0;
463                 cursor.key_beg.rec_type = HAMMER_RECTYPE_INODE;
464                 cursor.key_end.rec_type = HAMMER_RECTYPE_INODE;
465         }
466
467         error = hammer_btree_first(&cursor);
468         while (error == 0) {
469                 elm = &cursor.node->ondisk->elms[cursor.index];
470
471                 add_history(ip, hist, elm);
472                 if (hist->flags & (HAMMER_IOC_HISTORY_NEXT_TID |
473                                   HAMMER_IOC_HISTORY_NEXT_KEY |
474                                   HAMMER_IOC_HISTORY_EOF)) {
475                         break;
476                 }
477                 error = hammer_btree_iterate(&cursor);
478         }
479         if (error == ENOENT) {
480                 hist->flags |= HAMMER_IOC_HISTORY_EOF;
481                 error = 0;
482         }
483         hammer_done_cursor(&cursor);
484         return(error);
485 }
486
487 /*
488  * Add the scanned element to the ioctl return structure.  Some special
 * casing is required for regular files to accommodate how data ranges are
490  * stored on-disk.
491  */
static void
add_history(hammer_inode_t ip, struct hammer_ioc_history *hist,
	    hammer_btree_elm_t elm)
{
	/* only leaf RECORD elements carry history information */
	if (elm->base.btype != HAMMER_BTREE_TYPE_RECORD)
		return;
	if ((hist->flags & HAMMER_IOC_HISTORY_ATKEY) &&
	    ip->ino_rec.base.base.obj_type == HAMMER_OBJTYPE_REGFILE) {
		/*
		 * Adjust nxt_key.
		 *
		 * For a regular file the on-disk key is the record's
		 * ending offset (BASE+LEN), so the data range covered
		 * by this record begins at (key - data_len).
		 */
		if (hist->nxt_key > elm->leaf.base.key - elm->leaf.data_len &&
		    hist->key < elm->leaf.base.key - elm->leaf.data_len) {
			hist->nxt_key = elm->leaf.base.key - elm->leaf.data_len;
		}
		if (hist->nxt_key > elm->leaf.base.key)
			hist->nxt_key = elm->leaf.base.key;

		/*
		 * Record is beyond MAXPHYS, there won't be any more records
		 * in the iteration covering the requested offset (key).
		 */
		if (elm->leaf.base.key >= MAXPHYS &&
		    elm->leaf.base.key - MAXPHYS > hist->key) {
			hist->flags |= HAMMER_IOC_HISTORY_NEXT_KEY;
		}

		/*
		 * Data-range of record does not cover the key.
		 */
		if (elm->leaf.base.key - elm->leaf.data_len > hist->key)
			return;

	} else if (hist->flags & HAMMER_IOC_HISTORY_ATKEY) {
		/*
		 * Adjust nxt_key.  For non-regular-file objects the
		 * on-disk key is matched against the requested key
		 * directly.
		 */
		if (hist->nxt_key > elm->leaf.base.key &&
		    hist->key < elm->leaf.base.key) {
			hist->nxt_key = elm->leaf.base.key;
		}

		/*
		 * Record is beyond the requested key.
		 */
		if (elm->leaf.base.key > hist->key)
			hist->flags |= HAMMER_IOC_HISTORY_NEXT_KEY;
	}

	/*
	 * Add create_tid if it is in-bounds.  The comparison against the
	 * previous array entry suppresses consecutive duplicate TIDs.
	 */
	if ((hist->count == 0 ||
	     elm->leaf.base.create_tid != hist->tid_ary[hist->count - 1]) &&
	    elm->leaf.base.create_tid >= hist->beg_tid &&
	    elm->leaf.base.create_tid < hist->end_tid) {
		if (hist->count == HAMMER_MAX_HISTORY_ELMS) {
			/* array full: record where the caller should resume */
			hist->nxt_tid = elm->leaf.base.create_tid;
			hist->flags |= HAMMER_IOC_HISTORY_NEXT_TID;
			return;
		}
		hist->tid_ary[hist->count++] = elm->leaf.base.create_tid;
	}

	/*
	 * Add delete_tid if it is in-bounds.  Note that different portions
	 * of the history may have overlapping data ranges with different
	 * delete_tid's.  If this case occurs the delete_tid may match the
	 * create_tid of a following record.  XXX
	 *
	 *	[        ]
	 *	      [     ]
	 */
	if (elm->leaf.base.delete_tid &&
	    elm->leaf.base.delete_tid >= hist->beg_tid &&
	    elm->leaf.base.delete_tid < hist->end_tid) {
		if (hist->count == HAMMER_MAX_HISTORY_ELMS) {
			/* array full: record where the caller should resume */
			hist->nxt_tid = elm->leaf.base.delete_tid;
			hist->flags |= HAMMER_IOC_HISTORY_NEXT_TID;
			return;
		}
		hist->tid_ary[hist->count++] = elm->leaf.base.delete_tid;
	}
}
576