HAMMER 38D/Many: Undo/Synchronization and crash recovery
[dragonfly.git] / sys / vfs / hammer / hammer_inode.c
/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_inode.c,v 1.38 2008/04/26 02:54:00 dillon Exp $
 */

#include "hammer.h"
#include <sys/buf.h>
#include <sys/buf2.h>

static int hammer_unload_inode(struct hammer_inode *ip, void *data);
static void hammer_flush_inode_copysync(hammer_inode_t ip);
static int hammer_mark_record_callback(hammer_record_t rec, void *data);

/*
 * The kernel is not actively referencing this vnode but is still holding
 * it cached.
 *
 * This is called from the frontend.
 */
int
hammer_vop_inactive(struct vop_inactive_args *ap)
{
	struct hammer_inode *ip = VTOI(ap->a_vp);

	/*
	 * Degenerate case
	 */
	if (ip == NULL) {
		vrecycle(ap->a_vp);
		return(0);
	}

	/*
	 * If the inode no longer has any references we recover its
	 * in-memory resources immediately.
	 *
	 * NOTE: called from frontend, use ino_rec instead of sync_ino_rec.
	 */
	if (ip->ino_rec.ino_nlinks == 0)
		vrecycle(ap->a_vp);
	return(0);
}

/*
 * Release the vnode association.  This is typically (but not always)
 * the last reference on the inode and will flush the inode to the
 * buffer cache.
 *
 * XXX Currently our sync code only runs through inodes with vnode
 * associations, so we depend on hammer_rel_inode() to sync any inode
 * record data to the block device prior to losing the association.
 * Otherwise transactions that the user expected to be distinct by
 * doing a manual sync may be merged.
 */
int
hammer_vop_reclaim(struct vop_reclaim_args *ap)
{
	struct hammer_inode *ip;
	struct vnode *vp;

	vp = ap->a_vp;

	if ((ip = vp->v_data) != NULL) {
		vp->v_data = NULL;
		ip->vp = NULL;
		hammer_rel_inode(ip, 0);
	}
	return(0);
}

/*
 * Return a locked vnode for the specified inode.  The inode must be
 * referenced but NOT LOCKED on entry and will remain referenced on
 * return.
 *
 * Called from the frontend.
 */
int
hammer_get_vnode(struct hammer_inode *ip, int lktype, struct vnode **vpp)
{
	struct vnode *vp;
	int error = 0;

	for (;;) {
		if ((vp = ip->vp) == NULL) {
			error = getnewvnode(VT_HAMMER, ip->hmp->mp, vpp, 0, 0);
			if (error)
				break;
			hammer_lock_ex(&ip->lock);
			if (ip->vp != NULL) {
				hammer_unlock(&ip->lock);
				vp->v_type = VBAD;
				vx_put(vp);
				continue;
			}
			hammer_ref(&ip->lock);
			vp = *vpp;
			ip->vp = vp;
			vp->v_type = hammer_get_vnode_type(
					ip->ino_rec.base.base.obj_type);

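			/*
			 * Device and fifo vnodes use their own vnode ops
			 * vectors; character/block devices also need a
			 * device alias.
			 */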
			switch(ip->ino_rec.base.base.obj_type) {
			case HAMMER_OBJTYPE_CDEV:
			case HAMMER_OBJTYPE_BDEV:
				vp->v_ops = &ip->hmp->mp->mnt_vn_spec_ops;
				addaliasu(vp, ip->ino_data.rmajor,
					  ip->ino_data.rminor);
				break;
			case HAMMER_OBJTYPE_FIFO:
				vp->v_ops = &ip->hmp->mp->mnt_vn_fifo_ops;
				break;
			default:
				break;
			}

			/*
			 * Only mark as the root vnode if the ip is not
			 * historical, otherwise the VFS cache will get
			 * confused.  The other half of the special handling
			 * is in hammer_vop_nlookupdotdot().
			 */
			if (ip->obj_id == HAMMER_OBJID_ROOT &&
			    ip->obj_asof == ip->hmp->asof) {
				vp->v_flag |= VROOT;
			}

			vp->v_data = (void *)ip;
			/* vnode locked by getnewvnode() */
			/* make related vnode dirty if inode dirty? */
			hammer_unlock(&ip->lock);
			if (vp->v_type == VREG)
				vinitvmio(vp, ip->ino_rec.ino_size);
			break;
		}

		/*
		 * loop if the vget fails (aka races), or if the vp
		 * no longer matches ip->vp.
		 */
		if (vget(vp, LK_EXCLUSIVE) == 0) {
			if (vp == ip->vp)
				break;
			vput(vp);
		}
	}
	*vpp = vp;
	return(error);
}

/*
 * Acquire a HAMMER inode.  The returned inode is not locked.  These functions
 * do not attach or detach the related vnode (use hammer_get_vnode() for
 * that).
 *
 * The flags argument is only applied for newly created inodes, and only
 * certain flags are inherited.
 *
 * Called from the frontend.
 */
struct hammer_inode *
hammer_get_inode(hammer_transaction_t trans, struct hammer_node **cache,
		 u_int64_t obj_id, hammer_tid_t asof, int flags, int *errorp)
{
	hammer_mount_t hmp = trans->hmp;
	struct hammer_inode_info iinfo;
	struct hammer_cursor cursor;
	struct hammer_inode *ip;

	/*
	 * Determine if we already have an inode cached.  If we do then
	 * we are golden.
	 */
	iinfo.obj_id = obj_id;
	iinfo.obj_asof = asof;
loop:
	ip = hammer_ino_rb_tree_RB_LOOKUP_INFO(&hmp->rb_inos_root, &iinfo);
	if (ip) {
		hammer_ref(&ip->lock);
		*errorp = 0;
		return(ip);
	}

	ip = kmalloc(sizeof(*ip), M_HAMMER, M_WAITOK|M_ZERO);
	++hammer_count_inodes;
	ip->obj_id = obj_id;
	ip->obj_asof = iinfo.obj_asof;
	ip->hmp = hmp;
	ip->flags = flags & HAMMER_INODE_RO;
	ip->trunc_off = 0x7FFFFFFFFFFFFFFFLL;
	if (hmp->ronly)
		ip->flags |= HAMMER_INODE_RO;
	RB_INIT(&ip->rec_tree);
	TAILQ_INIT(&ip->bio_list);
	TAILQ_INIT(&ip->bio_alt_list);

	/*
	 * Locate the on-disk inode.
	 */
retry:
	hammer_init_cursor(trans, &cursor, cache);
	cursor.key_beg.obj_id = ip->obj_id;
	cursor.key_beg.key = 0;
	cursor.key_beg.create_tid = 0;
	cursor.key_beg.delete_tid = 0;
	cursor.key_beg.rec_type = HAMMER_RECTYPE_INODE;
	cursor.key_beg.obj_type = 0;
	cursor.asof = iinfo.obj_asof;
	cursor.flags = HAMMER_CURSOR_GET_RECORD | HAMMER_CURSOR_GET_DATA |
		       HAMMER_CURSOR_ASOF;

	*errorp = hammer_btree_lookup(&cursor);
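	/*
	 * EDEADLK means the cursor had to be released to avoid deadlocking
	 * against another operation; throw it away and retry the lookup
	 * from scratch.
	 */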
	if (*errorp == EDEADLK) {
		hammer_done_cursor(&cursor);
		goto retry;
	}

	/*
	 * On success the B-Tree lookup will hold the appropriate
	 * buffer cache buffers and provide a pointer to the requested
	 * information.  Copy the information to the in-memory inode
	 * and cache the B-Tree node to improve future operations.
	 */
	if (*errorp == 0) {
		ip->ino_rec = cursor.record->inode;
		ip->ino_data = cursor.data->inode;
		hammer_cache_node(cursor.node, &ip->cache[0]);
		if (cache)
			hammer_cache_node(cursor.node, cache);
	}

	/*
	 * On success load the inode's record and data and insert the
	 * inode into the B-Tree.  It is possible to race another lookup
	 * insertion of the same inode so deal with that condition too.
	 *
	 * The cursor's locked node interlocks against others creating and
	 * destroying ip while we were blocked.
	 */
	if (*errorp == 0) {
		hammer_ref(&ip->lock);
		if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) {
			hammer_uncache_node(&ip->cache[0]);
			hammer_uncache_node(&ip->cache[1]);
			KKASSERT(ip->lock.refs == 1);
			--hammer_count_inodes;
			kfree(ip, M_HAMMER);
			hammer_done_cursor(&cursor);
			goto loop;
		}
		ip->flags |= HAMMER_INODE_ONDISK;
	} else {
		--hammer_count_inodes;
		kfree(ip, M_HAMMER);
		ip = NULL;
	}
	hammer_done_cursor(&cursor);
	return (ip);
}

/*
 * Create a new filesystem object, returning the inode in *ipp.  The
 * returned inode will be referenced and shared-locked.  The caller
 * must unlock and release it when finished.
 *
 * The inode is created in-memory.
 */
int
hammer_create_inode(hammer_transaction_t trans, struct vattr *vap,
		    struct ucred *cred, hammer_inode_t dip,
		    struct hammer_inode **ipp)
{
	hammer_mount_t hmp;
	hammer_inode_t ip;
	uid_t xuid;

	hmp = trans->hmp;
	ip = kmalloc(sizeof(*ip), M_HAMMER, M_WAITOK|M_ZERO);
	++hammer_count_inodes;
	ip->obj_id = hammer_alloc_tid(trans);
	KKASSERT(ip->obj_id != 0);
	ip->obj_asof = hmp->asof;
	ip->hmp = hmp;
	ip->flush_state = HAMMER_FST_IDLE;
	ip->flags = HAMMER_INODE_DDIRTY | HAMMER_INODE_RDIRTY |
		    HAMMER_INODE_ITIMES;

	RB_INIT(&ip->rec_tree);
	TAILQ_INIT(&ip->bio_list);
	TAILQ_INIT(&ip->bio_alt_list);

	ip->ino_rec.ino_atime = trans->time;
	ip->ino_rec.ino_mtime = trans->time;
	ip->ino_rec.ino_size = 0;
	ip->ino_rec.ino_nlinks = 0;
	/* XXX */
	ip->ino_rec.base.base.btype = HAMMER_BTREE_TYPE_RECORD;
	ip->ino_rec.base.base.obj_id = ip->obj_id;
	ip->ino_rec.base.base.key = 0;
	ip->ino_rec.base.base.create_tid = 0;
	ip->ino_rec.base.base.delete_tid = 0;
	ip->ino_rec.base.base.rec_type = HAMMER_RECTYPE_INODE;
	ip->ino_rec.base.base.obj_type = hammer_get_obj_type(vap->va_type);

	ip->ino_data.version = HAMMER_INODE_DATA_VERSION;
	ip->ino_data.mode = vap->va_mode;
	ip->ino_data.ctime = trans->time;
	ip->ino_data.parent_obj_id = (dip) ? dip->ino_rec.base.base.obj_id : 0;

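	/*
	 * Device nodes record their major/minor numbers in the inode data.
	 */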
	switch(ip->ino_rec.base.base.obj_type) {
	case HAMMER_OBJTYPE_CDEV:
	case HAMMER_OBJTYPE_BDEV:
		ip->ino_data.rmajor = vap->va_rmajor;
		ip->ino_data.rminor = vap->va_rminor;
		break;
	default:
		break;
	}

	/*
	 * Calculate default uid/gid and overwrite with information from
	 * the vap.
	 */
	xuid = hammer_to_unix_xid(&dip->ino_data.uid);
	ip->ino_data.gid = dip->ino_data.gid;
	xuid = vop_helper_create_uid(hmp->mp, dip->ino_data.mode, xuid, cred,
				     &vap->va_mode);
	ip->ino_data.mode = vap->va_mode;

	if (vap->va_vaflags & VA_UID_UUID_VALID)
		ip->ino_data.uid = vap->va_uid_uuid;
	else if (vap->va_uid != (uid_t)VNOVAL)
		hammer_guid_to_uuid(&ip->ino_data.uid, xuid);
	if (vap->va_vaflags & VA_GID_UUID_VALID)
		ip->ino_data.gid = vap->va_gid_uuid;
	else if (vap->va_gid != (gid_t)VNOVAL)
		hammer_guid_to_uuid(&ip->ino_data.gid, vap->va_gid);

	hammer_ref(&ip->lock);
	hammer_lock_sh(&ip->lock);
	if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) {
		hammer_unref(&ip->lock);
		panic("hammer_create_inode: duplicate obj_id %llx", ip->obj_id);
	}
	*ipp = ip;
	return(0);
}

/*
 * Called by hammer_sync_inode().
 */
static int
hammer_update_inode(hammer_transaction_t trans, hammer_inode_t ip)
{
	struct hammer_cursor cursor;
	hammer_record_t record;
	int error;

	/*
	 * Locate the record on-disk and mark it as deleted.  Both the B-Tree
	 * node and the record must be marked deleted.  The record may or
	 * may not be physically deleted, depending on the retention policy.
	 *
	 * If the inode has already been deleted on-disk we have nothing
	 * to do.
	 *
	 * XXX Update the inode record and data in-place if the retention
	 * policy allows it.
	 */
retry:
	error = 0;

	if ((ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DELONDISK)) ==
	    HAMMER_INODE_ONDISK) {
		hammer_init_cursor(trans, &cursor, &ip->cache[0]);
		cursor.key_beg.obj_id = ip->obj_id;
		cursor.key_beg.key = 0;
		cursor.key_beg.create_tid = 0;
		cursor.key_beg.delete_tid = 0;
		cursor.key_beg.rec_type = HAMMER_RECTYPE_INODE;
		cursor.key_beg.obj_type = 0;
		cursor.asof = ip->obj_asof;
		cursor.flags |= HAMMER_CURSOR_GET_RECORD | HAMMER_CURSOR_ASOF;
		cursor.flags |= HAMMER_CURSOR_BACKEND;

		error = hammer_btree_lookup(&cursor);
		if (error) {
			kprintf("error %d\n", error);
			Debugger("hammer_update_inode");
		}

		if (error == 0) {
			error = hammer_ip_delete_record(&cursor, trans->tid);
			if (error && error != EDEADLK) {
				kprintf("error %d\n", error);
				Debugger("hammer_update_inode2");
			}
			if (error == 0)
				ip->flags |= HAMMER_INODE_DELONDISK;
			hammer_cache_node(cursor.node, &ip->cache[0]);
		}
		hammer_done_cursor(&cursor);
		if (error == EDEADLK)
			goto retry;
	}

	/*
	 * Write out a new record if the in-memory inode is not marked
	 * as having been deleted.  Update our inode statistics if this
	 * is the first application of the inode on-disk.
	 *
	 * If the inode has been deleted permanently, HAMMER_INODE_DELONDISK
	 * will remain set and prevent further updates.
	 */
	if (error == 0 && (ip->flags & HAMMER_INODE_DELETED) == 0) {
		record = hammer_alloc_mem_record(ip);
		record->state = HAMMER_FST_FLUSH;
		record->rec.inode = ip->sync_ino_rec;
		record->rec.inode.base.base.create_tid = trans->tid;
		record->rec.inode.base.data_len = sizeof(ip->sync_ino_data);
		record->data = (void *)&ip->sync_ino_data;
		error = hammer_ip_sync_record(trans, record);
		if (error) {
			kprintf("error %d\n", error);
			Debugger("hammer_update_inode3");
		}
		record->flags |= HAMMER_RECF_DELETED_FE;
		hammer_rel_mem_record(record);
		if (error == 0) {
			ip->sync_flags &= ~(HAMMER_INODE_RDIRTY |
					    HAMMER_INODE_DDIRTY |
					    HAMMER_INODE_ITIMES);
			ip->flags &= ~HAMMER_INODE_DELONDISK;
			if ((ip->flags & HAMMER_INODE_ONDISK) == 0) {
				hammer_modify_volume(trans, trans->rootvol,
						     NULL, 0);
				++ip->hmp->rootvol->ondisk->vol0_stat_inodes;
				hammer_modify_volume_done(trans->rootvol);
				ip->flags |= HAMMER_INODE_ONDISK;
			}
		}
	}
	if (error == 0 && (ip->flags & HAMMER_INODE_DELETED)) {
		/*
		 * Clean out any left-over flags if the inode has been
		 * destroyed.
		 */
		ip->sync_flags &= ~(HAMMER_INODE_RDIRTY |
				    HAMMER_INODE_DDIRTY |
				    HAMMER_INODE_ITIMES);
	}
	return(error);
}

/*
 * Update only the itimes fields.  This is done no-historically.  The
 * record is updated in-place on the disk.
 */
static int
hammer_update_itimes(hammer_transaction_t trans, hammer_inode_t ip)
{
	struct hammer_cursor cursor;
	struct hammer_inode_record *rec;
	int error;

retry:
	error = 0;
	if ((ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DELONDISK)) ==
	    HAMMER_INODE_ONDISK) {
		hammer_init_cursor(trans, &cursor, &ip->cache[0]);
		cursor.key_beg.obj_id = ip->obj_id;
		cursor.key_beg.key = 0;
		cursor.key_beg.create_tid = 0;
		cursor.key_beg.delete_tid = 0;
		cursor.key_beg.rec_type = HAMMER_RECTYPE_INODE;
		cursor.key_beg.obj_type = 0;
		cursor.asof = ip->obj_asof;
		cursor.flags |= HAMMER_CURSOR_GET_RECORD | HAMMER_CURSOR_ASOF;
		cursor.flags |= HAMMER_CURSOR_BACKEND;

		error = hammer_btree_lookup(&cursor);
		if (error) {
			kprintf("error %d\n", error);
			Debugger("hammer_update_itimes1");
		}
		if (error == 0) {
			/*
			 * Do not generate UNDO records for atime/mtime
			 * updates.
			 */
			rec = &cursor.record->inode;
			hammer_modify_buffer(cursor.trans, cursor.record_buffer,
					     NULL, 0);
			rec->ino_atime = ip->sync_ino_rec.ino_atime;
			rec->ino_mtime = ip->sync_ino_rec.ino_mtime;
			hammer_modify_buffer_done(cursor.record_buffer);
			ip->sync_flags &= ~HAMMER_INODE_ITIMES;
			/* XXX recalculate crc */
			hammer_cache_node(cursor.node, &ip->cache[0]);
		}
		hammer_done_cursor(&cursor);
		if (error == EDEADLK)
			goto retry;
	}
	return(error);
}

/*
 * Release a reference on an inode.  If asked to flush the last release
 * will flush the inode.
 *
 * On the last reference we queue the inode to the flusher for its final
 * disposition.
 */
void
hammer_rel_inode(struct hammer_inode *ip, int flush)
{
	/*
	 * Handle disposition when dropping the last ref.
	 */
	while (ip->lock.refs == 1) {
		if (curthread == ip->hmp->flusher_td) {
			/*
			 * We are the flusher, do any required flushes
			 * before unloading the inode.
			 */
			int error = 0;

			KKASSERT(ip->flush_state == HAMMER_FST_IDLE);
			while (error == 0 &&
			       (ip->flags & HAMMER_INODE_MODMASK)) {
				hammer_ref(&ip->lock);
				hammer_flush_inode_copysync(ip);
				error = hammer_sync_inode(ip, 1);
				hammer_flush_inode_done(ip);
			}
			if (error)
				kprintf("hammer_sync_inode failed error %d\n",
					error);
			if (ip->lock.refs > 1)
				continue;
			hammer_unload_inode(ip, (void *)MNT_NOWAIT);
			return;
		}
		if ((ip->flags & HAMMER_INODE_MODMASK) == 0) {
			hammer_unload_inode(ip, (void *)MNT_NOWAIT);
			return;
		}

		/*
		 * Hand the inode over to the flusher, which will
		 * add another ref to it.
		 */
		if (++ip->hmp->reclaim_count > 256) {
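			/*
			 * Every 256 reclaims, also signal the flusher
			 * thread so the flush list is processed promptly.
			 */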
			ip->hmp->reclaim_count = 0;
			hammer_flush_inode(ip, HAMMER_FLUSH_FORCE |
					       HAMMER_FLUSH_SIGNAL);
		} else {
			hammer_flush_inode(ip, HAMMER_FLUSH_FORCE);
		}
		/* retry */
	}

	/*
	 * Inode still has multiple refs
	 */
	if (flush && ip->flush_state == HAMMER_FST_IDLE &&
	    curthread != ip->hmp->flusher_td) {
		/*
		 * Flush requested, make the inode visible to the flusher.
		 * Flush_list inherits our reference (which may or may not
		 * be the last reference).
		 *
		 * Only the flusher can actually destroy the inode,
		 * there had better still be a ref on it if we aren't
		 * it.
		 */
		hammer_flush_inode(ip, 0);
		KKASSERT(ip->lock.refs > 1);
		hammer_unref(&ip->lock);
	} else {
		/*
		 * Just dereference, additional references still remain
		 */
		hammer_unref(&ip->lock);
	}
}

/*
 * Unload and destroy the specified inode.  Must be called with one remaining
 * reference.  The reference is disposed of.
 *
 * This can only be called in the context of the flusher.
 */
static int
hammer_unload_inode(struct hammer_inode *ip, void *data)
{

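	/*
	 * The inode must be completely quiescent: no vnode, no active
	 * cursors, no dirty state, and no queued records or BIOs.
	 */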
	KASSERT(ip->lock.refs == 1,
		("hammer_unload_inode: %d refs\n", ip->lock.refs));
	KKASSERT(ip->vp == NULL);
	KKASSERT(ip->flush_state == HAMMER_FST_IDLE);
	KKASSERT(ip->cursor_ip_refs == 0);
	KKASSERT((ip->flags & HAMMER_INODE_MODMASK) == 0);

	KKASSERT(RB_EMPTY(&ip->rec_tree));
	KKASSERT(TAILQ_EMPTY(&ip->bio_list));
	KKASSERT(TAILQ_EMPTY(&ip->bio_alt_list));

	RB_REMOVE(hammer_ino_rb_tree, &ip->hmp->rb_inos_root, ip);

	hammer_uncache_node(&ip->cache[0]);
	hammer_uncache_node(&ip->cache[1]);
	--hammer_count_inodes;
	kfree(ip, M_HAMMER);

	return(0);
}

/*
 * A transaction has modified an inode, requiring updates as specified by
 * the passed flags.
 *
 * HAMMER_INODE_RDIRTY:	 Inode record has been updated
 * HAMMER_INODE_DDIRTY:	 Inode data has been updated
 * HAMMER_INODE_XDIRTY:	 Dirty frontend buffer cache buffer strategized
 * HAMMER_INODE_DELETED: Inode record/data must be deleted
 * HAMMER_INODE_ITIMES:	 mtime/atime has been updated
 */
void
hammer_modify_inode(hammer_transaction_t trans, hammer_inode_t ip, int flags)
{
	KKASSERT ((ip->flags & HAMMER_INODE_RO) == 0 ||
		  (flags & (HAMMER_INODE_RDIRTY|HAMMER_INODE_DDIRTY|
			    HAMMER_INODE_XDIRTY|
			    HAMMER_INODE_DELETED|HAMMER_INODE_ITIMES)) == 0);

	ip->flags |= flags;
}

/*
 * Flush an inode.  If the inode is already being flushed wait for
 * it to complete, then flush it again.  The interlock is against
 * front-end transactions, the backend flusher does not hold the lock.
 *
 * The flusher must distinguish between the records that are part of the
 * flush and any new records created in parallel with the flush.  The
 * inode data and truncation fields are also copied.  BIOs are a bit more
 * troublesome because some dirty buffers may not have been queued yet.
 */
void
hammer_flush_inode(hammer_inode_t ip, int flags)
{
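	/*
	 * If a flush is already in progress, ask for a re-flush; the
	 * current flush will requeue the inode when it completes.
	 */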
	if (ip->flush_state != HAMMER_FST_IDLE &&
	    (ip->flags & HAMMER_INODE_MODMASK)) {
		ip->flags |= HAMMER_INODE_REFLUSH;
		return;
	}
	hammer_lock_ex(&ip->lock);
	if (ip->flush_state == HAMMER_FST_IDLE) {
		if ((ip->flags & HAMMER_INODE_MODMASK) ||
		    (flags & HAMMER_FLUSH_FORCE)) {
			hammer_ref(&ip->lock);

			hammer_flush_inode_copysync(ip);
			/*
			 * Move the inode to the flush list and add a ref to
			 * it representing it on the list.
			 */
			TAILQ_INSERT_TAIL(&ip->hmp->flush_list, ip, flush_entry);
			if (flags & HAMMER_FLUSH_SIGNAL)
				hammer_flusher_async(ip->hmp);
		}
	}
	hammer_unlock(&ip->lock);
}

/*
 * Helper routine to copy the frontend synchronization state to the backend.
 * This routine may be called by either the frontend or the backend.
 */
static void
hammer_flush_inode_copysync(hammer_inode_t ip)
{
	int error;

	ip->flush_state = HAMMER_FST_SETUP;

	/*
	 * Sync the buffer cache.  This will queue the BIOs.  If called
	 * from the context of the flusher the BIO's are thrown into bio_list
	 * regardless of ip->flush_state.
	 */
	if (ip->vp != NULL)
		error = vfsync(ip->vp, MNT_NOWAIT, 1, NULL, NULL);
	else
		error = 0;

	/*
	 * This freezes strategy writes, any further BIOs will be
	 * queued to alt_bio (unless we are the flusher).
	 */
	ip->flush_state = HAMMER_FST_FLUSH;

	/*
	 * Snapshot the state of the inode for the backend flusher.
	 *
	 * The truncation must be retained in the frontend until after
	 * we've actually performed the record deletion.
	 */
	ip->sync_flags = (ip->flags & HAMMER_INODE_MODMASK);
	ip->sync_trunc_off = ip->trunc_off;
	ip->sync_ino_rec = ip->ino_rec;
	ip->sync_ino_data = ip->ino_data;
	ip->flags &= ~HAMMER_INODE_MODMASK |
		     HAMMER_INODE_TRUNCATED | HAMMER_INODE_BUFS;

	/*
	 * Fix up the dirty buffer status.
	 */
	if (ip->vp == NULL || RB_ROOT(&ip->vp->v_rbdirty_tree) == NULL)
		ip->flags &= ~HAMMER_INODE_BUFS;
	if (TAILQ_FIRST(&ip->bio_list))
		ip->sync_flags |= HAMMER_INODE_BUFS;
	else
		ip->sync_flags &= ~HAMMER_INODE_BUFS;

	/*
	 * Set the state for the inode's in-memory records.
	 */
	RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL,
		hammer_mark_record_callback, NULL);
}

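/*
 * RB_SCAN callback for hammer_flush_inode_copysync(): move any in-memory
 * record not already deleted by the frontend into the FLUSH state and
 * reference it for the backend flusher.
 */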
static int
hammer_mark_record_callback(hammer_record_t rec, void *data)
{
	if ((rec->flags & HAMMER_RECF_DELETED_FE) == 0) {
		rec->state = HAMMER_FST_FLUSH;
		hammer_ref(&rec->lock);
	}
	return(0);
}

/*
 * Wait for a previously queued flush to complete.
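 *
 * Setting HAMMER_INODE_FLUSHW asks hammer_flush_inode_done() to issue a
 * wakeup() on &ip->flags when the flush finishes.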
 */
void
hammer_wait_inode(hammer_inode_t ip)
{
	while (ip->flush_state == HAMMER_FST_FLUSH) {
		ip->flags |= HAMMER_INODE_FLUSHW;
		tsleep(&ip->flags, 0, "hmrwin", 0);
	}
}

/*
 * Called by the backend code when a flush has been completed.
 * The inode has already been removed from the flush list.
 *
 * A pipelined flush can occur, in which case we must re-enter the
 * inode on the list and re-copy its fields.
 */
void
hammer_flush_inode_done(hammer_inode_t ip)
{
	struct bio *bio;

	KKASSERT(ip->flush_state == HAMMER_FST_FLUSH);

	if (ip->sync_flags)
		kprintf("ip %p leftover sync_flags %08x\n", ip, ip->sync_flags);
	ip->flags |= ip->sync_flags;
	ip->flush_state = HAMMER_FST_IDLE;

	/*
	 * Reflush any BIOs that wound up in the alt list.  Our inode will
	 * also wind up at the end of the flusher's list.
	 */
	while ((bio = TAILQ_FIRST(&ip->bio_alt_list)) != NULL) {
		TAILQ_REMOVE(&ip->bio_alt_list, bio, bio_act);
		TAILQ_INSERT_TAIL(&ip->bio_list, bio, bio_act);
		ip->flags |= HAMMER_INODE_XDIRTY;
		ip->flags |= HAMMER_INODE_REFLUSH;
		kprintf("rebio %p ip %p @%016llx,%d\n", bio, ip, bio->bio_offset, bio->bio_buf->b_bufsize);
	}

	/*
	 * If the frontend made more changes and requested another flush,
	 * do it.
	 */
	if (ip->flags & HAMMER_INODE_REFLUSH) {
		ip->flags &= ~HAMMER_INODE_REFLUSH;
		hammer_flush_inode(ip, 0);
	} else {
		if (ip->flags & HAMMER_INODE_FLUSHW) {
			ip->flags &= ~HAMMER_INODE_FLUSHW;
			wakeup(&ip->flags);
		}
	}
	hammer_rel_inode(ip, 0);
}

/*
 * Called from hammer_sync_inode() to synchronize in-memory records
 * to the media.
 */
static int
hammer_sync_record_callback(hammer_record_t record, void *data)
{
	hammer_transaction_t trans = data;
	int error;

	/*
	 * Skip records that do not belong to the current flush.  Records
	 * belonging to the flush will have been referenced for us.
	 *
	 * Skip records that were deleted by the backend itself.  Records
	 * deleted by the frontend after their state has changed to FLUSH
	 * are not considered to be deleted by the backend.
	 *
	 * XXX special delete-on-disk records can be deleted by the backend
	 * prior to the sync due to a truncation operation.  This is kinda
	 * a hack to deal with it.
	 */
	if (record->state != HAMMER_FST_FLUSH)
		return(0);
	if (record->flags & HAMMER_RECF_DELETED_BE) {
		hammer_flush_record_done(record);
		return(0);
	}

	/*
	 * Assign the create_tid for new records.  Deletions already
	 * have the record's entire key properly set up.
	 */
	if ((record->flags & HAMMER_RECF_DELETE_ONDISK) == 0)
		record->rec.inode.base.base.create_tid = trans->tid;
	error = hammer_ip_sync_record(trans, record);

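	/*
	 * A negative return value aborts the RB_SCAN; the caller flips the
	 * sign back to a positive errno.
	 */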
	if (error) {
		error = -error;
		if (error != -ENOSPC) {
			kprintf("hammer_sync_record_callback: sync failed rec "
				"%p, error %d\n", record, error);
			Debugger("sync failed rec");
		}
	}
	hammer_flush_record_done(record);
	return(error);
}

/*
 * XXX error handling
 */
int
hammer_sync_inode(hammer_inode_t ip, int handle_delete)
{
	struct hammer_transaction trans;
	struct bio *bio;
	int error;

	if ((ip->sync_flags & HAMMER_INODE_MODMASK) == 0 &&
	    handle_delete == 0) {
		return(0);
	}

	hammer_lock_ex(&ip->lock);

	hammer_start_transaction_fls(&trans, ip->hmp);

	/*
	 * Sync inode deletions and truncations.
	 */
	if (ip->sync_ino_rec.ino_nlinks == 0 && handle_delete &&
	    (ip->flags & HAMMER_INODE_GONE) == 0) {
		/*
		 * Handle the case where the inode has been completely deleted
		 * and is no longer referenceable from the filesystem
		 * namespace.
		 *
		 * NOTE: We do not set the RDIRTY flag when updating the
		 * delete_tid, setting HAMMER_INODE_DELETED takes care of it.
		 */

		ip->flags |= HAMMER_INODE_GONE | HAMMER_INODE_DELETED;
		ip->flags &= ~HAMMER_INODE_TRUNCATED;
		ip->sync_flags &= ~HAMMER_INODE_TRUNCATED;
		if (ip->vp)
			vtruncbuf(ip->vp, 0, HAMMER_BUFSIZE);
		error = hammer_ip_delete_range_all(&trans, ip);
		if (error)
			Debugger("hammer_ip_delete_range_all errored");

		/*
		 * Sanity check.  The only records that remain should be
		 * marked for back-end deletion.
		 */
		{
			hammer_record_t rec;

			RB_FOREACH(rec, hammer_rec_rb_tree, &ip->rec_tree) {
				KKASSERT(rec->flags & HAMMER_RECF_DELETED_BE);
			}
		}

		/*
		 * Set delete_tid in both the frontend and backend
		 * copy of the inode record.
		 */
		ip->ino_rec.base.base.delete_tid = trans.tid;
		ip->sync_ino_rec.base.base.delete_tid = trans.tid;

		/*
		 * Indicate that the inode has/is-being deleted.
		 */
		ip->flags |= HAMMER_INODE_DELETED;
		hammer_modify_inode(&trans, ip, HAMMER_INODE_RDIRTY);
		hammer_modify_volume(&trans, trans.rootvol, NULL, 0);
		--ip->hmp->rootvol->ondisk->vol0_stat_inodes;
		hammer_modify_volume_done(trans.rootvol);
	} else if (ip->sync_flags & HAMMER_INODE_TRUNCATED) {
		/*
		 * Interlock trunc_off.  The VOP front-end may continue to
		 * make adjustments to it while we are blocked.
		 */
		off_t trunc_off;
		off_t aligned_trunc_off;

		trunc_off = ip->sync_trunc_off;
		aligned_trunc_off = (trunc_off + HAMMER_BUFMASK) &
				    ~HAMMER_BUFMASK64;

		/*
		 * Delete any whole blocks on-media.  The front-end has
		 * already cleaned out any partial block and made it
		 * pending.  The front-end may have updated trunc_off
		 * while we were blocked so do not just unconditionally
		 * set it to the maximum offset.
		 */
		kprintf("sync truncation range @ %016llx\n", aligned_trunc_off);
		error = hammer_ip_delete_range(&trans, ip,
						aligned_trunc_off,
						0x7FFFFFFFFFFFFFFFLL);
		if (error)
			Debugger("hammer_ip_delete_range errored");
		ip->sync_flags &= ~HAMMER_INODE_TRUNCATED;
		if (ip->trunc_off >= trunc_off) {
			ip->trunc_off = 0x7FFFFFFFFFFFFFFFLL;
			ip->flags &= ~HAMMER_INODE_TRUNCATED;
		}
	}

	error = 0;	/* XXX vfsync used to be here */

	/*
	 * Flush any queued BIOs.
	 */
	while ((bio = TAILQ_FIRST(&ip->bio_list)) != NULL) {
		TAILQ_REMOVE(&ip->bio_list, bio, bio_act);
#if 0
		kprintf("dowrite %016llx ip %p bio %p @ %016llx\n", trans.tid, ip, bio, bio->bio_offset);
#endif
		hammer_dowrite(&trans, ip, bio);
	}
	ip->sync_flags &= ~HAMMER_INODE_BUFS;

	/*
	 * Now sync related records.
	 */
	for (;;) {
		error = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL,
				hammer_sync_record_callback, &trans);
		KKASSERT(error <= 0);
		if (error < 0)
			error = -error;
		break;
	}
	if (RB_EMPTY(&ip->rec_tree) && TAILQ_EMPTY(&ip->bio_list))
		ip->sync_flags &= ~HAMMER_INODE_XDIRTY;
	if (error)
		Debugger("RB_SCAN errored");

	/*
	 * Now update the inode's on-disk inode-data and/or on-disk record.
	 * DELETED and ONDISK are managed only in ip->flags.
	 */
	switch(ip->flags & (HAMMER_INODE_DELETED | HAMMER_INODE_ONDISK)) {
	case HAMMER_INODE_DELETED|HAMMER_INODE_ONDISK:
		/*
		 * If deleted and on-disk, don't set any additional flags.
		 * the delete flag takes care of things.
		 */
		break;
	case HAMMER_INODE_DELETED:
		/*
		 * Take care of the case where a deleted inode was never
		 * flushed to the disk in the first place.
		 */
		ip->sync_flags &= ~(HAMMER_INODE_RDIRTY|HAMMER_INODE_DDIRTY|
				    HAMMER_INODE_XDIRTY|HAMMER_INODE_ITIMES);
		while (RB_ROOT(&ip->rec_tree)) {
			hammer_record_t rec = RB_ROOT(&ip->rec_tree);
			hammer_ref(&rec->lock);
			KKASSERT(rec->lock.refs == 1);
			rec->flags |= HAMMER_RECF_DELETED_FE |
				      HAMMER_RECF_DELETED_BE;
			hammer_rel_mem_record(rec);
		}
		break;
	case HAMMER_INODE_ONDISK:
		/*
		 * If already on-disk, do not set any additional flags.
		 */
		break;
	default:
		/*
		 * If not on-disk and not deleted, set both dirty flags
		 * to force an initial record to be written.  Also set
		 * the create_tid for the inode.
		 *
		 * Set create_tid in both the frontend and backend
		 * copy of the inode record.
		 */
		ip->ino_rec.base.base.create_tid = trans.tid;
		ip->sync_ino_rec.base.base.create_tid = trans.tid;
		ip->sync_flags |= HAMMER_INODE_RDIRTY | HAMMER_INODE_DDIRTY;
		break;
	}

	/*
	 * If RDIRTY or DDIRTY is set, write out a new record.  If the inode
	 * is already on-disk the old record is marked as deleted.
	 *
	 * If DELETED is set hammer_update_inode() will delete the existing
	 * record without writing out a new one.
	 *
	 * If *ONLY* the ITIMES flag is set we can update the record in-place.
	 */
	if (ip->flags & HAMMER_INODE_DELETED) {
		error = hammer_update_inode(&trans, ip);
	} else
	if ((ip->sync_flags & (HAMMER_INODE_RDIRTY | HAMMER_INODE_DDIRTY |
			       HAMMER_INODE_ITIMES)) == HAMMER_INODE_ITIMES) {
		error = hammer_update_itimes(&trans, ip);
	} else
	if (ip->sync_flags & (HAMMER_INODE_RDIRTY | HAMMER_INODE_DDIRTY |
			      HAMMER_INODE_ITIMES)) {
		error = hammer_update_inode(&trans, ip);
	}
	if (error)
		Debugger("hammer_update_itimes/inode errored");

	/*
	 * Save the TID we used to sync the inode with to make sure we
	 * do not improperly reuse it.
	 */
	hammer_unlock(&ip->lock);
	hammer_done_transaction(&trans);
	return(error);
}
1094}
1095