'''
mkfs.py - this file is part of S3QL (http://s3ql.googlecode.com)

Copyright (C) 2008-2009 Nikolaus Rath <Nikolaus@rath.org>

This program can be distributed under the terms of the GNU LGPL.
'''

from __future__ import division, print_function, absolute_import

import sys
import os
from getpass import getpass
import shutil
import logging
import cPickle as pickle
from s3ql import CURRENT_FS_REV
from s3ql.common import (get_backend, get_bucket_home, setup_logging,
                         QuietError, dump_metadata, create_tables,
                         init_tables)
from s3ql.parse_args import ArgumentParser
from s3ql.database import Connection
from s3ql.backends.boto.s3.connection import Location
from s3ql.backends import s3
import time
import tempfile

log = logging.getLogger("mkfs")

def parse_args(args):

    parser = ArgumentParser(
        description="Initializes an S3QL file system")

    parser.add_homedir()
    parser.add_debug_modules()
    parser.add_quiet()
    parser.add_version()
    parser.add_storage_url()
    parser.add_ssl()
    
    parser.add_argument("--s3-location", default='EU', metavar='<name>',
                      choices=('EU', 'us-west-1', 'us-standard', 'ap-southeast-1'),
                      help="Storage location for new S3 buckets. Allowed values: `EU`, "
                           '`us-west-1`, `ap-southeast-1`, or `us-standard`. '
                           '(default: %(default)s)')
    parser.add_argument("-L", default='', help="Filesystem label",
                      dest="label", metavar='<name>',)
    parser.add_argument("--blocksize", type=int, default=10240, metavar='<size>',
                      help="Maximum block size in KB (default: %(default)d)")
    parser.add_argument("--plain", action="store_true", default=False,
                      help="Create unencrypted file system.")
    parser.add_argument("--force", action="store_true", default=False,
                        help="Overwrite any existing data.")

    options = parser.parse_args(args)

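    # 'us-standard' corresponds to boto's Location.DEFAULT, so translate
    # the user-facing name to the constant that boto expects.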
    if options.s3_location == 'us-standard':
        options.s3_location = Location.DEFAULT

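    # The settings directory will hold the database, cache and
    # authentication data, so make it readable only by the owner.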
    if not os.path.exists(options.homedir):
        os.mkdir(options.homedir, 0700)
        
    return options

def main(args=None):

    if args is None:
        args = sys.argv[1:]

    options = parse_args(args)
    setup_logging(options)

    with get_backend(options.storage_url, options.homedir,
                     options.ssl) as (conn, bucketname):
        if conn.bucket_exists(bucketname):
            if not options.force:
                raise QuietError("Bucket already exists! Use --force to overwrite")
            
            bucket = conn.get_bucket(bucketname)
            log.info('Bucket already exists. Purging old file system data...')
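            # On backends without read-after-delete consistency, removed
            # objects may remain visible for a while.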
            if not bucket.read_after_delete_consistent():
                log.info('Please note that the new file system may appear inconsistent\n'
                         'for a while until the removals have propagated through the backend.')
            bucket.clear()
            
        elif isinstance(conn, s3.Connection):
            bucket = conn.create_bucket(bucketname, location=options.s3_location)
        else:
            bucket = conn.create_bucket(bucketname)

        if not options.plain:
            if sys.stdin.isatty():
                wrap_pw = getpass("Enter encryption password: ")
                if wrap_pw != getpass("Confirm encryption password: "):
                    raise QuietError("Passwords don't match.")
            else:
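                # stdin is not a terminal, so read the passphrase from it;
                # this allows mkfs to be driven non-interactively.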
                wrap_pw = sys.stdin.readline().rstrip()

            # Generate data encryption passphrase
            log.info('Generating random encryption key...')
            with open('/dev/urandom', 'rb', 0) as fh: # No buffering
                data_pw = fh.read(32)

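            # Two-level key scheme: store the random master key encrypted
            # with the user's passphrase, then use the master key itself for
            # all further objects. The passphrase can thus be changed later
            # without re-encrypting any data.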
            bucket.passphrase = wrap_pw
            bucket['s3ql_passphrase'] = data_pw
            bucket.passphrase = data_pw

        # Setup database
        home = get_bucket_home(options.storage_url, options.homedir)

        # There can't be a corresponding bucket, so we can safely delete
        # these files.
        if os.path.exists(home + '.db'):
            os.unlink(home + '.db')
        if os.path.exists(home + '-cache'):
            shutil.rmtree(home + '-cache')

        log.info('Creating metadata tables...')
        db = Connection(home + '.db')
        create_tables(db)
        init_tables(db)

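        # File system parameters, stored both with the metadata object in
        # the bucket and locally in the .params file.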
        param = dict()
        param['revision'] = CURRENT_FS_REV
        param['seq_no'] = 0
        param['label'] = options.label
        param['blocksize'] = options.blocksize * 1024
        param['needs_fsck'] = False
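        # time.timezone is the local UTC offset, so these timestamps are
        # stored in local time.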
        param['last_fsck'] = time.time() - time.timezone
        param['last-modified'] = time.time() - time.timezone
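        # Create the initial sequence number object; other S3QL commands use
        # it to check whether their metadata copy is up to date.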
        bucket.store('s3ql_seq_no_%d' % param['seq_no'], 'Empty')

        log.info('Saving metadata...')
        fh = tempfile.TemporaryFile()
        dump_metadata(fh, db)

        log.info("Compressing & uploading metadata...")
        fh.seek(0)
        bucket.store_fh("s3ql_metadata", fh, param)
        fh.close()
        with open(home + '.params', 'wb') as fh:
            pickle.dump(param, fh, 2)


if __name__ == '__main__':
    main(sys.argv[1:])