The following 34 code examples, extracted from open-source Python projects, illustrate how to use fabric.api.env.key_filename.
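As a quick orientation before the project examples, here is a minimal, self-contained sketch of the basic pattern (Fabric 1.x assumed; the host address, user, and key path below are placeholder values, not taken from any of the examples):

# Minimal usage sketch (Fabric 1.x). Host, user, and key path are placeholders.
from fabric.api import env, run

env.host_string = '203.0.113.10'        # placeholder host
env.user = 'deploy'                     # placeholder user
env.key_filename = '~/.ssh/deploy.pem'  # may also be a list of key paths

def uptime():
    # Runs on the remote host selected above, e.g. via `fab uptime`.
    run('uptime')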
def configure_env():
    ''' Configures the fabric env. '''
    config = get_config()
    stage = get_stage()
    stage_config = get_stage_config(stage)

    env.user = stage_config.get('user') or config['user']
    env.port = stage_config.get('port') or config['port']
    env.cwd = stage_config.get('app_dir') or config['app_dir']
    env.key_filename = stage_config.get(
        'key_filename') or config['key_filename']
    env.hosts = [stage_config['host']]

    ssh_forward_agent = stage_config.get(
        'ssh_forward_agent') or config['ssh_forward_agent']
    env.forward_agent = (
        ssh_forward_agent and
        str(ssh_forward_agent).lower() == 'true'
    )

    # If Verbose logging is turned on show verbose logs.
    verbose_logging = stage_config.get('verbose_logging') or config[
        'verbose_logging']
    if str(verbose_logging).lower() == 'true':
        set_verbose_logging()
def deploy_test(key_file_name="../ec2.pem"):
    env.key_filename = key_file_name
    changes = local("git status --porcelain", capture=True)

    if len(changes):
        print " {}".format(changes)
        proceed = prompt(
            "you have uncommitted changes, do you want to proceed",
            default=False,
            validate=bool
        )
        if not proceed:
            return

    git_branch_name = local('git rev-parse --abbrev-ref HEAD', capture=True)

    with prefix(". /usr/share/virtualenvwrapper/virtualenvwrapper.sh"):
        with prefix("workon {}".format(virtual_env_name)):
            run("git pull origin {}".format(git_branch_name))
            run("pip install -r requirements.txt")
            run("alembic upgrade head")
            run("pkill twistd || true")
            run("pkill gloss || true")
            run("twistd multiple_mllp --receiver gloss.ohc_receiver.OhcReceiver")
            run("gunicorn -w 1 -b 0.0.0.0:6767 -D gloss.api:app")
def vagrant():
    env.srvr = 'vagrant'
    env.path = os.path.join('/', env.srvr)

    # this is necessary because ssh will fail when known hosts keys vary;
    # every time vagrant is destroyed, a new key will be generated
    env.disable_known_hosts = True

    env.within_virtualenv = 'source {}'.format(
        os.path.join('~', 'venv', 'bin', 'activate'))

    result = dict(line.split()
                  for line in local('vagrant ssh-config',
                                    capture=True).splitlines())

    env.hosts = ['%s:%s' % (result['HostName'], result['Port'])]
    env.key_filename = result['IdentityFile']
    env.user = result['User']

    print(env.key_filename, env.hosts, env.user)
def install(host_config):
    env.host_string = helper.get_env_host_string(host_config)
    env.user = helper.get_env_user(host_config)
    env.key_filename = helper.get_env_key_filename(host_config)

    software_config = helper.get_software_config(host_config, 'redis')
    redis_version = software_config.get('version', '3.2.6')
    redis_port = software_config.get('port', '6379')
    redis_data_dir = software_config.get('data-directory', '/var/lib/redis')

    machine.disable_transparent_huge_pages(env.host_string)
    machine.set_overcommit_memory(env.host_string, 1)

    put('{}/software/scripts/redis.sh'.format(os.getcwd()), '~/', use_sudo=True)
    sudo("chmod +x redis.sh")
    sudo(". ~/redis.sh {} {} {}".format(redis_version, redis_port, redis_data_dir))
def install(host_config):
    env.host_string = helper.get_env_host_string(host_config)
    env.user = helper.get_env_user(host_config)
    env.key_filename = helper.get_env_key_filename(host_config)

    software_config = helper.get_software_config(host_config, 'zookeeper')

    java.v8_install(host_config)

    port = software_config.get('port', '2181')
    zk_server_id = software_config.get('id', '0')
    zk_nodes = ",".join(software_config.get('nodes'))

    put('{}/software/scripts/zookeeper.sh'.format(os.getcwd()), '~/', use_sudo=True)
    sudo("chmod +x zookeeper.sh")
    sudo(". ~/zookeeper.sh {} {} {}".format(port, zk_server_id, zk_nodes))
def mount_ebs_volumes(host_config):
    env.host_string = helper.get_env_host_string(host_config)
    env.user = helper.get_env_user(host_config)
    env.key_filename = helper.get_env_key_filename(host_config)

    sudo("apt-get -y install xfsprogs")

    for ebs in host_config['ec2-mounts']:
        device = ebs['device']
        mount = ebs['mount']
        sudo("mkdir -p {}".format(mount))
        sudo("mv /etc/fstab /etc/fstab.old")
        sudo("touch /etc/fstab")

        if sudo('mkfs.xfs -f {0}'.format(device), warn_only=True):
            run("echo '{0}\t{1}\txfs\tdefaults\t0\t0' | sudo tee -a /etc/fstab".format(device, mount))

        sudo('sudo mount -a')
        logger.info("EBS volume {} : {} mounted.".format(device, mount))
def connect_to_instance_in_ssh(address, keypair_path, user='root'):
    """
    Run the command LS on a given instance

    :param address: ip or dns name of a machine
    :type address: str
    :param keypair_path: keypair path
    :type keypair_path: str
    """
    env.host_string = address
    env.user = user
    env.parallel = False
    env.key_filename = keypair_path
    env.disable_known_hosts = True
    env.connection_attempts = 10
    env.timeout = 120

    ocb.log(run('ls -la /root'), level='INFO')
def run_download_db(filename=None):
    """
    Downloads the database from the server into your local machine.

    In order to import the downloaded database, run ``fab import_db``

    Usage::

        fab prod run_download_db
        fab prod run_download_db:filename=foobar.dump

    """
    if not filename:
        filename = settings.DB_DUMP_FILENAME

    if env.key_filename:
        ssh = settings.PROJECT_NAME
    else:
        ssh = '{0}@{1}'.format(env.user, env.host_string)

    local('scp {0}:{1}{2} .'.format(
        ssh, settings.FAB_SETTING('SERVER_DB_BACKUP_DIR'), filename))
def run_download_media(filename=None):
    """
    Downloads the media dump from the server into your local machine.

    In order to import the downloaded media dump, run ``fab import_media``

    Usage::

        fab prod run_download_media
        fab prod run_download_media:filename=foobar.tar.gz

    """
    if not filename:
        filename = settings.MEDIA_DUMP_FILENAME

    if env.key_filename:
        ssh = settings.PROJECT_NAME
    else:
        ssh = '{0}@{1}'.format(env.user, env.host_string)

    local('scp {0}:{1}{2} .'.format(
        ssh, settings.FAB_SETTING('SERVER_MEDIA_BACKUP_DIR'), filename))
def run_upload_db(filename=None):
    """
    Uploads your local database to the server.

    You can create a local dump with ``fab export_db`` first.

    In order to import the database on the server you still need to SSH
    into the server.

    Usage::

        fab prod run_upload_db
        fab prod run_upload_db:filename=foobar.dump

    """
    if not filename:
        filename = settings.DB_DUMP_FILENAME

    if env.key_filename:
        ssh = settings.PROJECT_NAME
    else:
        ssh = '{0}@{1}'.format(env.user, env.host_string)

    # Note: the original used '{3}' here, which raises IndexError with
    # only three format arguments; '{2}' is the intended index.
    local('scp {0} {1}:{2}'.format(
        filename, ssh, settings.FAB_SETTING('SERVER_DB_BACKUP_DIR')))
def setup_fabric():
    env.user = 'root'
    env.abort_exception = UpgradeError
    env.key_filename = settings.SSH_KEY_FILENAME
    env.warn_only = True
    output.stdout = False
    output.aborts = False
def pre_start_hook(app):
    from ..nodes.models import Node
    # env.warn_only = True
    env.user = 'root'
    env.key_filename = SSH_KEY_FILENAME
    output.stdout = False
    output.running = False
    PLUGIN_DIR = '/usr/libexec/kubernetes/kubelet-plugins/net/exec/kuberdock/'
    with app.app_context():
        for node in Node.query.all():
            env.host_string = node.hostname
            put('./node_network_plugin.sh', PLUGIN_DIR + 'kuberdock')
            put('./node_network_plugin.py', PLUGIN_DIR + 'kuberdock.py')
            run('systemctl restart kuberdock-watcher')
            print 'Kuberdock node parts are updated'
def __init__(self):
    env.user = 'root'
    env.skip_bad_hosts = True
    env.key_filename = SSH_KEY_FILENAME
    self._cached_drives = None
    self._cached_node_ip = None
def vagrant():
    """
    Run commands using vagrant
    """
    vc = get_vagrant_config()
    # change from the default user to 'vagrant'
    env.user = vc['User']
    # connect to the port-forwarded ssh
    env.hosts = ['%s:%s' % (vc['HostName'], vc['Port'])]
    # use vagrant ssh key
    env.key_filename = vc['IdentityFile'].strip('"')
    # Forward the agent if specified:
    env.forward_agent = vc.get('ForwardAgent', 'no') == 'yes'
def _get_vagrant_connection():
    local('vagrant up')
    result = local('vagrant ssh-config', capture=True)

    hostname = re.findall(r'HostName\s+([^\n]+)', result)[0]
    port = re.findall(r'Port\s+([^\n]+)', result)[0]

    env.hosts = ['%s:%s' % (hostname, port)]
    env.user = re.findall(r'User\s+([^\n]+)', result)[0]
    env.key_filename = re.findall(r'IdentityFile\s+([^\n]+)',
                                  result)[0].lstrip("\"").rstrip("\"")
def restart_all(config_file):
    """Restarts crate service on all hosts"""
    cfg = helper.get_config(config_file)
    for host_config in cfg['hosts']:
        env.host_string = helper.get_env_host_string(host_config)
        env.user = helper.get_env_user(host_config)
        env.key_filename = helper.get_env_key_filename(host_config)
        sudo('service crate restart')
def tail_log(config_file, lines=50):
    """Tails the log"""
    cfg = helper.get_config(config_file)
    for host_config in cfg['hosts']:
        env.host_string = helper.get_env_host_string(host_config)
        env.user = helper.get_env_user(host_config)
        env.key_filename = helper.get_env_key_filename(host_config)
        sudo('tail -{} /var/log/crate/uber-cluster.log'.format(lines))
def broker_install(host_config):
    env.host_string = helper.get_env_host_string(host_config)
    env.user = helper.get_env_user(host_config)
    env.key_filename = helper.get_env_key_filename(host_config)

    java.v8_install(host_config)

    software_config = helper.get_software_config(host_config, 'kafka-broker')
    version = software_config.get('version', '0.10.0.1')

    put('{}/software/scripts/kafka-broker.sh'.format(os.getcwd()), '~/', use_sudo=True)
    sudo("chmod +x kafka-broker.sh")
    sudo(". ~/kafka-broker.sh {}".format(version))

    broker_id = software_config.get('broker-id', '0')
    zk_hosts = software_config.get('zookeeper-hosts', 'localhost:2181')
    log_directories = software_config.get('log-directories', '/var/lib/kafka-logs')

    tag = '## ---- CUSTOM CONFIGURATION ---'
    sudo('echo "{}" | sudo tee -a /srv/kafka/config/server.properties'.format(tag))
    sudo('echo "delete.topic.enable = true" | sudo tee -a /srv/kafka/config/server.properties')
    sudo('echo "broker.id={}" | sudo tee -a /srv/kafka/config/server.properties'.format(broker_id))
    sudo('echo "zookeeper.connect={}" | sudo tee -a /srv/kafka/config/server.properties'.format(zk_hosts))
    sudo('echo "log.dirs={}" | sudo tee -a /srv/kafka/config/server.properties'.format(log_directories))
    sudo('echo "listeners=PLAINTEXT://{}:9093" | sudo tee -a /srv/kafka/config/server.properties'.format(host_config['private-ip']))
    sudo('echo "{}" | sudo tee -a /srv/kafka/config/server.properties'.format(tag))

    sudo("service kafka restart")
def manager_install(host_config):
    env.host_string = helper.get_env_host_string(host_config)
    env.user = helper.get_env_user(host_config)
    env.key_filename = helper.get_env_key_filename(host_config)

    software_config = helper.get_software_config(host_config, 'kafka-manager')
    zk_hosts = software_config.get('zookeeper-hosts', 'localhost:2181')

    put('{}/software/scripts/kafka-manager.sh'.format(os.getcwd()), '~/', use_sudo=True)
    sudo("chmod +x kafka-manager.sh")
    sudo(". ~/kafka-manager.sh {}".format(zk_hosts))
def delete_topic(config_file, topic):
    """Deletes a Kafka topic | args: config_file, topic name"""
    cfg = helper.get_config(config_file)
    host_config = get_kafka_host_cfg(cfg)
    cmd = "/srv/kafka/bin/kafka-topics.sh --delete --zookeeper {} --topic {}".format(
        get_zk_host(cfg), topic)

    env.host_string = helper.get_env_host_string(host_config)
    env.user = helper.get_env_user(host_config)
    env.key_filename = helper.get_env_key_filename(host_config)
    sudo(cmd)
def install_kv(host_config):
    env.host_string = helper.get_env_host_string(host_config)
    env.user = helper.get_env_user(host_config)
    env.key_filename = helper.get_env_key_filename(host_config)

    put('{}/software/scripts/riak-kv.sh'.format(getcwd()), '~/', use_sudo=True)
    sudo("chmod +x riak-kv.sh")
    sudo(". ~/riak-kv.sh")
def v8_install(host_config):
    env.host_string = helper.get_env_host_string(host_config)
    env.user = helper.get_env_user(host_config)
    env.key_filename = helper.get_env_key_filename(host_config)

    put('{}/software/scripts/java-8.sh'.format(getcwd()), '~/', use_sudo=True)
    sudo("chmod +x java-8.sh")
    sudo(". ~/java-8.sh")
def nodetool(config_file, cmd):
    """Send commands to Cassandra nodetool | args: config file, nodetool command"""
    cfg = helper.get_config(config_file)
    host_config = get_cassandra_host_cfg(cfg)

    env.host_string = helper.get_env_host_string(host_config)
    env.user = helper.get_env_user(host_config)
    env.key_filename = helper.get_env_key_filename(host_config)
    sudo("nodetool {}".format(cmd))
def __init__(self, hostip):
    env.hosts = hostip
    env.user = settings.DEPLOYUSER
    env.abort_on_prompts = True
    env.key_filename = settings.DEPLOYKEY
def init_host(self):
    """
    Initialize the host
    """
    env.host_string = self.host_string
    env.user = self.host_user
    env.password = self.host_passwd
    env.key_filename = self.host_keyfile
def login_server(self):
    """
    Log in to the server
    """
    local('ssh -i {0} {1}@{2}'.format(
        env.key_filename, env.user, env.host_string
    ))
def __init__(self, user, ssh_key, hosts, repository, password):
    if None in [user, ssh_key, hosts, repository]:
        # XXX: Charm should block instead.
        # https://bugs.launchpad.net/bugs/1638772
        raise Exception('Missing configuration')

    self.user = user
    self.ssh_key = ssh_key
    self.hosts = hosts.split()
    self.repository = repository
    self.password = password
    self.key_filename = self._write_key()
    self._init_fabric()
def _init_fabric(self):
    env.warn_only = True
    env.connection_attempts = 10
    env.timeout = 10
    env.user = self.user
    env.key_filename = self.key_filename
    env.hosts = self.hosts
    env.password = self.password
def _production_env():
    # Speed up connection setup to the server.
    env.disable_known_hosts = True
    env.key_filename = [os.path.join(os.environ['HOME'], '.ssh', 'id_rsa')]
    env.project_root = '~/app/'
def run_command(args):
    cmd = args.cmd
    if not cmd:
        print "Please enter command to run. Example: kitrun.py remotecmd \"ls -l\""
        return

    tier_config = get_tier_config()
    service_info = get_service_info()
    tier = tier_config["tier"]
    region = tier_config["region"]
    service_name = service_info["name"]
    public = args.public

    pem_file = None
    for deployable in tier_config["deployables"]:
        if deployable["name"] == service_name:
            pem_file = deployable["ssh_key"]
            break
    else:
        print "Service {} not found in tier config for {}".format(service_name, tier)
        sys.exit(1)

    print "\n*** EXECUTING REMOTE COMMAND '{}' ON SERVICE '{}' / TIER '{}' IN REGION '{}'\n".format(
        cmd, service_name, tier, region)

    filters = {
        'tag:service-name': service_name,
        "instance-state-name": "running",
        "tag:tier": tier,
    }
    print "Finding ec2 instances in region %s from filters: %s" % (region, filters)
    instances = get_ec2_instances(region, filters=filters)
    if not instances:
        print "Found no running ec2 instances with tag service-name={}".format(service_name)
        return

    for ec2 in instances:
        if not public:
            ip_address = ec2.private_ip_address
        else:
            ip_address = ec2.ip_address

        print "*** Running '{}' on {}...".format(cmd, ip_address)
        env.host_string = ip_address
        env.user = EC2_USERNAME
        env.key_filename = '~/.ssh/{}'.format(pem_file)
        run(cmd)
        print
def make_celery(app=None):
    if app is None:
        app = create_app('kubedock', os.path.dirname(__file__))

    if SENTRY_ENABLE:
        import socket
        import celery
        import raven
        from raven.contrib.celery import register_signal
        from raven.contrib.celery import register_logger_signal
        from kubedock.settings import MASTER_IP
        from kubedock.settings import SENTRY_DSN, SENTRY_EXCLUDE_PATHS
        from kubedock.settings import SENTRY_PROCESSORS
        from kubedock.utils import get_version
        from kubedock.kapi.licensing import get_license_info
        authkey = get_license_info().get('auth_key', 'no installation id')
        from celery.utils import log

        class Celery(celery.Celery):

            def on_configure(self):
                hostname = "{}({})".format(socket.gethostname(), MASTER_IP)
                tags = {'installation_id': authkey}
                client = raven.Client(SENTRY_DSN, name=hostname,
                                      release=get_version('kuberdock'),
                                      tags=tags,
                                      processors=SENTRY_PROCESSORS,
                                      exclude_paths=SENTRY_EXCLUDE_PATHS)

                # register a custom filter to filter out duplicate logs
                register_logger_signal(client)

                # hook into the Celery error handler
                register_signal(client)
    else:
        from celery import Celery

    celery = Celery(app.import_name, broker=app.config['CELERY_BROKER_URL'])
    celery.conf.update(app.config)
    TaskBase = celery.Task

    class ContextTask(TaskBase):
        abstract = True
        flask_app = app

        def __call__(self, *args, **kwargs):
            with app.app_context():
                env.user = 'root'
                env.key_filename = SSH_KEY_FILENAME
                return TaskBase.__call__(self, *args, **kwargs)

    celery.Task = ContextTask
    return celery
def db_install(host_config, config):
    env.host_string = helper.get_env_host_string(host_config)
    env.user = helper.get_env_user(host_config)
    env.key_filename = helper.get_env_key_filename(host_config)

    software_config = helper.get_software_config(host_config, 'crate')

    sudo("sysctl -w vm.max_map_count=262144")

    put('{}/software/scripts/crate.sh'.format(os.getcwd()), '~/', use_sudo=True)
    sudo(". ~/crate.sh")

    cluster_name = software_config.get('cluster-name')
    data_dir = software_config.get('data-dir')
    heap_size = software_config.get('heap-size', '2g')
    security_group = software_config.get('security-group')
    product_tag = software_config.get('product-tag')
    aws_access_key = software_config.get('aws-access-key')
    aws_secret_key = software_config.get('aws-secret-key')

    configfile = '/etc/crate/crate.yml'
    sudo('cp {} {}.save'.format(configfile, configfile))
    # Note: the original passed cluster_name as the first format argument
    # here, which appended the header to a file named after the cluster.
    sudo('echo "### CrateDB Settings ###" | sudo tee -a {}'.format(configfile))
    sudo('echo "cluster.name: {}" | sudo tee -a {}'.format(cluster_name, configfile))
    sudo('echo "node.name: node-{}" | sudo tee -a {}'.format(host_config['private-ip'], configfile))
    sudo('echo "path.data: {}" | sudo tee -a {}'.format(data_dir, configfile))
    sudo('echo "network.publish_host: {}" | sudo tee -a {}'.format(host_config['private-ip'], configfile))
    sudo('echo "network.host: _site_" | sudo tee -a {}'.format(configfile))
    sudo('echo "psql.enabled: true" | sudo tee -a {}'.format(configfile))
    sudo('echo "psql.port: 6432" | sudo tee -a {}'.format(configfile))
    sudo('echo "license.enterprise: false" | sudo tee -a {}'.format(configfile))

    if security_group is not None:
        sudo('echo "discovery.type: ec2" | sudo tee -a {}'.format(configfile))
        sudo('echo "discovery.ec2.groups: {}" | sudo tee -a {}'.format(security_group, configfile))

    if product_tag is not None:
        sudo('echo "discovery.ec2.tag.product: {}" | sudo tee -a {}'.format(product_tag, configfile))

    if aws_access_key is not None:
        sudo('echo "cloud.aws.access_key: {}" | sudo tee -a {}'.format(aws_access_key, configfile))
        sudo('echo "cloud.aws.secret_key: {}" | sudo tee -a {}'.format(aws_secret_key, configfile))

    default_configfile = '/etc/default/crate'
    sudo('cp {} {}.save'.format(default_configfile, default_configfile))
    sudo('echo "### CrateDB Default Settings ###" | sudo tee {}'.format(default_configfile))
    sudo('echo "CRATE_HEAP_SIZE={}" | sudo tee -a {}'.format(heap_size, default_configfile))

    for mount in data_dir.split(','):
        sudo('chown -R crate:crate {}'.format(mount))

    sudo("service crate restart")
def install(host_config):
    env.host_string = helper.get_env_host_string(host_config)
    env.user = helper.get_env_user(host_config)
    env.key_filename = helper.get_env_key_filename(host_config)

    software_config = helper.get_software_config(host_config, 'cassandra')

    java.v8_install(host_config)

    put('{}/software/scripts/cassandra.sh'.format(os.getcwd()), '~/', use_sudo=True)
    sudo("chmod +x cassandra.sh")
    sudo(". ~/cassandra.sh")

    # Configuration values from config, or defaults if they do not exist
    cluster_name = software_config.get('cluster-name', 'Test Cluster')
    data_file_directory = software_config.get('data-file-directory', '/var/lib/cassandra/data')
    commit_log_directory = software_config.get('commit-log-directory', '/var/lib/cassandra/commit_log')
    saved_caches_directory = software_config.get('saved-caches-directory', '/var/lib/cassandra/saved_caches')
    endpoint_snitch = software_config.get('endpoint-snitch', 'SimpleSnitch')
    seeds = software_config.get('seeds', host_config['private-ip'])
    listen_address = software_config.get('listen-address', host_config['private-ip'])
    rpc_address = software_config.get('rpc-address', host_config['private-ip'])

    configfile = '{}/software/config/cassandra/cassandra.yaml'.format(os.getcwd())
    tempfile = 'cassandra.yaml'

    # Fill in the template placeholders and write a local copy to upload.
    configdata = open(configfile, 'r').read()
    configdata = configdata.replace('{{CLUSTER_NAME}}', cluster_name)
    configdata = configdata.replace('{{DATA_FILE_DIRECTORY}}', data_file_directory)
    configdata = configdata.replace('{{COMMIT_LOG_DIRECTORY}}', commit_log_directory)
    configdata = configdata.replace('{{SAVED_CACHES_DIRECTORY}}', saved_caches_directory)
    configdata = configdata.replace('{{ENDPOINT_SNITCH}}', endpoint_snitch)
    configdata = configdata.replace('{{SEEDS}}', seeds)
    configdata = configdata.replace('{{LISTEN_ADDRESS}}', listen_address)
    configdata = configdata.replace('{{RPC_ADDRESS}}', rpc_address)

    _file = open(tempfile, 'w')
    _file.write(configdata)
    _file.close()

    sudo('mkdir -p {0}; chown -R cassandra {0}'.format(data_file_directory))
    sudo('mkdir -p {0}; chown -R cassandra {0}'.format(commit_log_directory))
    sudo('mkdir -p {0}; chown -R cassandra {0}'.format(saved_caches_directory))

    put(tempfile, '/etc/cassandra/cassandra.yaml', use_sudo=True)

    sudo('sudo pkill -f CassandraDaemon', warn_only=True)
    sudo('service cassandra restart')

    os.remove(tempfile)
def db_install(host_config, config):
    env.host_string = helper.get_env_host_string(host_config)
    env.user = helper.get_env_user(host_config)
    env.key_filename = helper.get_env_key_filename(host_config)

    software_config = helper.get_software_config(host_config, 'citusdb')
    db_name = software_config.get('db-name')
    db_user = software_config.get('db-user')
    db_password = software_config.get('db-password')
    data_dir = software_config.get('data-dir')

    postgres_config = '/etc/postgresql/9.6/main/postgresql.conf'

    put('{}/software/scripts/citusdb.sh'.format(os.getcwd()), '~/', use_sudo=True)
    sudo(". ~/citusdb.sh")

    sudo('sudo service postgresql stop')

    for mount in data_dir.split(','):
        sudo('chown -R postgres:postgres {}'.format(mount))

    sudo('sudo sed -i.bu "s/data_directory/#data_directory/" {}'.format(postgres_config))
    sudo('echo "data_directory = \'{}\'" | sudo tee -a {}'.format(data_dir, postgres_config))
    sudo('sudo -u postgres bash -c "/usr/lib/postgresql/9.6/bin/initdb -D {}"'.format(data_dir))

    sudo('service postgresql start')
    sudo('sudo pg_conftool 9.6 main set shared_preload_libraries citus')
    sudo("sudo pg_conftool 9.6 main set listen_addresses '*'")

    sudo("cp /etc/postgresql/9.6/main/pg_hba.conf /etc/postgresql/9.6/main/pg_hba.conf.backup")
    sudo('echo "##### Custom Configuration ######" | sudo tee /etc/postgresql/9.6/main/pg_hba.conf')
    sudo('echo "local all postgres peer" | sudo tee -a /etc/postgresql/9.6/main/pg_hba.conf')
    sudo('echo "local all all peer" | sudo tee -a /etc/postgresql/9.6/main/pg_hba.conf')
    sudo('echo "host all all 10.0.0.0/8 trust" | sudo tee -a /etc/postgresql/9.6/main/pg_hba.conf')
    sudo('echo "host all all 127.0.0.1/32 trust" | sudo tee -a /etc/postgresql/9.6/main/pg_hba.conf')
    sudo('echo "host all all ::1/128 trust" | sudo tee -a /etc/postgresql/9.6/main/pg_hba.conf')
    sudo('echo "host all all 0.0.0.0/0 md5" | sudo tee -a /etc/postgresql/9.6/main/pg_hba.conf')

    sudo('update-rc.d postgresql enable')
    sudo('sudo service postgresql restart')

    sudo('sudo -i -u postgres psql -c "CREATE EXTENSION citus;"')
    sudo('sudo -u postgres psql -c \"CREATE USER {} WITH PASSWORD \'{}\'\";'.format(db_user, db_password))
    sudo('sudo -u postgres psql -c \"ALTER USER {} WITH SUPERUSER\";'.format(db_user))
    sudo('sudo -u postgres psql -c \"CREATE DATABASE {} OWNER {}\";'.format(db_name, db_user))